Coverage Report

Created: 2022-08-24 06:11

/src/aom/av1/decoder/decodeframe.c
Line | Count | Source
1
/*
2
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
3
 *
4
 * This source code is subject to the terms of the BSD 2 Clause License and
5
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
6
 * was not distributed with this source code in the LICENSE file, you can
7
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
8
 * Media Patent License 1.0 was not distributed with this source code in the
9
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
10
 */
11
12
#include <assert.h>
13
#include <stddef.h>
14
15
#include "config/aom_config.h"
16
#include "config/aom_dsp_rtcd.h"
17
#include "config/aom_scale_rtcd.h"
18
#include "config/av1_rtcd.h"
19
20
#include "aom/aom_codec.h"
21
#include "aom_dsp/aom_dsp_common.h"
22
#include "aom_dsp/binary_codes_reader.h"
23
#include "aom_dsp/bitreader.h"
24
#include "aom_dsp/bitreader_buffer.h"
25
#include "aom_mem/aom_mem.h"
26
#include "aom_ports/aom_timer.h"
27
#include "aom_ports/mem.h"
28
#include "aom_ports/mem_ops.h"
29
#include "aom_scale/aom_scale.h"
30
#include "aom_util/aom_thread.h"
31
32
#if CONFIG_BITSTREAM_DEBUG || CONFIG_MISMATCH_DEBUG
33
#include "aom_util/debug_util.h"
34
#endif  // CONFIG_BITSTREAM_DEBUG || CONFIG_MISMATCH_DEBUG
35
36
#include "av1/common/alloccommon.h"
37
#include "av1/common/cdef.h"
38
#include "av1/common/cfl.h"
39
#if CONFIG_INSPECTION
40
#include "av1/decoder/inspection.h"
41
#endif
42
#include "av1/common/common.h"
43
#include "av1/common/entropy.h"
44
#include "av1/common/entropymode.h"
45
#include "av1/common/entropymv.h"
46
#include "av1/common/frame_buffers.h"
47
#include "av1/common/idct.h"
48
#include "av1/common/mvref_common.h"
49
#include "av1/common/pred_common.h"
50
#include "av1/common/quant_common.h"
51
#include "av1/common/reconinter.h"
52
#include "av1/common/reconintra.h"
53
#include "av1/common/resize.h"
54
#include "av1/common/seg_common.h"
55
#include "av1/common/thread_common.h"
56
#include "av1/common/tile_common.h"
57
#include "av1/common/warped_motion.h"
58
#include "av1/common/obmc.h"
59
#include "av1/decoder/decodeframe.h"
60
#include "av1/decoder/decodemv.h"
61
#include "av1/decoder/decoder.h"
62
#include "av1/decoder/decodetxb.h"
63
#include "av1/decoder/detokenize.h"
64
65
#define ACCT_STR __func__
66
67
0
#define AOM_MIN_THREADS_PER_TILE 1
68
0
#define AOM_MAX_THREADS_PER_TILE 2
69
70
// This is needed by ext_tile related unit tests.
71
#define EXT_TILE_DEBUG 1
72
#define MC_TEMP_BUF_PELS                       \
73
0
  (((MAX_SB_SIZE)*2 + (AOM_INTERP_EXTEND)*2) * \
74
0
   ((MAX_SB_SIZE)*2 + (AOM_INTERP_EXTEND)*2))
75
76
// Checks that the remaining bits start with a 1 and end with 0s.
77
// It consumes an additional byte, if already byte aligned before the check.
78
0
int av1_check_trailing_bits(AV1Decoder *pbi, struct aom_read_bit_buffer *rb) {
79
  // bit_offset is set to 0 (mod 8) when the reader is already byte aligned
80
0
  int bits_before_alignment = 8 - rb->bit_offset % 8;
81
0
  int trailing = aom_rb_read_literal(rb, bits_before_alignment);
82
0
  if (trailing != (1 << (bits_before_alignment - 1))) {
83
0
    pbi->error.error_code = AOM_CODEC_CORRUPT_FRAME;
84
0
    return -1;
85
0
  }
86
0
  return 0;
87
0
}
88
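
The trailing-bits check above requires the rest of the current byte to be a single 1 bit followed by zeros. A minimal standalone sketch (plain C, not the libaom bit-reader API) of the value the check compares against:

    #include <stdio.h>

    /* Hypothetical helper: value av1_check_trailing_bits expects when it
     * reads the remaining bits of the current byte at bit_offset. */
    static int expected_trailing_literal(int bit_offset) {
      const int bits_before_alignment = 8 - bit_offset % 8; /* 1..8 bits */
      return 1 << (bits_before_alignment - 1);              /* 1, then 0s */
    }

    int main(void) {
      printf("%d\n", expected_trailing_literal(5)); /* 3 bits left -> 0b100 = 4   */
      printf("%d\n", expected_trailing_literal(0)); /* byte aligned -> 0x80 = 128 */
      return 0;
    }
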
89
// Use only_chroma = 1 to only set the chroma planes
90
static AOM_INLINE void set_planes_to_neutral_grey(
91
    const SequenceHeader *const seq_params, const YV12_BUFFER_CONFIG *const buf,
92
0
    int only_chroma) {
93
0
  if (seq_params->use_highbitdepth) {
94
0
    const int val = 1 << (seq_params->bit_depth - 1);
95
0
    for (int plane = only_chroma; plane < MAX_MB_PLANE; plane++) {
96
0
      const int is_uv = plane > 0;
97
0
      uint16_t *const base = CONVERT_TO_SHORTPTR(buf->buffers[plane]);
98
      // Set the first row to neutral grey. Then copy the first row to all
99
      // subsequent rows.
100
0
      if (buf->crop_heights[is_uv] > 0) {
101
0
        aom_memset16(base, val, buf->crop_widths[is_uv]);
102
0
        for (int row_idx = 1; row_idx < buf->crop_heights[is_uv]; row_idx++) {
103
0
          memcpy(&base[row_idx * buf->strides[is_uv]], base,
104
0
                 sizeof(*base) * buf->crop_widths[is_uv]);
105
0
        }
106
0
      }
107
0
    }
108
0
  } else {
109
0
    for (int plane = only_chroma; plane < MAX_MB_PLANE; plane++) {
110
0
      const int is_uv = plane > 0;
111
0
      for (int row_idx = 0; row_idx < buf->crop_heights[is_uv]; row_idx++) {
112
0
        memset(&buf->buffers[plane][row_idx * buf->strides[is_uv]], 1 << 7,
113
0
               buf->crop_widths[is_uv]);
114
0
      }
115
0
    }
116
0
  }
117
0
}
118
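
A minimal sketch of the same fill pattern for a single 8-bit plane, assuming a plain width/height/stride layout rather than the YV12_BUFFER_CONFIG fields used above:

    #include <stdint.h>
    #include <string.h>

    /* Hypothetical helper: write neutral grey (1 << (bit_depth - 1), i.e.
     * 128 for 8-bit) into the first row, then replicate it down the plane. */
    static void fill_neutral_grey_8bit(uint8_t *plane, int width, int height,
                                       int stride) {
      if (width <= 0 || height <= 0) return;
      memset(plane, 1 << 7, width);
      for (int row = 1; row < height; ++row)
        memcpy(plane + (size_t)row * stride, plane, width);
    }
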
119
#if !CONFIG_REALTIME_ONLY
120
static AOM_INLINE void loop_restoration_read_sb_coeffs(
121
    const AV1_COMMON *const cm, MACROBLOCKD *xd, aom_reader *const r, int plane,
122
    int runit_idx);
123
#endif
124
125
0
static int read_is_valid(const uint8_t *start, size_t len, const uint8_t *end) {
126
0
  return len != 0 && len <= (size_t)(end - start);
127
0
}
128
129
static TX_MODE read_tx_mode(struct aom_read_bit_buffer *rb,
130
0
                            int coded_lossless) {
131
0
  if (coded_lossless) return ONLY_4X4;
132
0
  return aom_rb_read_bit(rb) ? TX_MODE_SELECT : TX_MODE_LARGEST;
133
0
}
134
135
static REFERENCE_MODE read_frame_reference_mode(
136
0
    const AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
137
0
  if (frame_is_intra_only(cm)) {
138
0
    return SINGLE_REFERENCE;
139
0
  } else {
140
0
    return aom_rb_read_bit(rb) ? REFERENCE_MODE_SELECT : SINGLE_REFERENCE;
141
0
  }
142
0
}
143
144
static AOM_INLINE void inverse_transform_block(DecoderCodingBlock *dcb,
145
                                               int plane, const TX_TYPE tx_type,
146
                                               const TX_SIZE tx_size,
147
                                               uint8_t *dst, int stride,
148
0
                                               int reduced_tx_set) {
149
0
  tran_low_t *const dqcoeff = dcb->dqcoeff_block[plane] + dcb->cb_offset[plane];
150
0
  eob_info *eob_data = dcb->eob_data[plane] + dcb->txb_offset[plane];
151
0
  uint16_t scan_line = eob_data->max_scan_line;
152
0
  uint16_t eob = eob_data->eob;
153
0
  av1_inverse_transform_block(&dcb->xd, dqcoeff, plane, tx_type, tx_size, dst,
154
0
                              stride, eob, reduced_tx_set);
155
0
  memset(dqcoeff, 0, (scan_line + 1) * sizeof(dqcoeff[0]));
156
0
}
157
158
static AOM_INLINE void read_coeffs_tx_intra_block(
159
    const AV1_COMMON *const cm, DecoderCodingBlock *dcb, aom_reader *const r,
160
0
    const int plane, const int row, const int col, const TX_SIZE tx_size) {
161
0
  MB_MODE_INFO *mbmi = dcb->xd.mi[0];
162
0
  if (!mbmi->skip_txfm) {
163
#if TXCOEFF_TIMER
164
    struct aom_usec_timer timer;
165
    aom_usec_timer_start(&timer);
166
#endif
167
0
    av1_read_coeffs_txb_facade(cm, dcb, r, plane, row, col, tx_size);
168
#if TXCOEFF_TIMER
169
    aom_usec_timer_mark(&timer);
170
    const int64_t elapsed_time = aom_usec_timer_elapsed(&timer);
171
    cm->txcoeff_timer += elapsed_time;
172
    ++cm->txb_count;
173
#endif
174
0
  }
175
0
}
176
177
static AOM_INLINE void decode_block_void(const AV1_COMMON *const cm,
178
                                         DecoderCodingBlock *dcb,
179
                                         aom_reader *const r, const int plane,
180
                                         const int row, const int col,
181
0
                                         const TX_SIZE tx_size) {
182
0
  (void)cm;
183
0
  (void)dcb;
184
0
  (void)r;
185
0
  (void)plane;
186
0
  (void)row;
187
0
  (void)col;
188
0
  (void)tx_size;
189
0
}
190
191
static AOM_INLINE void predict_inter_block_void(AV1_COMMON *const cm,
192
                                                DecoderCodingBlock *dcb,
193
0
                                                BLOCK_SIZE bsize) {
194
0
  (void)cm;
195
0
  (void)dcb;
196
0
  (void)bsize;
197
0
}
198
199
static AOM_INLINE void cfl_store_inter_block_void(AV1_COMMON *const cm,
200
0
                                                  MACROBLOCKD *const xd) {
201
0
  (void)cm;
202
0
  (void)xd;
203
0
}
204
205
static AOM_INLINE void predict_and_reconstruct_intra_block(
206
    const AV1_COMMON *const cm, DecoderCodingBlock *dcb, aom_reader *const r,
207
0
    const int plane, const int row, const int col, const TX_SIZE tx_size) {
208
0
  (void)r;
209
0
  MACROBLOCKD *const xd = &dcb->xd;
210
0
  MB_MODE_INFO *mbmi = xd->mi[0];
211
0
  PLANE_TYPE plane_type = get_plane_type(plane);
212
213
0
  av1_predict_intra_block_facade(cm, xd, plane, col, row, tx_size);
214
215
0
  if (!mbmi->skip_txfm) {
216
0
    eob_info *eob_data = dcb->eob_data[plane] + dcb->txb_offset[plane];
217
0
    if (eob_data->eob) {
218
0
      const bool reduced_tx_set_used = cm->features.reduced_tx_set_used;
219
      // tx_type was read out in av1_read_coeffs_txb.
220
0
      const TX_TYPE tx_type = av1_get_tx_type(xd, plane_type, row, col, tx_size,
221
0
                                              reduced_tx_set_used);
222
0
      struct macroblockd_plane *const pd = &xd->plane[plane];
223
0
      uint8_t *dst = &pd->dst.buf[(row * pd->dst.stride + col) << MI_SIZE_LOG2];
224
0
      inverse_transform_block(dcb, plane, tx_type, tx_size, dst, pd->dst.stride,
225
0
                              reduced_tx_set_used);
226
0
    }
227
0
  }
228
0
  if (plane == AOM_PLANE_Y && store_cfl_required(cm, xd)) {
229
0
    cfl_store_tx(xd, row, col, tx_size, mbmi->bsize);
230
0
  }
231
0
}
232
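
The destination pointer arithmetic above converts a (row, col) position given in 4x4 transform units into a pixel offset; a standalone restatement (MI_SIZE_LOG2 is taken here as a parameter):

    #include <stdint.h>

    /* Hypothetical helper: (row, col) are in 4x4 units, stride in pixels.
     * (row * stride + col) << mi_size_log2 equals
     * (row << mi_size_log2) * stride + (col << mi_size_log2). */
    static uint8_t *dst_at(uint8_t *buf, int stride, int row, int col,
                           int mi_size_log2) {
      return &buf[(row * stride + col) << mi_size_log2];
    }
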
233
static AOM_INLINE void inverse_transform_inter_block(
234
    const AV1_COMMON *const cm, DecoderCodingBlock *dcb, aom_reader *const r,
235
    const int plane, const int blk_row, const int blk_col,
236
0
    const TX_SIZE tx_size) {
237
0
  (void)r;
238
0
  MACROBLOCKD *const xd = &dcb->xd;
239
0
  PLANE_TYPE plane_type = get_plane_type(plane);
240
0
  const struct macroblockd_plane *const pd = &xd->plane[plane];
241
0
  const bool reduced_tx_set_used = cm->features.reduced_tx_set_used;
242
  // tx_type was read out in av1_read_coeffs_txb.
243
0
  const TX_TYPE tx_type = av1_get_tx_type(xd, plane_type, blk_row, blk_col,
244
0
                                          tx_size, reduced_tx_set_used);
245
246
0
  uint8_t *dst =
247
0
      &pd->dst.buf[(blk_row * pd->dst.stride + blk_col) << MI_SIZE_LOG2];
248
0
  inverse_transform_block(dcb, plane, tx_type, tx_size, dst, pd->dst.stride,
249
0
                          reduced_tx_set_used);
250
#if CONFIG_MISMATCH_DEBUG
251
  int pixel_c, pixel_r;
252
  BLOCK_SIZE bsize = txsize_to_bsize[tx_size];
253
  int blk_w = block_size_wide[bsize];
254
  int blk_h = block_size_high[bsize];
255
  const int mi_row = -xd->mb_to_top_edge >> (3 + MI_SIZE_LOG2);
256
  const int mi_col = -xd->mb_to_left_edge >> (3 + MI_SIZE_LOG2);
257
  mi_to_pixel_loc(&pixel_c, &pixel_r, mi_col, mi_row, blk_col, blk_row,
258
                  pd->subsampling_x, pd->subsampling_y);
259
  mismatch_check_block_tx(dst, pd->dst.stride, cm->current_frame.order_hint,
260
                          plane, pixel_c, pixel_r, blk_w, blk_h,
261
                          xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH);
262
#endif
263
0
}
264
265
static AOM_INLINE void set_cb_buffer_offsets(DecoderCodingBlock *dcb,
266
0
                                             TX_SIZE tx_size, int plane) {
267
0
  dcb->cb_offset[plane] += tx_size_wide[tx_size] * tx_size_high[tx_size];
268
0
  dcb->txb_offset[plane] =
269
0
      dcb->cb_offset[plane] / (TX_SIZE_W_MIN * TX_SIZE_H_MIN);
270
0
}
271
272
static AOM_INLINE void decode_reconstruct_tx(
273
    AV1_COMMON *cm, ThreadData *const td, aom_reader *r,
274
    MB_MODE_INFO *const mbmi, int plane, BLOCK_SIZE plane_bsize, int blk_row,
275
0
    int blk_col, int block, TX_SIZE tx_size, int *eob_total) {
276
0
  DecoderCodingBlock *const dcb = &td->dcb;
277
0
  MACROBLOCKD *const xd = &dcb->xd;
278
0
  const struct macroblockd_plane *const pd = &xd->plane[plane];
279
0
  const TX_SIZE plane_tx_size =
280
0
      plane ? av1_get_max_uv_txsize(mbmi->bsize, pd->subsampling_x,
281
0
                                    pd->subsampling_y)
282
0
            : mbmi->inter_tx_size[av1_get_txb_size_index(plane_bsize, blk_row,
283
0
                                                         blk_col)];
284
  // Scale to match transform block unit.
285
0
  const int max_blocks_high = max_block_high(xd, plane_bsize, plane);
286
0
  const int max_blocks_wide = max_block_wide(xd, plane_bsize, plane);
287
288
0
  if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return;
289
290
0
  if (tx_size == plane_tx_size || plane) {
291
0
    td->read_coeffs_tx_inter_block_visit(cm, dcb, r, plane, blk_row, blk_col,
292
0
                                         tx_size);
293
294
0
    td->inverse_tx_inter_block_visit(cm, dcb, r, plane, blk_row, blk_col,
295
0
                                     tx_size);
296
0
    eob_info *eob_data = dcb->eob_data[plane] + dcb->txb_offset[plane];
297
0
    *eob_total += eob_data->eob;
298
0
    set_cb_buffer_offsets(dcb, tx_size, plane);
299
0
  } else {
300
0
    const TX_SIZE sub_txs = sub_tx_size_map[tx_size];
301
0
    assert(IMPLIES(tx_size <= TX_4X4, sub_txs == tx_size));
302
0
    assert(IMPLIES(tx_size > TX_4X4, sub_txs < tx_size));
303
0
    const int bsw = tx_size_wide_unit[sub_txs];
304
0
    const int bsh = tx_size_high_unit[sub_txs];
305
0
    const int sub_step = bsw * bsh;
306
0
    const int row_end =
307
0
        AOMMIN(tx_size_high_unit[tx_size], max_blocks_high - blk_row);
308
0
    const int col_end =
309
0
        AOMMIN(tx_size_wide_unit[tx_size], max_blocks_wide - blk_col);
310
311
0
    assert(bsw > 0 && bsh > 0);
312
313
0
    for (int row = 0; row < row_end; row += bsh) {
314
0
      const int offsetr = blk_row + row;
315
0
      for (int col = 0; col < col_end; col += bsw) {
316
0
        const int offsetc = blk_col + col;
317
318
0
        decode_reconstruct_tx(cm, td, r, mbmi, plane, plane_bsize, offsetr,
319
0
                              offsetc, block, sub_txs, eob_total);
320
0
        block += sub_step;
321
0
      }
322
0
    }
323
0
  }
324
0
}
325
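
decode_reconstruct_tx walks the transform partition tree recursively: a leaf is reached when the current size matches the signalled size, otherwise the area is revisited in sub-block steps. A minimal standalone sketch of that traversal with square sizes in 4x4 units (the real sub_tx_size_map also handles rectangular sizes and chroma early-out):

    #include <stdio.h>

    /* Hypothetical traversal: tx and leaf are square sizes in 4x4 units. */
    static void walk_tx_tree(int blk_row, int blk_col, int tx, int leaf) {
      if (tx <= leaf) {
        printf("leaf at (%d, %d), %dx%d units\n", blk_row, blk_col, tx, tx);
        return;
      }
      const int sub = tx / 2; /* split into four quadrants */
      for (int r = 0; r < tx; r += sub)
        for (int c = 0; c < tx; c += sub)
          walk_tx_tree(blk_row + r, blk_col + c, sub, leaf);
    }

    int main(void) {
      walk_tx_tree(0, 0, 4, 1); /* a 16x16-pixel block down to 4x4 leaves */
      return 0;
    }
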
326
static AOM_INLINE void set_offsets(AV1_COMMON *const cm, MACROBLOCKD *const xd,
327
                                   BLOCK_SIZE bsize, int mi_row, int mi_col,
328
0
                                   int bw, int bh, int x_mis, int y_mis) {
329
0
  const int num_planes = av1_num_planes(cm);
330
0
  const CommonModeInfoParams *const mi_params = &cm->mi_params;
331
0
  const TileInfo *const tile = &xd->tile;
332
333
0
  set_mi_offsets(mi_params, xd, mi_row, mi_col);
334
0
  xd->mi[0]->bsize = bsize;
335
#if CONFIG_RD_DEBUG
336
  xd->mi[0]->mi_row = mi_row;
337
  xd->mi[0]->mi_col = mi_col;
338
#endif
339
340
0
  assert(x_mis && y_mis);
341
0
  for (int x = 1; x < x_mis; ++x) xd->mi[x] = xd->mi[0];
342
0
  int idx = mi_params->mi_stride;
343
0
  for (int y = 1; y < y_mis; ++y) {
344
0
    memcpy(&xd->mi[idx], &xd->mi[0], x_mis * sizeof(xd->mi[0]));
345
0
    idx += mi_params->mi_stride;
346
0
  }
347
348
0
  set_plane_n4(xd, bw, bh, num_planes);
349
0
  set_entropy_context(xd, mi_row, mi_col, num_planes);
350
351
  // Distance of Mb to the various image edges. These are specified to 8th pel
352
  // as they are always compared to values that are in 1/8th pel units
353
0
  set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, mi_params->mi_rows,
354
0
                 mi_params->mi_cols);
355
356
0
  av1_setup_dst_planes(xd->plane, bsize, &cm->cur_frame->buf, mi_row, mi_col, 0,
357
0
                       num_planes);
358
0
}
359
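
set_offsets replicates the mi pointer over the whole x_mis by y_mis region of the strided grid: fill the first row element by element, then copy it to the rows below. A standalone sketch of that pattern (generic pointer grid, not the CommonModeInfoParams layout):

    #include <string.h>

    /* Hypothetical helper: grid points at the top-left entry of the region. */
    static void fill_grid_region(void **grid, int stride, int x_mis, int y_mis) {
      for (int x = 1; x < x_mis; ++x) grid[x] = grid[0];
      int idx = stride;
      for (int y = 1; y < y_mis; ++y) {
        memcpy(&grid[idx], &grid[0], x_mis * sizeof(grid[0]));
        idx += stride;
      }
    }
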
360
static AOM_INLINE void decode_mbmi_block(AV1Decoder *const pbi,
361
                                         DecoderCodingBlock *dcb, int mi_row,
362
                                         int mi_col, aom_reader *r,
363
                                         PARTITION_TYPE partition,
364
0
                                         BLOCK_SIZE bsize) {
365
0
  AV1_COMMON *const cm = &pbi->common;
366
0
  const SequenceHeader *const seq_params = cm->seq_params;
367
0
  const int bw = mi_size_wide[bsize];
368
0
  const int bh = mi_size_high[bsize];
369
0
  const int x_mis = AOMMIN(bw, cm->mi_params.mi_cols - mi_col);
370
0
  const int y_mis = AOMMIN(bh, cm->mi_params.mi_rows - mi_row);
371
0
  MACROBLOCKD *const xd = &dcb->xd;
372
373
#if CONFIG_ACCOUNTING
374
  aom_accounting_set_context(&pbi->accounting, mi_col, mi_row);
375
#endif
376
0
  set_offsets(cm, xd, bsize, mi_row, mi_col, bw, bh, x_mis, y_mis);
377
0
  xd->mi[0]->partition = partition;
378
0
  av1_read_mode_info(pbi, dcb, r, x_mis, y_mis);
379
0
  if (bsize >= BLOCK_8X8 &&
380
0
      (seq_params->subsampling_x || seq_params->subsampling_y)) {
381
0
    const BLOCK_SIZE uv_subsize =
382
0
        ss_size_lookup[bsize][seq_params->subsampling_x]
383
0
                      [seq_params->subsampling_y];
384
0
    if (uv_subsize == BLOCK_INVALID)
385
0
      aom_internal_error(xd->error_info, AOM_CODEC_CORRUPT_FRAME,
386
0
                         "Invalid block size.");
387
0
  }
388
0
}
389
390
typedef struct PadBlock {
391
  int x0;
392
  int x1;
393
  int y0;
394
  int y1;
395
} PadBlock;
396
397
#if CONFIG_AV1_HIGHBITDEPTH
398
static AOM_INLINE void highbd_build_mc_border(const uint8_t *src8,
399
                                              int src_stride, uint8_t *dst8,
400
                                              int dst_stride, int x, int y,
401
0
                                              int b_w, int b_h, int w, int h) {
402
  // Get a pointer to the start of the real data for this row.
403
0
  const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
404
0
  uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
405
0
  const uint16_t *ref_row = src - x - y * src_stride;
406
407
0
  if (y >= h)
408
0
    ref_row += (h - 1) * src_stride;
409
0
  else if (y > 0)
410
0
    ref_row += y * src_stride;
411
412
0
  do {
413
0
    int right = 0, copy;
414
0
    int left = x < 0 ? -x : 0;
415
416
0
    if (left > b_w) left = b_w;
417
418
0
    if (x + b_w > w) right = x + b_w - w;
419
420
0
    if (right > b_w) right = b_w;
421
422
0
    copy = b_w - left - right;
423
424
0
    if (left) aom_memset16(dst, ref_row[0], left);
425
426
0
    if (copy) memcpy(dst + left, ref_row + x + left, copy * sizeof(uint16_t));
427
428
0
    if (right) aom_memset16(dst + left + copy, ref_row[w - 1], right);
429
430
0
    dst += dst_stride;
431
0
    ++y;
432
433
0
    if (y > 0 && y < h) ref_row += src_stride;
434
0
  } while (--b_h);
435
0
}
436
#endif  // CONFIG_AV1_HIGHBITDEPTH
437
438
static AOM_INLINE void build_mc_border(const uint8_t *src, int src_stride,
439
                                       uint8_t *dst, int dst_stride, int x,
440
0
                                       int y, int b_w, int b_h, int w, int h) {
441
  // Get a pointer to the start of the real data for this row.
442
0
  const uint8_t *ref_row = src - x - y * src_stride;
443
444
0
  if (y >= h)
445
0
    ref_row += (h - 1) * src_stride;
446
0
  else if (y > 0)
447
0
    ref_row += y * src_stride;
448
449
0
  do {
450
0
    int right = 0, copy;
451
0
    int left = x < 0 ? -x : 0;
452
453
0
    if (left > b_w) left = b_w;
454
455
0
    if (x + b_w > w) right = x + b_w - w;
456
457
0
    if (right > b_w) right = b_w;
458
459
0
    copy = b_w - left - right;
460
461
0
    if (left) memset(dst, ref_row[0], left);
462
463
0
    if (copy) memcpy(dst + left, ref_row + x + left, copy);
464
465
0
    if (right) memset(dst + left + copy, ref_row[w - 1], right);
466
467
0
    dst += dst_stride;
468
0
    ++y;
469
470
0
    if (y > 0 && y < h) ref_row += src_stride;
471
0
  } while (--b_h);
472
0
}
473
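
Each row written by build_mc_border splits into three runs: samples left of the frame replicate the first column, samples right of it replicate the last column, and the middle is copied verbatim. A standalone restatement of that per-row split (hypothetical helper, 8-bit only):

    #include <stdint.h>
    #include <string.h>

    /* Hypothetical helper: ref_row points at column 0 of a source row of
     * width w; the destination row covers columns [x, x + b_w). */
    static void extend_row_8bit(const uint8_t *ref_row, int x, int b_w, int w,
                                uint8_t *dst) {
      int left = x < 0 ? -x : 0;
      if (left > b_w) left = b_w;
      int right = x + b_w > w ? x + b_w - w : 0;
      if (right > b_w) right = b_w;
      const int copy = b_w - left - right;
      if (left) memset(dst, ref_row[0], left);
      if (copy) memcpy(dst + left, ref_row + x + left, copy);
      if (right) memset(dst + left + copy, ref_row[w - 1], right);
    }
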
474
static INLINE int update_extend_mc_border_params(
475
    const struct scale_factors *const sf, struct buf_2d *const pre_buf,
476
    MV32 scaled_mv, PadBlock *block, int subpel_x_mv, int subpel_y_mv,
477
0
    int do_warp, int is_intrabc, int *x_pad, int *y_pad) {
478
0
  const int is_scaled = av1_is_scaled(sf);
479
  // Get reference width and height.
480
0
  int frame_width = pre_buf->width;
481
0
  int frame_height = pre_buf->height;
482
483
  // Do border extension if there is motion or
484
  // width/height is not a multiple of 8 pixels.
485
0
  if ((!is_intrabc) && (!do_warp) &&
486
0
      (is_scaled || scaled_mv.col || scaled_mv.row || (frame_width & 0x7) ||
487
0
       (frame_height & 0x7))) {
488
0
    if (subpel_x_mv || (sf->x_step_q4 != SUBPEL_SHIFTS)) {
489
0
      block->x0 -= AOM_INTERP_EXTEND - 1;
490
0
      block->x1 += AOM_INTERP_EXTEND;
491
0
      *x_pad = 1;
492
0
    }
493
494
0
    if (subpel_y_mv || (sf->y_step_q4 != SUBPEL_SHIFTS)) {
495
0
      block->y0 -= AOM_INTERP_EXTEND - 1;
496
0
      block->y1 += AOM_INTERP_EXTEND;
497
0
      *y_pad = 1;
498
0
    }
499
500
    // Skip border extension if block is inside the frame.
501
0
    if (block->x0 < 0 || block->x1 > frame_width - 1 || block->y0 < 0 ||
502
0
        block->y1 > frame_height - 1) {
503
0
      return 1;
504
0
    }
505
0
  }
506
0
  return 0;
507
0
}
508
509
static INLINE void extend_mc_border(const struct scale_factors *const sf,
510
                                    struct buf_2d *const pre_buf,
511
                                    MV32 scaled_mv, PadBlock block,
512
                                    int subpel_x_mv, int subpel_y_mv,
513
                                    int do_warp, int is_intrabc, int highbd,
514
                                    uint8_t *mc_buf, uint8_t **pre,
515
0
                                    int *src_stride) {
516
0
  int x_pad = 0, y_pad = 0;
517
0
  if (update_extend_mc_border_params(sf, pre_buf, scaled_mv, &block,
518
0
                                     subpel_x_mv, subpel_y_mv, do_warp,
519
0
                                     is_intrabc, &x_pad, &y_pad)) {
520
    // Get reference block pointer.
521
0
    const uint8_t *const buf_ptr =
522
0
        pre_buf->buf0 + block.y0 * pre_buf->stride + block.x0;
523
0
    int buf_stride = pre_buf->stride;
524
0
    const int b_w = block.x1 - block.x0;
525
0
    const int b_h = block.y1 - block.y0;
526
527
0
#if CONFIG_AV1_HIGHBITDEPTH
528
    // Extend the border.
529
0
    if (highbd) {
530
0
      highbd_build_mc_border(buf_ptr, buf_stride, mc_buf, b_w, block.x0,
531
0
                             block.y0, b_w, b_h, pre_buf->width,
532
0
                             pre_buf->height);
533
0
    } else {
534
0
      build_mc_border(buf_ptr, buf_stride, mc_buf, b_w, block.x0, block.y0, b_w,
535
0
                      b_h, pre_buf->width, pre_buf->height);
536
0
    }
537
#else
538
    (void)highbd;
539
    build_mc_border(buf_ptr, buf_stride, mc_buf, b_w, block.x0, block.y0, b_w,
540
                    b_h, pre_buf->width, pre_buf->height);
541
#endif
542
0
    *src_stride = b_w;
543
0
    *pre = mc_buf + y_pad * (AOM_INTERP_EXTEND - 1) * b_w +
544
0
           x_pad * (AOM_INTERP_EXTEND - 1);
545
0
  }
546
0
}
547
548
static void dec_calc_subpel_params(const MV *const src_mv,
549
                                   InterPredParams *const inter_pred_params,
550
                                   const MACROBLOCKD *const xd, int mi_x,
551
                                   int mi_y, uint8_t **pre,
552
                                   SubpelParams *subpel_params, int *src_stride,
553
                                   PadBlock *block, MV32 *scaled_mv,
554
0
                                   int *subpel_x_mv, int *subpel_y_mv) {
555
0
  const struct scale_factors *sf = inter_pred_params->scale_factors;
556
0
  struct buf_2d *pre_buf = &inter_pred_params->ref_frame_buf;
557
0
  const int bw = inter_pred_params->block_width;
558
0
  const int bh = inter_pred_params->block_height;
559
0
  const int is_scaled = av1_is_scaled(sf);
560
0
  if (is_scaled) {
561
0
    int ssx = inter_pred_params->subsampling_x;
562
0
    int ssy = inter_pred_params->subsampling_y;
563
0
    int orig_pos_y = inter_pred_params->pix_row << SUBPEL_BITS;
564
0
    orig_pos_y += src_mv->row * (1 << (1 - ssy));
565
0
    int orig_pos_x = inter_pred_params->pix_col << SUBPEL_BITS;
566
0
    orig_pos_x += src_mv->col * (1 << (1 - ssx));
567
0
    int pos_y = sf->scale_value_y(orig_pos_y, sf);
568
0
    int pos_x = sf->scale_value_x(orig_pos_x, sf);
569
0
    pos_x += SCALE_EXTRA_OFF;
570
0
    pos_y += SCALE_EXTRA_OFF;
571
572
0
    const int top = -AOM_LEFT_TOP_MARGIN_SCALED(ssy);
573
0
    const int left = -AOM_LEFT_TOP_MARGIN_SCALED(ssx);
574
0
    const int bottom = (pre_buf->height + AOM_INTERP_EXTEND)
575
0
                       << SCALE_SUBPEL_BITS;
576
0
    const int right = (pre_buf->width + AOM_INTERP_EXTEND) << SCALE_SUBPEL_BITS;
577
0
    pos_y = clamp(pos_y, top, bottom);
578
0
    pos_x = clamp(pos_x, left, right);
579
580
0
    subpel_params->subpel_x = pos_x & SCALE_SUBPEL_MASK;
581
0
    subpel_params->subpel_y = pos_y & SCALE_SUBPEL_MASK;
582
0
    subpel_params->xs = sf->x_step_q4;
583
0
    subpel_params->ys = sf->y_step_q4;
584
585
    // Get reference block top left coordinate.
586
0
    block->x0 = pos_x >> SCALE_SUBPEL_BITS;
587
0
    block->y0 = pos_y >> SCALE_SUBPEL_BITS;
588
589
    // Get reference block bottom right coordinate.
590
0
    block->x1 =
591
0
        ((pos_x + (bw - 1) * subpel_params->xs) >> SCALE_SUBPEL_BITS) + 1;
592
0
    block->y1 =
593
0
        ((pos_y + (bh - 1) * subpel_params->ys) >> SCALE_SUBPEL_BITS) + 1;
594
595
0
    MV temp_mv;
596
0
    temp_mv = clamp_mv_to_umv_border_sb(xd, src_mv, bw, bh,
597
0
                                        inter_pred_params->subsampling_x,
598
0
                                        inter_pred_params->subsampling_y);
599
0
    *scaled_mv = av1_scale_mv(&temp_mv, mi_x, mi_y, sf);
600
0
    scaled_mv->row += SCALE_EXTRA_OFF;
601
0
    scaled_mv->col += SCALE_EXTRA_OFF;
602
603
0
    *subpel_x_mv = scaled_mv->col & SCALE_SUBPEL_MASK;
604
0
    *subpel_y_mv = scaled_mv->row & SCALE_SUBPEL_MASK;
605
0
  } else {
606
    // Get block position in current frame.
607
0
    int pos_x = inter_pred_params->pix_col << SUBPEL_BITS;
608
0
    int pos_y = inter_pred_params->pix_row << SUBPEL_BITS;
609
610
0
    const MV mv_q4 = clamp_mv_to_umv_border_sb(
611
0
        xd, src_mv, bw, bh, inter_pred_params->subsampling_x,
612
0
        inter_pred_params->subsampling_y);
613
0
    subpel_params->xs = subpel_params->ys = SCALE_SUBPEL_SHIFTS;
614
0
    subpel_params->subpel_x = (mv_q4.col & SUBPEL_MASK) << SCALE_EXTRA_BITS;
615
0
    subpel_params->subpel_y = (mv_q4.row & SUBPEL_MASK) << SCALE_EXTRA_BITS;
616
617
    // Get reference block top left coordinate.
618
0
    pos_x += mv_q4.col;
619
0
    pos_y += mv_q4.row;
620
0
    block->x0 = pos_x >> SUBPEL_BITS;
621
0
    block->y0 = pos_y >> SUBPEL_BITS;
622
623
    // Get reference block bottom right coordinate.
624
0
    block->x1 = (pos_x >> SUBPEL_BITS) + (bw - 1) + 1;
625
0
    block->y1 = (pos_y >> SUBPEL_BITS) + (bh - 1) + 1;
626
627
0
    scaled_mv->row = mv_q4.row;
628
0
    scaled_mv->col = mv_q4.col;
629
0
    *subpel_x_mv = scaled_mv->col & SUBPEL_MASK;
630
0
    *subpel_y_mv = scaled_mv->row & SUBPEL_MASK;
631
0
  }
632
0
  *pre = pre_buf->buf0 + block->y0 * pre_buf->stride + block->x0;
633
0
  *src_stride = pre_buf->stride;
634
0
}
635
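
In the unscaled branch above, the block position (shifted into subpel units) plus the clamped motion vector splits into an integer sample offset and a subpel phase that selects the interpolation filter. A standalone sketch with illustrative constants (assumed here to be 4 and 6; the real SUBPEL_BITS, SUBPEL_MASK and SCALE_EXTRA_BITS live in the libaom headers):

    #include <stdio.h>

    /* Illustrative constants, assumed to mirror the libaom values. */
    #define SKETCH_SUBPEL_BITS 4
    #define SKETCH_SUBPEL_MASK ((1 << SKETCH_SUBPEL_BITS) - 1)
    #define SKETCH_SCALE_EXTRA_BITS 6

    int main(void) {
      const int pix_col = 64; /* block's left edge in pixels                  */
      const int mv_col = 13;  /* clamped MV component, already in subpel units */
      const int pos = (pix_col << SKETCH_SUBPEL_BITS) + mv_col;
      const int x0 = pos >> SKETCH_SUBPEL_BITS;          /* integer column */
      const int phase = (pos & SKETCH_SUBPEL_MASK) << SKETCH_SCALE_EXTRA_BITS;
      printf("x0 = %d, subpel phase = %d\n", x0, phase); /* 64, 13 << 6 */
      return 0;
    }
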
636
static void dec_calc_subpel_params_and_extend(
637
    const MV *const src_mv, InterPredParams *const inter_pred_params,
638
    MACROBLOCKD *const xd, int mi_x, int mi_y, int ref, uint8_t **mc_buf,
639
0
    uint8_t **pre, SubpelParams *subpel_params, int *src_stride) {
640
0
  PadBlock block;
641
0
  MV32 scaled_mv;
642
0
  int subpel_x_mv, subpel_y_mv;
643
0
  dec_calc_subpel_params(src_mv, inter_pred_params, xd, mi_x, mi_y, pre,
644
0
                         subpel_params, src_stride, &block, &scaled_mv,
645
0
                         &subpel_x_mv, &subpel_y_mv);
646
0
  extend_mc_border(
647
0
      inter_pred_params->scale_factors, &inter_pred_params->ref_frame_buf,
648
0
      scaled_mv, block, subpel_x_mv, subpel_y_mv,
649
0
      inter_pred_params->mode == WARP_PRED, inter_pred_params->is_intrabc,
650
0
      inter_pred_params->use_hbd_buf, mc_buf[ref], pre, src_stride);
651
0
}
652
653
static void dec_build_inter_predictors(const AV1_COMMON *cm,
654
                                       DecoderCodingBlock *dcb, int plane,
655
                                       const MB_MODE_INFO *mi,
656
                                       int build_for_obmc, int bw, int bh,
657
0
                                       int mi_x, int mi_y) {
658
0
  av1_build_inter_predictors(cm, &dcb->xd, plane, mi, build_for_obmc, bw, bh,
659
0
                             mi_x, mi_y, dcb->mc_buf,
660
0
                             dec_calc_subpel_params_and_extend);
661
0
}
662
663
static AOM_INLINE void dec_build_inter_predictor(const AV1_COMMON *cm,
664
                                                 DecoderCodingBlock *dcb,
665
                                                 int mi_row, int mi_col,
666
0
                                                 BLOCK_SIZE bsize) {
667
0
  MACROBLOCKD *const xd = &dcb->xd;
668
0
  const int num_planes = av1_num_planes(cm);
669
0
  for (int plane = 0; plane < num_planes; ++plane) {
670
0
    if (plane && !xd->is_chroma_ref) break;
671
0
    const int mi_x = mi_col * MI_SIZE;
672
0
    const int mi_y = mi_row * MI_SIZE;
673
0
    dec_build_inter_predictors(cm, dcb, plane, xd->mi[0], 0,
674
0
                               xd->plane[plane].width, xd->plane[plane].height,
675
0
                               mi_x, mi_y);
676
0
    if (is_interintra_pred(xd->mi[0])) {
677
0
      BUFFER_SET ctx = { { xd->plane[0].dst.buf, xd->plane[1].dst.buf,
678
0
                           xd->plane[2].dst.buf },
679
0
                         { xd->plane[0].dst.stride, xd->plane[1].dst.stride,
680
0
                           xd->plane[2].dst.stride } };
681
0
      av1_build_interintra_predictor(cm, xd, xd->plane[plane].dst.buf,
682
0
                                     xd->plane[plane].dst.stride, &ctx, plane,
683
0
                                     bsize);
684
0
    }
685
0
  }
686
0
}
687
688
static INLINE void dec_build_prediction_by_above_pred(
689
    MACROBLOCKD *const xd, int rel_mi_row, int rel_mi_col, uint8_t op_mi_size,
690
0
    int dir, MB_MODE_INFO *above_mbmi, void *fun_ctxt, const int num_planes) {
691
0
  struct build_prediction_ctxt *ctxt = (struct build_prediction_ctxt *)fun_ctxt;
692
0
  const int above_mi_col = xd->mi_col + rel_mi_col;
693
0
  int mi_x, mi_y;
694
0
  MB_MODE_INFO backup_mbmi = *above_mbmi;
695
696
0
  (void)rel_mi_row;
697
0
  (void)dir;
698
699
0
  av1_setup_build_prediction_by_above_pred(xd, rel_mi_col, op_mi_size,
700
0
                                           &backup_mbmi, ctxt, num_planes);
701
0
  mi_x = above_mi_col << MI_SIZE_LOG2;
702
0
  mi_y = xd->mi_row << MI_SIZE_LOG2;
703
704
0
  const BLOCK_SIZE bsize = xd->mi[0]->bsize;
705
706
0
  for (int j = 0; j < num_planes; ++j) {
707
0
    const struct macroblockd_plane *pd = &xd->plane[j];
708
0
    int bw = (op_mi_size * MI_SIZE) >> pd->subsampling_x;
709
0
    int bh = clamp(block_size_high[bsize] >> (pd->subsampling_y + 1), 4,
710
0
                   block_size_high[BLOCK_64X64] >> (pd->subsampling_y + 1));
711
712
0
    if (av1_skip_u4x4_pred_in_obmc(bsize, pd, 0)) continue;
713
0
    dec_build_inter_predictors(ctxt->cm, (DecoderCodingBlock *)ctxt->dcb, j,
714
0
                               &backup_mbmi, 1, bw, bh, mi_x, mi_y);
715
0
  }
716
0
}
717
718
static AOM_INLINE void dec_build_prediction_by_above_preds(
719
    const AV1_COMMON *cm, DecoderCodingBlock *dcb,
720
    uint8_t *tmp_buf[MAX_MB_PLANE], int tmp_width[MAX_MB_PLANE],
721
0
    int tmp_height[MAX_MB_PLANE], int tmp_stride[MAX_MB_PLANE]) {
722
0
  MACROBLOCKD *const xd = &dcb->xd;
723
0
  if (!xd->up_available) return;
724
725
  // Adjust mb_to_bottom_edge to have the correct value for the OBMC
726
  // prediction block. This is half the height of the original block,
727
  // except for 128-wide blocks, where we only use a height of 32.
728
0
  const int this_height = xd->height * MI_SIZE;
729
0
  const int pred_height = AOMMIN(this_height / 2, 32);
730
0
  xd->mb_to_bottom_edge += GET_MV_SUBPEL(this_height - pred_height);
731
0
  struct build_prediction_ctxt ctxt = {
732
0
    cm, tmp_buf, tmp_width, tmp_height, tmp_stride, xd->mb_to_right_edge, dcb
733
0
  };
734
0
  const BLOCK_SIZE bsize = xd->mi[0]->bsize;
735
0
  foreach_overlappable_nb_above(cm, xd,
736
0
                                max_neighbor_obmc[mi_size_wide_log2[bsize]],
737
0
                                dec_build_prediction_by_above_pred, &ctxt);
738
739
0
  xd->mb_to_left_edge = -GET_MV_SUBPEL(xd->mi_col * MI_SIZE);
740
0
  xd->mb_to_right_edge = ctxt.mb_to_far_edge;
741
0
  xd->mb_to_bottom_edge -= GET_MV_SUBPEL(this_height - pred_height);
742
0
}
743
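
The comment above notes that the OBMC prediction from the above neighbours only covers the top part of the block: half its height, capped at 32 pixels. A trivial standalone restatement of that cap (hypothetical helper; GET_MV_SUBPEL in the real code only converts the remainder into 1/8-pel units):

    /* Hypothetical helper: height in pixels of the overlap region predicted
     * from the above neighbours. */
    static int obmc_above_pred_height(int block_height_px) {
      const int half = block_height_px / 2;
      return half < 32 ? half : 32;
    }
    /* e.g. 16 for a 32-pixel-tall block, 32 (not 64) for a 128-pixel-tall one */
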
744
static INLINE void dec_build_prediction_by_left_pred(
745
    MACROBLOCKD *const xd, int rel_mi_row, int rel_mi_col, uint8_t op_mi_size,
746
0
    int dir, MB_MODE_INFO *left_mbmi, void *fun_ctxt, const int num_planes) {
747
0
  struct build_prediction_ctxt *ctxt = (struct build_prediction_ctxt *)fun_ctxt;
748
0
  const int left_mi_row = xd->mi_row + rel_mi_row;
749
0
  int mi_x, mi_y;
750
0
  MB_MODE_INFO backup_mbmi = *left_mbmi;
751
752
0
  (void)rel_mi_col;
753
0
  (void)dir;
754
755
0
  av1_setup_build_prediction_by_left_pred(xd, rel_mi_row, op_mi_size,
756
0
                                          &backup_mbmi, ctxt, num_planes);
757
0
  mi_x = xd->mi_col << MI_SIZE_LOG2;
758
0
  mi_y = left_mi_row << MI_SIZE_LOG2;
759
0
  const BLOCK_SIZE bsize = xd->mi[0]->bsize;
760
761
0
  for (int j = 0; j < num_planes; ++j) {
762
0
    const struct macroblockd_plane *pd = &xd->plane[j];
763
0
    int bw = clamp(block_size_wide[bsize] >> (pd->subsampling_x + 1), 4,
764
0
                   block_size_wide[BLOCK_64X64] >> (pd->subsampling_x + 1));
765
0
    int bh = (op_mi_size << MI_SIZE_LOG2) >> pd->subsampling_y;
766
767
0
    if (av1_skip_u4x4_pred_in_obmc(bsize, pd, 1)) continue;
768
0
    dec_build_inter_predictors(ctxt->cm, (DecoderCodingBlock *)ctxt->dcb, j,
769
0
                               &backup_mbmi, 1, bw, bh, mi_x, mi_y);
770
0
  }
771
0
}
772
773
static AOM_INLINE void dec_build_prediction_by_left_preds(
774
    const AV1_COMMON *cm, DecoderCodingBlock *dcb,
775
    uint8_t *tmp_buf[MAX_MB_PLANE], int tmp_width[MAX_MB_PLANE],
776
0
    int tmp_height[MAX_MB_PLANE], int tmp_stride[MAX_MB_PLANE]) {
777
0
  MACROBLOCKD *const xd = &dcb->xd;
778
0
  if (!xd->left_available) return;
779
780
  // Adjust mb_to_right_edge to have the correct value for the OBMC
781
  // prediction block. This is half the width of the original block,
782
  // except for 128-wide blocks, where we only use a width of 32.
783
0
  const int this_width = xd->width * MI_SIZE;
784
0
  const int pred_width = AOMMIN(this_width / 2, 32);
785
0
  xd->mb_to_right_edge += GET_MV_SUBPEL(this_width - pred_width);
786
787
0
  struct build_prediction_ctxt ctxt = {
788
0
    cm, tmp_buf, tmp_width, tmp_height, tmp_stride, xd->mb_to_bottom_edge, dcb
789
0
  };
790
0
  const BLOCK_SIZE bsize = xd->mi[0]->bsize;
791
0
  foreach_overlappable_nb_left(cm, xd,
792
0
                               max_neighbor_obmc[mi_size_high_log2[bsize]],
793
0
                               dec_build_prediction_by_left_pred, &ctxt);
794
795
0
  xd->mb_to_top_edge = -GET_MV_SUBPEL(xd->mi_row * MI_SIZE);
796
0
  xd->mb_to_right_edge -= GET_MV_SUBPEL(this_width - pred_width);
797
0
  xd->mb_to_bottom_edge = ctxt.mb_to_far_edge;
798
0
}
799
800
static AOM_INLINE void dec_build_obmc_inter_predictors_sb(
801
0
    const AV1_COMMON *cm, DecoderCodingBlock *dcb) {
802
0
  const int num_planes = av1_num_planes(cm);
803
0
  uint8_t *dst_buf1[MAX_MB_PLANE], *dst_buf2[MAX_MB_PLANE];
804
0
  int dst_stride1[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
805
0
  int dst_stride2[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
806
0
  int dst_width1[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
807
0
  int dst_width2[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
808
0
  int dst_height1[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
809
0
  int dst_height2[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
810
811
0
  MACROBLOCKD *const xd = &dcb->xd;
812
0
  av1_setup_obmc_dst_bufs(xd, dst_buf1, dst_buf2);
813
814
0
  dec_build_prediction_by_above_preds(cm, dcb, dst_buf1, dst_width1,
815
0
                                      dst_height1, dst_stride1);
816
0
  dec_build_prediction_by_left_preds(cm, dcb, dst_buf2, dst_width2, dst_height2,
817
0
                                     dst_stride2);
818
0
  const int mi_row = xd->mi_row;
819
0
  const int mi_col = xd->mi_col;
820
0
  av1_setup_dst_planes(xd->plane, xd->mi[0]->bsize, &cm->cur_frame->buf, mi_row,
821
0
                       mi_col, 0, num_planes);
822
0
  av1_build_obmc_inter_prediction(cm, xd, dst_buf1, dst_stride1, dst_buf2,
823
0
                                  dst_stride2);
824
0
}
825
826
static AOM_INLINE void cfl_store_inter_block(AV1_COMMON *const cm,
827
0
                                             MACROBLOCKD *const xd) {
828
0
  MB_MODE_INFO *mbmi = xd->mi[0];
829
0
  if (store_cfl_required(cm, xd)) {
830
0
    cfl_store_block(xd, mbmi->bsize, mbmi->tx_size);
831
0
  }
832
0
}
833
834
static AOM_INLINE void predict_inter_block(AV1_COMMON *const cm,
835
                                           DecoderCodingBlock *dcb,
836
0
                                           BLOCK_SIZE bsize) {
837
0
  MACROBLOCKD *const xd = &dcb->xd;
838
0
  MB_MODE_INFO *mbmi = xd->mi[0];
839
0
  const int num_planes = av1_num_planes(cm);
840
0
  const int mi_row = xd->mi_row;
841
0
  const int mi_col = xd->mi_col;
842
0
  for (int ref = 0; ref < 1 + has_second_ref(mbmi); ++ref) {
843
0
    const MV_REFERENCE_FRAME frame = mbmi->ref_frame[ref];
844
0
    if (frame < LAST_FRAME) {
845
0
      assert(is_intrabc_block(mbmi));
846
0
      assert(frame == INTRA_FRAME);
847
0
      assert(ref == 0);
848
0
    } else {
849
0
      const RefCntBuffer *ref_buf = get_ref_frame_buf(cm, frame);
850
0
      const struct scale_factors *ref_scale_factors =
851
0
          get_ref_scale_factors_const(cm, frame);
852
853
0
      xd->block_ref_scale_factors[ref] = ref_scale_factors;
854
0
      av1_setup_pre_planes(xd, ref, &ref_buf->buf, mi_row, mi_col,
855
0
                           ref_scale_factors, num_planes);
856
0
    }
857
0
  }
858
859
0
  dec_build_inter_predictor(cm, dcb, mi_row, mi_col, bsize);
860
0
  if (mbmi->motion_mode == OBMC_CAUSAL) {
861
0
    dec_build_obmc_inter_predictors_sb(cm, dcb);
862
0
  }
863
#if CONFIG_MISMATCH_DEBUG
864
  for (int plane = 0; plane < num_planes; ++plane) {
865
    const struct macroblockd_plane *pd = &xd->plane[plane];
866
    int pixel_c, pixel_r;
867
    mi_to_pixel_loc(&pixel_c, &pixel_r, mi_col, mi_row, 0, 0, pd->subsampling_x,
868
                    pd->subsampling_y);
869
    if (!is_chroma_reference(mi_row, mi_col, bsize, pd->subsampling_x,
870
                             pd->subsampling_y))
871
      continue;
872
    mismatch_check_block_pre(pd->dst.buf, pd->dst.stride,
873
                             cm->current_frame.order_hint, plane, pixel_c,
874
                             pixel_r, pd->width, pd->height,
875
                             xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH);
876
  }
877
#endif
878
0
}
879
880
static AOM_INLINE void set_color_index_map_offset(MACROBLOCKD *const xd,
881
0
                                                  int plane, aom_reader *r) {
882
0
  (void)r;
883
0
  Av1ColorMapParam params;
884
0
  const MB_MODE_INFO *const mbmi = xd->mi[0];
885
0
  av1_get_block_dimensions(mbmi->bsize, plane, xd, &params.plane_width,
886
0
                           &params.plane_height, NULL, NULL);
887
0
  xd->color_index_map_offset[plane] += params.plane_width * params.plane_height;
888
0
}
889
890
static AOM_INLINE void decode_token_recon_block(AV1Decoder *const pbi,
891
                                                ThreadData *const td,
892
                                                aom_reader *r,
893
0
                                                BLOCK_SIZE bsize) {
894
0
  AV1_COMMON *const cm = &pbi->common;
895
0
  DecoderCodingBlock *const dcb = &td->dcb;
896
0
  MACROBLOCKD *const xd = &dcb->xd;
897
0
  const int num_planes = av1_num_planes(cm);
898
0
  MB_MODE_INFO *mbmi = xd->mi[0];
899
900
0
  if (!is_inter_block(mbmi)) {
901
0
    int row, col;
902
0
    assert(bsize == get_plane_block_size(bsize, xd->plane[0].subsampling_x,
903
0
                                         xd->plane[0].subsampling_y));
904
0
    const int max_blocks_wide = max_block_wide(xd, bsize, 0);
905
0
    const int max_blocks_high = max_block_high(xd, bsize, 0);
906
0
    const BLOCK_SIZE max_unit_bsize = BLOCK_64X64;
907
0
    int mu_blocks_wide = mi_size_wide[max_unit_bsize];
908
0
    int mu_blocks_high = mi_size_high[max_unit_bsize];
909
0
    mu_blocks_wide = AOMMIN(max_blocks_wide, mu_blocks_wide);
910
0
    mu_blocks_high = AOMMIN(max_blocks_high, mu_blocks_high);
911
912
0
    for (row = 0; row < max_blocks_high; row += mu_blocks_high) {
913
0
      for (col = 0; col < max_blocks_wide; col += mu_blocks_wide) {
914
0
        for (int plane = 0; plane < num_planes; ++plane) {
915
0
          if (plane && !xd->is_chroma_ref) break;
916
0
          const struct macroblockd_plane *const pd = &xd->plane[plane];
917
0
          const TX_SIZE tx_size = av1_get_tx_size(plane, xd);
918
#if CONFIG_REALTIME_ONLY
919
          // Realtime only build doesn't support 4x rectangular txfm sizes.
920
          if (tx_size >= TX_4X16) {
921
            aom_internal_error(xd->error_info, AOM_CODEC_UNSUP_FEATURE,
922
                               "Realtime only build doesn't support 4x "
923
                               "rectangular txfm sizes");
924
          }
925
#endif
926
0
          const int stepr = tx_size_high_unit[tx_size];
927
0
          const int stepc = tx_size_wide_unit[tx_size];
928
929
0
          const int unit_height = ROUND_POWER_OF_TWO(
930
0
              AOMMIN(mu_blocks_high + row, max_blocks_high), pd->subsampling_y);
931
0
          const int unit_width = ROUND_POWER_OF_TWO(
932
0
              AOMMIN(mu_blocks_wide + col, max_blocks_wide), pd->subsampling_x);
933
934
0
          for (int blk_row = row >> pd->subsampling_y; blk_row < unit_height;
935
0
               blk_row += stepr) {
936
0
            for (int blk_col = col >> pd->subsampling_x; blk_col < unit_width;
937
0
                 blk_col += stepc) {
938
0
              td->read_coeffs_tx_intra_block_visit(cm, dcb, r, plane, blk_row,
939
0
                                                   blk_col, tx_size);
940
0
              td->predict_and_recon_intra_block_visit(
941
0
                  cm, dcb, r, plane, blk_row, blk_col, tx_size);
942
0
              set_cb_buffer_offsets(dcb, tx_size, plane);
943
0
            }
944
0
          }
945
0
        }
946
0
      }
947
0
    }
948
0
  } else {
949
0
    td->predict_inter_block_visit(cm, dcb, bsize);
950
    // Reconstruction
951
0
    if (!mbmi->skip_txfm) {
952
0
      int eobtotal = 0;
953
954
0
      const int max_blocks_wide = max_block_wide(xd, bsize, 0);
955
0
      const int max_blocks_high = max_block_high(xd, bsize, 0);
956
0
      int row, col;
957
958
0
      const BLOCK_SIZE max_unit_bsize = BLOCK_64X64;
959
0
      assert(max_unit_bsize ==
960
0
             get_plane_block_size(BLOCK_64X64, xd->plane[0].subsampling_x,
961
0
                                  xd->plane[0].subsampling_y));
962
0
      int mu_blocks_wide = mi_size_wide[max_unit_bsize];
963
0
      int mu_blocks_high = mi_size_high[max_unit_bsize];
964
965
0
      mu_blocks_wide = AOMMIN(max_blocks_wide, mu_blocks_wide);
966
0
      mu_blocks_high = AOMMIN(max_blocks_high, mu_blocks_high);
967
968
0
      for (row = 0; row < max_blocks_high; row += mu_blocks_high) {
969
0
        for (col = 0; col < max_blocks_wide; col += mu_blocks_wide) {
970
0
          for (int plane = 0; plane < num_planes; ++plane) {
971
0
            if (plane && !xd->is_chroma_ref) break;
972
0
            const struct macroblockd_plane *const pd = &xd->plane[plane];
973
0
            const int ss_x = pd->subsampling_x;
974
0
            const int ss_y = pd->subsampling_y;
975
0
            const BLOCK_SIZE plane_bsize =
976
0
                get_plane_block_size(bsize, ss_x, ss_y);
977
0
            const TX_SIZE max_tx_size =
978
0
                get_vartx_max_txsize(xd, plane_bsize, plane);
979
0
            const int bh_var_tx = tx_size_high_unit[max_tx_size];
980
0
            const int bw_var_tx = tx_size_wide_unit[max_tx_size];
981
0
            int block = 0;
982
0
            int step =
983
0
                tx_size_wide_unit[max_tx_size] * tx_size_high_unit[max_tx_size];
984
0
            int blk_row, blk_col;
985
0
            const int unit_height = ROUND_POWER_OF_TWO(
986
0
                AOMMIN(mu_blocks_high + row, max_blocks_high), ss_y);
987
0
            const int unit_width = ROUND_POWER_OF_TWO(
988
0
                AOMMIN(mu_blocks_wide + col, max_blocks_wide), ss_x);
989
990
0
            for (blk_row = row >> ss_y; blk_row < unit_height;
991
0
                 blk_row += bh_var_tx) {
992
0
              for (blk_col = col >> ss_x; blk_col < unit_width;
993
0
                   blk_col += bw_var_tx) {
994
0
                decode_reconstruct_tx(cm, td, r, mbmi, plane, plane_bsize,
995
0
                                      blk_row, blk_col, block, max_tx_size,
996
0
                                      &eobtotal);
997
0
                block += step;
998
0
              }
999
0
            }
1000
0
          }
1001
0
        }
1002
0
      }
1003
0
    }
1004
0
    td->cfl_store_inter_block_visit(cm, xd);
1005
0
  }
1006
1007
0
  av1_visit_palette(pbi, xd, r, set_color_index_map_offset);
1008
0
}
1009
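
The unit_height/unit_width computations above convert extents in luma 4x4 units to the subsampled plane with rounding, since ROUND_POWER_OF_TWO(v, n) is (v + ((1 << n) >> 1)) >> n. A standalone sketch of that step:

    /* Hypothetical helper: round a luma extent (in 4x4 units) to the
     * subsampled plane, e.g. 5 luma units -> 3 chroma units for 4:2:0. */
    static int subsampled_units(int luma_units, int subsampling) {
      return (luma_units + ((1 << subsampling) >> 1)) >> subsampling;
    }
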
1010
static AOM_INLINE void set_inter_tx_size(MB_MODE_INFO *mbmi, int stride_log2,
1011
                                         int tx_w_log2, int tx_h_log2,
1012
                                         int min_txs, int split_size, int txs,
1013
0
                                         int blk_row, int blk_col) {
1014
0
  for (int idy = 0; idy < tx_size_high_unit[split_size];
1015
0
       idy += tx_size_high_unit[min_txs]) {
1016
0
    for (int idx = 0; idx < tx_size_wide_unit[split_size];
1017
0
         idx += tx_size_wide_unit[min_txs]) {
1018
0
      const int index = (((blk_row + idy) >> tx_h_log2) << stride_log2) +
1019
0
                        ((blk_col + idx) >> tx_w_log2);
1020
0
      mbmi->inter_tx_size[index] = txs;
1021
0
    }
1022
0
  }
1023
0
}
1024
1025
static AOM_INLINE void read_tx_size_vartx(MACROBLOCKD *xd, MB_MODE_INFO *mbmi,
1026
                                          TX_SIZE tx_size, int depth,
1027
                                          int blk_row, int blk_col,
1028
0
                                          aom_reader *r) {
1029
0
  FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
1030
0
  int is_split = 0;
1031
0
  const BLOCK_SIZE bsize = mbmi->bsize;
1032
0
  const int max_blocks_high = max_block_high(xd, bsize, 0);
1033
0
  const int max_blocks_wide = max_block_wide(xd, bsize, 0);
1034
0
  if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return;
1035
0
  assert(tx_size > TX_4X4);
1036
0
  TX_SIZE txs = max_txsize_rect_lookup[bsize];
1037
0
  for (int level = 0; level < MAX_VARTX_DEPTH - 1; ++level)
1038
0
    txs = sub_tx_size_map[txs];
1039
0
  const int tx_w_log2 = tx_size_wide_log2[txs] - MI_SIZE_LOG2;
1040
0
  const int tx_h_log2 = tx_size_high_log2[txs] - MI_SIZE_LOG2;
1041
0
  const int bw_log2 = mi_size_wide_log2[bsize];
1042
0
  const int stride_log2 = bw_log2 - tx_w_log2;
1043
1044
0
  if (depth == MAX_VARTX_DEPTH) {
1045
0
    set_inter_tx_size(mbmi, stride_log2, tx_w_log2, tx_h_log2, txs, tx_size,
1046
0
                      tx_size, blk_row, blk_col);
1047
0
    mbmi->tx_size = tx_size;
1048
0
    txfm_partition_update(xd->above_txfm_context + blk_col,
1049
0
                          xd->left_txfm_context + blk_row, tx_size, tx_size);
1050
0
    return;
1051
0
  }
1052
1053
0
  const int ctx = txfm_partition_context(xd->above_txfm_context + blk_col,
1054
0
                                         xd->left_txfm_context + blk_row,
1055
0
                                         mbmi->bsize, tx_size);
1056
0
  is_split = aom_read_symbol(r, ec_ctx->txfm_partition_cdf[ctx], 2, ACCT_STR);
1057
1058
0
  if (is_split) {
1059
0
    const TX_SIZE sub_txs = sub_tx_size_map[tx_size];
1060
0
    const int bsw = tx_size_wide_unit[sub_txs];
1061
0
    const int bsh = tx_size_high_unit[sub_txs];
1062
1063
0
    if (sub_txs == TX_4X4) {
1064
0
      set_inter_tx_size(mbmi, stride_log2, tx_w_log2, tx_h_log2, txs, tx_size,
1065
0
                        sub_txs, blk_row, blk_col);
1066
0
      mbmi->tx_size = sub_txs;
1067
0
      txfm_partition_update(xd->above_txfm_context + blk_col,
1068
0
                            xd->left_txfm_context + blk_row, sub_txs, tx_size);
1069
0
      return;
1070
0
    }
1071
1072
0
    assert(bsw > 0 && bsh > 0);
1073
0
    for (int row = 0; row < tx_size_high_unit[tx_size]; row += bsh) {
1074
0
      for (int col = 0; col < tx_size_wide_unit[tx_size]; col += bsw) {
1075
0
        int offsetr = blk_row + row;
1076
0
        int offsetc = blk_col + col;
1077
0
        read_tx_size_vartx(xd, mbmi, sub_txs, depth + 1, offsetr, offsetc, r);
1078
0
      }
1079
0
    }
1080
0
  } else {
1081
0
    set_inter_tx_size(mbmi, stride_log2, tx_w_log2, tx_h_log2, txs, tx_size,
1082
0
                      tx_size, blk_row, blk_col);
1083
0
    mbmi->tx_size = tx_size;
1084
0
    txfm_partition_update(xd->above_txfm_context + blk_col,
1085
0
                          xd->left_txfm_context + blk_row, tx_size, tx_size);
1086
0
  }
1087
0
}
1088
1089
static TX_SIZE read_selected_tx_size(const MACROBLOCKD *const xd,
1090
0
                                     aom_reader *r) {
1091
  // TODO(debargha): Clean up the logic here. This function should only
1092
  // be called for intra.
1093
0
  const BLOCK_SIZE bsize = xd->mi[0]->bsize;
1094
0
  const int32_t tx_size_cat = bsize_to_tx_size_cat(bsize);
1095
0
  const int max_depths = bsize_to_max_depth(bsize);
1096
0
  const int ctx = get_tx_size_context(xd);
1097
0
  FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
1098
0
  const int depth = aom_read_symbol(r, ec_ctx->tx_size_cdf[tx_size_cat][ctx],
1099
0
                                    max_depths + 1, ACCT_STR);
1100
0
  assert(depth >= 0 && depth <= max_depths);
1101
0
  const TX_SIZE tx_size = depth_to_tx_size(depth, bsize);
1102
0
  return tx_size;
1103
0
}
1104
1105
static TX_SIZE read_tx_size(const MACROBLOCKD *const xd, TX_MODE tx_mode,
1106
                            int is_inter, int allow_select_inter,
1107
0
                            aom_reader *r) {
1108
0
  const BLOCK_SIZE bsize = xd->mi[0]->bsize;
1109
0
  if (xd->lossless[xd->mi[0]->segment_id]) return TX_4X4;
1110
1111
0
  if (block_signals_txsize(bsize)) {
1112
0
    if ((!is_inter || allow_select_inter) && tx_mode == TX_MODE_SELECT) {
1113
0
      const TX_SIZE coded_tx_size = read_selected_tx_size(xd, r);
1114
0
      return coded_tx_size;
1115
0
    } else {
1116
0
      return tx_size_from_tx_mode(bsize, tx_mode);
1117
0
    }
1118
0
  } else {
1119
0
    assert(IMPLIES(tx_mode == ONLY_4X4, bsize == BLOCK_4X4));
1120
0
    return max_txsize_rect_lookup[bsize];
1121
0
  }
1122
0
}
1123
1124
static AOM_INLINE void parse_decode_block(AV1Decoder *const pbi,
1125
                                          ThreadData *const td, int mi_row,
1126
                                          int mi_col, aom_reader *r,
1127
                                          PARTITION_TYPE partition,
1128
0
                                          BLOCK_SIZE bsize) {
1129
0
  DecoderCodingBlock *const dcb = &td->dcb;
1130
0
  MACROBLOCKD *const xd = &dcb->xd;
1131
0
  decode_mbmi_block(pbi, dcb, mi_row, mi_col, r, partition, bsize);
1132
1133
0
  av1_visit_palette(pbi, xd, r, av1_decode_palette_tokens);
1134
1135
0
  AV1_COMMON *cm = &pbi->common;
1136
0
  const int num_planes = av1_num_planes(cm);
1137
0
  MB_MODE_INFO *mbmi = xd->mi[0];
1138
0
  int inter_block_tx = is_inter_block(mbmi) || is_intrabc_block(mbmi);
1139
0
  if (cm->features.tx_mode == TX_MODE_SELECT && block_signals_txsize(bsize) &&
1140
0
      !mbmi->skip_txfm && inter_block_tx && !xd->lossless[mbmi->segment_id]) {
1141
0
    const TX_SIZE max_tx_size = max_txsize_rect_lookup[bsize];
1142
0
    const int bh = tx_size_high_unit[max_tx_size];
1143
0
    const int bw = tx_size_wide_unit[max_tx_size];
1144
0
    const int width = mi_size_wide[bsize];
1145
0
    const int height = mi_size_high[bsize];
1146
1147
0
    for (int idy = 0; idy < height; idy += bh)
1148
0
      for (int idx = 0; idx < width; idx += bw)
1149
0
        read_tx_size_vartx(xd, mbmi, max_tx_size, 0, idy, idx, r);
1150
0
  } else {
1151
0
    mbmi->tx_size = read_tx_size(xd, cm->features.tx_mode, inter_block_tx,
1152
0
                                 !mbmi->skip_txfm, r);
1153
0
    if (inter_block_tx)
1154
0
      memset(mbmi->inter_tx_size, mbmi->tx_size, sizeof(mbmi->inter_tx_size));
1155
0
    set_txfm_ctxs(mbmi->tx_size, xd->width, xd->height,
1156
0
                  mbmi->skip_txfm && is_inter_block(mbmi), xd);
1157
0
  }
1158
1159
0
  if (cm->delta_q_info.delta_q_present_flag) {
1160
0
    for (int i = 0; i < MAX_SEGMENTS; i++) {
1161
0
      const int current_qindex =
1162
0
          av1_get_qindex(&cm->seg, i, xd->current_base_qindex);
1163
0
      const CommonQuantParams *const quant_params = &cm->quant_params;
1164
0
      for (int j = 0; j < num_planes; ++j) {
1165
0
        const int dc_delta_q = j == 0 ? quant_params->y_dc_delta_q
1166
0
                                      : (j == 1 ? quant_params->u_dc_delta_q
1167
0
                                                : quant_params->v_dc_delta_q);
1168
0
        const int ac_delta_q = j == 0 ? 0
1169
0
                                      : (j == 1 ? quant_params->u_ac_delta_q
1170
0
                                                : quant_params->v_ac_delta_q);
1171
0
        xd->plane[j].seg_dequant_QTX[i][0] = av1_dc_quant_QTX(
1172
0
            current_qindex, dc_delta_q, cm->seq_params->bit_depth);
1173
0
        xd->plane[j].seg_dequant_QTX[i][1] = av1_ac_quant_QTX(
1174
0
            current_qindex, ac_delta_q, cm->seq_params->bit_depth);
1175
0
      }
1176
0
    }
1177
0
  }
1178
0
  if (mbmi->skip_txfm) av1_reset_entropy_context(xd, bsize, num_planes);
1179
1180
0
  decode_token_recon_block(pbi, td, r, bsize);
1181
0
}
1182
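
When delta-q is present, the dequantizers above are rebuilt per segment with plane-specific DC/AC offsets: the luma AC offset is always zero and the chroma planes use the u/v fields. A standalone sketch of that selection (the struct fields here are stand-ins for the CommonQuantParams members):

    /* Hypothetical mirror of the per-plane delta-q selection. */
    struct sketch_quant_deltas {
      int y_dc;
      int u_dc, u_ac;
      int v_dc, v_ac;
    };

    static void plane_delta_q(const struct sketch_quant_deltas *q, int plane,
                              int *dc_delta_q, int *ac_delta_q) {
      *dc_delta_q = plane == 0 ? q->y_dc : (plane == 1 ? q->u_dc : q->v_dc);
      *ac_delta_q = plane == 0 ? 0 : (plane == 1 ? q->u_ac : q->v_ac);
    }
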
1183
static AOM_INLINE void set_offsets_for_pred_and_recon(AV1Decoder *const pbi,
1184
                                                      ThreadData *const td,
1185
                                                      int mi_row, int mi_col,
1186
0
                                                      BLOCK_SIZE bsize) {
1187
0
  AV1_COMMON *const cm = &pbi->common;
1188
0
  const CommonModeInfoParams *const mi_params = &cm->mi_params;
1189
0
  DecoderCodingBlock *const dcb = &td->dcb;
1190
0
  MACROBLOCKD *const xd = &dcb->xd;
1191
0
  const int bw = mi_size_wide[bsize];
1192
0
  const int bh = mi_size_high[bsize];
1193
0
  const int num_planes = av1_num_planes(cm);
1194
1195
0
  const int offset = mi_row * mi_params->mi_stride + mi_col;
1196
0
  const TileInfo *const tile = &xd->tile;
1197
1198
0
  xd->mi = mi_params->mi_grid_base + offset;
1199
0
  xd->tx_type_map =
1200
0
      &mi_params->tx_type_map[mi_row * mi_params->mi_stride + mi_col];
1201
0
  xd->tx_type_map_stride = mi_params->mi_stride;
1202
1203
0
  set_plane_n4(xd, bw, bh, num_planes);
1204
1205
  // Distance of Mb to the various image edges. These are specified to 8th pel
1206
  // as they are always compared to values that are in 1/8th pel units
1207
0
  set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, mi_params->mi_rows,
1208
0
                 mi_params->mi_cols);
1209
1210
0
  av1_setup_dst_planes(xd->plane, bsize, &cm->cur_frame->buf, mi_row, mi_col, 0,
1211
0
                       num_planes);
1212
0
}
1213
1214
static AOM_INLINE void decode_block(AV1Decoder *const pbi, ThreadData *const td,
1215
                                    int mi_row, int mi_col, aom_reader *r,
1216
                                    PARTITION_TYPE partition,
1217
0
                                    BLOCK_SIZE bsize) {
1218
0
  (void)partition;
1219
0
  set_offsets_for_pred_and_recon(pbi, td, mi_row, mi_col, bsize);
1220
0
  decode_token_recon_block(pbi, td, r, bsize);
1221
0
}
1222
1223
static PARTITION_TYPE read_partition(MACROBLOCKD *xd, int mi_row, int mi_col,
1224
                                     aom_reader *r, int has_rows, int has_cols,
1225
0
                                     BLOCK_SIZE bsize) {
1226
0
  const int ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
1227
0
  FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
1228
1229
0
  if (!has_rows && !has_cols) return PARTITION_SPLIT;
1230
1231
0
  assert(ctx >= 0);
1232
0
  aom_cdf_prob *partition_cdf = ec_ctx->partition_cdf[ctx];
1233
0
  if (has_rows && has_cols) {
1234
0
    return (PARTITION_TYPE)aom_read_symbol(
1235
0
        r, partition_cdf, partition_cdf_length(bsize), ACCT_STR);
1236
0
  } else if (!has_rows && has_cols) {
1237
0
    assert(bsize > BLOCK_8X8);
1238
0
    aom_cdf_prob cdf[2];
1239
0
    partition_gather_vert_alike(cdf, partition_cdf, bsize);
1240
0
    assert(cdf[1] == AOM_ICDF(CDF_PROB_TOP));
1241
0
    return aom_read_cdf(r, cdf, 2, ACCT_STR) ? PARTITION_SPLIT : PARTITION_HORZ;
1242
0
  } else {
1243
0
    assert(has_rows && !has_cols);
1244
0
    assert(bsize > BLOCK_8X8);
1245
0
    aom_cdf_prob cdf[2];
1246
0
    partition_gather_horz_alike(cdf, partition_cdf, bsize);
1247
0
    assert(cdf[1] == AOM_ICDF(CDF_PROB_TOP));
1248
0
    return aom_read_cdf(r, cdf, 2, ACCT_STR) ? PARTITION_SPLIT : PARTITION_VERT;
1249
0
  }
1250
0
}
1251
1252
// TODO(slavarnway): eliminate bsize and subsize in future commits
1253
static AOM_INLINE void decode_partition(AV1Decoder *const pbi,
1254
                                        ThreadData *const td, int mi_row,
1255
                                        int mi_col, aom_reader *reader,
1256
                                        BLOCK_SIZE bsize,
1257
0
                                        int parse_decode_flag) {
1258
0
  assert(bsize < BLOCK_SIZES_ALL);
1259
0
  AV1_COMMON *const cm = &pbi->common;
1260
0
  DecoderCodingBlock *const dcb = &td->dcb;
1261
0
  MACROBLOCKD *const xd = &dcb->xd;
1262
0
  const int bw = mi_size_wide[bsize];
1263
0
  const int hbs = bw >> 1;
1264
0
  PARTITION_TYPE partition;
1265
0
  BLOCK_SIZE subsize;
1266
0
  const int quarter_step = bw / 4;
1267
0
  BLOCK_SIZE bsize2 = get_partition_subsize(bsize, PARTITION_SPLIT);
1268
0
  const int has_rows = (mi_row + hbs) < cm->mi_params.mi_rows;
1269
0
  const int has_cols = (mi_col + hbs) < cm->mi_params.mi_cols;
1270
1271
0
  if (mi_row >= cm->mi_params.mi_rows || mi_col >= cm->mi_params.mi_cols)
1272
0
    return;
1273
1274
  // parse_decode_flag takes the following values :
1275
  // 01 - do parse only
1276
  // 10 - do decode only
1277
  // 11 - do parse and decode
1278
0
  static const block_visitor_fn_t block_visit[4] = { NULL, parse_decode_block,
1279
0
                                                     decode_block,
1280
0
                                                     parse_decode_block };
1281
1282
0
  if (parse_decode_flag & 1) {
1283
0
    const int num_planes = av1_num_planes(cm);
1284
0
    for (int plane = 0; plane < num_planes; ++plane) {
1285
#if CONFIG_REALTIME_ONLY
1286
      assert(cm->rst_info[plane].frame_restoration_type == RESTORE_NONE);
1287
#else
1288
0
      int rcol0, rcol1, rrow0, rrow1;
1289
0
      if (av1_loop_restoration_corners_in_sb(cm, plane, mi_row, mi_col, bsize,
1290
0
                                             &rcol0, &rcol1, &rrow0, &rrow1)) {
1291
0
        const int rstride = cm->rst_info[plane].horz_units_per_tile;
1292
0
        for (int rrow = rrow0; rrow < rrow1; ++rrow) {
1293
0
          for (int rcol = rcol0; rcol < rcol1; ++rcol) {
1294
0
            const int runit_idx = rcol + rrow * rstride;
1295
0
            loop_restoration_read_sb_coeffs(cm, xd, reader, plane, runit_idx);
1296
0
          }
1297
0
        }
1298
0
      }
1299
0
#endif
1300
0
    }
1301
1302
0
    partition = (bsize < BLOCK_8X8) ? PARTITION_NONE
1303
0
                                    : read_partition(xd, mi_row, mi_col, reader,
1304
0
                                                     has_rows, has_cols, bsize);
1305
0
  } else {
1306
0
    partition = get_partition(cm, mi_row, mi_col, bsize);
1307
0
  }
1308
0
  subsize = get_partition_subsize(bsize, partition);
1309
0
  if (subsize == BLOCK_INVALID) {
1310
0
    aom_internal_error(xd->error_info, AOM_CODEC_CORRUPT_FRAME,
1311
0
                       "Partition is invalid for block size %dx%d",
1312
0
                       block_size_wide[bsize], block_size_high[bsize]);
1313
0
  }
1314
  // Check the bitstream is conformant: if there is subsampling on the
1315
  // chroma planes, subsize must subsample to a valid block size.
1316
0
  const struct macroblockd_plane *const pd_u = &xd->plane[1];
1317
0
  if (get_plane_block_size(subsize, pd_u->subsampling_x, pd_u->subsampling_y) ==
1318
0
      BLOCK_INVALID) {
1319
0
    aom_internal_error(xd->error_info, AOM_CODEC_CORRUPT_FRAME,
1320
0
                       "Block size %dx%d invalid with this subsampling mode",
1321
0
                       block_size_wide[subsize], block_size_high[subsize]);
1322
0
  }
1323
1324
0
#define DEC_BLOCK_STX_ARG
1325
0
#define DEC_BLOCK_EPT_ARG partition,
1326
0
#define DEC_BLOCK(db_r, db_c, db_subsize)                                  \
1327
0
  block_visit[parse_decode_flag](pbi, td, DEC_BLOCK_STX_ARG(db_r), (db_c), \
1328
0
                                 reader, DEC_BLOCK_EPT_ARG(db_subsize))
1329
0
#define DEC_PARTITION(db_r, db_c, db_subsize)                        \
1330
0
  decode_partition(pbi, td, DEC_BLOCK_STX_ARG(db_r), (db_c), reader, \
1331
0
                   (db_subsize), parse_decode_flag)
1332
1333
0
  switch (partition) {
1334
0
    case PARTITION_NONE: DEC_BLOCK(mi_row, mi_col, subsize); break;
1335
0
    case PARTITION_HORZ:
1336
0
      DEC_BLOCK(mi_row, mi_col, subsize);
1337
0
      if (has_rows) DEC_BLOCK(mi_row + hbs, mi_col, subsize);
1338
0
      break;
1339
0
    case PARTITION_VERT:
1340
0
      DEC_BLOCK(mi_row, mi_col, subsize);
1341
0
      if (has_cols) DEC_BLOCK(mi_row, mi_col + hbs, subsize);
1342
0
      break;
1343
0
    case PARTITION_SPLIT:
1344
0
      DEC_PARTITION(mi_row, mi_col, subsize);
1345
0
      DEC_PARTITION(mi_row, mi_col + hbs, subsize);
1346
0
      DEC_PARTITION(mi_row + hbs, mi_col, subsize);
1347
0
      DEC_PARTITION(mi_row + hbs, mi_col + hbs, subsize);
1348
0
      break;
1349
0
    case PARTITION_HORZ_A:
1350
0
      DEC_BLOCK(mi_row, mi_col, bsize2);
1351
0
      DEC_BLOCK(mi_row, mi_col + hbs, bsize2);
1352
0
      DEC_BLOCK(mi_row + hbs, mi_col, subsize);
1353
0
      break;
1354
0
    case PARTITION_HORZ_B:
1355
0
      DEC_BLOCK(mi_row, mi_col, subsize);
1356
0
      DEC_BLOCK(mi_row + hbs, mi_col, bsize2);
1357
0
      DEC_BLOCK(mi_row + hbs, mi_col + hbs, bsize2);
1358
0
      break;
1359
0
    case PARTITION_VERT_A:
1360
0
      DEC_BLOCK(mi_row, mi_col, bsize2);
1361
0
      DEC_BLOCK(mi_row + hbs, mi_col, bsize2);
1362
0
      DEC_BLOCK(mi_row, mi_col + hbs, subsize);
1363
0
      break;
1364
0
    case PARTITION_VERT_B:
1365
0
      DEC_BLOCK(mi_row, mi_col, subsize);
1366
0
      DEC_BLOCK(mi_row, mi_col + hbs, bsize2);
1367
0
      DEC_BLOCK(mi_row + hbs, mi_col + hbs, bsize2);
1368
0
      break;
1369
0
    case PARTITION_HORZ_4:
1370
0
      for (int i = 0; i < 4; ++i) {
1371
0
        int this_mi_row = mi_row + i * quarter_step;
1372
0
        if (i > 0 && this_mi_row >= cm->mi_params.mi_rows) break;
1373
0
        DEC_BLOCK(this_mi_row, mi_col, subsize);
1374
0
      }
1375
0
      break;
1376
0
    case PARTITION_VERT_4:
1377
0
      for (int i = 0; i < 4; ++i) {
1378
0
        int this_mi_col = mi_col + i * quarter_step;
1379
0
        if (i > 0 && this_mi_col >= cm->mi_params.mi_cols) break;
1380
0
        DEC_BLOCK(mi_row, this_mi_col, subsize);
1381
0
      }
1382
0
      break;
1383
0
    default: assert(0 && "Invalid partition type");
1384
0
  }
1385
1386
0
#undef DEC_PARTITION
1387
0
#undef DEC_BLOCK
1388
0
#undef DEC_BLOCK_EPT_ARG
1389
0
#undef DEC_BLOCK_STX_ARG
1390
1391
0
  if (parse_decode_flag & 1)
1392
0
    update_ext_partition_context(xd, mi_row, mi_col, subsize, bsize, partition);
1393
0
}
1394
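Editor's note: the 2-bit parse_decode_flag above is used directly as an index into the block_visit[] table, so the parse-only (1), decode-only (2), and combined parse+decode (3) passes all go through the same call site; in the table above, indices 1 and 3 both point at parse_decode_block. A minimal standalone sketch of that dispatch pattern follows; the visitor names and the (row, col) payload are illustrative, not the library's types.

#include <stdio.h>

typedef void (*visitor_fn)(int mi_row, int mi_col);

static void parse_only(int r, int c)  { printf("parse  %d,%d\n", r, c); }
static void decode_only(int r, int c) { printf("decode %d,%d\n", r, c); }

/* Index 1 = parse, 2 = decode, 3 = parse+decode (the same visitor handles
 * both in the real table); index 0 is never used. */
static const visitor_fn visit[4] = { NULL, parse_only, decode_only, parse_only };

int main(void) {
  for (int flag = 1; flag <= 3; ++flag) visit[flag](0, 0);
  return 0;
}
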
1395
static AOM_INLINE void setup_bool_decoder(
1396
    const uint8_t *data, const uint8_t *data_end, const size_t read_size,
1397
    struct aom_internal_error_info *error_info, aom_reader *r,
1398
0
    uint8_t allow_update_cdf) {
1399
  // Validate the calculated partition length. If the buffer
1400
  // described by the partition can't be fully read, then throw
1401
  // an error rather than reading past the end of the data.
1402
0
  if (!read_is_valid(data, read_size, data_end))
1403
0
    aom_internal_error(error_info, AOM_CODEC_CORRUPT_FRAME,
1404
0
                       "Truncated packet or corrupt tile length");
1405
1406
0
  if (aom_reader_init(r, data, read_size))
1407
0
    aom_internal_error(error_info, AOM_CODEC_MEM_ERROR,
1408
0
                       "Failed to allocate bool decoder %d", 1);
1409
1410
0
  r->allow_update_cdf = allow_update_cdf;
1411
0
}
1412
1413
static AOM_INLINE void setup_segmentation(AV1_COMMON *const cm,
1414
0
                                          struct aom_read_bit_buffer *rb) {
1415
0
  struct segmentation *const seg = &cm->seg;
1416
1417
0
  seg->update_map = 0;
1418
0
  seg->update_data = 0;
1419
0
  seg->temporal_update = 0;
1420
1421
0
  seg->enabled = aom_rb_read_bit(rb);
1422
0
  if (!seg->enabled) {
1423
0
    if (cm->cur_frame->seg_map) {
1424
0
      memset(cm->cur_frame->seg_map, 0,
1425
0
             (cm->cur_frame->mi_rows * cm->cur_frame->mi_cols));
1426
0
    }
1427
1428
0
    memset(seg, 0, sizeof(*seg));
1429
0
    segfeatures_copy(&cm->cur_frame->seg, seg);
1430
0
    return;
1431
0
  }
1432
0
  if (cm->seg.enabled && cm->prev_frame &&
1433
0
      (cm->mi_params.mi_rows == cm->prev_frame->mi_rows) &&
1434
0
      (cm->mi_params.mi_cols == cm->prev_frame->mi_cols)) {
1435
0
    cm->last_frame_seg_map = cm->prev_frame->seg_map;
1436
0
  } else {
1437
0
    cm->last_frame_seg_map = NULL;
1438
0
  }
1439
  // Read update flags
1440
0
  if (cm->features.primary_ref_frame == PRIMARY_REF_NONE) {
1441
    // These frames can't use previous frames, so must signal map + features
1442
0
    seg->update_map = 1;
1443
0
    seg->temporal_update = 0;
1444
0
    seg->update_data = 1;
1445
0
  } else {
1446
0
    seg->update_map = aom_rb_read_bit(rb);
1447
0
    if (seg->update_map) {
1448
0
      seg->temporal_update = aom_rb_read_bit(rb);
1449
0
    } else {
1450
0
      seg->temporal_update = 0;
1451
0
    }
1452
0
    seg->update_data = aom_rb_read_bit(rb);
1453
0
  }
1454
1455
  // Segmentation data update
1456
0
  if (seg->update_data) {
1457
0
    av1_clearall_segfeatures(seg);
1458
1459
0
    for (int i = 0; i < MAX_SEGMENTS; i++) {
1460
0
      for (int j = 0; j < SEG_LVL_MAX; j++) {
1461
0
        int data = 0;
1462
0
        const int feature_enabled = aom_rb_read_bit(rb);
1463
0
        if (feature_enabled) {
1464
0
          av1_enable_segfeature(seg, i, j);
1465
1466
0
          const int data_max = av1_seg_feature_data_max(j);
1467
0
          const int data_min = -data_max;
1468
0
          const int ubits = get_unsigned_bits(data_max);
1469
1470
0
          if (av1_is_segfeature_signed(j)) {
1471
0
            data = aom_rb_read_inv_signed_literal(rb, ubits);
1472
0
          } else {
1473
0
            data = aom_rb_read_literal(rb, ubits);
1474
0
          }
1475
1476
0
          data = clamp(data, data_min, data_max);
1477
0
        }
1478
0
        av1_set_segdata(seg, i, j, data);
1479
0
      }
1480
0
    }
1481
0
    av1_calculate_segdata(seg);
1482
0
  } else if (cm->prev_frame) {
1483
0
    segfeatures_copy(seg, &cm->prev_frame->seg);
1484
0
  }
1485
0
  segfeatures_copy(&cm->cur_frame->seg, seg);
1486
0
}
1487
1488
static AOM_INLINE void decode_restoration_mode(AV1_COMMON *cm,
1489
0
                                               struct aom_read_bit_buffer *rb) {
1490
0
  assert(!cm->features.all_lossless);
1491
0
  const int num_planes = av1_num_planes(cm);
1492
0
  if (cm->features.allow_intrabc) return;
1493
0
  int all_none = 1, chroma_none = 1;
1494
0
  for (int p = 0; p < num_planes; ++p) {
1495
0
    RestorationInfo *rsi = &cm->rst_info[p];
1496
0
    if (aom_rb_read_bit(rb)) {
1497
0
      rsi->frame_restoration_type =
1498
0
          aom_rb_read_bit(rb) ? RESTORE_SGRPROJ : RESTORE_WIENER;
1499
0
    } else {
1500
0
      rsi->frame_restoration_type =
1501
0
          aom_rb_read_bit(rb) ? RESTORE_SWITCHABLE : RESTORE_NONE;
1502
0
    }
1503
0
    if (rsi->frame_restoration_type != RESTORE_NONE) {
1504
0
      all_none = 0;
1505
0
      chroma_none &= p == 0;
1506
0
    }
1507
0
  }
1508
0
  if (!all_none) {
1509
#if CONFIG_REALTIME_ONLY
1510
    aom_internal_error(cm->error, AOM_CODEC_UNSUP_FEATURE,
1511
                       "Realtime only build doesn't support loop restoration");
1512
#endif
1513
0
    assert(cm->seq_params->sb_size == BLOCK_64X64 ||
1514
0
           cm->seq_params->sb_size == BLOCK_128X128);
1515
0
    const int sb_size = cm->seq_params->sb_size == BLOCK_128X128 ? 128 : 64;
1516
1517
0
    for (int p = 0; p < num_planes; ++p)
1518
0
      cm->rst_info[p].restoration_unit_size = sb_size;
1519
1520
0
    RestorationInfo *rsi = &cm->rst_info[0];
1521
1522
0
    if (sb_size == 64) {
1523
0
      rsi->restoration_unit_size <<= aom_rb_read_bit(rb);
1524
0
    }
1525
0
    if (rsi->restoration_unit_size > 64) {
1526
0
      rsi->restoration_unit_size <<= aom_rb_read_bit(rb);
1527
0
    }
1528
0
  } else {
1529
0
    const int size = RESTORATION_UNITSIZE_MAX;
1530
0
    for (int p = 0; p < num_planes; ++p)
1531
0
      cm->rst_info[p].restoration_unit_size = size;
1532
0
  }
1533
1534
0
  if (num_planes > 1) {
1535
0
    int s =
1536
0
        AOMMIN(cm->seq_params->subsampling_x, cm->seq_params->subsampling_y);
1537
0
    if (s && !chroma_none) {
1538
0
      cm->rst_info[1].restoration_unit_size =
1539
0
          cm->rst_info[0].restoration_unit_size >> (aom_rb_read_bit(rb) * s);
1540
0
    } else {
1541
0
      cm->rst_info[1].restoration_unit_size =
1542
0
          cm->rst_info[0].restoration_unit_size;
1543
0
    }
1544
0
    cm->rst_info[2].restoration_unit_size =
1545
0
        cm->rst_info[1].restoration_unit_size;
1546
0
  }
1547
0
}
1548
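Editor's note: a worked example of the unit-size logic above, assuming the usual AV1 constants (64 or 128 pixel superblocks, a maximum restoration unit size of 256). With 64x64 superblocks the luma unit starts at 64 and each set bit doubles it; the chroma unit is then optionally halved once when both chroma axes are subsampled. The bit values below are made-up inputs, not decoded data.

#include <stdio.h>

int main(void) {
  int luma_unit = 64;  /* sb_size == BLOCK_64X64 */
  int first_bit = 1, second_bit = 1, chroma_bit = 1;  /* example bitstream bits */

  luma_unit <<= first_bit;                        /* 64  -> 128 */
  if (luma_unit > 64) luma_unit <<= second_bit;   /* 128 -> 256 */

  /* 4:2:0: subsampling_x == subsampling_y == 1, so s == 1. */
  int s = 1;
  int chroma_unit = luma_unit >> (chroma_bit * s); /* 256 -> 128 */

  printf("luma unit %d, chroma unit %d\n", luma_unit, chroma_unit);
  return 0;
}
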
1549
#if !CONFIG_REALTIME_ONLY
1550
static AOM_INLINE void read_wiener_filter(int wiener_win,
1551
                                          WienerInfo *wiener_info,
1552
                                          WienerInfo *ref_wiener_info,
1553
0
                                          aom_reader *rb) {
1554
0
  memset(wiener_info->vfilter, 0, sizeof(wiener_info->vfilter));
1555
0
  memset(wiener_info->hfilter, 0, sizeof(wiener_info->hfilter));
1556
1557
0
  if (wiener_win == WIENER_WIN)
1558
0
    wiener_info->vfilter[0] = wiener_info->vfilter[WIENER_WIN - 1] =
1559
0
        aom_read_primitive_refsubexpfin(
1560
0
            rb, WIENER_FILT_TAP0_MAXV - WIENER_FILT_TAP0_MINV + 1,
1561
0
            WIENER_FILT_TAP0_SUBEXP_K,
1562
0
            ref_wiener_info->vfilter[0] - WIENER_FILT_TAP0_MINV, ACCT_STR) +
1563
0
        WIENER_FILT_TAP0_MINV;
1564
0
  else
1565
0
    wiener_info->vfilter[0] = wiener_info->vfilter[WIENER_WIN - 1] = 0;
1566
0
  wiener_info->vfilter[1] = wiener_info->vfilter[WIENER_WIN - 2] =
1567
0
      aom_read_primitive_refsubexpfin(
1568
0
          rb, WIENER_FILT_TAP1_MAXV - WIENER_FILT_TAP1_MINV + 1,
1569
0
          WIENER_FILT_TAP1_SUBEXP_K,
1570
0
          ref_wiener_info->vfilter[1] - WIENER_FILT_TAP1_MINV, ACCT_STR) +
1571
0
      WIENER_FILT_TAP1_MINV;
1572
0
  wiener_info->vfilter[2] = wiener_info->vfilter[WIENER_WIN - 3] =
1573
0
      aom_read_primitive_refsubexpfin(
1574
0
          rb, WIENER_FILT_TAP2_MAXV - WIENER_FILT_TAP2_MINV + 1,
1575
0
          WIENER_FILT_TAP2_SUBEXP_K,
1576
0
          ref_wiener_info->vfilter[2] - WIENER_FILT_TAP2_MINV, ACCT_STR) +
1577
0
      WIENER_FILT_TAP2_MINV;
1578
  // The central element has an implicit +WIENER_FILT_STEP
1579
0
  wiener_info->vfilter[WIENER_HALFWIN] =
1580
0
      -2 * (wiener_info->vfilter[0] + wiener_info->vfilter[1] +
1581
0
            wiener_info->vfilter[2]);
1582
1583
0
  if (wiener_win == WIENER_WIN)
1584
0
    wiener_info->hfilter[0] = wiener_info->hfilter[WIENER_WIN - 1] =
1585
0
        aom_read_primitive_refsubexpfin(
1586
0
            rb, WIENER_FILT_TAP0_MAXV - WIENER_FILT_TAP0_MINV + 1,
1587
0
            WIENER_FILT_TAP0_SUBEXP_K,
1588
0
            ref_wiener_info->hfilter[0] - WIENER_FILT_TAP0_MINV, ACCT_STR) +
1589
0
        WIENER_FILT_TAP0_MINV;
1590
0
  else
1591
0
    wiener_info->hfilter[0] = wiener_info->hfilter[WIENER_WIN - 1] = 0;
1592
0
  wiener_info->hfilter[1] = wiener_info->hfilter[WIENER_WIN - 2] =
1593
0
      aom_read_primitive_refsubexpfin(
1594
0
          rb, WIENER_FILT_TAP1_MAXV - WIENER_FILT_TAP1_MINV + 1,
1595
0
          WIENER_FILT_TAP1_SUBEXP_K,
1596
0
          ref_wiener_info->hfilter[1] - WIENER_FILT_TAP1_MINV, ACCT_STR) +
1597
0
      WIENER_FILT_TAP1_MINV;
1598
0
  wiener_info->hfilter[2] = wiener_info->hfilter[WIENER_WIN - 3] =
1599
0
      aom_read_primitive_refsubexpfin(
1600
0
          rb, WIENER_FILT_TAP2_MAXV - WIENER_FILT_TAP2_MINV + 1,
1601
0
          WIENER_FILT_TAP2_SUBEXP_K,
1602
0
          ref_wiener_info->hfilter[2] - WIENER_FILT_TAP2_MINV, ACCT_STR) +
1603
0
      WIENER_FILT_TAP2_MINV;
1604
  // The central element has an implicit +WIENER_FILT_STEP
1605
0
  wiener_info->hfilter[WIENER_HALFWIN] =
1606
0
      -2 * (wiener_info->hfilter[0] + wiener_info->hfilter[1] +
1607
0
            wiener_info->hfilter[2]);
1608
0
  memcpy(ref_wiener_info, wiener_info, sizeof(*wiener_info));
1609
0
}
1610
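Editor's note: the negative center tap written above follows from the symmetry and normalization of the 7-tap Wiener filter: the taps are mirrored (t0 t1 t2 t3 t2 t1 t0) and, once the implicit step is added back, they sum to WIENER_FILT_STEP (assumed here to be 1 << 7 = 128). A small sketch of that reconstruction, using made-up tap values.

#include <stdio.h>

#define WIENER_FILT_STEP (1 << 7)  /* assumed filter normalization, 128 */

int main(void) {
  /* Outer taps as decoded (example values only). */
  int t0 = 3, t1 = -7, t2 = 15;
  /* Stored center tap, without the implicit step. */
  int stored_center = -2 * (t0 + t1 + t2);
  /* Actual center tap used for filtering. */
  int center = stored_center + WIENER_FILT_STEP;
  /* The full symmetric kernel sums back to WIENER_FILT_STEP. */
  int sum = 2 * (t0 + t1 + t2) + center;
  printf("stored %d, center %d, kernel sum %d\n", stored_center, center, sum);
  return 0;
}
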
1611
static AOM_INLINE void read_sgrproj_filter(SgrprojInfo *sgrproj_info,
1612
                                           SgrprojInfo *ref_sgrproj_info,
1613
0
                                           aom_reader *rb) {
1614
0
  sgrproj_info->ep = aom_read_literal(rb, SGRPROJ_PARAMS_BITS, ACCT_STR);
1615
0
  const sgr_params_type *params = &av1_sgr_params[sgrproj_info->ep];
1616
1617
0
  if (params->r[0] == 0) {
1618
0
    sgrproj_info->xqd[0] = 0;
1619
0
    sgrproj_info->xqd[1] =
1620
0
        aom_read_primitive_refsubexpfin(
1621
0
            rb, SGRPROJ_PRJ_MAX1 - SGRPROJ_PRJ_MIN1 + 1, SGRPROJ_PRJ_SUBEXP_K,
1622
0
            ref_sgrproj_info->xqd[1] - SGRPROJ_PRJ_MIN1, ACCT_STR) +
1623
0
        SGRPROJ_PRJ_MIN1;
1624
0
  } else if (params->r[1] == 0) {
1625
0
    sgrproj_info->xqd[0] =
1626
0
        aom_read_primitive_refsubexpfin(
1627
0
            rb, SGRPROJ_PRJ_MAX0 - SGRPROJ_PRJ_MIN0 + 1, SGRPROJ_PRJ_SUBEXP_K,
1628
0
            ref_sgrproj_info->xqd[0] - SGRPROJ_PRJ_MIN0, ACCT_STR) +
1629
0
        SGRPROJ_PRJ_MIN0;
1630
0
    sgrproj_info->xqd[1] = clamp((1 << SGRPROJ_PRJ_BITS) - sgrproj_info->xqd[0],
1631
0
                                 SGRPROJ_PRJ_MIN1, SGRPROJ_PRJ_MAX1);
1632
0
  } else {
1633
0
    sgrproj_info->xqd[0] =
1634
0
        aom_read_primitive_refsubexpfin(
1635
0
            rb, SGRPROJ_PRJ_MAX0 - SGRPROJ_PRJ_MIN0 + 1, SGRPROJ_PRJ_SUBEXP_K,
1636
0
            ref_sgrproj_info->xqd[0] - SGRPROJ_PRJ_MIN0, ACCT_STR) +
1637
0
        SGRPROJ_PRJ_MIN0;
1638
0
    sgrproj_info->xqd[1] =
1639
0
        aom_read_primitive_refsubexpfin(
1640
0
            rb, SGRPROJ_PRJ_MAX1 - SGRPROJ_PRJ_MIN1 + 1, SGRPROJ_PRJ_SUBEXP_K,
1641
0
            ref_sgrproj_info->xqd[1] - SGRPROJ_PRJ_MIN1, ACCT_STR) +
1642
0
        SGRPROJ_PRJ_MIN1;
1643
0
  }
1644
1645
0
  memcpy(ref_sgrproj_info, sgrproj_info, sizeof(*sgrproj_info));
1646
0
}
1647
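Editor's note: when the second self-guided radius is zero above, only xqd[0] is coded and xqd[1] is reconstructed so that, before clamping, the two projection weights sum to 1 << SGRPROJ_PRJ_BITS (assumed to be 7 bits here). A small arithmetic sketch; the xqd[0] value and the clamp bounds are illustrative stand-ins, not the codec's SGRPROJ_PRJ_MIN1/MAX1 constants.

#include <stdio.h>

#define SGRPROJ_PRJ_BITS 7  /* assumed projection precision */

static int clamp_int(int v, int lo, int hi) {
  return v < lo ? lo : (v > hi ? hi : v);
}

int main(void) {
  int xqd0 = 37;               /* example decoded first weight */
  int min1 = -32, max1 = 95;   /* placeholder bounds for the second weight */
  int xqd1 = clamp_int((1 << SGRPROJ_PRJ_BITS) - xqd0, min1, max1);
  printf("xqd = { %d, %d }, sum = %d\n", xqd0, xqd1, xqd0 + xqd1);
  return 0;
}
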
1648
static AOM_INLINE void loop_restoration_read_sb_coeffs(
1649
    const AV1_COMMON *const cm, MACROBLOCKD *xd, aom_reader *const r, int plane,
1650
0
    int runit_idx) {
1651
0
  const RestorationInfo *rsi = &cm->rst_info[plane];
1652
0
  RestorationUnitInfo *rui = &rsi->unit_info[runit_idx];
1653
0
  assert(rsi->frame_restoration_type != RESTORE_NONE);
1654
1655
0
  assert(!cm->features.all_lossless);
1656
1657
0
  const int wiener_win = (plane > 0) ? WIENER_WIN_CHROMA : WIENER_WIN;
1658
0
  WienerInfo *wiener_info = xd->wiener_info + plane;
1659
0
  SgrprojInfo *sgrproj_info = xd->sgrproj_info + plane;
1660
1661
0
  if (rsi->frame_restoration_type == RESTORE_SWITCHABLE) {
1662
0
    rui->restoration_type =
1663
0
        aom_read_symbol(r, xd->tile_ctx->switchable_restore_cdf,
1664
0
                        RESTORE_SWITCHABLE_TYPES, ACCT_STR);
1665
0
    switch (rui->restoration_type) {
1666
0
      case RESTORE_WIENER:
1667
0
        read_wiener_filter(wiener_win, &rui->wiener_info, wiener_info, r);
1668
0
        break;
1669
0
      case RESTORE_SGRPROJ:
1670
0
        read_sgrproj_filter(&rui->sgrproj_info, sgrproj_info, r);
1671
0
        break;
1672
0
      default: assert(rui->restoration_type == RESTORE_NONE); break;
1673
0
    }
1674
0
  } else if (rsi->frame_restoration_type == RESTORE_WIENER) {
1675
0
    if (aom_read_symbol(r, xd->tile_ctx->wiener_restore_cdf, 2, ACCT_STR)) {
1676
0
      rui->restoration_type = RESTORE_WIENER;
1677
0
      read_wiener_filter(wiener_win, &rui->wiener_info, wiener_info, r);
1678
0
    } else {
1679
0
      rui->restoration_type = RESTORE_NONE;
1680
0
    }
1681
0
  } else if (rsi->frame_restoration_type == RESTORE_SGRPROJ) {
1682
0
    if (aom_read_symbol(r, xd->tile_ctx->sgrproj_restore_cdf, 2, ACCT_STR)) {
1683
0
      rui->restoration_type = RESTORE_SGRPROJ;
1684
0
      read_sgrproj_filter(&rui->sgrproj_info, sgrproj_info, r);
1685
0
    } else {
1686
0
      rui->restoration_type = RESTORE_NONE;
1687
0
    }
1688
0
  }
1689
0
}
1690
#endif  // !CONFIG_REALTIME_ONLY
1691
1692
static AOM_INLINE void setup_loopfilter(AV1_COMMON *cm,
1693
0
                                        struct aom_read_bit_buffer *rb) {
1694
0
  const int num_planes = av1_num_planes(cm);
1695
0
  struct loopfilter *lf = &cm->lf;
1696
1697
0
  if (cm->features.allow_intrabc || cm->features.coded_lossless) {
1698
    // write default deltas to frame buffer
1699
0
    av1_set_default_ref_deltas(cm->cur_frame->ref_deltas);
1700
0
    av1_set_default_mode_deltas(cm->cur_frame->mode_deltas);
1701
0
    return;
1702
0
  }
1703
0
  assert(!cm->features.coded_lossless);
1704
0
  if (cm->prev_frame) {
1705
    // write deltas to frame buffer
1706
0
    memcpy(lf->ref_deltas, cm->prev_frame->ref_deltas, REF_FRAMES);
1707
0
    memcpy(lf->mode_deltas, cm->prev_frame->mode_deltas, MAX_MODE_LF_DELTAS);
1708
0
  } else {
1709
0
    av1_set_default_ref_deltas(lf->ref_deltas);
1710
0
    av1_set_default_mode_deltas(lf->mode_deltas);
1711
0
  }
1712
0
  lf->filter_level[0] = aom_rb_read_literal(rb, 6);
1713
0
  lf->filter_level[1] = aom_rb_read_literal(rb, 6);
1714
0
  if (num_planes > 1) {
1715
0
    if (lf->filter_level[0] || lf->filter_level[1]) {
1716
0
      lf->filter_level_u = aom_rb_read_literal(rb, 6);
1717
0
      lf->filter_level_v = aom_rb_read_literal(rb, 6);
1718
0
    }
1719
0
  }
1720
0
  lf->sharpness_level = aom_rb_read_literal(rb, 3);
1721
1722
  // Read in loop filter deltas applied at the MB level based on mode or ref
1723
  // frame.
1724
0
  lf->mode_ref_delta_update = 0;
1725
1726
0
  lf->mode_ref_delta_enabled = aom_rb_read_bit(rb);
1727
0
  if (lf->mode_ref_delta_enabled) {
1728
0
    lf->mode_ref_delta_update = aom_rb_read_bit(rb);
1729
0
    if (lf->mode_ref_delta_update) {
1730
0
      for (int i = 0; i < REF_FRAMES; i++)
1731
0
        if (aom_rb_read_bit(rb))
1732
0
          lf->ref_deltas[i] = aom_rb_read_inv_signed_literal(rb, 6);
1733
1734
0
      for (int i = 0; i < MAX_MODE_LF_DELTAS; i++)
1735
0
        if (aom_rb_read_bit(rb))
1736
0
          lf->mode_deltas[i] = aom_rb_read_inv_signed_literal(rb, 6);
1737
0
    }
1738
0
  }
1739
1740
  // write deltas to frame buffer
1741
0
  memcpy(cm->cur_frame->ref_deltas, lf->ref_deltas, REF_FRAMES);
1742
0
  memcpy(cm->cur_frame->mode_deltas, lf->mode_deltas, MAX_MODE_LF_DELTAS);
1743
0
}
1744
1745
static AOM_INLINE void setup_cdef(AV1_COMMON *cm,
1746
0
                                  struct aom_read_bit_buffer *rb) {
1747
0
  const int num_planes = av1_num_planes(cm);
1748
0
  CdefInfo *const cdef_info = &cm->cdef_info;
1749
1750
0
  if (cm->features.allow_intrabc) return;
1751
0
  cdef_info->cdef_damping = aom_rb_read_literal(rb, 2) + 3;
1752
0
  cdef_info->cdef_bits = aom_rb_read_literal(rb, 2);
1753
0
  cdef_info->nb_cdef_strengths = 1 << cdef_info->cdef_bits;
1754
0
  for (int i = 0; i < cdef_info->nb_cdef_strengths; i++) {
1755
0
    cdef_info->cdef_strengths[i] = aom_rb_read_literal(rb, CDEF_STRENGTH_BITS);
1756
0
    cdef_info->cdef_uv_strengths[i] =
1757
0
        num_planes > 1 ? aom_rb_read_literal(rb, CDEF_STRENGTH_BITS) : 0;
1758
0
  }
1759
0
}
1760
1761
0
static INLINE int read_delta_q(struct aom_read_bit_buffer *rb) {
1762
0
  return aom_rb_read_bit(rb) ? aom_rb_read_inv_signed_literal(rb, 6) : 0;
1763
0
}
1764
1765
static AOM_INLINE void setup_quantization(CommonQuantParams *quant_params,
1766
                                          int num_planes,
1767
                                          bool separate_uv_delta_q,
1768
0
                                          struct aom_read_bit_buffer *rb) {
1769
0
  quant_params->base_qindex = aom_rb_read_literal(rb, QINDEX_BITS);
1770
0
  quant_params->y_dc_delta_q = read_delta_q(rb);
1771
0
  if (num_planes > 1) {
1772
0
    int diff_uv_delta = 0;
1773
0
    if (separate_uv_delta_q) diff_uv_delta = aom_rb_read_bit(rb);
1774
0
    quant_params->u_dc_delta_q = read_delta_q(rb);
1775
0
    quant_params->u_ac_delta_q = read_delta_q(rb);
1776
0
    if (diff_uv_delta) {
1777
0
      quant_params->v_dc_delta_q = read_delta_q(rb);
1778
0
      quant_params->v_ac_delta_q = read_delta_q(rb);
1779
0
    } else {
1780
0
      quant_params->v_dc_delta_q = quant_params->u_dc_delta_q;
1781
0
      quant_params->v_ac_delta_q = quant_params->u_ac_delta_q;
1782
0
    }
1783
0
  } else {
1784
0
    quant_params->u_dc_delta_q = 0;
1785
0
    quant_params->u_ac_delta_q = 0;
1786
0
    quant_params->v_dc_delta_q = 0;
1787
0
    quant_params->v_ac_delta_q = 0;
1788
0
  }
1789
0
  quant_params->using_qmatrix = aom_rb_read_bit(rb);
1790
0
  if (quant_params->using_qmatrix) {
1791
0
    quant_params->qmatrix_level_y = aom_rb_read_literal(rb, QM_LEVEL_BITS);
1792
0
    quant_params->qmatrix_level_u = aom_rb_read_literal(rb, QM_LEVEL_BITS);
1793
0
    if (!separate_uv_delta_q)
1794
0
      quant_params->qmatrix_level_v = quant_params->qmatrix_level_u;
1795
0
    else
1796
0
      quant_params->qmatrix_level_v = aom_rb_read_literal(rb, QM_LEVEL_BITS);
1797
0
  } else {
1798
0
    quant_params->qmatrix_level_y = 0;
1799
0
    quant_params->qmatrix_level_u = 0;
1800
0
    quant_params->qmatrix_level_v = 0;
1801
0
  }
1802
0
}
1803
1804
// Build y/uv dequant values based on segmentation.
1805
static AOM_INLINE void setup_segmentation_dequant(AV1_COMMON *const cm,
1806
0
                                                  MACROBLOCKD *const xd) {
1807
0
  const int bit_depth = cm->seq_params->bit_depth;
1808
  // When segmentation is disabled, only the first value is used.  The
1809
  // remaining are don't cares.
1810
0
  const int max_segments = cm->seg.enabled ? MAX_SEGMENTS : 1;
1811
0
  CommonQuantParams *const quant_params = &cm->quant_params;
1812
0
  for (int i = 0; i < max_segments; ++i) {
1813
0
    const int qindex = xd->qindex[i];
1814
0
    quant_params->y_dequant_QTX[i][0] =
1815
0
        av1_dc_quant_QTX(qindex, quant_params->y_dc_delta_q, bit_depth);
1816
0
    quant_params->y_dequant_QTX[i][1] = av1_ac_quant_QTX(qindex, 0, bit_depth);
1817
0
    quant_params->u_dequant_QTX[i][0] =
1818
0
        av1_dc_quant_QTX(qindex, quant_params->u_dc_delta_q, bit_depth);
1819
0
    quant_params->u_dequant_QTX[i][1] =
1820
0
        av1_ac_quant_QTX(qindex, quant_params->u_ac_delta_q, bit_depth);
1821
0
    quant_params->v_dequant_QTX[i][0] =
1822
0
        av1_dc_quant_QTX(qindex, quant_params->v_dc_delta_q, bit_depth);
1823
0
    quant_params->v_dequant_QTX[i][1] =
1824
0
        av1_ac_quant_QTX(qindex, quant_params->v_ac_delta_q, bit_depth);
1825
0
    const int use_qmatrix = av1_use_qmatrix(quant_params, xd, i);
1826
    // NB: depends on base index so there is only 1 set per frame
1827
    // No quant weighting when lossless or signalled not using QM
1828
0
    const int qmlevel_y =
1829
0
        use_qmatrix ? quant_params->qmatrix_level_y : NUM_QM_LEVELS - 1;
1830
0
    for (int j = 0; j < TX_SIZES_ALL; ++j) {
1831
0
      quant_params->y_iqmatrix[i][j] =
1832
0
          av1_iqmatrix(quant_params, qmlevel_y, AOM_PLANE_Y, j);
1833
0
    }
1834
0
    const int qmlevel_u =
1835
0
        use_qmatrix ? quant_params->qmatrix_level_u : NUM_QM_LEVELS - 1;
1836
0
    for (int j = 0; j < TX_SIZES_ALL; ++j) {
1837
0
      quant_params->u_iqmatrix[i][j] =
1838
0
          av1_iqmatrix(quant_params, qmlevel_u, AOM_PLANE_U, j);
1839
0
    }
1840
0
    const int qmlevel_v =
1841
0
        use_qmatrix ? quant_params->qmatrix_level_v : NUM_QM_LEVELS - 1;
1842
0
    for (int j = 0; j < TX_SIZES_ALL; ++j) {
1843
0
      quant_params->v_iqmatrix[i][j] =
1844
0
          av1_iqmatrix(quant_params, qmlevel_v, AOM_PLANE_V, j);
1845
0
    }
1846
0
  }
1847
0
}
1848
1849
0
static InterpFilter read_frame_interp_filter(struct aom_read_bit_buffer *rb) {
1850
0
  return aom_rb_read_bit(rb) ? SWITCHABLE
1851
0
                             : aom_rb_read_literal(rb, LOG_SWITCHABLE_FILTERS);
1852
0
}
1853
1854
static AOM_INLINE void setup_render_size(AV1_COMMON *cm,
1855
0
                                         struct aom_read_bit_buffer *rb) {
1856
0
  cm->render_width = cm->superres_upscaled_width;
1857
0
  cm->render_height = cm->superres_upscaled_height;
1858
0
  if (aom_rb_read_bit(rb))
1859
0
    av1_read_frame_size(rb, 16, 16, &cm->render_width, &cm->render_height);
1860
0
}
1861
1862
// TODO(afergs): make "struct aom_read_bit_buffer *const rb"?
1863
static AOM_INLINE void setup_superres(AV1_COMMON *const cm,
1864
                                      struct aom_read_bit_buffer *rb,
1865
0
                                      int *width, int *height) {
1866
0
  cm->superres_upscaled_width = *width;
1867
0
  cm->superres_upscaled_height = *height;
1868
1869
0
  const SequenceHeader *const seq_params = cm->seq_params;
1870
0
  if (!seq_params->enable_superres) return;
1871
1872
0
  if (aom_rb_read_bit(rb)) {
1873
0
    cm->superres_scale_denominator =
1874
0
        (uint8_t)aom_rb_read_literal(rb, SUPERRES_SCALE_BITS);
1875
0
    cm->superres_scale_denominator += SUPERRES_SCALE_DENOMINATOR_MIN;
1876
    // Don't edit cm->width or cm->height directly, or the buffers won't get
1877
    // resized correctly
1878
0
    av1_calculate_scaled_superres_size(width, height,
1879
0
                                       cm->superres_scale_denominator);
1880
0
  } else {
1881
    // 1:1 scaling - i.e. no scaling, scale not provided
1882
0
    cm->superres_scale_denominator = SCALE_NUMERATOR;
1883
0
  }
1884
0
}
1885
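Editor's note: the denominator read above is a SUPERRES_SCALE_BITS-bit value offset by SUPERRES_SCALE_DENOMINATOR_MIN, and the coded frame width then becomes roughly upscaled_width * SCALE_NUMERATOR / denominator (the exact rounding and evenness constraints live in av1_calculate_scaled_superres_size). A rough worked example, assuming the usual constants of a 3-bit field, a minimum denominator of 9, and a numerator of 8.

#include <stdio.h>

int main(void) {
  const int SCALE_NUMERATOR = 8;                 /* assumed */
  const int SUPERRES_SCALE_DENOMINATOR_MIN = 9;  /* assumed */
  int coded_bits = 7;  /* example value of the 3-bit field */
  int denom = coded_bits + SUPERRES_SCALE_DENOMINATOR_MIN;  /* 16: 2x upscale */

  int upscaled_width = 1920;
  /* Approximate downscaled width; the library applies its own rounding. */
  int coded_width = upscaled_width * SCALE_NUMERATOR / denom;
  printf("denominator %d, coded width ~%d\n", denom, coded_width);
  return 0;
}
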
1886
static AOM_INLINE void resize_context_buffers(AV1_COMMON *cm, int width,
1887
0
                                              int height) {
1888
0
#if CONFIG_SIZE_LIMIT
1889
0
  if (width > DECODE_WIDTH_LIMIT || height > DECODE_HEIGHT_LIMIT)
1890
0
    aom_internal_error(cm->error, AOM_CODEC_CORRUPT_FRAME,
1891
0
                       "Dimensions of %dx%d beyond allowed size of %dx%d.",
1892
0
                       width, height, DECODE_WIDTH_LIMIT, DECODE_HEIGHT_LIMIT);
1893
0
#endif
1894
0
  if (cm->width != width || cm->height != height) {
1895
0
    const int new_mi_rows =
1896
0
        ALIGN_POWER_OF_TWO(height, MI_SIZE_LOG2) >> MI_SIZE_LOG2;
1897
0
    const int new_mi_cols =
1898
0
        ALIGN_POWER_OF_TWO(width, MI_SIZE_LOG2) >> MI_SIZE_LOG2;
1899
1900
    // Allocations in av1_alloc_context_buffers() depend on individual
1901
    // dimensions as well as the overall size.
1902
0
    if (new_mi_cols > cm->mi_params.mi_cols ||
1903
0
        new_mi_rows > cm->mi_params.mi_rows) {
1904
0
      if (av1_alloc_context_buffers(cm, width, height, 0, BLOCK_4X4)) {
1905
        // The cm->mi_* values have been cleared and any existing context
1906
        // buffers have been freed. Clear cm->width and cm->height to be
1907
        // consistent and to force a realloc next time.
1908
0
        cm->width = 0;
1909
0
        cm->height = 0;
1910
0
        aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR,
1911
0
                           "Failed to allocate context buffers");
1912
0
      }
1913
0
    } else {
1914
0
      cm->mi_params.set_mb_mi(&cm->mi_params, width, height, 0, BLOCK_4X4);
1915
0
    }
1916
0
    av1_init_mi_buffers(&cm->mi_params);
1917
0
    cm->width = width;
1918
0
    cm->height = height;
1919
0
  }
1920
1921
0
  ensure_mv_buffer(cm->cur_frame, cm);
1922
0
  cm->cur_frame->width = cm->width;
1923
0
  cm->cur_frame->height = cm->height;
1924
0
}
1925
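Editor's note: the mode-info grid sized above uses 4x4 luma units (MI_SIZE_LOG2 == 2), so the frame dimensions are rounded up to a multiple of 4 and then divided by 4. A quick sketch of that rounding with an odd-sized example frame; the macro mirrors the alignment expression used above.

#include <stdio.h>

#define MI_SIZE_LOG2 2  /* 4x4 mode-info units */
#define ALIGN_POWER_OF_TWO(value, n) \
  (((value) + ((1 << (n)) - 1)) & ~((1 << (n)) - 1))

int main(void) {
  int width = 1918, height = 1082;  /* example coded size */
  int mi_cols = ALIGN_POWER_OF_TWO(width, MI_SIZE_LOG2) >> MI_SIZE_LOG2;
  int mi_rows = ALIGN_POWER_OF_TWO(height, MI_SIZE_LOG2) >> MI_SIZE_LOG2;
  printf("mi grid: %d cols x %d rows\n", mi_cols, mi_rows);  /* 480 x 271 */
  return 0;
}
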
1926
0
static AOM_INLINE void setup_buffer_pool(AV1_COMMON *cm) {
1927
0
  BufferPool *const pool = cm->buffer_pool;
1928
0
  const SequenceHeader *const seq_params = cm->seq_params;
1929
1930
0
  lock_buffer_pool(pool);
1931
0
  if (aom_realloc_frame_buffer(
1932
0
          &cm->cur_frame->buf, cm->width, cm->height, seq_params->subsampling_x,
1933
0
          seq_params->subsampling_y, seq_params->use_highbitdepth,
1934
0
          AOM_DEC_BORDER_IN_PIXELS, cm->features.byte_alignment,
1935
0
          &cm->cur_frame->raw_frame_buffer, pool->get_fb_cb, pool->cb_priv,
1936
0
          0)) {
1937
0
    unlock_buffer_pool(pool);
1938
0
    aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR,
1939
0
                       "Failed to allocate frame buffer");
1940
0
  }
1941
0
  unlock_buffer_pool(pool);
1942
1943
0
  cm->cur_frame->buf.bit_depth = (unsigned int)seq_params->bit_depth;
1944
0
  cm->cur_frame->buf.color_primaries = seq_params->color_primaries;
1945
0
  cm->cur_frame->buf.transfer_characteristics =
1946
0
      seq_params->transfer_characteristics;
1947
0
  cm->cur_frame->buf.matrix_coefficients = seq_params->matrix_coefficients;
1948
0
  cm->cur_frame->buf.monochrome = seq_params->monochrome;
1949
0
  cm->cur_frame->buf.chroma_sample_position =
1950
0
      seq_params->chroma_sample_position;
1951
0
  cm->cur_frame->buf.color_range = seq_params->color_range;
1952
0
  cm->cur_frame->buf.render_width = cm->render_width;
1953
0
  cm->cur_frame->buf.render_height = cm->render_height;
1954
0
}
1955
1956
static AOM_INLINE void setup_frame_size(AV1_COMMON *cm,
1957
                                        int frame_size_override_flag,
1958
0
                                        struct aom_read_bit_buffer *rb) {
1959
0
  const SequenceHeader *const seq_params = cm->seq_params;
1960
0
  int width, height;
1961
1962
0
  if (frame_size_override_flag) {
1963
0
    int num_bits_width = seq_params->num_bits_width;
1964
0
    int num_bits_height = seq_params->num_bits_height;
1965
0
    av1_read_frame_size(rb, num_bits_width, num_bits_height, &width, &height);
1966
0
    if (width > seq_params->max_frame_width ||
1967
0
        height > seq_params->max_frame_height) {
1968
0
      aom_internal_error(cm->error, AOM_CODEC_CORRUPT_FRAME,
1969
0
                         "Frame dimensions are larger than the maximum values");
1970
0
    }
1971
0
  } else {
1972
0
    width = seq_params->max_frame_width;
1973
0
    height = seq_params->max_frame_height;
1974
0
  }
1975
1976
0
  setup_superres(cm, rb, &width, &height);
1977
0
  resize_context_buffers(cm, width, height);
1978
0
  setup_render_size(cm, rb);
1979
0
  setup_buffer_pool(cm);
1980
0
}
1981
1982
static AOM_INLINE void setup_sb_size(SequenceHeader *seq_params,
1983
0
                                     struct aom_read_bit_buffer *rb) {
1984
0
  set_sb_size(seq_params, aom_rb_read_bit(rb) ? BLOCK_128X128 : BLOCK_64X64);
1985
0
}
1986
1987
static INLINE int valid_ref_frame_img_fmt(aom_bit_depth_t ref_bit_depth,
1988
                                          int ref_xss, int ref_yss,
1989
                                          aom_bit_depth_t this_bit_depth,
1990
0
                                          int this_xss, int this_yss) {
1991
0
  return ref_bit_depth == this_bit_depth && ref_xss == this_xss &&
1992
0
         ref_yss == this_yss;
1993
0
}
1994
1995
static AOM_INLINE void setup_frame_size_with_refs(
1996
0
    AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
1997
0
  int width, height;
1998
0
  int found = 0;
1999
0
  int has_valid_ref_frame = 0;
2000
0
  for (int i = LAST_FRAME; i <= ALTREF_FRAME; ++i) {
2001
0
    if (aom_rb_read_bit(rb)) {
2002
0
      const RefCntBuffer *const ref_buf = get_ref_frame_buf(cm, i);
2003
      // This will never be NULL in a normal stream, as streams are required to
2004
      // have a shown keyframe before any inter frames, which would refresh all
2005
      // the reference buffers. However, it might be null if we're starting in
2006
      // the middle of a stream, and static analysis will error if we don't do
2007
      // a null check here.
2008
0
      if (ref_buf == NULL) {
2009
0
        aom_internal_error(cm->error, AOM_CODEC_CORRUPT_FRAME,
2010
0
                           "Invalid condition: invalid reference buffer");
2011
0
      } else {
2012
0
        const YV12_BUFFER_CONFIG *const buf = &ref_buf->buf;
2013
0
        width = buf->y_crop_width;
2014
0
        height = buf->y_crop_height;
2015
0
        cm->render_width = buf->render_width;
2016
0
        cm->render_height = buf->render_height;
2017
0
        setup_superres(cm, rb, &width, &height);
2018
0
        resize_context_buffers(cm, width, height);
2019
0
        found = 1;
2020
0
        break;
2021
0
      }
2022
0
    }
2023
0
  }
2024
2025
0
  const SequenceHeader *const seq_params = cm->seq_params;
2026
0
  if (!found) {
2027
0
    int num_bits_width = seq_params->num_bits_width;
2028
0
    int num_bits_height = seq_params->num_bits_height;
2029
2030
0
    av1_read_frame_size(rb, num_bits_width, num_bits_height, &width, &height);
2031
0
    setup_superres(cm, rb, &width, &height);
2032
0
    resize_context_buffers(cm, width, height);
2033
0
    setup_render_size(cm, rb);
2034
0
  }
2035
2036
0
  if (width <= 0 || height <= 0)
2037
0
    aom_internal_error(cm->error, AOM_CODEC_CORRUPT_FRAME,
2038
0
                       "Invalid frame size");
2039
2040
  // Check to make sure at least one of frames that this frame references
2041
  // has valid dimensions.
2042
0
  for (int i = LAST_FRAME; i <= ALTREF_FRAME; ++i) {
2043
0
    const RefCntBuffer *const ref_frame = get_ref_frame_buf(cm, i);
2044
0
    has_valid_ref_frame |=
2045
0
        valid_ref_frame_size(ref_frame->buf.y_crop_width,
2046
0
                             ref_frame->buf.y_crop_height, width, height);
2047
0
  }
2048
0
  if (!has_valid_ref_frame)
2049
0
    aom_internal_error(cm->error, AOM_CODEC_CORRUPT_FRAME,
2050
0
                       "Referenced frame has invalid size");
2051
0
  for (int i = LAST_FRAME; i <= ALTREF_FRAME; ++i) {
2052
0
    const RefCntBuffer *const ref_frame = get_ref_frame_buf(cm, i);
2053
0
    if (!valid_ref_frame_img_fmt(
2054
0
            ref_frame->buf.bit_depth, ref_frame->buf.subsampling_x,
2055
0
            ref_frame->buf.subsampling_y, seq_params->bit_depth,
2056
0
            seq_params->subsampling_x, seq_params->subsampling_y))
2057
0
      aom_internal_error(cm->error, AOM_CODEC_CORRUPT_FRAME,
2058
0
                         "Referenced frame has incompatible color format");
2059
0
  }
2060
0
  setup_buffer_pool(cm);
2061
0
}
2062
2063
// Same function as av1_read_uniform but reading from the uncompressed header wb
2064
0
static int rb_read_uniform(struct aom_read_bit_buffer *const rb, int n) {
2065
0
  const int l = get_unsigned_bits(n);
2066
0
  const int m = (1 << l) - n;
2067
0
  const int v = aom_rb_read_literal(rb, l - 1);
2068
0
  assert(l != 0);
2069
0
  if (v < m)
2070
0
    return v;
2071
0
  else
2072
0
    return (v << 1) - m + aom_rb_read_bit(rb);
2073
0
}
2074
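Editor's note: rb_read_uniform above is the standard trick for coding a value uniform over n outcomes when n is not a power of two: with l bits covering n and m = (1 << l) - n, the first m values use l - 1 bits and the remaining ones use l bits. A self-contained sketch decoding from a plain bit array (n = 5, so values 0..2 cost 2 bits and 3..4 cost 3 bits); the tiny bit reader is illustrative only.

#include <stdio.h>

static const int bits[] = { 1, 1, 1 };  /* example bitstream */
static int pos;

static int read_bit(void) { return bits[pos++]; }
static int read_literal(int n) {
  int v = 0;
  while (n--) v = (v << 1) | read_bit();
  return v;
}

/* Same scheme as rb_read_uniform: n = 5 -> l = 3, m = 3. */
static int read_uniform(int n) {
  int l = 0;
  while ((1 << l) < n) ++l;  /* smallest l with (1 << l) >= n */
  const int m = (1 << l) - n;
  const int v = read_literal(l - 1);
  return v < m ? v : (v << 1) - m + read_bit();
}

int main(void) {
  /* bits 1,1 -> v = 3 >= m, so one extra bit (1) gives 4. */
  printf("%d\n", read_uniform(5));
  return 0;
}
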
2075
static AOM_INLINE void read_tile_info_max_tile(
2076
0
    AV1_COMMON *const cm, struct aom_read_bit_buffer *const rb) {
2077
0
  const SequenceHeader *const seq_params = cm->seq_params;
2078
0
  CommonTileParams *const tiles = &cm->tiles;
2079
0
  int width_mi =
2080
0
      ALIGN_POWER_OF_TWO(cm->mi_params.mi_cols, seq_params->mib_size_log2);
2081
0
  int height_mi =
2082
0
      ALIGN_POWER_OF_TWO(cm->mi_params.mi_rows, seq_params->mib_size_log2);
2083
0
  int width_sb = width_mi >> seq_params->mib_size_log2;
2084
0
  int height_sb = height_mi >> seq_params->mib_size_log2;
2085
2086
0
  av1_get_tile_limits(cm);
2087
0
  tiles->uniform_spacing = aom_rb_read_bit(rb);
2088
2089
  // Read tile columns
2090
0
  if (tiles->uniform_spacing) {
2091
0
    tiles->log2_cols = tiles->min_log2_cols;
2092
0
    while (tiles->log2_cols < tiles->max_log2_cols) {
2093
0
      if (!aom_rb_read_bit(rb)) {
2094
0
        break;
2095
0
      }
2096
0
      tiles->log2_cols++;
2097
0
    }
2098
0
  } else {
2099
0
    int i;
2100
0
    int start_sb;
2101
0
    for (i = 0, start_sb = 0; width_sb > 0 && i < MAX_TILE_COLS; i++) {
2102
0
      const int size_sb =
2103
0
          1 + rb_read_uniform(rb, AOMMIN(width_sb, tiles->max_width_sb));
2104
0
      tiles->col_start_sb[i] = start_sb;
2105
0
      start_sb += size_sb;
2106
0
      width_sb -= size_sb;
2107
0
    }
2108
0
    tiles->cols = i;
2109
0
    tiles->col_start_sb[i] = start_sb + width_sb;
2110
0
  }
2111
0
  av1_calculate_tile_cols(seq_params, cm->mi_params.mi_rows,
2112
0
                          cm->mi_params.mi_cols, tiles);
2113
2114
  // Read tile rows
2115
0
  if (tiles->uniform_spacing) {
2116
0
    tiles->log2_rows = tiles->min_log2_rows;
2117
0
    while (tiles->log2_rows < tiles->max_log2_rows) {
2118
0
      if (!aom_rb_read_bit(rb)) {
2119
0
        break;
2120
0
      }
2121
0
      tiles->log2_rows++;
2122
0
    }
2123
0
  } else {
2124
0
    int i;
2125
0
    int start_sb;
2126
0
    for (i = 0, start_sb = 0; height_sb > 0 && i < MAX_TILE_ROWS; i++) {
2127
0
      const int size_sb =
2128
0
          1 + rb_read_uniform(rb, AOMMIN(height_sb, tiles->max_height_sb));
2129
0
      tiles->row_start_sb[i] = start_sb;
2130
0
      start_sb += size_sb;
2131
0
      height_sb -= size_sb;
2132
0
    }
2133
0
    tiles->rows = i;
2134
0
    tiles->row_start_sb[i] = start_sb + height_sb;
2135
0
  }
2136
0
  av1_calculate_tile_rows(seq_params, cm->mi_params.mi_rows, tiles);
2137
0
}
2138
2139
0
void av1_set_single_tile_decoding_mode(AV1_COMMON *const cm) {
2140
0
  cm->tiles.single_tile_decoding = 0;
2141
0
  if (cm->tiles.large_scale) {
2142
0
    struct loopfilter *lf = &cm->lf;
2143
0
    RestorationInfo *const rst_info = cm->rst_info;
2144
0
    const CdefInfo *const cdef_info = &cm->cdef_info;
2145
2146
    // Figure out single_tile_decoding by loopfilter_level.
2147
0
    const int no_loopfilter = !(lf->filter_level[0] || lf->filter_level[1]);
2148
0
    const int no_cdef = cdef_info->cdef_bits == 0 &&
2149
0
                        cdef_info->cdef_strengths[0] == 0 &&
2150
0
                        cdef_info->cdef_uv_strengths[0] == 0;
2151
0
    const int no_restoration =
2152
0
        rst_info[0].frame_restoration_type == RESTORE_NONE &&
2153
0
        rst_info[1].frame_restoration_type == RESTORE_NONE &&
2154
0
        rst_info[2].frame_restoration_type == RESTORE_NONE;
2155
0
    assert(IMPLIES(cm->features.coded_lossless, no_loopfilter && no_cdef));
2156
0
    assert(IMPLIES(cm->features.all_lossless, no_restoration));
2157
0
    cm->tiles.single_tile_decoding = no_loopfilter && no_cdef && no_restoration;
2158
0
  }
2159
0
}
2160
2161
static AOM_INLINE void read_tile_info(AV1Decoder *const pbi,
2162
0
                                      struct aom_read_bit_buffer *const rb) {
2163
0
  AV1_COMMON *const cm = &pbi->common;
2164
2165
0
  read_tile_info_max_tile(cm, rb);
2166
2167
0
  pbi->context_update_tile_id = 0;
2168
0
  if (cm->tiles.rows * cm->tiles.cols > 1) {
2169
    // tile to use for cdf update
2170
0
    pbi->context_update_tile_id =
2171
0
        aom_rb_read_literal(rb, cm->tiles.log2_rows + cm->tiles.log2_cols);
2172
0
    if (pbi->context_update_tile_id >= cm->tiles.rows * cm->tiles.cols) {
2173
0
      aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
2174
0
                         "Invalid context_update_tile_id");
2175
0
    }
2176
    // tile size magnitude
2177
0
    pbi->tile_size_bytes = aom_rb_read_literal(rb, 2) + 1;
2178
0
  }
2179
0
}
2180
2181
#if EXT_TILE_DEBUG
2182
static AOM_INLINE void read_ext_tile_info(
2183
0
    AV1Decoder *const pbi, struct aom_read_bit_buffer *const rb) {
2184
0
  AV1_COMMON *const cm = &pbi->common;
2185
2186
  // This information is stored as a separate byte.
2187
0
  int mod = rb->bit_offset % CHAR_BIT;
2188
0
  if (mod > 0) aom_rb_read_literal(rb, CHAR_BIT - mod);
2189
0
  assert(rb->bit_offset % CHAR_BIT == 0);
2190
2191
0
  if (cm->tiles.cols * cm->tiles.rows > 1) {
2192
    // Read the number of bytes used to store tile size
2193
0
    pbi->tile_col_size_bytes = aom_rb_read_literal(rb, 2) + 1;
2194
0
    pbi->tile_size_bytes = aom_rb_read_literal(rb, 2) + 1;
2195
0
  }
2196
0
}
2197
#endif  // EXT_TILE_DEBUG
2198
2199
0
static size_t mem_get_varsize(const uint8_t *src, int sz) {
2200
0
  switch (sz) {
2201
0
    case 1: return src[0];
2202
0
    case 2: return mem_get_le16(src);
2203
0
    case 3: return mem_get_le24(src);
2204
0
    case 4: return mem_get_le32(src);
2205
0
    default: assert(0 && "Invalid size"); return -1;
2206
0
  }
2207
0
}
2208
2209
#if EXT_TILE_DEBUG
2210
// Reads the next tile returning its size and adjusting '*data' accordingly
2211
// based on 'is_last'. On return, '*data' is updated to point to the end of the
2212
// raw tile buffer in the bit stream.
2213
static AOM_INLINE void get_ls_tile_buffer(
2214
    const uint8_t *const data_end, struct aom_internal_error_info *error_info,
2215
    const uint8_t **data, TileBufferDec (*const tile_buffers)[MAX_TILE_COLS],
2216
0
    int tile_size_bytes, int col, int row, int tile_copy_mode) {
2217
0
  size_t size;
2218
2219
0
  size_t copy_size = 0;
2220
0
  const uint8_t *copy_data = NULL;
2221
2222
0
  if (!read_is_valid(*data, tile_size_bytes, data_end))
2223
0
    aom_internal_error(error_info, AOM_CODEC_CORRUPT_FRAME,
2224
0
                       "Truncated packet or corrupt tile length");
2225
0
  size = mem_get_varsize(*data, tile_size_bytes);
2226
2227
  // If tile_copy_mode = 1, then the top bit of the tile header indicates copy
2228
  // mode.
2229
0
  if (tile_copy_mode && (size >> (tile_size_bytes * 8 - 1)) == 1) {
2230
    // The remaining bits in the top byte signal the row offset
2231
0
    int offset = (size >> (tile_size_bytes - 1) * 8) & 0x7f;
2232
2233
    // Currently, only use tiles in same column as reference tiles.
2234
0
    copy_data = tile_buffers[row - offset][col].data;
2235
0
    copy_size = tile_buffers[row - offset][col].size;
2236
0
    size = 0;
2237
0
  } else {
2238
0
    size += AV1_MIN_TILE_SIZE_BYTES;
2239
0
  }
2240
2241
0
  *data += tile_size_bytes;
2242
2243
0
  if (size > (size_t)(data_end - *data))
2244
0
    aom_internal_error(error_info, AOM_CODEC_CORRUPT_FRAME,
2245
0
                       "Truncated packet or corrupt tile size");
2246
2247
0
  if (size > 0) {
2248
0
    tile_buffers[row][col].data = *data;
2249
0
    tile_buffers[row][col].size = size;
2250
0
  } else {
2251
0
    tile_buffers[row][col].data = copy_data;
2252
0
    tile_buffers[row][col].size = copy_size;
2253
0
  }
2254
2255
0
  *data += size;
2256
0
}
2257
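Editor's note: with tile_copy_mode enabled above, the top bit of the size field acts as a copy flag and the low 7 bits of the top byte give a row offset within the same tile column. A small worked example of that unpacking for a 2-byte little-endian size field; the header bytes are made up.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static size_t get_le16(const uint8_t *p) { return p[0] | ((size_t)p[1] << 8); }

int main(void) {
  const int tile_size_bytes = 2;
  const uint8_t header[2] = { 0x00, 0x83 };  /* little-endian 0x8300 */
  size_t size = get_le16(header);

  if ((size >> (tile_size_bytes * 8 - 1)) == 1) {
    /* Copy mode: low 7 bits of the top byte are the row offset. */
    int offset = (int)((size >> ((tile_size_bytes - 1) * 8)) & 0x7f);
    printf("copy tile from %d rows above\n", offset);  /* 3 */
  } else {
    printf("ordinary tile of %zu bytes (plus the minimum tile size)\n", size);
  }
  return 0;
}
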
2258
// Returns the end of the last tile buffer
2259
// (tile_buffers[cm->tiles.rows - 1][cm->tiles.cols - 1]).
2260
static const uint8_t *get_ls_tile_buffers(
2261
    AV1Decoder *pbi, const uint8_t *data, const uint8_t *data_end,
2262
0
    TileBufferDec (*const tile_buffers)[MAX_TILE_COLS]) {
2263
0
  AV1_COMMON *const cm = &pbi->common;
2264
0
  const int tile_cols = cm->tiles.cols;
2265
0
  const int tile_rows = cm->tiles.rows;
2266
0
  const int have_tiles = tile_cols * tile_rows > 1;
2267
0
  const uint8_t *raw_data_end;  // The end of the last tile buffer
2268
2269
0
  if (!have_tiles) {
2270
0
    const size_t tile_size = data_end - data;
2271
0
    tile_buffers[0][0].data = data;
2272
0
    tile_buffers[0][0].size = tile_size;
2273
0
    raw_data_end = NULL;
2274
0
  } else {
2275
    // We locate only the tile buffers that are required, which are the ones
2276
    // specified by pbi->dec_tile_col and pbi->dec_tile_row. Also, we always
2277
    // need the last (bottom right) tile buffer, as we need to know where the
2278
    // end of the compressed frame buffer is for proper superframe decoding.
2279
2280
0
    const uint8_t *tile_col_data_end[MAX_TILE_COLS] = { NULL };
2281
0
    const uint8_t *const data_start = data;
2282
2283
0
    const int dec_tile_row = AOMMIN(pbi->dec_tile_row, tile_rows);
2284
0
    const int single_row = pbi->dec_tile_row >= 0;
2285
0
    const int tile_rows_start = single_row ? dec_tile_row : 0;
2286
0
    const int tile_rows_end = single_row ? tile_rows_start + 1 : tile_rows;
2287
0
    const int dec_tile_col = AOMMIN(pbi->dec_tile_col, tile_cols);
2288
0
    const int single_col = pbi->dec_tile_col >= 0;
2289
0
    const int tile_cols_start = single_col ? dec_tile_col : 0;
2290
0
    const int tile_cols_end = single_col ? tile_cols_start + 1 : tile_cols;
2291
2292
0
    const int tile_col_size_bytes = pbi->tile_col_size_bytes;
2293
0
    const int tile_size_bytes = pbi->tile_size_bytes;
2294
0
    int tile_width, tile_height;
2295
0
    av1_get_uniform_tile_size(cm, &tile_width, &tile_height);
2296
0
    const int tile_copy_mode =
2297
0
        ((AOMMAX(tile_width, tile_height) << MI_SIZE_LOG2) <= 256) ? 1 : 0;
2298
    // Read tile column sizes for all columns (we need the last tile buffer)
2299
0
    for (int c = 0; c < tile_cols; ++c) {
2300
0
      const int is_last = c == tile_cols - 1;
2301
0
      size_t tile_col_size;
2302
2303
0
      if (!is_last) {
2304
0
        tile_col_size = mem_get_varsize(data, tile_col_size_bytes);
2305
0
        data += tile_col_size_bytes;
2306
0
        tile_col_data_end[c] = data + tile_col_size;
2307
0
      } else {
2308
0
        tile_col_size = data_end - data;
2309
0
        tile_col_data_end[c] = data_end;
2310
0
      }
2311
0
      data += tile_col_size;
2312
0
    }
2313
2314
0
    data = data_start;
2315
2316
    // Read the required tile sizes.
2317
0
    for (int c = tile_cols_start; c < tile_cols_end; ++c) {
2318
0
      const int is_last = c == tile_cols - 1;
2319
2320
0
      if (c > 0) data = tile_col_data_end[c - 1];
2321
2322
0
      if (!is_last) data += tile_col_size_bytes;
2323
2324
      // Get the whole of the last column, otherwise stop at the required tile.
2325
0
      for (int r = 0; r < (is_last ? tile_rows : tile_rows_end); ++r) {
2326
0
        get_ls_tile_buffer(tile_col_data_end[c], &pbi->error, &data,
2327
0
                           tile_buffers, tile_size_bytes, c, r, tile_copy_mode);
2328
0
      }
2329
0
    }
2330
2331
    // If we have not read the last column, then read it to get the last tile.
2332
0
    if (tile_cols_end != tile_cols) {
2333
0
      const int c = tile_cols - 1;
2334
2335
0
      data = tile_col_data_end[c - 1];
2336
2337
0
      for (int r = 0; r < tile_rows; ++r) {
2338
0
        get_ls_tile_buffer(tile_col_data_end[c], &pbi->error, &data,
2339
0
                           tile_buffers, tile_size_bytes, c, r, tile_copy_mode);
2340
0
      }
2341
0
    }
2342
0
    raw_data_end = data;
2343
0
  }
2344
0
  return raw_data_end;
2345
0
}
2346
#endif  // EXT_TILE_DEBUG
2347
2348
static const uint8_t *get_ls_single_tile_buffer(
2349
    AV1Decoder *pbi, const uint8_t *data,
2350
0
    TileBufferDec (*const tile_buffers)[MAX_TILE_COLS]) {
2351
0
  assert(pbi->dec_tile_row >= 0 && pbi->dec_tile_col >= 0);
2352
0
  tile_buffers[pbi->dec_tile_row][pbi->dec_tile_col].data = data;
2353
0
  tile_buffers[pbi->dec_tile_row][pbi->dec_tile_col].size =
2354
0
      (size_t)pbi->coded_tile_data_size;
2355
0
  return data + pbi->coded_tile_data_size;
2356
0
}
2357
2358
// Reads the next tile returning its size and adjusting '*data' accordingly
2359
// based on 'is_last'.
2360
static AOM_INLINE void get_tile_buffer(
2361
    const uint8_t *const data_end, const int tile_size_bytes, int is_last,
2362
    struct aom_internal_error_info *error_info, const uint8_t **data,
2363
0
    TileBufferDec *const buf) {
2364
0
  size_t size;
2365
2366
0
  if (!is_last) {
2367
0
    if (!read_is_valid(*data, tile_size_bytes, data_end))
2368
0
      aom_internal_error(error_info, AOM_CODEC_CORRUPT_FRAME,
2369
0
                         "Not enough data to read tile size");
2370
2371
0
    size = mem_get_varsize(*data, tile_size_bytes) + AV1_MIN_TILE_SIZE_BYTES;
2372
0
    *data += tile_size_bytes;
2373
2374
0
    if (size > (size_t)(data_end - *data))
2375
0
      aom_internal_error(error_info, AOM_CODEC_CORRUPT_FRAME,
2376
0
                         "Truncated packet or corrupt tile size");
2377
0
  } else {
2378
0
    size = data_end - *data;
2379
0
  }
2380
2381
0
  buf->data = *data;
2382
0
  buf->size = size;
2383
2384
0
  *data += size;
2385
0
}
2386
2387
static AOM_INLINE void get_tile_buffers(
2388
    AV1Decoder *pbi, const uint8_t *data, const uint8_t *data_end,
2389
    TileBufferDec (*const tile_buffers)[MAX_TILE_COLS], int start_tile,
2390
0
    int end_tile) {
2391
0
  AV1_COMMON *const cm = &pbi->common;
2392
0
  const int tile_cols = cm->tiles.cols;
2393
0
  const int tile_rows = cm->tiles.rows;
2394
0
  int tc = 0;
2395
2396
0
  for (int r = 0; r < tile_rows; ++r) {
2397
0
    for (int c = 0; c < tile_cols; ++c, ++tc) {
2398
0
      TileBufferDec *const buf = &tile_buffers[r][c];
2399
2400
0
      const int is_last = (tc == end_tile);
2401
0
      const size_t hdr_offset = 0;
2402
2403
0
      if (tc < start_tile || tc > end_tile) continue;
2404
2405
0
      if (data + hdr_offset >= data_end)
2406
0
        aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
2407
0
                           "Data ended before all tiles were read.");
2408
0
      data += hdr_offset;
2409
0
      get_tile_buffer(data_end, pbi->tile_size_bytes, is_last, &pbi->error,
2410
0
                      &data, buf);
2411
0
    }
2412
0
  }
2413
0
}
2414
2415
static AOM_INLINE void set_cb_buffer(AV1Decoder *pbi, DecoderCodingBlock *dcb,
2416
                                     CB_BUFFER *cb_buffer_base,
2417
                                     const int num_planes, int mi_row,
2418
0
                                     int mi_col) {
2419
0
  AV1_COMMON *const cm = &pbi->common;
2420
0
  int mib_size_log2 = cm->seq_params->mib_size_log2;
2421
0
  int stride = (cm->mi_params.mi_cols >> mib_size_log2) + 1;
2422
0
  int offset = (mi_row >> mib_size_log2) * stride + (mi_col >> mib_size_log2);
2423
0
  CB_BUFFER *cb_buffer = cb_buffer_base + offset;
2424
2425
0
  for (int plane = 0; plane < num_planes; ++plane) {
2426
0
    dcb->dqcoeff_block[plane] = cb_buffer->dqcoeff[plane];
2427
0
    dcb->eob_data[plane] = cb_buffer->eob_data[plane];
2428
0
    dcb->cb_offset[plane] = 0;
2429
0
    dcb->txb_offset[plane] = 0;
2430
0
  }
2431
0
  MACROBLOCKD *const xd = &dcb->xd;
2432
0
  xd->plane[0].color_index_map = cb_buffer->color_index_map[0];
2433
0
  xd->plane[1].color_index_map = cb_buffer->color_index_map[1];
2434
0
  xd->color_index_map_offset[0] = 0;
2435
0
  xd->color_index_map_offset[1] = 0;
2436
0
}
2437
2438
static AOM_INLINE void decoder_alloc_tile_data(AV1Decoder *pbi,
2439
0
                                               const int n_tiles) {
2440
0
  AV1_COMMON *const cm = &pbi->common;
2441
0
  aom_free(pbi->tile_data);
2442
0
  CHECK_MEM_ERROR(cm, pbi->tile_data,
2443
0
                  aom_memalign(32, n_tiles * sizeof(*pbi->tile_data)));
2444
0
  pbi->allocated_tiles = n_tiles;
2445
0
  for (int i = 0; i < n_tiles; i++) {
2446
0
    TileDataDec *const tile_data = pbi->tile_data + i;
2447
0
    av1_zero(tile_data->dec_row_mt_sync);
2448
0
  }
2449
0
  pbi->allocated_row_mt_sync_rows = 0;
2450
0
}
2451
2452
// Set up nsync by width.
2453
0
static INLINE int get_sync_range(int width) {
2454
// nsync numbers are picked by testing.
2455
#if 0
2456
  if (width < 640)
2457
    return 1;
2458
  else if (width <= 1280)
2459
    return 2;
2460
  else if (width <= 4096)
2461
    return 4;
2462
  else
2463
    return 8;
2464
#else
2465
0
  (void)width;
2466
0
#endif
2467
0
  return 1;
2468
0
}
2469
2470
// Allocate memory for decoder row synchronization
2471
static AOM_INLINE void dec_row_mt_alloc(AV1DecRowMTSync *dec_row_mt_sync,
2472
0
                                        AV1_COMMON *cm, int rows) {
2473
0
  dec_row_mt_sync->allocated_sb_rows = rows;
2474
0
#if CONFIG_MULTITHREAD
2475
0
  {
2476
0
    int i;
2477
2478
0
    CHECK_MEM_ERROR(cm, dec_row_mt_sync->mutex_,
2479
0
                    aom_malloc(sizeof(*(dec_row_mt_sync->mutex_)) * rows));
2480
0
    if (dec_row_mt_sync->mutex_) {
2481
0
      for (i = 0; i < rows; ++i) {
2482
0
        pthread_mutex_init(&dec_row_mt_sync->mutex_[i], NULL);
2483
0
      }
2484
0
    }
2485
2486
0
    CHECK_MEM_ERROR(cm, dec_row_mt_sync->cond_,
2487
0
                    aom_malloc(sizeof(*(dec_row_mt_sync->cond_)) * rows));
2488
0
    if (dec_row_mt_sync->cond_) {
2489
0
      for (i = 0; i < rows; ++i) {
2490
0
        pthread_cond_init(&dec_row_mt_sync->cond_[i], NULL);
2491
0
      }
2492
0
    }
2493
0
  }
2494
0
#endif  // CONFIG_MULTITHREAD
2495
2496
0
  CHECK_MEM_ERROR(cm, dec_row_mt_sync->cur_sb_col,
2497
0
                  aom_malloc(sizeof(*(dec_row_mt_sync->cur_sb_col)) * rows));
2498
2499
  // Set up nsync.
2500
0
  dec_row_mt_sync->sync_range = get_sync_range(cm->width);
2501
0
}
2502
2503
// Deallocate decoder row synchronization related mutex and data
2504
0
void av1_dec_row_mt_dealloc(AV1DecRowMTSync *dec_row_mt_sync) {
2505
0
  if (dec_row_mt_sync != NULL) {
2506
0
#if CONFIG_MULTITHREAD
2507
0
    int i;
2508
0
    if (dec_row_mt_sync->mutex_ != NULL) {
2509
0
      for (i = 0; i < dec_row_mt_sync->allocated_sb_rows; ++i) {
2510
0
        pthread_mutex_destroy(&dec_row_mt_sync->mutex_[i]);
2511
0
      }
2512
0
      aom_free(dec_row_mt_sync->mutex_);
2513
0
    }
2514
0
    if (dec_row_mt_sync->cond_ != NULL) {
2515
0
      for (i = 0; i < dec_row_mt_sync->allocated_sb_rows; ++i) {
2516
0
        pthread_cond_destroy(&dec_row_mt_sync->cond_[i]);
2517
0
      }
2518
0
      aom_free(dec_row_mt_sync->cond_);
2519
0
    }
2520
0
#endif  // CONFIG_MULTITHREAD
2521
0
    aom_free(dec_row_mt_sync->cur_sb_col);
2522
2523
    // clear the structure as the source of this call may be a resize in which
2524
    // case this call will be followed by an _alloc() which may fail.
2525
0
    av1_zero(*dec_row_mt_sync);
2526
0
  }
2527
0
}
2528
2529
static INLINE void sync_read(AV1DecRowMTSync *const dec_row_mt_sync, int r,
2530
0
                             int c) {
2531
0
#if CONFIG_MULTITHREAD
2532
0
  const int nsync = dec_row_mt_sync->sync_range;
2533
2534
0
  if (r && !(c & (nsync - 1))) {
2535
0
    pthread_mutex_t *const mutex = &dec_row_mt_sync->mutex_[r - 1];
2536
0
    pthread_mutex_lock(mutex);
2537
2538
0
    while (c > dec_row_mt_sync->cur_sb_col[r - 1] - nsync) {
2539
0
      pthread_cond_wait(&dec_row_mt_sync->cond_[r - 1], mutex);
2540
0
    }
2541
0
    pthread_mutex_unlock(mutex);
2542
0
  }
2543
#else
2544
  (void)dec_row_mt_sync;
2545
  (void)r;
2546
  (void)c;
2547
#endif  // CONFIG_MULTITHREAD
2548
0
}
2549
2550
static INLINE void sync_write(AV1DecRowMTSync *const dec_row_mt_sync, int r,
2551
0
                              int c, const int sb_cols) {
2552
0
#if CONFIG_MULTITHREAD
2553
0
  const int nsync = dec_row_mt_sync->sync_range;
2554
0
  int cur;
2555
0
  int sig = 1;
2556
2557
0
  if (c < sb_cols - 1) {
2558
0
    cur = c;
2559
0
    if (c % nsync) sig = 0;
2560
0
  } else {
2561
0
    cur = sb_cols + nsync;
2562
0
  }
2563
2564
0
  if (sig) {
2565
0
    pthread_mutex_lock(&dec_row_mt_sync->mutex_[r]);
2566
2567
0
    dec_row_mt_sync->cur_sb_col[r] = cur;
2568
2569
0
    pthread_cond_signal(&dec_row_mt_sync->cond_[r]);
2570
0
    pthread_mutex_unlock(&dec_row_mt_sync->mutex_[r]);
2571
0
  }
2572
#else
2573
  (void)dec_row_mt_sync;
2574
  (void)r;
2575
  (void)c;
2576
  (void)sb_cols;
2577
#endif  // CONFIG_MULTITHREAD
2578
0
}
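/*
 * Illustrative sketch (not part of the libaom sources): sync_read() and
 * sync_write() implement a wavefront dependency between adjacent superblock
 * rows of a tile. A thread working on superblock column c of row r blocks
 * until cur_sb_col[r - 1] >= c + nsync, i.e. until superblock
 * (r - 1, c + nsync) has been decoded; with the current sync_range of 1 this
 * is the usual "top-right superblock must be finished" rule. A trimmed-down
 * worker loop, using a hypothetical helper name, would look like:
 */
#if 0  /* illustration only */
static void decode_sb_row_sketch(AV1DecRowMTSync *sync, int r, int sb_cols) {
  for (int c = 0; c < sb_cols; ++c) {
    sync_read(sync, r, c);            // wait for row r - 1 to be nsync ahead
    /* ... decode superblock (r, c) ... */
    sync_write(sync, r, c, sb_cols);  // publish progress for row r + 1
  }
}
#endif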
2579
2580
static AOM_INLINE void decode_tile_sb_row(AV1Decoder *pbi, ThreadData *const td,
2581
                                          TileInfo tile_info,
2582
0
                                          const int mi_row) {
2583
0
  AV1_COMMON *const cm = &pbi->common;
2584
0
  const int num_planes = av1_num_planes(cm);
2585
0
  TileDataDec *const tile_data =
2586
0
      pbi->tile_data + tile_info.tile_row * cm->tiles.cols + tile_info.tile_col;
2587
0
  const int sb_cols_in_tile = av1_get_sb_cols_in_tile(cm, tile_info);
2588
0
  const int sb_row_in_tile =
2589
0
      (mi_row - tile_info.mi_row_start) >> cm->seq_params->mib_size_log2;
2590
0
  int sb_col_in_tile = 0;
2591
2592
0
  for (int mi_col = tile_info.mi_col_start; mi_col < tile_info.mi_col_end;
2593
0
       mi_col += cm->seq_params->mib_size, sb_col_in_tile++) {
2594
0
    set_cb_buffer(pbi, &td->dcb, pbi->cb_buffer_base, num_planes, mi_row,
2595
0
                  mi_col);
2596
2597
0
    sync_read(&tile_data->dec_row_mt_sync, sb_row_in_tile, sb_col_in_tile);
2598
2599
    // Decoding of the super-block
2600
0
    decode_partition(pbi, td, mi_row, mi_col, td->bit_reader,
2601
0
                     cm->seq_params->sb_size, 0x2);
2602
2603
0
    sync_write(&tile_data->dec_row_mt_sync, sb_row_in_tile, sb_col_in_tile,
2604
0
               sb_cols_in_tile);
2605
0
  }
2606
0
}
2607
2608
0
static int check_trailing_bits_after_symbol_coder(aom_reader *r) {
2609
0
  if (aom_reader_has_overflowed(r)) return -1;
2610
2611
0
  uint32_t nb_bits = aom_reader_tell(r);
2612
0
  uint32_t nb_bytes = (nb_bits + 7) >> 3;
2613
0
  const uint8_t *p = aom_reader_find_begin(r) + nb_bytes;
2614
2615
  // aom_reader_tell() returns 1 for a newly initialized decoder, and the
2616
  // return value only increases as values are decoded. So nb_bits > 0, and
2617
  // thus p > p_begin. Therefore accessing p[-1] is safe.
2618
0
  uint8_t last_byte = p[-1];
2619
0
  uint8_t pattern = 128 >> ((nb_bits - 1) & 7);
2620
0
  if ((last_byte & (2 * pattern - 1)) != pattern) return -1;
2621
2622
  // Make sure that all padding bytes are zero as required by the spec.
2623
0
  const uint8_t *p_end = aom_reader_find_end(r);
2624
0
  while (p < p_end) {
2625
0
    if (*p != 0) return -1;
2626
0
    p++;
2627
0
  }
2628
0
  return 0;
2629
0
}
2630
2631
static AOM_INLINE void set_decode_func_pointers(ThreadData *td,
2632
0
                                                int parse_decode_flag) {
2633
0
  td->read_coeffs_tx_intra_block_visit = decode_block_void;
2634
0
  td->predict_and_recon_intra_block_visit = decode_block_void;
2635
0
  td->read_coeffs_tx_inter_block_visit = decode_block_void;
2636
0
  td->inverse_tx_inter_block_visit = decode_block_void;
2637
0
  td->predict_inter_block_visit = predict_inter_block_void;
2638
0
  td->cfl_store_inter_block_visit = cfl_store_inter_block_void;
2639
2640
0
  if (parse_decode_flag & 0x1) {
2641
0
    td->read_coeffs_tx_intra_block_visit = read_coeffs_tx_intra_block;
2642
0
    td->read_coeffs_tx_inter_block_visit = av1_read_coeffs_txb_facade;
2643
0
  }
2644
0
  if (parse_decode_flag & 0x2) {
2645
0
    td->predict_and_recon_intra_block_visit =
2646
0
        predict_and_reconstruct_intra_block;
2647
0
    td->inverse_tx_inter_block_visit = inverse_transform_inter_block;
2648
0
    td->predict_inter_block_visit = predict_inter_block;
2649
0
    td->cfl_store_inter_block_visit = cfl_store_inter_block;
2650
0
  }
2651
0
}
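/*
 * For reference (derived from the call sites in this file): bit 0 of
 * parse_decode_flag selects the bitstream-parsing visitors and bit 1 selects
 * the prediction/reconstruction visitors. Single-pass decoding passes 0x3
 * (decode_tile() and tile_worker_hook()), while row-multithreaded decoding
 * first parses a whole tile with 0x1 (parse_tile_row_mt()) and later
 * reconstructs individual superblock rows with 0x2 (decode_tile_sb_row() via
 * row_mt_worker_hook()).
 */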
2652
2653
static AOM_INLINE void decode_tile(AV1Decoder *pbi, ThreadData *const td,
2654
0
                                   int tile_row, int tile_col) {
2655
0
  TileInfo tile_info;
2656
2657
0
  AV1_COMMON *const cm = &pbi->common;
2658
0
  const int num_planes = av1_num_planes(cm);
2659
2660
0
  av1_tile_set_row(&tile_info, cm, tile_row);
2661
0
  av1_tile_set_col(&tile_info, cm, tile_col);
2662
0
  DecoderCodingBlock *const dcb = &td->dcb;
2663
0
  MACROBLOCKD *const xd = &dcb->xd;
2664
2665
0
  av1_zero_above_context(cm, xd, tile_info.mi_col_start, tile_info.mi_col_end,
2666
0
                         tile_row);
2667
0
  av1_reset_loop_filter_delta(xd, num_planes);
2668
0
  av1_reset_loop_restoration(xd, num_planes);
2669
2670
0
  for (int mi_row = tile_info.mi_row_start; mi_row < tile_info.mi_row_end;
2671
0
       mi_row += cm->seq_params->mib_size) {
2672
0
    av1_zero_left_context(xd);
2673
2674
0
    for (int mi_col = tile_info.mi_col_start; mi_col < tile_info.mi_col_end;
2675
0
         mi_col += cm->seq_params->mib_size) {
2676
0
      set_cb_buffer(pbi, dcb, &td->cb_buffer_base, num_planes, 0, 0);
2677
2678
      // Bit-stream parsing and decoding of the superblock
2679
0
      decode_partition(pbi, td, mi_row, mi_col, td->bit_reader,
2680
0
                       cm->seq_params->sb_size, 0x3);
2681
2682
0
      if (aom_reader_has_overflowed(td->bit_reader)) {
2683
0
        aom_merge_corrupted_flag(&dcb->corrupted, 1);
2684
0
        return;
2685
0
      }
2686
0
    }
2687
0
  }
2688
2689
0
  int corrupted =
2690
0
      (check_trailing_bits_after_symbol_coder(td->bit_reader)) ? 1 : 0;
2691
0
  aom_merge_corrupted_flag(&dcb->corrupted, corrupted);
2692
0
}
2693
2694
static const uint8_t *decode_tiles(AV1Decoder *pbi, const uint8_t *data,
2695
                                   const uint8_t *data_end, int start_tile,
2696
0
                                   int end_tile) {
2697
0
  AV1_COMMON *const cm = &pbi->common;
2698
0
  ThreadData *const td = &pbi->td;
2699
0
  CommonTileParams *const tiles = &cm->tiles;
2700
0
  const int tile_cols = tiles->cols;
2701
0
  const int tile_rows = tiles->rows;
2702
0
  const int n_tiles = tile_cols * tile_rows;
2703
0
  TileBufferDec(*const tile_buffers)[MAX_TILE_COLS] = pbi->tile_buffers;
2704
0
  const int dec_tile_row = AOMMIN(pbi->dec_tile_row, tile_rows);
2705
0
  const int single_row = pbi->dec_tile_row >= 0;
2706
0
  const int dec_tile_col = AOMMIN(pbi->dec_tile_col, tile_cols);
2707
0
  const int single_col = pbi->dec_tile_col >= 0;
2708
0
  int tile_rows_start;
2709
0
  int tile_rows_end;
2710
0
  int tile_cols_start;
2711
0
  int tile_cols_end;
2712
0
  int inv_col_order;
2713
0
  int inv_row_order;
2714
0
  int tile_row, tile_col;
2715
0
  uint8_t allow_update_cdf;
2716
0
  const uint8_t *raw_data_end = NULL;
2717
2718
0
  if (tiles->large_scale) {
2719
0
    tile_rows_start = single_row ? dec_tile_row : 0;
2720
0
    tile_rows_end = single_row ? dec_tile_row + 1 : tile_rows;
2721
0
    tile_cols_start = single_col ? dec_tile_col : 0;
2722
0
    tile_cols_end = single_col ? tile_cols_start + 1 : tile_cols;
2723
0
    inv_col_order = pbi->inv_tile_order && !single_col;
2724
0
    inv_row_order = pbi->inv_tile_order && !single_row;
2725
0
    allow_update_cdf = 0;
2726
0
  } else {
2727
0
    tile_rows_start = 0;
2728
0
    tile_rows_end = tile_rows;
2729
0
    tile_cols_start = 0;
2730
0
    tile_cols_end = tile_cols;
2731
0
    inv_col_order = pbi->inv_tile_order;
2732
0
    inv_row_order = pbi->inv_tile_order;
2733
0
    allow_update_cdf = 1;
2734
0
  }
2735
2736
  // No tiles to decode.
2737
0
  if (tile_rows_end <= tile_rows_start || tile_cols_end <= tile_cols_start ||
2738
      // The first tile to be decoded has an index greater than end_tile.
2739
0
      tile_rows_start * tiles->cols + tile_cols_start > end_tile ||
2740
      // The last tile to be decoded has an index less than start_tile.
2741
0
      (tile_rows_end - 1) * tiles->cols + tile_cols_end - 1 < start_tile)
2742
0
    return data;
2743
2744
0
  allow_update_cdf = allow_update_cdf && !cm->features.disable_cdf_update;
2745
2746
0
  assert(tile_rows <= MAX_TILE_ROWS);
2747
0
  assert(tile_cols <= MAX_TILE_COLS);
2748
2749
0
#if EXT_TILE_DEBUG
2750
0
  if (tiles->large_scale && !pbi->ext_tile_debug)
2751
0
    raw_data_end = get_ls_single_tile_buffer(pbi, data, tile_buffers);
2752
0
  else if (tiles->large_scale && pbi->ext_tile_debug)
2753
0
    raw_data_end = get_ls_tile_buffers(pbi, data, data_end, tile_buffers);
2754
0
  else
2755
0
#endif  // EXT_TILE_DEBUG
2756
0
    get_tile_buffers(pbi, data, data_end, tile_buffers, start_tile, end_tile);
2757
2758
0
  if (pbi->tile_data == NULL || n_tiles != pbi->allocated_tiles) {
2759
0
    decoder_alloc_tile_data(pbi, n_tiles);
2760
0
  }
2761
0
  if (pbi->dcb.xd.seg_mask == NULL)
2762
0
    CHECK_MEM_ERROR(cm, pbi->dcb.xd.seg_mask,
2763
0
                    (uint8_t *)aom_memalign(
2764
0
                        16, 2 * MAX_SB_SQUARE * sizeof(*pbi->dcb.xd.seg_mask)));
2765
#if CONFIG_ACCOUNTING
2766
  if (pbi->acct_enabled) {
2767
    aom_accounting_reset(&pbi->accounting);
2768
  }
2769
#endif
2770
2771
0
  set_decode_func_pointers(&pbi->td, 0x3);
2772
2773
  // Load all tile information into thread_data.
2774
0
  td->dcb = pbi->dcb;
2775
2776
0
  td->dcb.corrupted = 0;
2777
0
  td->dcb.mc_buf[0] = td->mc_buf[0];
2778
0
  td->dcb.mc_buf[1] = td->mc_buf[1];
2779
0
  td->dcb.xd.tmp_conv_dst = td->tmp_conv_dst;
2780
0
  for (int j = 0; j < 2; ++j) {
2781
0
    td->dcb.xd.tmp_obmc_bufs[j] = td->tmp_obmc_bufs[j];
2782
0
  }
2783
2784
0
  for (tile_row = tile_rows_start; tile_row < tile_rows_end; ++tile_row) {
2785
0
    const int row = inv_row_order ? tile_rows - 1 - tile_row : tile_row;
2786
2787
0
    for (tile_col = tile_cols_start; tile_col < tile_cols_end; ++tile_col) {
2788
0
      const int col = inv_col_order ? tile_cols - 1 - tile_col : tile_col;
2789
0
      TileDataDec *const tile_data = pbi->tile_data + row * tiles->cols + col;
2790
0
      const TileBufferDec *const tile_bs_buf = &tile_buffers[row][col];
2791
2792
0
      if (row * tiles->cols + col < start_tile ||
2793
0
          row * tiles->cols + col > end_tile)
2794
0
        continue;
2795
2796
0
      td->bit_reader = &tile_data->bit_reader;
2797
0
      av1_zero(td->cb_buffer_base.dqcoeff);
2798
0
      av1_tile_init(&td->dcb.xd.tile, cm, row, col);
2799
0
      td->dcb.xd.current_base_qindex = cm->quant_params.base_qindex;
2800
0
      setup_bool_decoder(tile_bs_buf->data, data_end, tile_bs_buf->size,
2801
0
                         &pbi->error, td->bit_reader, allow_update_cdf);
2802
#if CONFIG_ACCOUNTING
2803
      if (pbi->acct_enabled) {
2804
        td->bit_reader->accounting = &pbi->accounting;
2805
        td->bit_reader->accounting->last_tell_frac =
2806
            aom_reader_tell_frac(td->bit_reader);
2807
      } else {
2808
        td->bit_reader->accounting = NULL;
2809
      }
2810
#endif
2811
0
      av1_init_macroblockd(cm, &td->dcb.xd);
2812
0
      av1_init_above_context(&cm->above_contexts, av1_num_planes(cm), row,
2813
0
                             &td->dcb.xd);
2814
2815
      // Initialise the tile context from the frame context
2816
0
      tile_data->tctx = *cm->fc;
2817
0
      td->dcb.xd.tile_ctx = &tile_data->tctx;
2818
2819
      // decode tile
2820
0
      decode_tile(pbi, td, row, col);
2821
0
      aom_merge_corrupted_flag(&pbi->dcb.corrupted, td->dcb.corrupted);
2822
0
      if (pbi->dcb.corrupted)
2823
0
        aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
2824
0
                           "Failed to decode tile data");
2825
0
    }
2826
0
  }
2827
2828
0
  if (tiles->large_scale) {
2829
0
    if (n_tiles == 1) {
2830
      // Find the end of the single tile buffer
2831
0
      return aom_reader_find_end(&pbi->tile_data->bit_reader);
2832
0
    }
2833
    // Return the end of the last tile buffer
2834
0
    return raw_data_end;
2835
0
  }
2836
0
  TileDataDec *const tile_data = pbi->tile_data + end_tile;
2837
2838
0
  return aom_reader_find_end(&tile_data->bit_reader);
2839
0
}
2840
2841
0
static TileJobsDec *get_dec_job_info(AV1DecTileMT *tile_mt_info) {
2842
0
  TileJobsDec *cur_job_info = NULL;
2843
0
#if CONFIG_MULTITHREAD
2844
0
  pthread_mutex_lock(tile_mt_info->job_mutex);
2845
2846
0
  if (tile_mt_info->jobs_dequeued < tile_mt_info->jobs_enqueued) {
2847
0
    cur_job_info = tile_mt_info->job_queue + tile_mt_info->jobs_dequeued;
2848
0
    tile_mt_info->jobs_dequeued++;
2849
0
  }
2850
2851
0
  pthread_mutex_unlock(tile_mt_info->job_mutex);
2852
#else
2853
  (void)tile_mt_info;
2854
#endif
2855
0
  return cur_job_info;
2856
0
}
2857
2858
static AOM_INLINE void tile_worker_hook_init(
2859
    AV1Decoder *const pbi, DecWorkerData *const thread_data,
2860
    const TileBufferDec *const tile_buffer, TileDataDec *const tile_data,
2861
0
    uint8_t allow_update_cdf) {
2862
0
  AV1_COMMON *cm = &pbi->common;
2863
0
  ThreadData *const td = thread_data->td;
2864
0
  int tile_row = tile_data->tile_info.tile_row;
2865
0
  int tile_col = tile_data->tile_info.tile_col;
2866
2867
0
  td->bit_reader = &tile_data->bit_reader;
2868
0
  av1_zero(td->cb_buffer_base.dqcoeff);
2869
2870
0
  MACROBLOCKD *const xd = &td->dcb.xd;
2871
0
  av1_tile_init(&xd->tile, cm, tile_row, tile_col);
2872
0
  xd->current_base_qindex = cm->quant_params.base_qindex;
2873
0
  setup_bool_decoder(tile_buffer->data, thread_data->data_end,
2874
0
                     tile_buffer->size, &thread_data->error_info,
2875
0
                     td->bit_reader, allow_update_cdf);
2876
#if CONFIG_ACCOUNTING
2877
  if (pbi->acct_enabled) {
2878
    td->bit_reader->accounting = &pbi->accounting;
2879
    td->bit_reader->accounting->last_tell_frac =
2880
        aom_reader_tell_frac(td->bit_reader);
2881
  } else {
2882
    td->bit_reader->accounting = NULL;
2883
  }
2884
#endif
2885
0
  av1_init_macroblockd(cm, xd);
2886
0
  xd->error_info = &thread_data->error_info;
2887
0
  av1_init_above_context(&cm->above_contexts, av1_num_planes(cm), tile_row, xd);
2888
2889
  // Initialise the tile context from the frame context
2890
0
  tile_data->tctx = *cm->fc;
2891
0
  xd->tile_ctx = &tile_data->tctx;
2892
#if CONFIG_ACCOUNTING
2893
  if (pbi->acct_enabled) {
2894
    tile_data->bit_reader.accounting->last_tell_frac =
2895
        aom_reader_tell_frac(&tile_data->bit_reader);
2896
  }
2897
#endif
2898
0
}
2899
2900
0
static int tile_worker_hook(void *arg1, void *arg2) {
2901
0
  DecWorkerData *const thread_data = (DecWorkerData *)arg1;
2902
0
  AV1Decoder *const pbi = (AV1Decoder *)arg2;
2903
0
  AV1_COMMON *cm = &pbi->common;
2904
0
  ThreadData *const td = thread_data->td;
2905
0
  uint8_t allow_update_cdf;
2906
2907
  // The jmp_buf is valid only for the duration of the function that calls
2908
  // setjmp(). Therefore, this function must reset the 'setjmp' field to 0
2909
  // before it returns.
2910
0
  if (setjmp(thread_data->error_info.jmp)) {
2911
0
    thread_data->error_info.setjmp = 0;
2912
0
    thread_data->td->dcb.corrupted = 1;
2913
0
    return 0;
2914
0
  }
2915
0
  thread_data->error_info.setjmp = 1;
2916
2917
0
  allow_update_cdf = cm->tiles.large_scale ? 0 : 1;
2918
0
  allow_update_cdf = allow_update_cdf && !cm->features.disable_cdf_update;
2919
2920
0
  set_decode_func_pointers(td, 0x3);
2921
2922
0
  assert(cm->tiles.cols > 0);
2923
0
  while (!td->dcb.corrupted) {
2924
0
    TileJobsDec *cur_job_info = get_dec_job_info(&pbi->tile_mt_info);
2925
2926
0
    if (cur_job_info != NULL) {
2927
0
      const TileBufferDec *const tile_buffer = cur_job_info->tile_buffer;
2928
0
      TileDataDec *const tile_data = cur_job_info->tile_data;
2929
0
      tile_worker_hook_init(pbi, thread_data, tile_buffer, tile_data,
2930
0
                            allow_update_cdf);
2931
      // decode tile
2932
0
      int tile_row = tile_data->tile_info.tile_row;
2933
0
      int tile_col = tile_data->tile_info.tile_col;
2934
0
      decode_tile(pbi, td, tile_row, tile_col);
2935
0
    } else {
2936
0
      break;
2937
0
    }
2938
0
  }
2939
0
  thread_data->error_info.setjmp = 0;
2940
0
  return !td->dcb.corrupted;
2941
0
}
2942
2943
static INLINE int get_max_row_mt_workers_per_tile(AV1_COMMON *cm,
2944
0
                                                  TileInfo tile) {
2945
  // NOTE: Currently the value of max workers is calculated based
2946
  // on the parse and decode time. As per the theoretical estimate,
2947
  // when the percentage of parse time equals the percentage of decode
2948
  // time, the number of workers needed to parse + decode a tile cannot
2949
  // exceed 2.
2950
  // TODO(any): Modify this value if parsing is optimized in the future.
2951
0
  int sb_rows = av1_get_sb_rows_in_tile(cm, tile);
2952
0
  int max_workers =
2953
0
      sb_rows == 1 ? AOM_MIN_THREADS_PER_TILE : AOM_MAX_THREADS_PER_TILE;
2954
0
  return max_workers;
2955
0
}
2956
2957
// The caller must hold pbi->row_mt_mutex_ when calling this function.
2958
// Returns 1 if either the next job is stored in *next_job_info or 1 is stored
2959
// in *end_of_frame.
2960
// NOTE: The caller waits on pbi->row_mt_cond_ if this function returns 0.
2961
// The return value of this function depends on the following variables:
2962
// - frame_row_mt_info->mi_rows_parse_done
2963
// - frame_row_mt_info->mi_rows_decode_started
2964
// - frame_row_mt_info->row_mt_exit
2965
// Therefore we may need to signal or broadcast pbi->row_mt_cond_ if any of
2966
// these variables is modified.
2967
static int get_next_job_info(AV1Decoder *const pbi,
2968
                             AV1DecRowMTJobInfo *next_job_info,
2969
0
                             int *end_of_frame) {
2970
0
  AV1_COMMON *cm = &pbi->common;
2971
0
  TileDataDec *tile_data;
2972
0
  AV1DecRowMTSync *dec_row_mt_sync;
2973
0
  AV1DecRowMTInfo *frame_row_mt_info = &pbi->frame_row_mt_info;
2974
0
  TileInfo tile_info;
2975
0
  const int tile_rows_start = frame_row_mt_info->tile_rows_start;
2976
0
  const int tile_rows_end = frame_row_mt_info->tile_rows_end;
2977
0
  const int tile_cols_start = frame_row_mt_info->tile_cols_start;
2978
0
  const int tile_cols_end = frame_row_mt_info->tile_cols_end;
2979
0
  const int start_tile = frame_row_mt_info->start_tile;
2980
0
  const int end_tile = frame_row_mt_info->end_tile;
2981
0
  const int sb_mi_size = mi_size_wide[cm->seq_params->sb_size];
2982
0
  int num_mis_to_decode, num_threads_working;
2983
0
  int num_mis_waiting_for_decode;
2984
0
  int min_threads_working = INT_MAX;
2985
0
  int max_mis_to_decode = 0;
2986
0
  int tile_row_idx, tile_col_idx;
2987
0
  int tile_row = -1;
2988
0
  int tile_col = -1;
2989
2990
0
  memset(next_job_info, 0, sizeof(*next_job_info));
2991
2992
  // Frame decode is complete or an error has been encountered.
2993
0
  *end_of_frame = (frame_row_mt_info->mi_rows_decode_started ==
2994
0
                   frame_row_mt_info->mi_rows_to_decode) ||
2995
0
                  (frame_row_mt_info->row_mt_exit == 1);
2996
0
  if (*end_of_frame) {
2997
0
    return 1;
2998
0
  }
2999
3000
  // Decoding cannot start as bit-stream parsing is not complete.
3001
0
  assert(frame_row_mt_info->mi_rows_parse_done >=
3002
0
         frame_row_mt_info->mi_rows_decode_started);
3003
0
  if (frame_row_mt_info->mi_rows_parse_done ==
3004
0
      frame_row_mt_info->mi_rows_decode_started)
3005
0
    return 0;
3006
3007
  // Choose the tile to decode.
3008
0
  for (tile_row_idx = tile_rows_start; tile_row_idx < tile_rows_end;
3009
0
       ++tile_row_idx) {
3010
0
    for (tile_col_idx = tile_cols_start; tile_col_idx < tile_cols_end;
3011
0
         ++tile_col_idx) {
3012
0
      if (tile_row_idx * cm->tiles.cols + tile_col_idx < start_tile ||
3013
0
          tile_row_idx * cm->tiles.cols + tile_col_idx > end_tile)
3014
0
        continue;
3015
3016
0
      tile_data = pbi->tile_data + tile_row_idx * cm->tiles.cols + tile_col_idx;
3017
0
      dec_row_mt_sync = &tile_data->dec_row_mt_sync;
3018
3019
0
      num_threads_working = dec_row_mt_sync->num_threads_working;
3020
0
      num_mis_waiting_for_decode = (dec_row_mt_sync->mi_rows_parse_done -
3021
0
                                    dec_row_mt_sync->mi_rows_decode_started) *
3022
0
                                   dec_row_mt_sync->mi_cols;
3023
0
      num_mis_to_decode =
3024
0
          (dec_row_mt_sync->mi_rows - dec_row_mt_sync->mi_rows_decode_started) *
3025
0
          dec_row_mt_sync->mi_cols;
3026
3027
0
      assert(num_mis_to_decode >= num_mis_waiting_for_decode);
3028
3029
      // Pick the tile that has the minimum number of threads working on it.
3030
0
      if (num_mis_waiting_for_decode > 0) {
3031
0
        if (num_threads_working < min_threads_working) {
3032
0
          min_threads_working = num_threads_working;
3033
0
          max_mis_to_decode = 0;
3034
0
        }
3035
0
        if (num_threads_working == min_threads_working &&
3036
0
            num_mis_to_decode > max_mis_to_decode &&
3037
0
            num_threads_working <
3038
0
                get_max_row_mt_workers_per_tile(cm, tile_data->tile_info)) {
3039
0
          max_mis_to_decode = num_mis_to_decode;
3040
0
          tile_row = tile_row_idx;
3041
0
          tile_col = tile_col_idx;
3042
0
        }
3043
0
      }
3044
0
    }
3045
0
  }
3046
  // No job found to process
3047
0
  if (tile_row == -1 || tile_col == -1) return 0;
3048
3049
0
  tile_data = pbi->tile_data + tile_row * cm->tiles.cols + tile_col;
3050
0
  tile_info = tile_data->tile_info;
3051
0
  dec_row_mt_sync = &tile_data->dec_row_mt_sync;
3052
3053
0
  next_job_info->tile_row = tile_row;
3054
0
  next_job_info->tile_col = tile_col;
3055
0
  next_job_info->mi_row =
3056
0
      dec_row_mt_sync->mi_rows_decode_started + tile_info.mi_row_start;
3057
3058
0
  dec_row_mt_sync->num_threads_working++;
3059
0
  dec_row_mt_sync->mi_rows_decode_started += sb_mi_size;
3060
0
  frame_row_mt_info->mi_rows_decode_started += sb_mi_size;
3061
0
  assert(frame_row_mt_info->mi_rows_parse_done >=
3062
0
         frame_row_mt_info->mi_rows_decode_started);
3063
0
#if CONFIG_MULTITHREAD
3064
0
  if (frame_row_mt_info->mi_rows_decode_started ==
3065
0
      frame_row_mt_info->mi_rows_to_decode) {
3066
0
    pthread_cond_broadcast(pbi->row_mt_cond_);
3067
0
  }
3068
0
#endif
3069
3070
0
  return 1;
3071
0
}
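/*
 * Illustrative sketch (not part of the libaom sources): the caller-side
 * protocol implied by the contract documented above, as used by
 * row_mt_worker_hook() below. The worker holds pbi->row_mt_mutex_ while
 * polling and sleeps on pbi->row_mt_cond_ until a job or end-of-frame is
 * signalled.
 */
#if 0  /* illustration only */
  AV1DecRowMTJobInfo job;
  int end_of_frame = 0;
  pthread_mutex_lock(pbi->row_mt_mutex_);
  while (!get_next_job_info(pbi, &job, &end_of_frame)) {
    pthread_cond_wait(pbi->row_mt_cond_, pbi->row_mt_mutex_);
  }
  pthread_mutex_unlock(pbi->row_mt_mutex_);
  if (!end_of_frame) {
    /* ... decode the superblock row described by job ... */
  }
#endif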
3072
3073
static INLINE void signal_parse_sb_row_done(AV1Decoder *const pbi,
3074
                                            TileDataDec *const tile_data,
3075
0
                                            const int sb_mi_size) {
3076
0
  AV1DecRowMTInfo *frame_row_mt_info = &pbi->frame_row_mt_info;
3077
0
#if CONFIG_MULTITHREAD
3078
0
  pthread_mutex_lock(pbi->row_mt_mutex_);
3079
0
#endif
3080
0
  assert(frame_row_mt_info->mi_rows_parse_done >=
3081
0
         frame_row_mt_info->mi_rows_decode_started);
3082
0
  tile_data->dec_row_mt_sync.mi_rows_parse_done += sb_mi_size;
3083
0
  frame_row_mt_info->mi_rows_parse_done += sb_mi_size;
3084
0
#if CONFIG_MULTITHREAD
3085
  // A new decode job is available. Wake up one worker thread to handle the
3086
  // new decode job.
3087
  // NOTE: This assumes we bump mi_rows_parse_done and mi_rows_decode_started
3088
  // by the same increment (sb_mi_size).
3089
0
  pthread_cond_signal(pbi->row_mt_cond_);
3090
0
  pthread_mutex_unlock(pbi->row_mt_mutex_);
3091
0
#endif
3092
0
}
3093
3094
// This function is very similar to decode_tile(). It would be good to figure
3095
// out how to share code.
3096
static AOM_INLINE void parse_tile_row_mt(AV1Decoder *pbi, ThreadData *const td,
3097
0
                                         TileDataDec *const tile_data) {
3098
0
  AV1_COMMON *const cm = &pbi->common;
3099
0
  const int sb_mi_size = mi_size_wide[cm->seq_params->sb_size];
3100
0
  const int num_planes = av1_num_planes(cm);
3101
0
  TileInfo tile_info = tile_data->tile_info;
3102
0
  int tile_row = tile_info.tile_row;
3103
0
  DecoderCodingBlock *const dcb = &td->dcb;
3104
0
  MACROBLOCKD *const xd = &dcb->xd;
3105
3106
0
  av1_zero_above_context(cm, xd, tile_info.mi_col_start, tile_info.mi_col_end,
3107
0
                         tile_row);
3108
0
  av1_reset_loop_filter_delta(xd, num_planes);
3109
0
  av1_reset_loop_restoration(xd, num_planes);
3110
3111
0
  for (int mi_row = tile_info.mi_row_start; mi_row < tile_info.mi_row_end;
3112
0
       mi_row += cm->seq_params->mib_size) {
3113
0
    av1_zero_left_context(xd);
3114
3115
0
    for (int mi_col = tile_info.mi_col_start; mi_col < tile_info.mi_col_end;
3116
0
         mi_col += cm->seq_params->mib_size) {
3117
0
      set_cb_buffer(pbi, dcb, pbi->cb_buffer_base, num_planes, mi_row, mi_col);
3118
3119
      // Bit-stream parsing of the superblock
3120
0
      decode_partition(pbi, td, mi_row, mi_col, td->bit_reader,
3121
0
                       cm->seq_params->sb_size, 0x1);
3122
3123
0
      if (aom_reader_has_overflowed(td->bit_reader)) {
3124
0
        aom_merge_corrupted_flag(&dcb->corrupted, 1);
3125
0
        return;
3126
0
      }
3127
0
    }
3128
0
    signal_parse_sb_row_done(pbi, tile_data, sb_mi_size);
3129
0
  }
3130
3131
0
  int corrupted =
3132
0
      (check_trailing_bits_after_symbol_coder(td->bit_reader)) ? 1 : 0;
3133
0
  aom_merge_corrupted_flag(&dcb->corrupted, corrupted);
3134
0
}
3135
3136
0
static int row_mt_worker_hook(void *arg1, void *arg2) {
3137
0
  DecWorkerData *const thread_data = (DecWorkerData *)arg1;
3138
0
  AV1Decoder *const pbi = (AV1Decoder *)arg2;
3139
0
  AV1_COMMON *cm = &pbi->common;
3140
0
  ThreadData *const td = thread_data->td;
3141
0
  uint8_t allow_update_cdf;
3142
0
  AV1DecRowMTInfo *frame_row_mt_info = &pbi->frame_row_mt_info;
3143
0
  td->dcb.corrupted = 0;
3144
3145
  // The jmp_buf is valid only for the duration of the function that calls
3146
  // setjmp(). Therefore, this function must reset the 'setjmp' field to 0
3147
  // before it returns.
3148
0
  if (setjmp(thread_data->error_info.jmp)) {
3149
0
    thread_data->error_info.setjmp = 0;
3150
0
    thread_data->td->dcb.corrupted = 1;
3151
0
#if CONFIG_MULTITHREAD
3152
0
    pthread_mutex_lock(pbi->row_mt_mutex_);
3153
0
#endif
3154
0
    frame_row_mt_info->row_mt_exit = 1;
3155
0
#if CONFIG_MULTITHREAD
3156
0
    pthread_cond_broadcast(pbi->row_mt_cond_);
3157
0
    pthread_mutex_unlock(pbi->row_mt_mutex_);
3158
0
#endif
3159
0
    return 0;
3160
0
  }
3161
0
  thread_data->error_info.setjmp = 1;
3162
3163
0
  allow_update_cdf = cm->tiles.large_scale ? 0 : 1;
3164
0
  allow_update_cdf = allow_update_cdf && !cm->features.disable_cdf_update;
3165
3166
0
  set_decode_func_pointers(td, 0x1);
3167
3168
0
  assert(cm->tiles.cols > 0);
3169
0
  while (!td->dcb.corrupted) {
3170
0
    TileJobsDec *cur_job_info = get_dec_job_info(&pbi->tile_mt_info);
3171
3172
0
    if (cur_job_info != NULL) {
3173
0
      const TileBufferDec *const tile_buffer = cur_job_info->tile_buffer;
3174
0
      TileDataDec *const tile_data = cur_job_info->tile_data;
3175
0
      tile_worker_hook_init(pbi, thread_data, tile_buffer, tile_data,
3176
0
                            allow_update_cdf);
3177
0
#if CONFIG_MULTITHREAD
3178
0
      pthread_mutex_lock(pbi->row_mt_mutex_);
3179
0
#endif
3180
0
      tile_data->dec_row_mt_sync.num_threads_working++;
3181
0
#if CONFIG_MULTITHREAD
3182
0
      pthread_mutex_unlock(pbi->row_mt_mutex_);
3183
0
#endif
3184
      // Parse the tile; reconstruction is done later by row decode jobs
3185
0
      parse_tile_row_mt(pbi, td, tile_data);
3186
0
#if CONFIG_MULTITHREAD
3187
0
      pthread_mutex_lock(pbi->row_mt_mutex_);
3188
0
#endif
3189
0
      tile_data->dec_row_mt_sync.num_threads_working--;
3190
0
#if CONFIG_MULTITHREAD
3191
0
      pthread_mutex_unlock(pbi->row_mt_mutex_);
3192
0
#endif
3193
0
    } else {
3194
0
      break;
3195
0
    }
3196
0
  }
3197
3198
0
  if (td->dcb.corrupted) {
3199
0
    thread_data->error_info.setjmp = 0;
3200
0
#if CONFIG_MULTITHREAD
3201
0
    pthread_mutex_lock(pbi->row_mt_mutex_);
3202
0
#endif
3203
0
    frame_row_mt_info->row_mt_exit = 1;
3204
0
#if CONFIG_MULTITHREAD
3205
0
    pthread_cond_broadcast(pbi->row_mt_cond_);
3206
0
    pthread_mutex_unlock(pbi->row_mt_mutex_);
3207
0
#endif
3208
0
    return 0;
3209
0
  }
3210
3211
0
  set_decode_func_pointers(td, 0x2);
3212
3213
0
  while (1) {
3214
0
    AV1DecRowMTJobInfo next_job_info;
3215
0
    int end_of_frame = 0;
3216
3217
0
#if CONFIG_MULTITHREAD
3218
0
    pthread_mutex_lock(pbi->row_mt_mutex_);
3219
0
#endif
3220
0
    while (!get_next_job_info(pbi, &next_job_info, &end_of_frame)) {
3221
0
#if CONFIG_MULTITHREAD
3222
0
      pthread_cond_wait(pbi->row_mt_cond_, pbi->row_mt_mutex_);
3223
0
#endif
3224
0
    }
3225
0
#if CONFIG_MULTITHREAD
3226
0
    pthread_mutex_unlock(pbi->row_mt_mutex_);
3227
0
#endif
3228
3229
0
    if (end_of_frame) break;
3230
3231
0
    int tile_row = next_job_info.tile_row;
3232
0
    int tile_col = next_job_info.tile_col;
3233
0
    int mi_row = next_job_info.mi_row;
3234
3235
0
    TileDataDec *tile_data =
3236
0
        pbi->tile_data + tile_row * cm->tiles.cols + tile_col;
3237
0
    AV1DecRowMTSync *dec_row_mt_sync = &tile_data->dec_row_mt_sync;
3238
0
    TileInfo tile_info = tile_data->tile_info;
3239
3240
0
    av1_tile_init(&td->dcb.xd.tile, cm, tile_row, tile_col);
3241
0
    av1_init_macroblockd(cm, &td->dcb.xd);
3242
0
    td->dcb.xd.error_info = &thread_data->error_info;
3243
3244
0
    decode_tile_sb_row(pbi, td, tile_info, mi_row);
3245
3246
0
#if CONFIG_MULTITHREAD
3247
0
    pthread_mutex_lock(pbi->row_mt_mutex_);
3248
0
#endif
3249
0
    dec_row_mt_sync->num_threads_working--;
3250
0
#if CONFIG_MULTITHREAD
3251
0
    pthread_mutex_unlock(pbi->row_mt_mutex_);
3252
0
#endif
3253
0
  }
3254
0
  thread_data->error_info.setjmp = 0;
3255
0
  return !td->dcb.corrupted;
3256
0
}
3257
3258
// Sorts tile jobs in descending order of tile buffer size.
3259
0
static int compare_tile_buffers(const void *a, const void *b) {
3260
0
  const TileJobsDec *const buf1 = (const TileJobsDec *)a;
3261
0
  const TileJobsDec *const buf2 = (const TileJobsDec *)b;
3262
0
  return (((int)buf2->tile_buffer->size) - ((int)buf1->tile_buffer->size));
3263
0
}
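/*
 * Note: tile_mt_queue() below qsort()s the tile job queue with this
 * comparator, so workers dequeue the largest tile buffers first; this
 * ordering is presumably chosen to improve load balancing across the tile
 * worker threads.
 */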
3264
3265
static AOM_INLINE void enqueue_tile_jobs(AV1Decoder *pbi, AV1_COMMON *cm,
3266
                                         int tile_rows_start, int tile_rows_end,
3267
                                         int tile_cols_start, int tile_cols_end,
3268
0
                                         int start_tile, int end_tile) {
3269
0
  AV1DecTileMT *tile_mt_info = &pbi->tile_mt_info;
3270
0
  TileJobsDec *tile_job_queue = tile_mt_info->job_queue;
3271
0
  tile_mt_info->jobs_enqueued = 0;
3272
0
  tile_mt_info->jobs_dequeued = 0;
3273
3274
0
  for (int row = tile_rows_start; row < tile_rows_end; row++) {
3275
0
    for (int col = tile_cols_start; col < tile_cols_end; col++) {
3276
0
      if (row * cm->tiles.cols + col < start_tile ||
3277
0
          row * cm->tiles.cols + col > end_tile)
3278
0
        continue;
3279
0
      tile_job_queue->tile_buffer = &pbi->tile_buffers[row][col];
3280
0
      tile_job_queue->tile_data = pbi->tile_data + row * cm->tiles.cols + col;
3281
0
      tile_job_queue++;
3282
0
      tile_mt_info->jobs_enqueued++;
3283
0
    }
3284
0
  }
3285
0
}
3286
3287
static AOM_INLINE void alloc_dec_jobs(AV1DecTileMT *tile_mt_info,
3288
                                      AV1_COMMON *cm, int tile_rows,
3289
0
                                      int tile_cols) {
3290
0
  tile_mt_info->alloc_tile_rows = tile_rows;
3291
0
  tile_mt_info->alloc_tile_cols = tile_cols;
3292
0
  int num_tiles = tile_rows * tile_cols;
3293
0
#if CONFIG_MULTITHREAD
3294
0
  {
3295
0
    CHECK_MEM_ERROR(cm, tile_mt_info->job_mutex,
3296
0
                    aom_malloc(sizeof(*tile_mt_info->job_mutex) * num_tiles));
3297
3298
0
    for (int i = 0; i < num_tiles; i++) {
3299
0
      pthread_mutex_init(&tile_mt_info->job_mutex[i], NULL);
3300
0
    }
3301
0
  }
3302
0
#endif
3303
0
  CHECK_MEM_ERROR(cm, tile_mt_info->job_queue,
3304
0
                  aom_malloc(sizeof(*tile_mt_info->job_queue) * num_tiles));
3305
0
}
3306
3307
0
void av1_free_mc_tmp_buf(ThreadData *thread_data) {
3308
0
  int ref;
3309
0
  for (ref = 0; ref < 2; ref++) {
3310
0
    if (thread_data->mc_buf_use_highbd)
3311
0
      aom_free(CONVERT_TO_SHORTPTR(thread_data->mc_buf[ref]));
3312
0
    else
3313
0
      aom_free(thread_data->mc_buf[ref]);
3314
0
    thread_data->mc_buf[ref] = NULL;
3315
0
  }
3316
0
  thread_data->mc_buf_size = 0;
3317
0
  thread_data->mc_buf_use_highbd = 0;
3318
3319
0
  aom_free(thread_data->tmp_conv_dst);
3320
0
  thread_data->tmp_conv_dst = NULL;
3321
0
  aom_free(thread_data->seg_mask);
3322
0
  thread_data->seg_mask = NULL;
3323
0
  for (int i = 0; i < 2; ++i) {
3324
0
    aom_free(thread_data->tmp_obmc_bufs[i]);
3325
0
    thread_data->tmp_obmc_bufs[i] = NULL;
3326
0
  }
3327
0
}
3328
3329
static AOM_INLINE void allocate_mc_tmp_buf(AV1_COMMON *const cm,
3330
                                           ThreadData *thread_data,
3331
0
                                           int buf_size, int use_highbd) {
3332
0
  for (int ref = 0; ref < 2; ref++) {
3333
    // The mc_buf/hbd_mc_buf must be zeroed to fix an intermittent valgrind error
3334
    // 'Conditional jump or move depends on uninitialised value' from the loop
3335
    // filter. Uninitialized reads in convolve function (e.g. horiz_4tap path in
3336
    // av1_convolve_2d_sr_avx2()) from mc_buf/hbd_mc_buf are seen to be the
3337
    // potential reason for this issue.
3338
0
    if (use_highbd) {
3339
0
      uint16_t *hbd_mc_buf;
3340
0
      CHECK_MEM_ERROR(cm, hbd_mc_buf, (uint16_t *)aom_memalign(16, buf_size));
3341
0
      memset(hbd_mc_buf, 0, buf_size);
3342
0
      thread_data->mc_buf[ref] = CONVERT_TO_BYTEPTR(hbd_mc_buf);
3343
0
    } else {
3344
0
      CHECK_MEM_ERROR(cm, thread_data->mc_buf[ref],
3345
0
                      (uint8_t *)aom_memalign(16, buf_size));
3346
0
      memset(thread_data->mc_buf[ref], 0, buf_size);
3347
0
    }
3348
0
  }
3349
0
  thread_data->mc_buf_size = buf_size;
3350
0
  thread_data->mc_buf_use_highbd = use_highbd;
3351
3352
0
  CHECK_MEM_ERROR(cm, thread_data->tmp_conv_dst,
3353
0
                  aom_memalign(32, MAX_SB_SIZE * MAX_SB_SIZE *
3354
0
                                       sizeof(*thread_data->tmp_conv_dst)));
3355
0
  CHECK_MEM_ERROR(cm, thread_data->seg_mask,
3356
0
                  (uint8_t *)aom_memalign(
3357
0
                      16, 2 * MAX_SB_SQUARE * sizeof(*thread_data->seg_mask)));
3358
3359
0
  for (int i = 0; i < 2; ++i) {
3360
0
    CHECK_MEM_ERROR(
3361
0
        cm, thread_data->tmp_obmc_bufs[i],
3362
0
        aom_memalign(16, 2 * MAX_MB_PLANE * MAX_SB_SQUARE *
3363
0
                             sizeof(*thread_data->tmp_obmc_bufs[i])));
3364
0
  }
3365
0
}
3366
3367
static AOM_INLINE void reset_dec_workers(AV1Decoder *pbi,
3368
                                         AVxWorkerHook worker_hook,
3369
0
                                         int num_workers) {
3370
0
  const AVxWorkerInterface *const winterface = aom_get_worker_interface();
3371
3372
  // Reset tile decoding hook
3373
0
  for (int worker_idx = 0; worker_idx < num_workers; ++worker_idx) {
3374
0
    AVxWorker *const worker = &pbi->tile_workers[worker_idx];
3375
0
    DecWorkerData *const thread_data = pbi->thread_data + worker_idx;
3376
0
    thread_data->td->dcb = pbi->dcb;
3377
0
    thread_data->td->dcb.corrupted = 0;
3378
0
    thread_data->td->dcb.mc_buf[0] = thread_data->td->mc_buf[0];
3379
0
    thread_data->td->dcb.mc_buf[1] = thread_data->td->mc_buf[1];
3380
0
    thread_data->td->dcb.xd.tmp_conv_dst = thread_data->td->tmp_conv_dst;
3381
0
    if (worker_idx)
3382
0
      thread_data->td->dcb.xd.seg_mask = thread_data->td->seg_mask;
3383
0
    for (int j = 0; j < 2; ++j) {
3384
0
      thread_data->td->dcb.xd.tmp_obmc_bufs[j] =
3385
0
          thread_data->td->tmp_obmc_bufs[j];
3386
0
    }
3387
0
    winterface->sync(worker);
3388
3389
0
    worker->hook = worker_hook;
3390
0
    worker->data1 = thread_data;
3391
0
    worker->data2 = pbi;
3392
0
  }
3393
#if CONFIG_ACCOUNTING
3394
  if (pbi->acct_enabled) {
3395
    aom_accounting_reset(&pbi->accounting);
3396
  }
3397
#endif
3398
0
}
3399
3400
static AOM_INLINE void launch_dec_workers(AV1Decoder *pbi,
3401
                                          const uint8_t *data_end,
3402
0
                                          int num_workers) {
3403
0
  const AVxWorkerInterface *const winterface = aom_get_worker_interface();
3404
3405
0
  for (int worker_idx = num_workers - 1; worker_idx >= 0; --worker_idx) {
3406
0
    AVxWorker *const worker = &pbi->tile_workers[worker_idx];
3407
0
    DecWorkerData *const thread_data = (DecWorkerData *)worker->data1;
3408
3409
0
    thread_data->data_end = data_end;
3410
3411
0
    worker->had_error = 0;
3412
0
    if (worker_idx == 0) {
3413
0
      winterface->execute(worker);
3414
0
    } else {
3415
0
      winterface->launch(worker);
3416
0
    }
3417
0
  }
3418
0
}
3419
3420
0
static AOM_INLINE void sync_dec_workers(AV1Decoder *pbi, int num_workers) {
3421
0
  const AVxWorkerInterface *const winterface = aom_get_worker_interface();
3422
0
  int corrupted = 0;
3423
3424
0
  for (int worker_idx = num_workers; worker_idx > 0; --worker_idx) {
3425
0
    AVxWorker *const worker = &pbi->tile_workers[worker_idx - 1];
3426
0
    aom_merge_corrupted_flag(&corrupted, !winterface->sync(worker));
3427
0
  }
3428
3429
0
  pbi->dcb.corrupted = corrupted;
3430
0
}
3431
3432
0
static AOM_INLINE void decode_mt_init(AV1Decoder *pbi) {
3433
0
  AV1_COMMON *const cm = &pbi->common;
3434
0
  const AVxWorkerInterface *const winterface = aom_get_worker_interface();
3435
0
  int worker_idx;
3436
3437
  // Create workers and thread_data
3438
0
  if (pbi->num_workers == 0) {
3439
0
    const int num_threads = pbi->max_threads;
3440
0
    CHECK_MEM_ERROR(cm, pbi->tile_workers,
3441
0
                    aom_malloc(num_threads * sizeof(*pbi->tile_workers)));
3442
0
    CHECK_MEM_ERROR(cm, pbi->thread_data,
3443
0
                    aom_malloc(num_threads * sizeof(*pbi->thread_data)));
3444
3445
0
    for (worker_idx = 0; worker_idx < num_threads; ++worker_idx) {
3446
0
      AVxWorker *const worker = &pbi->tile_workers[worker_idx];
3447
0
      DecWorkerData *const thread_data = pbi->thread_data + worker_idx;
3448
0
      ++pbi->num_workers;
3449
3450
0
      winterface->init(worker);
3451
0
      worker->thread_name = "aom tile worker";
3452
0
      if (worker_idx != 0 && !winterface->reset(worker)) {
3453
0
        aom_internal_error(&pbi->error, AOM_CODEC_ERROR,
3454
0
                           "Tile decoder thread creation failed");
3455
0
      }
3456
3457
0
      if (worker_idx != 0) {
3458
        // Allocate thread data.
3459
0
        CHECK_MEM_ERROR(cm, thread_data->td,
3460
0
                        aom_memalign(32, sizeof(*thread_data->td)));
3461
0
        av1_zero(*thread_data->td);
3462
0
      } else {
3463
        // Main thread acts as a worker and uses the thread data in pbi
3464
0
        thread_data->td = &pbi->td;
3465
0
      }
3466
0
      thread_data->error_info.error_code = AOM_CODEC_OK;
3467
0
      thread_data->error_info.setjmp = 0;
3468
0
    }
3469
0
  }
3470
0
  const int use_highbd = cm->seq_params->use_highbitdepth;
3471
0
  const int buf_size = MC_TEMP_BUF_PELS << use_highbd;
3472
0
  for (worker_idx = 1; worker_idx < pbi->max_threads; ++worker_idx) {
3473
0
    DecWorkerData *const thread_data = pbi->thread_data + worker_idx;
3474
0
    if (thread_data->td->mc_buf_size != buf_size) {
3475
0
      av1_free_mc_tmp_buf(thread_data->td);
3476
0
      allocate_mc_tmp_buf(cm, thread_data->td, buf_size, use_highbd);
3477
0
    }
3478
0
  }
3479
0
}
3480
3481
static AOM_INLINE void tile_mt_queue(AV1Decoder *pbi, int tile_cols,
3482
                                     int tile_rows, int tile_rows_start,
3483
                                     int tile_rows_end, int tile_cols_start,
3484
                                     int tile_cols_end, int start_tile,
3485
0
                                     int end_tile) {
3486
0
  AV1_COMMON *const cm = &pbi->common;
3487
0
  if (pbi->tile_mt_info.alloc_tile_cols != tile_cols ||
3488
0
      pbi->tile_mt_info.alloc_tile_rows != tile_rows) {
3489
0
    av1_dealloc_dec_jobs(&pbi->tile_mt_info);
3490
0
    alloc_dec_jobs(&pbi->tile_mt_info, cm, tile_rows, tile_cols);
3491
0
  }
3492
0
  enqueue_tile_jobs(pbi, cm, tile_rows_start, tile_rows_end, tile_cols_start,
3493
0
                    tile_cols_end, start_tile, end_tile);
3494
0
  qsort(pbi->tile_mt_info.job_queue, pbi->tile_mt_info.jobs_enqueued,
3495
0
        sizeof(pbi->tile_mt_info.job_queue[0]), compare_tile_buffers);
3496
0
}
3497
3498
static const uint8_t *decode_tiles_mt(AV1Decoder *pbi, const uint8_t *data,
3499
                                      const uint8_t *data_end, int start_tile,
3500
0
                                      int end_tile) {
3501
0
  AV1_COMMON *const cm = &pbi->common;
3502
0
  CommonTileParams *const tiles = &cm->tiles;
3503
0
  const int tile_cols = tiles->cols;
3504
0
  const int tile_rows = tiles->rows;
3505
0
  const int n_tiles = tile_cols * tile_rows;
3506
0
  TileBufferDec(*const tile_buffers)[MAX_TILE_COLS] = pbi->tile_buffers;
3507
0
  const int dec_tile_row = AOMMIN(pbi->dec_tile_row, tile_rows);
3508
0
  const int single_row = pbi->dec_tile_row >= 0;
3509
0
  const int dec_tile_col = AOMMIN(pbi->dec_tile_col, tile_cols);
3510
0
  const int single_col = pbi->dec_tile_col >= 0;
3511
0
  int tile_rows_start;
3512
0
  int tile_rows_end;
3513
0
  int tile_cols_start;
3514
0
  int tile_cols_end;
3515
0
  int tile_count_tg;
3516
0
  int num_workers;
3517
0
  const uint8_t *raw_data_end = NULL;
3518
3519
0
  if (tiles->large_scale) {
3520
0
    tile_rows_start = single_row ? dec_tile_row : 0;
3521
0
    tile_rows_end = single_row ? dec_tile_row + 1 : tile_rows;
3522
0
    tile_cols_start = single_col ? dec_tile_col : 0;
3523
0
    tile_cols_end = single_col ? tile_cols_start + 1 : tile_cols;
3524
0
  } else {
3525
0
    tile_rows_start = 0;
3526
0
    tile_rows_end = tile_rows;
3527
0
    tile_cols_start = 0;
3528
0
    tile_cols_end = tile_cols;
3529
0
  }
3530
0
  tile_count_tg = end_tile - start_tile + 1;
3531
0
  num_workers = AOMMIN(pbi->max_threads, tile_count_tg);
3532
3533
  // No tiles to decode.
3534
0
  if (tile_rows_end <= tile_rows_start || tile_cols_end <= tile_cols_start ||
3535
      // The first tile to be decoded has an index greater than end_tile.
3536
0
      tile_rows_start * tile_cols + tile_cols_start > end_tile ||
3537
      // The last tile to be decoded has an index less than start_tile.
3538
0
      (tile_rows_end - 1) * tile_cols + tile_cols_end - 1 < start_tile)
3539
0
    return data;
3540
3541
0
  assert(tile_rows <= MAX_TILE_ROWS);
3542
0
  assert(tile_cols <= MAX_TILE_COLS);
3543
0
  assert(tile_count_tg > 0);
3544
0
  assert(num_workers > 0);
3545
0
  assert(start_tile <= end_tile);
3546
0
  assert(start_tile >= 0 && end_tile < n_tiles);
3547
3548
0
  decode_mt_init(pbi);
3549
3550
  // Get the tile data buffers and sizes for this tile group.
3551
0
#if EXT_TILE_DEBUG
3552
0
  if (tiles->large_scale) assert(pbi->ext_tile_debug == 1);
3553
0
  if (tiles->large_scale)
3554
0
    raw_data_end = get_ls_tile_buffers(pbi, data, data_end, tile_buffers);
3555
0
  else
3556
0
#endif  // EXT_TILE_DEBUG
3557
0
    get_tile_buffers(pbi, data, data_end, tile_buffers, start_tile, end_tile);
3558
3559
0
  if (pbi->tile_data == NULL || n_tiles != pbi->allocated_tiles) {
3560
0
    decoder_alloc_tile_data(pbi, n_tiles);
3561
0
  }
3562
0
  if (pbi->dcb.xd.seg_mask == NULL)
3563
0
    CHECK_MEM_ERROR(cm, pbi->dcb.xd.seg_mask,
3564
0
                    (uint8_t *)aom_memalign(
3565
0
                        16, 2 * MAX_SB_SQUARE * sizeof(*pbi->dcb.xd.seg_mask)));
3566
3567
0
  for (int row = 0; row < tile_rows; row++) {
3568
0
    for (int col = 0; col < tile_cols; col++) {
3569
0
      TileDataDec *tile_data = pbi->tile_data + row * tiles->cols + col;
3570
0
      av1_tile_init(&tile_data->tile_info, cm, row, col);
3571
0
    }
3572
0
  }
3573
3574
0
  tile_mt_queue(pbi, tile_cols, tile_rows, tile_rows_start, tile_rows_end,
3575
0
                tile_cols_start, tile_cols_end, start_tile, end_tile);
3576
3577
0
  reset_dec_workers(pbi, tile_worker_hook, num_workers);
3578
0
  launch_dec_workers(pbi, data_end, num_workers);
3579
0
  sync_dec_workers(pbi, num_workers);
3580
3581
0
  if (pbi->dcb.corrupted)
3582
0
    aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
3583
0
                       "Failed to decode tile data");
3584
3585
0
  if (tiles->large_scale) {
3586
0
    if (n_tiles == 1) {
3587
      // Find the end of the single tile buffer
3588
0
      return aom_reader_find_end(&pbi->tile_data->bit_reader);
3589
0
    }
3590
    // Return the end of the last tile buffer
3591
0
    return raw_data_end;
3592
0
  }
3593
0
  TileDataDec *const tile_data = pbi->tile_data + end_tile;
3594
3595
0
  return aom_reader_find_end(&tile_data->bit_reader);
3596
0
}
3597
3598
0
static AOM_INLINE void dec_alloc_cb_buf(AV1Decoder *pbi) {
3599
0
  AV1_COMMON *const cm = &pbi->common;
3600
0
  int size = ((cm->mi_params.mi_rows >> cm->seq_params->mib_size_log2) + 1) *
3601
0
             ((cm->mi_params.mi_cols >> cm->seq_params->mib_size_log2) + 1);
3602
3603
0
  if (pbi->cb_buffer_alloc_size < size) {
3604
0
    av1_dec_free_cb_buf(pbi);
3605
0
    CHECK_MEM_ERROR(cm, pbi->cb_buffer_base,
3606
0
                    aom_memalign(32, sizeof(*pbi->cb_buffer_base) * size));
3607
0
    memset(pbi->cb_buffer_base, 0, sizeof(*pbi->cb_buffer_base) * size);
3608
0
    pbi->cb_buffer_alloc_size = size;
3609
0
  }
3610
0
}
3611
3612
static AOM_INLINE void row_mt_frame_init(AV1Decoder *pbi, int tile_rows_start,
3613
                                         int tile_rows_end, int tile_cols_start,
3614
                                         int tile_cols_end, int start_tile,
3615
0
                                         int end_tile, int max_sb_rows) {
3616
0
  AV1_COMMON *const cm = &pbi->common;
3617
0
  AV1DecRowMTInfo *frame_row_mt_info = &pbi->frame_row_mt_info;
3618
3619
0
  frame_row_mt_info->tile_rows_start = tile_rows_start;
3620
0
  frame_row_mt_info->tile_rows_end = tile_rows_end;
3621
0
  frame_row_mt_info->tile_cols_start = tile_cols_start;
3622
0
  frame_row_mt_info->tile_cols_end = tile_cols_end;
3623
0
  frame_row_mt_info->start_tile = start_tile;
3624
0
  frame_row_mt_info->end_tile = end_tile;
3625
0
  frame_row_mt_info->mi_rows_to_decode = 0;
3626
0
  frame_row_mt_info->mi_rows_parse_done = 0;
3627
0
  frame_row_mt_info->mi_rows_decode_started = 0;
3628
0
  frame_row_mt_info->row_mt_exit = 0;
3629
3630
0
  for (int tile_row = tile_rows_start; tile_row < tile_rows_end; ++tile_row) {
3631
0
    for (int tile_col = tile_cols_start; tile_col < tile_cols_end; ++tile_col) {
3632
0
      if (tile_row * cm->tiles.cols + tile_col < start_tile ||
3633
0
          tile_row * cm->tiles.cols + tile_col > end_tile)
3634
0
        continue;
3635
3636
0
      TileDataDec *const tile_data =
3637
0
          pbi->tile_data + tile_row * cm->tiles.cols + tile_col;
3638
0
      TileInfo tile_info = tile_data->tile_info;
3639
3640
0
      tile_data->dec_row_mt_sync.mi_rows_parse_done = 0;
3641
0
      tile_data->dec_row_mt_sync.mi_rows_decode_started = 0;
3642
0
      tile_data->dec_row_mt_sync.num_threads_working = 0;
3643
0
      tile_data->dec_row_mt_sync.mi_rows =
3644
0
          ALIGN_POWER_OF_TWO(tile_info.mi_row_end - tile_info.mi_row_start,
3645
0
                             cm->seq_params->mib_size_log2);
3646
0
      tile_data->dec_row_mt_sync.mi_cols =
3647
0
          ALIGN_POWER_OF_TWO(tile_info.mi_col_end - tile_info.mi_col_start,
3648
0
                             cm->seq_params->mib_size_log2);
3649
3650
0
      frame_row_mt_info->mi_rows_to_decode +=
3651
0
          tile_data->dec_row_mt_sync.mi_rows;
3652
3653
      // Initialize cur_sb_col to -1 for all SB rows.
3654
0
      memset(tile_data->dec_row_mt_sync.cur_sb_col, -1,
3655
0
             sizeof(*tile_data->dec_row_mt_sync.cur_sb_col) * max_sb_rows);
3656
0
    }
3657
0
  }
3658
3659
0
#if CONFIG_MULTITHREAD
3660
0
  if (pbi->row_mt_mutex_ == NULL) {
3661
0
    CHECK_MEM_ERROR(cm, pbi->row_mt_mutex_,
3662
0
                    aom_malloc(sizeof(*(pbi->row_mt_mutex_))));
3663
0
    if (pbi->row_mt_mutex_) {
3664
0
      pthread_mutex_init(pbi->row_mt_mutex_, NULL);
3665
0
    }
3666
0
  }
3667
3668
0
  if (pbi->row_mt_cond_ == NULL) {
3669
0
    CHECK_MEM_ERROR(cm, pbi->row_mt_cond_,
3670
0
                    aom_malloc(sizeof(*(pbi->row_mt_cond_))));
3671
0
    if (pbi->row_mt_cond_) {
3672
0
      pthread_cond_init(pbi->row_mt_cond_, NULL);
3673
0
    }
3674
0
  }
3675
0
#endif
3676
0
}
3677
3678
static const uint8_t *decode_tiles_row_mt(AV1Decoder *pbi, const uint8_t *data,
3679
                                          const uint8_t *data_end,
3680
0
                                          int start_tile, int end_tile) {
3681
0
  AV1_COMMON *const cm = &pbi->common;
3682
0
  CommonTileParams *const tiles = &cm->tiles;
3683
0
  const int tile_cols = tiles->cols;
3684
0
  const int tile_rows = tiles->rows;
3685
0
  const int n_tiles = tile_cols * tile_rows;
3686
0
  TileBufferDec(*const tile_buffers)[MAX_TILE_COLS] = pbi->tile_buffers;
3687
0
  const int dec_tile_row = AOMMIN(pbi->dec_tile_row, tile_rows);
3688
0
  const int single_row = pbi->dec_tile_row >= 0;
3689
0
  const int dec_tile_col = AOMMIN(pbi->dec_tile_col, tile_cols);
3690
0
  const int single_col = pbi->dec_tile_col >= 0;
3691
0
  int tile_rows_start;
3692
0
  int tile_rows_end;
3693
0
  int tile_cols_start;
3694
0
  int tile_cols_end;
3695
0
  int tile_count_tg;
3696
0
  int num_workers = 0;
3697
0
  int max_threads;
3698
0
  const uint8_t *raw_data_end = NULL;
3699
0
  int max_sb_rows = 0;
3700
3701
0
  if (tiles->large_scale) {
3702
0
    tile_rows_start = single_row ? dec_tile_row : 0;
3703
0
    tile_rows_end = single_row ? dec_tile_row + 1 : tile_rows;
3704
0
    tile_cols_start = single_col ? dec_tile_col : 0;
3705
0
    tile_cols_end = single_col ? tile_cols_start + 1 : tile_cols;
3706
0
  } else {
3707
0
    tile_rows_start = 0;
3708
0
    tile_rows_end = tile_rows;
3709
0
    tile_cols_start = 0;
3710
0
    tile_cols_end = tile_cols;
3711
0
  }
3712
0
  tile_count_tg = end_tile - start_tile + 1;
3713
0
  max_threads = pbi->max_threads;
3714
3715
  // No tiles to decode.
3716
0
  if (tile_rows_end <= tile_rows_start || tile_cols_end <= tile_cols_start ||
3717
      // The first tile to be decoded has an index greater than end_tile.
3718
0
      tile_rows_start * tile_cols + tile_cols_start > end_tile ||
3719
      // The last tile to be decoded has an index less than start_tile.
3720
0
      (tile_rows_end - 1) * tile_cols + tile_cols_end - 1 < start_tile)
3721
0
    return data;
3722
3723
0
  assert(tile_rows <= MAX_TILE_ROWS);
3724
0
  assert(tile_cols <= MAX_TILE_COLS);
3725
0
  assert(tile_count_tg > 0);
3726
0
  assert(max_threads > 0);
3727
0
  assert(start_tile <= end_tile);
3728
0
  assert(start_tile >= 0 && end_tile < n_tiles);
3729
3730
0
  (void)tile_count_tg;
3731
3732
0
  decode_mt_init(pbi);
3733
3734
  // Get the tile data buffers and sizes for this tile group.
3735
0
#if EXT_TILE_DEBUG
3736
0
  if (tiles->large_scale) assert(pbi->ext_tile_debug == 1);
3737
0
  if (tiles->large_scale)
3738
0
    raw_data_end = get_ls_tile_buffers(pbi, data, data_end, tile_buffers);
3739
0
  else
3740
0
#endif  // EXT_TILE_DEBUG
3741
0
    get_tile_buffers(pbi, data, data_end, tile_buffers, start_tile, end_tile);
3742
3743
0
  if (pbi->tile_data == NULL || n_tiles != pbi->allocated_tiles) {
3744
0
    if (pbi->tile_data != NULL) {
3745
0
      for (int i = 0; i < pbi->allocated_tiles; i++) {
3746
0
        TileDataDec *const tile_data = pbi->tile_data + i;
3747
0
        av1_dec_row_mt_dealloc(&tile_data->dec_row_mt_sync);
3748
0
      }
3749
0
    }
3750
0
    decoder_alloc_tile_data(pbi, n_tiles);
3751
0
  }
3752
0
  if (pbi->dcb.xd.seg_mask == NULL)
3753
0
    CHECK_MEM_ERROR(cm, pbi->dcb.xd.seg_mask,
3754
0
                    (uint8_t *)aom_memalign(
3755
0
                        16, 2 * MAX_SB_SQUARE * sizeof(*pbi->dcb.xd.seg_mask)));
3756
3757
0
  for (int row = 0; row < tile_rows; row++) {
3758
0
    for (int col = 0; col < tile_cols; col++) {
3759
0
      TileDataDec *tile_data = pbi->tile_data + row * tiles->cols + col;
3760
0
      av1_tile_init(&tile_data->tile_info, cm, row, col);
3761
3762
0
      max_sb_rows = AOMMAX(max_sb_rows,
3763
0
                           av1_get_sb_rows_in_tile(cm, tile_data->tile_info));
3764
0
      num_workers += get_max_row_mt_workers_per_tile(cm, tile_data->tile_info);
3765
0
    }
3766
0
  }
3767
0
  num_workers = AOMMIN(num_workers, max_threads);
3768
3769
0
  if (pbi->allocated_row_mt_sync_rows != max_sb_rows) {
3770
0
    for (int i = 0; i < n_tiles; ++i) {
3771
0
      TileDataDec *const tile_data = pbi->tile_data + i;
3772
0
      av1_dec_row_mt_dealloc(&tile_data->dec_row_mt_sync);
3773
0
      dec_row_mt_alloc(&tile_data->dec_row_mt_sync, cm, max_sb_rows);
3774
0
    }
3775
0
    pbi->allocated_row_mt_sync_rows = max_sb_rows;
3776
0
  }
3777
3778
0
  tile_mt_queue(pbi, tile_cols, tile_rows, tile_rows_start, tile_rows_end,
3779
0
                tile_cols_start, tile_cols_end, start_tile, end_tile);
3780
3781
0
  dec_alloc_cb_buf(pbi);
3782
3783
0
  row_mt_frame_init(pbi, tile_rows_start, tile_rows_end, tile_cols_start,
3784
0
                    tile_cols_end, start_tile, end_tile, max_sb_rows);
3785
3786
0
  reset_dec_workers(pbi, row_mt_worker_hook, num_workers);
3787
0
  launch_dec_workers(pbi, data_end, num_workers);
3788
0
  sync_dec_workers(pbi, num_workers);
3789
3790
0
  if (pbi->dcb.corrupted)
3791
0
    aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
3792
0
                       "Failed to decode tile data");
3793
3794
0
  if (tiles->large_scale) {
3795
0
    if (n_tiles == 1) {
3796
      // Find the end of the single tile buffer
3797
0
      return aom_reader_find_end(&pbi->tile_data->bit_reader);
3798
0
    }
3799
    // Return the end of the last tile buffer
3800
0
    return raw_data_end;
3801
0
  }
3802
0
  TileDataDec *const tile_data = pbi->tile_data + end_tile;
3803
3804
0
  return aom_reader_find_end(&tile_data->bit_reader);
3805
0
}
3806
3807
0
static AOM_INLINE void error_handler(void *data) {
3808
0
  AV1_COMMON *const cm = (AV1_COMMON *)data;
3809
0
  aom_internal_error(cm->error, AOM_CODEC_CORRUPT_FRAME, "Truncated packet");
3810
0
}
3811
3812
// Reads the high_bitdepth and twelve_bit fields in color_config() and sets
3813
// seq_params->bit_depth based on the values of those fields and
3814
// seq_params->profile. Reports errors by calling rb->error_handler() or
3815
// aom_internal_error().
3816
static AOM_INLINE void read_bitdepth(
3817
    struct aom_read_bit_buffer *rb, SequenceHeader *seq_params,
3818
0
    struct aom_internal_error_info *error_info) {
3819
0
  const int high_bitdepth = aom_rb_read_bit(rb);
3820
0
  if (seq_params->profile == PROFILE_2 && high_bitdepth) {
3821
0
    const int twelve_bit = aom_rb_read_bit(rb);
3822
0
    seq_params->bit_depth = twelve_bit ? AOM_BITS_12 : AOM_BITS_10;
3823
0
  } else if (seq_params->profile <= PROFILE_2) {
3824
0
    seq_params->bit_depth = high_bitdepth ? AOM_BITS_10 : AOM_BITS_8;
3825
0
  } else {
3826
0
    aom_internal_error(error_info, AOM_CODEC_UNSUP_BITSTREAM,
3827
0
                       "Unsupported profile/bit-depth combination");
3828
0
  }
3829
#if !CONFIG_AV1_HIGHBITDEPTH
3830
  if (seq_params->bit_depth > AOM_BITS_8) {
3831
    aom_internal_error(error_info, AOM_CODEC_UNSUP_BITSTREAM,
3832
                       "Bit-depth %d not supported", seq_params->bit_depth);
3833
  }
3834
#endif
3835
0
}
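
The mapping between profile, high_bitdepth and twelve_bit above is compact but easy to misread. A minimal standalone sketch of the same decision, using plain ints instead of the AOM_BITS_*/PROFILE_* enums so it compiles without libaom headers (the helper name is hypothetical, not libaom API):

#include <stdio.h>

/* Returns 8, 10 or 12, or -1 where read_bitdepth() would report
 * AOM_CODEC_UNSUP_BITSTREAM. Profiles are numbered 0..2 as in the listing. */
static int sketch_bit_depth(int profile, int high_bitdepth, int twelve_bit) {
  if (profile == 2 && high_bitdepth) return twelve_bit ? 12 : 10;
  if (profile <= 2) return high_bitdepth ? 10 : 8;
  return -1;
}

int main(void) {
  printf("%d\n", sketch_bit_depth(0, 1, 0)); /* profile 0, high_bitdepth -> 10 */
  printf("%d\n", sketch_bit_depth(2, 1, 1)); /* profile 2, twelve_bit    -> 12 */
  return 0;
}

Note that in the bitstream the twelve_bit flag is only present when the profile is 2 and high_bitdepth is set; the sketch simply ignores it otherwise.
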
3836
3837
void av1_read_film_grain_params(AV1_COMMON *cm,
3838
0
                                struct aom_read_bit_buffer *rb) {
3839
0
  aom_film_grain_t *pars = &cm->film_grain_params;
3840
0
  const SequenceHeader *const seq_params = cm->seq_params;
3841
3842
0
  pars->apply_grain = aom_rb_read_bit(rb);
3843
0
  if (!pars->apply_grain) {
3844
0
    memset(pars, 0, sizeof(*pars));
3845
0
    return;
3846
0
  }
3847
3848
0
  pars->random_seed = aom_rb_read_literal(rb, 16);
3849
0
  if (cm->current_frame.frame_type == INTER_FRAME)
3850
0
    pars->update_parameters = aom_rb_read_bit(rb);
3851
0
  else
3852
0
    pars->update_parameters = 1;
3853
3854
0
  pars->bit_depth = seq_params->bit_depth;
3855
3856
0
  if (!pars->update_parameters) {
3857
    // inherit parameters from a previous reference frame
3858
0
    int film_grain_params_ref_idx = aom_rb_read_literal(rb, 3);
3859
    // Section 6.8.20: It is a requirement of bitstream conformance that
3860
    // film_grain_params_ref_idx is equal to ref_frame_idx[ j ] for some value
3861
    // of j in the range 0 to REFS_PER_FRAME - 1.
3862
0
    int found = 0;
3863
0
    for (int i = 0; i < INTER_REFS_PER_FRAME; ++i) {
3864
0
      if (film_grain_params_ref_idx == cm->remapped_ref_idx[i]) {
3865
0
        found = 1;
3866
0
        break;
3867
0
      }
3868
0
    }
3869
0
    if (!found) {
3870
0
      aom_internal_error(cm->error, AOM_CODEC_UNSUP_BITSTREAM,
3871
0
                         "Invalid film grain reference idx %d. ref_frame_idx = "
3872
0
                         "{%d, %d, %d, %d, %d, %d, %d}",
3873
0
                         film_grain_params_ref_idx, cm->remapped_ref_idx[0],
3874
0
                         cm->remapped_ref_idx[1], cm->remapped_ref_idx[2],
3875
0
                         cm->remapped_ref_idx[3], cm->remapped_ref_idx[4],
3876
0
                         cm->remapped_ref_idx[5], cm->remapped_ref_idx[6]);
3877
0
    }
3878
0
    RefCntBuffer *const buf = cm->ref_frame_map[film_grain_params_ref_idx];
3879
0
    if (buf == NULL) {
3880
0
      aom_internal_error(cm->error, AOM_CODEC_UNSUP_BITSTREAM,
3881
0
                         "Invalid Film grain reference idx");
3882
0
    }
3883
0
    if (!buf->film_grain_params_present) {
3884
0
      aom_internal_error(cm->error, AOM_CODEC_UNSUP_BITSTREAM,
3885
0
                         "Film grain reference parameters not available");
3886
0
    }
3887
0
    uint16_t random_seed = pars->random_seed;
3888
0
    *pars = buf->film_grain_params;   // inherit parameters
3889
0
    pars->random_seed = random_seed;  // with new random seed
3890
0
    return;
3891
0
  }
3892
3893
  // Scaling function parameters
3894
0
  pars->num_y_points = aom_rb_read_literal(rb, 4);  // max 14
3895
0
  if (pars->num_y_points > 14)
3896
0
    aom_internal_error(cm->error, AOM_CODEC_UNSUP_BITSTREAM,
3897
0
                       "Number of points for film grain luma scaling function "
3898
0
                       "exceeds the maximum value.");
3899
0
  for (int i = 0; i < pars->num_y_points; i++) {
3900
0
    pars->scaling_points_y[i][0] = aom_rb_read_literal(rb, 8);
3901
0
    if (i && pars->scaling_points_y[i - 1][0] >= pars->scaling_points_y[i][0])
3902
0
      aom_internal_error(cm->error, AOM_CODEC_UNSUP_BITSTREAM,
3903
0
                         "First coordinate of the scaling function points "
3904
0
                         "shall be increasing.");
3905
0
    pars->scaling_points_y[i][1] = aom_rb_read_literal(rb, 8);
3906
0
  }
3907
3908
0
  if (!seq_params->monochrome)
3909
0
    pars->chroma_scaling_from_luma = aom_rb_read_bit(rb);
3910
0
  else
3911
0
    pars->chroma_scaling_from_luma = 0;
3912
3913
0
  if (seq_params->monochrome || pars->chroma_scaling_from_luma ||
3914
0
      ((seq_params->subsampling_x == 1) && (seq_params->subsampling_y == 1) &&
3915
0
       (pars->num_y_points == 0))) {
3916
0
    pars->num_cb_points = 0;
3917
0
    pars->num_cr_points = 0;
3918
0
  } else {
3919
0
    pars->num_cb_points = aom_rb_read_literal(rb, 4);  // max 10
3920
0
    if (pars->num_cb_points > 10)
3921
0
      aom_internal_error(cm->error, AOM_CODEC_UNSUP_BITSTREAM,
3922
0
                         "Number of points for film grain cb scaling function "
3923
0
                         "exceeds the maximum value.");
3924
0
    for (int i = 0; i < pars->num_cb_points; i++) {
3925
0
      pars->scaling_points_cb[i][0] = aom_rb_read_literal(rb, 8);
3926
0
      if (i &&
3927
0
          pars->scaling_points_cb[i - 1][0] >= pars->scaling_points_cb[i][0])
3928
0
        aom_internal_error(cm->error, AOM_CODEC_UNSUP_BITSTREAM,
3929
0
                           "First coordinate of the scaling function points "
3930
0
                           "shall be increasing.");
3931
0
      pars->scaling_points_cb[i][1] = aom_rb_read_literal(rb, 8);
3932
0
    }
3933
3934
0
    pars->num_cr_points = aom_rb_read_literal(rb, 4);  // max 10
3935
0
    if (pars->num_cr_points > 10)
3936
0
      aom_internal_error(cm->error, AOM_CODEC_UNSUP_BITSTREAM,
3937
0
                         "Number of points for film grain cr scaling function "
3938
0
                         "exceeds the maximum value.");
3939
0
    for (int i = 0; i < pars->num_cr_points; i++) {
3940
0
      pars->scaling_points_cr[i][0] = aom_rb_read_literal(rb, 8);
3941
0
      if (i &&
3942
0
          pars->scaling_points_cr[i - 1][0] >= pars->scaling_points_cr[i][0])
3943
0
        aom_internal_error(cm->error, AOM_CODEC_UNSUP_BITSTREAM,
3944
0
                           "First coordinate of the scaling function points "
3945
0
                           "shall be increasing.");
3946
0
      pars->scaling_points_cr[i][1] = aom_rb_read_literal(rb, 8);
3947
0
    }
3948
3949
0
    if ((seq_params->subsampling_x == 1) && (seq_params->subsampling_y == 1) &&
3950
0
        (((pars->num_cb_points == 0) && (pars->num_cr_points != 0)) ||
3951
0
         ((pars->num_cb_points != 0) && (pars->num_cr_points == 0))))
3952
0
      aom_internal_error(cm->error, AOM_CODEC_UNSUP_BITSTREAM,
3953
0
                         "In YCbCr 4:2:0, film grain shall be applied "
3954
0
                         "to both chroma components or neither.");
3955
0
  }
3956
3957
0
  pars->scaling_shift = aom_rb_read_literal(rb, 2) + 8;  // 8 + value
3958
3959
  // AR coefficients
3960
  // Only sent if the corresponding scaling function has
3961
  // more than 0 points
3962
3963
0
  pars->ar_coeff_lag = aom_rb_read_literal(rb, 2);
3964
3965
0
  int num_pos_luma = 2 * pars->ar_coeff_lag * (pars->ar_coeff_lag + 1);
3966
0
  int num_pos_chroma = num_pos_luma;
3967
0
  if (pars->num_y_points > 0) ++num_pos_chroma;
3968
3969
0
  if (pars->num_y_points)
3970
0
    for (int i = 0; i < num_pos_luma; i++)
3971
0
      pars->ar_coeffs_y[i] = aom_rb_read_literal(rb, 8) - 128;
3972
3973
0
  if (pars->num_cb_points || pars->chroma_scaling_from_luma)
3974
0
    for (int i = 0; i < num_pos_chroma; i++)
3975
0
      pars->ar_coeffs_cb[i] = aom_rb_read_literal(rb, 8) - 128;
3976
3977
0
  if (pars->num_cr_points || pars->chroma_scaling_from_luma)
3978
0
    for (int i = 0; i < num_pos_chroma; i++)
3979
0
      pars->ar_coeffs_cr[i] = aom_rb_read_literal(rb, 8) - 128;
3980
3981
0
  pars->ar_coeff_shift = aom_rb_read_literal(rb, 2) + 6;  // 6 + value
3982
3983
0
  pars->grain_scale_shift = aom_rb_read_literal(rb, 2);
3984
3985
0
  if (pars->num_cb_points) {
3986
0
    pars->cb_mult = aom_rb_read_literal(rb, 8);
3987
0
    pars->cb_luma_mult = aom_rb_read_literal(rb, 8);
3988
0
    pars->cb_offset = aom_rb_read_literal(rb, 9);
3989
0
  }
3990
3991
0
  if (pars->num_cr_points) {
3992
0
    pars->cr_mult = aom_rb_read_literal(rb, 8);
3993
0
    pars->cr_luma_mult = aom_rb_read_literal(rb, 8);
3994
0
    pars->cr_offset = aom_rb_read_literal(rb, 9);
3995
0
  }
3996
3997
0
  pars->overlap_flag = aom_rb_read_bit(rb);
3998
3999
0
  pars->clip_to_restricted_range = aom_rb_read_bit(rb);
4000
0
}
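
The AR-coefficient counts read above follow a simple closed form: for ar_coeff_lag L the luma filter reads 2*L*(L+1) coefficients, and each chroma plane reads one extra coefficient when a luma scaling function is present. A small standalone illustration (not libaom API):

#include <stdio.h>

int main(void) {
  for (int lag = 0; lag <= 3; ++lag) {
    const int num_pos_luma = 2 * lag * (lag + 1);
    /* assume num_y_points > 0, so chroma gets one extra tap */
    const int num_pos_chroma = num_pos_luma + 1;
    printf("ar_coeff_lag=%d: %d luma taps, %d chroma taps\n", lag,
           num_pos_luma, num_pos_chroma);
  }
  return 0; /* lag 3 -> 24 luma taps and 25 chroma taps */
}
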
4001
4002
static AOM_INLINE void read_film_grain(AV1_COMMON *cm,
4003
0
                                       struct aom_read_bit_buffer *rb) {
4004
0
  if (cm->seq_params->film_grain_params_present &&
4005
0
      (cm->show_frame || cm->showable_frame)) {
4006
0
    av1_read_film_grain_params(cm, rb);
4007
0
  } else {
4008
0
    memset(&cm->film_grain_params, 0, sizeof(cm->film_grain_params));
4009
0
  }
4010
0
  cm->film_grain_params.bit_depth = cm->seq_params->bit_depth;
4011
0
  memcpy(&cm->cur_frame->film_grain_params, &cm->film_grain_params,
4012
0
         sizeof(aom_film_grain_t));
4013
0
}
4014
4015
void av1_read_color_config(struct aom_read_bit_buffer *rb,
4016
                           int allow_lowbitdepth, SequenceHeader *seq_params,
4017
0
                           struct aom_internal_error_info *error_info) {
4018
0
  read_bitdepth(rb, seq_params, error_info);
4019
4020
0
  seq_params->use_highbitdepth =
4021
0
      seq_params->bit_depth > AOM_BITS_8 || !allow_lowbitdepth;
4022
  // monochrome bit (not needed for PROFILE_1)
4023
0
  const int is_monochrome =
4024
0
      seq_params->profile != PROFILE_1 ? aom_rb_read_bit(rb) : 0;
4025
0
  seq_params->monochrome = is_monochrome;
4026
0
  int color_description_present_flag = aom_rb_read_bit(rb);
4027
0
  if (color_description_present_flag) {
4028
0
    seq_params->color_primaries = aom_rb_read_literal(rb, 8);
4029
0
    seq_params->transfer_characteristics = aom_rb_read_literal(rb, 8);
4030
0
    seq_params->matrix_coefficients = aom_rb_read_literal(rb, 8);
4031
0
  } else {
4032
0
    seq_params->color_primaries = AOM_CICP_CP_UNSPECIFIED;
4033
0
    seq_params->transfer_characteristics = AOM_CICP_TC_UNSPECIFIED;
4034
0
    seq_params->matrix_coefficients = AOM_CICP_MC_UNSPECIFIED;
4035
0
  }
4036
0
  if (is_monochrome) {
4037
    // [16,235] (including xvycc) vs [0,255] range
4038
0
    seq_params->color_range = aom_rb_read_bit(rb);
4039
0
    seq_params->subsampling_y = seq_params->subsampling_x = 1;
4040
0
    seq_params->chroma_sample_position = AOM_CSP_UNKNOWN;
4041
0
    seq_params->separate_uv_delta_q = 0;
4042
0
    return;
4043
0
  }
4044
0
  if (seq_params->color_primaries == AOM_CICP_CP_BT_709 &&
4045
0
      seq_params->transfer_characteristics == AOM_CICP_TC_SRGB &&
4046
0
      seq_params->matrix_coefficients == AOM_CICP_MC_IDENTITY) {
4047
0
    seq_params->subsampling_y = seq_params->subsampling_x = 0;
4048
0
    seq_params->color_range = 1;  // assume full color-range
4049
0
    if (!(seq_params->profile == PROFILE_1 ||
4050
0
          (seq_params->profile == PROFILE_2 &&
4051
0
           seq_params->bit_depth == AOM_BITS_12))) {
4052
0
      aom_internal_error(
4053
0
          error_info, AOM_CODEC_UNSUP_BITSTREAM,
4054
0
          "sRGB colorspace not compatible with specified profile");
4055
0
    }
4056
0
  } else {
4057
    // [16,235] (including xvycc) vs [0,255] range
4058
0
    seq_params->color_range = aom_rb_read_bit(rb);
4059
0
    if (seq_params->profile == PROFILE_0) {
4060
      // 420 only
4061
0
      seq_params->subsampling_x = seq_params->subsampling_y = 1;
4062
0
    } else if (seq_params->profile == PROFILE_1) {
4063
      // 444 only
4064
0
      seq_params->subsampling_x = seq_params->subsampling_y = 0;
4065
0
    } else {
4066
0
      assert(seq_params->profile == PROFILE_2);
4067
0
      if (seq_params->bit_depth == AOM_BITS_12) {
4068
0
        seq_params->subsampling_x = aom_rb_read_bit(rb);
4069
0
        if (seq_params->subsampling_x)
4070
0
          seq_params->subsampling_y = aom_rb_read_bit(rb);  // 422 or 420
4071
0
        else
4072
0
          seq_params->subsampling_y = 0;  // 444
4073
0
      } else {
4074
        // 422
4075
0
        seq_params->subsampling_x = 1;
4076
0
        seq_params->subsampling_y = 0;
4077
0
      }
4078
0
    }
4079
0
    if (seq_params->matrix_coefficients == AOM_CICP_MC_IDENTITY &&
4080
0
        (seq_params->subsampling_x || seq_params->subsampling_y)) {
4081
0
      aom_internal_error(
4082
0
          error_info, AOM_CODEC_UNSUP_BITSTREAM,
4083
0
          "Identity CICP Matrix incompatible with non 4:4:4 color sampling");
4084
0
    }
4085
0
    if (seq_params->subsampling_x && seq_params->subsampling_y) {
4086
0
      seq_params->chroma_sample_position = aom_rb_read_literal(rb, 2);
4087
0
    }
4088
0
  }
4089
0
  seq_params->separate_uv_delta_q = aom_rb_read_bit(rb);
4090
0
}
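
Ignoring the monochrome and sRGB/identity special cases handled above, the chroma sampling implied by each profile can be summarised by a short standalone sketch (hypothetical helper, not libaom API):

#include <stdio.h>

/* profile 0 -> 4:2:0, profile 1 -> 4:4:4, profile 2 -> 4:2:2, except that a
 * 12-bit profile 2 stream codes subsampling_x (and, if set, subsampling_y). */
static const char *sketch_sampling(int profile, int bit_depth, int ss_x,
                                   int ss_y) {
  if (profile == 0) return "4:2:0";
  if (profile == 1) return "4:4:4";
  if (bit_depth != 12) return "4:2:2";
  if (!ss_x) return "4:4:4";
  return ss_y ? "4:2:0" : "4:2:2";
}

int main(void) {
  printf("%s\n", sketch_sampling(2, 12, 1, 0)); /* 12-bit profile 2 coding 4:2:2 */
  return 0;
}
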
4091
4092
void av1_read_timing_info_header(aom_timing_info_t *timing_info,
4093
                                 struct aom_internal_error_info *error,
4094
0
                                 struct aom_read_bit_buffer *rb) {
4095
0
  timing_info->num_units_in_display_tick =
4096
0
      aom_rb_read_unsigned_literal(rb,
4097
0
                                   32);  // Number of units in a display tick
4098
0
  timing_info->time_scale = aom_rb_read_unsigned_literal(rb, 32);  // Time scale
4099
0
  if (timing_info->num_units_in_display_tick == 0 ||
4100
0
      timing_info->time_scale == 0) {
4101
0
    aom_internal_error(
4102
0
        error, AOM_CODEC_UNSUP_BITSTREAM,
4103
0
        "num_units_in_display_tick and time_scale must be greater than 0.");
4104
0
  }
4105
0
  timing_info->equal_picture_interval =
4106
0
      aom_rb_read_bit(rb);  // Equal picture interval bit
4107
0
  if (timing_info->equal_picture_interval) {
4108
0
    const uint32_t num_ticks_per_picture_minus_1 = aom_rb_read_uvlc(rb);
4109
0
    if (num_ticks_per_picture_minus_1 == UINT32_MAX) {
4110
0
      aom_internal_error(
4111
0
          error, AOM_CODEC_UNSUP_BITSTREAM,
4112
0
          "num_ticks_per_picture_minus_1 cannot be (1 << 32) - 1.");
4113
0
    }
4114
0
    timing_info->num_ticks_per_picture = num_ticks_per_picture_minus_1 + 1;
4115
0
  }
4116
0
}
4117
4118
void av1_read_decoder_model_info(aom_dec_model_info_t *decoder_model_info,
4119
0
                                 struct aom_read_bit_buffer *rb) {
4120
0
  decoder_model_info->encoder_decoder_buffer_delay_length =
4121
0
      aom_rb_read_literal(rb, 5) + 1;
4122
0
  decoder_model_info->num_units_in_decoding_tick =
4123
0
      aom_rb_read_unsigned_literal(rb,
4124
0
                                   32);  // Number of units in a decoding tick
4125
0
  decoder_model_info->buffer_removal_time_length =
4126
0
      aom_rb_read_literal(rb, 5) + 1;
4127
0
  decoder_model_info->frame_presentation_time_length =
4128
0
      aom_rb_read_literal(rb, 5) + 1;
4129
0
}
4130
4131
void av1_read_op_parameters_info(aom_dec_model_op_parameters_t *op_params,
4132
                                 int buffer_delay_length,
4133
0
                                 struct aom_read_bit_buffer *rb) {
4134
0
  op_params->decoder_buffer_delay =
4135
0
      aom_rb_read_unsigned_literal(rb, buffer_delay_length);
4136
0
  op_params->encoder_buffer_delay =
4137
0
      aom_rb_read_unsigned_literal(rb, buffer_delay_length);
4138
0
  op_params->low_delay_mode_flag = aom_rb_read_bit(rb);
4139
0
}
4140
4141
static AOM_INLINE void read_temporal_point_info(
4142
0
    AV1_COMMON *const cm, struct aom_read_bit_buffer *rb) {
4143
0
  cm->frame_presentation_time = aom_rb_read_unsigned_literal(
4144
0
      rb, cm->seq_params->decoder_model_info.frame_presentation_time_length);
4145
0
}
4146
4147
void av1_read_sequence_header(AV1_COMMON *cm, struct aom_read_bit_buffer *rb,
4148
0
                              SequenceHeader *seq_params) {
4149
0
  const int num_bits_width = aom_rb_read_literal(rb, 4) + 1;
4150
0
  const int num_bits_height = aom_rb_read_literal(rb, 4) + 1;
4151
0
  const int max_frame_width = aom_rb_read_literal(rb, num_bits_width) + 1;
4152
0
  const int max_frame_height = aom_rb_read_literal(rb, num_bits_height) + 1;
4153
4154
0
  seq_params->num_bits_width = num_bits_width;
4155
0
  seq_params->num_bits_height = num_bits_height;
4156
0
  seq_params->max_frame_width = max_frame_width;
4157
0
  seq_params->max_frame_height = max_frame_height;
4158
4159
0
  if (seq_params->reduced_still_picture_hdr) {
4160
0
    seq_params->frame_id_numbers_present_flag = 0;
4161
0
  } else {
4162
0
    seq_params->frame_id_numbers_present_flag = aom_rb_read_bit(rb);
4163
0
  }
4164
0
  if (seq_params->frame_id_numbers_present_flag) {
4165
    // We must always have delta_frame_id_length < frame_id_length,
4166
    // in order for a frame to be referenced with a unique delta.
4167
    // Avoid wasting bits by using a coding that enforces this restriction.
4168
0
    seq_params->delta_frame_id_length = aom_rb_read_literal(rb, 4) + 2;
4169
0
    seq_params->frame_id_length =
4170
0
        aom_rb_read_literal(rb, 3) + seq_params->delta_frame_id_length + 1;
4171
0
    if (seq_params->frame_id_length > 16)
4172
0
      aom_internal_error(cm->error, AOM_CODEC_CORRUPT_FRAME,
4173
0
                         "Invalid frame_id_length");
4174
0
  }
4175
4176
0
  setup_sb_size(seq_params, rb);
4177
4178
0
  seq_params->enable_filter_intra = aom_rb_read_bit(rb);
4179
0
  seq_params->enable_intra_edge_filter = aom_rb_read_bit(rb);
4180
4181
0
  if (seq_params->reduced_still_picture_hdr) {
4182
0
    seq_params->enable_interintra_compound = 0;
4183
0
    seq_params->enable_masked_compound = 0;
4184
0
    seq_params->enable_warped_motion = 0;
4185
0
    seq_params->enable_dual_filter = 0;
4186
0
    seq_params->order_hint_info.enable_order_hint = 0;
4187
0
    seq_params->order_hint_info.enable_dist_wtd_comp = 0;
4188
0
    seq_params->order_hint_info.enable_ref_frame_mvs = 0;
4189
0
    seq_params->force_screen_content_tools = 2;  // SELECT_SCREEN_CONTENT_TOOLS
4190
0
    seq_params->force_integer_mv = 2;            // SELECT_INTEGER_MV
4191
0
    seq_params->order_hint_info.order_hint_bits_minus_1 = -1;
4192
0
  } else {
4193
0
    seq_params->enable_interintra_compound = aom_rb_read_bit(rb);
4194
0
    seq_params->enable_masked_compound = aom_rb_read_bit(rb);
4195
0
    seq_params->enable_warped_motion = aom_rb_read_bit(rb);
4196
0
    seq_params->enable_dual_filter = aom_rb_read_bit(rb);
4197
4198
0
    seq_params->order_hint_info.enable_order_hint = aom_rb_read_bit(rb);
4199
0
    seq_params->order_hint_info.enable_dist_wtd_comp =
4200
0
        seq_params->order_hint_info.enable_order_hint ? aom_rb_read_bit(rb) : 0;
4201
0
    seq_params->order_hint_info.enable_ref_frame_mvs =
4202
0
        seq_params->order_hint_info.enable_order_hint ? aom_rb_read_bit(rb) : 0;
4203
4204
0
    if (aom_rb_read_bit(rb)) {
4205
0
      seq_params->force_screen_content_tools =
4206
0
          2;  // SELECT_SCREEN_CONTENT_TOOLS
4207
0
    } else {
4208
0
      seq_params->force_screen_content_tools = aom_rb_read_bit(rb);
4209
0
    }
4210
4211
0
    if (seq_params->force_screen_content_tools > 0) {
4212
0
      if (aom_rb_read_bit(rb)) {
4213
0
        seq_params->force_integer_mv = 2;  // SELECT_INTEGER_MV
4214
0
      } else {
4215
0
        seq_params->force_integer_mv = aom_rb_read_bit(rb);
4216
0
      }
4217
0
    } else {
4218
0
      seq_params->force_integer_mv = 2;  // SELECT_INTEGER_MV
4219
0
    }
4220
0
    seq_params->order_hint_info.order_hint_bits_minus_1 =
4221
0
        seq_params->order_hint_info.enable_order_hint
4222
0
            ? aom_rb_read_literal(rb, 3)
4223
0
            : -1;
4224
0
  }
4225
4226
0
  seq_params->enable_superres = aom_rb_read_bit(rb);
4227
0
  seq_params->enable_cdef = aom_rb_read_bit(rb);
4228
0
  seq_params->enable_restoration = aom_rb_read_bit(rb);
4229
0
}
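
The coding of the two frame-id lengths near the top of av1_read_sequence_header() guarantees the restriction its comment describes: delta_frame_id_length is literal(4)+2 (range 2..17) and frame_id_length is literal(3)+delta_frame_id_length+1, so frame_id_length always exceeds delta_frame_id_length, and anything above 16 is rejected separately. A throwaway check of that invariant:

#include <assert.h>
#include <stdio.h>

int main(void) {
  for (int d4 = 0; d4 < 16; ++d4) {
    for (int f3 = 0; f3 < 8; ++f3) {
      const int delta_len = d4 + 2;                /* aom_rb_read_literal(rb, 4) + 2 */
      const int frame_id_len = f3 + delta_len + 1; /* aom_rb_read_literal(rb, 3) + delta + 1 */
      assert(frame_id_len > delta_len);
      (void)frame_id_len;
    }
  }
  printf("all 128 codings satisfy delta_frame_id_length < frame_id_length\n");
  return 0;
}
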
4230
4231
static int read_global_motion_params(WarpedMotionParams *params,
4232
                                     const WarpedMotionParams *ref_params,
4233
                                     struct aom_read_bit_buffer *rb,
4234
0
                                     int allow_hp) {
4235
0
  TransformationType type = aom_rb_read_bit(rb);
4236
0
  if (type != IDENTITY) {
4237
0
    if (aom_rb_read_bit(rb))
4238
0
      type = ROTZOOM;
4239
0
    else
4240
0
      type = aom_rb_read_bit(rb) ? TRANSLATION : AFFINE;
4241
0
  }
4242
4243
0
  *params = default_warp_params;
4244
0
  params->wmtype = type;
4245
4246
0
  if (type >= ROTZOOM) {
4247
0
    params->wmmat[2] = aom_rb_read_signed_primitive_refsubexpfin(
4248
0
                           rb, GM_ALPHA_MAX + 1, SUBEXPFIN_K,
4249
0
                           (ref_params->wmmat[2] >> GM_ALPHA_PREC_DIFF) -
4250
0
                               (1 << GM_ALPHA_PREC_BITS)) *
4251
0
                           GM_ALPHA_DECODE_FACTOR +
4252
0
                       (1 << WARPEDMODEL_PREC_BITS);
4253
0
    params->wmmat[3] = aom_rb_read_signed_primitive_refsubexpfin(
4254
0
                           rb, GM_ALPHA_MAX + 1, SUBEXPFIN_K,
4255
0
                           (ref_params->wmmat[3] >> GM_ALPHA_PREC_DIFF)) *
4256
0
                       GM_ALPHA_DECODE_FACTOR;
4257
0
  }
4258
4259
0
  if (type >= AFFINE) {
4260
0
    params->wmmat[4] = aom_rb_read_signed_primitive_refsubexpfin(
4261
0
                           rb, GM_ALPHA_MAX + 1, SUBEXPFIN_K,
4262
0
                           (ref_params->wmmat[4] >> GM_ALPHA_PREC_DIFF)) *
4263
0
                       GM_ALPHA_DECODE_FACTOR;
4264
0
    params->wmmat[5] = aom_rb_read_signed_primitive_refsubexpfin(
4265
0
                           rb, GM_ALPHA_MAX + 1, SUBEXPFIN_K,
4266
0
                           (ref_params->wmmat[5] >> GM_ALPHA_PREC_DIFF) -
4267
0
                               (1 << GM_ALPHA_PREC_BITS)) *
4268
0
                           GM_ALPHA_DECODE_FACTOR +
4269
0
                       (1 << WARPEDMODEL_PREC_BITS);
4270
0
  } else {
4271
0
    params->wmmat[4] = -params->wmmat[3];
4272
0
    params->wmmat[5] = params->wmmat[2];
4273
0
  }
4274
4275
0
  if (type >= TRANSLATION) {
4276
0
    const int trans_bits = (type == TRANSLATION)
4277
0
                               ? GM_ABS_TRANS_ONLY_BITS - !allow_hp
4278
0
                               : GM_ABS_TRANS_BITS;
4279
0
    const int trans_dec_factor =
4280
0
        (type == TRANSLATION) ? GM_TRANS_ONLY_DECODE_FACTOR * (1 << !allow_hp)
4281
0
                              : GM_TRANS_DECODE_FACTOR;
4282
0
    const int trans_prec_diff = (type == TRANSLATION)
4283
0
                                    ? GM_TRANS_ONLY_PREC_DIFF + !allow_hp
4284
0
                                    : GM_TRANS_PREC_DIFF;
4285
0
    params->wmmat[0] = aom_rb_read_signed_primitive_refsubexpfin(
4286
0
                           rb, (1 << trans_bits) + 1, SUBEXPFIN_K,
4287
0
                           (ref_params->wmmat[0] >> trans_prec_diff)) *
4288
0
                       trans_dec_factor;
4289
0
    params->wmmat[1] = aom_rb_read_signed_primitive_refsubexpfin(
4290
0
                           rb, (1 << trans_bits) + 1, SUBEXPFIN_K,
4291
0
                           (ref_params->wmmat[1] >> trans_prec_diff)) *
4292
0
                       trans_dec_factor;
4293
0
  }
4294
4295
0
#if !CONFIG_REALTIME_ONLY
4296
  // For realtime only build, warped motion is disabled, so this section is not
4297
  // needed.
4298
0
  if (params->wmtype <= AFFINE) {
4299
0
    int good_shear_params = av1_get_shear_params(params);
4300
0
    if (!good_shear_params) return 0;
4301
0
  }
4302
0
#endif
4303
4304
0
  return 1;
4305
0
}
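
For ROTZOOM models the branch above derives wmmat[4] and wmmat[5] instead of reading them, so the 2x2 part of the warp is constrained to a scaled rotation. A standalone illustration with hypothetical values (assumes WARPEDMODEL_PREC_BITS == 16, i.e. 65536 represents 1.0; not libaom API):

#include <stdio.h>

int main(void) {
  const int wm2 = 65536 + 1200; /* a, hypothetical decoded wmmat[2] */
  const int wm3 = -300;         /* b, hypothetical decoded wmmat[3] */
  const int wm4 = -wm3;         /* derived, not coded */
  const int wm5 = wm2;          /* derived, not coded */
  printf("[% d % d]\n[% d % d]\n", wm2, wm3, wm4, wm5); /* [a b; -b a] */
  return 0;
}
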
4306
4307
static AOM_INLINE void read_global_motion(AV1_COMMON *cm,
4308
0
                                          struct aom_read_bit_buffer *rb) {
4309
0
  for (int frame = LAST_FRAME; frame <= ALTREF_FRAME; ++frame) {
4310
0
    const WarpedMotionParams *ref_params =
4311
0
        cm->prev_frame ? &cm->prev_frame->global_motion[frame]
4312
0
                       : &default_warp_params;
4313
0
    int good_params =
4314
0
        read_global_motion_params(&cm->global_motion[frame], ref_params, rb,
4315
0
                                  cm->features.allow_high_precision_mv);
4316
0
    if (!good_params) {
4317
#if WARPED_MOTION_DEBUG
4318
      printf("Warning: unexpected global motion shear params from aomenc\n");
4319
#endif
4320
0
      cm->global_motion[frame].invalid = 1;
4321
0
    }
4322
4323
    // TODO(sarahparker, debargha): The logic in the commented out code below
4324
    // does not work currently and causes mismatches when resize is on. Fix it
4325
    // before turning the optimization back on.
4326
    /*
4327
    YV12_BUFFER_CONFIG *ref_buf = get_ref_frame(cm, frame);
4328
    if (cm->width == ref_buf->y_crop_width &&
4329
        cm->height == ref_buf->y_crop_height) {
4330
      read_global_motion_params(&cm->global_motion[frame],
4331
                                &cm->prev_frame->global_motion[frame], rb,
4332
                                cm->features.allow_high_precision_mv);
4333
    } else {
4334
      cm->global_motion[frame] = default_warp_params;
4335
    }
4336
    */
4337
    /*
4338
    printf("Dec Ref %d [%d/%d]: %d %d %d %d\n",
4339
           frame, cm->current_frame.frame_number, cm->show_frame,
4340
           cm->global_motion[frame].wmmat[0],
4341
           cm->global_motion[frame].wmmat[1],
4342
           cm->global_motion[frame].wmmat[2],
4343
           cm->global_motion[frame].wmmat[3]);
4344
           */
4345
0
  }
4346
0
  memcpy(cm->cur_frame->global_motion, cm->global_motion,
4347
0
         REF_FRAMES * sizeof(WarpedMotionParams));
4348
0
}
4349
4350
// Release the references to the frame buffers in cm->ref_frame_map and reset
4351
// all elements of cm->ref_frame_map to NULL.
4352
0
static AOM_INLINE void reset_ref_frame_map(AV1_COMMON *const cm) {
4353
0
  BufferPool *const pool = cm->buffer_pool;
4354
4355
0
  for (int i = 0; i < REF_FRAMES; i++) {
4356
0
    decrease_ref_count(cm->ref_frame_map[i], pool);
4357
0
    cm->ref_frame_map[i] = NULL;
4358
0
  }
4359
0
}
4360
4361
// If the refresh_frame_flags bitmask is set, update reference frame id values
4362
// and mark frames as valid for reference.
4363
0
static AOM_INLINE void update_ref_frame_id(AV1Decoder *const pbi) {
4364
0
  AV1_COMMON *const cm = &pbi->common;
4365
0
  int refresh_frame_flags = cm->current_frame.refresh_frame_flags;
4366
0
  for (int i = 0; i < REF_FRAMES; i++) {
4367
0
    if ((refresh_frame_flags >> i) & 1) {
4368
0
      cm->ref_frame_id[i] = cm->current_frame_id;
4369
0
      pbi->valid_for_referencing[i] = 1;
4370
0
    }
4371
0
  }
4372
0
}
4373
4374
static AOM_INLINE void show_existing_frame_reset(AV1Decoder *const pbi,
4375
0
                                                 int existing_frame_idx) {
4376
0
  AV1_COMMON *const cm = &pbi->common;
4377
4378
0
  assert(cm->show_existing_frame);
4379
4380
0
  cm->current_frame.frame_type = KEY_FRAME;
4381
4382
0
  cm->current_frame.refresh_frame_flags = (1 << REF_FRAMES) - 1;
4383
4384
0
  for (int i = 0; i < INTER_REFS_PER_FRAME; ++i) {
4385
0
    cm->remapped_ref_idx[i] = INVALID_IDX;
4386
0
  }
4387
4388
0
  if (pbi->need_resync) {
4389
0
    reset_ref_frame_map(cm);
4390
0
    pbi->need_resync = 0;
4391
0
  }
4392
4393
  // Note that the displayed frame must be valid for referencing in order to
4394
  // have been selected.
4395
0
  cm->current_frame_id = cm->ref_frame_id[existing_frame_idx];
4396
0
  update_ref_frame_id(pbi);
4397
4398
0
  cm->features.refresh_frame_context = REFRESH_FRAME_CONTEXT_DISABLED;
4399
0
}
4400
4401
0
static INLINE void reset_frame_buffers(AV1_COMMON *cm) {
4402
0
  RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
4403
0
  int i;
4404
4405
0
  lock_buffer_pool(cm->buffer_pool);
4406
0
  reset_ref_frame_map(cm);
4407
0
  assert(cm->cur_frame->ref_count == 1);
4408
0
  for (i = 0; i < FRAME_BUFFERS; ++i) {
4409
    // Reset all unreferenced frame buffers. We can also reset cm->cur_frame
4410
    // because we are the sole owner of cm->cur_frame.
4411
0
    if (frame_bufs[i].ref_count > 0 && &frame_bufs[i] != cm->cur_frame) {
4412
0
      continue;
4413
0
    }
4414
0
    frame_bufs[i].order_hint = 0;
4415
0
    av1_zero(frame_bufs[i].ref_order_hints);
4416
0
  }
4417
0
  av1_zero_unused_internal_frame_buffers(&cm->buffer_pool->int_frame_buffers);
4418
0
  unlock_buffer_pool(cm->buffer_pool);
4419
0
}
4420
4421
// On success, returns 0. On failure, calls aom_internal_error and does not
4422
// return.
4423
static int read_uncompressed_header(AV1Decoder *pbi,
4424
0
                                    struct aom_read_bit_buffer *rb) {
4425
0
  AV1_COMMON *const cm = &pbi->common;
4426
0
  const SequenceHeader *const seq_params = cm->seq_params;
4427
0
  CurrentFrame *const current_frame = &cm->current_frame;
4428
0
  FeatureFlags *const features = &cm->features;
4429
0
  MACROBLOCKD *const xd = &pbi->dcb.xd;
4430
0
  BufferPool *const pool = cm->buffer_pool;
4431
0
  RefCntBuffer *const frame_bufs = pool->frame_bufs;
4432
0
  aom_s_frame_info *sframe_info = &pbi->sframe_info;
4433
0
  sframe_info->is_s_frame = 0;
4434
0
  sframe_info->is_s_frame_at_altref = 0;
4435
4436
0
  if (!pbi->sequence_header_ready) {
4437
0
    aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
4438
0
                       "No sequence header");
4439
0
  }
4440
4441
0
  if (seq_params->reduced_still_picture_hdr) {
4442
0
    cm->show_existing_frame = 0;
4443
0
    cm->show_frame = 1;
4444
0
    current_frame->frame_type = KEY_FRAME;
4445
0
    if (pbi->sequence_header_changed) {
4446
      // This is the start of a new coded video sequence.
4447
0
      pbi->sequence_header_changed = 0;
4448
0
      pbi->decoding_first_frame = 1;
4449
0
      reset_frame_buffers(cm);
4450
0
    }
4451
0
    features->error_resilient_mode = 1;
4452
0
  } else {
4453
0
    cm->show_existing_frame = aom_rb_read_bit(rb);
4454
0
    pbi->reset_decoder_state = 0;
4455
4456
0
    if (cm->show_existing_frame) {
4457
0
      if (pbi->sequence_header_changed) {
4458
0
        aom_internal_error(
4459
0
            &pbi->error, AOM_CODEC_CORRUPT_FRAME,
4460
0
            "New sequence header starts with a show_existing_frame.");
4461
0
      }
4462
      // Show an existing frame directly.
4463
0
      const int existing_frame_idx = aom_rb_read_literal(rb, 3);
4464
0
      RefCntBuffer *const frame_to_show = cm->ref_frame_map[existing_frame_idx];
4465
0
      if (frame_to_show == NULL) {
4466
0
        aom_internal_error(&pbi->error, AOM_CODEC_UNSUP_BITSTREAM,
4467
0
                           "Buffer does not contain a decoded frame");
4468
0
      }
4469
0
      if (seq_params->decoder_model_info_present_flag &&
4470
0
          seq_params->timing_info.equal_picture_interval == 0) {
4471
0
        read_temporal_point_info(cm, rb);
4472
0
      }
4473
0
      if (seq_params->frame_id_numbers_present_flag) {
4474
0
        int frame_id_length = seq_params->frame_id_length;
4475
0
        int display_frame_id = aom_rb_read_literal(rb, frame_id_length);
4476
        /* Compare display_frame_id with ref_frame_id and check valid for
4477
         * referencing */
4478
0
        if (display_frame_id != cm->ref_frame_id[existing_frame_idx] ||
4479
0
            pbi->valid_for_referencing[existing_frame_idx] == 0)
4480
0
          aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
4481
0
                             "Reference buffer frame ID mismatch");
4482
0
      }
4483
0
      lock_buffer_pool(pool);
4484
0
      assert(frame_to_show->ref_count > 0);
4485
      // cm->cur_frame should be the buffer referenced by the return value
4486
      // of the get_free_fb() call in assign_cur_frame_new_fb() (called by
4487
      // av1_receive_compressed_data()), so the ref_count should be 1.
4488
0
      assert(cm->cur_frame->ref_count == 1);
4489
      // assign_frame_buffer_p() decrements ref_count directly rather than
4490
      // call decrease_ref_count(). If cm->cur_frame->raw_frame_buffer has
4491
      // already been allocated, it will not be released by
4492
      // assign_frame_buffer_p()!
4493
0
      assert(!cm->cur_frame->raw_frame_buffer.data);
4494
0
      assign_frame_buffer_p(&cm->cur_frame, frame_to_show);
4495
0
      pbi->reset_decoder_state = frame_to_show->frame_type == KEY_FRAME;
4496
0
      unlock_buffer_pool(pool);
4497
4498
0
      cm->lf.filter_level[0] = 0;
4499
0
      cm->lf.filter_level[1] = 0;
4500
0
      cm->show_frame = 1;
4501
0
      current_frame->order_hint = frame_to_show->order_hint;
4502
4503
      // Section 6.8.2: It is a requirement of bitstream conformance that when
4504
      // show_existing_frame is used to show a previous frame, that the value
4505
      // of showable_frame for the previous frame was equal to 1.
4506
0
      if (!frame_to_show->showable_frame) {
4507
0
        aom_internal_error(&pbi->error, AOM_CODEC_UNSUP_BITSTREAM,
4508
0
                           "Buffer does not contain a showable frame");
4509
0
      }
4510
      // Section 6.8.2: It is a requirement of bitstream conformance that when
4511
      // show_existing_frame is used to show a previous frame with
4512
      // RefFrameType[ frame_to_show_map_idx ] equal to KEY_FRAME, that the
4513
      // frame is output via the show_existing_frame mechanism at most once.
4514
0
      if (pbi->reset_decoder_state) frame_to_show->showable_frame = 0;
4515
4516
0
      cm->film_grain_params = frame_to_show->film_grain_params;
4517
4518
0
      if (pbi->reset_decoder_state) {
4519
0
        show_existing_frame_reset(pbi, existing_frame_idx);
4520
0
      } else {
4521
0
        current_frame->refresh_frame_flags = 0;
4522
0
      }
4523
4524
0
      return 0;
4525
0
    }
4526
4527
0
    current_frame->frame_type = (FRAME_TYPE)aom_rb_read_literal(rb, 2);
4528
0
    if (pbi->sequence_header_changed) {
4529
0
      if (current_frame->frame_type == KEY_FRAME) {
4530
        // This is the start of a new coded video sequence.
4531
0
        pbi->sequence_header_changed = 0;
4532
0
        pbi->decoding_first_frame = 1;
4533
0
        reset_frame_buffers(cm);
4534
0
      } else {
4535
0
        aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
4536
0
                           "Sequence header has changed without a keyframe.");
4537
0
      }
4538
0
    }
4539
4540
0
    cm->show_frame = aom_rb_read_bit(rb);
4541
0
    if (cm->show_frame == 0) pbi->is_arf_frame_present = 1;
4542
0
    if (cm->show_frame == 0 && cm->current_frame.frame_type == KEY_FRAME)
4543
0
      pbi->is_fwd_kf_present = 1;
4544
0
    if (cm->current_frame.frame_type == S_FRAME) {
4545
0
      sframe_info->is_s_frame = 1;
4546
0
      sframe_info->is_s_frame_at_altref = cm->show_frame ? 0 : 1;
4547
0
    }
4548
0
    if (seq_params->still_picture &&
4549
0
        (current_frame->frame_type != KEY_FRAME || !cm->show_frame)) {
4550
0
      aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
4551
0
                         "Still pictures must be coded as shown keyframes");
4552
0
    }
4553
0
    cm->showable_frame = current_frame->frame_type != KEY_FRAME;
4554
0
    if (cm->show_frame) {
4555
0
      if (seq_params->decoder_model_info_present_flag &&
4556
0
          seq_params->timing_info.equal_picture_interval == 0)
4557
0
        read_temporal_point_info(cm, rb);
4558
0
    } else {
4559
      // See if this frame can be used as show_existing_frame in future
4560
0
      cm->showable_frame = aom_rb_read_bit(rb);
4561
0
    }
4562
0
    cm->cur_frame->showable_frame = cm->showable_frame;
4563
0
    features->error_resilient_mode =
4564
0
        frame_is_sframe(cm) ||
4565
0
                (current_frame->frame_type == KEY_FRAME && cm->show_frame)
4566
0
            ? 1
4567
0
            : aom_rb_read_bit(rb);
4568
0
  }
4569
4570
0
  if (current_frame->frame_type == KEY_FRAME && cm->show_frame) {
4571
    /* All frames need to be marked as not valid for referencing */
4572
0
    for (int i = 0; i < REF_FRAMES; i++) {
4573
0
      pbi->valid_for_referencing[i] = 0;
4574
0
    }
4575
0
  }
4576
0
  features->disable_cdf_update = aom_rb_read_bit(rb);
4577
0
  if (seq_params->force_screen_content_tools == 2) {
4578
0
    features->allow_screen_content_tools = aom_rb_read_bit(rb);
4579
0
  } else {
4580
0
    features->allow_screen_content_tools =
4581
0
        seq_params->force_screen_content_tools;
4582
0
  }
4583
4584
0
  if (features->allow_screen_content_tools) {
4585
0
    if (seq_params->force_integer_mv == 2) {
4586
0
      features->cur_frame_force_integer_mv = aom_rb_read_bit(rb);
4587
0
    } else {
4588
0
      features->cur_frame_force_integer_mv = seq_params->force_integer_mv;
4589
0
    }
4590
0
  } else {
4591
0
    features->cur_frame_force_integer_mv = 0;
4592
0
  }
4593
4594
0
  int frame_size_override_flag = 0;
4595
0
  features->allow_intrabc = 0;
4596
0
  features->primary_ref_frame = PRIMARY_REF_NONE;
4597
4598
0
  if (!seq_params->reduced_still_picture_hdr) {
4599
0
    if (seq_params->frame_id_numbers_present_flag) {
4600
0
      int frame_id_length = seq_params->frame_id_length;
4601
0
      int diff_len = seq_params->delta_frame_id_length;
4602
0
      int prev_frame_id = 0;
4603
0
      int have_prev_frame_id =
4604
0
          !pbi->decoding_first_frame &&
4605
0
          !(current_frame->frame_type == KEY_FRAME && cm->show_frame);
4606
0
      if (have_prev_frame_id) {
4607
0
        prev_frame_id = cm->current_frame_id;
4608
0
      }
4609
0
      cm->current_frame_id = aom_rb_read_literal(rb, frame_id_length);
4610
4611
0
      if (have_prev_frame_id) {
4612
0
        int diff_frame_id;
4613
0
        if (cm->current_frame_id > prev_frame_id) {
4614
0
          diff_frame_id = cm->current_frame_id - prev_frame_id;
4615
0
        } else {
4616
0
          diff_frame_id =
4617
0
              (1 << frame_id_length) + cm->current_frame_id - prev_frame_id;
4618
0
        }
4619
        /* Check current_frame_id for conformance */
4620
0
        if (prev_frame_id == cm->current_frame_id ||
4621
0
            diff_frame_id >= (1 << (frame_id_length - 1))) {
4622
0
          aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
4623
0
                             "Invalid value of current_frame_id");
4624
0
        }
4625
0
      }
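      /* Worked example of the wrap-around check above (illustrative values):
       * with frame_id_length == 15, prev_frame_id == 32000 and
       * current_frame_id == 5, the ids have wrapped, so
       *   diff_frame_id = (1 << 15) + 5 - 32000 = 773,
       * which is below 1 << 14 == 16384 and therefore conformant. A repeated
       * id, or a forward distance of at least half the id space, is rejected
       * as corrupt. */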
4626
      /* Check if some frames need to be marked as not valid for referencing */
4627
0
      for (int i = 0; i < REF_FRAMES; i++) {
4628
0
        if (cm->current_frame_id - (1 << diff_len) > 0) {
4629
0
          if (cm->ref_frame_id[i] > cm->current_frame_id ||
4630
0
              cm->ref_frame_id[i] < cm->current_frame_id - (1 << diff_len))
4631
0
            pbi->valid_for_referencing[i] = 0;
4632
0
        } else {
4633
0
          if (cm->ref_frame_id[i] > cm->current_frame_id &&
4634
0
              cm->ref_frame_id[i] < (1 << frame_id_length) +
4635
0
                                        cm->current_frame_id - (1 << diff_len))
4636
0
            pbi->valid_for_referencing[i] = 0;
4637
0
        }
4638
0
      }
4639
0
    }
4640
4641
0
    frame_size_override_flag = frame_is_sframe(cm) ? 1 : aom_rb_read_bit(rb);
4642
4643
0
    current_frame->order_hint = aom_rb_read_literal(
4644
0
        rb, seq_params->order_hint_info.order_hint_bits_minus_1 + 1);
4645
0
    current_frame->frame_number = current_frame->order_hint;
4646
4647
0
    if (!features->error_resilient_mode && !frame_is_intra_only(cm)) {
4648
0
      features->primary_ref_frame = aom_rb_read_literal(rb, PRIMARY_REF_BITS);
4649
0
    }
4650
0
  }
4651
4652
0
  if (seq_params->decoder_model_info_present_flag) {
4653
0
    pbi->buffer_removal_time_present = aom_rb_read_bit(rb);
4654
0
    if (pbi->buffer_removal_time_present) {
4655
0
      for (int op_num = 0;
4656
0
           op_num < seq_params->operating_points_cnt_minus_1 + 1; op_num++) {
4657
0
        if (seq_params->op_params[op_num].decoder_model_param_present_flag) {
4658
0
          if (seq_params->operating_point_idc[op_num] == 0 ||
4659
0
              (((seq_params->operating_point_idc[op_num] >>
4660
0
                 cm->temporal_layer_id) &
4661
0
                0x1) &&
4662
0
               ((seq_params->operating_point_idc[op_num] >>
4663
0
                 (cm->spatial_layer_id + 8)) &
4664
0
                0x1))) {
4665
0
            cm->buffer_removal_times[op_num] = aom_rb_read_unsigned_literal(
4666
0
                rb, seq_params->decoder_model_info.buffer_removal_time_length);
4667
0
          } else {
4668
0
            cm->buffer_removal_times[op_num] = 0;
4669
0
          }
4670
0
        } else {
4671
0
          cm->buffer_removal_times[op_num] = 0;
4672
0
        }
4673
0
      }
4674
0
    }
4675
0
  }
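  /* Per the condition above, an all-zero operating_point_idc applies to the
   * whole stream; otherwise bit temporal_layer_id (low 8 bits) and bit
   * spatial_layer_id + 8 must both be set for this frame to belong to the
   * operating point. Illustrative example: idc == 0x103 covers temporal
   * layers 0-1 and spatial layer 0, so a frame with temporal_layer_id 1 and
   * spatial_layer_id 0 reads its buffer_removal_time, while a frame with
   * spatial_layer_id 1 gets 0. */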
4676
0
  if (current_frame->frame_type == KEY_FRAME) {
4677
0
    if (!cm->show_frame) {  // unshown keyframe (forward keyframe)
4678
0
      current_frame->refresh_frame_flags = aom_rb_read_literal(rb, REF_FRAMES);
4679
0
    } else {  // shown keyframe
4680
0
      current_frame->refresh_frame_flags = (1 << REF_FRAMES) - 1;
4681
0
    }
4682
4683
0
    for (int i = 0; i < INTER_REFS_PER_FRAME; ++i) {
4684
0
      cm->remapped_ref_idx[i] = INVALID_IDX;
4685
0
    }
4686
0
    if (pbi->need_resync) {
4687
0
      reset_ref_frame_map(cm);
4688
0
      pbi->need_resync = 0;
4689
0
    }
4690
0
  } else {
4691
0
    if (current_frame->frame_type == INTRA_ONLY_FRAME) {
4692
0
      current_frame->refresh_frame_flags = aom_rb_read_literal(rb, REF_FRAMES);
4693
0
      if (current_frame->refresh_frame_flags == 0xFF) {
4694
0
        aom_internal_error(&pbi->error, AOM_CODEC_UNSUP_BITSTREAM,
4695
0
                           "Intra only frames cannot have refresh flags 0xFF");
4696
0
      }
4697
0
      if (pbi->need_resync) {
4698
0
        reset_ref_frame_map(cm);
4699
0
        pbi->need_resync = 0;
4700
0
      }
4701
0
    } else if (pbi->need_resync != 1) { /* Skip if need resync */
4702
0
      current_frame->refresh_frame_flags =
4703
0
          frame_is_sframe(cm) ? 0xFF : aom_rb_read_literal(rb, REF_FRAMES);
4704
0
    }
4705
0
  }
4706
4707
0
  if (!frame_is_intra_only(cm) || current_frame->refresh_frame_flags != 0xFF) {
4708
    // Read all ref frame order hints if error_resilient_mode == 1
4709
0
    if (features->error_resilient_mode &&
4710
0
        seq_params->order_hint_info.enable_order_hint) {
4711
0
      for (int ref_idx = 0; ref_idx < REF_FRAMES; ref_idx++) {
4712
        // Read order hint from bit stream
4713
0
        unsigned int order_hint = aom_rb_read_literal(
4714
0
            rb, seq_params->order_hint_info.order_hint_bits_minus_1 + 1);
4715
        // Get buffer
4716
0
        RefCntBuffer *buf = cm->ref_frame_map[ref_idx];
4717
0
        if (buf == NULL || order_hint != buf->order_hint) {
4718
0
          if (buf != NULL) {
4719
0
            lock_buffer_pool(pool);
4720
0
            decrease_ref_count(buf, pool);
4721
0
            unlock_buffer_pool(pool);
4722
0
            cm->ref_frame_map[ref_idx] = NULL;
4723
0
          }
4724
          // If no corresponding buffer exists, allocate a new buffer with all
4725
          // pixels set to neutral grey.
4726
0
          int buf_idx = get_free_fb(cm);
4727
0
          if (buf_idx == INVALID_IDX) {
4728
0
            aom_internal_error(&pbi->error, AOM_CODEC_MEM_ERROR,
4729
0
                               "Unable to find free frame buffer");
4730
0
          }
4731
0
          buf = &frame_bufs[buf_idx];
4732
0
          lock_buffer_pool(pool);
4733
0
          if (aom_realloc_frame_buffer(
4734
0
                  &buf->buf, seq_params->max_frame_width,
4735
0
                  seq_params->max_frame_height, seq_params->subsampling_x,
4736
0
                  seq_params->subsampling_y, seq_params->use_highbitdepth,
4737
0
                  AOM_BORDER_IN_PIXELS, features->byte_alignment,
4738
0
                  &buf->raw_frame_buffer, pool->get_fb_cb, pool->cb_priv, 0)) {
4739
0
            decrease_ref_count(buf, pool);
4740
0
            unlock_buffer_pool(pool);
4741
0
            aom_internal_error(&pbi->error, AOM_CODEC_MEM_ERROR,
4742
0
                               "Failed to allocate frame buffer");
4743
0
          }
4744
0
          unlock_buffer_pool(pool);
4745
          // According to the specification, valid bitstreams are required to
4746
          // never use missing reference frames so the filling process for
4747
          // missing frames is not normatively defined and RefValid for missing
4748
          // frames is set to 0.
4749
4750
          // To make libaom more robust when the bitstream has been corrupted
4751
          // by the loss of some frames of data, this code adds a neutral grey
4752
          // buffer in place of missing frames, i.e.
4753
          //
4754
0
          set_planes_to_neutral_grey(seq_params, &buf->buf, 0);
4755
          //
4756
          // and allows the frames to be used for referencing, i.e.
4757
          //
4758
0
          pbi->valid_for_referencing[ref_idx] = 1;
4759
          //
4760
          // Please note such behavior is not normative and other decoders may
4761
          // use a different approach.
4762
0
          cm->ref_frame_map[ref_idx] = buf;
4763
0
          buf->order_hint = order_hint;
4764
0
        }
4765
0
      }
4766
0
    }
4767
0
  }
4768
4769
0
  if (current_frame->frame_type == KEY_FRAME) {
4770
0
    setup_frame_size(cm, frame_size_override_flag, rb);
4771
4772
0
    if (features->allow_screen_content_tools && !av1_superres_scaled(cm))
4773
0
      features->allow_intrabc = aom_rb_read_bit(rb);
4774
0
    features->allow_ref_frame_mvs = 0;
4775
0
    cm->prev_frame = NULL;
4776
0
  } else {
4777
0
    features->allow_ref_frame_mvs = 0;
4778
4779
0
    if (current_frame->frame_type == INTRA_ONLY_FRAME) {
4780
0
      cm->cur_frame->film_grain_params_present =
4781
0
          seq_params->film_grain_params_present;
4782
0
      setup_frame_size(cm, frame_size_override_flag, rb);
4783
0
      if (features->allow_screen_content_tools && !av1_superres_scaled(cm))
4784
0
        features->allow_intrabc = aom_rb_read_bit(rb);
4785
4786
0
    } else if (pbi->need_resync != 1) { /* Skip if need resync */
4787
0
      int frame_refs_short_signaling = 0;
4788
      // Frame refs short signaling is off when error resilient mode is on.
4789
0
      if (seq_params->order_hint_info.enable_order_hint)
4790
0
        frame_refs_short_signaling = aom_rb_read_bit(rb);
4791
4792
0
      if (frame_refs_short_signaling) {
4793
        // == LAST_FRAME ==
4794
0
        const int lst_ref = aom_rb_read_literal(rb, REF_FRAMES_LOG2);
4795
0
        const RefCntBuffer *const lst_buf = cm->ref_frame_map[lst_ref];
4796
4797
        // == GOLDEN_FRAME ==
4798
0
        const int gld_ref = aom_rb_read_literal(rb, REF_FRAMES_LOG2);
4799
0
        const RefCntBuffer *const gld_buf = cm->ref_frame_map[gld_ref];
4800
4801
        // Most of the time, streams start with a keyframe. In that case,
4802
        // ref_frame_map will have been filled in at that point and will not
4803
        // contain any NULLs. However, streams are explicitly allowed to start
4804
        // with an intra-only frame, so long as they don't then signal a
4805
        // reference to a slot that hasn't been set yet. That's what we are
4806
        // checking here.
4807
0
        if (lst_buf == NULL)
4808
0
          aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
4809
0
                             "Inter frame requests nonexistent reference");
4810
0
        if (gld_buf == NULL)
4811
0
          aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
4812
0
                             "Inter frame requests nonexistent reference");
4813
4814
0
        av1_set_frame_refs(cm, cm->remapped_ref_idx, lst_ref, gld_ref);
4815
0
      }
4816
4817
0
      for (int i = 0; i < INTER_REFS_PER_FRAME; ++i) {
4818
0
        int ref = 0;
4819
0
        if (!frame_refs_short_signaling) {
4820
0
          ref = aom_rb_read_literal(rb, REF_FRAMES_LOG2);
4821
4822
          // Most of the time, streams start with a keyframe. In that case,
4823
          // ref_frame_map will have been filled in at that point and will not
4824
          // contain any NULLs. However, streams are explicitly allowed to start
4825
          // with an intra-only frame, so long as they don't then signal a
4826
          // reference to a slot that hasn't been set yet. That's what we are
4827
          // checking here.
4828
0
          if (cm->ref_frame_map[ref] == NULL)
4829
0
            aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
4830
0
                               "Inter frame requests nonexistent reference");
4831
0
          cm->remapped_ref_idx[i] = ref;
4832
0
        } else {
4833
0
          ref = cm->remapped_ref_idx[i];
4834
0
        }
4835
        // Check valid for referencing
4836
0
        if (pbi->valid_for_referencing[ref] == 0)
4837
0
          aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
4838
0
                             "Reference frame not valid for referencing");
4839
4840
0
        cm->ref_frame_sign_bias[LAST_FRAME + i] = 0;
4841
4842
0
        if (seq_params->frame_id_numbers_present_flag) {
4843
0
          int frame_id_length = seq_params->frame_id_length;
4844
0
          int diff_len = seq_params->delta_frame_id_length;
4845
0
          int delta_frame_id_minus_1 = aom_rb_read_literal(rb, diff_len);
4846
0
          int ref_frame_id =
4847
0
              ((cm->current_frame_id - (delta_frame_id_minus_1 + 1) +
4848
0
                (1 << frame_id_length)) %
4849
0
               (1 << frame_id_length));
4850
          // Compare values derived from delta_frame_id_minus_1 and
4851
          // refresh_frame_flags.
4852
0
          if (ref_frame_id != cm->ref_frame_id[ref])
4853
0
            aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
4854
0
                               "Reference buffer frame ID mismatch");
4855
0
        }
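        /* Worked example of the modular reconstruction above (illustrative
         * numbers): with frame_id_length == 15, current_frame_id == 3 and
         * delta_frame_id_minus_1 == 4, the expected reference id is
         *   (3 - 5 + (1 << 15)) % (1 << 15) = 32766,
         * i.e. the delta counts backwards across the wrap of the id space;
         * a mismatch against cm->ref_frame_id[ref] is treated as corruption. */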
4856
0
      }
4857
4858
0
      if (!features->error_resilient_mode && frame_size_override_flag) {
4859
0
        setup_frame_size_with_refs(cm, rb);
4860
0
      } else {
4861
0
        setup_frame_size(cm, frame_size_override_flag, rb);
4862
0
      }
4863
4864
0
      if (features->cur_frame_force_integer_mv) {
4865
0
        features->allow_high_precision_mv = 0;
4866
0
      } else {
4867
0
        features->allow_high_precision_mv = aom_rb_read_bit(rb);
4868
0
      }
4869
0
      features->interp_filter = read_frame_interp_filter(rb);
4870
0
      features->switchable_motion_mode = aom_rb_read_bit(rb);
4871
0
    }
4872
4873
0
    cm->prev_frame = get_primary_ref_frame_buf(cm);
4874
0
    if (features->primary_ref_frame != PRIMARY_REF_NONE &&
4875
0
        get_primary_ref_frame_buf(cm) == NULL) {
4876
0
      aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
4877
0
                         "Reference frame containing this frame's initial "
4878
0
                         "frame context is unavailable.");
4879
0
    }
4880
4881
0
    if (!(current_frame->frame_type == INTRA_ONLY_FRAME) &&
4882
0
        pbi->need_resync != 1) {
4883
0
      if (frame_might_allow_ref_frame_mvs(cm))
4884
0
        features->allow_ref_frame_mvs = aom_rb_read_bit(rb);
4885
0
      else
4886
0
        features->allow_ref_frame_mvs = 0;
4887
4888
0
      for (int i = LAST_FRAME; i <= ALTREF_FRAME; ++i) {
4889
0
        const RefCntBuffer *const ref_buf = get_ref_frame_buf(cm, i);
4890
0
        struct scale_factors *const ref_scale_factors =
4891
0
            get_ref_scale_factors(cm, i);
4892
0
        av1_setup_scale_factors_for_frame(
4893
0
            ref_scale_factors, ref_buf->buf.y_crop_width,
4894
0
            ref_buf->buf.y_crop_height, cm->width, cm->height);
4895
0
        if ((!av1_is_valid_scale(ref_scale_factors)))
4896
0
          aom_internal_error(&pbi->error, AOM_CODEC_UNSUP_BITSTREAM,
4897
0
                             "Reference frame has invalid dimensions");
4898
0
      }
4899
0
    }
4900
0
  }
4901
4902
0
  av1_setup_frame_buf_refs(cm);
4903
4904
0
  av1_setup_frame_sign_bias(cm);
4905
4906
0
  cm->cur_frame->frame_type = current_frame->frame_type;
4907
4908
0
  update_ref_frame_id(pbi);
4909
4910
0
  const int might_bwd_adapt = !(seq_params->reduced_still_picture_hdr) &&
4911
0
                              !(features->disable_cdf_update);
4912
0
  if (might_bwd_adapt) {
4913
0
    features->refresh_frame_context = aom_rb_read_bit(rb)
4914
0
                                          ? REFRESH_FRAME_CONTEXT_DISABLED
4915
0
                                          : REFRESH_FRAME_CONTEXT_BACKWARD;
4916
0
  } else {
4917
0
    features->refresh_frame_context = REFRESH_FRAME_CONTEXT_DISABLED;
4918
0
  }
4919
4920
0
  cm->cur_frame->buf.bit_depth = seq_params->bit_depth;
4921
0
  cm->cur_frame->buf.color_primaries = seq_params->color_primaries;
4922
0
  cm->cur_frame->buf.transfer_characteristics =
4923
0
      seq_params->transfer_characteristics;
4924
0
  cm->cur_frame->buf.matrix_coefficients = seq_params->matrix_coefficients;
4925
0
  cm->cur_frame->buf.monochrome = seq_params->monochrome;
4926
0
  cm->cur_frame->buf.chroma_sample_position =
4927
0
      seq_params->chroma_sample_position;
4928
0
  cm->cur_frame->buf.color_range = seq_params->color_range;
4929
0
  cm->cur_frame->buf.render_width = cm->render_width;
4930
0
  cm->cur_frame->buf.render_height = cm->render_height;
4931
4932
0
  if (pbi->need_resync) {
4933
0
    aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
4934
0
                       "Keyframe / intra-only frame required to reset decoder"
4935
0
                       " state");
4936
0
  }
4937
4938
0
  if (features->allow_intrabc) {
4939
    // Set parameters corresponding to no filtering.
4940
0
    struct loopfilter *lf = &cm->lf;
4941
0
    lf->filter_level[0] = 0;
4942
0
    lf->filter_level[1] = 0;
4943
0
    cm->cdef_info.cdef_bits = 0;
4944
0
    cm->cdef_info.cdef_strengths[0] = 0;
4945
0
    cm->cdef_info.nb_cdef_strengths = 1;
4946
0
    cm->cdef_info.cdef_uv_strengths[0] = 0;
4947
0
    cm->rst_info[0].frame_restoration_type = RESTORE_NONE;
4948
0
    cm->rst_info[1].frame_restoration_type = RESTORE_NONE;
4949
0
    cm->rst_info[2].frame_restoration_type = RESTORE_NONE;
4950
0
  }
4951
4952
0
  read_tile_info(pbi, rb);
4953
0
  if (!av1_is_min_tile_width_satisfied(cm)) {
4954
0
    aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
4955
0
                       "Minimum tile width requirement not satisfied");
4956
0
  }
4957
4958
0
  CommonQuantParams *const quant_params = &cm->quant_params;
4959
0
  setup_quantization(quant_params, av1_num_planes(cm),
4960
0
                     cm->seq_params->separate_uv_delta_q, rb);
4961
0
  xd->bd = (int)seq_params->bit_depth;
4962
4963
0
  CommonContexts *const above_contexts = &cm->above_contexts;
4964
0
  if (above_contexts->num_planes < av1_num_planes(cm) ||
4965
0
      above_contexts->num_mi_cols < cm->mi_params.mi_cols ||
4966
0
      above_contexts->num_tile_rows < cm->tiles.rows) {
4967
0
    av1_free_above_context_buffers(above_contexts);
4968
0
    if (av1_alloc_above_context_buffers(above_contexts, cm->tiles.rows,
4969
0
                                        cm->mi_params.mi_cols,
4970
0
                                        av1_num_planes(cm))) {
4971
0
      aom_internal_error(&pbi->error, AOM_CODEC_MEM_ERROR,
4972
0
                         "Failed to allocate context buffers");
4973
0
    }
4974
0
  }
4975
4976
0
  if (features->primary_ref_frame == PRIMARY_REF_NONE) {
4977
0
    av1_setup_past_independence(cm);
4978
0
  }
4979
4980
0
  setup_segmentation(cm, rb);
4981
4982
0
  cm->delta_q_info.delta_q_res = 1;
4983
0
  cm->delta_q_info.delta_lf_res = 1;
4984
0
  cm->delta_q_info.delta_lf_present_flag = 0;
4985
0
  cm->delta_q_info.delta_lf_multi = 0;
4986
0
  cm->delta_q_info.delta_q_present_flag =
4987
0
      quant_params->base_qindex > 0 ? aom_rb_read_bit(rb) : 0;
4988
0
  if (cm->delta_q_info.delta_q_present_flag) {
4989
0
    xd->current_base_qindex = quant_params->base_qindex;
4990
0
    cm->delta_q_info.delta_q_res = 1 << aom_rb_read_literal(rb, 2);
4991
0
    if (!features->allow_intrabc)
4992
0
      cm->delta_q_info.delta_lf_present_flag = aom_rb_read_bit(rb);
4993
0
    if (cm->delta_q_info.delta_lf_present_flag) {
4994
0
      cm->delta_q_info.delta_lf_res = 1 << aom_rb_read_literal(rb, 2);
4995
0
      cm->delta_q_info.delta_lf_multi = aom_rb_read_bit(rb);
4996
0
      av1_reset_loop_filter_delta(xd, av1_num_planes(cm));
4997
0
    }
4998
0
  }
4999
5000
0
  xd->cur_frame_force_integer_mv = features->cur_frame_force_integer_mv;
5001
5002
0
  for (int i = 0; i < MAX_SEGMENTS; ++i) {
5003
0
    const int qindex = av1_get_qindex(&cm->seg, i, quant_params->base_qindex);
5004
0
    xd->lossless[i] =
5005
0
        qindex == 0 && quant_params->y_dc_delta_q == 0 &&
5006
0
        quant_params->u_dc_delta_q == 0 && quant_params->u_ac_delta_q == 0 &&
5007
0
        quant_params->v_dc_delta_q == 0 && quant_params->v_ac_delta_q == 0;
5008
0
    xd->qindex[i] = qindex;
5009
0
  }
5010
0
  features->coded_lossless = is_coded_lossless(cm, xd);
5011
0
  features->all_lossless = features->coded_lossless && !av1_superres_scaled(cm);
5012
0
  setup_segmentation_dequant(cm, xd);
5013
0
  if (features->coded_lossless) {
5014
0
    cm->lf.filter_level[0] = 0;
5015
0
    cm->lf.filter_level[1] = 0;
5016
0
  }
5017
0
  if (features->coded_lossless || !seq_params->enable_cdef) {
5018
0
    cm->cdef_info.cdef_bits = 0;
5019
0
    cm->cdef_info.cdef_strengths[0] = 0;
5020
0
    cm->cdef_info.cdef_uv_strengths[0] = 0;
5021
0
  }
5022
0
  if (features->all_lossless || !seq_params->enable_restoration) {
5023
0
    cm->rst_info[0].frame_restoration_type = RESTORE_NONE;
5024
0
    cm->rst_info[1].frame_restoration_type = RESTORE_NONE;
5025
0
    cm->rst_info[2].frame_restoration_type = RESTORE_NONE;
5026
0
  }
5027
0
  setup_loopfilter(cm, rb);
5028
5029
0
  if (!features->coded_lossless && seq_params->enable_cdef) {
5030
0
    setup_cdef(cm, rb);
5031
0
  }
5032
0
  if (!features->all_lossless && seq_params->enable_restoration) {
5033
0
    decode_restoration_mode(cm, rb);
5034
0
  }
5035
5036
0
  features->tx_mode = read_tx_mode(rb, features->coded_lossless);
5037
0
  current_frame->reference_mode = read_frame_reference_mode(cm, rb);
5038
5039
0
  av1_setup_skip_mode_allowed(cm);
5040
0
  current_frame->skip_mode_info.skip_mode_flag =
5041
0
      current_frame->skip_mode_info.skip_mode_allowed ? aom_rb_read_bit(rb) : 0;
5042
5043
0
  if (frame_might_allow_warped_motion(cm))
5044
0
    features->allow_warped_motion = aom_rb_read_bit(rb);
5045
0
  else
5046
0
    features->allow_warped_motion = 0;
5047
5048
0
  features->reduced_tx_set_used = aom_rb_read_bit(rb);
5049
5050
0
  if (features->allow_ref_frame_mvs && !frame_might_allow_ref_frame_mvs(cm)) {
5051
0
    aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
5052
0
                       "Frame wrongly requests reference frame MVs");
5053
0
  }
5054
5055
0
  if (!frame_is_intra_only(cm)) read_global_motion(cm, rb);
5056
5057
0
  cm->cur_frame->film_grain_params_present =
5058
0
      seq_params->film_grain_params_present;
5059
0
  read_film_grain(cm, rb);
5060
5061
0
#if EXT_TILE_DEBUG
5062
0
  if (pbi->ext_tile_debug && cm->tiles.large_scale) {
5063
0
    read_ext_tile_info(pbi, rb);
5064
0
    av1_set_single_tile_decoding_mode(cm);
5065
0
  }
5066
0
#endif  // EXT_TILE_DEBUG
5067
0
  return 0;
5068
0
}
5069
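
The per-segment lossless decision parsed above condenses to the predicate below. This is a sketch written for this report, not a helper that exists in libaom; coded_lossless then means every segment passes the test, and all_lossless additionally requires that no superres upscaling is in effect.

// Condensed sketch (hypothetical, not a libaom function) of the per-segment
// lossless test: a segment decodes losslessly only when its effective qindex
// is 0 and every DC/AC delta-q is also 0.
static int segment_is_lossless_sketch(const CommonQuantParams *quant_params,
                                      int qindex) {
  return qindex == 0 && quant_params->y_dc_delta_q == 0 &&
         quant_params->u_dc_delta_q == 0 && quant_params->u_ac_delta_q == 0 &&
         quant_params->v_dc_delta_q == 0 && quant_params->v_ac_delta_q == 0;
}
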
5070
struct aom_read_bit_buffer *av1_init_read_bit_buffer(
5071
    AV1Decoder *pbi, struct aom_read_bit_buffer *rb, const uint8_t *data,
5072
0
    const uint8_t *data_end) {
5073
0
  rb->bit_offset = 0;
5074
0
  rb->error_handler = error_handler;
5075
0
  rb->error_handler_data = &pbi->common;
5076
0
  rb->bit_buffer = data;
5077
0
  rb->bit_buffer_end = data_end;
5078
0
  return rb;
5079
0
}
5080
5081
void av1_read_frame_size(struct aom_read_bit_buffer *rb, int num_bits_width,
5082
0
                         int num_bits_height, int *width, int *height) {
5083
0
  *width = aom_rb_read_literal(rb, num_bits_width) + 1;
5084
0
  *height = aom_rb_read_literal(rb, num_bits_height) + 1;
5085
0
}
5086
5087
0
BITSTREAM_PROFILE av1_read_profile(struct aom_read_bit_buffer *rb) {
5088
0
  int profile = aom_rb_read_literal(rb, PROFILE_BITS);
5089
0
  return (BITSTREAM_PROFILE)profile;
5090
0
}
5091
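
The three helpers above compose naturally. The hypothetical sketch below (not libaom code) wraps a raw header payload in a read bit buffer, reads the profile, then reads a frame size; the 15-bit field widths are illustrative assumptions, since the real widths come from the sequence header.

// Hypothetical sketch: parse a profile and a frame size from a raw payload.
static void parse_header_prefix_sketch(AV1Decoder *pbi, const uint8_t *payload,
                                       const uint8_t *payload_end) {
  struct aom_read_bit_buffer rb_storage;
  struct aom_read_bit_buffer *rb =
      av1_init_read_bit_buffer(pbi, &rb_storage, payload, payload_end);

  const BITSTREAM_PROFILE profile = av1_read_profile(rb);

  int width, height;
  // av1_read_frame_size() adds 1 to each literal, so a 15-bit field holding
  // 1919 decodes to a width of 1920.
  av1_read_frame_size(rb, /*num_bits_width=*/15, /*num_bits_height=*/15,
                      &width, &height);
  (void)profile;
  (void)width;
  (void)height;
}
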
5092
#if !CONFIG_REALTIME_ONLY
5093
0
static AOM_INLINE void superres_post_decode(AV1Decoder *pbi) {
5094
0
  AV1_COMMON *const cm = &pbi->common;
5095
0
  BufferPool *const pool = cm->buffer_pool;
5096
5097
0
  if (!av1_superres_scaled(cm)) return;
5098
0
  assert(!cm->features.all_lossless);
5099
5100
0
  av1_superres_upscale(cm, pool);
5101
0
}
5102
#endif
5103
5104
uint32_t av1_decode_frame_headers_and_setup(AV1Decoder *pbi,
5105
                                            struct aom_read_bit_buffer *rb,
5106
0
                                            int trailing_bits_present) {
5107
0
  AV1_COMMON *const cm = &pbi->common;
5108
0
  const int num_planes = av1_num_planes(cm);
5109
0
  MACROBLOCKD *const xd = &pbi->dcb.xd;
5110
5111
#if CONFIG_BITSTREAM_DEBUG
5112
  aom_bitstream_queue_set_frame_read(cm->current_frame.order_hint * 2 +
5113
                                     cm->show_frame);
5114
#endif
5115
#if CONFIG_MISMATCH_DEBUG
5116
  mismatch_move_frame_idx_r();
5117
#endif
5118
5119
0
  for (int i = LAST_FRAME; i <= ALTREF_FRAME; ++i) {
5120
0
    cm->global_motion[i] = default_warp_params;
5121
0
    cm->cur_frame->global_motion[i] = default_warp_params;
5122
0
  }
5123
0
  xd->global_motion = cm->global_motion;
5124
5125
0
  read_uncompressed_header(pbi, rb);
5126
5127
0
  if (trailing_bits_present) av1_check_trailing_bits(pbi, rb);
5128
5129
0
  if (!cm->tiles.single_tile_decoding &&
5130
0
      (pbi->dec_tile_row >= 0 || pbi->dec_tile_col >= 0)) {
5131
0
    pbi->dec_tile_row = -1;
5132
0
    pbi->dec_tile_col = -1;
5133
0
  }
5134
5135
0
  const uint32_t uncomp_hdr_size =
5136
0
      (uint32_t)aom_rb_bytes_read(rb);  // Size of the uncompressed header
5137
0
  YV12_BUFFER_CONFIG *new_fb = &cm->cur_frame->buf;
5138
0
  xd->cur_buf = new_fb;
5139
0
  if (av1_allow_intrabc(cm)) {
5140
0
    av1_setup_scale_factors_for_frame(
5141
0
        &cm->sf_identity, xd->cur_buf->y_crop_width, xd->cur_buf->y_crop_height,
5142
0
        xd->cur_buf->y_crop_width, xd->cur_buf->y_crop_height);
5143
0
  }
5144
5145
  // Showing a frame directly.
5146
0
  if (cm->show_existing_frame) {
5147
0
    if (pbi->reset_decoder_state) {
5148
      // Use the default frame context values.
5149
0
      *cm->fc = *cm->default_frame_context;
5150
0
      if (!cm->fc->initialized)
5151
0
        aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
5152
0
                           "Uninitialized entropy context.");
5153
0
    }
5154
0
    return uncomp_hdr_size;
5155
0
  }
5156
5157
0
  cm->mi_params.setup_mi(&cm->mi_params);
5158
5159
0
  av1_calculate_ref_frame_side(cm);
5160
0
  if (cm->features.allow_ref_frame_mvs) av1_setup_motion_field(cm);
5161
5162
0
  av1_setup_block_planes(xd, cm->seq_params->subsampling_x,
5163
0
                         cm->seq_params->subsampling_y, num_planes);
5164
0
  if (cm->features.primary_ref_frame == PRIMARY_REF_NONE) {
5165
    // Use the default frame context values.
5166
0
    *cm->fc = *cm->default_frame_context;
5167
0
  } else {
5168
0
    *cm->fc = get_primary_ref_frame_buf(cm)->frame_context;
5169
0
  }
5170
0
  if (!cm->fc->initialized)
5171
0
    aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
5172
0
                       "Uninitialized entropy context.");
5173
5174
0
  pbi->dcb.corrupted = 0;
5175
0
  return uncomp_hdr_size;
5176
0
}
5177
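
To show how this entry point is meant to be driven, the hypothetical sketch below (not libaom code; the real caller in the OBU parsing layer also handles OBU framing, show_existing_frame output, and multiple tile groups) parses the frame header and then decodes all tiles via av1_decode_tg_tiles_and_wrapup(), which is defined later in this file.

// Hypothetical sketch: decode one frame whose header and tile payload have
// already been split out by the caller.
static void decode_one_frame_sketch(AV1Decoder *pbi, const uint8_t *hdr,
                                    const uint8_t *hdr_end,
                                    const uint8_t *tile_data,
                                    const uint8_t *tile_data_end) {
  struct aom_read_bit_buffer rb;
  av1_init_read_bit_buffer(pbi, &rb, hdr, hdr_end);

  // Parse the uncompressed frame header and set up per-frame decoder state.
  (void)av1_decode_frame_headers_and_setup(pbi, &rb,
                                           /*trailing_bits_present=*/1);

  // Decode every tile of the frame in one pass and run the in-loop filters.
  AV1_COMMON *const cm = &pbi->common;
  const int last_tile = cm->tiles.rows * cm->tiles.cols - 1;
  const uint8_t *p_data_end = NULL;
  av1_decode_tg_tiles_and_wrapup(pbi, tile_data, tile_data_end, &p_data_end,
                                 /*start_tile=*/0, /*end_tile=*/last_tile,
                                 /*initialize_flag=*/1);
}
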
5178
// Once-per-frame initialization
5179
0
static AOM_INLINE void setup_frame_info(AV1Decoder *pbi) {
5180
0
  AV1_COMMON *const cm = &pbi->common;
5181
5182
0
#if !CONFIG_REALTIME_ONLY
5183
0
  if (cm->rst_info[0].frame_restoration_type != RESTORE_NONE ||
5184
0
      cm->rst_info[1].frame_restoration_type != RESTORE_NONE ||
5185
0
      cm->rst_info[2].frame_restoration_type != RESTORE_NONE) {
5186
0
    av1_alloc_restoration_buffers(cm);
5187
0
  }
5188
0
#endif
5189
0
  const int use_highbd = cm->seq_params->use_highbitdepth;
5190
0
  const int buf_size = MC_TEMP_BUF_PELS << use_highbd;
5191
0
  if (pbi->td.mc_buf_size != buf_size) {
5192
0
    av1_free_mc_tmp_buf(&pbi->td);
5193
0
    allocate_mc_tmp_buf(cm, &pbi->td, buf_size, use_highbd);
5194
0
  }
5195
0
}
5196
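
As a worked example of the scratch-buffer sizing above: with the usual libaom build constants MAX_SB_SIZE = 128 and AOM_INTERP_EXTEND = 4 (an assumption worth checking against the configured build), MC_TEMP_BUF_PELS evaluates to (128*2 + 4*2) * (128*2 + 4*2) = 264 * 264 = 69,696, so buf_size comes out to 69,696 for 8-bit streams and doubles to 139,392 when use_highbitdepth is set, reflecting the two bytes needed per pel at high bit depth.
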
5197
void av1_decode_tg_tiles_and_wrapup(AV1Decoder *pbi, const uint8_t *data,
5198
                                    const uint8_t *data_end,
5199
                                    const uint8_t **p_data_end, int start_tile,
5200
0
                                    int end_tile, int initialize_flag) {
5201
0
  AV1_COMMON *const cm = &pbi->common;
5202
0
  CommonTileParams *const tiles = &cm->tiles;
5203
0
  MACROBLOCKD *const xd = &pbi->dcb.xd;
5204
0
  const int tile_count_tg = end_tile - start_tile + 1;
5205
5206
0
  if (initialize_flag) setup_frame_info(pbi);
5207
0
  const int num_planes = av1_num_planes(cm);
5208
5209
0
  if (pbi->max_threads > 1 && !(tiles->large_scale && !pbi->ext_tile_debug) &&
5210
0
      pbi->row_mt)
5211
0
    *p_data_end =
5212
0
        decode_tiles_row_mt(pbi, data, data_end, start_tile, end_tile);
5213
0
  else if (pbi->max_threads > 1 && tile_count_tg > 1 &&
5214
0
           !(tiles->large_scale && !pbi->ext_tile_debug))
5215
0
    *p_data_end = decode_tiles_mt(pbi, data, data_end, start_tile, end_tile);
5216
0
  else
5217
0
    *p_data_end = decode_tiles(pbi, data, data_end, start_tile, end_tile);
5218
5219
  // If the bit stream is monochrome, set the U and V buffers to a constant.
5220
0
  if (num_planes < 3) {
5221
0
    set_planes_to_neutral_grey(cm->seq_params, xd->cur_buf, 1);
5222
0
  }
5223
5224
0
  if (end_tile != tiles->rows * tiles->cols - 1) {
5225
0
    return;
5226
0
  }
5227
5228
0
  av1_alloc_cdef_buffers(cm, &pbi->cdef_worker, &pbi->cdef_sync,
5229
0
                         pbi->num_workers, 1);
5230
0
  av1_alloc_cdef_sync(cm, &pbi->cdef_sync, pbi->num_workers);
5231
5232
0
  if (!cm->features.allow_intrabc && !tiles->single_tile_decoding) {
5233
0
    if (cm->lf.filter_level[0] || cm->lf.filter_level[1]) {
5234
0
      av1_loop_filter_frame_mt(&cm->cur_frame->buf, cm, &pbi->dcb.xd, 0,
5235
0
                               num_planes, 0, pbi->tile_workers,
5236
0
                               pbi->num_workers, &pbi->lf_row_sync, 0);
5237
0
    }
5238
5239
0
    const int do_cdef =
5240
0
        !pbi->skip_loop_filter && !cm->features.coded_lossless &&
5241
0
        (cm->cdef_info.cdef_bits || cm->cdef_info.cdef_strengths[0] ||
5242
0
         cm->cdef_info.cdef_uv_strengths[0]);
5243
0
    const int do_superres = av1_superres_scaled(cm);
5244
0
    const int optimized_loop_restoration = !do_cdef && !do_superres;
5245
5246
0
#if !CONFIG_REALTIME_ONLY
5247
0
    const int do_loop_restoration =
5248
0
        cm->rst_info[0].frame_restoration_type != RESTORE_NONE ||
5249
0
        cm->rst_info[1].frame_restoration_type != RESTORE_NONE ||
5250
0
        cm->rst_info[2].frame_restoration_type != RESTORE_NONE;
5251
0
    if (!optimized_loop_restoration) {
5252
0
      if (do_loop_restoration)
5253
0
        av1_loop_restoration_save_boundary_lines(&pbi->common.cur_frame->buf,
5254
0
                                                 cm, 0);
5255
5256
0
      if (do_cdef) {
5257
0
        if (pbi->num_workers > 1) {
5258
0
          av1_cdef_frame_mt(cm, &pbi->dcb.xd, pbi->cdef_worker,
5259
0
                            pbi->tile_workers, &pbi->cdef_sync,
5260
0
                            pbi->num_workers, av1_cdef_init_fb_row_mt);
5261
0
        } else {
5262
0
          av1_cdef_frame(&pbi->common.cur_frame->buf, cm, &pbi->dcb.xd,
5263
0
                         av1_cdef_init_fb_row);
5264
0
        }
5265
0
      }
5266
5267
0
      superres_post_decode(pbi);
5268
5269
0
      if (do_loop_restoration) {
5270
0
        av1_loop_restoration_save_boundary_lines(&pbi->common.cur_frame->buf,
5271
0
                                                 cm, 1);
5272
0
        if (pbi->num_workers > 1) {
5273
0
          av1_loop_restoration_filter_frame_mt(
5274
0
              (YV12_BUFFER_CONFIG *)xd->cur_buf, cm, optimized_loop_restoration,
5275
0
              pbi->tile_workers, pbi->num_workers, &pbi->lr_row_sync,
5276
0
              &pbi->lr_ctxt);
5277
0
        } else {
5278
0
          av1_loop_restoration_filter_frame((YV12_BUFFER_CONFIG *)xd->cur_buf,
5279
0
                                            cm, optimized_loop_restoration,
5280
0
                                            &pbi->lr_ctxt);
5281
0
        }
5282
0
      }
5283
0
    } else {
5284
      // Neither CDEF nor superres is needed, so provide the optimized version
5285
      // of the loop restoration filter.
5286
0
      if (do_loop_restoration) {
5287
0
        if (pbi->num_workers > 1) {
5288
0
          av1_loop_restoration_filter_frame_mt(
5289
0
              (YV12_BUFFER_CONFIG *)xd->cur_buf, cm, optimized_loop_restoration,
5290
0
              pbi->tile_workers, pbi->num_workers, &pbi->lr_row_sync,
5291
0
              &pbi->lr_ctxt);
5292
0
        } else {
5293
0
          av1_loop_restoration_filter_frame((YV12_BUFFER_CONFIG *)xd->cur_buf,
5294
0
                                            cm, optimized_loop_restoration,
5295
0
                                            &pbi->lr_ctxt);
5296
0
        }
5297
0
      }
5298
0
    }
5299
#else
5300
    if (!optimized_loop_restoration) {
5301
      if (do_cdef) {
5302
        if (pbi->num_workers > 1) {
5303
          av1_cdef_frame_mt(cm, &pbi->dcb.xd, pbi->cdef_worker,
5304
                            pbi->tile_workers, &pbi->cdef_sync,
5305
                            pbi->num_workers, av1_cdef_init_fb_row_mt);
5306
        } else {
5307
          av1_cdef_frame(&pbi->common.cur_frame->buf, cm, &pbi->dcb.xd,
5308
                         av1_cdef_init_fb_row);
5309
        }
5310
      }
5311
    }
5312
#endif  // !CONFIG_REALTIME_ONLY
5313
0
  }
5314
5315
0
  if (!pbi->dcb.corrupted) {
5316
0
    if (cm->features.refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
5317
0
      assert(pbi->context_update_tile_id < pbi->allocated_tiles);
5318
0
      *cm->fc = pbi->tile_data[pbi->context_update_tile_id].tctx;
5319
0
      av1_reset_cdf_symbol_counters(cm->fc);
5320
0
    }
5321
0
  } else {
5322
0
    aom_internal_error(&pbi->error, AOM_CODEC_CORRUPT_FRAME,
5323
0
                       "Decode failed. Frame data is corrupted.");
5324
0
  }
5325
5326
#if CONFIG_INSPECTION
5327
  if (pbi->inspect_cb != NULL) {
5328
    (*pbi->inspect_cb)(pbi, pbi->inspect_ctx);
5329
  }
5330
#endif
5331
5332
  // Non-frame-parallel path: update the frame context here.
5333
0
  if (!tiles->large_scale) {
5334
0
    cm->cur_frame->frame_context = *cm->fc;
5335
0
  }
5336
0
}
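
For readers tracing the wrap-up path, the comment block below condenses the post-decode filter order applied by av1_decode_tg_tiles_and_wrapup() above. It is a reading aid written for this report, not code that exists in the tree; CDEF and loop restoration dispatch to their _mt variants when more than one worker is available.

// Post-decode filter order (non-lossless, non-intrabc, non-optimized path):
//   1. av1_loop_filter_frame_mt()                 - deblocking
//   2. av1_loop_restoration_save_boundary_lines() - pre-CDEF boundary save
//   3. av1_cdef_frame()                           - CDEF
//   4. superres_post_decode()                     - horizontal superres upscale
//   5. av1_loop_restoration_save_boundary_lines() - post-CDEF boundary save
//   6. av1_loop_restoration_filter_frame()        - Wiener / self-guided
// When neither CDEF nor superres is needed, steps 2-5 are skipped and loop
// restoration runs in its optimized mode.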