Coverage Report

Created: 2022-08-24 06:17

/src/aom/av1/encoder/encodeframe.c
Line
Count
Source (jump to first uncovered line)
1
/*
2
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
3
 *
4
 * This source code is subject to the terms of the BSD 2 Clause License and
5
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
6
 * was not distributed with this source code in the LICENSE file, you can
7
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
8
 * Media Patent License 1.0 was not distributed with this source code in the
9
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
10
 */
11
12
#include <limits.h>
13
#include <float.h>
14
#include <math.h>
15
#include <stdbool.h>
16
#include <stdio.h>
17
18
#include "av1/common/common_data.h"
19
#include "config/aom_config.h"
20
#include "config/aom_dsp_rtcd.h"
21
#include "config/av1_rtcd.h"
22
23
#include "aom_dsp/aom_dsp_common.h"
24
#include "aom_dsp/binary_codes_writer.h"
25
#include "aom_ports/mem.h"
26
#include "aom_ports/aom_timer.h"
27
28
#if CONFIG_MISMATCH_DEBUG
29
#include "aom_util/debug_util.h"
30
#endif  // CONFIG_MISMATCH_DEBUG
31
32
#include "av1/common/cfl.h"
33
#include "av1/common/common.h"
34
#include "av1/common/entropy.h"
35
#include "av1/common/entropymode.h"
36
#include "av1/common/idct.h"
37
#include "av1/common/mv.h"
38
#include "av1/common/mvref_common.h"
39
#include "av1/common/pred_common.h"
40
#include "av1/common/quant_common.h"
41
#include "av1/common/reconintra.h"
42
#include "av1/common/reconinter.h"
43
#include "av1/common/seg_common.h"
44
#include "av1/common/tile_common.h"
45
#include "av1/common/warped_motion.h"
46
47
#include "av1/encoder/allintra_vis.h"
48
#include "av1/encoder/aq_complexity.h"
49
#include "av1/encoder/aq_cyclicrefresh.h"
50
#include "av1/encoder/aq_variance.h"
51
#include "av1/encoder/global_motion_facade.h"
52
#include "av1/encoder/encodeframe.h"
53
#include "av1/encoder/encodeframe_utils.h"
54
#include "av1/encoder/encodemb.h"
55
#include "av1/encoder/encodemv.h"
56
#include "av1/encoder/encodetxb.h"
57
#include "av1/encoder/ethread.h"
58
#include "av1/encoder/extend.h"
59
#include "av1/encoder/intra_mode_search_utils.h"
60
#include "av1/encoder/ml.h"
61
#include "av1/encoder/motion_search_facade.h"
62
#include "av1/encoder/partition_strategy.h"
63
#if !CONFIG_REALTIME_ONLY
64
#include "av1/encoder/partition_model_weights.h"
65
#endif
66
#include "av1/encoder/partition_search.h"
67
#include "av1/encoder/rd.h"
68
#include "av1/encoder/rdopt.h"
69
#include "av1/encoder/reconinter_enc.h"
70
#include "av1/encoder/segmentation.h"
71
#include "av1/encoder/tokenize.h"
72
#include "av1/encoder/tpl_model.h"
73
#include "av1/encoder/var_based_part.h"
74
75
#if CONFIG_TUNE_VMAF
76
#include "av1/encoder/tune_vmaf.h"
77
#endif
78
79
/*!\cond */
80
// This is used as a reference when computing the source variance for the
81
//  purposes of activity masking.
82
// Eventually this should be replaced by custom no-reference routines,
83
//  which will be faster.
84
const uint8_t AV1_VAR_OFFS[MAX_SB_SIZE] = {
85
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
86
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
87
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
88
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
89
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
90
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
91
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
92
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
93
  128, 128, 128, 128, 128, 128, 128, 128
94
};
95
96
static const uint16_t AV1_HIGH_VAR_OFFS_8[MAX_SB_SIZE] = {
97
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
98
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
99
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
100
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
101
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
102
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
103
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
104
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
105
  128, 128, 128, 128, 128, 128, 128, 128
106
};
107
108
static const uint16_t AV1_HIGH_VAR_OFFS_10[MAX_SB_SIZE] = {
109
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
110
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
111
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
112
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
113
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
114
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
115
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
116
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
117
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
118
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
119
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
120
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
121
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
122
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
123
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
124
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4
125
};
126
127
static const uint16_t AV1_HIGH_VAR_OFFS_12[MAX_SB_SIZE] = {
128
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
129
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
130
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
131
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
132
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
133
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
134
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
135
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
136
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
137
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
138
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
139
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
140
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
141
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
142
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
143
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
144
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
145
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
146
  128 * 16, 128 * 16
147
};
148
/*!\endcond */
149
150
unsigned int av1_get_sby_perpixel_variance(const AV1_COMP *cpi,
151
                                           const struct buf_2d *ref,
152
0
                                           BLOCK_SIZE bs) {
153
0
  unsigned int sse;
154
0
  const unsigned int var =
155
0
      cpi->ppi->fn_ptr[bs].vf(ref->buf, ref->stride, AV1_VAR_OFFS, 0, &sse);
156
0
  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
157
0
}
158
159
unsigned int av1_high_get_sby_perpixel_variance(const AV1_COMP *cpi,
160
                                                const struct buf_2d *ref,
161
0
                                                BLOCK_SIZE bs, int bd) {
162
0
  unsigned int var, sse;
163
0
  assert(bd == 8 || bd == 10 || bd == 12);
164
0
  const int off_index = (bd - 8) >> 1;
165
0
  const uint16_t *high_var_offs[3] = { AV1_HIGH_VAR_OFFS_8,
166
0
                                       AV1_HIGH_VAR_OFFS_10,
167
0
                                       AV1_HIGH_VAR_OFFS_12 };
168
0
  var = cpi->ppi->fn_ptr[bs].vf(ref->buf, ref->stride,
169
0
                                CONVERT_TO_BYTEPTR(high_var_offs[off_index]), 0,
170
0
                                &sse);
171
0
  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
172
0
}
173
174
void av1_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src,
175
                          int mi_row, int mi_col, const int num_planes,
176
0
                          BLOCK_SIZE bsize) {
177
  // Set current frame pointer.
178
0
  x->e_mbd.cur_buf = src;
179
180
  // We use AOMMIN(num_planes, MAX_MB_PLANE) instead of num_planes to quiet
181
  // the static analysis warnings.
182
0
  for (int i = 0; i < AOMMIN(num_planes, MAX_MB_PLANE); i++) {
183
0
    const int is_uv = i > 0;
184
0
    setup_pred_plane(
185
0
        &x->plane[i].src, bsize, src->buffers[i], src->crop_widths[is_uv],
186
0
        src->crop_heights[is_uv], src->strides[is_uv], mi_row, mi_col, NULL,
187
0
        x->e_mbd.plane[i].subsampling_x, x->e_mbd.plane[i].subsampling_y);
188
0
  }
189
0
}
190
191
#if !CONFIG_REALTIME_ONLY
192
/*!\brief Assigns different quantization parameters to each super
193
 * block based on its TPL weight.
194
 *
195
 * \ingroup tpl_modelling
196
 *
197
 * \param[in]     cpi         Top level encoder instance structure
198
 * \param[in,out] td          Thread data structure
199
 * \param[in,out] x           Macro block level data for this block.
200
 * \param[in]     tile_info   Tile information / identification
201
 * \param[in]     mi_row      Block row (in "MI_SIZE" units) index
202
 * \param[in]     mi_col      Block column (in "MI_SIZE" units) index
203
 * \param[out]    num_planes  Number of image planes (e.g. Y,U,V)
204
 *
205
 * \return No return value but updates macroblock and thread data
206
 * related to the q / q delta to be used.
207
 */
208
static AOM_INLINE void setup_delta_q(AV1_COMP *const cpi, ThreadData *td,
209
                                     MACROBLOCK *const x,
210
                                     const TileInfo *const tile_info,
211
0
                                     int mi_row, int mi_col, int num_planes) {
212
0
  AV1_COMMON *const cm = &cpi->common;
213
0
  const CommonModeInfoParams *const mi_params = &cm->mi_params;
214
0
  const DeltaQInfo *const delta_q_info = &cm->delta_q_info;
215
0
  assert(delta_q_info->delta_q_present_flag);
216
217
0
  const BLOCK_SIZE sb_size = cm->seq_params->sb_size;
218
  // Delta-q modulation based on variance
219
0
  av1_setup_src_planes(x, cpi->source, mi_row, mi_col, num_planes, sb_size);
220
221
0
  const int delta_q_res = delta_q_info->delta_q_res;
222
0
  int current_qindex = cm->quant_params.base_qindex;
223
0
  if (cpi->oxcf.q_cfg.deltaq_mode == DELTA_Q_PERCEPTUAL) {
224
0
    if (DELTA_Q_PERCEPTUAL_MODULATION == 1) {
225
0
      const int block_wavelet_energy_level =
226
0
          av1_block_wavelet_energy_level(cpi, x, sb_size);
227
0
      x->sb_energy_level = block_wavelet_energy_level;
228
0
      current_qindex = av1_compute_q_from_energy_level_deltaq_mode(
229
0
          cpi, block_wavelet_energy_level);
230
0
    } else {
231
0
      const int block_var_level = av1_log_block_var(cpi, x, sb_size);
232
0
      x->sb_energy_level = block_var_level;
233
0
      current_qindex =
234
0
          av1_compute_q_from_energy_level_deltaq_mode(cpi, block_var_level);
235
0
    }
236
0
  } else if (cpi->oxcf.q_cfg.deltaq_mode == DELTA_Q_OBJECTIVE &&
237
0
             cpi->oxcf.algo_cfg.enable_tpl_model) {
238
    // Setup deltaq based on tpl stats
239
0
    current_qindex =
240
0
        av1_get_q_for_deltaq_objective(cpi, td, NULL, sb_size, mi_row, mi_col);
241
0
  } else if (cpi->oxcf.q_cfg.deltaq_mode == DELTA_Q_PERCEPTUAL_AI) {
242
0
    current_qindex = av1_get_sbq_perceptual_ai(cpi, sb_size, mi_row, mi_col);
243
0
  } else if (cpi->oxcf.q_cfg.deltaq_mode == DELTA_Q_USER_RATING_BASED) {
244
0
    current_qindex = av1_get_sbq_user_rating_based(cpi, mi_row, mi_col);
245
0
  } else if (cpi->oxcf.q_cfg.enable_hdr_deltaq) {
246
0
    current_qindex = av1_get_q_for_hdr(cpi, x, sb_size, mi_row, mi_col);
247
0
  }
248
249
0
  MACROBLOCKD *const xd = &x->e_mbd;
250
0
  current_qindex = av1_adjust_q_from_delta_q_res(
251
0
      delta_q_res, xd->current_base_qindex, current_qindex);
252
253
0
  x->delta_qindex = current_qindex - cm->quant_params.base_qindex;
254
0
  av1_set_offsets(cpi, tile_info, x, mi_row, mi_col, sb_size);
255
0
  xd->mi[0]->current_qindex = current_qindex;
256
0
  av1_init_plane_quantizers(cpi, x, xd->mi[0]->segment_id);
257
258
  // keep track of any non-zero delta-q used
259
0
  td->deltaq_used |= (x->delta_qindex != 0);
260
261
0
  if (cpi->oxcf.tool_cfg.enable_deltalf_mode) {
262
0
    const int delta_lf_res = delta_q_info->delta_lf_res;
263
0
    const int lfmask = ~(delta_lf_res - 1);
264
0
    const int delta_lf_from_base =
265
0
        ((x->delta_qindex / 4 + delta_lf_res / 2) & lfmask);
266
0
    const int8_t delta_lf =
267
0
        (int8_t)clamp(delta_lf_from_base, -MAX_LOOP_FILTER, MAX_LOOP_FILTER);
268
0
    const int frame_lf_count =
269
0
        av1_num_planes(cm) > 1 ? FRAME_LF_COUNT : FRAME_LF_COUNT - 2;
270
0
    const int mib_size = cm->seq_params->mib_size;
271
272
    // pre-set the delta lf for loop filter. Note that this value is set
273
    // before mi is assigned for each block in current superblock
274
0
    for (int j = 0; j < AOMMIN(mib_size, mi_params->mi_rows - mi_row); j++) {
275
0
      for (int k = 0; k < AOMMIN(mib_size, mi_params->mi_cols - mi_col); k++) {
276
0
        const int grid_idx = get_mi_grid_idx(mi_params, mi_row + j, mi_col + k);
277
0
        mi_params->mi_alloc[grid_idx].delta_lf_from_base = delta_lf;
278
0
        for (int lf_id = 0; lf_id < frame_lf_count; ++lf_id) {
279
0
          mi_params->mi_alloc[grid_idx].delta_lf[lf_id] = delta_lf;
280
0
        }
281
0
      }
282
0
    }
283
0
  }
284
0
}
285
286
static void init_ref_frame_space(AV1_COMP *cpi, ThreadData *td, int mi_row,
287
0
                                 int mi_col) {
288
0
  const AV1_COMMON *cm = &cpi->common;
289
0
  const GF_GROUP *const gf_group = &cpi->ppi->gf_group;
290
0
  const CommonModeInfoParams *const mi_params = &cm->mi_params;
291
0
  MACROBLOCK *x = &td->mb;
292
0
  const int frame_idx = cpi->gf_frame_index;
293
0
  TplParams *const tpl_data = &cpi->ppi->tpl_data;
294
0
  const uint8_t block_mis_log2 = tpl_data->tpl_stats_block_mis_log2;
295
296
0
  av1_zero(x->tpl_keep_ref_frame);
297
298
0
  if (!av1_tpl_stats_ready(tpl_data, frame_idx)) return;
299
0
  if (!is_frame_tpl_eligible(gf_group, cpi->gf_frame_index)) return;
300
0
  if (cpi->oxcf.q_cfg.aq_mode != NO_AQ) return;
301
302
0
  const int is_overlay =
303
0
      cpi->ppi->gf_group.update_type[frame_idx] == OVERLAY_UPDATE;
304
0
  if (is_overlay) {
305
0
    memset(x->tpl_keep_ref_frame, 1, sizeof(x->tpl_keep_ref_frame));
306
0
    return;
307
0
  }
308
309
0
  TplDepFrame *tpl_frame = &tpl_data->tpl_frame[frame_idx];
310
0
  TplDepStats *tpl_stats = tpl_frame->tpl_stats_ptr;
311
0
  const int tpl_stride = tpl_frame->stride;
312
0
  int64_t inter_cost[INTER_REFS_PER_FRAME] = { 0 };
313
0
  const int step = 1 << block_mis_log2;
314
0
  const BLOCK_SIZE sb_size = cm->seq_params->sb_size;
315
316
0
  const int mi_row_end =
317
0
      AOMMIN(mi_size_high[sb_size] + mi_row, mi_params->mi_rows);
318
0
  const int mi_cols_sr = av1_pixels_to_mi(cm->superres_upscaled_width);
319
0
  const int mi_col_sr =
320
0
      coded_to_superres_mi(mi_col, cm->superres_scale_denominator);
321
0
  const int mi_col_end_sr =
322
0
      AOMMIN(coded_to_superres_mi(mi_col + mi_size_wide[sb_size],
323
0
                                  cm->superres_scale_denominator),
324
0
             mi_cols_sr);
325
0
  const int row_step = step;
326
0
  const int col_step_sr =
327
0
      coded_to_superres_mi(step, cm->superres_scale_denominator);
328
0
  for (int row = mi_row; row < mi_row_end; row += row_step) {
329
0
    for (int col = mi_col_sr; col < mi_col_end_sr; col += col_step_sr) {
330
0
      const TplDepStats *this_stats =
331
0
          &tpl_stats[av1_tpl_ptr_pos(row, col, tpl_stride, block_mis_log2)];
332
0
      int64_t tpl_pred_error[INTER_REFS_PER_FRAME] = { 0 };
333
      // Find the winner ref frame idx for the current block
334
0
      int64_t best_inter_cost = this_stats->pred_error[0];
335
0
      int best_rf_idx = 0;
336
0
      for (int idx = 1; idx < INTER_REFS_PER_FRAME; ++idx) {
337
0
        if ((this_stats->pred_error[idx] < best_inter_cost) &&
338
0
            (this_stats->pred_error[idx] != 0)) {
339
0
          best_inter_cost = this_stats->pred_error[idx];
340
0
          best_rf_idx = idx;
341
0
        }
342
0
      }
343
      // tpl_pred_error is the pred_error reduction of best_ref w.r.t.
344
      // LAST_FRAME.
345
0
      tpl_pred_error[best_rf_idx] = this_stats->pred_error[best_rf_idx] -
346
0
                                    this_stats->pred_error[LAST_FRAME - 1];
347
348
0
      for (int rf_idx = 1; rf_idx < INTER_REFS_PER_FRAME; ++rf_idx)
349
0
        inter_cost[rf_idx] += tpl_pred_error[rf_idx];
350
0
    }
351
0
  }
352
353
0
  int rank_index[INTER_REFS_PER_FRAME - 1];
354
0
  for (int idx = 0; idx < INTER_REFS_PER_FRAME - 1; ++idx) {
355
0
    rank_index[idx] = idx + 1;
356
0
    for (int i = idx; i > 0; --i) {
357
0
      if (inter_cost[rank_index[i - 1]] > inter_cost[rank_index[i]]) {
358
0
        const int tmp = rank_index[i - 1];
359
0
        rank_index[i - 1] = rank_index[i];
360
0
        rank_index[i] = tmp;
361
0
      }
362
0
    }
363
0
  }
364
365
0
  x->tpl_keep_ref_frame[INTRA_FRAME] = 1;
366
0
  x->tpl_keep_ref_frame[LAST_FRAME] = 1;
367
368
0
  int cutoff_ref = 0;
369
0
  for (int idx = 0; idx < INTER_REFS_PER_FRAME - 1; ++idx) {
370
0
    x->tpl_keep_ref_frame[rank_index[idx] + LAST_FRAME] = 1;
371
0
    if (idx > 2) {
372
0
      if (!cutoff_ref) {
373
        // If the predictive coding gains are smaller than the previous more
374
        // relevant frame over certain amount, discard this frame and all the
375
        // frames afterwards.
376
0
        if (llabs(inter_cost[rank_index[idx]]) <
377
0
                llabs(inter_cost[rank_index[idx - 1]]) / 8 ||
378
0
            inter_cost[rank_index[idx]] == 0)
379
0
          cutoff_ref = 1;
380
0
      }
381
382
0
      if (cutoff_ref) x->tpl_keep_ref_frame[rank_index[idx] + LAST_FRAME] = 0;
383
0
    }
384
0
  }
385
0
}
386
387
static AOM_INLINE void adjust_rdmult_tpl_model(AV1_COMP *cpi, MACROBLOCK *x,
388
0
                                               int mi_row, int mi_col) {
389
0
  const BLOCK_SIZE sb_size = cpi->common.seq_params->sb_size;
390
0
  const int orig_rdmult = cpi->rd.RDMULT;
391
392
0
  assert(IMPLIES(cpi->ppi->gf_group.size > 0,
393
0
                 cpi->gf_frame_index < cpi->ppi->gf_group.size));
394
0
  const int gf_group_index = cpi->gf_frame_index;
395
0
  if (cpi->oxcf.algo_cfg.enable_tpl_model && cpi->oxcf.q_cfg.aq_mode == NO_AQ &&
396
0
      cpi->oxcf.q_cfg.deltaq_mode == NO_DELTA_Q && gf_group_index > 0 &&
397
0
      cpi->ppi->gf_group.update_type[gf_group_index] == ARF_UPDATE) {
398
0
    const int dr =
399
0
        av1_get_rdmult_delta(cpi, sb_size, mi_row, mi_col, orig_rdmult);
400
0
    x->rdmult = dr;
401
0
  }
402
0
}
403
#endif  // !CONFIG_REALTIME_ONLY
404
405
#if CONFIG_RT_ML_PARTITIONING
406
// Get a prediction(stored in x->est_pred) for the whole superblock.
407
static void get_estimated_pred(AV1_COMP *cpi, const TileInfo *const tile,
408
                               MACROBLOCK *x, int mi_row, int mi_col) {
409
  AV1_COMMON *const cm = &cpi->common;
410
  const int is_key_frame = frame_is_intra_only(cm);
411
  MACROBLOCKD *xd = &x->e_mbd;
412
413
  // TODO(kyslov) Extend to 128x128
414
  assert(cm->seq_params->sb_size == BLOCK_64X64);
415
416
  av1_set_offsets(cpi, tile, x, mi_row, mi_col, BLOCK_64X64);
417
418
  if (!is_key_frame) {
419
    MB_MODE_INFO *mi = xd->mi[0];
420
    const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_yv12_buf(cm, LAST_FRAME);
421
422
    assert(yv12 != NULL);
423
424
    av1_setup_pre_planes(xd, 0, yv12, mi_row, mi_col,
425
                         get_ref_scale_factors(cm, LAST_FRAME), 1);
426
    mi->ref_frame[0] = LAST_FRAME;
427
    mi->ref_frame[1] = NONE;
428
    mi->bsize = BLOCK_64X64;
429
    mi->mv[0].as_int = 0;
430
    mi->interp_filters = av1_broadcast_interp_filter(BILINEAR);
431
432
    set_ref_ptrs(cm, xd, mi->ref_frame[0], mi->ref_frame[1]);
433
434
    xd->plane[0].dst.buf = x->est_pred;
435
    xd->plane[0].dst.stride = 64;
436
    av1_enc_build_inter_predictor_y(xd, mi_row, mi_col);
437
  } else {
438
#if CONFIG_AV1_HIGHBITDEPTH
439
    switch (xd->bd) {
440
      case 8: memset(x->est_pred, 128, 64 * 64 * sizeof(x->est_pred[0])); break;
441
      case 10:
442
        memset(x->est_pred, 128 * 4, 64 * 64 * sizeof(x->est_pred[0]));
443
        break;
444
      case 12:
445
        memset(x->est_pred, 128 * 16, 64 * 64 * sizeof(x->est_pred[0]));
446
        break;
447
    }
448
#else
449
    memset(x->est_pred, 128, 64 * 64 * sizeof(x->est_pred[0]));
450
#endif  // CONFIG_AV1_HIGHBITDEPTH
451
  }
452
}
453
#endif  // CONFIG_RT_ML_PARTITIONING
454
455
0
#define AVG_CDF_WEIGHT_LEFT 3
456
0
#define AVG_CDF_WEIGHT_TOP_RIGHT 1
457
458
/*!\brief Encode a superblock (minimal RD search involved)
459
 *
460
 * \ingroup partition_search
461
 * Encodes the superblock by a pre-determined partition pattern, only minor
462
 * rd-based searches are allowed to adjust the initial pattern. It is only used
463
 * by realtime encoding.
464
 */
465
static AOM_INLINE void encode_nonrd_sb(AV1_COMP *cpi, ThreadData *td,
466
                                       TileDataEnc *tile_data, TokenExtra **tp,
467
                                       const int mi_row, const int mi_col,
468
0
                                       const int seg_skip) {
469
0
  AV1_COMMON *const cm = &cpi->common;
470
0
  MACROBLOCK *const x = &td->mb;
471
0
  const SPEED_FEATURES *const sf = &cpi->sf;
472
0
  const TileInfo *const tile_info = &tile_data->tile_info;
473
0
  MB_MODE_INFO **mi = cm->mi_params.mi_grid_base +
474
0
                      get_mi_grid_idx(&cm->mi_params, mi_row, mi_col);
475
0
  const BLOCK_SIZE sb_size = cm->seq_params->sb_size;
476
477
  // Grade the temporal variation of the sb, the grade will be used to decide
478
  // fast mode search strategy for coding blocks
479
0
  if (sf->rt_sf.source_metrics_sb_nonrd &&
480
0
      cpi->svc.number_spatial_layers <= 1 &&
481
0
      cm->current_frame.frame_type != KEY_FRAME) {
482
0
    int offset = cpi->source->y_stride * (mi_row << 2) + (mi_col << 2);
483
0
    av1_source_content_sb(cpi, x, offset);
484
0
  }
485
#if CONFIG_RT_ML_PARTITIONING
486
  if (sf->part_sf.partition_search_type == ML_BASED_PARTITION) {
487
    PC_TREE *const pc_root = av1_alloc_pc_tree_node(sb_size);
488
    RD_STATS dummy_rdc;
489
    get_estimated_pred(cpi, tile_info, x, mi_row, mi_col);
490
    av1_nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col,
491
                             BLOCK_64X64, &dummy_rdc, 1, INT64_MAX, pc_root);
492
    av1_free_pc_tree_recursive(pc_root, av1_num_planes(cm), 0, 0);
493
    return;
494
  }
495
#endif
496
  // Set the partition
497
0
  if (sf->part_sf.partition_search_type == FIXED_PARTITION || seg_skip) {
498
    // set a fixed-size partition
499
0
    av1_set_offsets(cpi, tile_info, x, mi_row, mi_col, sb_size);
500
0
    const BLOCK_SIZE bsize =
501
0
        seg_skip ? sb_size : sf->part_sf.fixed_partition_size;
502
0
    av1_set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize);
503
0
  } else if (sf->part_sf.partition_search_type == VAR_BASED_PARTITION) {
504
    // set a variance-based partition
505
0
    av1_set_offsets(cpi, tile_info, x, mi_row, mi_col, sb_size);
506
0
    av1_choose_var_based_partitioning(cpi, tile_info, td, x, mi_row, mi_col);
507
0
  }
508
0
  assert(sf->part_sf.partition_search_type == FIXED_PARTITION || seg_skip ||
509
0
         sf->part_sf.partition_search_type == VAR_BASED_PARTITION);
510
0
  set_cb_offsets(td->mb.cb_offset, 0, 0);
511
512
  // Adjust and encode the superblock
513
0
  PC_TREE *const pc_root = av1_alloc_pc_tree_node(sb_size);
514
515
  // Initialize the flag to skip cdef to 1.
516
0
  if (sf->rt_sf.skip_cdef_sb) {
517
    // If 128x128 block is used, we need to set the flag for all 4 64x64 sub
518
    // "blocks".
519
0
    const int block64_in_sb = (sb_size == BLOCK_128X128) ? 2 : 1;
520
0
    for (int r = 0; r < block64_in_sb; ++r) {
521
0
      for (int c = 0; c < block64_in_sb; ++c) {
522
0
        const int idx_in_sb =
523
0
            r * MI_SIZE_64X64 * cm->mi_params.mi_stride + c * MI_SIZE_64X64;
524
0
        if (mi[idx_in_sb]) mi[idx_in_sb]->skip_cdef_curr_sb = 1;
525
0
      }
526
0
    }
527
0
  }
528
529
0
  av1_nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, sb_size,
530
0
                          pc_root);
531
532
0
  if (sf->rt_sf.skip_cdef_sb) {
533
    // If 128x128 block is used, we need to set the flag for all 4 64x64 sub
534
    // "blocks".
535
0
    const int block64_in_sb = (sb_size == BLOCK_128X128) ? 2 : 1;
536
0
    const int skip = mi[0]->skip_cdef_curr_sb;
537
0
    for (int r = 0; r < block64_in_sb; ++r) {
538
0
      for (int c = 0; c < block64_in_sb; ++c) {
539
0
        const int idx_in_sb =
540
0
            r * MI_SIZE_64X64 * cm->mi_params.mi_stride + c * MI_SIZE_64X64;
541
0
        if (mi[idx_in_sb]) mi[idx_in_sb]->skip_cdef_curr_sb = skip;
542
0
      }
543
0
    }
544
0
  }
545
0
  av1_free_pc_tree_recursive(pc_root, av1_num_planes(cm), 0, 0);
546
0
}
547
548
// This function initializes the stats for encode_rd_sb.
549
static INLINE void init_encode_rd_sb(AV1_COMP *cpi, ThreadData *td,
550
                                     const TileDataEnc *tile_data,
551
                                     SIMPLE_MOTION_DATA_TREE *sms_root,
552
                                     RD_STATS *rd_cost, int mi_row, int mi_col,
553
0
                                     int gather_tpl_data) {
554
0
  const AV1_COMMON *cm = &cpi->common;
555
0
  const TileInfo *tile_info = &tile_data->tile_info;
556
0
  MACROBLOCK *x = &td->mb;
557
558
0
  const SPEED_FEATURES *sf = &cpi->sf;
559
0
  const int use_simple_motion_search =
560
0
      (sf->part_sf.simple_motion_search_split ||
561
0
       sf->part_sf.simple_motion_search_prune_rect ||
562
0
       sf->part_sf.simple_motion_search_early_term_none ||
563
0
       sf->part_sf.ml_early_term_after_part_split_level) &&
564
0
      !frame_is_intra_only(cm);
565
0
  if (use_simple_motion_search) {
566
0
    av1_init_simple_motion_search_mvs_for_sb(cpi, tile_info, x, sms_root,
567
0
                                             mi_row, mi_col);
568
0
  }
569
570
0
#if !CONFIG_REALTIME_ONLY
571
0
  if (!(has_no_stats_stage(cpi) && cpi->oxcf.mode == REALTIME &&
572
0
        cpi->oxcf.gf_cfg.lag_in_frames == 0)) {
573
0
    init_ref_frame_space(cpi, td, mi_row, mi_col);
574
0
    x->sb_energy_level = 0;
575
0
    x->part_search_info.cnn_output_valid = 0;
576
0
    if (gather_tpl_data) {
577
0
      if (cm->delta_q_info.delta_q_present_flag) {
578
0
        const int num_planes = av1_num_planes(cm);
579
0
        const BLOCK_SIZE sb_size = cm->seq_params->sb_size;
580
0
        setup_delta_q(cpi, td, x, tile_info, mi_row, mi_col, num_planes);
581
0
        av1_tpl_rdmult_setup_sb(cpi, x, sb_size, mi_row, mi_col);
582
0
      }
583
584
      // TODO(jingning): revisit this function.
585
0
      if (cpi->oxcf.algo_cfg.enable_tpl_model && 0) {
586
0
        adjust_rdmult_tpl_model(cpi, x, mi_row, mi_col);
587
0
      }
588
0
    }
589
0
  }
590
#else
591
  (void)tile_info;
592
  (void)mi_row;
593
  (void)mi_col;
594
  (void)gather_tpl_data;
595
#endif
596
597
0
  reset_mb_rd_record(x->txfm_search_info.mb_rd_record);
598
0
  av1_zero(x->picked_ref_frames_mask);
599
0
  av1_invalid_rd_stats(rd_cost);
600
0
}
601
602
/*!\brief Encode a superblock (RD-search-based)
603
 *
604
 * \ingroup partition_search
605
 * Conducts partition search for a superblock, based on rate-distortion costs,
606
 * from scratch or adjusting from a pre-calculated partition pattern.
607
 */
608
static AOM_INLINE void encode_rd_sb(AV1_COMP *cpi, ThreadData *td,
609
                                    TileDataEnc *tile_data, TokenExtra **tp,
610
                                    const int mi_row, const int mi_col,
611
0
                                    const int seg_skip) {
612
0
  AV1_COMMON *const cm = &cpi->common;
613
0
  MACROBLOCK *const x = &td->mb;
614
0
  const SPEED_FEATURES *const sf = &cpi->sf;
615
0
  const TileInfo *const tile_info = &tile_data->tile_info;
616
0
  MB_MODE_INFO **mi = cm->mi_params.mi_grid_base +
617
0
                      get_mi_grid_idx(&cm->mi_params, mi_row, mi_col);
618
0
  const BLOCK_SIZE sb_size = cm->seq_params->sb_size;
619
0
  const int num_planes = av1_num_planes(cm);
620
0
  int dummy_rate;
621
0
  int64_t dummy_dist;
622
0
  RD_STATS dummy_rdc;
623
0
  SIMPLE_MOTION_DATA_TREE *const sms_root = td->sms_root;
624
625
#if CONFIG_REALTIME_ONLY
626
  (void)seg_skip;
627
#endif  // CONFIG_REALTIME_ONLY
628
629
0
  init_encode_rd_sb(cpi, td, tile_data, sms_root, &dummy_rdc, mi_row, mi_col,
630
0
                    1);
631
632
  // Encode the superblock
633
0
  if (sf->part_sf.partition_search_type == VAR_BASED_PARTITION) {
634
#if CONFIG_COLLECT_COMPONENT_TIMING
635
    start_timing(cpi, rd_use_partition_time);
636
#endif
637
    // partition search starting from a variance-based partition
638
0
    av1_set_offsets(cpi, tile_info, x, mi_row, mi_col, sb_size);
639
0
    av1_choose_var_based_partitioning(cpi, tile_info, td, x, mi_row, mi_col);
640
0
    PC_TREE *const pc_root = av1_alloc_pc_tree_node(sb_size);
641
0
    av1_rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, sb_size,
642
0
                         &dummy_rate, &dummy_dist, 1, pc_root);
643
0
    av1_free_pc_tree_recursive(pc_root, num_planes, 0, 0);
644
#if CONFIG_COLLECT_COMPONENT_TIMING
645
    end_timing(cpi, rd_use_partition_time);
646
#endif
647
0
  }
648
0
#if !CONFIG_REALTIME_ONLY
649
0
  else if (sf->part_sf.partition_search_type == FIXED_PARTITION || seg_skip) {
650
    // partition search by adjusting a fixed-size partition
651
0
    av1_set_offsets(cpi, tile_info, x, mi_row, mi_col, sb_size);
652
0
    const BLOCK_SIZE bsize =
653
0
        seg_skip ? sb_size : sf->part_sf.fixed_partition_size;
654
0
    av1_set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize);
655
0
    PC_TREE *const pc_root = av1_alloc_pc_tree_node(sb_size);
656
0
    av1_rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, sb_size,
657
0
                         &dummy_rate, &dummy_dist, 1, pc_root);
658
0
    av1_free_pc_tree_recursive(pc_root, num_planes, 0, 0);
659
0
  } else {
660
    // The most exhaustive recursive partition search
661
0
    SuperBlockEnc *sb_enc = &x->sb_enc;
662
    // No stats for overlay frames. Exclude key frame.
663
0
    av1_get_tpl_stats_sb(cpi, sb_size, mi_row, mi_col, sb_enc);
664
665
    // Reset the tree for simple motion search data
666
0
    av1_reset_simple_motion_tree_partition(sms_root, sb_size);
667
668
#if CONFIG_COLLECT_COMPONENT_TIMING
669
    start_timing(cpi, rd_pick_partition_time);
670
#endif
671
672
    // Estimate the maximum square partition block size, which will be used
673
    // as the starting block size for partitioning the sb
674
0
    set_max_min_partition_size(sb_enc, cpi, x, sf, sb_size, mi_row, mi_col);
675
676
    // The superblock can be searched only once, or twice consecutively for
677
    // better quality. Note that the meaning of passes here is different from
678
    // the general concept of 1-pass/2-pass encoders.
679
0
    const int num_passes =
680
0
        cpi->oxcf.unit_test_cfg.sb_multipass_unit_test ? 2 : 1;
681
682
0
    if (num_passes == 1) {
683
#if CONFIG_PARTITION_SEARCH_ORDER
684
      if (cpi->ext_part_controller.ready && !frame_is_intra_only(cm)) {
685
        av1_reset_part_sf(&cpi->sf.part_sf);
686
        av1_reset_sf_for_ext_part(cpi);
687
        RD_STATS this_rdc;
688
        av1_rd_partition_search(cpi, td, tile_data, tp, sms_root, mi_row,
689
                                mi_col, sb_size, &this_rdc);
690
      } else {
691
        PC_TREE *const pc_root = av1_alloc_pc_tree_node(sb_size);
692
        av1_rd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, sb_size,
693
                              &dummy_rdc, dummy_rdc, pc_root, sms_root, NULL,
694
                              SB_SINGLE_PASS, NULL);
695
      }
696
#else
697
0
      PC_TREE *const pc_root = av1_alloc_pc_tree_node(sb_size);
698
0
      av1_rd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, sb_size,
699
0
                            &dummy_rdc, dummy_rdc, pc_root, sms_root, NULL,
700
0
                            SB_SINGLE_PASS, NULL);
701
0
#endif  // CONFIG_PARTITION_SEARCH_ORDER
702
0
    } else {
703
      // First pass
704
0
      SB_FIRST_PASS_STATS sb_fp_stats;
705
0
      av1_backup_sb_state(&sb_fp_stats, cpi, td, tile_data, mi_row, mi_col);
706
0
      PC_TREE *const pc_root_p0 = av1_alloc_pc_tree_node(sb_size);
707
0
      av1_rd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, sb_size,
708
0
                            &dummy_rdc, dummy_rdc, pc_root_p0, sms_root, NULL,
709
0
                            SB_DRY_PASS, NULL);
710
711
      // Second pass
712
0
      init_encode_rd_sb(cpi, td, tile_data, sms_root, &dummy_rdc, mi_row,
713
0
                        mi_col, 0);
714
0
      av1_reset_mbmi(&cm->mi_params, sb_size, mi_row, mi_col);
715
0
      av1_reset_simple_motion_tree_partition(sms_root, sb_size);
716
717
0
      av1_restore_sb_state(&sb_fp_stats, cpi, td, tile_data, mi_row, mi_col);
718
719
0
      PC_TREE *const pc_root_p1 = av1_alloc_pc_tree_node(sb_size);
720
0
      av1_rd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, sb_size,
721
0
                            &dummy_rdc, dummy_rdc, pc_root_p1, sms_root, NULL,
722
0
                            SB_WET_PASS, NULL);
723
0
    }
724
    // Reset to 0 so that it wouldn't be used elsewhere mistakenly.
725
0
    sb_enc->tpl_data_count = 0;
726
#if CONFIG_COLLECT_COMPONENT_TIMING
727
    end_timing(cpi, rd_pick_partition_time);
728
#endif
729
0
  }
730
0
#endif  // !CONFIG_REALTIME_ONLY
731
732
  // Update the inter rd model
733
  // TODO(angiebird): Let inter_mode_rd_model_estimation support multi-tile.
734
0
  if (cpi->sf.inter_sf.inter_mode_rd_model_estimation == 1 &&
735
0
      cm->tiles.cols == 1 && cm->tiles.rows == 1) {
736
0
    av1_inter_mode_data_fit(tile_data, x->rdmult);
737
0
  }
738
0
}
739
740
// Check if the cost update of symbols mode, coeff and dv are tile or off.
741
static AOM_INLINE int is_mode_coeff_dv_upd_freq_tile_or_off(
742
0
    const AV1_COMP *const cpi) {
743
0
  const INTER_MODE_SPEED_FEATURES *const inter_sf = &cpi->sf.inter_sf;
744
745
0
  return (inter_sf->coeff_cost_upd_level <= INTERNAL_COST_UPD_TILE &&
746
0
          inter_sf->mode_cost_upd_level <= INTERNAL_COST_UPD_TILE &&
747
0
          cpi->sf.intra_sf.dv_cost_upd_level <= INTERNAL_COST_UPD_TILE);
748
0
}
749
750
// When row-mt is enabled and cost update frequencies are set to off/tile,
751
// processing of current SB can start even before processing of top-right SB
752
// is finished. This function checks if it is sufficient to wait for top SB
753
// to finish processing before current SB starts processing.
754
0
static AOM_INLINE int delay_wait_for_top_right_sb(const AV1_COMP *const cpi) {
755
0
  const MODE mode = cpi->oxcf.mode;
756
0
  if (mode == GOOD) return 0;
757
758
0
  if (mode == ALLINTRA)
759
0
    return is_mode_coeff_dv_upd_freq_tile_or_off(cpi);
760
0
  else if (mode == REALTIME)
761
0
    return (is_mode_coeff_dv_upd_freq_tile_or_off(cpi) &&
762
0
            cpi->sf.inter_sf.mv_cost_upd_level <= INTERNAL_COST_UPD_TILE);
763
0
  else
764
0
    return 0;
765
0
}
766
767
/*!\brief Encode a superblock row by breaking it into superblocks
768
 *
769
 * \ingroup partition_search
770
 * \callgraph
771
 * \callergraph
772
 * Do partition and mode search for an sb row: one row of superblocks filling up
773
 * the width of the current tile.
774
 */
775
static AOM_INLINE void encode_sb_row(AV1_COMP *cpi, ThreadData *td,
                                     TileDataEnc *tile_data, int mi_row,
                                     TokenExtra **tp) {
  AV1_COMMON *const cm = &cpi->common;
  const TileInfo *const tile_info = &tile_data->tile_info;
  MultiThreadInfo *const mt_info = &cpi->mt_info;
  AV1EncRowMultiThreadInfo *const enc_row_mt = &mt_info->enc_row_mt;
  AV1EncRowMultiThreadSync *const row_mt_sync = &tile_data->row_mt_sync;
  bool row_mt_enabled = mt_info->row_mt_enabled;
  MACROBLOCK *const x = &td->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  const int sb_cols_in_tile = av1_get_sb_cols_in_tile(cm, tile_data->tile_info);
  const BLOCK_SIZE sb_size = cm->seq_params->sb_size;
  const int mib_size = cm->seq_params->mib_size;
  const int mib_size_log2 = cm->seq_params->mib_size_log2;
  // SB row index relative to the start of this tile.
  const int sb_row = (mi_row - tile_info->mi_row_start) >> mib_size_log2;
  const int use_nonrd_mode = cpi->sf.rt_sf.use_nonrd_pick_mode;

#if CONFIG_COLLECT_COMPONENT_TIMING
  start_timing(cpi, encode_sb_row_time);
#endif

  // Initialize the left context for the new SB row
  av1_zero_left_context(xd);

  // Reset delta for quantizer and loop filters at the beginning of every tile
  // (and of every row when row multithreading is on, since rows may be coded
  // out of order across threads).
  if (mi_row == tile_info->mi_row_start || row_mt_enabled) {
    if (cm->delta_q_info.delta_q_present_flag)
      xd->current_base_qindex = cm->quant_params.base_qindex;
    if (cm->delta_q_info.delta_lf_present_flag) {
      av1_reset_loop_filter_delta(xd, av1_num_planes(cm));
    }
  }

  reset_thresh_freq_fact(x);

  // Code each SB in the row
  for (int mi_col = tile_info->mi_col_start, sb_col_in_tile = 0;
       mi_col < tile_info->mi_col_end; mi_col += mib_size, sb_col_in_tile++) {
    // In realtime/allintra mode and when frequency of cost updates is off/tile,
    // wait for the top superblock to finish encoding. Otherwise, wait for the
    // top-right superblock to finish encoding.
    (*(enc_row_mt->sync_read_ptr))(
        row_mt_sync, sb_row, sb_col_in_tile - delay_wait_for_top_right_sb(cpi));
    const int update_cdf = tile_data->allow_update_cdf && row_mt_enabled;
    if (update_cdf && (tile_info->mi_row_start != mi_row)) {
      if ((tile_info->mi_col_start == mi_col)) {
        // restore frame context at the 1st column sb
        memcpy(xd->tile_ctx, x->row_ctx, sizeof(*xd->tile_ctx));
      } else {
        // Update context by averaging the CDFs of the left and the
        // top-right (or top, at the tile's right edge) superblocks.
        int wt_left = AVG_CDF_WEIGHT_LEFT;
        int wt_tr = AVG_CDF_WEIGHT_TOP_RIGHT;
        if (tile_info->mi_col_end > (mi_col + mib_size))
          av1_avg_cdf_symbols(xd->tile_ctx, x->row_ctx + sb_col_in_tile,
                              wt_left, wt_tr);
        else
          av1_avg_cdf_symbols(xd->tile_ctx, x->row_ctx + sb_col_in_tile - 1,
                              wt_left, wt_tr);
      }
    }

    // Update the rate cost tables for some symbols
    av1_set_cost_upd_freq(cpi, td, tile_info, mi_row, mi_col);

    // Reset color coding related parameters
    x->color_sensitivity_sb[0] = 0;
    x->color_sensitivity_sb[1] = 0;
    x->color_sensitivity[0] = 0;
    x->color_sensitivity[1] = 0;
    x->content_state_sb.source_sad = kMedSad;
    x->content_state_sb.lighting_change = 0;
    x->content_state_sb.low_sumdiff = 0;

    xd->cur_frame_force_integer_mv = cm->features.cur_frame_force_integer_mv;
    // Invalidate any cached source variance from a previous superblock.
    x->source_variance = UINT_MAX;
    td->mb.cb_coef_buff = av1_get_cb_coeff_buffer(cpi, mi_row, mi_col);

    // Get segment id and skip flag
    const struct segmentation *const seg = &cm->seg;
    int seg_skip = 0;
    if (seg->enabled) {
      const uint8_t *const map =
          seg->update_map ? cpi->enc_seg.map : cm->last_frame_seg_map;
      const int segment_id =
          map ? get_segment_id(&cm->mi_params, map, sb_size, mi_row, mi_col)
              : 0;
      seg_skip = segfeature_active(seg, segment_id, SEG_LVL_SKIP);
    }

    produce_gradients_for_sb(cpi, x, sb_size, mi_row, mi_col);

    // Encode the superblock: non-RD (fast) path or full RD path.
    if (use_nonrd_mode) {
      encode_nonrd_sb(cpi, td, tile_data, tp, mi_row, mi_col, seg_skip);
    } else {
      encode_rd_sb(cpi, td, tile_data, tp, mi_row, mi_col, seg_skip);
    }

    // Update the top-right context in row_mt coding
    if (update_cdf && (tile_info->mi_row_end > (mi_row + mib_size))) {
      if (sb_cols_in_tile == 1)
        memcpy(x->row_ctx, xd->tile_ctx, sizeof(*xd->tile_ctx));
      else if (sb_col_in_tile >= 1)
        memcpy(x->row_ctx + sb_col_in_tile - 1, xd->tile_ctx,
               sizeof(*xd->tile_ctx));
    }
    // Signal that this SB is done so dependent rows can proceed.
    (*(enc_row_mt->sync_write_ptr))(row_mt_sync, sb_row, sb_col_in_tile,
                                    sb_cols_in_tile);
  }
#if CONFIG_COLLECT_COMPONENT_TIMING
  end_timing(cpi, encode_sb_row_time);
#endif
}
889
890
0
// Set up the macroblock source-plane and block-plane state before a frame is
// encoded.
static AOM_INLINE void init_encode_frame_mb_context(AV1_COMP *cpi) {
  AV1_COMMON *const cm = &cpi->common;
  const int num_planes = av1_num_planes(cm);
  MACROBLOCK *const x = &cpi->td.mb;
  MACROBLOCKD *const xd = &x->e_mbd;

  // Copy data over into macro block data structures.
  av1_setup_src_planes(x, cpi->source, 0, 0, num_planes,
                       cm->seq_params->sb_size);

  // Configure per-plane subsampling for the block decoder-side state.
  av1_setup_block_planes(xd, cm->seq_params->subsampling_x,
                         cm->seq_params->subsampling_y, num_planes);
}
903
904
0
// Allocate (or reallocate) the per-tile encoder data array cpi->tile_data for
// the current tile configuration and record how many tiles were allocated in
// cpi->allocated_tiles. Any previous allocation is released first.
void av1_alloc_tile_data(AV1_COMP *cpi) {
  AV1_COMMON *const cm = &cpi->common;
  const int tile_cols = cm->tiles.cols;
  const int tile_rows = cm->tiles.rows;

  // aom_free(), like free(), is a no-op on NULL, so no guard is needed.
  // Null the pointer immediately so it never dangles if the allocation
  // below fails and CHECK_MEM_ERROR aborts/unwinds.
  aom_free(cpi->tile_data);
  cpi->tile_data = NULL;
  CHECK_MEM_ERROR(
      cm, cpi->tile_data,
      aom_memalign(32, tile_cols * tile_rows * sizeof(*cpi->tile_data)));

  cpi->allocated_tiles = tile_cols * tile_rows;
}
916
917
0
// Initialize per-tile encoder state for all tiles of the current frame:
// tile geometry, token buffer pointers, CDF-update permission, and a fresh
// copy of the frame context.
void av1_init_tile_data(AV1_COMP *cpi) {
  AV1_COMMON *const cm = &cpi->common;
  const int num_planes = av1_num_planes(cm);
  const int tile_cols = cm->tiles.cols;
  const int tile_rows = cm->tiles.rows;
  int tile_col, tile_row;
  TokenInfo *const token_info = &cpi->token_info;
  // Walking pointers into the shared token / token-list storage; each tile
  // gets a slice starting where the previous tile's slice ended.
  TokenExtra *pre_tok = token_info->tile_tok[0][0];
  TokenList *tplist = token_info->tplist[0][0];
  unsigned int tile_tok = 0;
  int tplist_count = 0;

  if (!is_stat_generation_stage(cpi) &&
      cm->features.allow_screen_content_tools) {
    // Number of tokens for which token info needs to be allocated.
    unsigned int tokens_required =
        get_token_alloc(cm->mi_params.mb_rows, cm->mi_params.mb_cols,
                        MAX_SB_SIZE_LOG2, num_planes);
    // Allocate/reallocate memory for token related info if the number of tokens
    // required is more than the number of tokens already allocated. This could
    // occur in case of the following:
    // 1) If the memory is not yet allocated
    // 2) If the frame dimensions have changed
    const bool realloc_tokens = tokens_required > token_info->tokens_allocated;
    if (realloc_tokens) {
      free_token_info(token_info);
      alloc_token_info(cm, token_info, tokens_required);
      // Reload the walking pointers after reallocation.
      pre_tok = token_info->tile_tok[0][0];
      tplist = token_info->tplist[0][0];
    }
  }

  for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
    for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
      TileDataEnc *const tile_data =
          &cpi->tile_data[tile_row * tile_cols + tile_col];
      TileInfo *const tile_info = &tile_data->tile_info;
      av1_tile_init(tile_info, cm, tile_row, tile_col);
      tile_data->firstpass_top_mv = kZeroMv;
      tile_data->abs_sum_level = 0;

      if (is_token_info_allocated(token_info)) {
        // Advance past the previous tile's token allocation, then compute
        // this tile's allocation for the next iteration.
        token_info->tile_tok[tile_row][tile_col] = pre_tok + tile_tok;
        pre_tok = token_info->tile_tok[tile_row][tile_col];
        tile_tok = allocated_tokens(
            *tile_info, cm->seq_params->mib_size_log2 + MI_SIZE_LOG2,
            num_planes);
        // Same slicing scheme for the per-SB-row token lists.
        token_info->tplist[tile_row][tile_col] = tplist + tplist_count;
        tplist = token_info->tplist[tile_row][tile_col];
        tplist_count = av1_get_sb_rows_in_tile(cm, tile_data->tile_info);
      }
      // CDF updates are disallowed for large-scale tiles, when disabled
      // frame-wide, or when top-right waits are being skipped in row-mt.
      tile_data->allow_update_cdf = !cm->tiles.large_scale;
      tile_data->allow_update_cdf = tile_data->allow_update_cdf &&
                                    !cm->features.disable_cdf_update &&
                                    !delay_wait_for_top_right_sb(cpi);
      tile_data->tctx = *cm->fc;
    }
  }
}
976
977
// Populate the start palette token info prior to encoding an SB row.
// On return *tp points at the first token slot for the SB row containing
// mi_row, and the row's TokenList start is recorded. No-op when token info
// has not been allocated (e.g. no screen content tools).
static AOM_INLINE void get_token_start(AV1_COMP *cpi, const TileInfo *tile_info,
                                       int tile_row, int tile_col, int mi_row,
                                       TokenExtra **tp) {
  const TokenInfo *token_info = &cpi->token_info;
  if (!is_token_info_allocated(token_info)) return;

  const AV1_COMMON *cm = &cpi->common;
  const int num_planes = av1_num_planes(cm);
  TokenList *const tplist = cpi->token_info.tplist[tile_row][tile_col];
  // SB row index relative to the start of the tile.
  const int sb_row_in_tile =
      (mi_row - tile_info->mi_row_start) >> cm->seq_params->mib_size_log2;

  get_start_tok(cpi, tile_row, tile_col, mi_row, tp,
                cm->seq_params->mib_size_log2 + MI_SIZE_LOG2, num_planes);
  assert(tplist != NULL);
  tplist[sb_row_in_tile].start = *tp;
}
995
996
// Populate the token count after encoding an SB row.
// 'tok' is the one-past-the-end token pointer after the row was encoded; the
// count is the distance from the row's recorded start. No-op when token info
// has not been allocated.
static AOM_INLINE void populate_token_count(AV1_COMP *cpi,
                                            const TileInfo *tile_info,
                                            int tile_row, int tile_col,
                                            int mi_row, TokenExtra *tok) {
  const TokenInfo *token_info = &cpi->token_info;
  if (!is_token_info_allocated(token_info)) return;

  const AV1_COMMON *cm = &cpi->common;
  const int num_planes = av1_num_planes(cm);
  TokenList *const tplist = token_info->tplist[tile_row][tile_col];
  // SB row index relative to the start of the tile.
  const int sb_row_in_tile =
      (mi_row - tile_info->mi_row_start) >> cm->seq_params->mib_size_log2;
  // Tile width and SB height expressed in 16x16 macroblocks (rounded up);
  // used only to bound-check the token count below.
  const int tile_mb_cols =
      (tile_info->mi_col_end - tile_info->mi_col_start + 2) >> 2;
  const int num_mb_rows_in_sb =
      ((1 << (cm->seq_params->mib_size_log2 + MI_SIZE_LOG2)) + 8) >> 4;
  tplist[sb_row_in_tile].count =
      (unsigned int)(tok - tplist[sb_row_in_tile].start);

  // The tokens written for this row must fit in the space allocated for it.
  assert((unsigned int)(tok - tplist[sb_row_in_tile].start) <=
         get_token_alloc(num_mb_rows_in_sb, tile_mb_cols,
                         cm->seq_params->mib_size_log2 + MI_SIZE_LOG2,
                         num_planes));

  // Silence unused-variable warnings when asserts are compiled out.
  (void)num_planes;
  (void)tile_mb_cols;
  (void)num_mb_rows_in_sb;
}
1025
1026
/*!\brief Encode a superblock row
1027
 *
1028
 * \ingroup partition_search
1029
 */
1030
void av1_encode_sb_row(AV1_COMP *cpi, ThreadData *td, int tile_row,
                       int tile_col, int mi_row) {
  AV1_COMMON *const cm = &cpi->common;
  const int tile_index = tile_row * cm->tiles.cols + tile_col;
  TileDataEnc *const tile_data = &cpi->tile_data[tile_index];
  const TileInfo *const tile = &tile_data->tile_info;
  TokenExtra *tok = NULL;

  // Record where this SB row's tokens begin, encode the row, then store
  // how many tokens the row produced.
  get_token_start(cpi, tile, tile_row, tile_col, mi_row, &tok);
  encode_sb_row(cpi, td, tile_data, mi_row, &tok);
  populate_token_count(cpi, tile, tile_row, tile_col, mi_row, tok);
}
1044
1045
/*!\brief Encode a tile
1046
 *
1047
 * \ingroup partition_search
1048
 */
1049
void av1_encode_tile(AV1_COMP *cpi, ThreadData *td, int tile_row,
                     int tile_col) {
  AV1_COMMON *const cm = &cpi->common;
  TileDataEnc *const tile_data =
      &cpi->tile_data[tile_row * cm->tiles.cols + tile_col];
  const TileInfo *const tile = &tile_data->tile_info;
  MACROBLOCKD *const xd = &td->mb.e_mbd;

  // Inter-mode RD model data is gathered only on the RD (non-realtime) path.
  if (!cpi->sf.rt_sf.use_nonrd_pick_mode) av1_inter_mode_data_init(tile_data);

  // Clear and (re)initialize the above-row entropy contexts for this tile.
  av1_zero_above_context(cm, xd, tile->mi_col_start, tile->mi_col_end,
                         tile_row);
  av1_init_above_context(&cm->above_contexts, av1_num_planes(cm), tile_row,
                         xd);

  if (cpi->oxcf.intra_mode_cfg.enable_cfl_intra)
    cfl_init(&xd->cfl, cm->seq_params);

  if (td->mb.txfm_search_info.mb_rd_record != NULL) {
    av1_crc32c_calculator_init(
        &td->mb.txfm_search_info.mb_rd_record->crc_calculator);
  }

  // Encode the tile one superblock row at a time.
  const int mib_size = cm->seq_params->mib_size;
  for (int mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
       mi_row += mib_size) {
    av1_encode_sb_row(cpi, td, tile_row, tile_col, mi_row);
  }
  tile_data->abs_sum_level = td->abs_sum_level;
}
1077
1078
/*!\brief Break one frame into tiles and encode the tiles
1079
 *
1080
 * \ingroup partition_search
1081
 *
1082
 * \param[in]    cpi    Top-level encoder structure
1083
 */
1084
0
static AOM_INLINE void encode_tiles(AV1_COMP *cpi) {
  AV1_COMMON *const cm = &cpi->common;
  const int tile_cols = cm->tiles.cols;
  const int tile_rows = cm->tiles.rows;
  int tile_col, tile_row;

  MACROBLOCK *const mb = &cpi->td.mb;
  // tile_data may only be NULL when fewer tiles than needed are allocated.
  assert(IMPLIES(cpi->tile_data == NULL,
                 cpi->allocated_tiles < tile_cols * tile_rows));
  if (cpi->allocated_tiles < tile_cols * tile_rows) av1_alloc_tile_data(cpi);

  av1_init_tile_data(cpi);
  av1_alloc_mb_data(cm, mb, cpi->sf.rt_sf.use_nonrd_pick_mode,
                    cpi->sf.rd_sf.use_mb_rd_hash);

  // Single-threaded tile loop: reset per-tile thread-data counters, encode
  // the tile, then fold the results back into the frame-level state.
  for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
    for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
      TileDataEnc *const this_tile =
          &cpi->tile_data[tile_row * cm->tiles.cols + tile_col];
      cpi->td.intrabc_used = 0;
      cpi->td.deltaq_used = 0;
      cpi->td.abs_sum_level = 0;
      // Point the encode state at this tile's entropy contexts.
      cpi->td.mb.e_mbd.tile_ctx = &this_tile->tctx;
      cpi->td.mb.tile_pb_ctx = &this_tile->tctx;
      // Reset cyclic refresh counters.
      av1_init_cyclic_refresh_counters(&cpi->td.mb);

      av1_encode_tile(cpi, &cpi->td, tile_row, tile_col);
      // Accumulate cyclic refresh params.
      if (cpi->oxcf.q_cfg.aq_mode == CYCLIC_REFRESH_AQ &&
          !frame_is_intra_only(&cpi->common))
        av1_accumulate_cyclic_refresh_counters(cpi->cyclic_refresh,
                                               &cpi->td.mb);
      // Fold per-tile usage flags into the frame-level flags.
      cpi->intrabc_used |= cpi->td.intrabc_used;
      cpi->deltaq_used |= cpi->td.deltaq_used;
    }
  }

  av1_dealloc_mb_data(cm, mb);
}
1124
1125
// Set the relative distance of a reference frame w.r.t. current frame
1126
static AOM_INLINE void set_rel_frame_dist(
1127
    const AV1_COMMON *const cm, RefFrameDistanceInfo *const ref_frame_dist_info,
1128
0
    const int ref_frame_flags) {
1129
0
  MV_REFERENCE_FRAME ref_frame;
1130
0
  int min_past_dist = INT32_MAX, min_future_dist = INT32_MAX;
1131
0
  ref_frame_dist_info->nearest_past_ref = NONE_FRAME;
1132
0
  ref_frame_dist_info->nearest_future_ref = NONE_FRAME;
1133
0
  for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
1134
0
    ref_frame_dist_info->ref_relative_dist[ref_frame - LAST_FRAME] = 0;
1135
0
    if (ref_frame_flags & av1_ref_frame_flag_list[ref_frame]) {
1136
0
      int dist = av1_encoder_get_relative_dist(
1137
0
          cm->cur_frame->ref_display_order_hint[ref_frame - LAST_FRAME],
1138
0
          cm->current_frame.display_order_hint);
1139
0
      ref_frame_dist_info->ref_relative_dist[ref_frame - LAST_FRAME] = dist;
1140
      // Get the nearest ref_frame in the past
1141
0
      if (abs(dist) < min_past_dist && dist < 0) {
1142
0
        ref_frame_dist_info->nearest_past_ref = ref_frame;
1143
0
        min_past_dist = abs(dist);
1144
0
      }
1145
      // Get the nearest ref_frame in the future
1146
0
      if (dist < min_future_dist && dist > 0) {
1147
0
        ref_frame_dist_info->nearest_future_ref = ref_frame;
1148
0
        min_future_dist = dist;
1149
0
      }
1150
0
    }
1151
0
  }
1152
0
}
1153
1154
0
static INLINE int refs_are_one_sided(const AV1_COMMON *cm) {
1155
0
  assert(!frame_is_intra_only(cm));
1156
1157
0
  int one_sided_refs = 1;
1158
0
  const int cur_display_order_hint = cm->current_frame.display_order_hint;
1159
0
  for (int ref = LAST_FRAME; ref <= ALTREF_FRAME; ++ref) {
1160
0
    const RefCntBuffer *const buf = get_ref_frame_buf(cm, ref);
1161
0
    if (buf == NULL) continue;
1162
0
    if (av1_encoder_get_relative_dist(buf->display_order_hint,
1163
0
                                      cur_display_order_hint) > 0) {
1164
0
      one_sided_refs = 0;  // bwd reference
1165
0
      break;
1166
0
    }
1167
0
  }
1168
0
  return one_sided_refs;
1169
0
}
1170
1171
static INLINE void get_skip_mode_ref_offsets(const AV1_COMMON *cm,
1172
0
                                             int ref_order_hint[2]) {
1173
0
  const SkipModeInfo *const skip_mode_info = &cm->current_frame.skip_mode_info;
1174
0
  ref_order_hint[0] = ref_order_hint[1] = 0;
1175
0
  if (!skip_mode_info->skip_mode_allowed) return;
1176
1177
0
  const RefCntBuffer *const buf_0 =
1178
0
      get_ref_frame_buf(cm, LAST_FRAME + skip_mode_info->ref_frame_idx_0);
1179
0
  const RefCntBuffer *const buf_1 =
1180
0
      get_ref_frame_buf(cm, LAST_FRAME + skip_mode_info->ref_frame_idx_1);
1181
0
  assert(buf_0 != NULL && buf_1 != NULL);
1182
1183
0
  ref_order_hint[0] = buf_0->order_hint;
1184
0
  ref_order_hint[1] = buf_1->order_hint;
1185
0
}
1186
1187
0
// Decide whether skip mode may be used for the current frame.
// Returns 1 if skip mode stays enabled, 0 if it must be turned off.
static int check_skip_mode_enabled(AV1_COMP *const cpi) {
  AV1_COMMON *const cm = &cpi->common;

  av1_setup_skip_mode_allowed(cm);
  if (!cm->current_frame.skip_mode_info.skip_mode_allowed) return 0;

  // Turn off skip mode if the temporal distances of the reference pair to the
  // current frame are different by more than 1 frame.
  const int cur_offset = (int)cm->current_frame.order_hint;
  int ref_offset[2];
  get_skip_mode_ref_offsets(cm, ref_offset);
  const int cur_to_ref0 = get_relative_dist(&cm->seq_params->order_hint_info,
                                            cur_offset, ref_offset[0]);
  const int cur_to_ref1 = abs(get_relative_dist(
      &cm->seq_params->order_hint_info, cur_offset, ref_offset[1]));
  if (abs(cur_to_ref0 - cur_to_ref1) > 1) return 0;

  // High Latency: Turn off skip mode if all refs are fwd.
  if (cpi->all_one_sided_refs && cpi->oxcf.gf_cfg.lag_in_frames > 0) return 0;

  // Mapping from reference frame index to its enable-flag bit.
  static const int flag_list[REF_FRAMES] = { 0,
                                             AOM_LAST_FLAG,
                                             AOM_LAST2_FLAG,
                                             AOM_LAST3_FLAG,
                                             AOM_GOLD_FLAG,
                                             AOM_BWD_FLAG,
                                             AOM_ALT2_FLAG,
                                             AOM_ALT_FLAG };
  const int ref_frame[2] = {
    cm->current_frame.skip_mode_info.ref_frame_idx_0 + LAST_FRAME,
    cm->current_frame.skip_mode_info.ref_frame_idx_1 + LAST_FRAME
  };
  // Both skip-mode references must actually be enabled for this frame.
  if (!(cpi->ref_frame_flags & flag_list[ref_frame[0]]) ||
      !(cpi->ref_frame_flags & flag_list[ref_frame[1]]))
    return 0;

  return 1;
}
1225
1226
static AOM_INLINE void set_default_interp_skip_flags(
1227
0
    const AV1_COMMON *cm, InterpSearchFlags *interp_search_flags) {
1228
0
  const int num_planes = av1_num_planes(cm);
1229
0
  interp_search_flags->default_interp_skip_flags =
1230
0
      (num_planes == 1) ? INTERP_SKIP_LUMA_EVAL_CHROMA
1231
0
                        : INTERP_SKIP_LUMA_SKIP_CHROMA;
1232
0
}
1233
1234
0
// Build cpi->prune_ref_frame_mask: a bitmask of compound reference pairs that
// should not be searched for the current frame.
static AOM_INLINE void setup_prune_ref_frame_mask(AV1_COMP *cpi) {
  if ((!cpi->oxcf.ref_frm_cfg.enable_onesided_comp ||
       cpi->sf.inter_sf.disable_onesided_comp) &&
      cpi->all_one_sided_refs) {
    // Disable all compound references
    cpi->prune_ref_frame_mask = (1 << MODE_CTX_REF_FRAMES) - (1 << REF_FRAMES);
  } else if (!cpi->sf.rt_sf.use_nonrd_pick_mode &&
             cpi->sf.inter_sf.selective_ref_frame >= 2) {
    AV1_COMMON *const cm = &cpi->common;
    const int cur_frame_display_order_hint =
        cm->current_frame.display_order_hint;
    unsigned int *ref_display_order_hint =
        cm->cur_frame->ref_display_order_hint;
    // Signed display-order distances of ALTREF2 and BWDREF from the current
    // frame, used by the selective_ref_frame >= 4 pruning below.
    const int arf2_dist = av1_encoder_get_relative_dist(
        ref_display_order_hint[ALTREF2_FRAME - LAST_FRAME],
        cur_frame_display_order_hint);
    const int bwd_dist = av1_encoder_get_relative_dist(
        ref_display_order_hint[BWDREF_FRAME - LAST_FRAME],
        cur_frame_display_order_hint);

    // Iterate over all compound reference pairs.
    for (int ref_idx = REF_FRAMES; ref_idx < MODE_CTX_REF_FRAMES; ++ref_idx) {
      MV_REFERENCE_FRAME rf[2];
      av1_set_ref_frame(rf, ref_idx);
      // Skip pairs where either reference is not enabled for this frame.
      if (!(cpi->ref_frame_flags & av1_ref_frame_flag_list[rf[0]]) ||
          !(cpi->ref_frame_flags & av1_ref_frame_flag_list[rf[1]])) {
        continue;
      }

      if (!cpi->all_one_sided_refs) {
        int ref_dist[2];
        for (int i = 0; i < 2; ++i) {
          ref_dist[i] = av1_encoder_get_relative_dist(
              ref_display_order_hint[rf[i] - LAST_FRAME],
              cur_frame_display_order_hint);
        }

        // One-sided compound is used only when all reference frames are
        // one-sided.
        if ((ref_dist[0] > 0) == (ref_dist[1] > 0)) {
          cpi->prune_ref_frame_mask |= 1 << ref_idx;
        }
      }

      if (cpi->sf.inter_sf.selective_ref_frame >= 4 &&
          (rf[0] == ALTREF2_FRAME || rf[1] == ALTREF2_FRAME) &&
          (cpi->ref_frame_flags & av1_ref_frame_flag_list[BWDREF_FRAME])) {
        // Check if both ALTREF2_FRAME and BWDREF_FRAME are future references.
        if (arf2_dist > 0 && bwd_dist > 0 && bwd_dist <= arf2_dist) {
          // Drop ALTREF2_FRAME as a reference if BWDREF_FRAME is a closer
          // reference to the current frame than ALTREF2_FRAME
          cpi->prune_ref_frame_mask |= 1 << ref_idx;
        }
      }
    }
  }
}
1290
1291
0
// Decide whether objective delta-q should be enabled for the frame.
// Returns 1 when per-superblock q offsets are estimated to lower the total
// RD cost; in realtime-only builds delta-q is always allowed.
static int allow_deltaq_mode(AV1_COMP *cpi) {
#if !CONFIG_REALTIME_ONLY
  AV1_COMMON *const cm = &cpi->common;
  BLOCK_SIZE sb_size = cm->seq_params->sb_size;
  int sbs_wide = mi_size_wide[sb_size];
  int sbs_high = mi_size_high[sb_size];

  // Accumulate the estimated RD-cost change of applying delta-q over every
  // superblock in the frame.
  int64_t delta_rdcost = 0;
  for (int mi_row = 0; mi_row < cm->mi_params.mi_rows; mi_row += sbs_high) {
    for (int mi_col = 0; mi_col < cm->mi_params.mi_cols; mi_col += sbs_wide) {
      int64_t this_delta_rdcost = 0;
      av1_get_q_for_deltaq_objective(cpi, &cpi->td, &this_delta_rdcost, sb_size,
                                     mi_row, mi_col);
      delta_rdcost += this_delta_rdcost;
    }
  }
  // A negative total means delta-q is expected to be a net win.
  return delta_rdcost < 0;
#else
  (void)cpi;
  return 1;
#endif  // !CONFIG_REALTIME_ONLY
}
1313
1314
/*!\brief Encoder setup (only for the current frame), encoding, and reconstruction
1315
 * for a single frame
1316
 *
1317
 * \ingroup high_level_algo
1318
 */
1319
0
static AOM_INLINE void encode_frame_internal(AV1_COMP *cpi) {
1320
0
  ThreadData *const td = &cpi->td;
1321
0
  MACROBLOCK *const x = &td->mb;
1322
0
  AV1_COMMON *const cm = &cpi->common;
1323
0
  CommonModeInfoParams *const mi_params = &cm->mi_params;
1324
0
  FeatureFlags *const features = &cm->features;
1325
0
  MACROBLOCKD *const xd = &x->e_mbd;
1326
0
  RD_COUNTS *const rdc = &cpi->td.rd_counts;
1327
#if CONFIG_FRAME_PARALLEL_ENCODE && CONFIG_FPMT_TEST
1328
  FrameProbInfo *const temp_frame_probs = &cpi->ppi->temp_frame_probs;
1329
  FrameProbInfo *const temp_frame_probs_simulation =
1330
      &cpi->ppi->temp_frame_probs_simulation;
1331
#endif
1332
0
  FrameProbInfo *const frame_probs = &cpi->ppi->frame_probs;
1333
0
  IntraBCHashInfo *const intrabc_hash_info = &x->intrabc_hash_info;
1334
0
  MultiThreadInfo *const mt_info = &cpi->mt_info;
1335
0
  AV1EncRowMultiThreadInfo *const enc_row_mt = &mt_info->enc_row_mt;
1336
0
  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
1337
0
  const DELTAQ_MODE deltaq_mode = oxcf->q_cfg.deltaq_mode;
1338
0
  int i;
1339
1340
0
  if (!cpi->sf.rt_sf.use_nonrd_pick_mode) {
1341
0
    mi_params->setup_mi(mi_params);
1342
0
  }
1343
1344
0
  set_mi_offsets(mi_params, xd, 0, 0);
1345
1346
0
  av1_zero(*td->counts);
1347
0
  av1_zero(rdc->tx_type_used);
1348
0
  av1_zero(rdc->obmc_used);
1349
0
  av1_zero(rdc->warped_used);
1350
1351
  // Reset the flag.
1352
0
  cpi->intrabc_used = 0;
1353
  // Need to disable intrabc when superres is selected
1354
0
  if (av1_superres_scaled(cm)) {
1355
0
    features->allow_intrabc = 0;
1356
0
  }
1357
1358
0
  features->allow_intrabc &= (oxcf->kf_cfg.enable_intrabc);
1359
1360
0
  if (features->allow_warped_motion &&
1361
0
      cpi->sf.inter_sf.prune_warped_prob_thresh > 0) {
1362
0
    const FRAME_UPDATE_TYPE update_type =
1363
0
        get_frame_update_type(&cpi->ppi->gf_group, cpi->gf_frame_index);
1364
0
    int warped_probability =
1365
#if CONFIG_FRAME_PARALLEL_ENCODE && CONFIG_FPMT_TEST
1366
        cpi->ppi->fpmt_unit_test_cfg == PARALLEL_SIMULATION_ENCODE
1367
            ? temp_frame_probs->warped_probs[update_type]
1368
            :
1369
#endif  // CONFIG_FRAME_PARALLEL_ENCODE && CONFIG_FPMT_TEST
1370
0
            frame_probs->warped_probs[update_type];
1371
0
    if (warped_probability < cpi->sf.inter_sf.prune_warped_prob_thresh)
1372
0
      features->allow_warped_motion = 0;
1373
0
  }
1374
1375
0
  int hash_table_created = 0;
1376
0
  if (!is_stat_generation_stage(cpi) && av1_use_hash_me(cpi) &&
1377
0
      !cpi->sf.rt_sf.use_nonrd_pick_mode) {
1378
    // TODO(any): move this outside of the recoding loop to avoid recalculating
1379
    // the hash table.
1380
    // add to hash table
1381
0
    const int pic_width = cpi->source->y_crop_width;
1382
0
    const int pic_height = cpi->source->y_crop_height;
1383
0
    uint32_t *block_hash_values[2][2];
1384
0
    int8_t *is_block_same[2][3];
1385
0
    int k, j;
1386
1387
0
    for (k = 0; k < 2; k++) {
1388
0
      for (j = 0; j < 2; j++) {
1389
0
        CHECK_MEM_ERROR(cm, block_hash_values[k][j],
1390
0
                        aom_malloc(sizeof(uint32_t) * pic_width * pic_height));
1391
0
      }
1392
1393
0
      for (j = 0; j < 3; j++) {
1394
0
        CHECK_MEM_ERROR(cm, is_block_same[k][j],
1395
0
                        aom_malloc(sizeof(int8_t) * pic_width * pic_height));
1396
0
      }
1397
0
    }
1398
1399
0
    av1_hash_table_init(intrabc_hash_info);
1400
0
    av1_hash_table_create(&intrabc_hash_info->intrabc_hash_table);
1401
0
    hash_table_created = 1;
1402
0
    av1_generate_block_2x2_hash_value(intrabc_hash_info, cpi->source,
1403
0
                                      block_hash_values[0], is_block_same[0]);
1404
    // Hash data generated for screen contents is used for intraBC ME
1405
0
    const int min_alloc_size = block_size_wide[mi_params->mi_alloc_bsize];
1406
0
    const int max_sb_size =
1407
0
        (1 << (cm->seq_params->mib_size_log2 + MI_SIZE_LOG2));
1408
0
    int src_idx = 0;
1409
0
    for (int size = 4; size <= max_sb_size; size *= 2, src_idx = !src_idx) {
1410
0
      const int dst_idx = !src_idx;
1411
0
      av1_generate_block_hash_value(
1412
0
          intrabc_hash_info, cpi->source, size, block_hash_values[src_idx],
1413
0
          block_hash_values[dst_idx], is_block_same[src_idx],
1414
0
          is_block_same[dst_idx]);
1415
0
      if (size >= min_alloc_size) {
1416
0
        av1_add_to_hash_map_by_row_with_precal_data(
1417
0
            &intrabc_hash_info->intrabc_hash_table, block_hash_values[dst_idx],
1418
0
            is_block_same[dst_idx][2], pic_width, pic_height, size);
1419
0
      }
1420
0
    }
1421
1422
0
    for (k = 0; k < 2; k++) {
1423
0
      for (j = 0; j < 2; j++) {
1424
0
        aom_free(block_hash_values[k][j]);
1425
0
      }
1426
1427
0
      for (j = 0; j < 3; j++) {
1428
0
        aom_free(is_block_same[k][j]);
1429
0
      }
1430
0
    }
1431
0
  }
1432
1433
0
  const CommonQuantParams *quant_params = &cm->quant_params;
1434
0
  for (i = 0; i < MAX_SEGMENTS; ++i) {
1435
0
    const int qindex =
1436
0
        cm->seg.enabled ? av1_get_qindex(&cm->seg, i, quant_params->base_qindex)
1437
0
                        : quant_params->base_qindex;
1438
0
    xd->lossless[i] =
1439
0
        qindex == 0 && quant_params->y_dc_delta_q == 0 &&
1440
0
        quant_params->u_dc_delta_q == 0 && quant_params->u_ac_delta_q == 0 &&
1441
0
        quant_params->v_dc_delta_q == 0 && quant_params->v_ac_delta_q == 0;
1442
0
    if (xd->lossless[i]) cpi->enc_seg.has_lossless_segment = 1;
1443
0
    xd->qindex[i] = qindex;
1444
0
    if (xd->lossless[i]) {
1445
0
      cpi->optimize_seg_arr[i] = NO_TRELLIS_OPT;
1446
0
    } else {
1447
0
      cpi->optimize_seg_arr[i] = cpi->sf.rd_sf.optimize_coefficients;
1448
0
    }
1449
0
  }
1450
0
  features->coded_lossless = is_coded_lossless(cm, xd);
1451
0
  features->all_lossless = features->coded_lossless && !av1_superres_scaled(cm);
1452
1453
  // Fix delta q resolution for the moment
1454
0
  cm->delta_q_info.delta_q_res = 0;
1455
0
  if (cpi->oxcf.q_cfg.aq_mode != CYCLIC_REFRESH_AQ) {
1456
0
    if (deltaq_mode == DELTA_Q_OBJECTIVE)
1457
0
      cm->delta_q_info.delta_q_res = DEFAULT_DELTA_Q_RES_OBJECTIVE;
1458
0
    else if (deltaq_mode == DELTA_Q_PERCEPTUAL)
1459
0
      cm->delta_q_info.delta_q_res = DEFAULT_DELTA_Q_RES_PERCEPTUAL;
1460
0
    else if (deltaq_mode == DELTA_Q_PERCEPTUAL_AI)
1461
0
      cm->delta_q_info.delta_q_res = DEFAULT_DELTA_Q_RES_PERCEPTUAL;
1462
0
    else if (deltaq_mode == DELTA_Q_USER_RATING_BASED)
1463
0
      cm->delta_q_info.delta_q_res = DEFAULT_DELTA_Q_RES_PERCEPTUAL;
1464
0
    else if (deltaq_mode == DELTA_Q_HDR)
1465
0
      cm->delta_q_info.delta_q_res = DEFAULT_DELTA_Q_RES_PERCEPTUAL;
1466
    // Set delta_q_present_flag before it is used for the first time
1467
0
    cm->delta_q_info.delta_lf_res = DEFAULT_DELTA_LF_RES;
1468
0
    cm->delta_q_info.delta_q_present_flag = deltaq_mode != NO_DELTA_Q;
1469
1470
    // Turn off cm->delta_q_info.delta_q_present_flag if objective delta_q
1471
    // is used for ineligible frames. That effectively will turn off row_mt
1472
    // usage. Note objective delta_q and tpl eligible frames are only altref
1473
    // frames currently.
1474
0
    const GF_GROUP *gf_group = &cpi->ppi->gf_group;
1475
0
    if (cm->delta_q_info.delta_q_present_flag) {
1476
0
      if (deltaq_mode == DELTA_Q_OBJECTIVE &&
1477
0
          gf_group->update_type[cpi->gf_frame_index] == LF_UPDATE)
1478
0
        cm->delta_q_info.delta_q_present_flag = 0;
1479
1480
0
      if (deltaq_mode == DELTA_Q_OBJECTIVE &&
1481
0
          cm->delta_q_info.delta_q_present_flag) {
1482
0
        cm->delta_q_info.delta_q_present_flag &= allow_deltaq_mode(cpi);
1483
0
      }
1484
0
    }
1485
1486
    // Reset delta_q_used flag
1487
0
    cpi->deltaq_used = 0;
1488
1489
0
    cm->delta_q_info.delta_lf_present_flag =
1490
0
        cm->delta_q_info.delta_q_present_flag &&
1491
0
        oxcf->tool_cfg.enable_deltalf_mode;
1492
0
    cm->delta_q_info.delta_lf_multi = DEFAULT_DELTA_LF_MULTI;
1493
1494
    // update delta_q_present_flag and delta_lf_present_flag based on
1495
    // base_qindex
1496
0
    cm->delta_q_info.delta_q_present_flag &= quant_params->base_qindex > 0;
1497
0
    cm->delta_q_info.delta_lf_present_flag &= quant_params->base_qindex > 0;
1498
0
  } else {
1499
0
    cpi->cyclic_refresh->actual_num_seg1_blocks = 0;
1500
0
    cpi->cyclic_refresh->actual_num_seg2_blocks = 0;
1501
0
    cpi->cyclic_refresh->cnt_zeromv = 0;
1502
0
  }
1503
1504
0
  av1_frame_init_quantizer(cpi);
1505
1506
0
  init_encode_frame_mb_context(cpi);
1507
0
  set_default_interp_skip_flags(cm, &cpi->interp_search_flags);
1508
0
  if (cm->prev_frame && cm->prev_frame->seg.enabled)
1509
0
    cm->last_frame_seg_map = cm->prev_frame->seg_map;
1510
0
  else
1511
0
    cm->last_frame_seg_map = NULL;
1512
0
  if (features->allow_intrabc || features->coded_lossless) {
1513
0
    av1_set_default_ref_deltas(cm->lf.ref_deltas);
1514
0
    av1_set_default_mode_deltas(cm->lf.mode_deltas);
1515
0
  } else if (cm->prev_frame) {
1516
0
    memcpy(cm->lf.ref_deltas, cm->prev_frame->ref_deltas, REF_FRAMES);
1517
0
    memcpy(cm->lf.mode_deltas, cm->prev_frame->mode_deltas, MAX_MODE_LF_DELTAS);
1518
0
  }
1519
0
  memcpy(cm->cur_frame->ref_deltas, cm->lf.ref_deltas, REF_FRAMES);
1520
0
  memcpy(cm->cur_frame->mode_deltas, cm->lf.mode_deltas, MAX_MODE_LF_DELTAS);
1521
1522
0
  cpi->all_one_sided_refs =
1523
0
      frame_is_intra_only(cm) ? 0 : refs_are_one_sided(cm);
1524
1525
0
  cpi->prune_ref_frame_mask = 0;
1526
  // Figure out which ref frames can be skipped at frame level.
1527
0
  setup_prune_ref_frame_mask(cpi);
1528
1529
0
  x->txfm_search_info.txb_split_count = 0;
1530
#if CONFIG_SPEED_STATS
1531
  x->txfm_search_info.tx_search_count = 0;
1532
#endif  // CONFIG_SPEED_STATS
1533
1534
0
#if !CONFIG_REALTIME_ONLY
1535
#if CONFIG_COLLECT_COMPONENT_TIMING
1536
  start_timing(cpi, av1_compute_global_motion_time);
1537
#endif
1538
0
  av1_compute_global_motion_facade(cpi);
1539
#if CONFIG_COLLECT_COMPONENT_TIMING
1540
  end_timing(cpi, av1_compute_global_motion_time);
1541
#endif
1542
0
#endif  // !CONFIG_REALTIME_ONLY
1543
1544
#if CONFIG_COLLECT_COMPONENT_TIMING
1545
  start_timing(cpi, av1_setup_motion_field_time);
1546
#endif
1547
0
  av1_calculate_ref_frame_side(cm);
1548
0
  if (features->allow_ref_frame_mvs) av1_setup_motion_field(cm);
1549
#if CONFIG_COLLECT_COMPONENT_TIMING
1550
  end_timing(cpi, av1_setup_motion_field_time);
1551
#endif
1552
1553
0
  cm->current_frame.skip_mode_info.skip_mode_flag =
1554
0
      check_skip_mode_enabled(cpi);
1555
1556
  // Initialization of skip mode cost depends on the value of
1557
  // 'skip_mode_flag'. This initialization happens in the function
1558
  // av1_fill_mode_rates(), which is in turn called in
1559
  // av1_initialize_rd_consts(). Thus, av1_initialize_rd_consts()
1560
  // has to be called after 'skip_mode_flag' is initialized.
1561
0
  av1_initialize_rd_consts(cpi);
1562
0
  av1_set_sad_per_bit(cpi, &x->sadperbit, quant_params->base_qindex);
1563
1564
0
  enc_row_mt->sync_read_ptr = av1_row_mt_sync_read_dummy;
1565
0
  enc_row_mt->sync_write_ptr = av1_row_mt_sync_write_dummy;
1566
0
  mt_info->row_mt_enabled = 0;
1567
1568
0
  if (oxcf->row_mt && (mt_info->num_workers > 1)) {
1569
0
    mt_info->row_mt_enabled = 1;
1570
0
    enc_row_mt->sync_read_ptr = av1_row_mt_sync_read;
1571
0
    enc_row_mt->sync_write_ptr = av1_row_mt_sync_write;
1572
0
    av1_encode_tiles_row_mt(cpi);
1573
0
  } else {
1574
0
    if (AOMMIN(mt_info->num_workers, cm->tiles.cols * cm->tiles.rows) > 1)
1575
0
      av1_encode_tiles_mt(cpi);
1576
0
    else
1577
0
      encode_tiles(cpi);
1578
0
  }
1579
1580
  // If intrabc is allowed but never selected, reset the allow_intrabc flag.
1581
0
  if (features->allow_intrabc && !cpi->intrabc_used) {
1582
0
    features->allow_intrabc = 0;
1583
0
  }
1584
0
  if (features->allow_intrabc) {
1585
0
    cm->delta_q_info.delta_lf_present_flag = 0;
1586
0
  }
1587
1588
0
  if (cm->delta_q_info.delta_q_present_flag && cpi->deltaq_used == 0) {
1589
0
    cm->delta_q_info.delta_q_present_flag = 0;
1590
0
  }
1591
1592
  // Set the transform size appropriately before bitstream creation
1593
0
  const MODE_EVAL_TYPE eval_type =
1594
0
      cpi->sf.winner_mode_sf.enable_winner_mode_for_tx_size_srch
1595
0
          ? WINNER_MODE_EVAL
1596
0
          : DEFAULT_EVAL;
1597
0
  const TX_SIZE_SEARCH_METHOD tx_search_type =
1598
0
      cpi->winner_mode_params.tx_size_search_methods[eval_type];
1599
0
  assert(oxcf->txfm_cfg.enable_tx64 || tx_search_type != USE_LARGESTALL);
1600
0
  features->tx_mode = select_tx_mode(cm, tx_search_type);
1601
1602
#if CONFIG_FRAME_PARALLEL_ENCODE
1603
  // Retain the frame level probability update conditions for parallel frames.
1604
  // These conditions will be consumed during postencode stage to update the
1605
  // probability.
1606
  if (cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0) {
1607
    cpi->do_update_frame_probs_txtype[cpi->num_frame_recode] =
1608
        cpi->sf.tx_sf.tx_type_search.prune_tx_type_using_stats;
1609
    cpi->do_update_frame_probs_obmc[cpi->num_frame_recode] =
1610
        (cpi->sf.inter_sf.prune_obmc_prob_thresh > 0 &&
1611
         cpi->sf.inter_sf.prune_obmc_prob_thresh < INT_MAX);
1612
    cpi->do_update_frame_probs_warp[cpi->num_frame_recode] =
1613
        (features->allow_warped_motion &&
1614
         cpi->sf.inter_sf.prune_warped_prob_thresh > 0);
1615
    cpi->do_update_frame_probs_interpfilter[cpi->num_frame_recode] =
1616
        (cm->current_frame.frame_type != KEY_FRAME &&
1617
         cpi->sf.interp_sf.adaptive_interp_filter_search == 2 &&
1618
         features->interp_filter == SWITCHABLE);
1619
  }
1620
#endif
1621
1622
0
  if (cpi->sf.tx_sf.tx_type_search.prune_tx_type_using_stats ||
1623
0
      ((cpi->sf.tx_sf.tx_type_search.fast_inter_tx_type_prob_thresh !=
1624
0
        INT_MAX) &&
1625
0
       (cpi->sf.tx_sf.tx_type_search.fast_inter_tx_type_prob_thresh != 0))) {
1626
0
    const FRAME_UPDATE_TYPE update_type =
1627
0
        get_frame_update_type(&cpi->ppi->gf_group, cpi->gf_frame_index);
1628
0
    for (i = 0; i < TX_SIZES_ALL; i++) {
1629
0
      int sum = 0;
1630
0
      int j;
1631
0
      int left = MAX_TX_TYPE_PROB;
1632
1633
0
      for (j = 0; j < TX_TYPES; j++)
1634
0
        sum += cpi->td.rd_counts.tx_type_used[i][j];
1635
1636
0
      for (j = TX_TYPES - 1; j >= 0; j--) {
1637
0
        int update_txtype_frameprobs = 1;
1638
0
        const int new_prob =
1639
0
            sum ? MAX_TX_TYPE_PROB * cpi->td.rd_counts.tx_type_used[i][j] / sum
1640
0
                : (j ? 0 : MAX_TX_TYPE_PROB);
1641
#if CONFIG_FRAME_PARALLEL_ENCODE
1642
#if CONFIG_FPMT_TEST
1643
        if (cpi->ppi->fpmt_unit_test_cfg == PARALLEL_SIMULATION_ENCODE) {
1644
          if (cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] ==
1645
              0) {
1646
            int prob =
1647
                (temp_frame_probs_simulation->tx_type_probs[update_type][i][j] +
1648
                 new_prob) >>
1649
                1;
1650
            left -= prob;
1651
            if (j == 0) prob += left;
1652
            temp_frame_probs_simulation->tx_type_probs[update_type][i][j] =
1653
                prob;
1654
            // Copy temp_frame_probs_simulation to temp_frame_probs
1655
            for (int update_type_idx = 0; update_type_idx < FRAME_UPDATE_TYPES;
1656
                 update_type_idx++) {
1657
              temp_frame_probs->tx_type_probs[update_type_idx][i][j] =
1658
                  temp_frame_probs_simulation
1659
                      ->tx_type_probs[update_type_idx][i][j];
1660
            }
1661
          }
1662
          update_txtype_frameprobs = 0;
1663
        }
1664
#endif  // CONFIG_FPMT_TEST
1665
        // Track the frame probabilities of parallel encode frames to update
1666
        // during postencode stage.
1667
        if (cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0) {
1668
          update_txtype_frameprobs = 0;
1669
          cpi->frame_new_probs[cpi->num_frame_recode]
1670
              .tx_type_probs[update_type][i][j] = new_prob;
1671
        }
1672
#endif  // CONFIG_FRAME_PARALLEL_ENCODE
1673
0
        if (update_txtype_frameprobs) {
1674
0
          int prob =
1675
0
              (frame_probs->tx_type_probs[update_type][i][j] + new_prob) >> 1;
1676
0
          left -= prob;
1677
0
          if (j == 0) prob += left;
1678
0
          frame_probs->tx_type_probs[update_type][i][j] = prob;
1679
0
        }
1680
0
      }
1681
0
    }
1682
0
  }
1683
1684
0
  if (cpi->sf.inter_sf.prune_obmc_prob_thresh > 0 &&
1685
0
      cpi->sf.inter_sf.prune_obmc_prob_thresh < INT_MAX) {
1686
0
    const FRAME_UPDATE_TYPE update_type =
1687
0
        get_frame_update_type(&cpi->ppi->gf_group, cpi->gf_frame_index);
1688
1689
0
    for (i = 0; i < BLOCK_SIZES_ALL; i++) {
1690
0
      int sum = 0;
1691
0
      int update_obmc_frameprobs = 1;
1692
0
      for (int j = 0; j < 2; j++) sum += cpi->td.rd_counts.obmc_used[i][j];
1693
1694
0
      const int new_prob =
1695
0
          sum ? 128 * cpi->td.rd_counts.obmc_used[i][1] / sum : 0;
1696
#if CONFIG_FRAME_PARALLEL_ENCODE
1697
#if CONFIG_FPMT_TEST
1698
      if (cpi->ppi->fpmt_unit_test_cfg == PARALLEL_SIMULATION_ENCODE) {
1699
        if (cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] == 0) {
1700
          temp_frame_probs_simulation->obmc_probs[update_type][i] =
1701
              (temp_frame_probs_simulation->obmc_probs[update_type][i] +
1702
               new_prob) >>
1703
              1;
1704
          // Copy temp_frame_probs_simulation to temp_frame_probs
1705
          for (int update_type_idx = 0; update_type_idx < FRAME_UPDATE_TYPES;
1706
               update_type_idx++) {
1707
            temp_frame_probs->obmc_probs[update_type_idx][i] =
1708
                temp_frame_probs_simulation->obmc_probs[update_type_idx][i];
1709
          }
1710
        }
1711
        update_obmc_frameprobs = 0;
1712
      }
1713
#endif  // CONFIG_FPMT_TEST
1714
      // Track the frame probabilities of parallel encode frames to update
1715
      // during postencode stage.
1716
      if (cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0) {
1717
        update_obmc_frameprobs = 0;
1718
        cpi->frame_new_probs[cpi->num_frame_recode].obmc_probs[update_type][i] =
1719
            new_prob;
1720
      }
1721
#endif  // CONFIG_FRAME_PARALLEL_ENCODE
1722
0
      if (update_obmc_frameprobs) {
1723
0
        frame_probs->obmc_probs[update_type][i] =
1724
0
            (frame_probs->obmc_probs[update_type][i] + new_prob) >> 1;
1725
0
      }
1726
0
    }
1727
0
  }
1728
1729
0
  if (features->allow_warped_motion &&
1730
0
      cpi->sf.inter_sf.prune_warped_prob_thresh > 0) {
1731
0
    const FRAME_UPDATE_TYPE update_type =
1732
0
        get_frame_update_type(&cpi->ppi->gf_group, cpi->gf_frame_index);
1733
0
    int update_warp_frameprobs = 1;
1734
0
    int sum = 0;
1735
0
    for (i = 0; i < 2; i++) sum += cpi->td.rd_counts.warped_used[i];
1736
0
    const int new_prob = sum ? 128 * cpi->td.rd_counts.warped_used[1] / sum : 0;
1737
#if CONFIG_FRAME_PARALLEL_ENCODE
1738
#if CONFIG_FPMT_TEST
1739
    if (cpi->ppi->fpmt_unit_test_cfg == PARALLEL_SIMULATION_ENCODE) {
1740
      if (cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] == 0) {
1741
        temp_frame_probs_simulation->warped_probs[update_type] =
1742
            (temp_frame_probs_simulation->warped_probs[update_type] +
1743
             new_prob) >>
1744
            1;
1745
        // Copy temp_frame_probs_simulation to temp_frame_probs
1746
        for (int update_type_idx = 0; update_type_idx < FRAME_UPDATE_TYPES;
1747
             update_type_idx++) {
1748
          temp_frame_probs->warped_probs[update_type_idx] =
1749
              temp_frame_probs_simulation->warped_probs[update_type_idx];
1750
        }
1751
      }
1752
      update_warp_frameprobs = 0;
1753
    }
1754
#endif  // CONFIG_FPMT_TEST
1755
    // Track the frame probabilities of parallel encode frames to update
1756
    // during postencode stage.
1757
    if (cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0) {
1758
      update_warp_frameprobs = 0;
1759
      cpi->frame_new_probs[cpi->num_frame_recode].warped_probs[update_type] =
1760
          new_prob;
1761
    }
1762
#endif  // CONFIG_FRAME_PARALLEL_ENCODE
1763
0
    if (update_warp_frameprobs) {
1764
0
      frame_probs->warped_probs[update_type] =
1765
0
          (frame_probs->warped_probs[update_type] + new_prob) >> 1;
1766
0
    }
1767
0
  }
1768
1769
0
  if (cm->current_frame.frame_type != KEY_FRAME &&
1770
0
      cpi->sf.interp_sf.adaptive_interp_filter_search == 2 &&
1771
0
      features->interp_filter == SWITCHABLE) {
1772
0
    const FRAME_UPDATE_TYPE update_type =
1773
0
        get_frame_update_type(&cpi->ppi->gf_group, cpi->gf_frame_index);
1774
1775
0
    for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
1776
0
      int sum = 0;
1777
0
      int j;
1778
0
      int left = 1536;
1779
1780
0
      for (j = 0; j < SWITCHABLE_FILTERS; j++) {
1781
0
        sum += cpi->td.counts->switchable_interp[i][j];
1782
0
      }
1783
1784
0
      for (j = SWITCHABLE_FILTERS - 1; j >= 0; j--) {
1785
0
        int update_interpfilter_frameprobs = 1;
1786
0
        const int new_prob =
1787
0
            sum ? 1536 * cpi->td.counts->switchable_interp[i][j] / sum
1788
0
                : (j ? 0 : 1536);
1789
#if CONFIG_FRAME_PARALLEL_ENCODE
1790
#if CONFIG_FPMT_TEST
1791
        if (cpi->ppi->fpmt_unit_test_cfg == PARALLEL_SIMULATION_ENCODE) {
1792
          if (cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] ==
1793
              0) {
1794
            int prob = (temp_frame_probs_simulation
1795
                            ->switchable_interp_probs[update_type][i][j] +
1796
                        new_prob) >>
1797
                       1;
1798
            left -= prob;
1799
            if (j == 0) prob += left;
1800
            temp_frame_probs_simulation
1801
                ->switchable_interp_probs[update_type][i][j] = prob;
1802
            // Copy temp_frame_probs_simulation to temp_frame_probs
1803
            for (int update_type_idx = 0; update_type_idx < FRAME_UPDATE_TYPES;
1804
                 update_type_idx++) {
1805
              temp_frame_probs->switchable_interp_probs[update_type_idx][i][j] =
1806
                  temp_frame_probs_simulation
1807
                      ->switchable_interp_probs[update_type_idx][i][j];
1808
            }
1809
          }
1810
          update_interpfilter_frameprobs = 0;
1811
        }
1812
#endif  // CONFIG_FPMT_TEST
1813
        // Track the frame probabilities of parallel encode frames to update
1814
        // during postencode stage.
1815
        if (cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0) {
1816
          update_interpfilter_frameprobs = 0;
1817
          cpi->frame_new_probs[cpi->num_frame_recode]
1818
              .switchable_interp_probs[update_type][i][j] = new_prob;
1819
        }
1820
#endif  // CONFIG_FRAME_PARALLEL_ENCODE
1821
0
        if (update_interpfilter_frameprobs) {
1822
0
          int prob = (frame_probs->switchable_interp_probs[update_type][i][j] +
1823
0
                      new_prob) >>
1824
0
                     1;
1825
0
          left -= prob;
1826
0
          if (j == 0) prob += left;
1827
0
          frame_probs->switchable_interp_probs[update_type][i][j] = prob;
1828
0
        }
1829
0
      }
1830
0
    }
1831
0
  }
1832
0
  if (hash_table_created) {
1833
0
    av1_hash_table_destroy(&intrabc_hash_info->intrabc_hash_table);
1834
0
  }
1835
0
}
1836
1837
/*!\brief Setup reference frame buffers and encode a frame
1838
 *
1839
 * \ingroup high_level_algo
1840
 * \callgraph
1841
 * \callergraph
1842
 *
1843
 * \param[in]    cpi    Top-level encoder structure
1844
 */
1845
0
void av1_encode_frame(AV1_COMP *cpi) {
  AV1_COMMON *const cm = &cpi->common;
  CurrentFrame *const current_frame = &cm->current_frame;
  FeatureFlags *const features = &cm->features;
  const int num_planes = av1_num_planes(cm);
  RD_COUNTS *const rdc = &cpi->td.rd_counts;
  // Indicates whether or not to use a default reduced set for ext-tx
  // rather than the potential full set of 16 transforms
  features->reduced_tx_set_used = cpi->oxcf.txfm_cfg.reduced_tx_type_set;

  // Make sure segment_id is no larger than last_active_segid.
  if (cm->seg.enabled && cm->seg.update_map) {
    const int mi_rows = cm->mi_params.mi_rows;
    const int mi_cols = cm->mi_params.mi_cols;
    const int last_active_segid = cm->seg.last_active_segid;
    uint8_t *map = cpi->enc_seg.map;
    // Clamp every mi-unit's segment id in the encoder's segmentation map;
    // the map is laid out row-major with mi_cols entries per row.
    for (int mi_row = 0; mi_row < mi_rows; ++mi_row) {
      for (int mi_col = 0; mi_col < mi_cols; ++mi_col) {
        map[mi_col] = AOMMIN(map[mi_col], last_active_segid);
      }
      map += mi_cols;
    }
  }

  // Reference-frame bookkeeping must happen before the internal encode pass:
  // buffer refs, pruning down to the allowed maximum, relative frame
  // distances, and per-reference sign biases.
  av1_setup_frame_buf_refs(cm);
  enforce_max_ref_frames(cpi, &cpi->ref_frame_flags,
                         cm->cur_frame->ref_display_order_hint,
                         cm->current_frame.display_order_hint);
  set_rel_frame_dist(&cpi->common, &cpi->ref_frame_dist_info,
                     cpi->ref_frame_flags);
  av1_setup_frame_sign_bias(cm);

#if CONFIG_MISMATCH_DEBUG
  mismatch_reset_frame(num_planes);
#else
  // num_planes is only consumed by the mismatch-debug build.
  (void)num_planes;
#endif

  rdc->newmv_or_intra_blocks = 0;

  if (cpi->sf.hl_sf.frame_parameter_update ||
      cpi->sf.rt_sf.use_comp_ref_nonrd) {
    // Full frame-parameter decision path: start from the most permissive
    // settings (compound reference selection, switchable interp filter and
    // motion mode), encode, then tighten any tool the encode never used.
    if (frame_is_intra_only(cm))
      current_frame->reference_mode = SINGLE_REFERENCE;
    else
      current_frame->reference_mode = REFERENCE_MODE_SELECT;

    features->interp_filter = SWITCHABLE;
    if (cm->tiles.large_scale) features->interp_filter = EIGHTTAP_REGULAR;

    features->switchable_motion_mode = 1;

    rdc->compound_ref_used_flag = 0;
    rdc->skip_mode_used_flag = 0;

    encode_frame_internal(cpi);

    if (current_frame->reference_mode == REFERENCE_MODE_SELECT) {
      // Use a flag that includes 4x4 blocks
      if (rdc->compound_ref_used_flag == 0) {
        // No block picked a compound reference: signal single reference.
        current_frame->reference_mode = SINGLE_REFERENCE;
#if CONFIG_ENTROPY_STATS
        av1_zero(cpi->td.counts->comp_inter);
#endif  // CONFIG_ENTROPY_STATS
      }
    }
    // Re-check on the skip mode status as reference mode may have been
    // changed.
    SkipModeInfo *const skip_mode_info = &current_frame->skip_mode_info;
    if (frame_is_intra_only(cm) ||
        current_frame->reference_mode == SINGLE_REFERENCE) {
      skip_mode_info->skip_mode_allowed = 0;
      skip_mode_info->skip_mode_flag = 0;
    }
    // Drop the skip-mode flag if the encode pass never actually used it.
    if (skip_mode_info->skip_mode_flag && rdc->skip_mode_used_flag == 0)
      skip_mode_info->skip_mode_flag = 0;

    if (!cm->tiles.large_scale) {
      // If transform-size search never split a block, signaling per-block
      // sizes is pure overhead; fall back to the largest-transform mode.
      if (features->tx_mode == TX_MODE_SELECT &&
          cpi->td.mb.txfm_search_info.txb_split_count == 0)
        features->tx_mode = TX_MODE_LARGEST;
    }
  } else {
    // This is needed if real-time speed setting is changed on the fly
    // from one using compound prediction to one using single reference.
    if (current_frame->reference_mode == REFERENCE_MODE_SELECT)
      current_frame->reference_mode = SINGLE_REFERENCE;
    encode_frame_internal(cpi);
  }
}