Coverage Report

Created: 2025-08-28 07:12

/src/libvpx/vp9/encoder/vp9_tpl_model.c
Line
Count
Source (jump to first uncovered line)
1
/*
2
 *  Copyright (c) 2023 The WebM project authors. All Rights Reserved.
3
 *
4
 *  Use of this source code is governed by a BSD-style license
5
 *  that can be found in the LICENSE file in the root of the source
6
 *  tree. An additional intellectual property rights grant can be found
7
 *  in the file PATENTS.  All contributing project authors may
8
 *  be found in the AUTHORS file in the root of the source tree.
9
 */
10
11
#include <math.h>
12
13
#include "./vpx_dsp_rtcd.h"
14
#if CONFIG_NON_GREEDY_MV
15
#include "vp9/common/vp9_mvref_common.h"
16
#endif
17
#include "vp9/common/vp9_reconinter.h"
18
#include "vp9/common/vp9_reconintra.h"
19
#include "vp9/common/vp9_scan.h"
20
#include "vp9/encoder/vp9_encoder.h"
21
#include "vp9/encoder/vp9_ext_ratectrl.h"
22
#include "vp9/encoder/vp9_firstpass.h"
23
#include "vp9/encoder/vp9_ratectrl.h"
24
#include "vp9/encoder/vp9_tpl_model.h"
25
#include "vpx/internal/vpx_codec_internal.h"
26
#include "vpx/vpx_codec.h"
27
#include "vpx/vpx_ext_ratectrl.h"
28
29
static int init_gop_frames_rc(VP9_COMP *cpi, GF_PICTURE *gf_picture,
30
0
                              const GF_GROUP *gf_group, int *tpl_group_frames) {
31
0
  VP9_COMMON *cm = &cpi->common;
32
0
  int frame_idx = 0;
33
0
  int i;
34
0
  int extend_frame_count = 0;
35
0
  int pframe_qindex = cpi->tpl_stats[2].base_qindex;
36
0
  int frame_gop_offset = 0;
37
38
0
  int added_overlay = 0;
39
40
0
  RefCntBuffer *frame_bufs = cm->buffer_pool->frame_bufs;
41
0
  int8_t recon_frame_index[REFS_PER_FRAME + MAX_ARF_LAYERS];
42
43
0
  memset(recon_frame_index, -1, sizeof(recon_frame_index));
44
45
0
  for (i = 0; i < FRAME_BUFFERS; ++i) {
46
0
    if (frame_bufs[i].ref_count == 0) {
47
0
      alloc_frame_mvs(cm, i);
48
0
      if (vpx_realloc_frame_buffer(&frame_bufs[i].buf, cm->width, cm->height,
49
0
                                   cm->subsampling_x, cm->subsampling_y,
50
0
#if CONFIG_VP9_HIGHBITDEPTH
51
0
                                   cm->use_highbitdepth,
52
0
#endif
53
0
                                   VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
54
0
                                   NULL, NULL, NULL))
55
0
        vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
56
0
                           "Failed to allocate frame buffer");
57
58
0
      recon_frame_index[frame_idx] = i;
59
0
      ++frame_idx;
60
61
0
      if (frame_idx >= REFS_PER_FRAME + cpi->oxcf.enable_auto_arf) break;
62
0
    }
63
0
  }
64
65
0
  for (i = 0; i < REFS_PER_FRAME + 1; ++i) {
66
0
    assert(recon_frame_index[i] >= 0);
67
0
    cpi->tpl_recon_frames[i] = &frame_bufs[recon_frame_index[i]].buf;
68
0
  }
69
70
0
  *tpl_group_frames = 0;
71
72
0
  int ref_table[3];
73
74
0
  if (gf_group->index == 1 && gf_group->update_type[1] == ARF_UPDATE) {
75
0
    if (gf_group->update_type[0] == KF_UPDATE) {
76
      // This is the only frame in ref buffer. We need it to be on
77
      // gf_picture[0].
78
0
      for (i = 0; i < 3; ++i) ref_table[i] = -REFS_PER_FRAME;
79
80
0
      gf_picture[0].frame =
81
0
          &cm->buffer_pool
82
0
               ->frame_bufs[cm->ref_frame_map[gf_group->update_ref_idx[0]]]
83
0
               .buf;
84
0
      ref_table[gf_group->update_ref_idx[0]] = 0;
85
86
0
      for (i = 0; i < 3; ++i) gf_picture[0].ref_frame[i] = -REFS_PER_FRAME;
87
0
      gf_picture[0].update_type = gf_group->update_type[0];
88
0
    } else {
89
0
      for (i = 0; i < REFS_PER_FRAME; i++) {
90
0
        if (cm->ref_frame_map[i] != -1) {
91
0
          gf_picture[-i].frame =
92
0
              &cm->buffer_pool->frame_bufs[cm->ref_frame_map[i]].buf;
93
0
          ref_table[i] = -i;
94
0
        } else {
95
0
          ref_table[i] = -REFS_PER_FRAME;
96
0
        }
97
0
      }
98
0
      for (i = 0; i < 3; ++i) {
99
0
        gf_picture[0].ref_frame[i] = ref_table[i];
100
0
      }
101
0
    }
102
0
    ++*tpl_group_frames;
103
104
    // Initialize base layer ARF frame
105
0
    gf_picture[1].frame = cpi->Source;
106
0
    for (i = 0; i < 3; ++i) gf_picture[1].ref_frame[i] = ref_table[i];
107
0
    gf_picture[1].update_type = gf_group->update_type[1];
108
0
    ref_table[gf_group->update_ref_idx[1]] = 1;
109
110
0
    ++*tpl_group_frames;
111
0
  } else {
112
0
    assert(gf_group->index == 0);
113
0
    if (gf_group->update_type[0] == KF_UPDATE) {
114
      // This is the only frame in ref buffer. We need it to be on
115
      // gf_picture[0].
116
0
      gf_picture[0].frame = cpi->Source;
117
0
      for (i = 0; i < 3; ++i) gf_picture[0].ref_frame[i] = -REFS_PER_FRAME;
118
0
      gf_picture[0].update_type = gf_group->update_type[0];
119
120
0
      for (i = 0; i < 3; ++i) ref_table[i] = -REFS_PER_FRAME;
121
0
      ref_table[gf_group->update_ref_idx[0]] = 0;
122
0
    } else {
123
      // Initialize ref table
124
0
      for (i = 0; i < REFS_PER_FRAME; i++) {
125
0
        if (cm->ref_frame_map[i] != -1) {
126
0
          gf_picture[-i].frame =
127
0
              &cm->buffer_pool->frame_bufs[cm->ref_frame_map[i]].buf;
128
0
          ref_table[i] = -i;
129
0
        } else {
130
0
          ref_table[i] = -REFS_PER_FRAME;
131
0
        }
132
0
      }
133
0
      for (i = 0; i < 3; ++i) {
134
0
        gf_picture[0].ref_frame[i] = ref_table[i];
135
0
      }
136
0
      gf_picture[0].update_type = gf_group->update_type[0];
137
0
      if (gf_group->update_type[0] != OVERLAY_UPDATE &&
138
0
          gf_group->update_ref_idx[0] != -1) {
139
0
        ref_table[gf_group->update_ref_idx[0]] = 0;
140
0
      }
141
0
    }
142
0
    ++*tpl_group_frames;
143
0
  }
144
145
0
  int has_arf =
146
0
      gf_group->gf_group_size > 1 && gf_group->update_type[1] == ARF_UPDATE &&
147
0
      gf_group->update_type[gf_group->gf_group_size] == OVERLAY_UPDATE;
148
149
  // Initialize P frames
150
0
  for (frame_idx = *tpl_group_frames; frame_idx < MAX_ARF_GOP_SIZE;
151
0
       ++frame_idx) {
152
0
    if (frame_idx >= gf_group->gf_group_size && !has_arf) break;
153
0
    struct lookahead_entry *buf;
154
0
    frame_gop_offset = gf_group->frame_gop_index[frame_idx];
155
0
    buf = vp9_lookahead_peek(cpi->lookahead, frame_gop_offset - 1);
156
157
0
    if (buf == NULL) break;
158
159
0
    gf_picture[frame_idx].frame = &buf->img;
160
0
    for (i = 0; i < 3; ++i) {
161
0
      gf_picture[frame_idx].ref_frame[i] = ref_table[i];
162
0
    }
163
164
0
    if (gf_group->update_type[frame_idx] != OVERLAY_UPDATE &&
165
0
        gf_group->update_ref_idx[frame_idx] != -1) {
166
0
      ref_table[gf_group->update_ref_idx[frame_idx]] = frame_idx;
167
0
    }
168
169
0
    gf_picture[frame_idx].update_type = gf_group->update_type[frame_idx];
170
171
0
    ++*tpl_group_frames;
172
173
    // The length of group of pictures is baseline_gf_interval, plus the
174
    // beginning golden frame from last GOP, plus the last overlay frame in
175
    // the same GOP.
176
0
    if (frame_idx == gf_group->gf_group_size) {
177
0
      added_overlay = 1;
178
179
0
      ++frame_idx;
180
0
      ++frame_gop_offset;
181
0
      break;
182
0
    }
183
184
0
    if (frame_idx == gf_group->gf_group_size - 1 &&
185
0
        gf_group->update_type[gf_group->gf_group_size] != OVERLAY_UPDATE) {
186
0
      ++frame_idx;
187
0
      ++frame_gop_offset;
188
0
      break;
189
0
    }
190
0
  }
191
192
0
  int lst_index = frame_idx - 1;
193
  // Extend two frames outside the current gf group.
194
0
  for (; has_arf && frame_idx < MAX_LAG_BUFFERS && extend_frame_count < 2;
195
0
       ++frame_idx) {
196
0
    struct lookahead_entry *buf =
197
0
        vp9_lookahead_peek(cpi->lookahead, frame_gop_offset - 1);
198
199
0
    if (buf == NULL) break;
200
201
0
    cpi->tpl_stats[frame_idx].base_qindex = pframe_qindex;
202
203
0
    gf_picture[frame_idx].frame = &buf->img;
204
0
    gf_picture[frame_idx].ref_frame[0] = gf_picture[lst_index].ref_frame[0];
205
0
    gf_picture[frame_idx].ref_frame[1] = gf_picture[lst_index].ref_frame[1];
206
0
    gf_picture[frame_idx].ref_frame[2] = gf_picture[lst_index].ref_frame[2];
207
208
0
    if (gf_picture[frame_idx].ref_frame[0] >
209
0
            gf_picture[frame_idx].ref_frame[1] &&
210
0
        gf_picture[frame_idx].ref_frame[0] >
211
0
            gf_picture[frame_idx].ref_frame[2]) {
212
0
      gf_picture[frame_idx].ref_frame[0] = lst_index;
213
0
    } else if (gf_picture[frame_idx].ref_frame[1] >
214
0
                   gf_picture[frame_idx].ref_frame[0] &&
215
0
               gf_picture[frame_idx].ref_frame[1] >
216
0
                   gf_picture[frame_idx].ref_frame[2]) {
217
0
      gf_picture[frame_idx].ref_frame[1] = lst_index;
218
0
    } else {
219
0
      gf_picture[frame_idx].ref_frame[2] = lst_index;
220
0
    }
221
222
0
    gf_picture[frame_idx].update_type = LF_UPDATE;
223
0
    lst_index = frame_idx;
224
0
    ++*tpl_group_frames;
225
0
    ++extend_frame_count;
226
0
    ++frame_gop_offset;
227
0
  }
228
229
0
  return extend_frame_count + added_overlay;
230
0
}
231
232
static int init_gop_frames(VP9_COMP *cpi, GF_PICTURE *gf_picture,
233
0
                           const GF_GROUP *gf_group, int *tpl_group_frames) {
234
0
  if (cpi->ext_ratectrl.ready &&
235
0
      (cpi->ext_ratectrl.funcs.rc_type & VPX_RC_GOP) != 0) {
236
0
    return init_gop_frames_rc(cpi, gf_picture, gf_group, tpl_group_frames);
237
0
  }
238
239
0
  VP9_COMMON *cm = &cpi->common;
240
0
  int frame_idx = 0;
241
0
  int i;
242
0
  int gld_index = -1;
243
0
  int alt_index = -2;
244
0
  int lst_index = -1;
245
0
  int arf_index_stack[MAX_ARF_LAYERS];
246
0
  int arf_stack_size = 0;
247
0
  int extend_frame_count = 0;
248
0
  int pframe_qindex = cpi->tpl_stats[2].base_qindex;
249
0
  int frame_gop_offset = 0;
250
251
0
  RefCntBuffer *frame_bufs = cm->buffer_pool->frame_bufs;
252
0
  int8_t recon_frame_index[REFS_PER_FRAME + MAX_ARF_LAYERS];
253
254
0
  memset(recon_frame_index, -1, sizeof(recon_frame_index));
255
0
  stack_init(arf_index_stack, MAX_ARF_LAYERS);
256
257
0
  for (i = 0; i < FRAME_BUFFERS; ++i) {
258
0
    if (frame_bufs[i].ref_count == 0) {
259
0
      alloc_frame_mvs(cm, i);
260
0
      if (vpx_realloc_frame_buffer(&frame_bufs[i].buf, cm->width, cm->height,
261
0
                                   cm->subsampling_x, cm->subsampling_y,
262
0
#if CONFIG_VP9_HIGHBITDEPTH
263
0
                                   cm->use_highbitdepth,
264
0
#endif
265
0
                                   VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
266
0
                                   NULL, NULL, NULL))
267
0
        vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
268
0
                           "Failed to allocate frame buffer");
269
270
0
      recon_frame_index[frame_idx] = i;
271
0
      ++frame_idx;
272
273
0
      if (frame_idx >= REFS_PER_FRAME + cpi->oxcf.enable_auto_arf) break;
274
0
    }
275
0
  }
276
277
0
  for (i = 0; i < REFS_PER_FRAME + 1; ++i) {
278
0
    assert(recon_frame_index[i] >= 0);
279
0
    cpi->tpl_recon_frames[i] = &frame_bufs[recon_frame_index[i]].buf;
280
0
  }
281
282
0
  *tpl_group_frames = 0;
283
284
  // Initialize Golden reference frame.
285
0
  gf_picture[0].frame = get_ref_frame_buffer(cpi, GOLDEN_FRAME);
286
0
  for (i = 0; i < 3; ++i) gf_picture[0].ref_frame[i] = -REFS_PER_FRAME;
287
0
  gf_picture[0].update_type = gf_group->update_type[0];
288
0
  gld_index = 0;
289
0
  ++*tpl_group_frames;
290
291
0
  gf_picture[-1].frame = get_ref_frame_buffer(cpi, LAST_FRAME);
292
0
  gf_picture[-2].frame = get_ref_frame_buffer(cpi, ALTREF_FRAME);
293
294
  // Initialize base layer ARF frame
295
0
  gf_picture[1].frame = cpi->Source;
296
0
  gf_picture[1].ref_frame[0] = gld_index;
297
0
  gf_picture[1].ref_frame[1] = lst_index;
298
0
  gf_picture[1].ref_frame[2] = alt_index;
299
0
  gf_picture[1].update_type = gf_group->update_type[1];
300
0
  alt_index = 1;
301
0
  ++*tpl_group_frames;
302
303
  // Initialize P frames
304
0
  for (frame_idx = 2; frame_idx < MAX_ARF_GOP_SIZE; ++frame_idx) {
305
0
    struct lookahead_entry *buf;
306
0
    frame_gop_offset = gf_group->frame_gop_index[frame_idx];
307
0
    buf = vp9_lookahead_peek(cpi->lookahead, frame_gop_offset - 1);
308
309
0
    if (buf == NULL) break;
310
311
0
    gf_picture[frame_idx].frame = &buf->img;
312
0
    gf_picture[frame_idx].ref_frame[0] = gld_index;
313
0
    gf_picture[frame_idx].ref_frame[1] = lst_index;
314
0
    gf_picture[frame_idx].ref_frame[2] = alt_index;
315
0
    gf_picture[frame_idx].update_type = gf_group->update_type[frame_idx];
316
317
0
    switch (gf_group->update_type[frame_idx]) {
318
0
      case ARF_UPDATE:
319
0
        stack_push(arf_index_stack, alt_index, arf_stack_size);
320
0
        ++arf_stack_size;
321
0
        alt_index = frame_idx;
322
0
        break;
323
0
      case LF_UPDATE: lst_index = frame_idx; break;
324
0
      case OVERLAY_UPDATE:
325
0
        gld_index = frame_idx;
326
0
        alt_index = stack_pop(arf_index_stack, arf_stack_size);
327
0
        --arf_stack_size;
328
0
        break;
329
0
      case USE_BUF_FRAME:
330
0
        lst_index = alt_index;
331
0
        alt_index = stack_pop(arf_index_stack, arf_stack_size);
332
0
        --arf_stack_size;
333
0
        break;
334
0
      default: break;
335
0
    }
336
337
0
    ++*tpl_group_frames;
338
339
    // The length of group of pictures is baseline_gf_interval, plus the
340
    // beginning golden frame from last GOP, plus the last overlay frame in
341
    // the same GOP.
342
0
    if (frame_idx == gf_group->gf_group_size) break;
343
0
  }
344
345
0
  alt_index = -1;
346
0
  ++frame_idx;
347
0
  ++frame_gop_offset;
348
349
  // Extend two frames outside the current gf group.
350
0
  for (; frame_idx < MAX_LAG_BUFFERS && extend_frame_count < 2; ++frame_idx) {
351
0
    struct lookahead_entry *buf =
352
0
        vp9_lookahead_peek(cpi->lookahead, frame_gop_offset - 1);
353
354
0
    if (buf == NULL) break;
355
356
0
    cpi->tpl_stats[frame_idx].base_qindex = pframe_qindex;
357
358
0
    gf_picture[frame_idx].frame = &buf->img;
359
0
    gf_picture[frame_idx].ref_frame[0] = gld_index;
360
0
    gf_picture[frame_idx].ref_frame[1] = lst_index;
361
0
    gf_picture[frame_idx].ref_frame[2] = alt_index;
362
0
    gf_picture[frame_idx].update_type = LF_UPDATE;
363
0
    lst_index = frame_idx;
364
0
    ++*tpl_group_frames;
365
0
    ++extend_frame_count;
366
0
    ++frame_gop_offset;
367
0
  }
368
369
0
  return extend_frame_count;
370
0
}
371
372
0
static void init_tpl_stats(VP9_COMP *cpi) {
373
0
  int frame_idx;
374
0
  for (frame_idx = 0; frame_idx < MAX_ARF_GOP_SIZE; ++frame_idx) {
375
0
    TplDepFrame *tpl_frame = &cpi->tpl_stats[frame_idx];
376
0
    memset(tpl_frame->tpl_stats_ptr, 0,
377
0
           tpl_frame->height * tpl_frame->width *
378
0
               sizeof(*tpl_frame->tpl_stats_ptr));
379
0
    tpl_frame->is_valid = 0;
380
0
  }
381
0
}
382
383
3.99k
static void free_tpl_frame_stats_list(VpxTplGopStats *tpl_gop_stats) {
384
3.99k
  int frame_idx;
385
3.99k
  for (frame_idx = 0; frame_idx < tpl_gop_stats->size; ++frame_idx) {
386
0
    vpx_free(tpl_gop_stats->frame_stats_list[frame_idx].block_stats_list);
387
0
  }
388
3.99k
  vpx_free(tpl_gop_stats->frame_stats_list);
389
3.99k
}
390
391
static void init_tpl_stats_before_propagation(
392
    struct vpx_internal_error_info *error_info, VpxTplGopStats *tpl_gop_stats,
393
    TplDepFrame *tpl_stats, int tpl_gop_frames, int frame_width,
394
0
    int frame_height) {
395
0
  int frame_idx;
396
0
  free_tpl_frame_stats_list(tpl_gop_stats);
397
0
  CHECK_MEM_ERROR(
398
0
      error_info, tpl_gop_stats->frame_stats_list,
399
0
      vpx_calloc(tpl_gop_frames, sizeof(*tpl_gop_stats->frame_stats_list)));
400
0
  tpl_gop_stats->size = tpl_gop_frames;
401
0
  for (frame_idx = 0; frame_idx < tpl_gop_frames; ++frame_idx) {
402
0
    const int mi_rows = tpl_stats[frame_idx].mi_rows;
403
0
    const int mi_cols = tpl_stats[frame_idx].mi_cols;
404
0
    CHECK_MEM_ERROR(
405
0
        error_info, tpl_gop_stats->frame_stats_list[frame_idx].block_stats_list,
406
0
        vpx_calloc(
407
0
            mi_rows * mi_cols,
408
0
            sizeof(
409
0
                *tpl_gop_stats->frame_stats_list[frame_idx].block_stats_list)));
410
0
    tpl_gop_stats->frame_stats_list[frame_idx].num_blocks = mi_rows * mi_cols;
411
0
    tpl_gop_stats->frame_stats_list[frame_idx].frame_width = frame_width;
412
0
    tpl_gop_stats->frame_stats_list[frame_idx].frame_height = frame_height;
413
0
  }
414
0
}
415
416
#if CONFIG_NON_GREEDY_MV
417
static uint32_t full_pixel_motion_search(VP9_COMP *cpi, ThreadData *td,
418
                                         MotionField *motion_field,
419
                                         int frame_idx, uint8_t *cur_frame_buf,
420
                                         uint8_t *ref_frame_buf, int stride,
421
                                         BLOCK_SIZE bsize, int mi_row,
422
                                         int mi_col, MV *mv) {
423
  MACROBLOCK *const x = &td->mb;
424
  MACROBLOCKD *const xd = &x->e_mbd;
425
  MV_SPEED_FEATURES *const mv_sf = &cpi->sf.mv;
426
  int step_param;
427
  uint32_t bestsme = UINT_MAX;
428
  const MvLimits tmp_mv_limits = x->mv_limits;
429
  // lambda is used to adjust the importance of motion vector consistency.
430
  // TODO(angiebird): Figure out lambda's proper value.
431
  const int lambda = cpi->tpl_stats[frame_idx].lambda;
432
  int_mv nb_full_mvs[NB_MVS_NUM];
433
  int nb_full_mv_num;
434
435
  MV best_ref_mv1 = { 0, 0 };
436
  MV best_ref_mv1_full; /* full-pixel value of best_ref_mv1 */
437
438
  best_ref_mv1_full.col = best_ref_mv1.col >> 3;
439
  best_ref_mv1_full.row = best_ref_mv1.row >> 3;
440
441
  // Setup frame pointers
442
  x->plane[0].src.buf = cur_frame_buf;
443
  x->plane[0].src.stride = stride;
444
  xd->plane[0].pre[0].buf = ref_frame_buf;
445
  xd->plane[0].pre[0].stride = stride;
446
447
  step_param = mv_sf->reduce_first_step_size;
448
  step_param = VPXMIN(step_param, MAX_MVSEARCH_STEPS - 2);
449
450
  vp9_set_mv_search_range(&x->mv_limits, &best_ref_mv1);
451
452
  nb_full_mv_num =
453
      vp9_prepare_nb_full_mvs(motion_field, mi_row, mi_col, nb_full_mvs);
454
  vp9_full_pixel_diamond_new(cpi, x, bsize, &best_ref_mv1_full, step_param,
455
                             lambda, 1, nb_full_mvs, nb_full_mv_num, mv);
456
457
  /* restore UMV window */
458
  x->mv_limits = tmp_mv_limits;
459
460
  return bestsme;
461
}
462
463
static uint32_t sub_pixel_motion_search(VP9_COMP *cpi, ThreadData *td,
464
                                        uint8_t *cur_frame_buf,
465
                                        uint8_t *ref_frame_buf, int stride,
466
                                        BLOCK_SIZE bsize, MV *mv) {
467
  MACROBLOCK *const x = &td->mb;
468
  MACROBLOCKD *const xd = &x->e_mbd;
469
  MV_SPEED_FEATURES *const mv_sf = &cpi->sf.mv;
470
  uint32_t bestsme = UINT_MAX;
471
  uint32_t distortion;
472
  uint32_t sse;
473
  int cost_list[5];
474
475
  MV best_ref_mv1 = { 0, 0 };
476
477
  // Setup frame pointers
478
  x->plane[0].src.buf = cur_frame_buf;
479
  x->plane[0].src.stride = stride;
480
  xd->plane[0].pre[0].buf = ref_frame_buf;
481
  xd->plane[0].pre[0].stride = stride;
482
483
  // TODO(yunqing): may use higher tap interp filter than 2 taps.
484
  // Ignore mv costing by sending NULL pointer instead of cost array
485
  bestsme = cpi->find_fractional_mv_step(
486
      x, mv, &best_ref_mv1, cpi->common.allow_high_precision_mv, x->errorperbit,
487
      &cpi->fn_ptr[bsize], 0, mv_sf->subpel_search_level,
488
      cond_cost_list(cpi, cost_list), NULL, NULL, &distortion, &sse, NULL, 0, 0,
489
      USE_2_TAPS);
490
491
  return bestsme;
492
}
493
494
#else  // CONFIG_NON_GREEDY_MV
495
static uint32_t motion_compensated_prediction(VP9_COMP *cpi, ThreadData *td,
496
                                              uint8_t *cur_frame_buf,
497
                                              uint8_t *ref_frame_buf,
498
                                              int stride, BLOCK_SIZE bsize,
499
0
                                              MV *mv) {
500
0
  MACROBLOCK *const x = &td->mb;
501
0
  MACROBLOCKD *const xd = &x->e_mbd;
502
0
  MV_SPEED_FEATURES *const mv_sf = &cpi->sf.mv;
503
0
  const SEARCH_METHODS search_method = NSTEP;
504
0
  int step_param;
505
0
  int sadpb = x->sadperbit16;
506
0
  uint32_t bestsme = UINT_MAX;
507
0
  uint32_t distortion;
508
0
  uint32_t sse;
509
0
  int cost_list[5];
510
0
  const MvLimits tmp_mv_limits = x->mv_limits;
511
512
0
  MV best_ref_mv1 = { 0, 0 };
513
0
  MV best_ref_mv1_full; /* full-pixel value of best_ref_mv1 */
514
515
0
  best_ref_mv1_full.col = best_ref_mv1.col >> 3;
516
0
  best_ref_mv1_full.row = best_ref_mv1.row >> 3;
517
518
  // Setup frame pointers
519
0
  x->plane[0].src.buf = cur_frame_buf;
520
0
  x->plane[0].src.stride = stride;
521
0
  xd->plane[0].pre[0].buf = ref_frame_buf;
522
0
  xd->plane[0].pre[0].stride = stride;
523
524
0
  step_param = mv_sf->reduce_first_step_size;
525
0
  step_param = VPXMIN(step_param, MAX_MVSEARCH_STEPS - 2);
526
527
0
  vp9_set_mv_search_range(&x->mv_limits, &best_ref_mv1);
528
529
0
  vp9_full_pixel_search(cpi, x, bsize, &best_ref_mv1_full, step_param,
530
0
                        search_method, sadpb, cond_cost_list(cpi, cost_list),
531
0
                        &best_ref_mv1, mv, 0, 0);
532
533
  /* restore UMV window */
534
0
  x->mv_limits = tmp_mv_limits;
535
536
  // TODO(yunqing): may use higher tap interp filter than 2 taps.
537
  // Ignore mv costing by sending NULL pointer instead of cost array
538
0
  bestsme = cpi->find_fractional_mv_step(
539
0
      x, mv, &best_ref_mv1, cpi->common.allow_high_precision_mv, x->errorperbit,
540
0
      &cpi->fn_ptr[bsize], 0, mv_sf->subpel_search_level,
541
0
      cond_cost_list(cpi, cost_list), NULL, NULL, &distortion, &sse, NULL, 0, 0,
542
0
      USE_2_TAPS);
543
544
0
  return bestsme;
545
0
}
546
#endif
547
548
static int get_overlap_area(int grid_pos_row, int grid_pos_col, int ref_pos_row,
549
0
                            int ref_pos_col, int block, BLOCK_SIZE bsize) {
550
0
  int width = 0, height = 0;
551
0
  int bw = 4 << b_width_log2_lookup[bsize];
552
0
  int bh = 4 << b_height_log2_lookup[bsize];
553
554
0
  switch (block) {
555
0
    case 0:
556
0
      width = grid_pos_col + bw - ref_pos_col;
557
0
      height = grid_pos_row + bh - ref_pos_row;
558
0
      break;
559
0
    case 1:
560
0
      width = ref_pos_col + bw - grid_pos_col;
561
0
      height = grid_pos_row + bh - ref_pos_row;
562
0
      break;
563
0
    case 2:
564
0
      width = grid_pos_col + bw - ref_pos_col;
565
0
      height = ref_pos_row + bh - grid_pos_row;
566
0
      break;
567
0
    case 3:
568
0
      width = ref_pos_col + bw - grid_pos_col;
569
0
      height = ref_pos_row + bh - grid_pos_row;
570
0
      break;
571
0
    default: assert(0);
572
0
  }
573
574
0
  return width * height;
575
0
}
576
577
0
static int round_floor(int ref_pos, int bsize_pix) {
578
0
  int round;
579
0
  if (ref_pos < 0)
580
0
    round = -(1 + (-ref_pos - 1) / bsize_pix);
581
0
  else
582
0
    round = ref_pos / bsize_pix;
583
584
0
  return round;
585
0
}
586
587
static void tpl_model_store(TplDepStats *tpl_stats, int mi_row, int mi_col,
588
0
                            BLOCK_SIZE bsize, int stride) {
589
0
  const int mi_height = num_8x8_blocks_high_lookup[bsize];
590
0
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
591
0
  const TplDepStats *src_stats = &tpl_stats[mi_row * stride + mi_col];
592
0
  int idx, idy;
593
594
0
  for (idy = 0; idy < mi_height; ++idy) {
595
0
    for (idx = 0; idx < mi_width; ++idx) {
596
0
      TplDepStats *tpl_ptr = &tpl_stats[(mi_row + idy) * stride + mi_col + idx];
597
0
      const int64_t mc_flow = tpl_ptr->mc_flow;
598
0
      const int64_t mc_ref_cost = tpl_ptr->mc_ref_cost;
599
0
      *tpl_ptr = *src_stats;
600
0
      tpl_ptr->mc_flow = mc_flow;
601
0
      tpl_ptr->mc_ref_cost = mc_ref_cost;
602
0
      tpl_ptr->mc_dep_cost = tpl_ptr->intra_cost + tpl_ptr->mc_flow;
603
0
    }
604
0
  }
605
0
}
606
607
static void tpl_store_before_propagation(VpxTplBlockStats *tpl_block_stats,
608
                                         TplDepStats *tpl_stats, int mi_row,
609
                                         int mi_col, BLOCK_SIZE bsize,
610
                                         int src_stride, int64_t recon_error,
611
                                         int64_t pred_error, int64_t rate_cost,
612
                                         int ref_frame_idx, int mi_rows,
613
0
                                         int mi_cols) {
614
0
  const int mi_height = num_8x8_blocks_high_lookup[bsize];
615
0
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
616
0
  const TplDepStats *src_stats = &tpl_stats[mi_row * src_stride + mi_col];
617
0
  int idx, idy;
618
619
0
  for (idy = 0; idy < mi_height; ++idy) {
620
0
    for (idx = 0; idx < mi_width; ++idx) {
621
0
      if (mi_row + idy >= mi_rows || mi_col + idx >= mi_cols) continue;
622
0
      VpxTplBlockStats *tpl_block_stats_ptr =
623
0
          &tpl_block_stats[(mi_row + idy) * mi_cols + mi_col + idx];
624
0
      tpl_block_stats_ptr->row = mi_row * 8 + idy * 8;
625
0
      tpl_block_stats_ptr->col = mi_col * 8 + idx * 8;
626
0
      tpl_block_stats_ptr->inter_cost = src_stats->inter_cost;
627
0
      tpl_block_stats_ptr->intra_cost = src_stats->intra_cost;
628
      // inter/intra_cost here is calculated with SATD which should be close
629
      // enough to be used as inter/intra_pred_error
630
0
      tpl_block_stats_ptr->inter_pred_err = src_stats->inter_cost;
631
0
      tpl_block_stats_ptr->intra_pred_err = src_stats->intra_cost;
632
0
      tpl_block_stats_ptr->srcrf_dist = recon_error << TPL_DEP_COST_SCALE_LOG2;
633
0
      tpl_block_stats_ptr->srcrf_rate = rate_cost << TPL_DEP_COST_SCALE_LOG2;
634
0
      tpl_block_stats_ptr->pred_error = pred_error << TPL_DEP_COST_SCALE_LOG2;
635
0
      tpl_block_stats_ptr->mv_r = (src_stats->mv.as_mv.row >= 0 ? 1 : -1) *
636
0
                                  (abs(src_stats->mv.as_mv.row) + 4) / 8;
637
0
      tpl_block_stats_ptr->mv_c = (src_stats->mv.as_mv.col >= 0 ? 1 : -1) *
638
0
                                  (abs(src_stats->mv.as_mv.col) + 4) / 8;
639
0
      tpl_block_stats_ptr->ref_frame_index = ref_frame_idx;
640
0
    }
641
0
  }
642
0
}
643
644
static void tpl_model_update_b(TplDepFrame *tpl_frame, TplDepStats *tpl_stats,
645
0
                               int mi_row, int mi_col, const BLOCK_SIZE bsize) {
646
0
  if (tpl_stats->ref_frame_index < 0) return;
647
648
0
  TplDepFrame *ref_tpl_frame = &tpl_frame[tpl_stats->ref_frame_index];
649
0
  TplDepStats *ref_stats = ref_tpl_frame->tpl_stats_ptr;
650
0
  MV mv = tpl_stats->mv.as_mv;
651
0
  int mv_row = mv.row >> 3;
652
0
  int mv_col = mv.col >> 3;
653
654
0
  int ref_pos_row = mi_row * MI_SIZE + mv_row;
655
0
  int ref_pos_col = mi_col * MI_SIZE + mv_col;
656
657
0
  const int bw = 4 << b_width_log2_lookup[bsize];
658
0
  const int bh = 4 << b_height_log2_lookup[bsize];
659
0
  const int mi_height = num_8x8_blocks_high_lookup[bsize];
660
0
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
661
0
  const int pix_num = bw * bh;
662
663
  // top-left on grid block location in pixel
664
0
  int grid_pos_row_base = round_floor(ref_pos_row, bh) * bh;
665
0
  int grid_pos_col_base = round_floor(ref_pos_col, bw) * bw;
666
0
  int block;
667
668
0
  for (block = 0; block < 4; ++block) {
669
0
    int grid_pos_row = grid_pos_row_base + bh * (block >> 1);
670
0
    int grid_pos_col = grid_pos_col_base + bw * (block & 0x01);
671
672
0
    if (grid_pos_row >= 0 && grid_pos_row < ref_tpl_frame->mi_rows * MI_SIZE &&
673
0
        grid_pos_col >= 0 && grid_pos_col < ref_tpl_frame->mi_cols * MI_SIZE) {
674
0
      int overlap_area = get_overlap_area(
675
0
          grid_pos_row, grid_pos_col, ref_pos_row, ref_pos_col, block, bsize);
676
0
      int ref_mi_row = round_floor(grid_pos_row, bh) * mi_height;
677
0
      int ref_mi_col = round_floor(grid_pos_col, bw) * mi_width;
678
679
0
      int64_t mc_flow = tpl_stats->mc_dep_cost -
680
0
                        (tpl_stats->mc_dep_cost * tpl_stats->inter_cost) /
681
0
                            tpl_stats->intra_cost;
682
683
0
      int idx, idy;
684
685
0
      for (idy = 0; idy < mi_height; ++idy) {
686
0
        for (idx = 0; idx < mi_width; ++idx) {
687
0
          TplDepStats *des_stats =
688
0
              &ref_stats[(ref_mi_row + idy) * ref_tpl_frame->stride +
689
0
                         (ref_mi_col + idx)];
690
691
0
          des_stats->mc_flow += (mc_flow * overlap_area) / pix_num;
692
0
          des_stats->mc_ref_cost +=
693
0
              ((tpl_stats->intra_cost - tpl_stats->inter_cost) * overlap_area) /
694
0
              pix_num;
695
0
          assert(overlap_area >= 0);
696
0
        }
697
0
      }
698
0
    }
699
0
  }
700
0
}
701
702
static void tpl_model_update(TplDepFrame *tpl_frame, TplDepStats *tpl_stats,
703
0
                             int mi_row, int mi_col, const BLOCK_SIZE bsize) {
704
0
  int idx, idy;
705
0
  const int mi_height = num_8x8_blocks_high_lookup[bsize];
706
0
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
707
708
0
  for (idy = 0; idy < mi_height; ++idy) {
709
0
    for (idx = 0; idx < mi_width; ++idx) {
710
0
      TplDepStats *tpl_ptr =
711
0
          &tpl_stats[(mi_row + idy) * tpl_frame->stride + (mi_col + idx)];
712
0
      tpl_model_update_b(tpl_frame, tpl_ptr, mi_row + idy, mi_col + idx,
713
0
                         BLOCK_8X8);
714
0
    }
715
0
  }
716
0
}
717
718
static void get_quantize_error(MACROBLOCK *x, int plane, tran_low_t *coeff,
719
                               tran_low_t *qcoeff, tran_low_t *dqcoeff,
720
                               TX_SIZE tx_size, int64_t *recon_error,
721
0
                               int64_t *sse, uint16_t *eob) {
722
0
  MACROBLOCKD *const xd = &x->e_mbd;
723
0
  const struct macroblock_plane *const p = &x->plane[plane];
724
0
  const struct macroblockd_plane *const pd = &xd->plane[plane];
725
0
  const ScanOrder *const scan_order = &vp9_default_scan_orders[tx_size];
726
0
  int pix_num = 1 << num_pels_log2_lookup[txsize_to_bsize[tx_size]];
727
0
  const int shift = tx_size == TX_32X32 ? 0 : 2;
728
729
  // skip block condition should be handled before this is called.
730
0
  assert(!x->skip_block);
731
732
0
#if CONFIG_VP9_HIGHBITDEPTH
733
0
  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
734
0
    vp9_highbd_quantize_fp_32x32(coeff, pix_num, p, qcoeff, dqcoeff,
735
0
                                 pd->dequant, eob, scan_order);
736
0
  } else {
737
0
    vp9_quantize_fp_32x32(coeff, pix_num, p, qcoeff, dqcoeff, pd->dequant, eob,
738
0
                          scan_order);
739
0
  }
740
#else
741
  vp9_quantize_fp_32x32(coeff, pix_num, p, qcoeff, dqcoeff, pd->dequant, eob,
742
                        scan_order);
743
#endif  // CONFIG_VP9_HIGHBITDEPTH
744
745
0
  *recon_error = vp9_block_error(coeff, dqcoeff, pix_num, sse) >> shift;
746
0
  *recon_error = VPXMAX(*recon_error, 1);
747
748
0
  *sse = (*sse) >> shift;
749
0
  *sse = VPXMAX(*sse, 1);
750
0
}
751
752
#if CONFIG_VP9_HIGHBITDEPTH
753
void vp9_highbd_wht_fwd_txfm(int16_t *src_diff, int bw, tran_low_t *coeff,
754
0
                             TX_SIZE tx_size) {
755
  // TODO(sdeng): Implement SIMD based high bit-depth Hadamard transforms.
756
0
  switch (tx_size) {
757
0
    case TX_8X8: vpx_highbd_hadamard_8x8(src_diff, bw, coeff); break;
758
0
    case TX_16X16: vpx_highbd_hadamard_16x16(src_diff, bw, coeff); break;
759
0
    case TX_32X32: vpx_highbd_hadamard_32x32(src_diff, bw, coeff); break;
760
0
    default: assert(0);
761
0
  }
762
0
}
763
#endif  // CONFIG_VP9_HIGHBITDEPTH
764
765
void vp9_wht_fwd_txfm(int16_t *src_diff, int bw, tran_low_t *coeff,
766
0
                      TX_SIZE tx_size) {
767
0
  switch (tx_size) {
768
0
    case TX_8X8: vpx_hadamard_8x8(src_diff, bw, coeff); break;
769
0
    case TX_16X16: vpx_hadamard_16x16(src_diff, bw, coeff); break;
770
0
    case TX_32X32: vpx_hadamard_32x32(src_diff, bw, coeff); break;
771
0
    default: assert(0);
772
0
  }
773
0
}
774
775
// Sets the full-pixel motion vector search range for the block at
// (mi_row, mi_col) so that motion compensation may reach slightly past the
// frame edge (by 17 - 2 * VP9_INTERP_EXTEND pixels, accounting for the
// interpolation filter extension).
static void set_mv_limits(const VP9_COMMON *cm, MACROBLOCK *x, int mi_row,
                          int mi_col) {
  const int border = 17 - 2 * VP9_INTERP_EXTEND;
  x->mv_limits.row_min = -(mi_row * MI_SIZE + border);
  x->mv_limits.row_max = (cm->mi_rows - 1 - mi_row) * MI_SIZE + border;
  x->mv_limits.col_min = -(mi_col * MI_SIZE + border);
  x->mv_limits.col_max = (cm->mi_cols - 1 - mi_col) * MI_SIZE + border;
}
784
785
0
// Crude rate proxy for a quantized transform block: walks the default scan
// order up to eob and accumulates an approximate bit count per coefficient
// (magnitude bits plus sign/significance). Returned in VP9_PROB_COST_SHIFT
// fixed-point units.
static int rate_estimator(const tran_low_t *qcoeff, int eob, TX_SIZE tx_size) {
  const ScanOrder *const scan_order = &vp9_scan_orders[tx_size][DCT_DCT];
  int bits = 1;
  int i;
  assert((1 << num_pels_log2_lookup[txsize_to_bsize[tx_size]]) >= eob);
  for (i = 0; i < eob; ++i) {
    const unsigned int abs_level = abs(qcoeff[scan_order->scan[i]]);
    bits += get_msb(abs_level + 1) + 1 + (abs_level > 0);
  }
  return bits << VP9_PROB_COST_SHIFT;
}
797
798
// Estimates the best intra and inter prediction cost for one TPL block.
// Searches all intra modes (DC..TM) and all available reference frames,
// using SATD of the Hadamard-transformed residual as the cost metric.
// Writes the winning costs/mv into tpl_stats and returns, via out params,
// the reconstruction error, rate cost, and SSE of the best inter reference
// (*ref_frame_idx is -1 if no reference frame was available).
static void mode_estimation(VP9_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
                            struct scale_factors *sf, GF_PICTURE *gf_picture,
                            int frame_idx, TplDepFrame *tpl_frame,
                            int16_t *src_diff, tran_low_t *coeff,
                            tran_low_t *qcoeff, tran_low_t *dqcoeff, int mi_row,
                            int mi_col, BLOCK_SIZE bsize, TX_SIZE tx_size,
                            YV12_BUFFER_CONFIG *ref_frame[], uint8_t *predictor,
                            int64_t *recon_error, int64_t *rate_cost,
                            int64_t *sse, int *ref_frame_idx) {
  VP9_COMMON *cm = &cpi->common;
  ThreadData *td = &cpi->td;

  const int bw = 4 << b_width_log2_lookup[bsize];
  const int bh = 4 << b_height_log2_lookup[bsize];
  const int pix_num = bw * bh;
  int best_rf_idx = -1;
  int_mv best_mv;
  int64_t best_inter_cost = INT64_MAX;
  int64_t inter_cost;
  int rf_idx;
  const InterpKernel *const kernel = vp9_filter_kernels[EIGHTTAP];

  int64_t best_intra_cost = INT64_MAX;
  int64_t intra_cost;
  PREDICTION_MODE mode;
  int mb_y_offset = mi_row * MI_SIZE * xd->cur_buf->y_stride + mi_col * MI_SIZE;
  MODE_INFO mi_above, mi_left;
  const int mi_height = num_8x8_blocks_high_lookup[bsize];
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
  TplDepStats *tpl_stats =
      &tpl_frame->tpl_stats_ptr[mi_row * tpl_frame->stride + mi_col];

  // Set up block edge distances (in 1/8-pel units) and neighbor availability
  // for the intra predictor.
  xd->mb_to_top_edge = -((mi_row * MI_SIZE) * 8);
  xd->mb_to_bottom_edge = ((cm->mi_rows - 1 - mi_row) * MI_SIZE) * 8;
  xd->mb_to_left_edge = -((mi_col * MI_SIZE) * 8);
  xd->mb_to_right_edge = ((cm->mi_cols - 1 - mi_col) * MI_SIZE) * 8;
  xd->above_mi = (mi_row > 0) ? &mi_above : NULL;
  xd->left_mi = (mi_col > 0) ? &mi_left : NULL;

  // Intra prediction search
  for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
    uint8_t *src, *dst;
    int src_stride, dst_stride;

    src = xd->cur_buf->y_buffer + mb_y_offset;
    src_stride = xd->cur_buf->y_stride;

    dst = &predictor[0];
    dst_stride = bw;

    xd->mi[0]->sb_type = bsize;
    xd->mi[0]->ref_frame[0] = INTRA_FRAME;

    vp9_predict_intra_block(xd, b_width_log2_lookup[bsize], tx_size, mode, src,
                            src_stride, dst, dst_stride, 0, 0, 0);

    // Cost = SATD of the Hadamard-transformed prediction residual.
#if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      vpx_highbd_subtract_block(bh, bw, src_diff, bw, src, src_stride, dst,
                                dst_stride, xd->bd);
      vp9_highbd_wht_fwd_txfm(src_diff, bw, coeff, tx_size);
      intra_cost = vpx_highbd_satd(coeff, pix_num);
    } else {
      vpx_subtract_block(bh, bw, src_diff, bw, src, src_stride, dst,
                         dst_stride);
      vp9_wht_fwd_txfm(src_diff, bw, coeff, tx_size);
      intra_cost = vpx_satd(coeff, pix_num);
    }
#else
    vpx_subtract_block(bh, bw, src_diff, bw, src, src_stride, dst, dst_stride);
    vp9_wht_fwd_txfm(src_diff, bw, coeff, tx_size);
    intra_cost = vpx_satd(coeff, pix_num);
#endif  // CONFIG_VP9_HIGHBITDEPTH

    if (intra_cost < best_intra_cost) best_intra_cost = intra_cost;
  }

  // Motion compensated prediction
  best_mv.as_int = 0;

  set_mv_limits(cm, x, mi_row, mi_col);

  for (rf_idx = 0; rf_idx < MAX_INTER_REF_FRAMES; ++rf_idx) {
    int_mv mv;
#if CONFIG_NON_GREEDY_MV
    MotionField *motion_field;
#endif
    if (ref_frame[rf_idx] == NULL) continue;

    // With CONFIG_NON_GREEDY_MV the mv comes from the precomputed motion
    // field; otherwise a fresh motion search is run here.
#if CONFIG_NON_GREEDY_MV
    (void)td;
    motion_field = vp9_motion_field_info_get_motion_field(
        &cpi->motion_field_info, frame_idx, rf_idx, bsize);
    mv = vp9_motion_field_mi_get_mv(motion_field, mi_row, mi_col);
#else
    motion_compensated_prediction(cpi, td, xd->cur_buf->y_buffer + mb_y_offset,
                                  ref_frame[rf_idx]->y_buffer + mb_y_offset,
                                  xd->cur_buf->y_stride, bsize, &mv.as_mv);
#endif

#if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      vp9_highbd_build_inter_predictor(
          CONVERT_TO_SHORTPTR(ref_frame[rf_idx]->y_buffer + mb_y_offset),
          ref_frame[rf_idx]->y_stride, CONVERT_TO_SHORTPTR(&predictor[0]), bw,
          &mv.as_mv, sf, bw, bh, 0, kernel, MV_PRECISION_Q3, mi_col * MI_SIZE,
          mi_row * MI_SIZE, xd->bd);
      vpx_highbd_subtract_block(
          bh, bw, src_diff, bw, xd->cur_buf->y_buffer + mb_y_offset,
          xd->cur_buf->y_stride, &predictor[0], bw, xd->bd);
      vp9_highbd_wht_fwd_txfm(src_diff, bw, coeff, tx_size);
      inter_cost = vpx_highbd_satd(coeff, pix_num);
    } else {
      vp9_build_inter_predictor(
          ref_frame[rf_idx]->y_buffer + mb_y_offset,
          ref_frame[rf_idx]->y_stride, &predictor[0], bw, &mv.as_mv, sf, bw, bh,
          0, kernel, MV_PRECISION_Q3, mi_col * MI_SIZE, mi_row * MI_SIZE);
      vpx_subtract_block(bh, bw, src_diff, bw,
                         xd->cur_buf->y_buffer + mb_y_offset,
                         xd->cur_buf->y_stride, &predictor[0], bw);
      vp9_wht_fwd_txfm(src_diff, bw, coeff, tx_size);
      inter_cost = vpx_satd(coeff, pix_num);
    }
#else
    vp9_build_inter_predictor(ref_frame[rf_idx]->y_buffer + mb_y_offset,
                              ref_frame[rf_idx]->y_stride, &predictor[0], bw,
                              &mv.as_mv, sf, bw, bh, 0, kernel, MV_PRECISION_Q3,
                              mi_col * MI_SIZE, mi_row * MI_SIZE);
    vpx_subtract_block(bh, bw, src_diff, bw,
                       xd->cur_buf->y_buffer + mb_y_offset,
                       xd->cur_buf->y_stride, &predictor[0], bw);
    vp9_wht_fwd_txfm(src_diff, bw, coeff, tx_size);
    inter_cost = vpx_satd(coeff, pix_num);
#endif

    if (inter_cost < best_inter_cost) {
      uint16_t eob = 0;
      best_rf_idx = rf_idx;
      best_inter_cost = inter_cost;
      best_mv.as_int = mv.as_int;
      // Since best_inter_cost is initialized as INT64_MAX, recon_error and
      // rate_cost will be calculated with the best reference frame.
      get_quantize_error(x, 0, coeff, qcoeff, dqcoeff, tx_size, recon_error,
                         sse, &eob);
      *rate_cost = rate_estimator(qcoeff, eob, tx_size);
    }
  }
  // Clamp costs to >= 1 and cap inter at intra, then normalize per 8x8 unit.
  best_intra_cost = VPXMAX(best_intra_cost, 1);
  best_inter_cost = VPXMIN(best_intra_cost, best_inter_cost);
  tpl_stats->inter_cost = VPXMAX(
      1, (best_inter_cost << TPL_DEP_COST_SCALE_LOG2) / (mi_height * mi_width));
  tpl_stats->intra_cost = VPXMAX(
      1, (best_intra_cost << TPL_DEP_COST_SCALE_LOG2) / (mi_height * mi_width));
  if (best_rf_idx >= 0) {
    tpl_stats->ref_frame_index = gf_picture[frame_idx].ref_frame[best_rf_idx];
  }
  tpl_stats->mv.as_int = best_mv.as_int;
  *ref_frame_idx = best_rf_idx;
}
957
958
#if CONFIG_NON_GREEDY_MV
959
// Fills src/pre with pointers into the current frame and the reference frame
// (selected via gf_picture ref_frame[rf_idx]) at the block position
// (mi_row, mi_col). Returns 1 on success, 0 if the reference index is invalid
// (which also asserts in debug builds).
static int get_block_src_pred_buf(MACROBLOCKD *xd, GF_PICTURE *gf_picture,
                                  int frame_idx, int rf_idx, int mi_row,
                                  int mi_col, struct buf_2d *src,
                                  struct buf_2d *pre) {
  const int mb_y_offset =
      mi_row * MI_SIZE * xd->cur_buf->y_stride + mi_col * MI_SIZE;
  const int ref_frame_idx = gf_picture[frame_idx].ref_frame[rf_idx];
  YV12_BUFFER_CONFIG *ref_frame;
  if (ref_frame_idx == -1) {
    printf("invalid ref_frame_idx");
    assert(ref_frame_idx != -1);
    return 0;
  }
  ref_frame = gf_picture[ref_frame_idx].frame;
  src->buf = xd->cur_buf->y_buffer + mb_y_offset;
  src->stride = xd->cur_buf->y_stride;
  pre->buf = ref_frame->y_buffer + mb_y_offset;
  pre->stride = ref_frame->y_stride;
  assert(src->stride == pre->stride);
  return 1;
}
981
982
// Number of diagonal scan lines pre-checked around a block, and the total
// number of blocks in that triangular region: 15 == 5 * (5 + 1) / 2
// (verified by the assert in predict_mv_mode).
#define kMvPreCheckLines 5
#define kMvPreCheckSize 15

#define MV_REF_POS_NUM 3
// Spatial neighbor offsets (row, col in block units) used as mv references:
// above, left, and above-left. All offsets are non-positive so only
// already-processed neighbors are consulted.
POSITION mv_ref_pos[MV_REF_POS_NUM] = {
  { -1, 0 },
  { 0, -1 },
  { -1, -1 },
};
991
992
// Returns a pointer to the selected mv for block (mi_row, mi_col) in the
// frame-sized select_mv_arr, indexed with the tpl frame stride.
static int_mv *get_select_mv(VP9_COMP *cpi, TplDepFrame *tpl_frame, int mi_row,
                             int mi_col) {
  const int offset = mi_row * tpl_frame->stride + mi_col;
  return cpi->select_mv_arr + offset;
}
996
997
// Derives NEAREST/NEAR reference mvs for a block from its already-selected
// spatial neighbors (above, left, above-left; see mv_ref_pos). The first
// valid neighbor mv becomes NEAREST; the first subsequent *distinct* mv
// becomes NEAR. Missing candidates fall back to the zero mv.
static int_mv find_ref_mv(int mv_mode, VP9_COMP *cpi, TplDepFrame *tpl_frame,
                          BLOCK_SIZE bsize, int mi_row, int mi_col) {
  int i;
  const int mi_height = num_8x8_blocks_high_lookup[bsize];
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
  int_mv nearest_mv, near_mv, invalid_mv;
  nearest_mv.as_int = INVALID_MV;
  near_mv.as_int = INVALID_MV;
  invalid_mv.as_int = INVALID_MV;
  for (i = 0; i < MV_REF_POS_NUM; ++i) {
    int nb_row = mi_row + mv_ref_pos[i].row * mi_height;
    int nb_col = mi_col + mv_ref_pos[i].col * mi_width;
    // Only non-positive offsets are valid: neighbors must precede this block
    // in scan order so their mvs have already been selected.
    assert(mv_ref_pos[i].row <= 0);
    assert(mv_ref_pos[i].col <= 0);
    if (nb_row >= 0 && nb_col >= 0) {
      if (nearest_mv.as_int == INVALID_MV) {
        nearest_mv = *get_select_mv(cpi, tpl_frame, nb_row, nb_col);
      } else {
        int_mv mv = *get_select_mv(cpi, tpl_frame, nb_row, nb_col);
        if (mv.as_int == nearest_mv.as_int) {
          continue;
        } else {
          near_mv = mv;
          break;
        }
      }
    }
  }
  // Fall back to the zero mv when a candidate could not be found.
  if (nearest_mv.as_int == INVALID_MV) {
    nearest_mv.as_mv.row = 0;
    nearest_mv.as_mv.col = 0;
  }
  if (near_mv.as_int == INVALID_MV) {
    near_mv.as_mv.row = 0;
    near_mv.as_mv.col = 0;
  }
  if (mv_mode == NEAREST_MV_MODE) {
    return nearest_mv;
  }
  if (mv_mode == NEAR_MV_MODE) {
    return near_mv;
  }
  // Only NEAREST/NEAR modes are meaningful here.
  assert(0);
  return invalid_mv;
}
1042
1043
static int_mv get_mv_from_mv_mode(int mv_mode, VP9_COMP *cpi,
1044
                                  MotionField *motion_field,
1045
                                  TplDepFrame *tpl_frame, BLOCK_SIZE bsize,
1046
                                  int mi_row, int mi_col) {
1047
  int_mv mv;
1048
  switch (mv_mode) {
1049
    case ZERO_MV_MODE:
1050
      mv.as_mv.row = 0;
1051
      mv.as_mv.col = 0;
1052
      break;
1053
    case NEW_MV_MODE:
1054
      mv = vp9_motion_field_mi_get_mv(motion_field, mi_row, mi_col);
1055
      break;
1056
    case NEAREST_MV_MODE:
1057
      mv = find_ref_mv(mv_mode, cpi, tpl_frame, bsize, mi_row, mi_col);
1058
      break;
1059
    case NEAR_MV_MODE:
1060
      mv = find_ref_mv(mv_mode, cpi, tpl_frame, bsize, mi_row, mi_col);
1061
      break;
1062
    default:
1063
      mv.as_int = INVALID_MV;
1064
      assert(0);
1065
      break;
1066
  }
1067
  return mv;
1068
}
1069
1070
// Computes the full-pixel prediction distortion (SSE, scaled by
// VP9_DIST_SCALE_LOG2) for the mv implied by mv_mode, and returns that mv
// through *mv. Returns 0 (and asserts) if the block buffers are unavailable.
static double get_mv_dist(int mv_mode, VP9_COMP *cpi, MACROBLOCKD *xd,
                          GF_PICTURE *gf_picture, MotionField *motion_field,
                          int frame_idx, TplDepFrame *tpl_frame, int rf_idx,
                          BLOCK_SIZE bsize, int mi_row, int mi_col,
                          int_mv *mv) {
  uint32_t sse;
  struct buf_2d src;
  struct buf_2d pre;
  MV full_mv;
  *mv = get_mv_from_mv_mode(mv_mode, cpi, motion_field, tpl_frame, bsize,
                            mi_row, mi_col);
  full_mv = get_full_mv(&mv->as_mv);
  if (get_block_src_pred_buf(xd, gf_picture, frame_idx, rf_idx, mi_row, mi_col,
                             &src, &pre)) {
    // TODO(angiebird): Consider subpixel when computing the sse.
    cpi->fn_ptr[bsize].vf(src.buf, src.stride, get_buf_from_mv(&pre, &full_mv),
                          pre.stride, &sse);
    // NOTE(review): the shift happens in 32-bit before widening to double;
    // presumably sse cannot approach UINT32_MAX >> VP9_DIST_SCALE_LOG2 for
    // the block sizes used here — confirm.
    return (double)(sse << VP9_DIST_SCALE_LOG2);
  } else {
    assert(0);
    return 0;
  }
}
1093
1094
static int get_mv_mode_cost(int mv_mode) {
1095
  // TODO(angiebird): The probabilities are roughly inferred from
1096
  // default_inter_mode_probs. Check if there is a better way to set the
1097
  // probabilities.
1098
  const int zero_mv_prob = 16;
1099
  const int new_mv_prob = 24 * 1;
1100
  const int ref_mv_prob = 256 - zero_mv_prob - new_mv_prob;
1101
  assert(zero_mv_prob + new_mv_prob + ref_mv_prob == 256);
1102
  switch (mv_mode) {
1103
    case ZERO_MV_MODE: return vp9_prob_cost[zero_mv_prob]; break;
1104
    case NEW_MV_MODE: return vp9_prob_cost[new_mv_prob]; break;
1105
    case NEAREST_MV_MODE: return vp9_prob_cost[ref_mv_prob]; break;
1106
    case NEAR_MV_MODE: return vp9_prob_cost[ref_mv_prob]; break;
1107
    default: assert(0); return -1;
1108
  }
1109
}
1110
1111
// Approximate cost of coding new_mv as a delta from ref_mv: the log2 of the
// row and column distances, scaled to VP9_PROB_COST_SHIFT fixed point.
static INLINE double get_mv_diff_cost(MV *new_mv, MV *ref_mv) {
  const double row_cost = log2(1 + abs(new_mv->row - ref_mv->row));
  const double col_cost = log2(1 + abs(new_mv->col - ref_mv->col));
  return (row_cost + col_cost) * (1 << VP9_PROB_COST_SHIFT);
}
1117
// Total rate cost of an mv mode: the mode signaling cost plus, for
// NEW_MV_MODE only, the cheaper of coding the new mv relative to the
// NEAREST or NEAR reference mv.
static double get_mv_cost(int mv_mode, VP9_COMP *cpi, MotionField *motion_field,
                          TplDepFrame *tpl_frame, BLOCK_SIZE bsize, int mi_row,
                          int mi_col) {
  double mv_cost = get_mv_mode_cost(mv_mode);
  if (mv_mode == NEW_MV_MODE) {
    MV new_mv = get_mv_from_mv_mode(mv_mode, cpi, motion_field, tpl_frame,
                                    bsize, mi_row, mi_col)
                    .as_mv;
    MV nearest_mv = get_mv_from_mv_mode(NEAREST_MV_MODE, cpi, motion_field,
                                        tpl_frame, bsize, mi_row, mi_col)
                        .as_mv;
    MV near_mv = get_mv_from_mv_mode(NEAR_MV_MODE, cpi, motion_field, tpl_frame,
                                     bsize, mi_row, mi_col)
                     .as_mv;
    double nearest_cost = get_mv_diff_cost(&new_mv, &nearest_mv);
    double near_cost = get_mv_diff_cost(&new_mv, &near_mv);
    // Charge the cheaper of the two reference-mv deltas.
    mv_cost += nearest_cost < near_cost ? nearest_cost : near_cost;
  }
  return mv_cost;
}
1137
1138
// Rate-distortion style score for an mv mode: mv rate cost plus a weighted
// log of the prediction distortion. Lower is better. The chosen mv is
// returned through *mv.
static double eval_mv_mode(int mv_mode, VP9_COMP *cpi, MACROBLOCK *x,
                           GF_PICTURE *gf_picture, MotionField *motion_field,
                           int frame_idx, TplDepFrame *tpl_frame, int rf_idx,
                           BLOCK_SIZE bsize, int mi_row, int mi_col,
                           int_mv *mv) {
  MACROBLOCKD *xd = &x->e_mbd;
  double mv_dist =
      get_mv_dist(mv_mode, cpi, xd, gf_picture, motion_field, frame_idx,
                  tpl_frame, rf_idx, bsize, mi_row, mi_col, mv);
  double mv_cost =
      get_mv_cost(mv_mode, cpi, motion_field, tpl_frame, bsize, mi_row, mi_col);
  // Empirical weight balancing distortion against mv rate.
  double mult = 180;

  return mv_cost + mult * log2f(1 + mv_dist);
}
1153
1154
static int find_best_ref_mv_mode(VP9_COMP *cpi, MACROBLOCK *x,
1155
                                 GF_PICTURE *gf_picture,
1156
                                 MotionField *motion_field, int frame_idx,
1157
                                 TplDepFrame *tpl_frame, int rf_idx,
1158
                                 BLOCK_SIZE bsize, int mi_row, int mi_col,
1159
                                 double *rd, int_mv *mv) {
1160
  int best_mv_mode = ZERO_MV_MODE;
1161
  int update = 0;
1162
  int mv_mode;
1163
  *rd = 0;
1164
  for (mv_mode = 0; mv_mode < MAX_MV_MODE; ++mv_mode) {
1165
    double this_rd;
1166
    int_mv this_mv;
1167
    if (mv_mode == NEW_MV_MODE) {
1168
      continue;
1169
    }
1170
    this_rd = eval_mv_mode(mv_mode, cpi, x, gf_picture, motion_field, frame_idx,
1171
                           tpl_frame, rf_idx, bsize, mi_row, mi_col, &this_mv);
1172
    if (update == 0) {
1173
      *rd = this_rd;
1174
      *mv = this_mv;
1175
      best_mv_mode = mv_mode;
1176
      update = 1;
1177
    } else {
1178
      if (this_rd < *rd) {
1179
        *rd = this_rd;
1180
        *mv = this_mv;
1181
        best_mv_mode = mv_mode;
1182
      }
1183
    }
1184
  }
1185
  return best_mv_mode;
1186
}
1187
1188
// Decides whether block (mi_row, mi_col) should use NEW_MV_MODE or a
// reference-based mode by comparing the aggregate RD over a small triangular
// neighborhood (kMvPreCheckLines diagonals) under both hypotheses. If the
// no-new-mv hypothesis wins, the tentatively-overwritten modes/mvs in the
// neighborhood are restored from the saved copies. rd_diff_arr records the
// RD benefit of choosing NEW_MV_MODE (0 when it was rejected).
static void predict_mv_mode(VP9_COMP *cpi, MACROBLOCK *x,
                            GF_PICTURE *gf_picture, MotionField *motion_field,
                            int frame_idx, TplDepFrame *tpl_frame, int rf_idx,
                            BLOCK_SIZE bsize, int mi_row, int mi_col) {
  const int mi_height = num_8x8_blocks_high_lookup[bsize];
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
  int tmp_mv_mode_arr[kMvPreCheckSize];
  int *mv_mode_arr = tpl_frame->mv_mode_arr[rf_idx];
  double *rd_diff_arr = tpl_frame->rd_diff_arr[rf_idx];
  int_mv *select_mv_arr = cpi->select_mv_arr;
  int_mv tmp_select_mv_arr[kMvPreCheckSize];
  int stride = tpl_frame->stride;
  double new_mv_rd = 0;
  double no_new_mv_rd = 0;
  double this_new_mv_rd = 0;
  double this_no_new_mv_rd = 0;
  int idx;
  int tmp_idx;
  assert(kMvPreCheckSize == (kMvPreCheckLines * (kMvPreCheckLines + 1)) >> 1);

  // no new mv
  // diagonal scan order
  tmp_idx = 0;
  for (idx = 0; idx < kMvPreCheckLines; ++idx) {
    int r;
    for (r = 0; r <= idx; ++r) {
      int c = idx - r;
      int nb_row = mi_row + r * mi_height;
      int nb_col = mi_col + c * mi_width;
      if (nb_row < tpl_frame->mi_rows && nb_col < tpl_frame->mi_cols) {
        double this_rd;
        int_mv *mv = &select_mv_arr[nb_row * stride + nb_col];
        mv_mode_arr[nb_row * stride + nb_col] = find_best_ref_mv_mode(
            cpi, x, gf_picture, motion_field, frame_idx, tpl_frame, rf_idx,
            bsize, nb_row, nb_col, &this_rd, mv);
        if (r == 0 && c == 0) {
          // RD of the anchor block itself under the no-new-mv hypothesis.
          this_no_new_mv_rd = this_rd;
        }
        no_new_mv_rd += this_rd;
        // Save the neighborhood decisions so they can be restored later.
        tmp_mv_mode_arr[tmp_idx] = mv_mode_arr[nb_row * stride + nb_col];
        tmp_select_mv_arr[tmp_idx] = select_mv_arr[nb_row * stride + nb_col];
        ++tmp_idx;
      }
    }
  }

  // new mv
  mv_mode_arr[mi_row * stride + mi_col] = NEW_MV_MODE;
  this_new_mv_rd = eval_mv_mode(
      NEW_MV_MODE, cpi, x, gf_picture, motion_field, frame_idx, tpl_frame,
      rf_idx, bsize, mi_row, mi_col, &select_mv_arr[mi_row * stride + mi_col]);
  new_mv_rd = this_new_mv_rd;
  // We start from idx = 1 because idx = 0 is evaluated as NEW_MV_MODE
  // beforehand.
  for (idx = 1; idx < kMvPreCheckLines; ++idx) {
    int r;
    for (r = 0; r <= idx; ++r) {
      int c = idx - r;
      int nb_row = mi_row + r * mi_height;
      int nb_col = mi_col + c * mi_width;
      if (nb_row < tpl_frame->mi_rows && nb_col < tpl_frame->mi_cols) {
        double this_rd;
        int_mv *mv = &select_mv_arr[nb_row * stride + nb_col];
        mv_mode_arr[nb_row * stride + nb_col] = find_best_ref_mv_mode(
            cpi, x, gf_picture, motion_field, frame_idx, tpl_frame, rf_idx,
            bsize, nb_row, nb_col, &this_rd, mv);
        new_mv_rd += this_rd;
      }
    }
  }

  // update best_mv_mode
  tmp_idx = 0;
  if (no_new_mv_rd < new_mv_rd) {
    // NEW_MV_MODE rejected: restore the saved neighborhood decisions.
    for (idx = 0; idx < kMvPreCheckLines; ++idx) {
      int r;
      for (r = 0; r <= idx; ++r) {
        int c = idx - r;
        int nb_row = mi_row + r * mi_height;
        int nb_col = mi_col + c * mi_width;
        if (nb_row < tpl_frame->mi_rows && nb_col < tpl_frame->mi_cols) {
          mv_mode_arr[nb_row * stride + nb_col] = tmp_mv_mode_arr[tmp_idx];
          select_mv_arr[nb_row * stride + nb_col] = tmp_select_mv_arr[tmp_idx];
          ++tmp_idx;
        }
      }
    }
    rd_diff_arr[mi_row * stride + mi_col] = 0;
  } else {
    // RD gain of NEW_MV_MODE over the neighborhood, excluding the anchor
    // block's own contribution.
    rd_diff_arr[mi_row * stride + mi_col] =
        (no_new_mv_rd - this_no_new_mv_rd) - (new_mv_rd - this_new_mv_rd);
  }
}
1281
1282
// Runs predict_mv_mode over every block of the frame in anti-diagonal scan
// order, which guarantees that each block's above/left neighbors have been
// decided before the block itself.
static void predict_mv_mode_arr(VP9_COMP *cpi, MACROBLOCK *x,
                                GF_PICTURE *gf_picture,
                                MotionField *motion_field, int frame_idx,
                                TplDepFrame *tpl_frame, int rf_idx,
                                BLOCK_SIZE bsize) {
  const int mi_height = num_8x8_blocks_high_lookup[bsize];
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
  const int unit_rows = tpl_frame->mi_rows / mi_height;
  const int unit_cols = tpl_frame->mi_cols / mi_width;
  const int max_diagonal_lines = unit_rows + unit_cols - 1;
  int idx;
  for (idx = 0; idx < max_diagonal_lines; ++idx) {
    int r;
    // Clamp the row range so (r, c) stays inside the unit grid.
    for (r = VPXMAX(idx - unit_cols + 1, 0); r <= VPXMIN(idx, unit_rows - 1);
         ++r) {
      int c = idx - r;
      int mi_row = r * mi_height;
      int mi_col = c * mi_width;
      assert(c >= 0 && c < unit_cols);
      assert(mi_row >= 0 && mi_row < tpl_frame->mi_rows);
      assert(mi_col >= 0 && mi_col < tpl_frame->mi_cols);
      predict_mv_mode(cpi, x, gf_picture, motion_field, frame_idx, tpl_frame,
                      rf_idx, bsize, mi_row, mi_col);
    }
  }
}
1308
1309
// Refines the motion-field mv for one block against ref_frame: seeds from
// the current motion-field entry, runs full-pixel then sub-pixel search, and
// stores the result back into the motion field.
static void do_motion_search(VP9_COMP *cpi, ThreadData *td,
                             MotionField *motion_field, int frame_idx,
                             YV12_BUFFER_CONFIG *ref_frame, BLOCK_SIZE bsize,
                             int mi_row, int mi_col) {
  VP9_COMMON *cm = &cpi->common;
  MACROBLOCK *x = &td->mb;
  MACROBLOCKD *xd = &x->e_mbd;
  const int mb_y_offset =
      mi_row * MI_SIZE * xd->cur_buf->y_stride + mi_col * MI_SIZE;
  assert(ref_frame != NULL);
  set_mv_limits(cm, x, mi_row, mi_col);
  {
    int_mv mv = vp9_motion_field_mi_get_mv(motion_field, mi_row, mi_col);
    uint8_t *cur_frame_buf = xd->cur_buf->y_buffer + mb_y_offset;
    uint8_t *ref_frame_buf = ref_frame->y_buffer + mb_y_offset;
    const int stride = xd->cur_buf->y_stride;
    full_pixel_motion_search(cpi, td, motion_field, frame_idx, cur_frame_buf,
                             ref_frame_buf, stride, bsize, mi_row, mi_col,
                             &mv.as_mv);
    sub_pixel_motion_search(cpi, td, cur_frame_buf, ref_frame_buf, stride,
                            bsize, &mv.as_mv);
    vp9_motion_field_mi_set_mv(motion_field, mi_row, mi_col, mv);
  }
}
1333
1334
// Builds the motion field for one frame: for every available reference
// frame, resets the field and runs a block-wise motion search over the whole
// frame at the given block size. Also sets the tpl frame lambda from the
// block pixel count.
static void build_motion_field(
    VP9_COMP *cpi, int frame_idx,
    YV12_BUFFER_CONFIG *ref_frame[MAX_INTER_REF_FRAMES], BLOCK_SIZE bsize) {
  VP9_COMMON *cm = &cpi->common;
  ThreadData *td = &cpi->td;
  TplDepFrame *tpl_frame = &cpi->tpl_stats[frame_idx];
  const int mi_height = num_8x8_blocks_high_lookup[bsize];
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
  const int pw = num_4x4_blocks_wide_lookup[bsize] << 2;
  const int ph = num_4x4_blocks_high_lookup[bsize] << 2;
  int mi_row, mi_col;
  int rf_idx;

  // lambda = pixel count / 4; the assert checks the division was exact.
  tpl_frame->lambda = (pw * ph) >> 2;
  assert(pw * ph == tpl_frame->lambda << 2);

  for (rf_idx = 0; rf_idx < MAX_INTER_REF_FRAMES; ++rf_idx) {
    MotionField *motion_field = vp9_motion_field_info_get_motion_field(
        &cpi->motion_field_info, frame_idx, rf_idx, bsize);
    if (ref_frame[rf_idx] == NULL) {
      continue;
    }
    vp9_motion_field_reset_mvs(motion_field);
    for (mi_row = 0; mi_row < cm->mi_rows; mi_row += mi_height) {
      for (mi_col = 0; mi_col < cm->mi_cols; mi_col += mi_width) {
        do_motion_search(cpi, td, motion_field, frame_idx, ref_frame[rf_idx],
                         bsize, mi_row, mi_col);
      }
    }
  }
}
1365
#endif  // CONFIG_NON_GREEDY_MV
1366
1367
// Runs the TPL model over one frame of the GOP: sets up scale factors,
// reference frame pointers, and quantizer state, then for every block runs
// mode_estimation, stores the per-block stats (both the internal TPL stats
// and the pre-propagation external stats), and propagates dependencies back
// through the referenced frames via tpl_model_update.
static void mc_flow_dispenser(VP9_COMP *cpi, GF_PICTURE *gf_picture,
                              int frame_idx, BLOCK_SIZE bsize) {
  TplDepFrame *tpl_frame = &cpi->tpl_stats[frame_idx];
  VpxTplFrameStats *tpl_frame_stats_before_propagation =
      &cpi->tpl_gop_stats.frame_stats_list[frame_idx];
  YV12_BUFFER_CONFIG *this_frame = gf_picture[frame_idx].frame;
  YV12_BUFFER_CONFIG *ref_frame[MAX_INTER_REF_FRAMES] = { NULL, NULL, NULL };

  VP9_COMMON *cm = &cpi->common;
  struct scale_factors sf;
  int rdmult, idx;
  ThreadData *td = &cpi->td;
  MACROBLOCK *x = &td->mb;
  MACROBLOCKD *xd = &x->e_mbd;
  int mi_row, mi_col;

#if CONFIG_VP9_HIGHBITDEPTH
  // Separate 16-bit and 8-bit predictor buffers; `predictor` aliases the
  // one matching the current buffer's bit depth.
  DECLARE_ALIGNED(16, uint16_t, predictor16[32 * 32 * 3]);
  DECLARE_ALIGNED(16, uint8_t, predictor8[32 * 32 * 3]);
  uint8_t *predictor;
#else
  DECLARE_ALIGNED(16, uint8_t, predictor[32 * 32 * 3]);
#endif
  DECLARE_ALIGNED(16, int16_t, src_diff[32 * 32]);
  DECLARE_ALIGNED(16, tran_low_t, coeff[32 * 32]);
  DECLARE_ALIGNED(16, tran_low_t, qcoeff[32 * 32]);
  DECLARE_ALIGNED(16, tran_low_t, dqcoeff[32 * 32]);

  const TX_SIZE tx_size = max_txsize_lookup[bsize];
  const int mi_height = num_8x8_blocks_high_lookup[bsize];
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];

  tpl_frame_stats_before_propagation->frame_width = cm->width;
  tpl_frame_stats_before_propagation->frame_height = cm->height;
  // Setup scaling factor
#if CONFIG_VP9_HIGHBITDEPTH
  vp9_setup_scale_factors_for_frame(
      &sf, this_frame->y_crop_width, this_frame->y_crop_height,
      this_frame->y_crop_width, this_frame->y_crop_height,
      cpi->common.use_highbitdepth);

  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
    predictor = CONVERT_TO_BYTEPTR(predictor16);
  else
    predictor = predictor8;
#else
  vp9_setup_scale_factors_for_frame(
      &sf, this_frame->y_crop_width, this_frame->y_crop_height,
      this_frame->y_crop_width, this_frame->y_crop_height);
#endif  // CONFIG_VP9_HIGHBITDEPTH

  // Prepare reference frame pointers. If any reference frame slot is
  // unavailable, the pointer will be set to Null.
  for (idx = 0; idx < MAX_INTER_REF_FRAMES; ++idx) {
    int rf_idx = gf_picture[frame_idx].ref_frame[idx];
    // -REFS_PER_FRAME marks an unused reference slot here.
    if (rf_idx != -REFS_PER_FRAME) ref_frame[idx] = gf_picture[rf_idx].frame;
  }

  xd->mi = cm->mi_grid_visible;
  xd->mi[0] = cm->mi;
  xd->cur_buf = this_frame;

  // Get rd multiplier set up.
  rdmult = vp9_compute_rd_mult_based_on_qindex(cpi, tpl_frame->base_qindex);
  set_error_per_bit(&cpi->td.mb, rdmult);
  vp9_initialize_me_consts(cpi, &cpi->td.mb, tpl_frame->base_qindex);

  tpl_frame->is_valid = 1;

  cm->base_qindex = tpl_frame->base_qindex;
  vp9_frame_init_quantizer(cpi);

#if CONFIG_NON_GREEDY_MV
  {
    // Precompute motion fields for all square block sizes, then select per
    // block between NEW and reference mv modes.
    int square_block_idx;
    int rf_idx;
    for (square_block_idx = 0; square_block_idx < SQUARE_BLOCK_SIZES;
         ++square_block_idx) {
      BLOCK_SIZE square_bsize = square_block_idx_to_bsize(square_block_idx);
      build_motion_field(cpi, frame_idx, ref_frame, square_bsize);
    }
    for (rf_idx = 0; rf_idx < MAX_INTER_REF_FRAMES; ++rf_idx) {
      int ref_frame_idx = gf_picture[frame_idx].ref_frame[rf_idx];
      if (ref_frame_idx != -1) {
        MotionField *motion_field = vp9_motion_field_info_get_motion_field(
            &cpi->motion_field_info, frame_idx, rf_idx, bsize);
        predict_mv_mode_arr(cpi, x, gf_picture, motion_field, frame_idx,
                            tpl_frame, rf_idx, bsize);
      }
    }
  }
#endif  // CONFIG_NON_GREEDY_MV

  for (mi_row = 0; mi_row < cm->mi_rows; mi_row += mi_height) {
    for (mi_col = 0; mi_col < cm->mi_cols; mi_col += mi_width) {
      int64_t recon_error = 0;
      int64_t rate_cost = 0;
      int64_t sse = 0;
      // Ref frame index in the ref frame buffer.
      int ref_frame_idx = -1;
      mode_estimation(cpi, x, xd, &sf, gf_picture, frame_idx, tpl_frame,
                      src_diff, coeff, qcoeff, dqcoeff, mi_row, mi_col, bsize,
                      tx_size, ref_frame, predictor, &recon_error, &rate_cost,
                      &sse, &ref_frame_idx);
      // Motion flow dependency dispenser.
      tpl_model_store(tpl_frame->tpl_stats_ptr, mi_row, mi_col, bsize,
                      tpl_frame->stride);

      tpl_store_before_propagation(
          tpl_frame_stats_before_propagation->block_stats_list,
          tpl_frame->tpl_stats_ptr, mi_row, mi_col, bsize, tpl_frame->stride,
          recon_error, sse, rate_cost, ref_frame_idx, tpl_frame->mi_rows,
          tpl_frame->mi_cols);

      tpl_model_update(cpi->tpl_stats, tpl_frame->tpl_stats_ptr, mi_row, mi_col,
                       bsize);
    }
  }
}
1486
1487
static void trim_tpl_stats(struct vpx_internal_error_info *error_info,
1488
0
                           VpxTplGopStats *tpl_gop_stats, int extra_frames) {
1489
0
  int i;
1490
0
  VpxTplFrameStats *new_frame_stats;
1491
0
  const int new_size = tpl_gop_stats->size - extra_frames;
1492
0
  if (tpl_gop_stats->size <= extra_frames)
1493
0
    vpx_internal_error(
1494
0
        error_info, VPX_CODEC_ERROR,
1495
0
        "The number of frames in VpxTplGopStats is fewer than expected.");
1496
0
  CHECK_MEM_ERROR(error_info, new_frame_stats,
1497
0
                  vpx_calloc(new_size, sizeof(*new_frame_stats)));
1498
0
  for (i = 0; i < new_size; i++) {
1499
0
    VpxTplFrameStats *frame_stats = &tpl_gop_stats->frame_stats_list[i];
1500
0
    const int num_blocks = frame_stats->num_blocks;
1501
0
    new_frame_stats[i].num_blocks = frame_stats->num_blocks;
1502
0
    new_frame_stats[i].frame_width = frame_stats->frame_width;
1503
0
    new_frame_stats[i].frame_height = frame_stats->frame_height;
1504
0
    new_frame_stats[i].num_blocks = num_blocks;
1505
0
    CHECK_MEM_ERROR(
1506
0
        error_info, new_frame_stats[i].block_stats_list,
1507
0
        vpx_calloc(num_blocks, sizeof(*new_frame_stats[i].block_stats_list)));
1508
0
    memcpy(new_frame_stats[i].block_stats_list, frame_stats->block_stats_list,
1509
0
           num_blocks * sizeof(*new_frame_stats[i].block_stats_list));
1510
0
  }
1511
0
  free_tpl_frame_stats_list(tpl_gop_stats);
1512
0
  tpl_gop_stats->size = new_size;
1513
0
  tpl_gop_stats->frame_stats_list = new_frame_stats;
1514
0
}
1515
1516
#if CONFIG_NON_GREEDY_MV
1517
#define DUMP_TPL_STATS 0
1518
#if DUMP_TPL_STATS
1519
// Debug helper: prints the region dimensions ("h w") followed by every byte
// of the h x w window of |buf| whose top-left corner is at (row, col), using
// |stride| to step between rows. Output goes to stdout.
static void dump_buf(uint8_t *buf, int stride, int row, int col, int h, int w) {
  int r, c;
  printf("%d %d\n", h, w);
  for (r = 0; r < h; ++r) {
    const uint8_t *line = buf + (row + r) * stride + col;
    for (c = 0; c < w; ++c) {
      printf("%d ", line[c]);
    }
  }
  printf("\n");
}
1529
1530
static void dump_frame_buf(const YV12_BUFFER_CONFIG *frame_buf) {
1531
  dump_buf(frame_buf->y_buffer, frame_buf->y_stride, 0, 0, frame_buf->y_height,
1532
           frame_buf->y_width);
1533
  dump_buf(frame_buf->u_buffer, frame_buf->uv_stride, 0, 0,
1534
           frame_buf->uv_height, frame_buf->uv_width);
1535
  dump_buf(frame_buf->v_buffer, frame_buf->uv_stride, 0, 0,
1536
           frame_buf->uv_height, frame_buf->uv_width);
1537
}
1538
1539
// Debug helper (only built when DUMP_TPL_STATS is enabled): for every
// inter frame of the GOP and each of its valid reference frames, prints to
// stdout the frame/reference metadata, the per-block motion vectors, the
// per-block feature scores, the per-block MV modes, and finally the raw
// pixel contents of both the current and the reference frame buffers.
static void dump_tpl_stats(const VP9_COMP *cpi, int tpl_group_frames,
                           const GF_GROUP *gf_group,
                           const GF_PICTURE *gf_picture, BLOCK_SIZE bsize) {
  int frame_idx;
  const VP9_COMMON *cm = &cpi->common;
  int rf_idx;
  // Frame 0 (the key/overlay frame) is skipped; it has no inter references.
  for (frame_idx = 1; frame_idx < tpl_group_frames; ++frame_idx) {
    for (rf_idx = 0; rf_idx < MAX_INTER_REF_FRAMES; ++rf_idx) {
      const TplDepFrame *tpl_frame = &cpi->tpl_stats[frame_idx];
      int mi_row, mi_col;
      int ref_frame_idx;
      // TPL block dimensions in 8x8 mi units.
      const int mi_height = num_8x8_blocks_high_lookup[bsize];
      const int mi_width = num_8x8_blocks_wide_lookup[bsize];
      ref_frame_idx = gf_picture[frame_idx].ref_frame[rf_idx];
      if (ref_frame_idx != -1) {  // -1 marks an unused reference slot.
        YV12_BUFFER_CONFIG *ref_frame_buf = gf_picture[ref_frame_idx].frame;
        const int gf_frame_offset = gf_group->frame_gop_index[frame_idx];
        const int ref_gf_frame_offset =
            gf_group->frame_gop_index[ref_frame_idx];
        // Record separator followed by a one-line header.
        printf("=\n");
        printf(
            "frame_idx %d mi_rows %d mi_cols %d bsize %d ref_frame_idx %d "
            "rf_idx %d gf_frame_offset %d ref_gf_frame_offset %d\n",
            frame_idx, cm->mi_rows, cm->mi_cols, mi_width * MI_SIZE,
            ref_frame_idx, rf_idx, gf_frame_offset, ref_gf_frame_offset);
        // One motion vector per TPL block (printed at each block's top-left
        // mi position).
        for (mi_row = 0; mi_row < cm->mi_rows; ++mi_row) {
          for (mi_col = 0; mi_col < cm->mi_cols; ++mi_col) {
            if ((mi_row % mi_height) == 0 && (mi_col % mi_width) == 0) {
              int_mv mv = vp9_motion_field_info_get_mv(&cpi->motion_field_info,
                                                       frame_idx, rf_idx, bsize,
                                                       mi_row, mi_col);
              printf("%d %d %d %d\n", mi_row, mi_col, mv.as_mv.row,
                     mv.as_mv.col);
            }
          }
        }
        // One feature score per TPL block, space-separated on one line.
        for (mi_row = 0; mi_row < cm->mi_rows; ++mi_row) {
          for (mi_col = 0; mi_col < cm->mi_cols; ++mi_col) {
            if ((mi_row % mi_height) == 0 && (mi_col % mi_width) == 0) {
              const TplDepStats *tpl_ptr =
                  &tpl_frame
                       ->tpl_stats_ptr[mi_row * tpl_frame->stride + mi_col];
              printf("%f ", tpl_ptr->feature_score);
            }
          }
        }
        printf("\n");

        // One chosen MV mode per TPL block for this reference.
        for (mi_row = 0; mi_row < cm->mi_rows; mi_row += mi_height) {
          for (mi_col = 0; mi_col < cm->mi_cols; mi_col += mi_width) {
            const int mv_mode =
                tpl_frame
                    ->mv_mode_arr[rf_idx][mi_row * tpl_frame->stride + mi_col];
            printf("%d ", mv_mode);
          }
        }
        printf("\n");

        // Raw pixel dumps: current frame, then its reference.
        dump_frame_buf(gf_picture[frame_idx].frame);
        dump_frame_buf(ref_frame_buf);
      }
    }
  }
}
1603
#endif  // DUMP_TPL_STATS
1604
#endif  // CONFIG_NON_GREEDY_MV
1605
1606
0
// (Re)allocates the per-frame TPL stats buffers sized for the current frame
// dimensions, reusing any existing allocation that is already large enough,
// and resets the encoder frame-buffer bookkeeping. Allocation failures are
// reported through CHECK_MEM_ERROR on cm->error (does not return normally).
void vp9_init_tpl_buffer(VP9_COMP *cpi) {
  VP9_COMMON *cm = &cpi->common;
  int frame;

  // Frame dimensions in mi units, aligned up to superblock granularity.
  const int mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
  const int mi_rows = mi_cols_aligned_to_sb(cm->mi_rows);
#if CONFIG_NON_GREEDY_MV
  int rf_idx;

  vpx_free(cpi->select_mv_arr);
  CHECK_MEM_ERROR(
      &cm->error, cpi->select_mv_arr,
      vpx_calloc(mi_rows * mi_cols * 4, sizeof(*cpi->select_mv_arr)));
#endif

  // TODO(jingning): Reduce the actual memory use for tpl model build up.
  for (frame = 0; frame < MAX_ARF_GOP_SIZE; ++frame) {
    // Keep the existing buffer when it already covers the current size.
    if (cpi->tpl_stats[frame].width >= mi_cols &&
        cpi->tpl_stats[frame].height >= mi_rows &&
        cpi->tpl_stats[frame].tpl_stats_ptr)
      continue;

#if CONFIG_NON_GREEDY_MV
    // Per-reference MV mode and RD difference arrays (4 entries per mi).
    for (rf_idx = 0; rf_idx < MAX_INTER_REF_FRAMES; ++rf_idx) {
      vpx_free(cpi->tpl_stats[frame].mv_mode_arr[rf_idx]);
      CHECK_MEM_ERROR(
          &cm->error, cpi->tpl_stats[frame].mv_mode_arr[rf_idx],
          vpx_calloc(mi_rows * mi_cols * 4,
                     sizeof(*cpi->tpl_stats[frame].mv_mode_arr[rf_idx])));
      vpx_free(cpi->tpl_stats[frame].rd_diff_arr[rf_idx]);
      CHECK_MEM_ERROR(
          &cm->error, cpi->tpl_stats[frame].rd_diff_arr[rf_idx],
          vpx_calloc(mi_rows * mi_cols * 4,
                     sizeof(*cpi->tpl_stats[frame].rd_diff_arr[rf_idx])));
    }
#endif
    vpx_free(cpi->tpl_stats[frame].tpl_stats_ptr);
    CHECK_MEM_ERROR(&cm->error, cpi->tpl_stats[frame].tpl_stats_ptr,
                    vpx_calloc(mi_rows * mi_cols,
                               sizeof(*cpi->tpl_stats[frame].tpl_stats_ptr)));
    // width/height/stride track the aligned allocation size; mi_rows/mi_cols
    // keep the unaligned frame dimensions for iteration bounds.
    cpi->tpl_stats[frame].is_valid = 0;
    cpi->tpl_stats[frame].width = mi_cols;
    cpi->tpl_stats[frame].height = mi_rows;
    cpi->tpl_stats[frame].stride = mi_cols;
    cpi->tpl_stats[frame].mi_rows = cm->mi_rows;
    cpi->tpl_stats[frame].mi_cols = cm->mi_cols;
  }

  // Reset the encoder reference-buffer tracking state.
  for (frame = 0; frame < REF_FRAMES; ++frame) {
    cpi->enc_frame_buf[frame].mem_valid = 0;
    cpi->enc_frame_buf[frame].released = 1;
  }
}
1659
1660
3.99k
// Releases every TPL-model allocation owned by |cpi|: the per-frame TPL
// stats buffers, the GOP-level stats list and, in non-greedy-MV builds, the
// motion-field buffers. Each freed frame's stats are marked invalid.
void vp9_free_tpl_buffer(VP9_COMP *cpi) {
  int i;
#if CONFIG_NON_GREEDY_MV
  vp9_free_motion_field_info(&cpi->motion_field_info);
  vpx_free(cpi->select_mv_arr);
#endif
  for (i = 0; i < MAX_ARF_GOP_SIZE; ++i) {
    TplDepFrame *const tpl = &cpi->tpl_stats[i];
#if CONFIG_NON_GREEDY_MV
    int ref;
    for (ref = 0; ref < MAX_INTER_REF_FRAMES; ++ref) {
      vpx_free(tpl->mv_mode_arr[ref]);
      vpx_free(tpl->rd_diff_arr[ref]);
    }
#endif
    vpx_free(tpl->tpl_stats_ptr);
    tpl->is_valid = 0;
  }
  free_tpl_frame_stats_list(&cpi->tpl_gop_stats);
}
1679
1680
0
// Estimates a base Q index for every remaining frame of the current GF group
// and stores it in the matching TplDepFrame. When an external rate controller
// with QP control is attached, the Q index and per-superblock rdmult scales
// come from that controller; otherwise the two-pass rate control picks the Q.
// The GF group index, alt-ref flag and frame-context refresh flag are saved
// up front and restored afterwards, since each loop iteration reconfigures
// buffer updates per frame.
void vp9_estimate_tpl_qp_gop(VP9_COMP *cpi) {
  VP9_COMMON *cm = &cpi->common;
  int gop_length = cpi->twopass.gf_group.gf_group_size;
  int bottom_index, top_index;
  int idx;
  // State to restore after the per-frame dry runs below.
  const int gf_index = cpi->twopass.gf_group.index;
  const int is_src_frame_alt_ref = cpi->rc.is_src_frame_alt_ref;
  const int refresh_frame_context = cpi->common.refresh_frame_context;

  // Frame size in 64x64 superblocks, rounded up.
  const int sb_size = num_8x8_blocks_wide_lookup[BLOCK_64X64] * MI_SIZE;
  const int frame_height_sb = (cm->height + sb_size - 1) / sb_size;
  const int frame_width_sb = (cm->width + sb_size - 1) / sb_size;

  vpx_codec_err_t codec_status;
  const GF_GROUP *gf_group = &cpi->twopass.gf_group;
  vpx_rc_encodeframe_decision_t encode_frame_decision;

  // Scratch buffer for the external controller's per-SB parameters; freed at
  // the end of this function (or before any vpx_internal_error below).
  CHECK_MEM_ERROR(
      &cm->error, encode_frame_decision.sb_params_list,
      (sb_params *)vpx_malloc(frame_height_sb * frame_width_sb *
                              sizeof(*encode_frame_decision.sb_params_list)));

  for (idx = gf_index; idx <= gop_length; ++idx) {
    TplDepFrame *tpl_frame = &cpi->tpl_stats[idx];
    int target_rate = cpi->twopass.gf_group.bit_allocation[idx];
    cpi->twopass.gf_group.index = idx;
    vp9_rc_set_frame_target(cpi, target_rate);
    vp9_configure_buffer_updates(cpi, idx);
    if (cpi->ext_ratectrl.ready &&
        (cpi->ext_ratectrl.funcs.rc_type & VPX_RC_QP) != 0 &&
        cpi->ext_ratectrl.funcs.get_encodeframe_decision != NULL) {
      // The external controller is not queried for the slot past the group.
      if (idx == gop_length) break;
      memset(encode_frame_decision.sb_params_list, 0,
             sizeof(*encode_frame_decision.sb_params_list) * frame_height_sb *
                 frame_width_sb);
      codec_status = vp9_extrc_get_encodeframe_decision(
          &cpi->ext_ratectrl, gf_group->index, &encode_frame_decision);
      if (codec_status != VPX_CODEC_OK) {
        // vpx_internal_error() longjmps and never returns, so the vpx_free()
        // at the end of this function would be skipped. Release the scratch
        // buffer first to avoid leaking it on this error path.
        vpx_free(encode_frame_decision.sb_params_list);
        vpx_internal_error(&cm->error, codec_status,
                           "vp9_extrc_get_encodeframe_decision() failed");
      }
      // Per-SB rdmult scale relative to the frame rdmult, fixed-point *256.
      // The +1 keeps the divisor non-zero.
      for (int i = 0; i < frame_height_sb * frame_width_sb; ++i) {
        cpi->sb_mul_scale[i] =
            (((int64_t)encode_frame_decision.sb_params_list[i].rdmult * 256) /
             (encode_frame_decision.rdmult + 1));
      }
      tpl_frame->base_qindex = encode_frame_decision.q_index;
    } else {
      tpl_frame->base_qindex = vp9_rc_pick_q_and_bounds_two_pass(
          cpi, &bottom_index, &top_index, idx);
      // Q index 0 (lossless) is not used for TPL estimation.
      tpl_frame->base_qindex = VPXMAX(tpl_frame->base_qindex, 1);
    }
  }
  // Reset the actual index and frame update
  cpi->twopass.gf_group.index = gf_index;
  cpi->rc.is_src_frame_alt_ref = is_src_frame_alt_ref;
  cpi->common.refresh_frame_context = refresh_frame_context;
  vp9_configure_buffer_updates(cpi, gf_index);

  vpx_free(encode_frame_decision.sb_params_list);
}
1741
1742
0
// Builds the TPL (temporal dependency) model for the current GF group:
// assembles the GOP picture list, runs the motion-compensated flow dispenser
// backwards through the group to propagate dependencies, and, when an
// external rate controller wants them, trims and sends the gathered stats.
void vp9_setup_tpl_stats(VP9_COMP *cpi) {
  // gf_picture points REFS_PER_FRAME entries into the buffer, so indices
  // down to -REFS_PER_FRAME are valid for init_gop_frames to fill.
  GF_PICTURE gf_picture_buf[MAX_ARF_GOP_SIZE + REFS_PER_FRAME];
  GF_PICTURE *gf_picture = &gf_picture_buf[REFS_PER_FRAME];
  const GF_GROUP *gf_group = &cpi->twopass.gf_group;
  int tpl_group_frames = 0;  // Set by init_gop_frames below.
  int frame_idx;
  int extended_frame_count;  // Extra frames borrowed from the next GOP.
  cpi->tpl_bsize = BLOCK_32X32;

  memset(gf_picture_buf, 0, sizeof(gf_picture_buf));
  extended_frame_count =
      init_gop_frames(cpi, gf_picture, gf_group, &tpl_group_frames);

  init_tpl_stats(cpi);

  init_tpl_stats_before_propagation(&cpi->common.error, &cpi->tpl_gop_stats,
                                    cpi->tpl_stats, tpl_group_frames,
                                    cpi->common.width, cpi->common.height);

  // Backward propagation from tpl_group_frames to 1.
  for (frame_idx = tpl_group_frames - 1; frame_idx > 0; --frame_idx) {
    // USE_BUF_FRAME entries reuse an existing buffer; nothing to dispense.
    if (gf_picture[frame_idx].update_type == USE_BUF_FRAME) continue;
    mc_flow_dispenser(cpi, gf_picture, frame_idx, cpi->tpl_bsize);
  }

  if (cpi->ext_ratectrl.ready &&
      cpi->ext_ratectrl.funcs.send_tpl_gop_stats != NULL) {
    // Intra search on key frame
    if (gf_group->update_type[0] != OVERLAY_UPDATE) {
      mc_flow_dispenser(cpi, gf_picture, 0, cpi->tpl_bsize);
    }
    // TPL stats has extra frames from next GOP. Trim those extra frames for
    // Qmode.
    trim_tpl_stats(&cpi->common.error, &cpi->tpl_gop_stats,
                   extended_frame_count);
    const vpx_codec_err_t codec_status =
        vp9_extrc_send_tpl_stats(&cpi->ext_ratectrl, &cpi->tpl_gop_stats);
    if (codec_status != VPX_CODEC_OK) {
      // vpx_internal_error longjmps out of the encode call on failure.
      vpx_internal_error(&cpi->common.error, codec_status,
                         "vp9_extrc_send_tpl_stats() failed");
    }
  }

#if CONFIG_NON_GREEDY_MV
  cpi->tpl_ready = 1;
#if DUMP_TPL_STATS
  dump_tpl_stats(cpi, tpl_group_frames, gf_group, gf_picture, cpi->tpl_bsize);
#endif  // DUMP_TPL_STATS
#endif  // CONFIG_NON_GREEDY_MV
}