Coverage Report

Created: 2025-11-29 06:23

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/libvpx/vp8/common/mfqe.c
Line
Count
Source
1
/*
2
 *  Copyright (c) 2012 The WebM project authors. All Rights Reserved.
3
 *
4
 *  Use of this source code is governed by a BSD-style license
5
 *  that can be found in the LICENSE file in the root of the source
6
 *  tree. An additional intellectual property rights grant can be found
7
 *  in the file PATENTS.  All contributing project authors may
8
 *  be found in the AUTHORS file in the root of the source tree.
9
 */
10
11
/* MFQE: Multiframe Quality Enhancement
12
 * In rate limited situations keyframes may cause significant visual artifacts
13
 * commonly referred to as "popping." This file implements a postprocessing
14
 * algorithm which blends data from the preceding frame when there is no
15
 * motion and the q from the previous frame is lower which indicates that it is
16
 * higher quality.
17
 */
18
19
#include "./vp8_rtcd.h"
20
#include "./vpx_dsp_rtcd.h"
21
#include "vp8/common/common.h"
22
#include "vp8/common/postproc.h"
23
#include "vpx_dsp/variance.h"
24
#include "vpx_mem/vpx_mem.h"
25
#include "vpx_scale/yv12config.h"
26
27
#include <limits.h>
28
#include <stdlib.h>
29
30
static void filter_by_weight(unsigned char *src, int src_stride,
31
                             unsigned char *dst, int dst_stride, int block_size,
32
23.3k
                             int src_weight) {
33
23.3k
  int dst_weight = (1 << MFQE_PRECISION) - src_weight;
34
23.3k
  int rounding_bit = 1 << (MFQE_PRECISION - 1);
35
23.3k
  int r, c;
36
37
116k
  for (r = 0; r < block_size; ++r) {
38
467k
    for (c = 0; c < block_size; ++c) {
39
373k
      dst[c] = (src[c] * src_weight + dst[c] * dst_weight + rounding_bit) >>
40
373k
               MFQE_PRECISION;
41
373k
    }
42
93.4k
    src += src_stride;
43
93.4k
    dst += dst_stride;
44
93.4k
  }
45
23.3k
}
46
47
void vp8_filter_by_weight16x16_c(unsigned char *src, int src_stride,
48
                                 unsigned char *dst, int dst_stride,
49
0
                                 int src_weight) {
50
0
  filter_by_weight(src, src_stride, dst, dst_stride, 16, src_weight);
51
0
}
52
53
void vp8_filter_by_weight8x8_c(unsigned char *src, int src_stride,
54
                               unsigned char *dst, int dst_stride,
55
0
                               int src_weight) {
56
0
  filter_by_weight(src, src_stride, dst, dst_stride, 8, src_weight);
57
0
}
58
59
void vp8_filter_by_weight4x4_c(unsigned char *src, int src_stride,
60
                               unsigned char *dst, int dst_stride,
61
23.3k
                               int src_weight) {
62
23.3k
  filter_by_weight(src, src_stride, dst, dst_stride, 4, src_weight);
63
23.3k
}
64
65
static void apply_ifactor(unsigned char *y_src, int y_src_stride,
66
                          unsigned char *y_dst, int y_dst_stride,
67
                          unsigned char *u_src, unsigned char *v_src,
68
                          int uv_src_stride, unsigned char *u_dst,
69
                          unsigned char *v_dst, int uv_dst_stride,
70
58.8k
                          int block_size, int src_weight) {
71
58.8k
  if (block_size == 16) {
72
47.1k
    vp8_filter_by_weight16x16(y_src, y_src_stride, y_dst, y_dst_stride,
73
47.1k
                              src_weight);
74
47.1k
    vp8_filter_by_weight8x8(u_src, uv_src_stride, u_dst, uv_dst_stride,
75
47.1k
                            src_weight);
76
47.1k
    vp8_filter_by_weight8x8(v_src, uv_src_stride, v_dst, uv_dst_stride,
77
47.1k
                            src_weight);
78
47.1k
  } else {
79
11.6k
    vp8_filter_by_weight8x8(y_src, y_src_stride, y_dst, y_dst_stride,
80
11.6k
                            src_weight);
81
11.6k
    vp8_filter_by_weight4x4(u_src, uv_src_stride, u_dst, uv_dst_stride,
82
11.6k
                            src_weight);
83
11.6k
    vp8_filter_by_weight4x4(v_src, uv_src_stride, v_dst, uv_dst_stride,
84
11.6k
                            src_weight);
85
11.6k
  }
86
58.8k
}
87
88
235k
static unsigned int int_sqrt(unsigned int x) {
89
235k
  unsigned int y = x;
90
235k
  unsigned int guess;
91
235k
  int p = 1;
92
410k
  while (y >>= 1) p++;
93
235k
  p >>= 1;
94
95
235k
  guess = 0;
96
574k
  while (p >= 0) {
97
338k
    guess |= (1 << p);
98
338k
    if (x < guess * guess) guess -= (1 << p);
99
338k
    p--;
100
338k
  }
101
  /* choose between guess or guess+1 */
102
235k
  return guess + (guess * guess + guess + 1 <= x);
103
235k
}
104
105
#define USE_SSD
106
static void multiframe_quality_enhance_block(
107
    int blksize, /* Currently only values supported are 16, 8 */
108
    int qcurr, int qprev, unsigned char *y, unsigned char *u, unsigned char *v,
109
    int y_stride, int uv_stride, unsigned char *yd, unsigned char *ud,
110
364k
    unsigned char *vd, int yd_stride, int uvd_stride) {
111
364k
  static const unsigned char VP8_ZEROS[16] = { 0, 0, 0, 0, 0, 0, 0, 0,
112
364k
                                               0, 0, 0, 0, 0, 0, 0, 0 };
113
364k
  int uvblksize = blksize >> 1;
114
364k
  int qdiff = qcurr - qprev;
115
116
364k
  int i;
117
364k
  unsigned char *up;
118
364k
  unsigned char *udp;
119
364k
  unsigned char *vp;
120
364k
  unsigned char *vdp;
121
122
364k
  unsigned int act, actd, sad, usad, vsad, sse, thr, thrsq, actrisk;
123
124
364k
  if (blksize == 16) {
125
238k
    actd = (vpx_variance16x16(yd, yd_stride, VP8_ZEROS, 0, &sse) + 128) >> 8;
126
238k
    act = (vpx_variance16x16(y, y_stride, VP8_ZEROS, 0, &sse) + 128) >> 8;
127
238k
#ifdef USE_SSD
128
238k
    vpx_variance16x16(y, y_stride, yd, yd_stride, &sse);
129
238k
    sad = (sse + 128) >> 8;
130
238k
    vpx_variance8x8(u, uv_stride, ud, uvd_stride, &sse);
131
238k
    usad = (sse + 32) >> 6;
132
238k
    vpx_variance8x8(v, uv_stride, vd, uvd_stride, &sse);
133
238k
    vsad = (sse + 32) >> 6;
134
#else
135
    sad = (vpx_sad16x16(y, y_stride, yd, yd_stride) + 128) >> 8;
136
    usad = (vpx_sad8x8(u, uv_stride, ud, uvd_stride) + 32) >> 6;
137
    vsad = (vpx_sad8x8(v, uv_stride, vd, uvd_stride) + 32) >> 6;
138
#endif
139
238k
  } else {
140
126k
    actd = (vpx_variance8x8(yd, yd_stride, VP8_ZEROS, 0, &sse) + 32) >> 6;
141
126k
    act = (vpx_variance8x8(y, y_stride, VP8_ZEROS, 0, &sse) + 32) >> 6;
142
126k
#ifdef USE_SSD
143
126k
    vpx_variance8x8(y, y_stride, yd, yd_stride, &sse);
144
126k
    sad = (sse + 32) >> 6;
145
126k
    vpx_variance4x4(u, uv_stride, ud, uvd_stride, &sse);
146
126k
    usad = (sse + 8) >> 4;
147
126k
    vpx_variance4x4(v, uv_stride, vd, uvd_stride, &sse);
148
126k
    vsad = (sse + 8) >> 4;
149
#else
150
    sad = (vpx_sad8x8(y, y_stride, yd, yd_stride) + 32) >> 6;
151
    usad = (vpx_sad4x4(u, uv_stride, ud, uvd_stride) + 8) >> 4;
152
    vsad = (vpx_sad4x4(v, uv_stride, vd, uvd_stride) + 8) >> 4;
153
#endif
154
126k
  }
155
156
364k
  actrisk = (actd > act * 5);
157
158
  /* thr = qdiff/16 + log2(act) + log4(qprev) */
159
364k
  thr = (qdiff >> 4);
160
1.01M
  while (actd >>= 1) thr++;
161
1.09M
  while (qprev >>= 2) thr++;
162
163
364k
#ifdef USE_SSD
164
364k
  thrsq = thr * thr;
165
364k
  if (sad < thrsq &&
166
      /* additional checks for color mismatch and excessive addition of
167
       * high-frequencies */
168
298k
      4 * usad < thrsq && 4 * vsad < thrsq && !actrisk)
169
#else
170
  if (sad < thr &&
171
      /* additional checks for color mismatch and excessive addition of
172
       * high-frequencies */
173
      2 * usad < thr && 2 * vsad < thr && !actrisk)
174
#endif
175
235k
  {
176
235k
    int ifactor;
177
235k
#ifdef USE_SSD
178
    /* TODO: optimize this later to not need sqr root */
179
235k
    sad = int_sqrt(sad);
180
235k
#endif
181
235k
    ifactor = (sad << MFQE_PRECISION) / thr;
182
235k
    ifactor >>= (qdiff >> 5);
183
184
235k
    if (ifactor) {
185
58.8k
      apply_ifactor(y, y_stride, yd, yd_stride, u, v, uv_stride, ud, vd,
186
58.8k
                    uvd_stride, blksize, ifactor);
187
58.8k
    }
188
235k
  } else { /* else implicitly copy from previous frame */
189
129k
    if (blksize == 16) {
190
77.0k
      vp8_copy_mem16x16(y, y_stride, yd, yd_stride);
191
77.0k
      vp8_copy_mem8x8(u, uv_stride, ud, uvd_stride);
192
77.0k
      vp8_copy_mem8x8(v, uv_stride, vd, uvd_stride);
193
77.0k
    } else {
194
52.1k
      vp8_copy_mem8x8(y, y_stride, yd, yd_stride);
195
260k
      for (up = u, udp = ud, i = 0; i < uvblksize;
196
208k
           ++i, up += uv_stride, udp += uvd_stride) {
197
208k
        memcpy(udp, up, uvblksize);
198
208k
      }
199
260k
      for (vp = v, vdp = vd, i = 0; i < uvblksize;
200
208k
           ++i, vp += uv_stride, vdp += uvd_stride) {
201
208k
        memcpy(vdp, vp, uvblksize);
202
208k
      }
203
52.1k
    }
204
129k
  }
205
364k
}
206
207
3.04M
static int qualify_inter_mb(const MODE_INFO *mode_info_context, int *map) {
208
3.04M
  if (mode_info_context->mbmi.mb_skip_coeff) {
209
173k
    map[0] = map[1] = map[2] = map[3] = 1;
210
2.86M
  } else if (mode_info_context->mbmi.mode == SPLITMV) {
211
70.0k
    static int ndx[4][4] = {
212
70.0k
      { 0, 1, 4, 5 }, { 2, 3, 6, 7 }, { 8, 9, 12, 13 }, { 10, 11, 14, 15 }
213
70.0k
    };
214
70.0k
    int i, j;
215
70.0k
    vp8_zero(*map);
216
350k
    for (i = 0; i < 4; ++i) {
217
280k
      map[i] = 1;
218
791k
      for (j = 0; j < 4 && map[j]; ++j) {
219
511k
        map[i] &= (mode_info_context->bmi[ndx[i][j]].mv.as_mv.row <= 2 &&
220
454k
                   mode_info_context->bmi[ndx[i][j]].mv.as_mv.col <= 2);
221
511k
      }
222
280k
    }
223
2.79M
  } else {
224
2.79M
    map[0] = map[1] = map[2] = map[3] =
225
2.79M
        (mode_info_context->mbmi.mode > B_PRED &&
226
127k
         abs(mode_info_context->mbmi.mv.as_mv.row) <= 2 &&
227
58.8k
         abs(mode_info_context->mbmi.mv.as_mv.col) <= 2);
228
2.79M
  }
229
3.04M
  return (map[0] + map[1] + map[2] + map[3]);
230
3.04M
}
231
232
1.36k
void vp8_multiframe_quality_enhance(VP8_COMMON *cm) {
233
1.36k
  YV12_BUFFER_CONFIG *show = cm->frame_to_show;
234
1.36k
  YV12_BUFFER_CONFIG *dest = &cm->post_proc_buffer;
235
236
1.36k
  FRAME_TYPE frame_type = cm->frame_type;
237
  /* Point at base of Mb MODE_INFO list has motion vectors etc */
238
1.36k
  const MODE_INFO *mode_info_context = cm->mi;
239
1.36k
  int mb_row;
240
1.36k
  int mb_col;
241
1.36k
  int totmap, map[4];
242
1.36k
  int qcurr = cm->base_qindex;
243
1.36k
  int qprev = cm->postproc_state.last_base_qindex;
244
245
1.36k
  unsigned char *y_ptr, *u_ptr, *v_ptr;
246
1.36k
  unsigned char *yd_ptr, *ud_ptr, *vd_ptr;
247
248
  /* Set up the buffer pointers */
249
1.36k
  y_ptr = show->y_buffer;
250
1.36k
  u_ptr = show->u_buffer;
251
1.36k
  v_ptr = show->v_buffer;
252
1.36k
  yd_ptr = dest->y_buffer;
253
1.36k
  ud_ptr = dest->u_buffer;
254
1.36k
  vd_ptr = dest->v_buffer;
255
256
  /* postprocess each macro block */
257
71.7k
  for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
258
3.11M
    for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
259
      /* if motion is high there will likely be no benefit */
260
3.04M
      if (frame_type == INTER_FRAME) {
261
3.04M
        totmap = qualify_inter_mb(mode_info_context, map);
262
3.04M
      } else {
263
439
        totmap = (frame_type == KEY_FRAME ? 4 : 0);
264
439
      }
265
3.04M
      if (totmap) {
266
284k
        if (totmap < 4) {
267
45.6k
          int i, j;
268
136k
          for (i = 0; i < 2; ++i) {
269
273k
            for (j = 0; j < 2; ++j) {
270
182k
              if (map[i * 2 + j]) {
271
126k
                multiframe_quality_enhance_block(
272
126k
                    8, qcurr, qprev, y_ptr + 8 * (i * show->y_stride + j),
273
126k
                    u_ptr + 4 * (i * show->uv_stride + j),
274
126k
                    v_ptr + 4 * (i * show->uv_stride + j), show->y_stride,
275
126k
                    show->uv_stride, yd_ptr + 8 * (i * dest->y_stride + j),
276
126k
                    ud_ptr + 4 * (i * dest->uv_stride + j),
277
126k
                    vd_ptr + 4 * (i * dest->uv_stride + j), dest->y_stride,
278
126k
                    dest->uv_stride);
279
126k
              } else {
280
                /* copy a 8x8 block */
281
56.1k
                int k;
282
56.1k
                unsigned char *up = u_ptr + 4 * (i * show->uv_stride + j);
283
56.1k
                unsigned char *udp = ud_ptr + 4 * (i * dest->uv_stride + j);
284
56.1k
                unsigned char *vp = v_ptr + 4 * (i * show->uv_stride + j);
285
56.1k
                unsigned char *vdp = vd_ptr + 4 * (i * dest->uv_stride + j);
286
56.1k
                vp8_copy_mem8x8(
287
56.1k
                    y_ptr + 8 * (i * show->y_stride + j), show->y_stride,
288
56.1k
                    yd_ptr + 8 * (i * dest->y_stride + j), dest->y_stride);
289
280k
                for (k = 0; k < 4; ++k, up += show->uv_stride,
290
224k
                    udp += dest->uv_stride, vp += show->uv_stride,
291
224k
                    vdp += dest->uv_stride) {
292
224k
                  memcpy(udp, up, 4);
293
224k
                  memcpy(vdp, vp, 4);
294
224k
                }
295
56.1k
              }
296
182k
            }
297
91.3k
          }
298
238k
        } else { /* totmap = 4 */
299
238k
          multiframe_quality_enhance_block(
300
238k
              16, qcurr, qprev, y_ptr, u_ptr, v_ptr, show->y_stride,
301
238k
              show->uv_stride, yd_ptr, ud_ptr, vd_ptr, dest->y_stride,
302
238k
              dest->uv_stride);
303
238k
        }
304
2.75M
      } else {
305
2.75M
        vp8_copy_mem16x16(y_ptr, show->y_stride, yd_ptr, dest->y_stride);
306
2.75M
        vp8_copy_mem8x8(u_ptr, show->uv_stride, ud_ptr, dest->uv_stride);
307
2.75M
        vp8_copy_mem8x8(v_ptr, show->uv_stride, vd_ptr, dest->uv_stride);
308
2.75M
      }
309
3.04M
      y_ptr += 16;
310
3.04M
      u_ptr += 8;
311
3.04M
      v_ptr += 8;
312
3.04M
      yd_ptr += 16;
313
3.04M
      ud_ptr += 8;
314
3.04M
      vd_ptr += 8;
315
3.04M
      mode_info_context++; /* step to next MB */
316
3.04M
    }
317
318
70.4k
    y_ptr += show->y_stride * 16 - 16 * cm->mb_cols;
319
70.4k
    u_ptr += show->uv_stride * 8 - 8 * cm->mb_cols;
320
70.4k
    v_ptr += show->uv_stride * 8 - 8 * cm->mb_cols;
321
70.4k
    yd_ptr += dest->y_stride * 16 - 16 * cm->mb_cols;
322
70.4k
    ud_ptr += dest->uv_stride * 8 - 8 * cm->mb_cols;
323
70.4k
    vd_ptr += dest->uv_stride * 8 - 8 * cm->mb_cols;
324
325
70.4k
    mode_info_context++; /* Skip border mb */
326
70.4k
  }
327
1.36k
}