Coverage Report

Created: 2024-09-06 07:53

/src/libvpx/vp8/common/mfqe.c
All executable lines below report an execution count of 0; the file is entirely uncovered. Source:
/*
 *  Copyright (c) 2012 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

/* MFQE: Multiframe Quality Enhancement
 * In rate limited situations keyframes may cause significant visual artifacts
 * commonly referred to as "popping." This file implements a postprocessing
 * algorithm which blends data from the preceding frame when there is no
 * motion and the q from the previous frame is lower, which indicates that it
 * is higher quality.
 */

#include "./vp8_rtcd.h"
#include "./vpx_dsp_rtcd.h"
#include "vp8/common/common.h"
#include "vp8/common/postproc.h"
#include "vpx_dsp/variance.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_scale/yv12config.h"

#include <limits.h>
#include <stdlib.h>

static void filter_by_weight(unsigned char *src, int src_stride,
                             unsigned char *dst, int dst_stride, int block_size,
                             int src_weight) {
  int dst_weight = (1 << MFQE_PRECISION) - src_weight;
  int rounding_bit = 1 << (MFQE_PRECISION - 1);
  int r, c;

  for (r = 0; r < block_size; ++r) {
    for (c = 0; c < block_size; ++c) {
      dst[c] = (src[c] * src_weight + dst[c] * dst_weight + rounding_bit) >>
               MFQE_PRECISION;
    }
    src += src_stride;
    dst += dst_stride;
  }
}
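
/* Worked example of the blend above, assuming MFQE_PRECISION is 4 (so the
 * two weights sum to 16 and rounding_bit is 8): with src_weight = 12,
 * src[c] = 200 and dst[c] = 100, dst_weight = 4 and
 *   dst[c] = (200 * 12 + 100 * 4 + 8) >> 4 = 2808 >> 4 = 175,
 * i.e. the output is pulled 3/4 of the way toward the source pixel. */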

void vp8_filter_by_weight16x16_c(unsigned char *src, int src_stride,
                                 unsigned char *dst, int dst_stride,
                                 int src_weight) {
  filter_by_weight(src, src_stride, dst, dst_stride, 16, src_weight);
}

void vp8_filter_by_weight8x8_c(unsigned char *src, int src_stride,
                               unsigned char *dst, int dst_stride,
                               int src_weight) {
  filter_by_weight(src, src_stride, dst, dst_stride, 8, src_weight);
}

void vp8_filter_by_weight4x4_c(unsigned char *src, int src_stride,
                               unsigned char *dst, int dst_stride,
                               int src_weight) {
  filter_by_weight(src, src_stride, dst, dst_stride, 4, src_weight);
}

static void apply_ifactor(unsigned char *y_src, int y_src_stride,
                          unsigned char *y_dst, int y_dst_stride,
                          unsigned char *u_src, unsigned char *v_src,
                          int uv_src_stride, unsigned char *u_dst,
                          unsigned char *v_dst, int uv_dst_stride,
                          int block_size, int src_weight) {
  if (block_size == 16) {
    vp8_filter_by_weight16x16(y_src, y_src_stride, y_dst, y_dst_stride,
                              src_weight);
    vp8_filter_by_weight8x8(u_src, uv_src_stride, u_dst, uv_dst_stride,
                            src_weight);
    vp8_filter_by_weight8x8(v_src, uv_src_stride, v_dst, uv_dst_stride,
                            src_weight);
  } else {
    vp8_filter_by_weight8x8(y_src, y_src_stride, y_dst, y_dst_stride,
                            src_weight);
    vp8_filter_by_weight4x4(u_src, uv_src_stride, u_dst, uv_dst_stride,
                            src_weight);
    vp8_filter_by_weight4x4(v_src, uv_src_stride, v_dst, uv_dst_stride,
                            src_weight);
  }
}
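
/* Note: the chroma planes get blocks half the luma size (16x16 Y pairs with
 * 8x8 U/V, 8x8 Y with 4x4 U/V) because the frame is 4:2:0 subsampled. */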

static unsigned int int_sqrt(unsigned int x) {
  unsigned int y = x;
  unsigned int guess;
  int p = 1;
  while (y >>= 1) p++;
  p >>= 1;

  guess = 0;
  while (p >= 0) {
    guess |= (1 << p);
    if (x < guess * guess) guess -= (1 << p);
    p--;
  }
  /* choose between guess or guess+1 */
  return guess + (guess * guess + guess + 1 <= x);
}
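
/* int_sqrt() returns the integer nearest to sqrt(x), built bit by bit from
 * the top. For example, int_sqrt(24): p starts at floor(bitlen / 2) = 2, the
 * loop settles on guess = 4 (the floor of sqrt(24)), and since
 * 4 * 4 + 4 + 1 = 21 <= 24 the result rounds up to 5 (sqrt(24) is roughly
 * 4.9). */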

#define USE_SSD
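
/* With USE_SSD defined, the "sad" values computed below actually hold the
 * per-pixel mean squared error (the vpx_variance helpers return the SSE
 * through their last argument), and the threshold comparisons later use
 * thr squared to match. Undefining it falls back to a true per-pixel SAD
 * compared directly against thr. */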
static void multiframe_quality_enhance_block(
    int blksize, /* Currently only values supported are 16, 8 */
    int qcurr, int qprev, unsigned char *y, unsigned char *u, unsigned char *v,
    int y_stride, int uv_stride, unsigned char *yd, unsigned char *ud,
    unsigned char *vd, int yd_stride, int uvd_stride) {
  static const unsigned char VP8_ZEROS[16] = { 0, 0, 0, 0, 0, 0, 0, 0,
                                               0, 0, 0, 0, 0, 0, 0, 0 };
  int uvblksize = blksize >> 1;
  int qdiff = qcurr - qprev;

  int i;
  unsigned char *up;
  unsigned char *udp;
  unsigned char *vp;
  unsigned char *vdp;

  unsigned int act, actd, sad, usad, vsad, sse, thr, thrsq, actrisk;

  if (blksize == 16) {
    actd = (vpx_variance16x16(yd, yd_stride, VP8_ZEROS, 0, &sse) + 128) >> 8;
    act = (vpx_variance16x16(y, y_stride, VP8_ZEROS, 0, &sse) + 128) >> 8;
#ifdef USE_SSD
    vpx_variance16x16(y, y_stride, yd, yd_stride, &sse);
    sad = (sse + 128) >> 8;
    vpx_variance8x8(u, uv_stride, ud, uvd_stride, &sse);
    usad = (sse + 32) >> 6;
    vpx_variance8x8(v, uv_stride, vd, uvd_stride, &sse);
    vsad = (sse + 32) >> 6;
#else
    sad = (vpx_sad16x16(y, y_stride, yd, yd_stride) + 128) >> 8;
    usad = (vpx_sad8x8(u, uv_stride, ud, uvd_stride) + 32) >> 6;
    vsad = (vpx_sad8x8(v, uv_stride, vd, uvd_stride) + 32) >> 6;
#endif
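    /* The "+ 128 >> 8" and "+ 32 >> 6" steps divide by the 256 luma and 64
     * chroma pixels of the block, so act, actd, sad, usad and vsad are all
     * per-pixel figures. */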
  } else {
    actd = (vpx_variance8x8(yd, yd_stride, VP8_ZEROS, 0, &sse) + 32) >> 6;
    act = (vpx_variance8x8(y, y_stride, VP8_ZEROS, 0, &sse) + 32) >> 6;
#ifdef USE_SSD
    vpx_variance8x8(y, y_stride, yd, yd_stride, &sse);
    sad = (sse + 32) >> 6;
    vpx_variance4x4(u, uv_stride, ud, uvd_stride, &sse);
    usad = (sse + 8) >> 4;
    vpx_variance4x4(v, uv_stride, vd, uvd_stride, &sse);
    vsad = (sse + 8) >> 4;
#else
    sad = (vpx_sad8x8(y, y_stride, yd, yd_stride) + 32) >> 6;
    usad = (vpx_sad4x4(u, uv_stride, ud, uvd_stride) + 8) >> 4;
    vsad = (vpx_sad4x4(v, uv_stride, vd, uvd_stride) + 8) >> 4;
#endif
  }

  actrisk = (actd > act * 5);

  /* thr = qdiff/16 + log2(actd) + log4(qprev) */
  thr = (qdiff >> 4);
  while (actd >>= 1) thr++;
  while (qprev >>= 2) thr++;
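
  /* Worked example: with qdiff = 32, a destination-block activity actd of 64
   * and qprev = 16, thr = 32 / 16 + log2(64) + log4(16) = 2 + 6 + 2 = 10.
   * Under USE_SSD the blend is then only considered when the per-pixel mean
   * squared error is below thr * thr = 100. */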

#ifdef USE_SSD
  thrsq = thr * thr;
  if (sad < thrsq &&
      /* additional checks for color mismatch and excessive addition of
       * high-frequencies */
      4 * usad < thrsq && 4 * vsad < thrsq && !actrisk)
#else
  if (sad < thr &&
      /* additional checks for color mismatch and excessive addition of
       * high-frequencies */
      2 * usad < thr && 2 * vsad < thr && !actrisk)
#endif
  {
    int ifactor;
#ifdef USE_SSD
    /* TODO: optimize this later to not need square root */
    sad = int_sqrt(sad);
#endif
    ifactor = (sad << MFQE_PRECISION) / thr;
    ifactor >>= (qdiff >> 5);
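
    /* ifactor is the weight given to the incoming frame, out of a total of
     * 1 << MFQE_PRECISION. E.g. if the square-rooted error is 5 against
     * thr = 10 and MFQE_PRECISION is 4, ifactor = (5 << 4) / 10 = 8, an even
     * blend; every additional 32 steps of qdiff then halve it, weighting the
     * output further toward the lower-q (higher quality) reference. */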

    if (ifactor) {
      apply_ifactor(y, y_stride, yd, yd_stride, u, v, uv_stride, ud, vd,
                    uvd_stride, blksize, ifactor);
    }
  } else { /* else implicitly copy from previous frame */
    if (blksize == 16) {
      vp8_copy_mem16x16(y, y_stride, yd, yd_stride);
      vp8_copy_mem8x8(u, uv_stride, ud, uvd_stride);
      vp8_copy_mem8x8(v, uv_stride, vd, uvd_stride);
    } else {
      vp8_copy_mem8x8(y, y_stride, yd, yd_stride);
      for (up = u, udp = ud, i = 0; i < uvblksize;
           ++i, up += uv_stride, udp += uvd_stride) {
        memcpy(udp, up, uvblksize);
      }
      for (vp = v, vdp = vd, i = 0; i < uvblksize;
           ++i, vp += uv_stride, vdp += uvd_stride) {
        memcpy(vdp, vp, uvblksize);
      }
    }
  }
}

static int qualify_inter_mb(const MODE_INFO *mode_info_context, int *map) {
  if (mode_info_context->mbmi.mb_skip_coeff) {
    map[0] = map[1] = map[2] = map[3] = 1;
  } else if (mode_info_context->mbmi.mode == SPLITMV) {
    static int ndx[4][4] = {
      { 0, 1, 4, 5 }, { 2, 3, 6, 7 }, { 8, 9, 12, 13 }, { 10, 11, 14, 15 }
    };
    int i, j;
    vp8_zero(*map);
    for (i = 0; i < 4; ++i) {
      map[i] = 1;
      for (j = 0; j < 4 && map[j]; ++j) {
        map[i] &= (mode_info_context->bmi[ndx[i][j]].mv.as_mv.row <= 2 &&
                   mode_info_context->bmi[ndx[i][j]].mv.as_mv.col <= 2);
      }
    }
  } else {
    map[0] = map[1] = map[2] = map[3] =
        (mode_info_context->mbmi.mode > B_PRED &&
         abs(mode_info_context->mbmi.mv.as_mv.row) <= 2 &&
         abs(mode_info_context->mbmi.mv.as_mv.col) <= 2);
  }
  return (map[0] + map[1] + map[2] + map[3]);
}
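
/* qualify_inter_mb() fills map[] with one flag per 8x8 quadrant of the
 * macroblock, in raster order; ndx[] groups the sixteen 4x4 sub-block motion
 * vectors of a SPLITMV macroblock onto those quadrants. A quadrant qualifies
 * when its motion vector components are no more than 2 in the codec's
 * internal units, i.e. essentially static, and the return value is the
 * number of qualifying quadrants (0 to 4). */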

void vp8_multiframe_quality_enhance(VP8_COMMON *cm) {
  YV12_BUFFER_CONFIG *show = cm->frame_to_show;
  YV12_BUFFER_CONFIG *dest = &cm->post_proc_buffer;

  FRAME_TYPE frame_type = cm->frame_type;
  /* Point at the base of the MB MODE_INFO list, which has motion vectors
   * etc. */
  const MODE_INFO *mode_info_context = cm->mi;
  int mb_row;
  int mb_col;
  int totmap, map[4];
  int qcurr = cm->base_qindex;
  int qprev = cm->postproc_state.last_base_qindex;

  unsigned char *y_ptr, *u_ptr, *v_ptr;
  unsigned char *yd_ptr, *ud_ptr, *vd_ptr;

  /* Set up the buffer pointers */
  y_ptr = show->y_buffer;
  u_ptr = show->u_buffer;
  v_ptr = show->v_buffer;
  yd_ptr = dest->y_buffer;
  ud_ptr = dest->u_buffer;
  vd_ptr = dest->v_buffer;

  /* postprocess each macroblock */
  for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
    for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
      /* if motion is high there will likely be no benefit */
      if (frame_type == INTER_FRAME) {
        totmap = qualify_inter_mb(mode_info_context, map);
      } else {
        totmap = (frame_type == KEY_FRAME ? 4 : 0);
      }
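      /* totmap counts the qualifying 8x8 quadrants: 4 means the whole
       * macroblock is enhanced at 16x16, 1 to 3 means only the flagged
       * quadrants are enhanced at 8x8 (the rest are copied), and 0 means the
       * macroblock is copied through unchanged. */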
      if (totmap) {
        if (totmap < 4) {
          int i, j;
          for (i = 0; i < 2; ++i) {
            for (j = 0; j < 2; ++j) {
              if (map[i * 2 + j]) {
                multiframe_quality_enhance_block(
                    8, qcurr, qprev, y_ptr + 8 * (i * show->y_stride + j),
                    u_ptr + 4 * (i * show->uv_stride + j),
                    v_ptr + 4 * (i * show->uv_stride + j), show->y_stride,
                    show->uv_stride, yd_ptr + 8 * (i * dest->y_stride + j),
                    ud_ptr + 4 * (i * dest->uv_stride + j),
                    vd_ptr + 4 * (i * dest->uv_stride + j), dest->y_stride,
                    dest->uv_stride);
              } else {
                /* copy an 8x8 block */
                int k;
                unsigned char *up = u_ptr + 4 * (i * show->uv_stride + j);
                unsigned char *udp = ud_ptr + 4 * (i * dest->uv_stride + j);
                unsigned char *vp = v_ptr + 4 * (i * show->uv_stride + j);
                unsigned char *vdp = vd_ptr + 4 * (i * dest->uv_stride + j);
                vp8_copy_mem8x8(
                    y_ptr + 8 * (i * show->y_stride + j), show->y_stride,
                    yd_ptr + 8 * (i * dest->y_stride + j), dest->y_stride);
                for (k = 0; k < 4; ++k, up += show->uv_stride,
                    udp += dest->uv_stride, vp += show->uv_stride,
                    vdp += dest->uv_stride) {
                  memcpy(udp, up, 4);
                  memcpy(vdp, vp, 4);
                }
              }
            }
          }
        } else { /* totmap = 4 */
          multiframe_quality_enhance_block(
              16, qcurr, qprev, y_ptr, u_ptr, v_ptr, show->y_stride,
              show->uv_stride, yd_ptr, ud_ptr, vd_ptr, dest->y_stride,
              dest->uv_stride);
        }
      } else {
        vp8_copy_mem16x16(y_ptr, show->y_stride, yd_ptr, dest->y_stride);
        vp8_copy_mem8x8(u_ptr, show->uv_stride, ud_ptr, dest->uv_stride);
        vp8_copy_mem8x8(v_ptr, show->uv_stride, vd_ptr, dest->uv_stride);
      }
      y_ptr += 16;
      u_ptr += 8;
      v_ptr += 8;
      yd_ptr += 16;
      ud_ptr += 8;
      vd_ptr += 8;
      mode_info_context++; /* step to next MB */
    }

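    /* Rewind by the 16 luma / 8 chroma columns that the inner loop advanced
     * per macroblock, then step down 16 luma / 8 chroma rows to the first
     * macroblock of the next row. */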
    y_ptr += show->y_stride * 16 - 16 * cm->mb_cols;
    u_ptr += show->uv_stride * 8 - 8 * cm->mb_cols;
    v_ptr += show->uv_stride * 8 - 8 * cm->mb_cols;
    yd_ptr += dest->y_stride * 16 - 16 * cm->mb_cols;
    ud_ptr += dest->uv_stride * 8 - 8 * cm->mb_cols;
    vd_ptr += dest->uv_stride * 8 - 8 * cm->mb_cols;

    mode_info_context++; /* Skip border mb */
  }
}
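
This file is only exercised when the VP8 decoder's postprocessor runs with MFQE enabled, which is consistent with the zero execution counts above. As a reference point, here is a minimal, hypothetical sketch of how a caller might enable it through the public libvpx API; the control and flag names are those declared in vpx/vp8.h, and error handling is elided.

#include "vpx/vpx_decoder.h"
#include "vpx/vp8.h" /* vp8_postproc_cfg_t, VP8_SET_POSTPROC, VP8_DEBLOCK, VP8_MFQE */

/* Sketch: turn on MFQE (plus deblocking) for a VP8 decoder instance.
 * Assumes `decoder` was created with vpx_codec_dec_init() using the
 * VPX_CODEC_USE_POSTPROC init flag; without it the control cannot take
 * effect. */
static void enable_mfqe(vpx_codec_ctx_t *decoder) {
  vp8_postproc_cfg_t pp;
  pp.post_proc_flag = VP8_DEBLOCK | VP8_MFQE;
  pp.deblocking_level = 4; /* moderate deblocking strength */
  pp.noise_level = 0;      /* no noise addition */
  vpx_codec_control(decoder, VP8_SET_POSTPROC, &pp);
}

With that configuration, vp8_multiframe_quality_enhance() above runs once per shown frame, blending each low-motion macroblock of the newly decoded frame with the previous postprocessed output.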