Coverage Report

Created: 2025-12-31 06:49

/src/aom/av1/common/x86/selfguided_avx2.c
Line | Count | Source
1
/*
2
 * Copyright (c) 2018, Alliance for Open Media. All rights reserved.
3
 *
4
 * This source code is subject to the terms of the BSD 2 Clause License and
5
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
6
 * was not distributed with this source code in the LICENSE file, you can
7
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
8
 * Media Patent License 1.0 was not distributed with this source code in the
9
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
10
 */
11
12
#include <immintrin.h>
13
14
#include "config/aom_config.h"
15
#include "config/av1_rtcd.h"
16
17
#include "av1/common/restoration.h"
18
#include "aom_dsp/x86/synonyms.h"
19
#include "aom_dsp/x86/synonyms_avx2.h"
20
21
// Load 8 bytes from the possibly-misaligned pointer p, extend each byte to
22
// 32-bit precision and return them in an AVX2 register.
23
30.8M
static __m256i yy256_load_extend_8_32(const void *p) {
24
30.8M
  return _mm256_cvtepu8_epi32(xx_loadl_64(p));
25
30.8M
}
26
27
// Load 8 halfwords from the possibly-misaligned pointer p, extend each
28
// halfword to 32-bit precision and return them in an AVX2 register.
29
99.4M
static __m256i yy256_load_extend_16_32(const void *p) {
30
99.4M
  return _mm256_cvtepu16_epi32(xx_loadu_128(p));
31
99.4M
}
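
As a reading aid, here is a scalar sketch of what the two load-and-extend helpers above produce per lane. This is an illustration only, not part of the measured source, and the scalar function names are hypothetical.

#include <stdint.h>

/* Scalar model of yy256_load_extend_8_32: zero-extend 8 bytes to 32 bits. */
static void load_extend_8_32_scalar(const uint8_t *p, int32_t out[8]) {
  for (int k = 0; k < 8; k++) out[k] = (int32_t)p[k];
}

/* Scalar model of yy256_load_extend_16_32: zero-extend 8 halfwords to 32 bits. */
static void load_extend_16_32_scalar(const uint16_t *p, int32_t out[8]) {
  for (int k = 0; k < 8; k++) out[k] = (int32_t)p[k];
}
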
32
33
// Compute the scan of an AVX2 register holding 8 32-bit integers. If the
34
// register holds x0..x7 then the scan will hold x0, x0+x1, x0+x1+x2, ...,
35
// x0+x1+...+x7
36
//
37
// Let [...] represent a 128-bit block, and let a, ..., h be 32-bit integers
38
// (assumed small enough to be able to add them without overflow).
39
//
40
// Use -> as shorthand for summing, i.e. h->a = h + g + f + e + d + c + b + a.
41
//
42
// x   = [h g f e][d c b a]
43
// x01 = [g f e 0][c b a 0]
44
// x02 = [g+h f+g e+f e][c+d b+c a+b a]
45
// x03 = [e+f e 0 0][a+b a 0 0]
46
// x04 = [e->h e->g e->f e][a->d a->c a->b a]
47
// s   = a->d
48
// s01 = [a->d a->d a->d a->d]
49
// s02 = [a->d a->d a->d a->d][0 0 0 0]
50
// ret = [a->h a->g a->f a->e][a->d a->c a->b a]
51
259M
static __m256i scan_32(__m256i x) {
52
259M
  const __m256i x01 = _mm256_slli_si256(x, 4);
53
259M
  const __m256i x02 = _mm256_add_epi32(x, x01);
54
259M
  const __m256i x03 = _mm256_slli_si256(x02, 8);
55
259M
  const __m256i x04 = _mm256_add_epi32(x02, x03);
56
259M
  const int32_t s = _mm256_extract_epi32(x04, 3);
57
259M
  const __m128i s01 = _mm_set1_epi32(s);
58
259M
  const __m256i s02 = _mm256_insertf128_si256(_mm256_setzero_si256(), s01, 1);
59
259M
  return _mm256_add_epi32(x04, s02);
60
259M
}
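
For orientation, a scalar reference for the same inclusive prefix sum over eight lanes (a sketch only; the AVX2 code above reaches the identical result with two shift-and-add steps per 128-bit half plus one broadcast of the low half's total into the high half).

#include <stdint.h>

/* Scalar model of scan_32: out[k] = x[0] + x[1] + ... + x[k]. */
static void scan_32_scalar(const int32_t x[8], int32_t out[8]) {
  int32_t acc = 0;
  for (int k = 0; k < 8; k++) {
    acc += x[k];
    out[k] = acc;
  }
}
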
61
62
// Compute two integral images from src. B sums elements; A sums their
63
// squares. The images are offset by one pixel, so will have width and height
64
// equal to width + 1, height + 1 and the first row and column will be zero.
65
//
66
// A+1 and B+1 should be aligned to 32 bytes. buf_stride should be a multiple
67
// of 8.
68
69
672k
static void *memset_zero_avx(int32_t *dest, const __m256i *zero, size_t count) {
70
672k
  unsigned int i = 0;
71
1.85M
  for (i = 0; i < (count & 0xffffffe0); i += 32) {
72
1.17M
    _mm256_storeu_si256((__m256i *)(dest + i), *zero);
73
1.17M
    _mm256_storeu_si256((__m256i *)(dest + i + 8), *zero);
74
1.17M
    _mm256_storeu_si256((__m256i *)(dest + i + 16), *zero);
75
1.17M
    _mm256_storeu_si256((__m256i *)(dest + i + 24), *zero);
76
1.17M
  }
77
1.35M
  for (; i < (count & 0xfffffff8); i += 8) {
78
682k
    _mm256_storeu_si256((__m256i *)(dest + i), *zero);
79
682k
  }
80
4.54M
  for (; i < count; i++) {
81
3.87M
    dest[i] = 0;
82
3.87M
  }
83
672k
  return dest;
84
672k
}
85
86
static void integral_images(const uint8_t *src, int src_stride, int width,
87
                            int height, int32_t *A, int32_t *B,
88
130k
                            int buf_stride) {
89
130k
  const __m256i zero = _mm256_setzero_si256();
90
  // Write out the zero top row
91
130k
  memset_zero_avx(A, &zero, (width + 8));
92
130k
  memset_zero_avx(B, &zero, (width + 8));
93
4.41M
  for (int i = 0; i < height; ++i) {
94
    // Zero the left column.
95
4.28M
    A[(i + 1) * buf_stride] = B[(i + 1) * buf_stride] = 0;
96
97
    // ldiff is the difference H - D where H is the output sample immediately
98
    // to the left and D is the output sample above it. These are scalars,
99
    // replicated across the eight lanes.
100
4.28M
    __m256i ldiff1 = zero, ldiff2 = zero;
101
35.1M
    for (int j = 0; j < width; j += 8) {
102
30.8M
      const int ABj = 1 + j;
103
104
30.8M
      const __m256i above1 = yy_load_256(B + ABj + i * buf_stride);
105
30.8M
      const __m256i above2 = yy_load_256(A + ABj + i * buf_stride);
106
107
30.8M
      const __m256i x1 = yy256_load_extend_8_32(src + j + i * src_stride);
108
30.8M
      const __m256i x2 = _mm256_madd_epi16(x1, x1);
109
110
30.8M
      const __m256i sc1 = scan_32(x1);
111
30.8M
      const __m256i sc2 = scan_32(x2);
112
113
30.8M
      const __m256i row1 =
114
30.8M
          _mm256_add_epi32(_mm256_add_epi32(sc1, above1), ldiff1);
115
30.8M
      const __m256i row2 =
116
30.8M
          _mm256_add_epi32(_mm256_add_epi32(sc2, above2), ldiff2);
117
118
30.8M
      yy_store_256(B + ABj + (i + 1) * buf_stride, row1);
119
30.8M
      yy_store_256(A + ABj + (i + 1) * buf_stride, row2);
120
121
      // Calculate the new H - D.
122
30.8M
      ldiff1 = _mm256_set1_epi32(
123
30.8M
          _mm256_extract_epi32(_mm256_sub_epi32(row1, above1), 7));
124
30.8M
      ldiff2 = _mm256_set1_epi32(
125
30.8M
          _mm256_extract_epi32(_mm256_sub_epi32(row2, above2), 7));
126
30.8M
    }
127
4.28M
  }
128
130k
}
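
The vectorised row loop above is equivalent to the usual integral-image recurrence; the AVX2 version just carries the scalar H - D term in ldiff instead of re-reading the left and up-left samples. A scalar sketch (illustration only; B accumulates pixels, A accumulates squared pixels, and both keep a zero top row and left column):

#include <stdint.h>

/* Scalar model of integral_images: each output sample is the current pixel
 * (or its square) plus the sample above, plus the sample to the left, minus
 * the sample diagonally up-left. */
static void integral_images_scalar(const uint8_t *src, int src_stride,
                                   int width, int height, int32_t *A,
                                   int32_t *B, int buf_stride) {
  for (int j = 0; j <= width; j++) A[j] = B[j] = 0;
  for (int i = 0; i < height; i++) {
    A[(i + 1) * buf_stride] = B[(i + 1) * buf_stride] = 0;
    for (int j = 0; j < width; j++) {
      const int32_t x = src[i * src_stride + j];
      B[(i + 1) * buf_stride + (j + 1)] =
          x + B[i * buf_stride + (j + 1)] + B[(i + 1) * buf_stride + j] -
          B[i * buf_stride + j];
      A[(i + 1) * buf_stride + (j + 1)] =
          x * x + A[i * buf_stride + (j + 1)] + A[(i + 1) * buf_stride + j] -
          A[i * buf_stride + j];
    }
  }
}
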
129
130
// Compute two integral images from src. B sums elements; A sums their squares
131
//
132
// A and B should be aligned to 32 bytes. buf_stride should be a multiple of 8.
133
static void integral_images_highbd(const uint16_t *src, int src_stride,
134
                                   int width, int height, int32_t *A,
135
207k
                                   int32_t *B, int buf_stride) {
136
207k
  const __m256i zero = _mm256_setzero_si256();
137
  // Write out the zero top row
138
207k
  memset_zero_avx(A, &zero, (width + 8));
139
207k
  memset_zero_avx(B, &zero, (width + 8));
140
141
11.7M
  for (int i = 0; i < height; ++i) {
142
    // Zero the left column.
143
11.5M
    A[(i + 1) * buf_stride] = B[(i + 1) * buf_stride] = 0;
144
145
    // ldiff is the difference H - D where H is the output sample immediately
146
    // to the left and D is the output sample above it. These are scalars,
147
    // replicated across the eight lanes.
148
11.5M
    __m256i ldiff1 = zero, ldiff2 = zero;
149
111M
    for (int j = 0; j < width; j += 8) {
150
99.6M
      const int ABj = 1 + j;
151
152
99.6M
      const __m256i above1 = yy_load_256(B + ABj + i * buf_stride);
153
99.6M
      const __m256i above2 = yy_load_256(A + ABj + i * buf_stride);
154
155
99.6M
      const __m256i x1 = yy256_load_extend_16_32(src + j + i * src_stride);
156
99.6M
      const __m256i x2 = _mm256_madd_epi16(x1, x1);
157
158
99.6M
      const __m256i sc1 = scan_32(x1);
159
99.6M
      const __m256i sc2 = scan_32(x2);
160
161
99.6M
      const __m256i row1 =
162
99.6M
          _mm256_add_epi32(_mm256_add_epi32(sc1, above1), ldiff1);
163
99.6M
      const __m256i row2 =
164
99.6M
          _mm256_add_epi32(_mm256_add_epi32(sc2, above2), ldiff2);
165
166
99.6M
      yy_store_256(B + ABj + (i + 1) * buf_stride, row1);
167
99.6M
      yy_store_256(A + ABj + (i + 1) * buf_stride, row2);
168
169
      // Calculate the new H - D.
170
99.6M
      ldiff1 = _mm256_set1_epi32(
171
99.6M
          _mm256_extract_epi32(_mm256_sub_epi32(row1, above1), 7));
172
99.6M
      ldiff2 = _mm256_set1_epi32(
173
99.6M
          _mm256_extract_epi32(_mm256_sub_epi32(row2, above2), 7));
174
99.6M
    }
175
11.5M
  }
176
207k
}
177
178
// Compute 8 values of boxsum from the given integral image. ii should point
179
// at the middle of the box (for the first value). r is the box radius.
180
164M
static inline __m256i boxsum_from_ii(const int32_t *ii, int stride, int r) {
181
164M
  const __m256i tl = yy_loadu_256(ii - (r + 1) - (r + 1) * stride);
182
164M
  const __m256i tr = yy_loadu_256(ii + (r + 0) - (r + 1) * stride);
183
164M
  const __m256i bl = yy_loadu_256(ii - (r + 1) + r * stride);
184
164M
  const __m256i br = yy_loadu_256(ii + (r + 0) + r * stride);
185
164M
  const __m256i u = _mm256_sub_epi32(tr, tl);
186
164M
  const __m256i v = _mm256_sub_epi32(br, bl);
187
164M
  return _mm256_sub_epi32(v, u);
188
164M
}
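
Per lane this is the classic four-corner identity on an integral image. A scalar sketch of the same lookup (illustration only):

#include <stdint.h>

/* Scalar model of boxsum_from_ii: sum of the (2r+1)x(2r+1) box centred on ii,
 * via the four corners of the integral image. */
static int32_t boxsum_from_ii_scalar(const int32_t *ii, int stride, int r) {
  const int32_t tl = ii[-(r + 1) - (r + 1) * stride];
  const int32_t tr = ii[(r + 0) - (r + 1) * stride];
  const int32_t bl = ii[-(r + 1) + r * stride];
  const int32_t br = ii[(r + 0) + r * stride];
  return (br - bl) - (tr - tl);  /* == br - bl - tr + tl */
}
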
189
190
118M
static __m256i round_for_shift(unsigned shift) {
191
118M
  return _mm256_set1_epi32((1 << shift) >> 1);
192
118M
}
193
194
82.3M
static __m256i compute_p(__m256i sum1, __m256i sum2, int bit_depth, int n) {
195
82.3M
  __m256i an, bb;
196
82.3M
  if (bit_depth > 8) {
197
50.8M
    const __m256i rounding_a = round_for_shift(2 * (bit_depth - 8));
198
50.8M
    const __m256i rounding_b = round_for_shift(bit_depth - 8);
199
50.8M
    const __m128i shift_a = _mm_cvtsi32_si128(2 * (bit_depth - 8));
200
50.8M
    const __m128i shift_b = _mm_cvtsi32_si128(bit_depth - 8);
201
50.8M
    const __m256i a =
202
50.8M
        _mm256_srl_epi32(_mm256_add_epi32(sum2, rounding_a), shift_a);
203
50.8M
    const __m256i b =
204
50.8M
        _mm256_srl_epi32(_mm256_add_epi32(sum1, rounding_b), shift_b);
205
    // b < 2^14, so we can use a 16-bit madd rather than a 32-bit
206
    // mullo to square it
207
50.8M
    bb = _mm256_madd_epi16(b, b);
208
50.8M
    an = _mm256_max_epi32(_mm256_mullo_epi32(a, _mm256_set1_epi32(n)), bb);
209
50.8M
  } else {
210
31.4M
    bb = _mm256_madd_epi16(sum1, sum1);
211
31.4M
    an = _mm256_mullo_epi32(sum2, _mm256_set1_epi32(n));
212
31.4M
  }
213
82.3M
  return _mm256_sub_epi32(an, bb);
214
82.3M
}
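
Ignoring the extra rounding and down-shift taken on the high-bit-depth path, compute_p evaluates p = n * (sum of squares) - (sum)^2 for each box, which is n^2 times the variance of the n box pixels. A scalar sketch of the 8-bit path (illustration only):

#include <stdint.h>

/* Scalar model of compute_p for bit_depth == 8:
 * n * sum_of_squares - sum^2 == n^2 * variance of the n box pixels. */
static int32_t compute_p_scalar(int32_t sum, int32_t sum_sq, int n) {
  return n * sum_sq - sum * sum;
}
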
215
216
// Assumes that C, D are integral images for the original buffer which has been
217
// extended to have a padding of SGRPROJ_BORDER_VERT/SGRPROJ_BORDER_HORZ pixels
218
// on the sides. A, B, C, D point at logical position (0, 0).
219
static void calc_ab(int32_t *A, int32_t *B, const int32_t *C, const int32_t *D,
220
                    int width, int height, int buf_stride, int bit_depth,
221
321k
                    int sgr_params_idx, int radius_idx) {
222
321k
  const sgr_params_type *const params = &av1_sgr_params[sgr_params_idx];
223
321k
  const int r = params->r[radius_idx];
224
321k
  const int n = (2 * r + 1) * (2 * r + 1);
225
321k
  const __m256i s = _mm256_set1_epi32(params->s[radius_idx]);
226
  // one_over_n[n-1] is 2^12/n, so easily fits in an int16
227
321k
  const __m256i one_over_n = _mm256_set1_epi32(av1_one_by_x[n - 1]);
228
229
321k
  const __m256i rnd_z = round_for_shift(SGRPROJ_MTABLE_BITS);
230
321k
  const __m256i rnd_res = round_for_shift(SGRPROJ_RECIP_BITS);
231
232
  // Set up masks
233
321k
  const __m128i ones32 = _mm_set_epi32(0, 0, ~0, ~0);
234
321k
  __m256i mask[8];
235
2.89M
  for (int idx = 0; idx < 8; idx++) {
236
2.56M
    const __m128i shift = _mm_cvtsi32_si128(8 * (8 - idx));
237
2.56M
    mask[idx] = _mm256_cvtepi8_epi32(_mm_srl_epi64(ones32, shift));
238
2.56M
  }
239
240
18.4E
  for (int i = -1; i < height + 1; ++i) {
241
119M
    for (int j = -1; j < width + 1; j += 8) {
242
119M
      const int32_t *Cij = C + i * buf_stride + j;
243
119M
      const int32_t *Dij = D + i * buf_stride + j;
244
245
119M
      __m256i sum1 = boxsum_from_ii(Dij, buf_stride, r);
246
119M
      __m256i sum2 = boxsum_from_ii(Cij, buf_stride, r);
247
248
      // When width + 2 isn't a multiple of 8, sum1 and sum2 will contain
249
      // some uninitialised data in their upper words. We use a mask to
250
      // ensure that these bits are set to 0.
251
119M
      int idx = AOMMIN(8, width + 1 - j);
252
119M
      assert(idx >= 1);
253
254
74.8M
      if (idx < 8) {
255
15.7M
        sum1 = _mm256_and_si256(mask[idx], sum1);
256
15.7M
        sum2 = _mm256_and_si256(mask[idx], sum2);
257
15.7M
      }
258
259
74.8M
      const __m256i p = compute_p(sum1, sum2, bit_depth, n);
260
261
74.8M
      const __m256i z = _mm256_min_epi32(
262
74.8M
          _mm256_srli_epi32(_mm256_add_epi32(_mm256_mullo_epi32(p, s), rnd_z),
263
74.8M
                            SGRPROJ_MTABLE_BITS),
264
74.8M
          _mm256_set1_epi32(255));
265
266
74.8M
      const __m256i a_res = _mm256_i32gather_epi32(av1_x_by_xplus1, z, 4);
267
268
74.8M
      yy_storeu_256(A + i * buf_stride + j, a_res);
269
270
74.8M
      const __m256i a_complement =
271
74.8M
          _mm256_sub_epi32(_mm256_set1_epi32(SGRPROJ_SGR), a_res);
272
273
      // sum1 might have lanes greater than 2^15, so we can't use madd to do
274
      // multiplication involving sum1. However, a_complement and one_over_n
275
      // are both less than 256, so we can multiply them first.
276
74.8M
      const __m256i a_comp_over_n = _mm256_madd_epi16(a_complement, one_over_n);
277
74.8M
      const __m256i b_int = _mm256_mullo_epi32(a_comp_over_n, sum1);
278
74.8M
      const __m256i b_res = _mm256_srli_epi32(_mm256_add_epi32(b_int, rnd_res),
279
74.8M
                                              SGRPROJ_RECIP_BITS);
280
281
74.8M
      yy_storeu_256(B + i * buf_stride + j, b_res);
282
74.8M
    }
283
17.7M
  }
284
321k
}
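
A scalar sketch of the per-pixel a/b computation performed by the loop body above, using the same tables (av1_x_by_xplus1, av1_one_by_x) and SGRPROJ_* constants pulled in by av1/common/restoration.h. The helper name and its use of unsigned 32-bit arithmetic (mirroring the wrapping semantics of the vector multiplies) are illustrative only.

#include <stdint.h>

/* One output position: sum1 is the box sum of pixels, p comes from compute_p,
 * s is the strength for this radius and n = (2r+1)^2. */
static void calc_ab_one(int32_t sum1, uint32_t p, uint32_t s, int n,
                        int32_t *a, int32_t *b) {
  const uint32_t z = AOMMIN(
      (p * s + (1u << (SGRPROJ_MTABLE_BITS - 1))) >> SGRPROJ_MTABLE_BITS, 255u);
  *a = av1_x_by_xplus1[z];  /* x/(x+1), scaled by SGRPROJ_SGR */
  const uint32_t a_complement = (uint32_t)(SGRPROJ_SGR - *a);
  *b = (int32_t)((a_complement * (uint32_t)sum1 *
                      (uint32_t)av1_one_by_x[n - 1] +
                  (1u << (SGRPROJ_RECIP_BITS - 1))) >>
                 SGRPROJ_RECIP_BITS);
}
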
285
286
// Calculate 8 values of the "cross sum" starting at buf. This is a 3x3 filter
287
// where the outer four corners have weight 3 and all other pixels have weight
288
// 4.
289
//
290
// Pixels are indexed as follows:
291
// xtl  xt   xtr
292
// xl    x   xr
293
// xbl  xb   xbr
294
//
295
// buf points to x
296
//
297
// fours = xl + xt + xr + xb + x
298
// threes = xtl + xtr + xbr + xbl
299
// cross_sum = 4 * fours + 3 * threes
300
//           = 4 * (fours + threes) - threes
301
//           = (fours + threes) << 2 - threes
302
225M
static inline __m256i cross_sum(const int32_t *buf, int stride) {
303
225M
  const __m256i xtl = yy_loadu_256(buf - 1 - stride);
304
225M
  const __m256i xt = yy_loadu_256(buf - stride);
305
225M
  const __m256i xtr = yy_loadu_256(buf + 1 - stride);
306
225M
  const __m256i xl = yy_loadu_256(buf - 1);
307
225M
  const __m256i x = yy_loadu_256(buf);
308
225M
  const __m256i xr = yy_loadu_256(buf + 1);
309
225M
  const __m256i xbl = yy_loadu_256(buf - 1 + stride);
310
225M
  const __m256i xb = yy_loadu_256(buf + stride);
311
225M
  const __m256i xbr = yy_loadu_256(buf + 1 + stride);
312
313
225M
  const __m256i fours = _mm256_add_epi32(
314
225M
      xl, _mm256_add_epi32(xt, _mm256_add_epi32(xr, _mm256_add_epi32(xb, x))));
315
225M
  const __m256i threes =
316
225M
      _mm256_add_epi32(xtl, _mm256_add_epi32(xtr, _mm256_add_epi32(xbr, xbl)));
317
318
225M
  return _mm256_sub_epi32(_mm256_slli_epi32(_mm256_add_epi32(fours, threes), 2),
319
225M
                          threes);
320
225M
}
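
For a single lane, the weighted 3x3 sum above reduces to the arithmetic below (a sketch mirroring the derivation in the comment):

#include <stdint.h>

/* Scalar model of cross_sum: weight 3 on the four corners, 4 on the rest. */
static int32_t cross_sum_scalar(const int32_t *buf, int stride) {
  const int32_t fours =
      buf[-1] + buf[-stride] + buf[1] + buf[stride] + buf[0];
  const int32_t threes = buf[-1 - stride] + buf[1 - stride] +
                         buf[1 + stride] + buf[-1 + stride];
  return ((fours + threes) << 2) - threes;  /* == 4 * fours + 3 * threes */
}
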
321
322
// The final filter for self-guided restoration. Computes a weighted average
323
// across A, B with "cross sums" (see cross_sum implementation above).
324
static void final_filter(int32_t *dst, int dst_stride, const int32_t *A,
325
                         const int32_t *B, int buf_stride, const void *dgd8,
326
324k
                         int dgd_stride, int width, int height, int highbd) {
327
324k
  const int nb = 5;
328
324k
  const __m256i rounding =
329
324k
      round_for_shift(SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS);
330
324k
  const uint8_t *dgd_real =
331
324k
      highbd ? (const uint8_t *)CONVERT_TO_SHORTPTR(dgd8) : dgd8;
332
333
16.9M
  for (int i = 0; i < height; ++i) {
334
139M
    for (int j = 0; j < width; j += 8) {
335
122M
      const __m256i a = cross_sum(A + i * buf_stride + j, buf_stride);
336
122M
      const __m256i b = cross_sum(B + i * buf_stride + j, buf_stride);
337
338
122M
      const __m128i raw =
339
122M
          xx_loadu_128(dgd_real + ((i * dgd_stride + j) << highbd));
340
122M
      const __m256i src =
341
122M
          highbd ? _mm256_cvtepu16_epi32(raw) : _mm256_cvtepu8_epi32(raw);
342
343
122M
      __m256i v = _mm256_add_epi32(_mm256_madd_epi16(a, src), b);
344
122M
      __m256i w = _mm256_srai_epi32(_mm256_add_epi32(v, rounding),
345
122M
                                    SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS);
346
347
122M
      yy_storeu_256(dst + i * dst_stride + j, w);
348
122M
    }
349
16.6M
  }
350
324k
}
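
Per output pixel, the loop above computes the expression sketched below (illustration only; a and b are the cross sums of the A and B planes and src is the 8- or 16-bit input sample). The function name is hypothetical; the SGRPROJ_* constants come from av1/common/restoration.h, already included above.

/* dst = (a * src + b + round) >> (SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS),
 * with nb = 5 and round equal to half the divisor, as built by round_for_shift. */
static int32_t final_filter_one(int32_t a, int32_t b, int32_t src) {
  const int nb = 5;
  const int shift = SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS;
  return (a * src + b + (1 << (shift - 1))) >> shift;
}
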
351
352
// Assumes that C, D are integral images for the original buffer which has been
353
// extended to have a padding of SGRPROJ_BORDER_VERT/SGRPROJ_BORDER_HORZ pixels
354
// on the sides. A, B, C, D point at logical position (0, 0).
355
static void calc_ab_fast(int32_t *A, int32_t *B, const int32_t *C,
356
                         const int32_t *D, int width, int height,
357
                         int buf_stride, int bit_depth, int sgr_params_idx,
358
304k
                         int radius_idx) {
359
304k
  const sgr_params_type *const params = &av1_sgr_params[sgr_params_idx];
360
304k
  const int r = params->r[radius_idx];
361
304k
  const int n = (2 * r + 1) * (2 * r + 1);
362
304k
  const __m256i s = _mm256_set1_epi32(params->s[radius_idx]);
363
  // one_over_n[n-1] is 2^12/n, so easily fits in an int16
364
304k
  const __m256i one_over_n = _mm256_set1_epi32(av1_one_by_x[n - 1]);
365
366
304k
  const __m256i rnd_z = round_for_shift(SGRPROJ_MTABLE_BITS);
367
304k
  const __m256i rnd_res = round_for_shift(SGRPROJ_RECIP_BITS);
368
369
  // Set up masks
370
304k
  const __m128i ones32 = _mm_set_epi32(0, 0, ~0, ~0);
371
304k
  __m256i mask[8];
372
2.73M
  for (int idx = 0; idx < 8; idx++) {
373
2.43M
    const __m128i shift = _mm_cvtsi32_si128(8 * (8 - idx));
374
2.43M
    mask[idx] = _mm256_cvtepi8_epi32(_mm_srl_epi64(ones32, shift));
375
2.43M
  }
376
377
18.4E
  for (int i = -1; i < height + 1; i += 2) {
378
65.4M
    for (int j = -1; j < width + 1; j += 8) {
379
65.4M
      const int32_t *Cij = C + i * buf_stride + j;
380
65.4M
      const int32_t *Dij = D + i * buf_stride + j;
381
382
65.4M
      __m256i sum1 = boxsum_from_ii(Dij, buf_stride, r);
383
65.4M
      __m256i sum2 = boxsum_from_ii(Cij, buf_stride, r);
384
385
      // When width + 2 isn't a multiple of 8, sum1 and sum2 will contain
386
      // some uninitialised data in their upper words. We use a mask to
387
      // ensure that these bits are set to 0.
388
65.4M
      int idx = AOMMIN(8, width + 1 - j);
389
65.4M
      assert(idx >= 1);
390
391
46.9M
      if (idx < 8) {
392
8.22M
        sum1 = _mm256_and_si256(mask[idx], sum1);
393
8.22M
        sum2 = _mm256_and_si256(mask[idx], sum2);
394
8.22M
      }
395
396
46.9M
      const __m256i p = compute_p(sum1, sum2, bit_depth, n);
397
398
46.9M
      const __m256i z = _mm256_min_epi32(
399
46.9M
          _mm256_srli_epi32(_mm256_add_epi32(_mm256_mullo_epi32(p, s), rnd_z),
400
46.9M
                            SGRPROJ_MTABLE_BITS),
401
46.9M
          _mm256_set1_epi32(255));
402
403
46.9M
      const __m256i a_res = _mm256_i32gather_epi32(av1_x_by_xplus1, z, 4);
404
405
46.9M
      yy_storeu_256(A + i * buf_stride + j, a_res);
406
407
46.9M
      const __m256i a_complement =
408
46.9M
          _mm256_sub_epi32(_mm256_set1_epi32(SGRPROJ_SGR), a_res);
409
410
      // sum1 might have lanes greater than 2^15, so we can't use madd to do
411
      // multiplication involving sum1. However, a_complement and one_over_n
412
      // are both less than 256, so we can multiply them first.
413
46.9M
      const __m256i a_comp_over_n = _mm256_madd_epi16(a_complement, one_over_n);
414
46.9M
      const __m256i b_int = _mm256_mullo_epi32(a_comp_over_n, sum1);
415
46.9M
      const __m256i b_res = _mm256_srli_epi32(_mm256_add_epi32(b_int, rnd_res),
416
46.9M
                                              SGRPROJ_RECIP_BITS);
417
418
46.9M
      yy_storeu_256(B + i * buf_stride + j, b_res);
419
46.9M
    }
420
8.85M
  }
421
304k
}
422
423
// Calculate 8 values of the "cross sum" starting at buf.
424
//
425
// Pixels are indexed like this:
426
// xtl  xt   xtr
427
//  -   buf   -
428
// xbl  xb   xbr
429
//
430
// Pixels are weighted like this:
431
//  5    6    5
432
//  0    0    0
433
//  5    6    5
434
//
435
// fives = xtl + xtr + xbl + xbr
436
// sixes = xt + xb
437
// cross_sum = 6 * sixes + 5 * fives
438
//           = 5 * (fives + sixes) + sixes
439
//           = (fives + sixes) << 2 + (fives + sixes) + sixes
440
114M
static inline __m256i cross_sum_fast_even_row(const int32_t *buf, int stride) {
441
114M
  const __m256i xtl = yy_loadu_256(buf - 1 - stride);
442
114M
  const __m256i xt = yy_loadu_256(buf - stride);
443
114M
  const __m256i xtr = yy_loadu_256(buf + 1 - stride);
444
114M
  const __m256i xbl = yy_loadu_256(buf - 1 + stride);
445
114M
  const __m256i xb = yy_loadu_256(buf + stride);
446
114M
  const __m256i xbr = yy_loadu_256(buf + 1 + stride);
447
448
114M
  const __m256i fives =
449
114M
      _mm256_add_epi32(xtl, _mm256_add_epi32(xtr, _mm256_add_epi32(xbr, xbl)));
450
114M
  const __m256i sixes = _mm256_add_epi32(xt, xb);
451
114M
  const __m256i fives_plus_sixes = _mm256_add_epi32(fives, sixes);
452
453
114M
  return _mm256_add_epi32(
454
114M
      _mm256_add_epi32(_mm256_slli_epi32(fives_plus_sixes, 2),
455
114M
                       fives_plus_sixes),
456
114M
      sixes);
457
114M
}
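
A single-lane scalar model of the same computation (a sketch, consistent with the derivation above):

#include <stdint.h>

/* Scalar model of cross_sum_fast_even_row: weight 5 on the four corners,
 * 6 directly above and below; the middle row is not read. */
static int32_t cross_sum_fast_even_row_scalar(const int32_t *buf, int stride) {
  const int32_t fives = buf[-1 - stride] + buf[1 - stride] +
                        buf[-1 + stride] + buf[1 + stride];
  const int32_t sixes = buf[-stride] + buf[stride];
  const int32_t fives_plus_sixes = fives + sixes;
  /* == 5 * fives + 6 * sixes */
  return (fives_plus_sixes << 2) + fives_plus_sixes + sixes;
}
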
458
459
// Calculate 8 values of the "cross sum" starting at buf.
460
//
461
// Pixels are indexed like this:
462
// xl    x   xr
463
//
464
// Pixels are weighted like this:
465
//  5    6    5
466
//
467
// buf points to x
468
//
469
// fives = xl + xr
470
// sixes = x
471
// cross_sum = 5 * fives + 6 * sixes
472
//           = 5 * (fives + sixes) + sixes
473
//           = (fives + sixes) << 2 + (fives + sixes) + sixes
474
113M
static inline __m256i cross_sum_fast_odd_row(const int32_t *buf) {
475
113M
  const __m256i xl = yy_loadu_256(buf - 1);
476
113M
  const __m256i x = yy_loadu_256(buf);
477
113M
  const __m256i xr = yy_loadu_256(buf + 1);
478
479
113M
  const __m256i fives = _mm256_add_epi32(xl, xr);
480
113M
  const __m256i sixes = x;
481
482
113M
  const __m256i fives_plus_sixes = _mm256_add_epi32(fives, sixes);
483
484
113M
  return _mm256_add_epi32(
485
113M
      _mm256_add_epi32(_mm256_slli_epi32(fives_plus_sixes, 2),
486
113M
                       fives_plus_sixes),
487
113M
      sixes);
488
113M
}
489
490
// The final filter for the self-guided restoration. Computes a
491
// weighted average across A, B with "cross sums" (see cross_sum_...
492
// implementations above).
493
static void final_filter_fast(int32_t *dst, int dst_stride, const int32_t *A,
494
                              const int32_t *B, int buf_stride,
495
                              const void *dgd8, int dgd_stride, int width,
496
310k
                              int height, int highbd) {
497
310k
  const int nb0 = 5;
498
310k
  const int nb1 = 4;
499
500
310k
  const __m256i rounding0 =
501
310k
      round_for_shift(SGRPROJ_SGR_BITS + nb0 - SGRPROJ_RST_BITS);
502
310k
  const __m256i rounding1 =
503
310k
      round_for_shift(SGRPROJ_SGR_BITS + nb1 - SGRPROJ_RST_BITS);
504
505
310k
  const uint8_t *dgd_real =
506
310k
      highbd ? (const uint8_t *)CONVERT_TO_SHORTPTR(dgd8) : dgd8;
507
508
15.4M
  for (int i = 0; i < height; ++i) {
509
15.1M
    if (!(i & 1)) {  // even row
510
65.0M
      for (int j = 0; j < width; j += 8) {
511
57.4M
        const __m256i a =
512
57.4M
            cross_sum_fast_even_row(A + i * buf_stride + j, buf_stride);
513
57.4M
        const __m256i b =
514
57.4M
            cross_sum_fast_even_row(B + i * buf_stride + j, buf_stride);
515
516
57.4M
        const __m128i raw =
517
57.4M
            xx_loadu_128(dgd_real + ((i * dgd_stride + j) << highbd));
518
57.4M
        const __m256i src =
519
57.4M
            highbd ? _mm256_cvtepu16_epi32(raw) : _mm256_cvtepu8_epi32(raw);
520
521
57.4M
        __m256i v = _mm256_add_epi32(_mm256_madd_epi16(a, src), b);
522
57.4M
        __m256i w =
523
57.4M
            _mm256_srai_epi32(_mm256_add_epi32(v, rounding0),
524
57.4M
                              SGRPROJ_SGR_BITS + nb0 - SGRPROJ_RST_BITS);
525
526
57.4M
        yy_storeu_256(dst + i * dst_stride + j, w);
527
57.4M
      }
528
7.57M
    } else {  // odd row
529
64.7M
      for (int j = 0; j < width; j += 8) {
530
57.2M
        const __m256i a = cross_sum_fast_odd_row(A + i * buf_stride + j);
531
57.2M
        const __m256i b = cross_sum_fast_odd_row(B + i * buf_stride + j);
532
533
57.2M
        const __m128i raw =
534
57.2M
            xx_loadu_128(dgd_real + ((i * dgd_stride + j) << highbd));
535
57.2M
        const __m256i src =
536
57.2M
            highbd ? _mm256_cvtepu16_epi32(raw) : _mm256_cvtepu8_epi32(raw);
537
538
57.2M
        __m256i v = _mm256_add_epi32(_mm256_madd_epi16(a, src), b);
539
57.2M
        __m256i w =
540
57.2M
            _mm256_srai_epi32(_mm256_add_epi32(v, rounding1),
541
57.2M
                              SGRPROJ_SGR_BITS + nb1 - SGRPROJ_RST_BITS);
542
543
57.2M
        yy_storeu_256(dst + i * dst_stride + j, w);
544
57.2M
      }
545
7.52M
    }
546
15.1M
  }
547
310k
}
548
549
int av1_selfguided_restoration_avx2(const uint8_t *dgd8, int width, int height,
550
                                    int dgd_stride, int32_t *flt0,
551
                                    int32_t *flt1, int flt_stride,
552
                                    int sgr_params_idx, int bit_depth,
553
334k
                                    int highbd) {
554
  // The ALIGN_POWER_OF_TWO macro here ensures that column 1 of Atl, Btl,
555
  // Ctl and Dtl is 32-byte aligned.
556
334k
  const int buf_elts = ALIGN_POWER_OF_TWO(RESTORATION_PROC_UNIT_PELS, 3);
557
558
334k
  int32_t *buf = aom_memalign(
559
334k
      32, 4 * sizeof(*buf) * ALIGN_POWER_OF_TWO(RESTORATION_PROC_UNIT_PELS, 3));
560
334k
  if (!buf) return -1;
561
562
334k
  const int width_ext = width + 2 * SGRPROJ_BORDER_HORZ;
563
334k
  const int height_ext = height + 2 * SGRPROJ_BORDER_VERT;
564
565
  // Adjusting the stride of A and B here appears to avoid bad cache effects,
566
  // leading to a significant speed improvement.
567
  // We also align the stride to a multiple of 32 bytes for efficiency.
568
334k
  int buf_stride = ALIGN_POWER_OF_TWO(width_ext + 16, 3);
569
570
  // The "tl" pointers point at the top-left of the initialised data for the
571
  // array.
572
334k
  int32_t *Atl = buf + 0 * buf_elts + 7;
573
334k
  int32_t *Btl = buf + 1 * buf_elts + 7;
574
334k
  int32_t *Ctl = buf + 2 * buf_elts + 7;
575
334k
  int32_t *Dtl = buf + 3 * buf_elts + 7;
576
577
  // The "0" pointers are (- SGRPROJ_BORDER_VERT, -SGRPROJ_BORDER_HORZ). Note
578
  // there's a zero row and column in A, B (integral images), so we move down
579
  // and right one for them.
580
334k
  const int buf_diag_border =
581
334k
      SGRPROJ_BORDER_HORZ + buf_stride * SGRPROJ_BORDER_VERT;
582
583
334k
  int32_t *A0 = Atl + 1 + buf_stride;
584
334k
  int32_t *B0 = Btl + 1 + buf_stride;
585
334k
  int32_t *C0 = Ctl + 1 + buf_stride;
586
334k
  int32_t *D0 = Dtl + 1 + buf_stride;
587
588
  // Finally, A, B, C, D point at position (0, 0).
589
334k
  int32_t *A = A0 + buf_diag_border;
590
334k
  int32_t *B = B0 + buf_diag_border;
591
334k
  int32_t *C = C0 + buf_diag_border;
592
334k
  int32_t *D = D0 + buf_diag_border;
593
594
334k
  const int dgd_diag_border =
595
334k
      SGRPROJ_BORDER_HORZ + dgd_stride * SGRPROJ_BORDER_VERT;
596
334k
  const uint8_t *dgd0 = dgd8 - dgd_diag_border;
597
598
  // Generate integral images from the input. C will contain sums of squares; D
599
  // will contain just sums
600
334k
  if (highbd)
601
207k
    integral_images_highbd(CONVERT_TO_SHORTPTR(dgd0), dgd_stride, width_ext,
602
207k
                           height_ext, Ctl, Dtl, buf_stride);
603
127k
  else
604
127k
    integral_images(dgd0, dgd_stride, width_ext, height_ext, Ctl, Dtl,
605
127k
                    buf_stride);
606
607
334k
  const sgr_params_type *const params = &av1_sgr_params[sgr_params_idx];
608
  // Write to flt0 and flt1
609
  // If params->r == 0 we skip the corresponding filter. We only allow one of
610
  // the radii to be 0, as having both equal to 0 would be equivalent to
611
  // skipping SGR entirely.
612
334k
  assert(!(params->r[0] == 0 && params->r[1] == 0));
613
334k
  assert(params->r[0] < AOMMIN(SGRPROJ_BORDER_VERT, SGRPROJ_BORDER_HORZ));
614
330k
  assert(params->r[1] < AOMMIN(SGRPROJ_BORDER_VERT, SGRPROJ_BORDER_HORZ));
615
616
330k
  if (params->r[0] > 0) {
617
303k
    calc_ab_fast(A, B, C, D, width, height, buf_stride, bit_depth,
618
303k
                 sgr_params_idx, 0);
619
303k
    final_filter_fast(flt0, flt_stride, A, B, buf_stride, dgd8, dgd_stride,
620
303k
                      width, height, highbd);
621
303k
  }
622
623
330k
  if (params->r[1] > 0) {
624
321k
    calc_ab(A, B, C, D, width, height, buf_stride, bit_depth, sgr_params_idx,
625
321k
            1);
626
321k
    final_filter(flt1, flt_stride, A, B, buf_stride, dgd8, dgd_stride, width,
627
321k
                 height, highbd);
628
321k
  }
629
330k
  aom_free(buf);
630
330k
  return 0;
631
330k
}
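
A worked example of the buffer geometry set up above. The concrete numbers are illustrative and assume SGRPROJ_BORDER_HORZ == SGRPROJ_BORDER_VERT == 3, as defined in restoration.h at the time of writing.

/* For a 64 x 64 processing unit:
 *   width_ext  = 64 + 2 * SGRPROJ_BORDER_HORZ = 70
 *   buf_stride = ALIGN_POWER_OF_TWO(70 + 16, 3) = 88, a multiple of 8 int32s,
 *                i.e. a multiple of 32 bytes
 *   Atl + 1    = buf + 8, which is 32-byte aligned because buf is 32-byte
 *                aligned and 8 * sizeof(int32_t) == 32
 *   A0         = Atl + 1 + buf_stride      (skip the integral image's zero
 *                row and column)
 *   A          = A0 + SGRPROJ_BORDER_HORZ + SGRPROJ_BORDER_VERT * buf_stride
 *              = A0 + 3 + 3 * 88           (step past the padding)
 */
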
632
633
int av1_apply_selfguided_restoration_avx2(const uint8_t *dat8, int width,
634
                                          int height, int stride, int eps,
635
                                          const int *xqd, uint8_t *dst8,
636
                                          int dst_stride, int32_t *tmpbuf,
637
333k
                                          int bit_depth, int highbd) {
638
333k
  int32_t *flt0 = tmpbuf;
639
333k
  int32_t *flt1 = flt0 + RESTORATION_UNITPELS_MAX;
640
333k
  assert(width * height <= RESTORATION_UNITPELS_MAX);
641
333k
  const int ret = av1_selfguided_restoration_avx2(
642
333k
      dat8, width, height, stride, flt0, flt1, width, eps, bit_depth, highbd);
643
333k
  if (ret != 0) return ret;
644
333k
  const sgr_params_type *const params = &av1_sgr_params[eps];
645
333k
  int xq[2];
646
333k
  av1_decode_xq(xqd, xq, params);
647
648
333k
  __m256i xq0 = _mm256_set1_epi32(xq[0]);
649
333k
  __m256i xq1 = _mm256_set1_epi32(xq[1]);
650
651
13.6M
  for (int i = 0; i < height; ++i) {
652
    // Calculate output in batches of 16 pixels
653
62.6M
    for (int j = 0; j < width; j += 16) {
654
49.2M
      const int k = i * width + j;
655
49.2M
      const int m = i * dst_stride + j;
656
657
49.2M
      const uint8_t *dat8ij = dat8 + i * stride + j;
658
49.2M
      __m256i ep_0, ep_1;
659
49.2M
      __m128i src_0, src_1;
660
49.2M
      if (highbd) {
661
33.7M
        src_0 = xx_loadu_128(CONVERT_TO_SHORTPTR(dat8ij));
662
33.7M
        src_1 = xx_loadu_128(CONVERT_TO_SHORTPTR(dat8ij + 8));
663
33.7M
        ep_0 = _mm256_cvtepu16_epi32(src_0);
664
33.7M
        ep_1 = _mm256_cvtepu16_epi32(src_1);
665
33.7M
      } else {
666
15.5M
        src_0 = xx_loadu_128(dat8ij);
667
15.5M
        ep_0 = _mm256_cvtepu8_epi32(src_0);
668
15.5M
        ep_1 = _mm256_cvtepu8_epi32(_mm_srli_si128(src_0, 8));
669
15.5M
      }
670
671
49.2M
      const __m256i u_0 = _mm256_slli_epi32(ep_0, SGRPROJ_RST_BITS);
672
49.2M
      const __m256i u_1 = _mm256_slli_epi32(ep_1, SGRPROJ_RST_BITS);
673
674
49.2M
      __m256i v_0 = _mm256_slli_epi32(u_0, SGRPROJ_PRJ_BITS);
675
49.2M
      __m256i v_1 = _mm256_slli_epi32(u_1, SGRPROJ_PRJ_BITS);
676
677
49.2M
      if (params->r[0] > 0) {
678
47.1M
        const __m256i f1_0 = _mm256_sub_epi32(yy_loadu_256(&flt0[k]), u_0);
679
47.1M
        v_0 = _mm256_add_epi32(v_0, _mm256_mullo_epi32(xq0, f1_0));
680
681
47.1M
        const __m256i f1_1 = _mm256_sub_epi32(yy_loadu_256(&flt0[k + 8]), u_1);
682
47.1M
        v_1 = _mm256_add_epi32(v_1, _mm256_mullo_epi32(xq0, f1_1));
683
47.1M
      }
684
685
49.2M
      if (params->r[1] > 0) {
686
46.9M
        const __m256i f2_0 = _mm256_sub_epi32(yy_loadu_256(&flt1[k]), u_0);
687
46.9M
        v_0 = _mm256_add_epi32(v_0, _mm256_mullo_epi32(xq1, f2_0));
688
689
46.9M
        const __m256i f2_1 = _mm256_sub_epi32(yy_loadu_256(&flt1[k + 8]), u_1);
690
46.9M
        v_1 = _mm256_add_epi32(v_1, _mm256_mullo_epi32(xq1, f2_1));
691
46.9M
      }
692
693
49.2M
      const __m256i rounding =
694
49.2M
          round_for_shift(SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS);
695
49.2M
      const __m256i w_0 = _mm256_srai_epi32(
696
49.2M
          _mm256_add_epi32(v_0, rounding), SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS);
697
49.2M
      const __m256i w_1 = _mm256_srai_epi32(
698
49.2M
          _mm256_add_epi32(v_1, rounding), SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS);
699
700
49.2M
      if (highbd) {
701
        // Pack into 16 bits and clamp to [0, 2^bit_depth)
702
        // Note that packing into 16 bits messes up the order of the bits,
703
        // so we use a permute function to correct this
704
33.7M
        const __m256i tmp = _mm256_packus_epi32(w_0, w_1);
705
33.7M
        const __m256i tmp2 = _mm256_permute4x64_epi64(tmp, 0xd8);
706
33.7M
        const __m256i max = _mm256_set1_epi16((1 << bit_depth) - 1);
707
33.7M
        const __m256i res = _mm256_min_epi16(tmp2, max);
708
33.7M
        yy_storeu_256(CONVERT_TO_SHORTPTR(dst8 + m), res);
709
33.7M
      } else {
710
        // Pack into 8 bits and clamp to [0, 256)
711
        // Note that each pack messes up the order of the bits,
712
        // so we use a permute function to correct this
713
15.5M
        const __m256i tmp = _mm256_packs_epi32(w_0, w_1);
714
15.5M
        const __m256i tmp2 = _mm256_permute4x64_epi64(tmp, 0xd8);
715
15.5M
        const __m256i res =
716
15.5M
            _mm256_packus_epi16(tmp2, tmp2 /* "don't care" value */);
717
15.5M
        const __m128i res2 =
718
            _mm256_castsi256_si128(_mm256_permute4x64_epi64(res, 0xd8));
719
15.5M
        xx_storeu_128(dst8 + m, res2);
720
15.5M
      }
721
49.2M
    }
722
13.3M
  }
723
333k
  return 0;
724
333k
}
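
Per pixel, the batched loop above evaluates the projection sketched below (illustration only; the helper name is hypothetical, xq0/xq1 are the decoded projection weights and f0/f1 are the corresponding samples from flt0/flt1). The result is then packed and clamped to the pixel range exactly as in the two branches above.

#include <stdint.h>

/* Scalar model of the final self-guided projection for one sample. */
static int32_t apply_one(int32_t src, int32_t f0, int32_t f1, int xq0, int xq1,
                         int use_r0, int use_r1) {
  const int32_t u = src << SGRPROJ_RST_BITS;
  int32_t v = u << SGRPROJ_PRJ_BITS;
  if (use_r0) v += xq0 * (f0 - u);
  if (use_r1) v += xq1 * (f1 - u);
  const int shift = SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS;
  return (v + (1 << (shift - 1))) >> shift;
}
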