Coverage Report

Created: 2025-06-13 07:07

/src/aom/av1/common/x86/reconinter_avx2.c
  Count| Source
       | /*
       |  * Copyright (c) 2018, Alliance for Open Media. All rights reserved.
       |  *
       |  * This source code is subject to the terms of the BSD 2 Clause License and
       |  * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
       |  * was not distributed with this source code in the LICENSE file, you can
       |  * obtain it at www.aomedia.org/license/software. If the Alliance for Open
       |  * Media Patent License 1.0 was not distributed with this source code in the
       |  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
       |  */
       |
       | #include <immintrin.h>
       |
       | #include "config/av1_rtcd.h"
       |
       | #include "aom/aom_integer.h"
       | #include "aom_dsp/blend.h"
       | #include "aom_dsp/x86/synonyms.h"
       | #include "aom_dsp/x86/synonyms_avx2.h"
       | #include "av1/common/blockd.h"
       |
       | static inline __m256i calc_mask_avx2(const __m256i mask_base, const __m256i s0,
      0|                                      const __m256i s1) {
      0|   const __m256i diff = _mm256_abs_epi16(_mm256_sub_epi16(s0, s1));
      0|   return _mm256_abs_epi16(
      0|       _mm256_add_epi16(mask_base, _mm256_srli_epi16(diff, 4)));
       |   // clamp(diff, 0, 64) can be skipped since diff is always in the range (38, 54)
      0| }
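
The helper computes, per 16-bit element, mask_base + (|s0 - s1| >> DIFF_FACTOR_LOG2). A scalar sketch of the same arithmetic (a hypothetical helper, not part of libaom) makes the role of the final _mm256_abs_epi16 visible:

#include <stdlib.h>

// Scalar model of calc_mask_avx2 for one pixel pair. For the normal mask,
// mask_base == 38, so 38 + diff / 16 already lies in [38, 53] for 8-bit
// inputs (diff <= 255) and abs() is a no-op -- hence the skipped clamp.
// For the inverse mask, mask_base == 38 - 64, the sum is negative, and
// abs() turns it into 64 - (38 + diff / 16), i.e. the inverted mask.
static int calc_mask_scalar(int mask_base, int s0, int s1) {
  const int diff = abs(s0 - s1);
  return abs(mask_base + (diff >> 4));  // DIFF_FACTOR_LOG2 == 4
}
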
       | void av1_build_compound_diffwtd_mask_avx2(uint8_t *mask,
       |                                           DIFFWTD_MASK_TYPE mask_type,
       |                                           const uint8_t *src0, int src0_stride,
       |                                           const uint8_t *src1, int src1_stride,
      0|                                           int h, int w) {
      0|   const int mb = (mask_type == DIFFWTD_38_INV) ? AOM_BLEND_A64_MAX_ALPHA : 0;
      0|   const __m256i y_mask_base = _mm256_set1_epi16(38 - mb);
      0|   int i = 0;
      0|   if (4 == w) {
      0|     do {
      0|       const __m128i s0A = xx_loadl_32(src0);
      0|       const __m128i s0B = xx_loadl_32(src0 + src0_stride);
      0|       const __m128i s0C = xx_loadl_32(src0 + src0_stride * 2);
      0|       const __m128i s0D = xx_loadl_32(src0 + src0_stride * 3);
      0|       const __m128i s0AB = _mm_unpacklo_epi32(s0A, s0B);
      0|       const __m128i s0CD = _mm_unpacklo_epi32(s0C, s0D);
      0|       const __m128i s0ABCD = _mm_unpacklo_epi64(s0AB, s0CD);
      0|       const __m256i s0ABCD_w = _mm256_cvtepu8_epi16(s0ABCD);
       |
      0|       const __m128i s1A = xx_loadl_32(src1);
      0|       const __m128i s1B = xx_loadl_32(src1 + src1_stride);
      0|       const __m128i s1C = xx_loadl_32(src1 + src1_stride * 2);
      0|       const __m128i s1D = xx_loadl_32(src1 + src1_stride * 3);
      0|       const __m128i s1AB = _mm_unpacklo_epi32(s1A, s1B);
      0|       const __m128i s1CD = _mm_unpacklo_epi32(s1C, s1D);
      0|       const __m128i s1ABCD = _mm_unpacklo_epi64(s1AB, s1CD);
      0|       const __m256i s1ABCD_w = _mm256_cvtepu8_epi16(s1ABCD);
      0|       const __m256i m16 = calc_mask_avx2(y_mask_base, s0ABCD_w, s1ABCD_w);
      0|       const __m256i m8 = _mm256_packus_epi16(m16, _mm256_setzero_si256());
      0|       const __m128i x_m8 =
      0|           _mm256_castsi256_si128(_mm256_permute4x64_epi64(m8, 0xd8));
      0|       xx_storeu_128(mask, x_m8);
      0|       src0 += (src0_stride << 2);
      0|       src1 += (src1_stride << 2);
      0|       mask += 16;
      0|       i += 4;
      0|     } while (i < h);
      0|   } else if (8 == w) {
      0|     do {
      0|       const __m128i s0A = xx_loadl_64(src0);
      0|       const __m128i s0B = xx_loadl_64(src0 + src0_stride);
      0|       const __m128i s0C = xx_loadl_64(src0 + src0_stride * 2);
      0|       const __m128i s0D = xx_loadl_64(src0 + src0_stride * 3);
      0|       const __m256i s0AC_w = _mm256_cvtepu8_epi16(_mm_unpacklo_epi64(s0A, s0C));
      0|       const __m256i s0BD_w = _mm256_cvtepu8_epi16(_mm_unpacklo_epi64(s0B, s0D));
      0|       const __m128i s1A = xx_loadl_64(src1);
      0|       const __m128i s1B = xx_loadl_64(src1 + src1_stride);
      0|       const __m128i s1C = xx_loadl_64(src1 + src1_stride * 2);
      0|       const __m128i s1D = xx_loadl_64(src1 + src1_stride * 3);
      0|       const __m256i s1AC_w = _mm256_cvtepu8_epi16(_mm_unpacklo_epi64(s1A, s1C));
      0|       const __m256i s1BD_w = _mm256_cvtepu8_epi16(_mm_unpacklo_epi64(s1B, s1D));
      0|       const __m256i m16AC = calc_mask_avx2(y_mask_base, s0AC_w, s1AC_w);
      0|       const __m256i m16BD = calc_mask_avx2(y_mask_base, s0BD_w, s1BD_w);
      0|       const __m256i m8 = _mm256_packus_epi16(m16AC, m16BD);
      0|       yy_storeu_256(mask, m8);
      0|       src0 += src0_stride << 2;
      0|       src1 += src1_stride << 2;
      0|       mask += 32;
      0|       i += 4;
      0|     } while (i < h);
      0|   } else if (16 == w) {
      0|     do {
      0|       const __m128i s0A = xx_load_128(src0);
      0|       const __m128i s0B = xx_load_128(src0 + src0_stride);
      0|       const __m128i s1A = xx_load_128(src1);
      0|       const __m128i s1B = xx_load_128(src1 + src1_stride);
      0|       const __m256i s0AL = _mm256_cvtepu8_epi16(s0A);
      0|       const __m256i s0BL = _mm256_cvtepu8_epi16(s0B);
      0|       const __m256i s1AL = _mm256_cvtepu8_epi16(s1A);
      0|       const __m256i s1BL = _mm256_cvtepu8_epi16(s1B);
       |
      0|       const __m256i m16AL = calc_mask_avx2(y_mask_base, s0AL, s1AL);
      0|       const __m256i m16BL = calc_mask_avx2(y_mask_base, s0BL, s1BL);
       |
      0|       const __m256i m8 =
      0|           _mm256_permute4x64_epi64(_mm256_packus_epi16(m16AL, m16BL), 0xd8);
      0|       yy_storeu_256(mask, m8);
      0|       src0 += src0_stride << 1;
      0|       src1 += src1_stride << 1;
      0|       mask += 32;
      0|       i += 2;
      0|     } while (i < h);
      0|   } else {
      0|     do {
      0|       int j = 0;
      0|       do {
      0|         const __m256i s0 = yy_loadu_256(src0 + j);
      0|         const __m256i s1 = yy_loadu_256(src1 + j);
      0|         const __m256i s0L = _mm256_cvtepu8_epi16(_mm256_castsi256_si128(s0));
      0|         const __m256i s1L = _mm256_cvtepu8_epi16(_mm256_castsi256_si128(s1));
      0|         const __m256i s0H =
      0|             _mm256_cvtepu8_epi16(_mm256_extracti128_si256(s0, 1));
      0|         const __m256i s1H =
      0|             _mm256_cvtepu8_epi16(_mm256_extracti128_si256(s1, 1));
      0|         const __m256i m16L = calc_mask_avx2(y_mask_base, s0L, s1L);
      0|         const __m256i m16H = calc_mask_avx2(y_mask_base, s0H, s1H);
      0|         const __m256i m8 =
      0|             _mm256_permute4x64_epi64(_mm256_packus_epi16(m16L, m16H), 0xd8);
      0|         yy_storeu_256(mask + j, m8);
      0|         j += 32;
      0|       } while (j < w);
      0|       src0 += src0_stride;
      0|       src1 += src1_stride;
      0|       mask += w;
      0|       i += 1;
      0|     } while (i < h);
      0|   }
      0| }
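
A note on the recurring _mm256_packus_epi16 / _mm256_permute4x64_epi64(..., 0xd8) pair above: packus operates within each 128-bit lane, so the packed 64-bit quarters come out in the order [a.lo, b.lo, a.hi, b.hi], and the 0xd8 (== _MM_SHUFFLE(3, 1, 2, 0)) permute restores sequential order. A minimal standalone demo (illustration only, not libaom code):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  // a holds the words 0..15 and b the words 16..31; after pack + permute
  // the 32 output bytes read 0..31 in order.
  const __m256i a =
      _mm256_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
  const __m256i b = _mm256_setr_epi16(16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
                                      26, 27, 28, 29, 30, 31);
  const __m256i m8 = _mm256_permute4x64_epi64(_mm256_packus_epi16(a, b), 0xd8);
  unsigned char out[32];
  _mm256_storeu_si256((__m256i *)out, m8);
  for (int i = 0; i < 32; ++i) printf("%d ", out[i]);
  printf("\n");
  return 0;
}

The 8 == w branch gets away without the permute because it interleaves rows up front (A with C, B with D), so the lane-local pack already lands the bytes in row order.
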
       |
       | static inline __m256i calc_mask_d16_avx2(const __m256i *data_src0,
       |                                          const __m256i *data_src1,
       |                                          const __m256i *round_const,
       |                                          const __m256i *mask_base_16,
  3.08M|                                          const __m256i *clip_diff, int round) {
  3.08M|   const __m256i diffa = _mm256_subs_epu16(*data_src0, *data_src1);
  3.08M|   const __m256i diffb = _mm256_subs_epu16(*data_src1, *data_src0);
  3.08M|   const __m256i diff = _mm256_max_epu16(diffa, diffb);
  3.08M|   const __m256i diff_round =
  3.08M|       _mm256_srli_epi16(_mm256_adds_epu16(diff, *round_const), round);
  3.08M|   const __m256i diff_factor = _mm256_srli_epi16(diff_round, DIFF_FACTOR_LOG2);
  3.08M|   const __m256i diff_mask = _mm256_adds_epi16(diff_factor, *mask_base_16);
  3.08M|   const __m256i diff_clamp = _mm256_min_epi16(diff_mask, *clip_diff);
  3.08M|   return diff_clamp;
  3.08M| }
       |
       | static inline __m256i calc_mask_d16_inv_avx2(const __m256i *data_src0,
       |                                              const __m256i *data_src1,
       |                                              const __m256i *round_const,
       |                                              const __m256i *mask_base_16,
       |                                              const __m256i *clip_diff,
  2.45M|                                              int round) {
  2.45M|   const __m256i diffa = _mm256_subs_epu16(*data_src0, *data_src1);
  2.45M|   const __m256i diffb = _mm256_subs_epu16(*data_src1, *data_src0);
  2.45M|   const __m256i diff = _mm256_max_epu16(diffa, diffb);
  2.45M|   const __m256i diff_round =
  2.45M|       _mm256_srli_epi16(_mm256_adds_epu16(diff, *round_const), round);
  2.45M|   const __m256i diff_factor = _mm256_srli_epi16(diff_round, DIFF_FACTOR_LOG2);
  2.45M|   const __m256i diff_mask = _mm256_adds_epi16(diff_factor, *mask_base_16);
  2.45M|   const __m256i diff_clamp = _mm256_min_epi16(diff_mask, *clip_diff);
  2.45M|   const __m256i diff_const_16 = _mm256_sub_epi16(*clip_diff, diff_clamp);
  2.45M|   return diff_const_16;
  2.45M| }
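
Both d16 helpers compute the same per-element quantity; the inverse variant just reflects it around AOM_BLEND_A64_MAX_ALPHA. A scalar sketch (hypothetical code, assuming DIFF_FACTOR_LOG2 == 4 and AOM_BLEND_A64_MAX_ALPHA == 64, which the caller below asserts):

#include <stdint.h>
#include <stdlib.h>

// One element of calc_mask_d16_avx2: saturating |s0 - s1|, round-to-nearest
// right shift by `round`, division by the diff factor (16), add the mask
// base (38), and clamp to 64 from above. calc_mask_d16_inv_avx2 returns 64
// minus this value.
static uint8_t calc_mask_d16_scalar(uint16_t s0, uint16_t s1, int round,
                                    int inverse) {
  const int diff = abs((int)s0 - (int)s1);
  const int diff_round = (diff + ((1 << round) >> 1)) >> round;
  const int m = 38 + (diff_round >> 4);
  const int clamped = m < 64 ? m : 64;
  return (uint8_t)(inverse ? 64 - clamped : clamped);
}
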
       |
       | static inline void build_compound_diffwtd_mask_d16_avx2(
       |     uint8_t *mask, const CONV_BUF_TYPE *src0, int src0_stride,
  43.1k|     const CONV_BUF_TYPE *src1, int src1_stride, int h, int w, int shift) {
  43.1k|   const int mask_base = 38;
  43.1k|   const __m256i _r = _mm256_set1_epi16((1 << shift) >> 1);
  43.1k|   const __m256i y38 = _mm256_set1_epi16(mask_base);
  43.1k|   const __m256i y64 = _mm256_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
  43.1k|   int i = 0;
  43.1k|   if (w == 4) {
      0|     do {
      0|       const __m128i s0A = xx_loadl_64(src0);
      0|       const __m128i s0B = xx_loadl_64(src0 + src0_stride);
      0|       const __m128i s0C = xx_loadl_64(src0 + src0_stride * 2);
      0|       const __m128i s0D = xx_loadl_64(src0 + src0_stride * 3);
      0|       const __m128i s1A = xx_loadl_64(src1);
      0|       const __m128i s1B = xx_loadl_64(src1 + src1_stride);
      0|       const __m128i s1C = xx_loadl_64(src1 + src1_stride * 2);
      0|       const __m128i s1D = xx_loadl_64(src1 + src1_stride * 3);
      0|       const __m256i s0 = yy_set_m128i(_mm_unpacklo_epi64(s0C, s0D),
      0|                                       _mm_unpacklo_epi64(s0A, s0B));
      0|       const __m256i s1 = yy_set_m128i(_mm_unpacklo_epi64(s1C, s1D),
      0|                                       _mm_unpacklo_epi64(s1A, s1B));
      0|       const __m256i m16 = calc_mask_d16_avx2(&s0, &s1, &_r, &y38, &y64, shift);
      0|       const __m256i m8 = _mm256_packus_epi16(m16, _mm256_setzero_si256());
      0|       xx_storeu_128(mask,
      0|                     _mm256_castsi256_si128(_mm256_permute4x64_epi64(m8, 0xd8)));
      0|       src0 += src0_stride << 2;
      0|       src1 += src1_stride << 2;
      0|       mask += 16;
      0|       i += 4;
      0|     } while (i < h);
  43.1k|   } else if (w == 8) {
  34.2k|     do {
  34.2k|       const __m256i s0AB = yy_loadu2_128(src0 + src0_stride, src0);
  34.2k|       const __m256i s0CD =
  34.2k|           yy_loadu2_128(src0 + src0_stride * 3, src0 + src0_stride * 2);
  34.2k|       const __m256i s1AB = yy_loadu2_128(src1 + src1_stride, src1);
  34.2k|       const __m256i s1CD =
  34.2k|           yy_loadu2_128(src1 + src1_stride * 3, src1 + src1_stride * 2);
  34.2k|       const __m256i m16AB =
  34.2k|           calc_mask_d16_avx2(&s0AB, &s1AB, &_r, &y38, &y64, shift);
  34.2k|       const __m256i m16CD =
  34.2k|           calc_mask_d16_avx2(&s0CD, &s1CD, &_r, &y38, &y64, shift);
  34.2k|       const __m256i m8 = _mm256_packus_epi16(m16AB, m16CD);
  34.2k|       yy_storeu_256(mask, _mm256_permute4x64_epi64(m8, 0xd8));
  34.2k|       src0 += src0_stride << 2;
  34.2k|       src1 += src1_stride << 2;
  34.2k|       mask += 32;
  34.2k|       i += 4;
  34.2k|     } while (i < h);
  34.5k|   } else if (w == 16) {
   134k|     do {
   134k|       const __m256i s0A = yy_loadu_256(src0);
   134k|       const __m256i s0B = yy_loadu_256(src0 + src0_stride);
   134k|       const __m256i s1A = yy_loadu_256(src1);
   134k|       const __m256i s1B = yy_loadu_256(src1 + src1_stride);
   134k|       const __m256i m16A =
   134k|           calc_mask_d16_avx2(&s0A, &s1A, &_r, &y38, &y64, shift);
   134k|       const __m256i m16B =
   134k|           calc_mask_d16_avx2(&s0B, &s1B, &_r, &y38, &y64, shift);
   134k|       const __m256i m8 = _mm256_packus_epi16(m16A, m16B);
   134k|       yy_storeu_256(mask, _mm256_permute4x64_epi64(m8, 0xd8));
   134k|       src0 += src0_stride << 1;
   134k|       src1 += src1_stride << 1;
   134k|       mask += 32;
   134k|       i += 2;
   134k|     } while (i < h);
  19.5k|   } else if (w == 32) {
   309k|     do {
   309k|       const __m256i s0A = yy_loadu_256(src0);
   309k|       const __m256i s0B = yy_loadu_256(src0 + 16);
   309k|       const __m256i s1A = yy_loadu_256(src1);
   309k|       const __m256i s1B = yy_loadu_256(src1 + 16);
   309k|       const __m256i m16A =
   309k|           calc_mask_d16_avx2(&s0A, &s1A, &_r, &y38, &y64, shift);
   309k|       const __m256i m16B =
   309k|           calc_mask_d16_avx2(&s0B, &s1B, &_r, &y38, &y64, shift);
   309k|       const __m256i m8 = _mm256_packus_epi16(m16A, m16B);
   309k|       yy_storeu_256(mask, _mm256_permute4x64_epi64(m8, 0xd8));
   309k|       src0 += src0_stride;
   309k|       src1 += src1_stride;
   309k|       mask += 32;
   309k|       i += 1;
   309k|     } while (i < h);
  13.1k|   } else if (w == 64) {
   215k|     do {
   215k|       const __m256i s0A = yy_loadu_256(src0);
   215k|       const __m256i s0B = yy_loadu_256(src0 + 16);
   215k|       const __m256i s0C = yy_loadu_256(src0 + 32);
   215k|       const __m256i s0D = yy_loadu_256(src0 + 48);
   215k|       const __m256i s1A = yy_loadu_256(src1);
   215k|       const __m256i s1B = yy_loadu_256(src1 + 16);
   215k|       const __m256i s1C = yy_loadu_256(src1 + 32);
   215k|       const __m256i s1D = yy_loadu_256(src1 + 48);
   215k|       const __m256i m16A =
   215k|           calc_mask_d16_avx2(&s0A, &s1A, &_r, &y38, &y64, shift);
   215k|       const __m256i m16B =
   215k|           calc_mask_d16_avx2(&s0B, &s1B, &_r, &y38, &y64, shift);
   215k|       const __m256i m16C =
   215k|           calc_mask_d16_avx2(&s0C, &s1C, &_r, &y38, &y64, shift);
   215k|       const __m256i m16D =
   215k|           calc_mask_d16_avx2(&s0D, &s1D, &_r, &y38, &y64, shift);
   215k|       const __m256i m8AB = _mm256_packus_epi16(m16A, m16B);
   215k|       const __m256i m8CD = _mm256_packus_epi16(m16C, m16D);
   215k|       yy_storeu_256(mask, _mm256_permute4x64_epi64(m8AB, 0xd8));
   215k|       yy_storeu_256(mask + 32, _mm256_permute4x64_epi64(m8CD, 0xd8));
   215k|       src0 += src0_stride;
   215k|       src1 += src1_stride;
   215k|       mask += 64;
   215k|       i += 1;
   215k|     } while (i < h);
  4.83k|   } else {
   159k|     do {
   159k|       const __m256i s0A = yy_loadu_256(src0);
   159k|       const __m256i s0B = yy_loadu_256(src0 + 16);
   159k|       const __m256i s0C = yy_loadu_256(src0 + 32);
   159k|       const __m256i s0D = yy_loadu_256(src0 + 48);
   159k|       const __m256i s0E = yy_loadu_256(src0 + 64);
   159k|       const __m256i s0F = yy_loadu_256(src0 + 80);
   159k|       const __m256i s0G = yy_loadu_256(src0 + 96);
   159k|       const __m256i s0H = yy_loadu_256(src0 + 112);
   159k|       const __m256i s1A = yy_loadu_256(src1);
   159k|       const __m256i s1B = yy_loadu_256(src1 + 16);
   159k|       const __m256i s1C = yy_loadu_256(src1 + 32);
   159k|       const __m256i s1D = yy_loadu_256(src1 + 48);
   159k|       const __m256i s1E = yy_loadu_256(src1 + 64);
   159k|       const __m256i s1F = yy_loadu_256(src1 + 80);
   159k|       const __m256i s1G = yy_loadu_256(src1 + 96);
   159k|       const __m256i s1H = yy_loadu_256(src1 + 112);
   159k|       const __m256i m16A =
   159k|           calc_mask_d16_avx2(&s0A, &s1A, &_r, &y38, &y64, shift);
   159k|       const __m256i m16B =
   159k|           calc_mask_d16_avx2(&s0B, &s1B, &_r, &y38, &y64, shift);
   159k|       const __m256i m16C =
   159k|           calc_mask_d16_avx2(&s0C, &s1C, &_r, &y38, &y64, shift);
   159k|       const __m256i m16D =
   159k|           calc_mask_d16_avx2(&s0D, &s1D, &_r, &y38, &y64, shift);
   159k|       const __m256i m16E =
   159k|           calc_mask_d16_avx2(&s0E, &s1E, &_r, &y38, &y64, shift);
   159k|       const __m256i m16F =
   159k|           calc_mask_d16_avx2(&s0F, &s1F, &_r, &y38, &y64, shift);
   159k|       const __m256i m16G =
   159k|           calc_mask_d16_avx2(&s0G, &s1G, &_r, &y38, &y64, shift);
   159k|       const __m256i m16H =
   159k|           calc_mask_d16_avx2(&s0H, &s1H, &_r, &y38, &y64, shift);
   159k|       const __m256i m8AB = _mm256_packus_epi16(m16A, m16B);
   159k|       const __m256i m8CD = _mm256_packus_epi16(m16C, m16D);
   159k|       const __m256i m8EF = _mm256_packus_epi16(m16E, m16F);
   159k|       const __m256i m8GH = _mm256_packus_epi16(m16G, m16H);
   159k|       yy_storeu_256(mask, _mm256_permute4x64_epi64(m8AB, 0xd8));
   159k|       yy_storeu_256(mask + 32, _mm256_permute4x64_epi64(m8CD, 0xd8));
   159k|       yy_storeu_256(mask + 64, _mm256_permute4x64_epi64(m8EF, 0xd8));
   159k|       yy_storeu_256(mask + 96, _mm256_permute4x64_epi64(m8GH, 0xd8));
   159k|       src0 += src0_stride;
   159k|       src1 += src1_stride;
   159k|       mask += 128;
   159k|       i += 1;
   159k|     } while (i < h);
  1.58k|   }
  43.1k| }
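
The _r constant above is the usual round-to-nearest bias for a right shift: with shift == 4 it equals 8, so (diff + 8) >> 4 computes diff / 16 rounded to nearest rather than truncated. For example, diff == 23 gives (23 + 8) >> 4 == 1 (23 / 16 ≈ 1.44), while diff == 25 gives (25 + 8) >> 4 == 2 (25 / 16 ≈ 1.56). Using the saturating _mm256_adds_epu16 for the bias keeps values near 0xFFFF from wrapping.
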
       |
       | static inline void build_compound_diffwtd_mask_d16_inv_avx2(
       |     uint8_t *mask, const CONV_BUF_TYPE *src0, int src0_stride,
  39.2k|     const CONV_BUF_TYPE *src1, int src1_stride, int h, int w, int shift) {
  39.2k|   const int mask_base = 38;
  39.2k|   const __m256i _r = _mm256_set1_epi16((1 << shift) >> 1);
  39.2k|   const __m256i y38 = _mm256_set1_epi16(mask_base);
  39.2k|   const __m256i y64 = _mm256_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
  39.2k|   int i = 0;
  39.2k|   if (w == 4) {
      0|     do {
      0|       const __m128i s0A = xx_loadl_64(src0);
      0|       const __m128i s0B = xx_loadl_64(src0 + src0_stride);
      0|       const __m128i s0C = xx_loadl_64(src0 + src0_stride * 2);
      0|       const __m128i s0D = xx_loadl_64(src0 + src0_stride * 3);
      0|       const __m128i s1A = xx_loadl_64(src1);
      0|       const __m128i s1B = xx_loadl_64(src1 + src1_stride);
      0|       const __m128i s1C = xx_loadl_64(src1 + src1_stride * 2);
      0|       const __m128i s1D = xx_loadl_64(src1 + src1_stride * 3);
      0|       const __m256i s0 = yy_set_m128i(_mm_unpacklo_epi64(s0C, s0D),
      0|                                       _mm_unpacklo_epi64(s0A, s0B));
      0|       const __m256i s1 = yy_set_m128i(_mm_unpacklo_epi64(s1C, s1D),
      0|                                       _mm_unpacklo_epi64(s1A, s1B));
      0|       const __m256i m16 =
      0|           calc_mask_d16_inv_avx2(&s0, &s1, &_r, &y38, &y64, shift);
      0|       const __m256i m8 = _mm256_packus_epi16(m16, _mm256_setzero_si256());
      0|       xx_storeu_128(mask,
      0|                     _mm256_castsi256_si128(_mm256_permute4x64_epi64(m8, 0xd8)));
      0|       src0 += src0_stride << 2;
      0|       src1 += src1_stride << 2;
      0|       mask += 16;
      0|       i += 4;
      0|     } while (i < h);
  39.2k|   } else if (w == 8) {
  38.0k|     do {
  38.0k|       const __m256i s0AB = yy_loadu2_128(src0 + src0_stride, src0);
  38.0k|       const __m256i s0CD =
  38.0k|           yy_loadu2_128(src0 + src0_stride * 3, src0 + src0_stride * 2);
  38.0k|       const __m256i s1AB = yy_loadu2_128(src1 + src1_stride, src1);
  38.0k|       const __m256i s1CD =
  38.0k|           yy_loadu2_128(src1 + src1_stride * 3, src1 + src1_stride * 2);
  38.0k|       const __m256i m16AB =
  38.0k|           calc_mask_d16_inv_avx2(&s0AB, &s1AB, &_r, &y38, &y64, shift);
  38.0k|       const __m256i m16CD =
  38.0k|           calc_mask_d16_inv_avx2(&s0CD, &s1CD, &_r, &y38, &y64, shift);
  38.0k|       const __m256i m8 = _mm256_packus_epi16(m16AB, m16CD);
  38.0k|       yy_storeu_256(mask, _mm256_permute4x64_epi64(m8, 0xd8));
  38.0k|       src0 += src0_stride << 2;
  38.0k|       src1 += src1_stride << 2;
  38.0k|       mask += 32;
  38.0k|       i += 4;
  38.0k|     } while (i < h);
  30.2k|   } else if (w == 16) {
   260k|     do {
   260k|       const __m256i s0A = yy_loadu_256(src0);
   260k|       const __m256i s0B = yy_loadu_256(src0 + src0_stride);
   260k|       const __m256i s1A = yy_loadu_256(src1);
   260k|       const __m256i s1B = yy_loadu_256(src1 + src1_stride);
   260k|       const __m256i m16A =
   260k|           calc_mask_d16_inv_avx2(&s0A, &s1A, &_r, &y38, &y64, shift);
   260k|       const __m256i m16B =
   260k|           calc_mask_d16_inv_avx2(&s0B, &s1B, &_r, &y38, &y64, shift);
   260k|       const __m256i m8 = _mm256_packus_epi16(m16A, m16B);
   260k|       yy_storeu_256(mask, _mm256_permute4x64_epi64(m8, 0xd8));
   260k|       src0 += src0_stride << 1;
   260k|       src1 += src1_stride << 1;
   260k|       mask += 32;
   260k|       i += 2;
   260k|     } while (i < h);
  17.2k|   } else if (w == 32) {
   182k|     do {
   182k|       const __m256i s0A = yy_loadu_256(src0);
   182k|       const __m256i s0B = yy_loadu_256(src0 + 16);
   182k|       const __m256i s1A = yy_loadu_256(src1);
   182k|       const __m256i s1B = yy_loadu_256(src1 + 16);
   182k|       const __m256i m16A =
   182k|           calc_mask_d16_inv_avx2(&s0A, &s1A, &_r, &y38, &y64, shift);
   182k|       const __m256i m16B =
   182k|           calc_mask_d16_inv_avx2(&s0B, &s1B, &_r, &y38, &y64, shift);
   182k|       const __m256i m8 = _mm256_packus_epi16(m16A, m16B);
   182k|       yy_storeu_256(mask, _mm256_permute4x64_epi64(m8, 0xd8));
   182k|       src0 += src0_stride;
   182k|       src1 += src1_stride;
   182k|       mask += 32;
   182k|       i += 1;
   182k|     } while (i < h);
  8.42k|   } else if (w == 64) {
   209k|     do {
   209k|       const __m256i s0A = yy_loadu_256(src0);
   209k|       const __m256i s0B = yy_loadu_256(src0 + 16);
   209k|       const __m256i s0C = yy_loadu_256(src0 + 32);
   209k|       const __m256i s0D = yy_loadu_256(src0 + 48);
   209k|       const __m256i s1A = yy_loadu_256(src1);
   209k|       const __m256i s1B = yy_loadu_256(src1 + 16);
   209k|       const __m256i s1C = yy_loadu_256(src1 + 32);
   209k|       const __m256i s1D = yy_loadu_256(src1 + 48);
   209k|       const __m256i m16A =
   209k|           calc_mask_d16_inv_avx2(&s0A, &s1A, &_r, &y38, &y64, shift);
   209k|       const __m256i m16B =
   209k|           calc_mask_d16_inv_avx2(&s0B, &s1B, &_r, &y38, &y64, shift);
   209k|       const __m256i m16C =
   209k|           calc_mask_d16_inv_avx2(&s0C, &s1C, &_r, &y38, &y64, shift);
   209k|       const __m256i m16D =
   209k|           calc_mask_d16_inv_avx2(&s0D, &s1D, &_r, &y38, &y64, shift);
   209k|       const __m256i m8AB = _mm256_packus_epi16(m16A, m16B);
   209k|       const __m256i m8CD = _mm256_packus_epi16(m16C, m16D);
   209k|       yy_storeu_256(mask, _mm256_permute4x64_epi64(m8AB, 0xd8));
   209k|       yy_storeu_256(mask + 32, _mm256_permute4x64_epi64(m8CD, 0xd8));
   209k|       src0 += src0_stride;
   209k|       src1 += src1_stride;
   209k|       mask += 64;
   209k|       i += 1;
   209k|     } while (i < h);
  3.80k|   } else {
  82.6k|     do {
  82.6k|       const __m256i s0A = yy_loadu_256(src0);
  82.6k|       const __m256i s0B = yy_loadu_256(src0 + 16);
  82.6k|       const __m256i s0C = yy_loadu_256(src0 + 32);
  82.6k|       const __m256i s0D = yy_loadu_256(src0 + 48);
  82.6k|       const __m256i s0E = yy_loadu_256(src0 + 64);
  82.6k|       const __m256i s0F = yy_loadu_256(src0 + 80);
  82.6k|       const __m256i s0G = yy_loadu_256(src0 + 96);
  82.6k|       const __m256i s0H = yy_loadu_256(src0 + 112);
  82.6k|       const __m256i s1A = yy_loadu_256(src1);
  82.6k|       const __m256i s1B = yy_loadu_256(src1 + 16);
  82.6k|       const __m256i s1C = yy_loadu_256(src1 + 32);
  82.6k|       const __m256i s1D = yy_loadu_256(src1 + 48);
  82.6k|       const __m256i s1E = yy_loadu_256(src1 + 64);
  82.6k|       const __m256i s1F = yy_loadu_256(src1 + 80);
  82.6k|       const __m256i s1G = yy_loadu_256(src1 + 96);
  82.6k|       const __m256i s1H = yy_loadu_256(src1 + 112);
  82.6k|       const __m256i m16A =
  82.6k|           calc_mask_d16_inv_avx2(&s0A, &s1A, &_r, &y38, &y64, shift);
  82.6k|       const __m256i m16B =
  82.6k|           calc_mask_d16_inv_avx2(&s0B, &s1B, &_r, &y38, &y64, shift);
  82.6k|       const __m256i m16C =
  82.6k|           calc_mask_d16_inv_avx2(&s0C, &s1C, &_r, &y38, &y64, shift);
  82.6k|       const __m256i m16D =
  82.6k|           calc_mask_d16_inv_avx2(&s0D, &s1D, &_r, &y38, &y64, shift);
  82.6k|       const __m256i m16E =
  82.6k|           calc_mask_d16_inv_avx2(&s0E, &s1E, &_r, &y38, &y64, shift);
  82.6k|       const __m256i m16F =
  82.6k|           calc_mask_d16_inv_avx2(&s0F, &s1F, &_r, &y38, &y64, shift);
  82.6k|       const __m256i m16G =
  82.6k|           calc_mask_d16_inv_avx2(&s0G, &s1G, &_r, &y38, &y64, shift);
  82.6k|       const __m256i m16H =
  82.6k|           calc_mask_d16_inv_avx2(&s0H, &s1H, &_r, &y38, &y64, shift);
  82.6k|       const __m256i m8AB = _mm256_packus_epi16(m16A, m16B);
  82.6k|       const __m256i m8CD = _mm256_packus_epi16(m16C, m16D);
  82.6k|       const __m256i m8EF = _mm256_packus_epi16(m16E, m16F);
  82.6k|       const __m256i m8GH = _mm256_packus_epi16(m16G, m16H);
  82.6k|       yy_storeu_256(mask, _mm256_permute4x64_epi64(m8AB, 0xd8));
  82.6k|       yy_storeu_256(mask + 32, _mm256_permute4x64_epi64(m8CD, 0xd8));
  82.6k|       yy_storeu_256(mask + 64, _mm256_permute4x64_epi64(m8EF, 0xd8));
  82.6k|       yy_storeu_256(mask + 96, _mm256_permute4x64_epi64(m8GH, 0xd8));
  82.6k|       src0 += src0_stride;
  82.6k|       src1 += src1_stride;
  82.6k|       mask += 128;
  82.6k|       i += 1;
  82.6k|     } while (i < h);
    763|   }
  39.2k| }
       |
       | void av1_build_compound_diffwtd_mask_d16_avx2(
       |     uint8_t *mask, DIFFWTD_MASK_TYPE mask_type, const CONV_BUF_TYPE *src0,
       |     int src0_stride, const CONV_BUF_TYPE *src1, int src1_stride, int h, int w,
  82.3k|     ConvolveParams *conv_params, int bd) {
  82.3k|   const int shift =
  82.3k|       2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1 + (bd - 8);
       |   // Adding the rounding constant could overflow in principle, but that much
       |   // precision is not required. The code should work for other values of
       |   // DIFF_FACTOR_LOG2 and AOM_BLEND_A64_MAX_ALPHA as well, but corner cases
       |   // have not been validated.
  82.3k|   assert(DIFF_FACTOR_LOG2 == 4);
  82.3k|   assert(AOM_BLEND_A64_MAX_ALPHA == 64);
       |
  82.3k|   if (mask_type == DIFFWTD_38) {
  43.1k|     build_compound_diffwtd_mask_d16_avx2(mask, src0, src0_stride, src1,
  43.1k|                                          src1_stride, h, w, shift);
  43.1k|   } else {
  39.2k|     build_compound_diffwtd_mask_d16_inv_avx2(mask, src0, src0_stride, src1,
  39.2k|                                              src1_stride, h, w, shift);
  39.2k|   }
  82.3k| }
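
For a sense of the magnitudes involved (assuming libaom's usual convolve configuration, not something this file defines): FILTER_BITS == 7 and the compound path typically runs with round_0 == 3 and round_1 == 7, so shift == 2 * 7 - 3 - 7 + (bd - 8), i.e. 4 for 8-bit and 6 for 10-bit input. The shift thus removes exactly the extra precision that the CONV_BUF_TYPE compound buffer carries relative to pixel values.
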
       |
       | #if CONFIG_AV1_HIGHBITDEPTH
       |
       | void av1_build_compound_diffwtd_mask_highbd_avx2(
       |     uint8_t *mask, DIFFWTD_MASK_TYPE mask_type, const uint8_t *src0,
       |     int src0_stride, const uint8_t *src1, int src1_stride, int h, int w,
      0|     int bd) {
      0|   if (w < 16) {
      0|     av1_build_compound_diffwtd_mask_highbd_ssse3(
      0|         mask, mask_type, src0, src0_stride, src1, src1_stride, h, w, bd);
      0|   } else {
      0|     assert(mask_type == DIFFWTD_38 || mask_type == DIFFWTD_38_INV);
      0|     assert(bd >= 8);
      0|     assert((w % 16) == 0);
      0|     const __m256i y0 = _mm256_setzero_si256();
      0|     const __m256i yAOM_BLEND_A64_MAX_ALPHA =
      0|         _mm256_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
      0|     const int mask_base = 38;
      0|     const __m256i ymask_base = _mm256_set1_epi16(mask_base);
      0|     const uint16_t *ssrc0 = CONVERT_TO_SHORTPTR(src0);
      0|     const uint16_t *ssrc1 = CONVERT_TO_SHORTPTR(src1);
      0|     if (bd == 8) {
      0|       if (mask_type == DIFFWTD_38_INV) {
      0|         for (int i = 0; i < h; ++i) {
      0|           for (int j = 0; j < w; j += 16) {
      0|             __m256i s0 = _mm256_loadu_si256((const __m256i *)&ssrc0[j]);
      0|             __m256i s1 = _mm256_loadu_si256((const __m256i *)&ssrc1[j]);
      0|             __m256i diff = _mm256_srai_epi16(
      0|                 _mm256_abs_epi16(_mm256_sub_epi16(s0, s1)), DIFF_FACTOR_LOG2);
      0|             __m256i m = _mm256_min_epi16(
      0|                 _mm256_max_epi16(y0, _mm256_add_epi16(diff, ymask_base)),
      0|                 yAOM_BLEND_A64_MAX_ALPHA);
      0|             m = _mm256_sub_epi16(yAOM_BLEND_A64_MAX_ALPHA, m);
      0|             m = _mm256_packus_epi16(m, m);
      0|             m = _mm256_permute4x64_epi64(m, _MM_SHUFFLE(0, 0, 2, 0));
      0|             __m128i m0 = _mm256_castsi256_si128(m);
      0|             _mm_storeu_si128((__m128i *)&mask[j], m0);
      0|           }
      0|           ssrc0 += src0_stride;
      0|           ssrc1 += src1_stride;
      0|           mask += w;
      0|         }
      0|       } else {
      0|         for (int i = 0; i < h; ++i) {
      0|           for (int j = 0; j < w; j += 16) {
      0|             __m256i s0 = _mm256_loadu_si256((const __m256i *)&ssrc0[j]);
      0|             __m256i s1 = _mm256_loadu_si256((const __m256i *)&ssrc1[j]);
      0|             __m256i diff = _mm256_srai_epi16(
      0|                 _mm256_abs_epi16(_mm256_sub_epi16(s0, s1)), DIFF_FACTOR_LOG2);
      0|             __m256i m = _mm256_min_epi16(
      0|                 _mm256_max_epi16(y0, _mm256_add_epi16(diff, ymask_base)),
      0|                 yAOM_BLEND_A64_MAX_ALPHA);
      0|             m = _mm256_packus_epi16(m, m);
      0|             m = _mm256_permute4x64_epi64(m, _MM_SHUFFLE(0, 0, 2, 0));
      0|             __m128i m0 = _mm256_castsi256_si128(m);
      0|             _mm_storeu_si128((__m128i *)&mask[j], m0);
      0|           }
      0|           ssrc0 += src0_stride;
      0|           ssrc1 += src1_stride;
      0|           mask += w;
      0|         }
      0|       }
      0|     } else {
      0|       const __m128i xshift = _mm_set1_epi64x(bd - 8 + DIFF_FACTOR_LOG2);
      0|       if (mask_type == DIFFWTD_38_INV) {
      0|         for (int i = 0; i < h; ++i) {
      0|           for (int j = 0; j < w; j += 16) {
      0|             __m256i s0 = _mm256_loadu_si256((const __m256i *)&ssrc0[j]);
      0|             __m256i s1 = _mm256_loadu_si256((const __m256i *)&ssrc1[j]);
      0|             __m256i diff = _mm256_sra_epi16(
      0|                 _mm256_abs_epi16(_mm256_sub_epi16(s0, s1)), xshift);
      0|             __m256i m = _mm256_min_epi16(
      0|                 _mm256_max_epi16(y0, _mm256_add_epi16(diff, ymask_base)),
      0|                 yAOM_BLEND_A64_MAX_ALPHA);
      0|             m = _mm256_sub_epi16(yAOM_BLEND_A64_MAX_ALPHA, m);
      0|             m = _mm256_packus_epi16(m, m);
      0|             m = _mm256_permute4x64_epi64(m, _MM_SHUFFLE(0, 0, 2, 0));
      0|             __m128i m0 = _mm256_castsi256_si128(m);
      0|             _mm_storeu_si128((__m128i *)&mask[j], m0);
      0|           }
      0|           ssrc0 += src0_stride;
      0|           ssrc1 += src1_stride;
      0|           mask += w;
      0|         }
      0|       } else {
      0|         for (int i = 0; i < h; ++i) {
      0|           for (int j = 0; j < w; j += 16) {
      0|             __m256i s0 = _mm256_loadu_si256((const __m256i *)&ssrc0[j]);
      0|             __m256i s1 = _mm256_loadu_si256((const __m256i *)&ssrc1[j]);
      0|             __m256i diff = _mm256_sra_epi16(
      0|                 _mm256_abs_epi16(_mm256_sub_epi16(s0, s1)), xshift);
      0|             __m256i m = _mm256_min_epi16(
      0|                 _mm256_max_epi16(y0, _mm256_add_epi16(diff, ymask_base)),
      0|                 yAOM_BLEND_A64_MAX_ALPHA);
      0|             m = _mm256_packus_epi16(m, m);
      0|             m = _mm256_permute4x64_epi64(m, _MM_SHUFFLE(0, 0, 2, 0));
      0|             __m128i m0 = _mm256_castsi256_si128(m);
      0|             _mm_storeu_si128((__m128i *)&mask[j], m0);
      0|           }
      0|           ssrc0 += src0_stride;
      0|           ssrc1 += src1_stride;
      0|           mask += w;
      0|         }
      0|       }
      0|     }
      0|   }
      0| }
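
Both bit-depth branches reduce to one per-element formula once the shift is folded together: the bd == 8 branch shifts the absolute difference by DIFF_FACTOR_LOG2 alone, while the bd > 8 branch shifts by bd - 8 + DIFF_FACTOR_LOG2, keeping the mask scale depth-independent. A scalar sketch (hypothetical, not the libaom C reference):

#include <stdint.h>
#include <stdlib.h>

// One element of the high-bitdepth diffwtd mask, valid for any bd >= 8.
static uint8_t highbd_mask_scalar(uint16_t s0, uint16_t s1, int bd,
                                  int inverse) {
  const int diff = abs((int)s0 - (int)s1) >> (bd - 8 + 4);  // DIFF_FACTOR_LOG2
  int m = 38 + diff;                   // mask_base == 38
  m = m < 0 ? 0 : (m > 64 ? 64 : m);   // clamp to [0, AOM_BLEND_A64_MAX_ALPHA]
  return (uint8_t)(inverse ? 64 - m : m);
}
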
       |
       | #endif  // CONFIG_AV1_HIGHBITDEPTH