Coverage Report

Created: 2025-11-29 06:23

/src/libvpx/vpx_dsp/x86/variance_avx2.c
Line |  Count | Source
   1 |        | /*
   2 |        |  *  Copyright (c) 2012 The WebM project authors. All Rights Reserved.
   3 |        |  *
   4 |        |  *  Use of this source code is governed by a BSD-style license
   5 |        |  *  that can be found in the LICENSE file in the root of the source
   6 |        |  *  tree. An additional intellectual property rights grant can be found
   7 |        |  *  in the file PATENTS.  All contributing project authors may
   8 |        |  *  be found in the AUTHORS file in the root of the source tree.
   9 |        |  */
  10 |        |
  11 |        | #include <immintrin.h>  // AVX2
  12 |        |
  13 |        | #include "./vpx_dsp_rtcd.h"
  14 |        |
  15 |        | /* clang-format off */
  16 |        | DECLARE_ALIGNED(32, static const uint8_t, bilinear_filters_avx2[512]) = {
  17 |        |   16, 0,  16, 0,  16, 0,  16, 0,  16, 0,  16, 0,  16, 0,  16, 0,
  18 |        |   16, 0,  16, 0,  16, 0,  16, 0,  16, 0,  16, 0,  16, 0,  16, 0,
  19 |        |   14, 2,  14, 2,  14, 2,  14, 2,  14, 2,  14, 2,  14, 2,  14, 2,
  20 |        |   14, 2,  14, 2,  14, 2,  14, 2,  14, 2,  14, 2,  14, 2,  14, 2,
  21 |        |   12, 4,  12, 4,  12, 4,  12, 4,  12, 4,  12, 4,  12, 4,  12, 4,
  22 |        |   12, 4,  12, 4,  12, 4,  12, 4,  12, 4,  12, 4,  12, 4,  12, 4,
  23 |        |   10, 6,  10, 6,  10, 6,  10, 6,  10, 6,  10, 6,  10, 6,  10, 6,
  24 |        |   10, 6,  10, 6,  10, 6,  10, 6,  10, 6,  10, 6,  10, 6,  10, 6,
  25 |        |   8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
  26 |        |   8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
  27 |        |   6,  10, 6,  10, 6,  10, 6,  10, 6,  10, 6,  10, 6,  10, 6,  10,
  28 |        |   6,  10, 6,  10, 6,  10, 6,  10, 6,  10, 6,  10, 6,  10, 6,  10,
  29 |        |   4,  12, 4,  12, 4,  12, 4,  12, 4,  12, 4,  12, 4,  12, 4,  12,
  30 |        |   4,  12, 4,  12, 4,  12, 4,  12, 4,  12, 4,  12, 4,  12, 4,  12,
  31 |        |   2,  14, 2,  14, 2,  14, 2,  14, 2,  14, 2,  14, 2,  14, 2,  14,
  32 |        |   2,  14, 2,  14, 2,  14, 2,  14, 2,  14, 2,  14, 2,  14, 2,  14,
  33 |        | };
  34 |        |
  35 |        | DECLARE_ALIGNED(32, static const int8_t, adjacent_sub_avx2[32]) = {
  36 |        |   1, -1,  1, -1,  1, -1,  1, -1,  1, -1,  1, -1,  1, -1,  1, -1,
  37 |        |   1, -1,  1, -1,  1, -1,  1, -1,  1, -1,  1, -1,  1, -1,  1, -1
  38 |        | };
  39 |        | /* clang-format on */
  40 |        |
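Note: each eighth-pel offset o = 0..7 owns one 32-byte row of bilinear_filters_avx2, holding the tap pair (16 - 2*o, 2*o) repeated sixteen times, so the `offset << 5` expressions in the functions below index straight to that row. A scalar sketch of the filtering those taps drive (illustrative only, not part of the file):

    #include <stdint.h>

    /* One bilinear tap between neighbouring pixels a and b, offset o in 0..7. */
    static uint8_t bilinear_tap(uint8_t a, uint8_t b, int o) {
      return (uint8_t)((a * (16 - 2 * o) + b * (2 * o) + 8) >> 4);
    }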
  41 |        | static INLINE void variance_kernel_avx2(const __m256i src, const __m256i ref,
  42 |        |                                         __m256i *const sse,
  43 |  5.72M |                                         __m256i *const sum) {
  44 |  5.72M |   const __m256i adj_sub = _mm256_load_si256((__m256i const *)adjacent_sub_avx2);
  45 |        |
  46 |        |   // unpack into pairs of source and reference values
  47 |  5.72M |   const __m256i src_ref0 = _mm256_unpacklo_epi8(src, ref);
  48 |  5.72M |   const __m256i src_ref1 = _mm256_unpackhi_epi8(src, ref);
  49 |        |
  50 |        |   // subtract adjacent elements using src*1 + ref*-1
  51 |  5.72M |   const __m256i diff0 = _mm256_maddubs_epi16(src_ref0, adj_sub);
  52 |  5.72M |   const __m256i diff1 = _mm256_maddubs_epi16(src_ref1, adj_sub);
  53 |  5.72M |   const __m256i madd0 = _mm256_madd_epi16(diff0, diff0);
  54 |  5.72M |   const __m256i madd1 = _mm256_madd_epi16(diff1, diff1);
  55 |        |
  56 |        |   // add to the running totals
  57 |  5.72M |   *sum = _mm256_add_epi16(*sum, _mm256_add_epi16(diff0, diff1));
  58 |  5.72M |   *sse = _mm256_add_epi32(*sse, _mm256_add_epi32(madd0, madd1));
  59 |  5.72M | }
  60 |        |
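Note: _mm256_maddubs_epi16 multiplies unsigned bytes from its first operand by signed bytes from its second and adds adjacent products, so with the (1, -1) pattern of adjacent_sub_avx2 each interleaved (src, ref) byte pair collapses to src - ref in one instruction; _mm256_madd_epi16 then squares and pairwise-sums the differences. A scalar model of what the kernel accumulates (a sketch; the name is illustrative):

    #include <stdint.h>

    /* Scalar equivalent of variance_kernel_avx2 over n byte pairs. */
    static void variance_kernel_scalar(const uint8_t *src, const uint8_t *ref,
                                       int n, int64_t *sum, int64_t *sse) {
      for (int i = 0; i < n; i++) {
        const int diff = src[i] - ref[i]; /* maddubs with weights (1, -1) */
        *sum += diff;                     /* add_epi16 running total      */
        *sse += diff * diff;              /* madd_epi16 + add_epi32       */
      }
    }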
  61 |        | static INLINE void variance_final_from_32bit_sum_avx2(__m256i vsse,
  62 |        |                                                       __m128i vsum,
  63 |        |                                                       unsigned int *const sse,
  64 |  1.57M |                                                       int *const sum) {
  65 |        |   // extract the low lane and add it to the high lane
  66 |  1.57M |   const __m128i sse_reg_128 = _mm_add_epi32(_mm256_castsi256_si128(vsse),
  67 |  1.57M |                                             _mm256_extractf128_si256(vsse, 1));
  68 |        |
  69 |        |   // unpack sse and sum registers and add
  70 |  1.57M |   const __m128i sse_sum_lo = _mm_unpacklo_epi32(sse_reg_128, vsum);
  71 |  1.57M |   const __m128i sse_sum_hi = _mm_unpackhi_epi32(sse_reg_128, vsum);
  72 |  1.57M |   const __m128i sse_sum = _mm_add_epi32(sse_sum_lo, sse_sum_hi);
  73 |        |
  74 |        |   // perform the final summation and extract the results
  75 |  1.57M |   const __m128i res = _mm_add_epi32(sse_sum, _mm_srli_si128(sse_sum, 8));
  76 |  1.57M |   *((int *)sse) = _mm_cvtsi128_si32(res);
  77 |  1.57M |   *((int *)sum) = _mm_extract_epi32(res, 1);
  78 |  1.57M | }
  79 |        |
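Note: the unpack/add sequence interleaves the four SSE partials with the four sum partials so one vector reduction finishes both at once; afterwards lane 0 of res holds the total SSE and lane 1 the total sum. Layout sketch (s = SSE partials, m = sum partials):

    /* sse_reg_128 = [s0 s1 s2 s3]          vsum = [m0 m1 m2 m3]
     * sse_sum_lo  = [s0 m0 s1 m1]    sse_sum_hi = [s2 m2 s3 m3]
     * sse_sum     = [s0+s2  m0+m2  s1+s3  m1+m3]
     * res         = [s0+s1+s2+s3  m0+m1+m2+m3  .  .]            */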
  80 |        | static INLINE void variance_final_from_16bit_sum_avx2(__m256i vsse,
  81 |        |                                                       __m256i vsum,
  82 |        |                                                       unsigned int *const sse,
  83 |  1.57M |                                                       int *const sum) {
  84 |        |   // extract the low lane and add it to the high lane
  85 |  1.57M |   const __m128i sum_reg_128 = _mm_add_epi16(_mm256_castsi256_si128(vsum),
  86 |  1.57M |                                             _mm256_extractf128_si256(vsum, 1));
  87 |  1.57M |   const __m128i sum_reg_64 =
  88 |  1.57M |       _mm_add_epi16(sum_reg_128, _mm_srli_si128(sum_reg_128, 8));
  89 |  1.57M |   const __m128i sum_int32 = _mm_cvtepi16_epi32(sum_reg_64);
  90 |        |
  91 |  1.57M |   variance_final_from_32bit_sum_avx2(vsse, sum_int32, sse, sum);
  92 |  1.57M | }
  93 |        |
  94 |      0 | static INLINE __m256i sum_to_32bit_avx2(const __m256i sum) {
  95 |      0 |   const __m256i sum_lo = _mm256_cvtepi16_epi32(_mm256_castsi256_si128(sum));
  96 |      0 |   const __m256i sum_hi =
  97 |      0 |       _mm256_cvtepi16_epi32(_mm256_extractf128_si256(sum, 1));
  98 |      0 |   return _mm256_add_epi32(sum_lo, sum_hi);
  99 |      0 | }
 100 |        |
 101 |        | static INLINE void variance8_kernel_avx2(
 102 |        |     const uint8_t *const src, const int src_stride, const uint8_t *const ref,
 103 |  3.42M |     const int ref_stride, __m256i *const sse, __m256i *const sum) {
 104 |  3.42M |   __m128i src0, src1, ref0, ref1;
 105 |  3.42M |   __m256i ss, rr, diff;
 106 |        |
 107 |        |   // 0 0 0.... 0 s07 s06 s05 s04 s03 s02 s01 s00
 108 |  3.42M |   src0 = _mm_loadl_epi64((const __m128i *)(src + 0 * src_stride));
 109 |        |
 110 |        |   // 0 0 0.... 0 s17 s16 s15 s14 s13 s12 s11 s10
 111 |  3.42M |   src1 = _mm_loadl_epi64((const __m128i *)(src + 1 * src_stride));
 112 |        |
 113 |        |   // s17 s16...s11 s10 s07 s06...s01 s00 (8 bit)
 114 |  3.42M |   src0 = _mm_unpacklo_epi64(src0, src1);
 115 |        |
 116 |        |   // s17 s16...s11 s10 s07 s06...s01 s00 (16 bit)
 117 |  3.42M |   ss = _mm256_cvtepu8_epi16(src0);
 118 |        |
 119 |        |   // 0 0 0.... 0 r07 r06 r05 r04 r03 r02 r01 r00
 120 |  3.42M |   ref0 = _mm_loadl_epi64((const __m128i *)(ref + 0 * ref_stride));
 121 |        |
 122 |        |   // 0 0 0.... 0 r17 r16 r15 r14 r13 r12 r11 r10
 123 |  3.42M |   ref1 = _mm_loadl_epi64((const __m128i *)(ref + 1 * ref_stride));
 124 |        |
 125 |        |   // r17 r16...r11 r10 r07 r06...r01 r00 (8 bit)
 126 |  3.42M |   ref0 = _mm_unpacklo_epi64(ref0, ref1);
 127 |        |
 128 |        |   // r17 r16...r11 r10 r07 r06...r01 r00 (16 bit)
 129 |  3.42M |   rr = _mm256_cvtepu8_epi16(ref0);
 130 |        |
 131 |  3.42M |   diff = _mm256_sub_epi16(ss, rr);
 132 |  3.42M |   *sse = _mm256_add_epi32(*sse, _mm256_madd_epi16(diff, diff));
 133 |  3.42M |   *sum = _mm256_add_epi16(*sum, diff);
 134 |  3.42M | }
 135 |        |
 136 |        | static INLINE void variance16_kernel_avx2(
 137 |        |     const uint8_t *const src, const int src_stride, const uint8_t *const ref,
 138 |  5.72M |     const int ref_stride, __m256i *const sse, __m256i *const sum) {
 139 |  5.72M |   const __m128i s0 = _mm_loadu_si128((__m128i const *)(src + 0 * src_stride));
 140 |  5.72M |   const __m128i s1 = _mm_loadu_si128((__m128i const *)(src + 1 * src_stride));
 141 |  5.72M |   const __m128i r0 = _mm_loadu_si128((__m128i const *)(ref + 0 * ref_stride));
 142 |  5.72M |   const __m128i r1 = _mm_loadu_si128((__m128i const *)(ref + 1 * ref_stride));
 143 |  5.72M |   const __m256i s = _mm256_inserti128_si256(_mm256_castsi128_si256(s0), s1, 1);
 144 |  5.72M |   const __m256i r = _mm256_inserti128_si256(_mm256_castsi128_si256(r0), r1, 1);
 145 |  5.72M |   variance_kernel_avx2(s, r, sse, sum);
 146 |  5.72M | }
 147 |        |
 148 |        | static INLINE void variance32_kernel_avx2(const uint8_t *const src,
 149 |        |                                           const uint8_t *const ref,
 150 |        |                                           __m256i *const sse,
 151 |      0 |                                           __m256i *const sum) {
 152 |      0 |   const __m256i s = _mm256_loadu_si256((__m256i const *)(src));
 153 |      0 |   const __m256i r = _mm256_loadu_si256((__m256i const *)(ref));
 154 |      0 |   variance_kernel_avx2(s, r, sse, sum);
 155 |      0 | }
 156 |        |
 157 |        | static INLINE void variance8_avx2(const uint8_t *src, const int src_stride,
 158 |        |                                   const uint8_t *ref, const int ref_stride,
 159 |        |                                   const int h, __m256i *const vsse,
 160 |   856k |                                   __m256i *const vsum) {
 161 |   856k |   int i;
 162 |   856k |   *vsum = _mm256_setzero_si256();
 163 |   856k |   *vsse = _mm256_setzero_si256();
 164 |        |
 165 |  4.28M |   for (i = 0; i < h; i += 2) {
 166 |  3.42M |     variance8_kernel_avx2(src, src_stride, ref, ref_stride, vsse, vsum);
 167 |  3.42M |     src += 2 * src_stride;
 168 |  3.42M |     ref += 2 * ref_stride;
 169 |  3.42M |   }
 170 |   856k | }
 171 |        |
 172 |        | static INLINE void variance16_avx2(const uint8_t *src, const int src_stride,
 173 |        |                                    const uint8_t *ref, const int ref_stride,
 174 |        |                                    const int h, __m256i *const vsse,
 175 |   715k |                                    __m256i *const vsum) {
 176 |   715k |   int i;
 177 |   715k |   *vsum = _mm256_setzero_si256();
 178 |   715k |   *vsse = _mm256_setzero_si256();
 179 |        |
 180 |  6.44M |   for (i = 0; i < h; i += 2) {
 181 |  5.72M |     variance16_kernel_avx2(src, src_stride, ref, ref_stride, vsse, vsum);
 182 |  5.72M |     src += 2 * src_stride;
 183 |  5.72M |     ref += 2 * ref_stride;
 184 |  5.72M |   }
 185 |   715k | }
 186 |        |
 187 |        | static INLINE void variance32_avx2(const uint8_t *src, const int src_stride,
 188 |        |                                    const uint8_t *ref, const int ref_stride,
 189 |        |                                    const int h, __m256i *const vsse,
 190 |      0 |                                    __m256i *const vsum) {
 191 |      0 |   int i;
 192 |      0 |   *vsum = _mm256_setzero_si256();
 193 |      0 |   *vsse = _mm256_setzero_si256();
 194 |        |
 195 |      0 |   for (i = 0; i < h; i++) {
 196 |      0 |     variance32_kernel_avx2(src, ref, vsse, vsum);
 197 |      0 |     src += src_stride;
 198 |      0 |     ref += ref_stride;
 199 |      0 |   }
 200 |      0 | }
 201 |        |
 202 |        | static INLINE void variance64_avx2(const uint8_t *src, const int src_stride,
 203 |        |                                    const uint8_t *ref, const int ref_stride,
 204 |        |                                    const int h, __m256i *const vsse,
 205 |      0 |                                    __m256i *const vsum) {
 206 |      0 |   int i;
 207 |      0 |   *vsum = _mm256_setzero_si256();
 208 |        |
 209 |      0 |   for (i = 0; i < h; i++) {
 210 |      0 |     variance32_kernel_avx2(src + 0, ref + 0, vsse, vsum);
 211 |      0 |     variance32_kernel_avx2(src + 32, ref + 32, vsse, vsum);
 212 |      0 |     src += src_stride;
 213 |      0 |     ref += ref_stride;
 214 |      0 |   }
 215 |      0 | }
 216 |        |
 217 |        | void vpx_get16x16var_avx2(const uint8_t *src_ptr, int src_stride,
 218 |        |                           const uint8_t *ref_ptr, int ref_stride,
 219 |      0 |                           unsigned int *sse, int *sum) {
 220 |      0 |   __m256i vsse, vsum;
 221 |      0 |   variance16_avx2(src_ptr, src_stride, ref_ptr, ref_stride, 16, &vsse, &vsum);
 222 |      0 |   variance_final_from_16bit_sum_avx2(vsse, vsum, sse, sum);
 223 |      0 | }
 224 |        |
 225 |        | #define FILTER_SRC(filter)                               \
 226 |        |   /* filter the source */                                \
 227 |      0 |   exp_src_lo = _mm256_maddubs_epi16(exp_src_lo, filter); \
 228 |      0 |   exp_src_hi = _mm256_maddubs_epi16(exp_src_hi, filter); \
 229 |      0 |                                                          \
 230 |      0 |   /* add 8 to source */                                  \
 231 |      0 |   exp_src_lo = _mm256_add_epi16(exp_src_lo, pw8);        \
 232 |      0 |   exp_src_hi = _mm256_add_epi16(exp_src_hi, pw8);        \
 233 |      0 |                                                          \
 234 |      0 |   /* divide source by 16 */                              \
 235 |      0 |   exp_src_lo = _mm256_srai_epi16(exp_src_lo, 4);         \
 236 |      0 |   exp_src_hi = _mm256_srai_epi16(exp_src_hi, 4);
 237 |        |
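Note: FILTER_SRC is a weighted average with round-to-nearest: each 16-bit lane becomes (a*f0 + b*f1 + 8) >> 4 with f0 + f1 = 16. A worked check with the offset-2 filter (12, 4) (values are illustrative):

    /* round(0.75 * 100 + 0.25 * 50) = round(87.5) = 88 */
    /* (100 * 12 + 50 * 4 + 8) >> 4  = 1408 >> 4    = 88 */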
 238 |        | #define CALC_SUM_SSE_INSIDE_LOOP                          \
 239 |        |   /* expand each byte to 2 bytes */                       \
 240 |      0 |   exp_dst_lo = _mm256_unpacklo_epi8(dst_reg, zero_reg);   \
 241 |      0 |   exp_dst_hi = _mm256_unpackhi_epi8(dst_reg, zero_reg);   \
 242 |      0 |   /* source - dest */                                     \
 243 |      0 |   exp_src_lo = _mm256_sub_epi16(exp_src_lo, exp_dst_lo);  \
 244 |      0 |   exp_src_hi = _mm256_sub_epi16(exp_src_hi, exp_dst_hi);  \
 245 |      0 |   /* calculate sum */                                     \
 246 |      0 |   *sum_reg = _mm256_add_epi16(*sum_reg, exp_src_lo);      \
 247 |      0 |   exp_src_lo = _mm256_madd_epi16(exp_src_lo, exp_src_lo); \
 248 |      0 |   *sum_reg = _mm256_add_epi16(*sum_reg, exp_src_hi);      \
 249 |      0 |   exp_src_hi = _mm256_madd_epi16(exp_src_hi, exp_src_hi); \
 250 |      0 |   /* calculate sse */                                     \
 251 |      0 |   *sse_reg = _mm256_add_epi32(*sse_reg, exp_src_lo);      \
 252 |      0 |   *sse_reg = _mm256_add_epi32(*sse_reg, exp_src_hi);
 253 |        |
 254 |        | // final calculation of sum and sse
 255 |        | #define CALC_SUM_AND_SSE                                                   \
 256 |      0 |   res_cmp = _mm256_cmpgt_epi16(zero_reg, sum_reg);                         \
 257 |      0 |   sse_reg_hi = _mm256_srli_si256(sse_reg, 8);                              \
 258 |      0 |   sum_reg_lo = _mm256_unpacklo_epi16(sum_reg, res_cmp);                    \
 259 |      0 |   sum_reg_hi = _mm256_unpackhi_epi16(sum_reg, res_cmp);                    \
 260 |      0 |   sse_reg = _mm256_add_epi32(sse_reg, sse_reg_hi);                         \
 261 |      0 |   sum_reg = _mm256_add_epi32(sum_reg_lo, sum_reg_hi);                      \
 262 |      0 |                                                                            \
 263 |      0 |   sse_reg_hi = _mm256_srli_si256(sse_reg, 4);                              \
 264 |      0 |   sum_reg_hi = _mm256_srli_si256(sum_reg, 8);                              \
 265 |      0 |                                                                            \
 266 |      0 |   sse_reg = _mm256_add_epi32(sse_reg, sse_reg_hi);                         \
 267 |      0 |   sum_reg = _mm256_add_epi32(sum_reg, sum_reg_hi);                         \
 268 |      0 |   *((int *)sse) = _mm_cvtsi128_si32(_mm256_castsi256_si128(sse_reg)) +     \
 269 |      0 |                   _mm_cvtsi128_si32(_mm256_extractf128_si256(sse_reg, 1)); \
 270 |      0 |   sum_reg_hi = _mm256_srli_si256(sum_reg, 4);                              \
 271 |      0 |   sum_reg = _mm256_add_epi32(sum_reg, sum_reg_hi);                         \
 272 |      0 |   sum = _mm_cvtsi128_si32(_mm256_castsi256_si128(sum_reg)) +               \
 273 |      0 |         _mm_cvtsi128_si32(_mm256_extractf128_si256(sum_reg, 1));
 274 |        |
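Note: the macro widens the signed 16-bit sums to 32 bits by hand: _mm256_cmpgt_epi16(zero_reg, sum_reg) produces 0xFFFF exactly in the negative lanes, and unpacking each value against that mask yields the two's-complement 32-bit equivalent. Scalar sketch of the widening step:

    #include <stdint.h>

    /* Manual sign extension, as the unpack-with-mask performs it per lane. */
    static int32_t widen_i16(int16_t v) {
      const uint16_t mask = (0 > v) ? 0xFFFF : 0x0000;        /* cmpgt_epi16 */
      return (int32_t)(((uint32_t)mask << 16) | (uint16_t)v); /* unpack lo/hi */
    }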
 275 |        | static INLINE void spv32_x0_y0(const uint8_t *src, int src_stride,
 276 |        |                                const uint8_t *dst, int dst_stride,
 277 |        |                                const uint8_t *second_pred, int second_stride,
 278 |        |                                int do_sec, int height, __m256i *sum_reg,
 279 |      0 |                                __m256i *sse_reg) {
 280 |      0 |   const __m256i zero_reg = _mm256_setzero_si256();
 281 |      0 |   __m256i exp_src_lo, exp_src_hi, exp_dst_lo, exp_dst_hi;
 282 |      0 |   int i;
 283 |      0 |   for (i = 0; i < height; i++) {
 284 |      0 |     const __m256i dst_reg = _mm256_loadu_si256((__m256i const *)dst);
 285 |      0 |     const __m256i src_reg = _mm256_loadu_si256((__m256i const *)src);
 286 |      0 |     if (do_sec) {
 287 |      0 |       const __m256i sec_reg = _mm256_loadu_si256((__m256i const *)second_pred);
 288 |      0 |       const __m256i avg_reg = _mm256_avg_epu8(src_reg, sec_reg);
 289 |      0 |       exp_src_lo = _mm256_unpacklo_epi8(avg_reg, zero_reg);
 290 |      0 |       exp_src_hi = _mm256_unpackhi_epi8(avg_reg, zero_reg);
 291 |      0 |       second_pred += second_stride;
 292 |      0 |     } else {
 293 |      0 |       exp_src_lo = _mm256_unpacklo_epi8(src_reg, zero_reg);
 294 |      0 |       exp_src_hi = _mm256_unpackhi_epi8(src_reg, zero_reg);
 295 |      0 |     }
 296 |      0 |     CALC_SUM_SSE_INSIDE_LOOP
 297 |      0 |     src += src_stride;
 298 |      0 |     dst += dst_stride;
 299 |      0 |   }
 300 |      0 | }
 301 |        |
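Note: when do_sec is set, the source block is first blended with second_pred for compound prediction; _mm256_avg_epu8 is a rounded unsigned average per byte. Scalar equivalent of one lane (a sketch):

    #include <stdint.h>

    /* Per-byte behaviour of _mm256_avg_epu8. */
    static uint8_t avg_epu8_scalar(uint8_t a, uint8_t b) {
      return (uint8_t)(((unsigned)a + b + 1) >> 1);
    }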
 302 |        | // (x == 0, y == 4) or (x == 4, y == 0).  sstep determines the direction.
 303 |        | static INLINE void spv32_half_zero(const uint8_t *src, int src_stride,
 304 |        |                                    const uint8_t *dst, int dst_stride,
 305 |        |                                    const uint8_t *second_pred,
 306 |        |                                    int second_stride, int do_sec, int height,
 307 |        |                                    __m256i *sum_reg, __m256i *sse_reg,
 308 |      0 |                                    int sstep) {
 309 |      0 |   const __m256i zero_reg = _mm256_setzero_si256();
 310 |      0 |   __m256i exp_src_lo, exp_src_hi, exp_dst_lo, exp_dst_hi;
 311 |      0 |   int i;
 312 |      0 |   for (i = 0; i < height; i++) {
 313 |      0 |     const __m256i dst_reg = _mm256_loadu_si256((__m256i const *)dst);
 314 |      0 |     const __m256i src_0 = _mm256_loadu_si256((__m256i const *)src);
 315 |      0 |     const __m256i src_1 = _mm256_loadu_si256((__m256i const *)(src + sstep));
 316 |      0 |     const __m256i src_avg = _mm256_avg_epu8(src_0, src_1);
 317 |      0 |     if (do_sec) {
 318 |      0 |       const __m256i sec_reg = _mm256_loadu_si256((__m256i const *)second_pred);
 319 |      0 |       const __m256i avg_reg = _mm256_avg_epu8(src_avg, sec_reg);
 320 |      0 |       exp_src_lo = _mm256_unpacklo_epi8(avg_reg, zero_reg);
 321 |      0 |       exp_src_hi = _mm256_unpackhi_epi8(avg_reg, zero_reg);
 322 |      0 |       second_pred += second_stride;
 323 |      0 |     } else {
 324 |      0 |       exp_src_lo = _mm256_unpacklo_epi8(src_avg, zero_reg);
 325 |      0 |       exp_src_hi = _mm256_unpackhi_epi8(src_avg, zero_reg);
 326 |      0 |     }
 327 |      0 |     CALC_SUM_SSE_INSIDE_LOOP
 328 |      0 |     src += src_stride;
 329 |      0 |     dst += dst_stride;
 330 |      0 |   }
 331 |      0 | }
 332 |        |
 333 |        | static INLINE void spv32_x0_y4(const uint8_t *src, int src_stride,
 334 |        |                                const uint8_t *dst, int dst_stride,
 335 |        |                                const uint8_t *second_pred, int second_stride,
 336 |        |                                int do_sec, int height, __m256i *sum_reg,
 337 |      0 |                                __m256i *sse_reg) {
 338 |      0 |   spv32_half_zero(src, src_stride, dst, dst_stride, second_pred, second_stride,
 339 |      0 |                   do_sec, height, sum_reg, sse_reg, src_stride);
 340 |      0 | }
 341 |        |
 342 |        | static INLINE void spv32_x4_y0(const uint8_t *src, int src_stride,
 343 |        |                                const uint8_t *dst, int dst_stride,
 344 |        |                                const uint8_t *second_pred, int second_stride,
 345 |        |                                int do_sec, int height, __m256i *sum_reg,
 346 |      0 |                                __m256i *sse_reg) {
 347 |      0 |   spv32_half_zero(src, src_stride, dst, dst_stride, second_pred, second_stride,
 348 |      0 |                   do_sec, height, sum_reg, sse_reg, 1);
 349 |      0 | }
 350 |        |
 351 |        | static INLINE void spv32_x4_y4(const uint8_t *src, int src_stride,
 352 |        |                                const uint8_t *dst, int dst_stride,
 353 |        |                                const uint8_t *second_pred, int second_stride,
 354 |        |                                int do_sec, int height, __m256i *sum_reg,
 355 |      0 |                                __m256i *sse_reg) {
 356 |      0 |   const __m256i zero_reg = _mm256_setzero_si256();
 357 |      0 |   const __m256i src_a = _mm256_loadu_si256((__m256i const *)src);
 358 |      0 |   const __m256i src_b = _mm256_loadu_si256((__m256i const *)(src + 1));
 359 |      0 |   __m256i prev_src_avg = _mm256_avg_epu8(src_a, src_b);
 360 |      0 |   __m256i exp_src_lo, exp_src_hi, exp_dst_lo, exp_dst_hi;
 361 |      0 |   int i;
 362 |      0 |   src += src_stride;
 363 |      0 |   for (i = 0; i < height; i++) {
 364 |      0 |     const __m256i dst_reg = _mm256_loadu_si256((__m256i const *)dst);
 365 |      0 |     const __m256i src_0 = _mm256_loadu_si256((__m256i const *)(src));
 366 |      0 |     const __m256i src_1 = _mm256_loadu_si256((__m256i const *)(src + 1));
 367 |      0 |     const __m256i src_avg = _mm256_avg_epu8(src_0, src_1);
 368 |      0 |     const __m256i current_avg = _mm256_avg_epu8(prev_src_avg, src_avg);
 369 |      0 |     prev_src_avg = src_avg;
 370 |        |
 371 |      0 |     if (do_sec) {
 372 |      0 |       const __m256i sec_reg = _mm256_loadu_si256((__m256i const *)second_pred);
 373 |      0 |       const __m256i avg_reg = _mm256_avg_epu8(current_avg, sec_reg);
 374 |      0 |       exp_src_lo = _mm256_unpacklo_epi8(avg_reg, zero_reg);
 375 |      0 |       exp_src_hi = _mm256_unpackhi_epi8(avg_reg, zero_reg);
 376 |      0 |       second_pred += second_stride;
 377 |      0 |     } else {
 378 |      0 |       exp_src_lo = _mm256_unpacklo_epi8(current_avg, zero_reg);
 379 |      0 |       exp_src_hi = _mm256_unpackhi_epi8(current_avg, zero_reg);
 380 |      0 |     }
 381 |        |     // accumulate this row's sum and sse
 382 |      0 |     CALC_SUM_SSE_INSIDE_LOOP
 383 |      0 |     dst += dst_stride;
 384 |      0 |     src += src_stride;
 385 |      0 |   }
 386 |      0 | }
 387 |        |
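Note: at the half-pel offset the (8, 8) bilinear tap reduces exactly to a rounded byte average, since (8*a + 8*b + 8) >> 4 == (a + b + 1) >> 1, which is why this path uses _mm256_avg_epu8 twice instead of FILTER_SRC and keeps each row's horizontal average in prev_src_avg so it is computed only once. Scalar sketch of one 2-D half-pel output:

    #include <stdint.h>

    /* Half-pel in x then y via rounded averages (tl/tr/bl/br form a 2x2). */
    static uint8_t half_pel_2d(uint8_t tl, uint8_t tr, uint8_t bl, uint8_t br) {
      const uint8_t top = (uint8_t)((tl + tr + 1) >> 1); /* horizontal pass */
      const uint8_t bot = (uint8_t)((bl + br + 1) >> 1);
      return (uint8_t)((top + bot + 1) >> 1);            /* vertical pass   */
    }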
 388 |        | // (x == 0, y == bil) or (x == bil, y == 0).  sstep determines the direction.
 389 |        | static INLINE void spv32_bilin_zero(const uint8_t *src, int src_stride,
 390 |        |                                     const uint8_t *dst, int dst_stride,
 391 |        |                                     const uint8_t *second_pred,
 392 |        |                                     int second_stride, int do_sec, int height,
 393 |        |                                     __m256i *sum_reg, __m256i *sse_reg,
 394 |      0 |                                     int offset, int sstep) {
 395 |      0 |   const __m256i zero_reg = _mm256_setzero_si256();
 396 |      0 |   const __m256i pw8 = _mm256_set1_epi16(8);
 397 |      0 |   const __m256i filter = _mm256_load_si256(
 398 |      0 |       (__m256i const *)(bilinear_filters_avx2 + (offset << 5)));
 399 |      0 |   __m256i exp_src_lo, exp_src_hi, exp_dst_lo, exp_dst_hi;
 400 |      0 |   int i;
 401 |      0 |   for (i = 0; i < height; i++) {
 402 |      0 |     const __m256i dst_reg = _mm256_loadu_si256((__m256i const *)dst);
 403 |      0 |     const __m256i src_0 = _mm256_loadu_si256((__m256i const *)src);
 404 |      0 |     const __m256i src_1 = _mm256_loadu_si256((__m256i const *)(src + sstep));
 405 |      0 |     exp_src_lo = _mm256_unpacklo_epi8(src_0, src_1);
 406 |      0 |     exp_src_hi = _mm256_unpackhi_epi8(src_0, src_1);
 407 |        |
 408 |      0 |     FILTER_SRC(filter)
 409 |      0 |     if (do_sec) {
 410 |      0 |       const __m256i sec_reg = _mm256_loadu_si256((__m256i const *)second_pred);
 411 |      0 |       const __m256i exp_src = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
 412 |      0 |       const __m256i avg_reg = _mm256_avg_epu8(exp_src, sec_reg);
 413 |      0 |       second_pred += second_stride;
 414 |      0 |       exp_src_lo = _mm256_unpacklo_epi8(avg_reg, zero_reg);
 415 |      0 |       exp_src_hi = _mm256_unpackhi_epi8(avg_reg, zero_reg);
 416 |      0 |     }
 417 |      0 |     CALC_SUM_SSE_INSIDE_LOOP
 418 |      0 |     src += src_stride;
 419 |      0 |     dst += dst_stride;
 420 |      0 |   }
 421 |      0 | }
 422 |        |
 423 |        | static INLINE void spv32_x0_yb(const uint8_t *src, int src_stride,
 424 |        |                                const uint8_t *dst, int dst_stride,
 425 |        |                                const uint8_t *second_pred, int second_stride,
 426 |        |                                int do_sec, int height, __m256i *sum_reg,
 427 |      0 |                                __m256i *sse_reg, int y_offset) {
 428 |      0 |   spv32_bilin_zero(src, src_stride, dst, dst_stride, second_pred, second_stride,
 429 |      0 |                    do_sec, height, sum_reg, sse_reg, y_offset, src_stride);
 430 |      0 | }
 431 |        |
 432 |        | static INLINE void spv32_xb_y0(const uint8_t *src, int src_stride,
 433 |        |                                const uint8_t *dst, int dst_stride,
 434 |        |                                const uint8_t *second_pred, int second_stride,
 435 |        |                                int do_sec, int height, __m256i *sum_reg,
 436 |      0 |                                __m256i *sse_reg, int x_offset) {
 437 |      0 |   spv32_bilin_zero(src, src_stride, dst, dst_stride, second_pred, second_stride,
 438 |      0 |                    do_sec, height, sum_reg, sse_reg, x_offset, 1);
 439 |      0 | }
 440 |        |
 441 |        | static INLINE void spv32_x4_yb(const uint8_t *src, int src_stride,
 442 |        |                                const uint8_t *dst, int dst_stride,
 443 |        |                                const uint8_t *second_pred, int second_stride,
 444 |        |                                int do_sec, int height, __m256i *sum_reg,
 445 |      0 |                                __m256i *sse_reg, int y_offset) {
 446 |      0 |   const __m256i zero_reg = _mm256_setzero_si256();
 447 |      0 |   const __m256i pw8 = _mm256_set1_epi16(8);
 448 |      0 |   const __m256i filter = _mm256_load_si256(
 449 |      0 |       (__m256i const *)(bilinear_filters_avx2 + (y_offset << 5)));
 450 |      0 |   const __m256i src_a = _mm256_loadu_si256((__m256i const *)src);
 451 |      0 |   const __m256i src_b = _mm256_loadu_si256((__m256i const *)(src + 1));
 452 |      0 |   __m256i prev_src_avg = _mm256_avg_epu8(src_a, src_b);
 453 |      0 |   __m256i exp_src_lo, exp_src_hi, exp_dst_lo, exp_dst_hi;
 454 |      0 |   int i;
 455 |      0 |   src += src_stride;
 456 |      0 |   for (i = 0; i < height; i++) {
 457 |      0 |     const __m256i dst_reg = _mm256_loadu_si256((__m256i const *)dst);
 458 |      0 |     const __m256i src_0 = _mm256_loadu_si256((__m256i const *)src);
 459 |      0 |     const __m256i src_1 = _mm256_loadu_si256((__m256i const *)(src + 1));
 460 |      0 |     const __m256i src_avg = _mm256_avg_epu8(src_0, src_1);
 461 |      0 |     exp_src_lo = _mm256_unpacklo_epi8(prev_src_avg, src_avg);
 462 |      0 |     exp_src_hi = _mm256_unpackhi_epi8(prev_src_avg, src_avg);
 463 |      0 |     prev_src_avg = src_avg;
 464 |        |
 465 |      0 |     FILTER_SRC(filter)
 466 |      0 |     if (do_sec) {
 467 |      0 |       const __m256i sec_reg = _mm256_loadu_si256((__m256i const *)second_pred);
 468 |      0 |       const __m256i exp_src_avg = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
 469 |      0 |       const __m256i avg_reg = _mm256_avg_epu8(exp_src_avg, sec_reg);
 470 |      0 |       exp_src_lo = _mm256_unpacklo_epi8(avg_reg, zero_reg);
 471 |      0 |       exp_src_hi = _mm256_unpackhi_epi8(avg_reg, zero_reg);
 472 |      0 |       second_pred += second_stride;
 473 |      0 |     }
 474 |      0 |     CALC_SUM_SSE_INSIDE_LOOP
 475 |      0 |     dst += dst_stride;
 476 |      0 |     src += src_stride;
 477 |      0 |   }
 478 |      0 | }
 479 |        |
 480 |        | static INLINE void spv32_xb_y4(const uint8_t *src, int src_stride,
 481 |        |                                const uint8_t *dst, int dst_stride,
 482 |        |                                const uint8_t *second_pred, int second_stride,
 483 |        |                                int do_sec, int height, __m256i *sum_reg,
 484 |      0 |                                __m256i *sse_reg, int x_offset) {
 485 |      0 |   const __m256i zero_reg = _mm256_setzero_si256();
 486 |      0 |   const __m256i pw8 = _mm256_set1_epi16(8);
 487 |      0 |   const __m256i filter = _mm256_load_si256(
 488 |      0 |       (__m256i const *)(bilinear_filters_avx2 + (x_offset << 5)));
 489 |      0 |   const __m256i src_a = _mm256_loadu_si256((__m256i const *)src);
 490 |      0 |   const __m256i src_b = _mm256_loadu_si256((__m256i const *)(src + 1));
 491 |      0 |   __m256i exp_src_lo, exp_src_hi, exp_dst_lo, exp_dst_hi;
 492 |      0 |   __m256i src_reg, src_pack;
 493 |      0 |   int i;
 494 |      0 |   exp_src_lo = _mm256_unpacklo_epi8(src_a, src_b);
 495 |      0 |   exp_src_hi = _mm256_unpackhi_epi8(src_a, src_b);
 496 |      0 |   FILTER_SRC(filter)
 497 |        |   // pack the filtered 16-bit values back to 8 bits in each lane
 498 |      0 |   src_pack = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
 499 |        |
 500 |      0 |   src += src_stride;
 501 |      0 |   for (i = 0; i < height; i++) {
 502 |      0 |     const __m256i dst_reg = _mm256_loadu_si256((__m256i const *)dst);
 503 |      0 |     const __m256i src_0 = _mm256_loadu_si256((__m256i const *)src);
 504 |      0 |     const __m256i src_1 = _mm256_loadu_si256((__m256i const *)(src + 1));
 505 |      0 |     exp_src_lo = _mm256_unpacklo_epi8(src_0, src_1);
 506 |      0 |     exp_src_hi = _mm256_unpackhi_epi8(src_0, src_1);
 507 |        |
 508 |      0 |     FILTER_SRC(filter)
 509 |        |
 510 |      0 |     src_reg = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
 511 |        |     // average the previous pack with the current one
 512 |      0 |     src_pack = _mm256_avg_epu8(src_pack, src_reg);
 513 |        |
 514 |      0 |     if (do_sec) {
 515 |      0 |       const __m256i sec_reg = _mm256_loadu_si256((__m256i const *)second_pred);
 516 |      0 |       const __m256i avg_pack = _mm256_avg_epu8(src_pack, sec_reg);
 517 |      0 |       exp_src_lo = _mm256_unpacklo_epi8(avg_pack, zero_reg);
 518 |      0 |       exp_src_hi = _mm256_unpackhi_epi8(avg_pack, zero_reg);
 519 |      0 |       second_pred += second_stride;
 520 |      0 |     } else {
 521 |      0 |       exp_src_lo = _mm256_unpacklo_epi8(src_pack, zero_reg);
 522 |      0 |       exp_src_hi = _mm256_unpackhi_epi8(src_pack, zero_reg);
 523 |      0 |     }
 524 |      0 |     CALC_SUM_SSE_INSIDE_LOOP
 525 |      0 |     src_pack = src_reg;
 526 |      0 |     dst += dst_stride;
 527 |      0 |     src += src_stride;
 528 |      0 |   }
 529 |      0 | }
 530 |        |
 531 |        | static INLINE void spv32_xb_yb(const uint8_t *src, int src_stride,
 532 |        |                                const uint8_t *dst, int dst_stride,
 533 |        |                                const uint8_t *second_pred, int second_stride,
 534 |        |                                int do_sec, int height, __m256i *sum_reg,
 535 |      0 |                                __m256i *sse_reg, int x_offset, int y_offset) {
 536 |      0 |   const __m256i zero_reg = _mm256_setzero_si256();
 537 |      0 |   const __m256i pw8 = _mm256_set1_epi16(8);
 538 |      0 |   const __m256i xfilter = _mm256_load_si256(
 539 |      0 |       (__m256i const *)(bilinear_filters_avx2 + (x_offset << 5)));
 540 |      0 |   const __m256i yfilter = _mm256_load_si256(
 541 |      0 |       (__m256i const *)(bilinear_filters_avx2 + (y_offset << 5)));
 542 |      0 |   const __m256i src_a = _mm256_loadu_si256((__m256i const *)src);
 543 |      0 |   const __m256i src_b = _mm256_loadu_si256((__m256i const *)(src + 1));
 544 |      0 |   __m256i exp_src_lo, exp_src_hi, exp_dst_lo, exp_dst_hi;
 545 |      0 |   __m256i prev_src_pack, src_pack;
 546 |      0 |   int i;
 547 |      0 |   exp_src_lo = _mm256_unpacklo_epi8(src_a, src_b);
 548 |      0 |   exp_src_hi = _mm256_unpackhi_epi8(src_a, src_b);
 549 |      0 |   FILTER_SRC(xfilter)
 550 |        |   // pack the filtered 16-bit values back to 8 bits in each lane
 551 |      0 |   prev_src_pack = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
 552 |      0 |   src += src_stride;
 553 |        |
 554 |      0 |   for (i = 0; i < height; i++) {
 555 |      0 |     const __m256i dst_reg = _mm256_loadu_si256((__m256i const *)dst);
 556 |      0 |     const __m256i src_0 = _mm256_loadu_si256((__m256i const *)src);
 557 |      0 |     const __m256i src_1 = _mm256_loadu_si256((__m256i const *)(src + 1));
 558 |      0 |     exp_src_lo = _mm256_unpacklo_epi8(src_0, src_1);
 559 |      0 |     exp_src_hi = _mm256_unpackhi_epi8(src_0, src_1);
 560 |        |
 561 |      0 |     FILTER_SRC(xfilter)
 562 |      0 |     src_pack = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
 563 |        |
 564 |        |     // interleave the previous pack with the current pack
 565 |      0 |     exp_src_lo = _mm256_unpacklo_epi8(prev_src_pack, src_pack);
 566 |      0 |     exp_src_hi = _mm256_unpackhi_epi8(prev_src_pack, src_pack);
 567 |        |
 568 |      0 |     FILTER_SRC(yfilter)
 569 |      0 |     if (do_sec) {
 570 |      0 |       const __m256i sec_reg = _mm256_loadu_si256((__m256i const *)second_pred);
 571 |      0 |       const __m256i exp_src = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
 572 |      0 |       const __m256i avg_reg = _mm256_avg_epu8(exp_src, sec_reg);
 573 |      0 |       exp_src_lo = _mm256_unpacklo_epi8(avg_reg, zero_reg);
 574 |      0 |       exp_src_hi = _mm256_unpackhi_epi8(avg_reg, zero_reg);
 575 |      0 |       second_pred += second_stride;
 576 |      0 |     }
 577 |        |
 578 |      0 |     prev_src_pack = src_pack;
 579 |        |
 580 |      0 |     CALC_SUM_SSE_INSIDE_LOOP
 581 |      0 |     dst += dst_stride;
 582 |      0 |     src += src_stride;
 583 |      0 |   }
 584 |      0 | }
 585 |        |
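Note: the general case is a separable filter: each row is filtered horizontally with xfilter and packed back to bytes, then the current and previous packed rows are filtered vertically with yfilter, so every source row is read and x-filtered exactly once. Scalar sketch of one output pixel under the (16 - 2*o, 2*o) tap convention from the table above:

    #include <stdint.h>

    /* One pixel of the separable bilinear filter (row1 lies below row0). */
    static uint8_t bilinear_2d(const uint8_t *row0, const uint8_t *row1, int x,
                               int xo, int yo) {
      const int h0 = (row0[x] * (16 - 2 * xo) + row0[x + 1] * (2 * xo) + 8) >> 4;
      const int h1 = (row1[x] * (16 - 2 * xo) + row1[x + 1] * (2 * xo) + 8) >> 4;
      return (uint8_t)((h0 * (16 - 2 * yo) + h1 * (2 * yo) + 8) >> 4);
    }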
 586 |        | static INLINE int sub_pix_var32xh(const uint8_t *src, int src_stride,
 587 |        |                                   int x_offset, int y_offset,
 588 |        |                                   const uint8_t *dst, int dst_stride,
 589 |        |                                   const uint8_t *second_pred, int second_stride,
 590 |      0 |                                   int do_sec, int height, unsigned int *sse) {
 591 |      0 |   const __m256i zero_reg = _mm256_setzero_si256();
 592 |      0 |   __m256i sum_reg = _mm256_setzero_si256();
 593 |      0 |   __m256i sse_reg = _mm256_setzero_si256();
 594 |      0 |   __m256i sse_reg_hi, res_cmp, sum_reg_lo, sum_reg_hi;
 595 |      0 |   int sum;
 596 |        |   // x_offset = 0 and y_offset = 0
 597 |      0 |   if (x_offset == 0) {
 598 |      0 |     if (y_offset == 0) {
 599 |      0 |       spv32_x0_y0(src, src_stride, dst, dst_stride, second_pred, second_stride,
 600 |      0 |                   do_sec, height, &sum_reg, &sse_reg);
 601 |        |       // x_offset = 0 and y_offset = 4
 602 |      0 |     } else if (y_offset == 4) {
 603 |      0 |       spv32_x0_y4(src, src_stride, dst, dst_stride, second_pred, second_stride,
 604 |      0 |                   do_sec, height, &sum_reg, &sse_reg);
 605 |        |       // x_offset = 0 and y_offset = bilin interpolation
 606 |      0 |     } else {
 607 |      0 |       spv32_x0_yb(src, src_stride, dst, dst_stride, second_pred, second_stride,
 608 |      0 |                   do_sec, height, &sum_reg, &sse_reg, y_offset);
 609 |      0 |     }
 610 |        |     // x_offset = 4  and y_offset = 0
 611 |      0 |   } else if (x_offset == 4) {
 612 |      0 |     if (y_offset == 0) {
 613 |      0 |       spv32_x4_y0(src, src_stride, dst, dst_stride, second_pred, second_stride,
 614 |      0 |                   do_sec, height, &sum_reg, &sse_reg);
 615 |        |       // x_offset = 4  and y_offset = 4
 616 |      0 |     } else if (y_offset == 4) {
 617 |      0 |       spv32_x4_y4(src, src_stride, dst, dst_stride, second_pred, second_stride,
 618 |      0 |                   do_sec, height, &sum_reg, &sse_reg);
 619 |        |       // x_offset = 4  and y_offset = bilin interpolation
 620 |      0 |     } else {
 621 |      0 |       spv32_x4_yb(src, src_stride, dst, dst_stride, second_pred, second_stride,
 622 |      0 |                   do_sec, height, &sum_reg, &sse_reg, y_offset);
 623 |      0 |     }
 624 |        |     // x_offset = bilin interpolation and y_offset = 0
 625 |      0 |   } else {
 626 |      0 |     if (y_offset == 0) {
 627 |      0 |       spv32_xb_y0(src, src_stride, dst, dst_stride, second_pred, second_stride,
 628 |      0 |                   do_sec, height, &sum_reg, &sse_reg, x_offset);
 629 |        |       // x_offset = bilin interpolation and y_offset = 4
 630 |      0 |     } else if (y_offset == 4) {
 631 |      0 |       spv32_xb_y4(src, src_stride, dst, dst_stride, second_pred, second_stride,
 632 |      0 |                   do_sec, height, &sum_reg, &sse_reg, x_offset);
 633 |        |       // x_offset = bilin interpolation and y_offset = bilin interpolation
 634 |      0 |     } else {
 635 |      0 |       spv32_xb_yb(src, src_stride, dst, dst_stride, second_pred, second_stride,
 636 |      0 |                   do_sec, height, &sum_reg, &sse_reg, x_offset, y_offset);
 637 |      0 |     }
 638 |      0 |   }
 639 |      0 |   CALC_SUM_AND_SSE
 640 |      0 |   return sum;
 641 |      0 | }
 642 |        |
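Note: the nine spv32_* specializations come from classifying each axis offset into three cases, which is what the nested if/else above encodes. Sketch of the classification (names are illustrative):

    typedef enum { TAP_COPY, TAP_AVG, TAP_BILINEAR } tap_kind;

    /* o is an eighth-pel offset in 0..7: 0 = no filter, 4 = half-pel average,
     * anything else = the bilinear filter row (16 - 2*o, 2*o). */
    static tap_kind classify_offset(int o) {
      return (o == 0) ? TAP_COPY : (o == 4) ? TAP_AVG : TAP_BILINEAR;
    }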
 643 |        | static int sub_pixel_variance32xh_avx2(const uint8_t *src, int src_stride,
 644 |        |                                        int x_offset, int y_offset,
 645 |        |                                        const uint8_t *dst, int dst_stride,
 646 |      0 |                                        int height, unsigned int *sse) {
 647 |      0 |   return sub_pix_var32xh(src, src_stride, x_offset, y_offset, dst, dst_stride,
 648 |      0 |                          NULL, 0, 0, height, sse);
 649 |      0 | }
 650 |        |
 651 |        | static int sub_pixel_avg_variance32xh_avx2(const uint8_t *src, int src_stride,
 652 |        |                                            int x_offset, int y_offset,
 653 |        |                                            const uint8_t *dst, int dst_stride,
 654 |        |                                            const uint8_t *second_pred,
 655 |        |                                            int second_stride, int height,
 656 |      0 |                                            unsigned int *sse) {
 657 |      0 |   return sub_pix_var32xh(src, src_stride, x_offset, y_offset, dst, dst_stride,
 658 |      0 |                          second_pred, second_stride, 1, height, sse);
 659 |      0 | }
 660 |        |
 661 |        | typedef void (*get_var_avx2)(const uint8_t *src_ptr, int src_stride,
 662 |        |                              const uint8_t *ref_ptr, int ref_stride,
 663 |        |                              unsigned int *sse, int *sum);
 664 |        |
 665 |        | unsigned int vpx_variance8x4_avx2(const uint8_t *src_ptr, int src_stride,
 666 |        |                                   const uint8_t *ref_ptr, int ref_stride,
 667 |      0 |                                   unsigned int *sse) {
 668 |      0 |   __m256i vsse, vsum;
 669 |      0 |   int sum;
 670 |      0 |   variance8_avx2(src_ptr, src_stride, ref_ptr, ref_stride, 4, &vsse, &vsum);
 671 |      0 |   variance_final_from_16bit_sum_avx2(vsse, vsum, sse, &sum);
 672 |      0 |   return *sse - ((sum * sum) >> 5);
 673 |      0 | }
 674 |        |
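Note: every wrapper below computes variance as SSE - sum^2 / N, with the shift equal to log2 of the pixel count (8x4 = 32 pixels, hence >> 5; 64x64 = 4096, hence >> 12); since sum^2 is non-negative, the shift and the division agree. A scalar reference for the same quantity (a sketch, not the library's C fallback):

    #include <stdint.h>

    static unsigned int variance_scalar(const uint8_t *src, int src_stride,
                                        const uint8_t *ref, int ref_stride,
                                        int w, int h) {
      int64_t sum = 0, sse = 0;
      for (int y = 0; y < h; y++) {
        for (int x = 0; x < w; x++) {
          const int d = src[y * src_stride + x] - ref[y * ref_stride + x];
          sum += d;
          sse += d * d;
        }
      }
      return (unsigned int)(sse - (sum * sum) / (w * h)); /* SSE - sum^2/N */
    }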
 675 |        | unsigned int vpx_variance8x8_avx2(const uint8_t *src_ptr, int src_stride,
 676 |        |                                   const uint8_t *ref_ptr, int ref_stride,
 677 |   856k |                                   unsigned int *sse) {
 678 |   856k |   __m256i vsse, vsum;
 679 |   856k |   int sum;
 680 |   856k |   variance8_avx2(src_ptr, src_stride, ref_ptr, ref_stride, 8, &vsse, &vsum);
 681 |   856k |   variance_final_from_16bit_sum_avx2(vsse, vsum, sse, &sum);
 682 |   856k |   return *sse - ((sum * sum) >> 6);
 683 |   856k | }
 684 |        |
 685 |        | unsigned int vpx_variance8x16_avx2(const uint8_t *src_ptr, int src_stride,
 686 |        |                                    const uint8_t *ref_ptr, int ref_stride,
 687 |      0 |                                    unsigned int *sse) {
 688 |      0 |   __m256i vsse, vsum;
 689 |      0 |   int sum;
 690 |      0 |   variance8_avx2(src_ptr, src_stride, ref_ptr, ref_stride, 16, &vsse, &vsum);
 691 |      0 |   variance_final_from_16bit_sum_avx2(vsse, vsum, sse, &sum);
 692 |      0 |   return *sse - ((sum * sum) >> 7);
 693 |      0 | }
 694 |        |
 695 |        | unsigned int vpx_variance16x8_avx2(const uint8_t *src_ptr, int src_stride,
 696 |        |                                    const uint8_t *ref_ptr, int ref_stride,
 697 |      0 |                                    unsigned int *sse) {
 698 |      0 |   int sum;
 699 |      0 |   __m256i vsse, vsum;
 700 |      0 |   variance16_avx2(src_ptr, src_stride, ref_ptr, ref_stride, 8, &vsse, &vsum);
 701 |      0 |   variance_final_from_16bit_sum_avx2(vsse, vsum, sse, &sum);
 702 |      0 |   return *sse - (uint32_t)(((int64_t)sum * sum) >> 7);
 703 |      0 | }
 704 |        |
 705 |        | unsigned int vpx_variance16x16_avx2(const uint8_t *src_ptr, int src_stride,
 706 |        |                                     const uint8_t *ref_ptr, int ref_stride,
 707 |   715k |                                     unsigned int *sse) {
 708 |   715k |   int sum;
 709 |   715k |   __m256i vsse, vsum;
 710 |   715k |   variance16_avx2(src_ptr, src_stride, ref_ptr, ref_stride, 16, &vsse, &vsum);
 711 |   715k |   variance_final_from_16bit_sum_avx2(vsse, vsum, sse, &sum);
 712 |   715k |   return *sse - (uint32_t)(((int64_t)sum * sum) >> 8);
 713 |   715k | }
 714 |        |
 715 |        | unsigned int vpx_variance16x32_avx2(const uint8_t *src_ptr, int src_stride,
 716 |        |                                     const uint8_t *ref_ptr, int ref_stride,
 717 |      0 |                                     unsigned int *sse) {
 718 |      0 |   int sum;
 719 |      0 |   __m256i vsse, vsum;
 720 |      0 |   variance16_avx2(src_ptr, src_stride, ref_ptr, ref_stride, 32, &vsse, &vsum);
 721 |      0 |   variance_final_from_16bit_sum_avx2(vsse, vsum, sse, &sum);
 722 |      0 |   return *sse - (uint32_t)(((int64_t)sum * sum) >> 9);
 723 |      0 | }
 724 |        |
 725 |        | unsigned int vpx_variance32x16_avx2(const uint8_t *src_ptr, int src_stride,
 726 |        |                                     const uint8_t *ref_ptr, int ref_stride,
 727 |      0 |                                     unsigned int *sse) {
 728 |      0 |   int sum;
 729 |      0 |   __m256i vsse, vsum;
 730 |      0 |   variance32_avx2(src_ptr, src_stride, ref_ptr, ref_stride, 16, &vsse, &vsum);
 731 |      0 |   variance_final_from_16bit_sum_avx2(vsse, vsum, sse, &sum);
 732 |      0 |   return *sse - (uint32_t)(((int64_t)sum * sum) >> 9);
 733 |      0 | }
 734 |        |
 735 |        | unsigned int vpx_variance32x32_avx2(const uint8_t *src_ptr, int src_stride,
 736 |        |                                     const uint8_t *ref_ptr, int ref_stride,
 737 |      0 |                                     unsigned int *sse) {
 738 |      0 |   int sum;
 739 |      0 |   __m256i vsse, vsum;
 740 |      0 |   __m128i vsum_128;
 741 |      0 |   variance32_avx2(src_ptr, src_stride, ref_ptr, ref_stride, 32, &vsse, &vsum);
 742 |      0 |   vsum_128 = _mm_add_epi16(_mm256_castsi256_si128(vsum),
 743 |      0 |                            _mm256_extractf128_si256(vsum, 1));
 744 |      0 |   vsum_128 = _mm_add_epi32(_mm_cvtepi16_epi32(vsum_128),
 745 |      0 |                            _mm_cvtepi16_epi32(_mm_srli_si128(vsum_128, 8)));
 746 |      0 |   variance_final_from_32bit_sum_avx2(vsse, vsum_128, sse, &sum);
 747 |      0 |   return *sse - (uint32_t)(((int64_t)sum * sum) >> 10);
 748 |      0 | }
 749 |        |
 750 |        | unsigned int vpx_variance32x64_avx2(const uint8_t *src_ptr, int src_stride,
 751 |        |                                     const uint8_t *ref_ptr, int ref_stride,
 752 |      0 |                                     unsigned int *sse) {
 753 |      0 |   int sum;
 754 |      0 |   __m256i vsse, vsum;
 755 |      0 |   __m128i vsum_128;
 756 |      0 |   variance32_avx2(src_ptr, src_stride, ref_ptr, ref_stride, 64, &vsse, &vsum);
 757 |      0 |   vsum = sum_to_32bit_avx2(vsum);
 758 |      0 |   vsum_128 = _mm_add_epi32(_mm256_castsi256_si128(vsum),
 759 |      0 |                            _mm256_extractf128_si256(vsum, 1));
 760 |      0 |   variance_final_from_32bit_sum_avx2(vsse, vsum_128, sse, &sum);
 761 |      0 |   return *sse - (uint32_t)(((int64_t)sum * sum) >> 11);
 762 |      0 | }
 763 |        |
 764 |        | unsigned int vpx_variance64x32_avx2(const uint8_t *src_ptr, int src_stride,
 765 |        |                                     const uint8_t *ref_ptr, int ref_stride,
 766 |      0 |                                     unsigned int *sse) {
 767 |      0 |   __m256i vsse = _mm256_setzero_si256();
 768 |      0 |   __m256i vsum = _mm256_setzero_si256();
 769 |      0 |   __m128i vsum_128;
 770 |      0 |   int sum;
 771 |      0 |   variance64_avx2(src_ptr, src_stride, ref_ptr, ref_stride, 32, &vsse, &vsum);
 772 |      0 |   vsum = sum_to_32bit_avx2(vsum);
 773 |      0 |   vsum_128 = _mm_add_epi32(_mm256_castsi256_si128(vsum),
 774 |      0 |                            _mm256_extractf128_si256(vsum, 1));
 775 |      0 |   variance_final_from_32bit_sum_avx2(vsse, vsum_128, sse, &sum);
 776 |      0 |   return *sse - (uint32_t)(((int64_t)sum * sum) >> 11);
 777 |      0 | }
 778 |        |
 779 |        | unsigned int vpx_variance64x64_avx2(const uint8_t *src_ptr, int src_stride,
 780 |        |                                     const uint8_t *ref_ptr, int ref_stride,
 781 |      0 |                                     unsigned int *sse) {
 782 |      0 |   __m256i vsse = _mm256_setzero_si256();
 783 |      0 |   __m256i vsum = _mm256_setzero_si256();
 784 |      0 |   __m128i vsum_128;
 785 |      0 |   int sum;
 786 |      0 |   int i = 0;
 787 |        |
 788 |      0 |   for (i = 0; i < 2; i++) {
 789 |      0 |     __m256i vsum16;
 790 |      0 |     variance64_avx2(src_ptr + 32 * i * src_stride, src_stride,
 791 |      0 |                     ref_ptr + 32 * i * ref_stride, ref_stride, 32, &vsse,
 792 |      0 |                     &vsum16);
 793 |      0 |     vsum = _mm256_add_epi32(vsum, sum_to_32bit_avx2(vsum16));
 794 |      0 |   }
 795 |      0 |   vsum_128 = _mm_add_epi32(_mm256_castsi256_si128(vsum),
 796 |      0 |                            _mm256_extractf128_si256(vsum, 1));
 797 |      0 |   variance_final_from_32bit_sum_avx2(vsse, vsum_128, sse, &sum);
 798 |      0 |   return *sse - (unsigned int)(((int64_t)sum * sum) >> 12);
 799 |      0 | }
 800 |        |
 801 |        | unsigned int vpx_mse16x8_avx2(const uint8_t *src_ptr, int src_stride,
 802 |        |                               const uint8_t *ref_ptr, int ref_stride,
 803 |      0 |                               unsigned int *sse) {
 804 |      0 |   int sum;
 805 |      0 |   __m256i vsse, vsum;
 806 |      0 |   variance16_avx2(src_ptr, src_stride, ref_ptr, ref_stride, 8, &vsse, &vsum);
 807 |      0 |   variance_final_from_16bit_sum_avx2(vsse, vsum, sse, &sum);
 808 |      0 |   return *sse;
 809 |      0 | }
 810 |        |
 811 |        | unsigned int vpx_mse16x16_avx2(const uint8_t *src_ptr, int src_stride,
 812 |        |                                const uint8_t *ref_ptr, int ref_stride,
 813 |      0 |                                unsigned int *sse) {
 814 |      0 |   int sum;
 815 |      0 |   __m256i vsse, vsum;
 816 |      0 |   variance16_avx2(src_ptr, src_stride, ref_ptr, ref_stride, 16, &vsse, &vsum);
 817 |      0 |   variance_final_from_16bit_sum_avx2(vsse, vsum, sse, &sum);
 818 |      0 |   return *sse;
 819 |      0 | }
 820 |        |
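Note: the MSE entry points run the same kernels but return the raw SSE; the computed sum is simply discarded and the mean term sum^2/N is never subtracted. In scalar terms (a sketch):

    #include <stdint.h>

    static unsigned int mse_scalar(const uint8_t *src, int src_stride,
                                   const uint8_t *ref, int ref_stride,
                                   int w, int h) {
      int64_t sse = 0;
      for (int y = 0; y < h; y++)
        for (int x = 0; x < w; x++) {
          const int d = src[y * src_stride + x] - ref[y * ref_stride + x];
          sse += d * d; /* no - sum*sum/N correction */
        }
      return (unsigned int)sse;
    }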
 821 |        | unsigned int vpx_sub_pixel_variance64x64_avx2(
 822 |        |     const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset,
 823 |      0 |     const uint8_t *ref_ptr, int ref_stride, unsigned int *sse) {
 824 |      0 |   unsigned int sse1;
 825 |      0 |   const int se1 = sub_pixel_variance32xh_avx2(
 826 |      0 |       src_ptr, src_stride, x_offset, y_offset, ref_ptr, ref_stride, 64, &sse1);
 827 |      0 |   unsigned int sse2;
 828 |      0 |   const int se2 =
 829 |      0 |       sub_pixel_variance32xh_avx2(src_ptr + 32, src_stride, x_offset, y_offset,
 830 |      0 |                                   ref_ptr + 32, ref_stride, 64, &sse2);
 831 |      0 |   const int se = se1 + se2;
 832 |      0 |   *sse = sse1 + sse2;
 833 |      0 |   return *sse - (uint32_t)(((int64_t)se * se) >> 12);
 834 |      0 | }
 835 |        |
 836 |        | unsigned int vpx_sub_pixel_variance32x32_avx2(
 837 |        |     const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset,
 838 |      0 |     const uint8_t *ref_ptr, int ref_stride, unsigned int *sse) {
 839 |      0 |   const int se = sub_pixel_variance32xh_avx2(
 840 |      0 |       src_ptr, src_stride, x_offset, y_offset, ref_ptr, ref_stride, 32, sse);
 841 |      0 |   return *sse - (uint32_t)(((int64_t)se * se) >> 10);
 842 |      0 | }
 843 |        |
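Note: the 64-wide sub-pixel paths run the 32-wide kernel on the left and right halves; raw sums and SSEs are additive, so only the final mean correction must use the combined values (N = 4096, hence >> 12). Combination sketch:

    #include <stdint.h>

    /* Merging two 32xh halves into one 64x64 variance (sketch). */
    static unsigned int combine_halves(unsigned int sse1, int se1,
                                       unsigned int sse2, int se2) {
      const unsigned int sse = sse1 + sse2; /* SSE adds across halves   */
      const int se = se1 + se2;             /* signed sums add likewise */
      return sse - (unsigned int)(((int64_t)se * se) >> 12);
    }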
 844 |        | unsigned int vpx_sub_pixel_avg_variance64x64_avx2(
 845 |        |     const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset,
 846 |        |     const uint8_t *ref_ptr, int ref_stride, unsigned int *sse,
 847 |      0 |     const uint8_t *second_pred) {
 848 |      0 |   unsigned int sse1;
 849 |      0 |   const int se1 = sub_pixel_avg_variance32xh_avx2(src_ptr, src_stride, x_offset,
 850 |      0 |                                                   y_offset, ref_ptr, ref_stride,
 851 |      0 |                                                   second_pred, 64, 64, &sse1);
 852 |      0 |   unsigned int sse2;
 853 |      0 |   const int se2 = sub_pixel_avg_variance32xh_avx2(
 854 |      0 |       src_ptr + 32, src_stride, x_offset, y_offset, ref_ptr + 32, ref_stride,
 855 |      0 |       second_pred + 32, 64, 64, &sse2);
 856 |      0 |   const int se = se1 + se2;
 857 |        |
 858 |      0 |   *sse = sse1 + sse2;
 859 |        |
 860 |      0 |   return *sse - (uint32_t)(((int64_t)se * se) >> 12);
 861 |      0 | }
 862 |        |
 863 |        | unsigned int vpx_sub_pixel_avg_variance32x32_avx2(
 864 |        |     const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset,
 865 |        |     const uint8_t *ref_ptr, int ref_stride, unsigned int *sse,
 866 |      0 |     const uint8_t *second_pred) {
 867 |        |   // Process 32 elements in parallel.
 868 |      0 |   const int se = sub_pixel_avg_variance32xh_avx2(src_ptr, src_stride, x_offset,
 869 |      0 |                                                  y_offset, ref_ptr, ref_stride,
 870 |      0 |                                                  second_pred, 32, 32, sse);
 871 |      0 |   return *sse - (uint32_t)(((int64_t)se * se) >> 10);
 872 |      0 | }