Coverage Report

Created: 2026-02-14 07:00

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/aom/av1/common/x86/warp_plane_avx2.c
Line
Count
Source
1
/*
2
 * Copyright (c) 2019, Alliance for Open Media. All rights reserved.
3
 *
4
 * This source code is subject to the terms of the BSD 2 Clause License and
5
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
6
 * was not distributed with this source code in the LICENSE file, you can
7
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
8
 * Media Patent License 1.0 was not distributed with this source code in the
9
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
10
 */
11
12
#include <immintrin.h>
13
#include "config/av1_rtcd.h"
14
#include "av1/common/warped_motion.h"
15
#include "aom_dsp/x86/synonyms.h"
16
17
#if !CONFIG_HIGHWAY
18
19
DECLARE_ALIGNED(32, static const uint8_t, shuffle_alpha0_mask01_avx2[32]) = {
20
  0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,
21
  0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1
22
};
23
24
DECLARE_ALIGNED(32, static const uint8_t, shuffle_alpha0_mask23_avx2[32]) = {
25
  2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3,
26
  2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3
27
};
28
29
DECLARE_ALIGNED(32, static const uint8_t, shuffle_alpha0_mask45_avx2[32]) = {
30
  4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5,
31
  4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5
32
};
33
34
DECLARE_ALIGNED(32, static const uint8_t, shuffle_alpha0_mask67_avx2[32]) = {
35
  6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7,
36
  6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7
37
};
38
39
DECLARE_ALIGNED(32, static const uint8_t, shuffle_gamma0_mask0_avx2[32]) = {
40
  0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3,
41
  0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3
42
};
43
44
DECLARE_ALIGNED(32, static const uint8_t, shuffle_gamma0_mask1_avx2[32]) = {
45
  4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7,
46
  4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7
47
};
48
49
DECLARE_ALIGNED(32, static const uint8_t, shuffle_gamma0_mask2_avx2[32]) = {
50
  8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11,
51
  8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11
52
};
53
54
DECLARE_ALIGNED(32, static const uint8_t, shuffle_gamma0_mask3_avx2[32]) = {
55
  12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15,
56
  12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15
57
};
58
59
DECLARE_ALIGNED(32, static const uint8_t,
60
                shuffle_src0[32]) = { 0, 2, 2, 4, 4, 6, 6, 8, 1, 3, 3,
61
                                      5, 5, 7, 7, 9, 0, 2, 2, 4, 4, 6,
62
                                      6, 8, 1, 3, 3, 5, 5, 7, 7, 9 };
63
64
DECLARE_ALIGNED(32, static const uint8_t,
65
                shuffle_src1[32]) = { 4,  6,  6,  8,  8,  10, 10, 12, 5,  7, 7,
66
                                      9,  9,  11, 11, 13, 4,  6,  6,  8,  8, 10,
67
                                      10, 12, 5,  7,  7,  9,  9,  11, 11, 13 };
68
69
DECLARE_ALIGNED(32, static const uint8_t,
70
                shuffle_src2[32]) = { 1, 3, 3, 5, 5,  7, 7, 9, 2, 4, 4,
71
                                      6, 6, 8, 8, 10, 1, 3, 3, 5, 5, 7,
72
                                      7, 9, 2, 4, 4,  6, 6, 8, 8, 10 };
73
74
DECLARE_ALIGNED(32, static const uint8_t,
75
                shuffle_src3[32]) = { 5,  7,  7,  9,  9,  11, 11, 13, 6,  8, 8,
76
                                      10, 10, 12, 12, 14, 5,  7,  7,  9,  9, 11,
77
                                      11, 13, 6,  8,  8,  10, 10, 12, 12, 14 };
78
79
static inline void filter_src_pixels_avx2(const __m256i src, __m256i *horz_out,
80
                                          __m256i *coeff,
81
                                          const __m256i *shuffle_src,
82
                                          const __m256i *round_const,
83
14.4M
                                          const __m128i *shift, int row) {
84
14.4M
  const __m256i src_0 = _mm256_shuffle_epi8(src, shuffle_src[0]);
85
14.4M
  const __m256i src_1 = _mm256_shuffle_epi8(src, shuffle_src[1]);
86
14.4M
  const __m256i src_2 = _mm256_shuffle_epi8(src, shuffle_src[2]);
87
14.4M
  const __m256i src_3 = _mm256_shuffle_epi8(src, shuffle_src[3]);
88
89
14.4M
  const __m256i res_02 = _mm256_maddubs_epi16(src_0, coeff[0]);
90
14.4M
  const __m256i res_46 = _mm256_maddubs_epi16(src_1, coeff[1]);
91
14.4M
  const __m256i res_13 = _mm256_maddubs_epi16(src_2, coeff[2]);
92
14.4M
  const __m256i res_57 = _mm256_maddubs_epi16(src_3, coeff[3]);
93
94
14.4M
  const __m256i res_even = _mm256_add_epi16(res_02, res_46);
95
14.4M
  const __m256i res_odd = _mm256_add_epi16(res_13, res_57);
96
14.4M
  const __m256i res =
97
14.4M
      _mm256_add_epi16(_mm256_add_epi16(res_even, res_odd), *round_const);
98
14.4M
  horz_out[row] = _mm256_srl_epi16(res, *shift);
99
14.4M
}
100
101
static inline void prepare_horizontal_filter_coeff_avx2(int alpha, int beta,
102
                                                        int sx,
103
3.07M
                                                        __m256i *coeff) {
104
3.07M
  __m128i tmp_0 = _mm_loadl_epi64(
105
3.07M
      (__m128i *)&av1_filter_8bit[((unsigned)(sx + 0 * alpha)) >>
106
3.07M
                                  WARPEDDIFF_PREC_BITS]);
107
3.07M
  __m128i tmp_1 = _mm_loadl_epi64(
108
3.07M
      (__m128i *)&av1_filter_8bit[((unsigned)(sx + 1 * alpha)) >>
109
3.07M
                                  WARPEDDIFF_PREC_BITS]);
110
3.07M
  __m128i tmp_2 = _mm_loadl_epi64(
111
3.07M
      (__m128i *)&av1_filter_8bit[((unsigned)(sx + 2 * alpha)) >>
112
3.07M
                                  WARPEDDIFF_PREC_BITS]);
113
3.07M
  __m128i tmp_3 = _mm_loadl_epi64(
114
3.07M
      (__m128i *)&av1_filter_8bit[((unsigned)(sx + 3 * alpha)) >>
115
3.07M
                                  WARPEDDIFF_PREC_BITS]);
116
117
3.07M
  __m128i tmp_4 = _mm_loadl_epi64(
118
3.07M
      (__m128i *)&av1_filter_8bit[((unsigned)(sx + 4 * alpha)) >>
119
3.07M
                                  WARPEDDIFF_PREC_BITS]);
120
3.07M
  __m128i tmp_5 = _mm_loadl_epi64(
121
3.07M
      (__m128i *)&av1_filter_8bit[((unsigned)(sx + 5 * alpha)) >>
122
3.07M
                                  WARPEDDIFF_PREC_BITS]);
123
3.07M
  __m128i tmp_6 = _mm_loadl_epi64(
124
3.07M
      (__m128i *)&av1_filter_8bit[((unsigned)(sx + 6 * alpha)) >>
125
3.07M
                                  WARPEDDIFF_PREC_BITS]);
126
3.07M
  __m128i tmp_7 = _mm_loadl_epi64(
127
3.07M
      (__m128i *)&av1_filter_8bit[((unsigned)(sx + 7 * alpha)) >>
128
3.07M
                                  WARPEDDIFF_PREC_BITS]);
129
130
3.07M
  __m256i tmp0_256 = _mm256_castsi128_si256(tmp_0);
131
3.07M
  __m256i tmp2_256 = _mm256_castsi128_si256(tmp_2);
132
3.07M
  __m256i tmp1_256 = _mm256_castsi128_si256(tmp_1);
133
3.07M
  __m256i tmp3_256 = _mm256_castsi128_si256(tmp_3);
134
135
3.07M
  __m256i tmp4_256 = _mm256_castsi128_si256(tmp_4);
136
3.07M
  __m256i tmp6_256 = _mm256_castsi128_si256(tmp_6);
137
3.07M
  __m256i tmp5_256 = _mm256_castsi128_si256(tmp_5);
138
3.07M
  __m256i tmp7_256 = _mm256_castsi128_si256(tmp_7);
139
140
3.07M
  __m128i tmp_8 = _mm_loadl_epi64(
141
3.07M
      (__m128i *)&av1_filter_8bit[(unsigned)((sx + beta) + 0 * alpha) >>
142
3.07M
                                  WARPEDDIFF_PREC_BITS]);
143
3.07M
  tmp0_256 = _mm256_inserti128_si256(tmp0_256, tmp_8, 1);
144
145
3.07M
  __m128i tmp_9 = _mm_loadl_epi64(
146
3.07M
      (__m128i *)&av1_filter_8bit[(unsigned)((sx + beta) + 1 * alpha) >>
147
3.07M
                                  WARPEDDIFF_PREC_BITS]);
148
3.07M
  tmp1_256 = _mm256_inserti128_si256(tmp1_256, tmp_9, 1);
149
150
3.07M
  __m128i tmp_10 = _mm_loadl_epi64(
151
3.07M
      (__m128i *)&av1_filter_8bit[(unsigned)((sx + beta) + 2 * alpha) >>
152
3.07M
                                  WARPEDDIFF_PREC_BITS]);
153
3.07M
  tmp2_256 = _mm256_inserti128_si256(tmp2_256, tmp_10, 1);
154
155
3.07M
  __m128i tmp_11 = _mm_loadl_epi64(
156
3.07M
      (__m128i *)&av1_filter_8bit[(unsigned)((sx + beta) + 3 * alpha) >>
157
3.07M
                                  WARPEDDIFF_PREC_BITS]);
158
3.07M
  tmp3_256 = _mm256_inserti128_si256(tmp3_256, tmp_11, 1);
159
160
3.07M
  tmp_2 = _mm_loadl_epi64(
161
3.07M
      (__m128i *)&av1_filter_8bit[(unsigned)((sx + beta) + 4 * alpha) >>
162
3.07M
                                  WARPEDDIFF_PREC_BITS]);
163
3.07M
  tmp4_256 = _mm256_inserti128_si256(tmp4_256, tmp_2, 1);
164
165
3.07M
  tmp_3 = _mm_loadl_epi64(
166
3.07M
      (__m128i *)&av1_filter_8bit[(unsigned)((sx + beta) + 5 * alpha) >>
167
3.07M
                                  WARPEDDIFF_PREC_BITS]);
168
3.07M
  tmp5_256 = _mm256_inserti128_si256(tmp5_256, tmp_3, 1);
169
170
3.07M
  tmp_6 = _mm_loadl_epi64(
171
3.07M
      (__m128i *)&av1_filter_8bit[(unsigned)((sx + beta) + 6 * alpha) >>
172
3.07M
                                  WARPEDDIFF_PREC_BITS]);
173
3.07M
  tmp6_256 = _mm256_inserti128_si256(tmp6_256, tmp_6, 1);
174
175
3.07M
  tmp_7 = _mm_loadl_epi64(
176
3.07M
      (__m128i *)&av1_filter_8bit[(unsigned)((sx + beta) + 7 * alpha) >>
177
3.07M
                                  WARPEDDIFF_PREC_BITS]);
178
3.07M
  tmp7_256 = _mm256_inserti128_si256(tmp7_256, tmp_7, 1);
179
180
3.07M
  const __m256i tmp_12 = _mm256_unpacklo_epi16(tmp0_256, tmp2_256);
181
3.07M
  const __m256i tmp_13 = _mm256_unpacklo_epi16(tmp1_256, tmp3_256);
182
3.07M
  const __m256i tmp_14 = _mm256_unpacklo_epi16(tmp4_256, tmp6_256);
183
3.07M
  const __m256i tmp_15 = _mm256_unpacklo_epi16(tmp5_256, tmp7_256);
184
185
3.07M
  const __m256i res_0 = _mm256_unpacklo_epi32(tmp_12, tmp_14);
186
3.07M
  const __m256i res_1 = _mm256_unpackhi_epi32(tmp_12, tmp_14);
187
3.07M
  const __m256i res_2 = _mm256_unpacklo_epi32(tmp_13, tmp_15);
188
3.07M
  const __m256i res_3 = _mm256_unpackhi_epi32(tmp_13, tmp_15);
189
190
3.07M
  coeff[0] = _mm256_unpacklo_epi64(res_0, res_2);
191
3.07M
  coeff[1] = _mm256_unpackhi_epi64(res_0, res_2);
192
3.07M
  coeff[2] = _mm256_unpacklo_epi64(res_1, res_3);
193
3.07M
  coeff[3] = _mm256_unpackhi_epi64(res_1, res_3);
194
3.07M
}
195
196
static inline void prepare_horizontal_filter_coeff_beta0_avx2(int alpha, int sx,
197
411k
                                                              __m256i *coeff) {
198
411k
  __m128i tmp_0 = _mm_loadl_epi64(
199
411k
      (__m128i *)&av1_filter_8bit[(sx + 0 * alpha) >> WARPEDDIFF_PREC_BITS]);
200
411k
  __m128i tmp_1 = _mm_loadl_epi64(
201
411k
      (__m128i *)&av1_filter_8bit[(sx + 1 * alpha) >> WARPEDDIFF_PREC_BITS]);
202
411k
  __m128i tmp_2 = _mm_loadl_epi64(
203
411k
      (__m128i *)&av1_filter_8bit[(sx + 2 * alpha) >> WARPEDDIFF_PREC_BITS]);
204
411k
  __m128i tmp_3 = _mm_loadl_epi64(
205
411k
      (__m128i *)&av1_filter_8bit[(sx + 3 * alpha) >> WARPEDDIFF_PREC_BITS]);
206
411k
  __m128i tmp_4 = _mm_loadl_epi64(
207
411k
      (__m128i *)&av1_filter_8bit[(sx + 4 * alpha) >> WARPEDDIFF_PREC_BITS]);
208
411k
  __m128i tmp_5 = _mm_loadl_epi64(
209
411k
      (__m128i *)&av1_filter_8bit[(sx + 5 * alpha) >> WARPEDDIFF_PREC_BITS]);
210
411k
  __m128i tmp_6 = _mm_loadl_epi64(
211
411k
      (__m128i *)&av1_filter_8bit[(sx + 6 * alpha) >> WARPEDDIFF_PREC_BITS]);
212
411k
  __m128i tmp_7 = _mm_loadl_epi64(
213
411k
      (__m128i *)&av1_filter_8bit[(sx + 7 * alpha) >> WARPEDDIFF_PREC_BITS]);
214
215
411k
  tmp_0 = _mm_unpacklo_epi16(tmp_0, tmp_2);
216
411k
  tmp_1 = _mm_unpacklo_epi16(tmp_1, tmp_3);
217
411k
  tmp_4 = _mm_unpacklo_epi16(tmp_4, tmp_6);
218
411k
  tmp_5 = _mm_unpacklo_epi16(tmp_5, tmp_7);
219
220
411k
  const __m256i tmp_12 = _mm256_broadcastsi128_si256(tmp_0);
221
411k
  const __m256i tmp_13 = _mm256_broadcastsi128_si256(tmp_1);
222
411k
  const __m256i tmp_14 = _mm256_broadcastsi128_si256(tmp_4);
223
411k
  const __m256i tmp_15 = _mm256_broadcastsi128_si256(tmp_5);
224
225
411k
  const __m256i res_0 = _mm256_unpacklo_epi32(tmp_12, tmp_14);
226
411k
  const __m256i res_1 = _mm256_unpackhi_epi32(tmp_12, tmp_14);
227
411k
  const __m256i res_2 = _mm256_unpacklo_epi32(tmp_13, tmp_15);
228
411k
  const __m256i res_3 = _mm256_unpackhi_epi32(tmp_13, tmp_15);
229
230
411k
  coeff[0] = _mm256_unpacklo_epi64(res_0, res_2);
231
411k
  coeff[1] = _mm256_unpackhi_epi64(res_0, res_2);
232
411k
  coeff[2] = _mm256_unpacklo_epi64(res_1, res_3);
233
411k
  coeff[3] = _mm256_unpackhi_epi64(res_1, res_3);
234
411k
}
235
236
static inline void prepare_horizontal_filter_coeff_alpha0_avx2(int beta, int sx,
237
3.43M
                                                               __m256i *coeff) {
238
3.43M
  const __m128i tmp_0 =
239
3.43M
      _mm_loadl_epi64((__m128i *)&av1_filter_8bit[sx >> WARPEDDIFF_PREC_BITS]);
240
3.43M
  const __m128i tmp_1 = _mm_loadl_epi64(
241
3.43M
      (__m128i *)&av1_filter_8bit[(sx + beta) >> WARPEDDIFF_PREC_BITS]);
242
243
3.43M
  const __m256i res_0 =
244
3.43M
      _mm256_inserti128_si256(_mm256_castsi128_si256(tmp_0), tmp_1, 0x1);
245
246
3.43M
  coeff[0] = _mm256_shuffle_epi8(
247
3.43M
      res_0, _mm256_load_si256((__m256i *)shuffle_alpha0_mask01_avx2));
248
3.43M
  coeff[1] = _mm256_shuffle_epi8(
249
3.43M
      res_0, _mm256_load_si256((__m256i *)shuffle_alpha0_mask23_avx2));
250
3.43M
  coeff[2] = _mm256_shuffle_epi8(
251
3.43M
      res_0, _mm256_load_si256((__m256i *)shuffle_alpha0_mask45_avx2));
252
3.43M
  coeff[3] = _mm256_shuffle_epi8(
253
3.43M
      res_0, _mm256_load_si256((__m256i *)shuffle_alpha0_mask67_avx2));
254
3.43M
}
255
256
static inline void horizontal_filter_avx2(const __m256i src, __m256i *horz_out,
257
                                          int sx, int alpha, int beta, int row,
258
                                          const __m256i *shuffle_src,
259
                                          const __m256i *round_const,
260
3.07M
                                          const __m128i *shift) {
261
3.07M
  __m256i coeff[4];
262
3.07M
  prepare_horizontal_filter_coeff_avx2(alpha, beta, sx, coeff);
263
3.07M
  filter_src_pixels_avx2(src, horz_out, coeff, shuffle_src, round_const, shift,
264
3.07M
                         row);
265
3.07M
}
266
static inline void prepare_horizontal_filter_coeff(int alpha, int sx,
267
444k
                                                   __m256i *coeff) {
268
444k
  const __m128i tmp_0 = _mm_loadl_epi64(
269
444k
      (__m128i *)&av1_filter_8bit[(sx + 0 * alpha) >> WARPEDDIFF_PREC_BITS]);
270
444k
  const __m128i tmp_1 = _mm_loadl_epi64(
271
444k
      (__m128i *)&av1_filter_8bit[(sx + 1 * alpha) >> WARPEDDIFF_PREC_BITS]);
272
444k
  const __m128i tmp_2 = _mm_loadl_epi64(
273
444k
      (__m128i *)&av1_filter_8bit[(sx + 2 * alpha) >> WARPEDDIFF_PREC_BITS]);
274
444k
  const __m128i tmp_3 = _mm_loadl_epi64(
275
444k
      (__m128i *)&av1_filter_8bit[(sx + 3 * alpha) >> WARPEDDIFF_PREC_BITS]);
276
444k
  const __m128i tmp_4 = _mm_loadl_epi64(
277
444k
      (__m128i *)&av1_filter_8bit[(sx + 4 * alpha) >> WARPEDDIFF_PREC_BITS]);
278
444k
  const __m128i tmp_5 = _mm_loadl_epi64(
279
444k
      (__m128i *)&av1_filter_8bit[(sx + 5 * alpha) >> WARPEDDIFF_PREC_BITS]);
280
444k
  const __m128i tmp_6 = _mm_loadl_epi64(
281
444k
      (__m128i *)&av1_filter_8bit[(sx + 6 * alpha) >> WARPEDDIFF_PREC_BITS]);
282
444k
  const __m128i tmp_7 = _mm_loadl_epi64(
283
444k
      (__m128i *)&av1_filter_8bit[(sx + 7 * alpha) >> WARPEDDIFF_PREC_BITS]);
284
285
444k
  const __m128i tmp_8 = _mm_unpacklo_epi16(tmp_0, tmp_2);
286
444k
  const __m128i tmp_9 = _mm_unpacklo_epi16(tmp_1, tmp_3);
287
444k
  const __m128i tmp_10 = _mm_unpacklo_epi16(tmp_4, tmp_6);
288
444k
  const __m128i tmp_11 = _mm_unpacklo_epi16(tmp_5, tmp_7);
289
290
444k
  const __m128i tmp_12 = _mm_unpacklo_epi32(tmp_8, tmp_10);
291
444k
  const __m128i tmp_13 = _mm_unpackhi_epi32(tmp_8, tmp_10);
292
444k
  const __m128i tmp_14 = _mm_unpacklo_epi32(tmp_9, tmp_11);
293
444k
  const __m128i tmp_15 = _mm_unpackhi_epi32(tmp_9, tmp_11);
294
295
444k
  coeff[0] = _mm256_castsi128_si256(_mm_unpacklo_epi64(tmp_12, tmp_14));
296
444k
  coeff[1] = _mm256_castsi128_si256(_mm_unpackhi_epi64(tmp_12, tmp_14));
297
444k
  coeff[2] = _mm256_castsi128_si256(_mm_unpacklo_epi64(tmp_13, tmp_15));
298
444k
  coeff[3] = _mm256_castsi128_si256(_mm_unpackhi_epi64(tmp_13, tmp_15));
299
444k
}
300
301
static inline void warp_horizontal_filter_avx2(
302
    const uint8_t *ref, __m256i *horz_out, int stride, int32_t ix4, int32_t iy4,
303
    int32_t sx4, int alpha, int beta, int p_height, int height, int i,
304
    const __m256i *round_const, const __m128i *shift,
305
362k
    const __m256i *shuffle_src) {
306
362k
  int k, iy, sx, row = 0;
307
362k
  __m256i coeff[4];
308
2.88M
  for (k = -7; k <= (AOMMIN(8, p_height - i) - 2); k += 2) {
309
2.52M
    iy = iy4 + k;
310
2.52M
    iy = clamp(iy, 0, height - 1);
311
2.52M
    const __m128i src_0 =
312
2.52M
        _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
313
2.52M
    iy = iy4 + k + 1;
314
2.52M
    iy = clamp(iy, 0, height - 1);
315
2.52M
    const __m128i src_1 =
316
2.52M
        _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
317
2.52M
    const __m256i src_01 =
318
2.52M
        _mm256_inserti128_si256(_mm256_castsi128_si256(src_0), src_1, 0x1);
319
2.52M
    sx = sx4 + beta * (k + 4);
320
2.52M
    horizontal_filter_avx2(src_01, horz_out, sx, alpha, beta, row, shuffle_src,
321
2.52M
                           round_const, shift);
322
2.52M
    row += 1;
323
2.52M
  }
324
362k
  iy = iy4 + k;
325
362k
  iy = clamp(iy, 0, height - 1);
326
362k
  const __m256i src_01 = _mm256_castsi128_si256(
327
362k
      _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7)));
328
362k
  sx = sx4 + beta * (k + 4);
329
362k
  prepare_horizontal_filter_coeff(alpha, sx, coeff);
330
362k
  filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src, round_const,
331
362k
                         shift, row);
332
362k
}
333
334
static inline void warp_horizontal_filter_alpha0_avx2(
335
    const uint8_t *ref, __m256i *horz_out, int stride, int32_t ix4, int32_t iy4,
336
    int32_t sx4, int alpha, int beta, int p_height, int height, int i,
337
    const __m256i *round_const, const __m128i *shift,
338
350k
    const __m256i *shuffle_src) {
339
350k
  (void)alpha;
340
350k
  int k, iy, sx, row = 0;
341
350k
  __m256i coeff[4];
342
2.78M
  for (k = -7; k <= (AOMMIN(8, p_height - i) - 2); k += 2) {
343
2.43M
    iy = iy4 + k;
344
2.43M
    iy = clamp(iy, 0, height - 1);
345
2.43M
    const __m128i src_0 =
346
2.43M
        _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
347
2.43M
    iy = iy4 + k + 1;
348
2.43M
    iy = clamp(iy, 0, height - 1);
349
2.43M
    const __m128i src_1 =
350
2.43M
        _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
351
2.43M
    const __m256i src_01 =
352
2.43M
        _mm256_inserti128_si256(_mm256_castsi128_si256(src_0), src_1, 0x1);
353
2.43M
    sx = sx4 + beta * (k + 4);
354
2.43M
    prepare_horizontal_filter_coeff_alpha0_avx2(beta, sx, coeff);
355
2.43M
    filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src, round_const,
356
2.43M
                           shift, row);
357
2.43M
    row += 1;
358
2.43M
  }
359
350k
  iy = iy4 + k;
360
350k
  iy = clamp(iy, 0, height - 1);
361
350k
  const __m256i src_01 = _mm256_castsi128_si256(
362
350k
      _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7)));
363
350k
  sx = sx4 + beta * (k + 4);
364
350k
  prepare_horizontal_filter_coeff_alpha0_avx2(beta, sx, coeff);
365
350k
  filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src, round_const,
366
350k
                         shift, row);
367
350k
}
368
369
static inline void warp_horizontal_filter_beta0_avx2(
370
    const uint8_t *ref, __m256i *horz_out, int stride, int32_t ix4, int32_t iy4,
371
    int32_t sx4, int alpha, int beta, int p_height, int height, int i,
372
    const __m256i *round_const, const __m128i *shift,
373
411k
    const __m256i *shuffle_src) {
374
411k
  (void)beta;
375
411k
  int k, iy, row = 0;
376
411k
  __m256i coeff[4];
377
411k
  prepare_horizontal_filter_coeff_beta0_avx2(alpha, sx4, coeff);
378
3.27M
  for (k = -7; k <= (AOMMIN(8, p_height - i) - 2); k += 2) {
379
2.86M
    iy = iy4 + k;
380
2.86M
    iy = clamp(iy, 0, height - 1);
381
2.86M
    const __m128i src_0 =
382
2.86M
        _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
383
2.86M
    iy = iy4 + k + 1;
384
2.86M
    iy = clamp(iy, 0, height - 1);
385
2.86M
    const __m128i src_1 =
386
2.86M
        _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
387
2.86M
    const __m256i src_01 =
388
2.86M
        _mm256_inserti128_si256(_mm256_castsi128_si256(src_0), src_1, 0x1);
389
2.86M
    filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src, round_const,
390
2.86M
                           shift, row);
391
2.86M
    row += 1;
392
2.86M
  }
393
411k
  iy = iy4 + k;
394
411k
  iy = clamp(iy, 0, height - 1);
395
411k
  const __m256i src_01 = _mm256_castsi128_si256(
396
411k
      _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7)));
397
411k
  filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src, round_const,
398
411k
                         shift, row);
399
411k
}
400
401
static inline void warp_horizontal_filter_alpha0_beta0_avx2(
402
    const uint8_t *ref, __m256i *horz_out, int stride, int32_t ix4, int32_t iy4,
403
    int32_t sx4, int alpha, int beta, int p_height, int height, int i,
404
    const __m256i *round_const, const __m128i *shift,
405
676k
    const __m256i *shuffle_src) {
406
676k
  (void)alpha;
407
676k
  int k, iy, row = 0;
408
676k
  __m256i coeff[4];
409
676k
  prepare_horizontal_filter_coeff_alpha0_avx2(beta, sx4, coeff);
410
5.36M
  for (k = -7; k <= (AOMMIN(8, p_height - i) - 2); k += 2) {
411
4.69M
    iy = iy4 + k;
412
4.69M
    iy = clamp(iy, 0, height - 1);
413
4.69M
    const __m128i src0 =
414
4.69M
        _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
415
4.69M
    iy = iy4 + k + 1;
416
4.69M
    iy = clamp(iy, 0, height - 1);
417
4.69M
    const __m128i src1 =
418
4.69M
        _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
419
4.69M
    const __m256i src_01 =
420
4.69M
        _mm256_inserti128_si256(_mm256_castsi128_si256(src0), src1, 0x1);
421
4.69M
    filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src, round_const,
422
4.69M
                           shift, row);
423
4.69M
    row += 1;
424
4.69M
  }
425
676k
  iy = iy4 + k;
426
676k
  iy = clamp(iy, 0, height - 1);
427
676k
  const __m256i src_01 = _mm256_castsi128_si256(
428
676k
      _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7)));
429
676k
  filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src, round_const,
430
676k
                         shift, row);
431
676k
}
432
433
static inline void unpack_weights_and_set_round_const_avx2(
434
    ConvolveParams *conv_params, const int round_bits, const int offset_bits,
435
213k
    __m256i *res_sub_const, __m256i *round_bits_const, __m256i *wt) {
436
213k
  *res_sub_const =
437
213k
      _mm256_set1_epi16(-(1 << (offset_bits - conv_params->round_1)) -
438
213k
                        (1 << (offset_bits - conv_params->round_1 - 1)));
439
213k
  *round_bits_const = _mm256_set1_epi16(((1 << round_bits) >> 1));
440
441
213k
  const int w0 = conv_params->fwd_offset;
442
213k
  const int w1 = conv_params->bck_offset;
443
213k
  const __m256i wt0 = _mm256_set1_epi16((short)w0);
444
213k
  const __m256i wt1 = _mm256_set1_epi16((short)w1);
445
213k
  *wt = _mm256_unpacklo_epi16(wt0, wt1);
446
213k
}
447
448
static inline void prepare_vertical_filter_coeffs_avx2(int gamma, int delta,
449
                                                       int sy,
450
3.22M
                                                       __m256i *coeffs) {
451
3.22M
  __m128i filt_00 =
452
3.22M
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
453
3.22M
                                  ((sy + 0 * gamma) >> WARPEDDIFF_PREC_BITS)));
454
3.22M
  __m128i filt_01 =
455
3.22M
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
456
3.22M
                                  ((sy + 2 * gamma) >> WARPEDDIFF_PREC_BITS)));
457
3.22M
  __m128i filt_02 =
458
3.22M
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
459
3.22M
                                  ((sy + 4 * gamma) >> WARPEDDIFF_PREC_BITS)));
460
3.22M
  __m128i filt_03 =
461
3.22M
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
462
3.22M
                                  ((sy + 6 * gamma) >> WARPEDDIFF_PREC_BITS)));
463
464
3.22M
  __m128i filt_10 = _mm_loadu_si128(
465
3.22M
      (__m128i *)(av1_warped_filter +
466
3.22M
                  (((sy + delta) + 0 * gamma) >> WARPEDDIFF_PREC_BITS)));
467
3.22M
  __m128i filt_11 = _mm_loadu_si128(
468
3.22M
      (__m128i *)(av1_warped_filter +
469
3.22M
                  (((sy + delta) + 2 * gamma) >> WARPEDDIFF_PREC_BITS)));
470
3.22M
  __m128i filt_12 = _mm_loadu_si128(
471
3.22M
      (__m128i *)(av1_warped_filter +
472
3.22M
                  (((sy + delta) + 4 * gamma) >> WARPEDDIFF_PREC_BITS)));
473
3.22M
  __m128i filt_13 = _mm_loadu_si128(
474
3.22M
      (__m128i *)(av1_warped_filter +
475
3.22M
                  (((sy + delta) + 6 * gamma) >> WARPEDDIFF_PREC_BITS)));
476
477
3.22M
  __m256i filt_0 =
478
3.22M
      _mm256_inserti128_si256(_mm256_castsi128_si256(filt_00), filt_10, 0x1);
479
3.22M
  __m256i filt_1 =
480
3.22M
      _mm256_inserti128_si256(_mm256_castsi128_si256(filt_01), filt_11, 0x1);
481
3.22M
  __m256i filt_2 =
482
3.22M
      _mm256_inserti128_si256(_mm256_castsi128_si256(filt_02), filt_12, 0x1);
483
3.22M
  __m256i filt_3 =
484
3.22M
      _mm256_inserti128_si256(_mm256_castsi128_si256(filt_03), filt_13, 0x1);
485
486
3.22M
  __m256i res_0 = _mm256_unpacklo_epi32(filt_0, filt_1);
487
3.22M
  __m256i res_1 = _mm256_unpacklo_epi32(filt_2, filt_3);
488
3.22M
  __m256i res_2 = _mm256_unpackhi_epi32(filt_0, filt_1);
489
3.22M
  __m256i res_3 = _mm256_unpackhi_epi32(filt_2, filt_3);
490
491
3.22M
  coeffs[0] = _mm256_unpacklo_epi64(res_0, res_1);
492
3.22M
  coeffs[1] = _mm256_unpackhi_epi64(res_0, res_1);
493
3.22M
  coeffs[2] = _mm256_unpacklo_epi64(res_2, res_3);
494
3.22M
  coeffs[3] = _mm256_unpackhi_epi64(res_2, res_3);
495
496
3.22M
  filt_00 =
497
3.22M
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
498
3.22M
                                  ((sy + 1 * gamma) >> WARPEDDIFF_PREC_BITS)));
499
3.22M
  filt_01 =
500
3.22M
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
501
3.22M
                                  ((sy + 3 * gamma) >> WARPEDDIFF_PREC_BITS)));
502
3.22M
  filt_02 =
503
3.22M
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
504
3.22M
                                  ((sy + 5 * gamma) >> WARPEDDIFF_PREC_BITS)));
505
3.22M
  filt_03 =
506
3.22M
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
507
3.22M
                                  ((sy + 7 * gamma) >> WARPEDDIFF_PREC_BITS)));
508
509
3.22M
  filt_10 = _mm_loadu_si128(
510
3.22M
      (__m128i *)(av1_warped_filter +
511
3.22M
                  (((sy + delta) + 1 * gamma) >> WARPEDDIFF_PREC_BITS)));
512
3.22M
  filt_11 = _mm_loadu_si128(
513
3.22M
      (__m128i *)(av1_warped_filter +
514
3.22M
                  (((sy + delta) + 3 * gamma) >> WARPEDDIFF_PREC_BITS)));
515
3.22M
  filt_12 = _mm_loadu_si128(
516
3.22M
      (__m128i *)(av1_warped_filter +
517
3.22M
                  (((sy + delta) + 5 * gamma) >> WARPEDDIFF_PREC_BITS)));
518
3.22M
  filt_13 = _mm_loadu_si128(
519
3.22M
      (__m128i *)(av1_warped_filter +
520
3.22M
                  (((sy + delta) + 7 * gamma) >> WARPEDDIFF_PREC_BITS)));
521
522
3.22M
  filt_0 =
523
3.22M
      _mm256_inserti128_si256(_mm256_castsi128_si256(filt_00), filt_10, 0x1);
524
3.22M
  filt_1 =
525
3.22M
      _mm256_inserti128_si256(_mm256_castsi128_si256(filt_01), filt_11, 0x1);
526
3.22M
  filt_2 =
527
3.22M
      _mm256_inserti128_si256(_mm256_castsi128_si256(filt_02), filt_12, 0x1);
528
3.22M
  filt_3 =
529
3.22M
      _mm256_inserti128_si256(_mm256_castsi128_si256(filt_03), filt_13, 0x1);
530
531
3.22M
  res_0 = _mm256_unpacklo_epi32(filt_0, filt_1);
532
3.22M
  res_1 = _mm256_unpacklo_epi32(filt_2, filt_3);
533
3.22M
  res_2 = _mm256_unpackhi_epi32(filt_0, filt_1);
534
3.22M
  res_3 = _mm256_unpackhi_epi32(filt_2, filt_3);
535
536
3.22M
  coeffs[4] = _mm256_unpacklo_epi64(res_0, res_1);
537
3.22M
  coeffs[5] = _mm256_unpackhi_epi64(res_0, res_1);
538
3.22M
  coeffs[6] = _mm256_unpacklo_epi64(res_2, res_3);
539
3.22M
  coeffs[7] = _mm256_unpackhi_epi64(res_2, res_3);
540
3.22M
}
541
542
static inline void prepare_vertical_filter_coeffs_delta0_avx2(int gamma, int sy,
543
474k
                                                              __m256i *coeffs) {
544
474k
  __m128i filt_00 =
545
474k
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
546
474k
                                  ((sy + 0 * gamma) >> WARPEDDIFF_PREC_BITS)));
547
474k
  __m128i filt_01 =
548
474k
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
549
474k
                                  ((sy + 2 * gamma) >> WARPEDDIFF_PREC_BITS)));
550
474k
  __m128i filt_02 =
551
474k
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
552
474k
                                  ((sy + 4 * gamma) >> WARPEDDIFF_PREC_BITS)));
553
474k
  __m128i filt_03 =
554
474k
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
555
474k
                                  ((sy + 6 * gamma) >> WARPEDDIFF_PREC_BITS)));
556
557
474k
  __m256i filt_0 = _mm256_broadcastsi128_si256(filt_00);
558
474k
  __m256i filt_1 = _mm256_broadcastsi128_si256(filt_01);
559
474k
  __m256i filt_2 = _mm256_broadcastsi128_si256(filt_02);
560
474k
  __m256i filt_3 = _mm256_broadcastsi128_si256(filt_03);
561
562
474k
  __m256i res_0 = _mm256_unpacklo_epi32(filt_0, filt_1);
563
474k
  __m256i res_1 = _mm256_unpacklo_epi32(filt_2, filt_3);
564
474k
  __m256i res_2 = _mm256_unpackhi_epi32(filt_0, filt_1);
565
474k
  __m256i res_3 = _mm256_unpackhi_epi32(filt_2, filt_3);
566
567
474k
  coeffs[0] = _mm256_unpacklo_epi64(res_0, res_1);
568
474k
  coeffs[1] = _mm256_unpackhi_epi64(res_0, res_1);
569
474k
  coeffs[2] = _mm256_unpacklo_epi64(res_2, res_3);
570
474k
  coeffs[3] = _mm256_unpackhi_epi64(res_2, res_3);
571
572
474k
  filt_00 =
573
474k
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
574
474k
                                  ((sy + 1 * gamma) >> WARPEDDIFF_PREC_BITS)));
575
474k
  filt_01 =
576
474k
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
577
474k
                                  ((sy + 3 * gamma) >> WARPEDDIFF_PREC_BITS)));
578
474k
  filt_02 =
579
474k
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
580
474k
                                  ((sy + 5 * gamma) >> WARPEDDIFF_PREC_BITS)));
581
474k
  filt_03 =
582
474k
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
583
474k
                                  ((sy + 7 * gamma) >> WARPEDDIFF_PREC_BITS)));
584
585
474k
  filt_0 = _mm256_broadcastsi128_si256(filt_00);
586
474k
  filt_1 = _mm256_broadcastsi128_si256(filt_01);
587
474k
  filt_2 = _mm256_broadcastsi128_si256(filt_02);
588
474k
  filt_3 = _mm256_broadcastsi128_si256(filt_03);
589
590
474k
  res_0 = _mm256_unpacklo_epi32(filt_0, filt_1);
591
474k
  res_1 = _mm256_unpacklo_epi32(filt_2, filt_3);
592
474k
  res_2 = _mm256_unpackhi_epi32(filt_0, filt_1);
593
474k
  res_3 = _mm256_unpackhi_epi32(filt_2, filt_3);
594
595
474k
  coeffs[4] = _mm256_unpacklo_epi64(res_0, res_1);
596
474k
  coeffs[5] = _mm256_unpackhi_epi64(res_0, res_1);
597
474k
  coeffs[6] = _mm256_unpacklo_epi64(res_2, res_3);
598
474k
  coeffs[7] = _mm256_unpackhi_epi64(res_2, res_3);
599
474k
}
600
601
static inline void prepare_vertical_filter_coeffs_gamma0_avx2(int delta, int sy,
602
1.85M
                                                              __m256i *coeffs) {
603
1.85M
  const __m128i filt_0 = _mm_loadu_si128(
604
1.85M
      (__m128i *)(av1_warped_filter + (sy >> WARPEDDIFF_PREC_BITS)));
605
1.85M
  const __m128i filt_1 = _mm_loadu_si128(
606
1.85M
      (__m128i *)(av1_warped_filter + ((sy + delta) >> WARPEDDIFF_PREC_BITS)));
607
608
1.85M
  __m256i res_0 =
609
1.85M
      _mm256_inserti128_si256(_mm256_castsi128_si256(filt_0), filt_1, 0x1);
610
611
1.85M
  coeffs[0] = _mm256_shuffle_epi8(
612
1.85M
      res_0, _mm256_load_si256((__m256i *)shuffle_gamma0_mask0_avx2));
613
1.85M
  coeffs[1] = _mm256_shuffle_epi8(
614
1.85M
      res_0, _mm256_load_si256((__m256i *)shuffle_gamma0_mask1_avx2));
615
1.85M
  coeffs[2] = _mm256_shuffle_epi8(
616
1.85M
      res_0, _mm256_load_si256((__m256i *)shuffle_gamma0_mask2_avx2));
617
1.85M
  coeffs[3] = _mm256_shuffle_epi8(
618
1.85M
      res_0, _mm256_load_si256((__m256i *)shuffle_gamma0_mask3_avx2));
619
620
1.85M
  coeffs[4] = coeffs[0];
621
1.85M
  coeffs[5] = coeffs[1];
622
1.85M
  coeffs[6] = coeffs[2];
623
1.85M
  coeffs[7] = coeffs[3];
624
1.85M
}
625
626
static inline void filter_src_pixels_vertical_avx2(__m256i *horz_out,
627
                                                   __m256i *src,
628
                                                   __m256i *coeffs,
629
                                                   __m256i *res_lo,
630
7.98M
                                                   __m256i *res_hi, int row) {
631
7.98M
  const __m256i src_6 = horz_out[row + 3];
632
7.98M
  const __m256i src_7 =
633
7.98M
      _mm256_permute2x128_si256(horz_out[row + 3], horz_out[row + 4], 0x21);
634
635
7.98M
  src[6] = _mm256_unpacklo_epi16(src_6, src_7);
636
637
7.98M
  const __m256i res_0 = _mm256_madd_epi16(src[0], coeffs[0]);
638
7.98M
  const __m256i res_2 = _mm256_madd_epi16(src[2], coeffs[1]);
639
7.98M
  const __m256i res_4 = _mm256_madd_epi16(src[4], coeffs[2]);
640
7.98M
  const __m256i res_6 = _mm256_madd_epi16(src[6], coeffs[3]);
641
642
7.98M
  const __m256i res_even = _mm256_add_epi32(_mm256_add_epi32(res_0, res_2),
643
7.98M
                                            _mm256_add_epi32(res_4, res_6));
644
645
7.98M
  src[7] = _mm256_unpackhi_epi16(src_6, src_7);
646
647
7.98M
  const __m256i res_1 = _mm256_madd_epi16(src[1], coeffs[4]);
648
7.98M
  const __m256i res_3 = _mm256_madd_epi16(src[3], coeffs[5]);
649
7.98M
  const __m256i res_5 = _mm256_madd_epi16(src[5], coeffs[6]);
650
7.98M
  const __m256i res_7 = _mm256_madd_epi16(src[7], coeffs[7]);
651
652
7.98M
  const __m256i res_odd = _mm256_add_epi32(_mm256_add_epi32(res_1, res_3),
653
7.98M
                                           _mm256_add_epi32(res_5, res_7));
654
655
  // Rearrange pixels back into the order 0 ... 7
656
7.98M
  *res_lo = _mm256_unpacklo_epi32(res_even, res_odd);
657
7.98M
  *res_hi = _mm256_unpackhi_epi32(res_even, res_odd);
658
7.98M
}
659
660
static inline void store_vertical_filter_output_avx2(
661
    const __m256i *res_lo, const __m256i *res_hi, const __m256i *res_add_const,
662
    const __m256i *wt, const __m256i *res_sub_const,
663
    const __m256i *round_bits_const, uint8_t *pred, ConvolveParams *conv_params,
664
    int i, int j, int k, const int reduce_bits_vert, int p_stride, int p_width,
665
7.98M
    const int round_bits) {
666
7.98M
  __m256i res_lo_1 = *res_lo;
667
7.98M
  __m256i res_hi_1 = *res_hi;
668
669
7.98M
  if (conv_params->is_compound) {
670
231k
    __m128i *const p_0 =
671
231k
        (__m128i *)&conv_params->dst[(i + k + 4) * conv_params->dst_stride + j];
672
231k
    __m128i *const p_1 =
673
231k
        (__m128i *)&conv_params
674
231k
            ->dst[(i + (k + 1) + 4) * conv_params->dst_stride + j];
675
676
231k
    res_lo_1 = _mm256_srai_epi32(_mm256_add_epi32(res_lo_1, *res_add_const),
677
231k
                                 reduce_bits_vert);
678
679
231k
    const __m256i temp_lo_16 = _mm256_packus_epi32(res_lo_1, res_lo_1);
680
231k
    __m256i res_lo_16;
681
231k
    if (conv_params->do_average) {
682
106k
      __m128i *const dst8_0 = (__m128i *)&pred[(i + k + 4) * p_stride + j];
683
106k
      __m128i *const dst8_1 =
684
106k
          (__m128i *)&pred[(i + (k + 1) + 4) * p_stride + j];
685
106k
      const __m128i p_16_0 = _mm_loadl_epi64(p_0);
686
106k
      const __m128i p_16_1 = _mm_loadl_epi64(p_1);
687
106k
      const __m256i p_16 =
688
106k
          _mm256_inserti128_si256(_mm256_castsi128_si256(p_16_0), p_16_1, 1);
689
106k
      if (conv_params->use_dist_wtd_comp_avg) {
690
47.1k
        const __m256i p_16_lo = _mm256_unpacklo_epi16(p_16, temp_lo_16);
691
47.1k
        const __m256i wt_res_lo = _mm256_madd_epi16(p_16_lo, *wt);
692
47.1k
        const __m256i shifted_32 =
693
47.1k
            _mm256_srai_epi32(wt_res_lo, DIST_PRECISION_BITS);
694
47.1k
        res_lo_16 = _mm256_packus_epi32(shifted_32, shifted_32);
695
59.7k
      } else {
696
59.7k
        res_lo_16 = _mm256_srai_epi16(_mm256_add_epi16(p_16, temp_lo_16), 1);
697
59.7k
      }
698
106k
      res_lo_16 = _mm256_add_epi16(res_lo_16, *res_sub_const);
699
106k
      res_lo_16 = _mm256_srai_epi16(
700
106k
          _mm256_add_epi16(res_lo_16, *round_bits_const), round_bits);
701
106k
      const __m256i res_8_lo = _mm256_packus_epi16(res_lo_16, res_lo_16);
702
106k
      const __m128i res_8_lo_0 = _mm256_castsi256_si128(res_8_lo);
703
106k
      const __m128i res_8_lo_1 = _mm256_extracti128_si256(res_8_lo, 1);
704
106k
      *(int *)dst8_0 = _mm_cvtsi128_si32(res_8_lo_0);
705
106k
      *(int *)dst8_1 = _mm_cvtsi128_si32(res_8_lo_1);
706
124k
    } else {
707
124k
      const __m128i temp_lo_16_0 = _mm256_castsi256_si128(temp_lo_16);
708
124k
      const __m128i temp_lo_16_1 = _mm256_extracti128_si256(temp_lo_16, 1);
709
124k
      _mm_storel_epi64(p_0, temp_lo_16_0);
710
124k
      _mm_storel_epi64(p_1, temp_lo_16_1);
711
124k
    }
712
231k
    if (p_width > 4) {
713
231k
      __m128i *const p4_0 =
714
231k
          (__m128i *)&conv_params
715
231k
              ->dst[(i + k + 4) * conv_params->dst_stride + j + 4];
716
231k
      __m128i *const p4_1 =
717
231k
          (__m128i *)&conv_params
718
231k
              ->dst[(i + (k + 1) + 4) * conv_params->dst_stride + j + 4];
719
231k
      res_hi_1 = _mm256_srai_epi32(_mm256_add_epi32(res_hi_1, *res_add_const),
720
231k
                                   reduce_bits_vert);
721
231k
      const __m256i temp_hi_16 = _mm256_packus_epi32(res_hi_1, res_hi_1);
722
231k
      __m256i res_hi_16;
723
231k
      if (conv_params->do_average) {
724
106k
        __m128i *const dst8_4_0 =
725
106k
            (__m128i *)&pred[(i + k + 4) * p_stride + j + 4];
726
106k
        __m128i *const dst8_4_1 =
727
106k
            (__m128i *)&pred[(i + (k + 1) + 4) * p_stride + j + 4];
728
106k
        const __m128i p4_16_0 = _mm_loadl_epi64(p4_0);
729
106k
        const __m128i p4_16_1 = _mm_loadl_epi64(p4_1);
730
106k
        const __m256i p4_16 = _mm256_inserti128_si256(
731
106k
            _mm256_castsi128_si256(p4_16_0), p4_16_1, 1);
732
106k
        if (conv_params->use_dist_wtd_comp_avg) {
733
47.1k
          const __m256i p_16_hi = _mm256_unpacklo_epi16(p4_16, temp_hi_16);
734
47.1k
          const __m256i wt_res_hi = _mm256_madd_epi16(p_16_hi, *wt);
735
47.1k
          const __m256i shifted_32 =
736
47.1k
              _mm256_srai_epi32(wt_res_hi, DIST_PRECISION_BITS);
737
47.1k
          res_hi_16 = _mm256_packus_epi32(shifted_32, shifted_32);
738
59.6k
        } else {
739
59.6k
          res_hi_16 = _mm256_srai_epi16(_mm256_add_epi16(p4_16, temp_hi_16), 1);
740
59.6k
        }
741
106k
        res_hi_16 = _mm256_add_epi16(res_hi_16, *res_sub_const);
742
106k
        res_hi_16 = _mm256_srai_epi16(
743
106k
            _mm256_add_epi16(res_hi_16, *round_bits_const), round_bits);
744
106k
        __m256i res_8_hi = _mm256_packus_epi16(res_hi_16, res_hi_16);
745
106k
        const __m128i res_8_hi_0 = _mm256_castsi256_si128(res_8_hi);
746
106k
        const __m128i res_8_hi_1 = _mm256_extracti128_si256(res_8_hi, 1);
747
106k
        *(int *)dst8_4_0 = _mm_cvtsi128_si32(res_8_hi_0);
748
106k
        *(int *)dst8_4_1 = _mm_cvtsi128_si32(res_8_hi_1);
749
124k
      } else {
750
124k
        const __m128i temp_hi_16_0 = _mm256_castsi256_si128(temp_hi_16);
751
124k
        const __m128i temp_hi_16_1 = _mm256_extracti128_si256(temp_hi_16, 1);
752
124k
        _mm_storel_epi64(p4_0, temp_hi_16_0);
753
124k
        _mm_storel_epi64(p4_1, temp_hi_16_1);
754
124k
      }
755
231k
    }
756
7.75M
  } else {
757
7.75M
    const __m256i res_lo_round = _mm256_srai_epi32(
758
7.75M
        _mm256_add_epi32(res_lo_1, *res_add_const), reduce_bits_vert);
759
7.75M
    const __m256i res_hi_round = _mm256_srai_epi32(
760
7.75M
        _mm256_add_epi32(res_hi_1, *res_add_const), reduce_bits_vert);
761
762
7.75M
    const __m256i res_16bit = _mm256_packs_epi32(res_lo_round, res_hi_round);
763
7.75M
    const __m256i res_8bit = _mm256_packus_epi16(res_16bit, res_16bit);
764
7.75M
    const __m128i res_8bit0 = _mm256_castsi256_si128(res_8bit);
765
7.75M
    const __m128i res_8bit1 = _mm256_extracti128_si256(res_8bit, 1);
766
767
    // Store, blending with 'pred' if needed
768
7.75M
    __m128i *const p = (__m128i *)&pred[(i + k + 4) * p_stride + j];
769
7.75M
    __m128i *const p1 = (__m128i *)&pred[(i + (k + 1) + 4) * p_stride + j];
770
771
7.75M
    if (p_width == 4) {
772
0
      *(int *)p = _mm_cvtsi128_si32(res_8bit0);
773
0
      *(int *)p1 = _mm_cvtsi128_si32(res_8bit1);
774
7.75M
    } else {
775
7.75M
      _mm_storel_epi64(p, res_8bit0);
776
7.75M
      _mm_storel_epi64(p1, res_8bit1);
777
7.75M
    }
778
7.75M
  }
779
7.98M
}
780
781
static inline void warp_vertical_filter_avx2(
782
    uint8_t *pred, __m256i *horz_out, ConvolveParams *conv_params,
783
    int16_t gamma, int16_t delta, int p_height, int p_stride, int p_width,
784
    int i, int j, int sy4, const int reduce_bits_vert,
785
    const __m256i *res_add_const, const int round_bits,
786
    const __m256i *res_sub_const, const __m256i *round_bits_const,
787
812k
    const __m256i *wt) {
788
812k
  int k, row = 0;
789
812k
  __m256i src[8];
790
812k
  const __m256i src_0 = horz_out[0];
791
812k
  const __m256i src_1 =
792
812k
      _mm256_permute2x128_si256(horz_out[0], horz_out[1], 0x21);
793
812k
  const __m256i src_2 = horz_out[1];
794
812k
  const __m256i src_3 =
795
812k
      _mm256_permute2x128_si256(horz_out[1], horz_out[2], 0x21);
796
812k
  const __m256i src_4 = horz_out[2];
797
812k
  const __m256i src_5 =
798
812k
      _mm256_permute2x128_si256(horz_out[2], horz_out[3], 0x21);
799
800
812k
  src[0] = _mm256_unpacklo_epi16(src_0, src_1);
801
812k
  src[2] = _mm256_unpacklo_epi16(src_2, src_3);
802
812k
  src[4] = _mm256_unpacklo_epi16(src_4, src_5);
803
804
812k
  src[1] = _mm256_unpackhi_epi16(src_0, src_1);
805
812k
  src[3] = _mm256_unpackhi_epi16(src_2, src_3);
806
812k
  src[5] = _mm256_unpackhi_epi16(src_4, src_5);
807
808
4.04M
  for (k = -4; k < AOMMIN(4, p_height - i - 4); k += 2) {
809
3.22M
    int sy = sy4 + delta * (k + 4);
810
3.22M
    __m256i coeffs[8];
811
3.22M
    prepare_vertical_filter_coeffs_avx2(gamma, delta, sy, coeffs);
812
3.22M
    __m256i res_lo, res_hi;
813
3.22M
    filter_src_pixels_vertical_avx2(horz_out, src, coeffs, &res_lo, &res_hi,
814
3.22M
                                    row);
815
3.22M
    store_vertical_filter_output_avx2(&res_lo, &res_hi, res_add_const, wt,
816
3.22M
                                      res_sub_const, round_bits_const, pred,
817
3.22M
                                      conv_params, i, j, k, reduce_bits_vert,
818
3.22M
                                      p_stride, p_width, round_bits);
819
3.22M
    src[0] = src[2];
820
3.22M
    src[2] = src[4];
821
3.22M
    src[4] = src[6];
822
3.22M
    src[1] = src[3];
823
3.22M
    src[3] = src[5];
824
3.22M
    src[5] = src[7];
825
826
3.22M
    row += 1;
827
3.22M
  }
828
812k
}
829
830
static inline void warp_vertical_filter_gamma0_avx2(
831
    uint8_t *pred, __m256i *horz_out, ConvolveParams *conv_params,
832
    int16_t gamma, int16_t delta, int p_height, int p_stride, int p_width,
833
    int i, int j, int sy4, const int reduce_bits_vert,
834
    const __m256i *res_add_const, const int round_bits,
835
    const __m256i *res_sub_const, const __m256i *round_bits_const,
836
380k
    const __m256i *wt) {
837
380k
  (void)gamma;
838
380k
  int k, row = 0;
839
380k
  __m256i src[8];
840
380k
  const __m256i src_0 = horz_out[0];
841
380k
  const __m256i src_1 =
842
380k
      _mm256_permute2x128_si256(horz_out[0], horz_out[1], 0x21);
843
380k
  const __m256i src_2 = horz_out[1];
844
380k
  const __m256i src_3 =
845
380k
      _mm256_permute2x128_si256(horz_out[1], horz_out[2], 0x21);
846
380k
  const __m256i src_4 = horz_out[2];
847
380k
  const __m256i src_5 =
848
380k
      _mm256_permute2x128_si256(horz_out[2], horz_out[3], 0x21);
849
850
380k
  src[0] = _mm256_unpacklo_epi16(src_0, src_1);
851
380k
  src[2] = _mm256_unpacklo_epi16(src_2, src_3);
852
380k
  src[4] = _mm256_unpacklo_epi16(src_4, src_5);
853
854
380k
  src[1] = _mm256_unpackhi_epi16(src_0, src_1);
855
380k
  src[3] = _mm256_unpackhi_epi16(src_2, src_3);
856
380k
  src[5] = _mm256_unpackhi_epi16(src_4, src_5);
857
858
1.89M
  for (k = -4; k < AOMMIN(4, p_height - i - 4); k += 2) {
859
1.51M
    int sy = sy4 + delta * (k + 4);
860
1.51M
    __m256i coeffs[8];
861
1.51M
    prepare_vertical_filter_coeffs_gamma0_avx2(delta, sy, coeffs);
862
1.51M
    __m256i res_lo, res_hi;
863
1.51M
    filter_src_pixels_vertical_avx2(horz_out, src, coeffs, &res_lo, &res_hi,
864
1.51M
                                    row);
865
1.51M
    store_vertical_filter_output_avx2(&res_lo, &res_hi, res_add_const, wt,
866
1.51M
                                      res_sub_const, round_bits_const, pred,
867
1.51M
                                      conv_params, i, j, k, reduce_bits_vert,
868
1.51M
                                      p_stride, p_width, round_bits);
869
1.51M
    src[0] = src[2];
870
1.51M
    src[2] = src[4];
871
1.51M
    src[4] = src[6];
872
1.51M
    src[1] = src[3];
873
1.51M
    src[3] = src[5];
874
1.51M
    src[5] = src[7];
875
1.51M
    row += 1;
876
1.51M
  }
877
380k
}
878
879
static inline void warp_vertical_filter_delta0_avx2(
880
    uint8_t *pred, __m256i *horz_out, ConvolveParams *conv_params,
881
    int16_t gamma, int16_t delta, int p_height, int p_stride, int p_width,
882
    int i, int j, int sy4, const int reduce_bits_vert,
883
    const __m256i *res_add_const, const int round_bits,
884
    const __m256i *res_sub_const, const __m256i *round_bits_const,
885
474k
    const __m256i *wt) {
886
474k
  (void)delta;
887
474k
  int k, row = 0;
888
474k
  __m256i src[8], coeffs[8];
889
474k
  const __m256i src_0 = horz_out[0];
890
474k
  const __m256i src_1 =
891
474k
      _mm256_permute2x128_si256(horz_out[0], horz_out[1], 0x21);
892
474k
  const __m256i src_2 = horz_out[1];
893
474k
  const __m256i src_3 =
894
474k
      _mm256_permute2x128_si256(horz_out[1], horz_out[2], 0x21);
895
474k
  const __m256i src_4 = horz_out[2];
896
474k
  const __m256i src_5 =
897
474k
      _mm256_permute2x128_si256(horz_out[2], horz_out[3], 0x21);
898
899
474k
  src[0] = _mm256_unpacklo_epi16(src_0, src_1);
900
474k
  src[2] = _mm256_unpacklo_epi16(src_2, src_3);
901
474k
  src[4] = _mm256_unpacklo_epi16(src_4, src_5);
902
903
474k
  src[1] = _mm256_unpackhi_epi16(src_0, src_1);
904
474k
  src[3] = _mm256_unpackhi_epi16(src_2, src_3);
905
474k
  src[5] = _mm256_unpackhi_epi16(src_4, src_5);
906
907
474k
  prepare_vertical_filter_coeffs_delta0_avx2(gamma, sy4, coeffs);
908
909
2.35M
  for (k = -4; k < AOMMIN(4, p_height - i - 4); k += 2) {
910
1.88M
    __m256i res_lo, res_hi;
911
1.88M
    filter_src_pixels_vertical_avx2(horz_out, src, coeffs, &res_lo, &res_hi,
912
1.88M
                                    row);
913
1.88M
    store_vertical_filter_output_avx2(&res_lo, &res_hi, res_add_const, wt,
914
1.88M
                                      res_sub_const, round_bits_const, pred,
915
1.88M
                                      conv_params, i, j, k, reduce_bits_vert,
916
1.88M
                                      p_stride, p_width, round_bits);
917
1.88M
    src[0] = src[2];
918
1.88M
    src[2] = src[4];
919
1.88M
    src[4] = src[6];
920
1.88M
    src[1] = src[3];
921
1.88M
    src[3] = src[5];
922
1.88M
    src[5] = src[7];
923
1.88M
    row += 1;
924
1.88M
  }
925
474k
}
926
927
static inline void warp_vertical_filter_gamma0_delta0_avx2(
928
    uint8_t *pred, __m256i *horz_out, ConvolveParams *conv_params,
929
    int16_t gamma, int16_t delta, int p_height, int p_stride, int p_width,
930
    int i, int j, int sy4, const int reduce_bits_vert,
931
    const __m256i *res_add_const, const int round_bits,
932
    const __m256i *res_sub_const, const __m256i *round_bits_const,
933
340k
    const __m256i *wt) {
934
340k
  (void)gamma;
935
340k
  int k, row = 0;
936
340k
  __m256i src[8], coeffs[8];
937
340k
  const __m256i src_0 = horz_out[0];
938
340k
  const __m256i src_1 =
939
340k
      _mm256_permute2x128_si256(horz_out[0], horz_out[1], 0x21);
940
340k
  const __m256i src_2 = horz_out[1];
941
340k
  const __m256i src_3 =
942
340k
      _mm256_permute2x128_si256(horz_out[1], horz_out[2], 0x21);
943
340k
  const __m256i src_4 = horz_out[2];
944
340k
  const __m256i src_5 =
945
340k
      _mm256_permute2x128_si256(horz_out[2], horz_out[3], 0x21);
946
947
340k
  src[0] = _mm256_unpacklo_epi16(src_0, src_1);
948
340k
  src[2] = _mm256_unpacklo_epi16(src_2, src_3);
949
340k
  src[4] = _mm256_unpacklo_epi16(src_4, src_5);
950
951
340k
  src[1] = _mm256_unpackhi_epi16(src_0, src_1);
952
340k
  src[3] = _mm256_unpackhi_epi16(src_2, src_3);
953
340k
  src[5] = _mm256_unpackhi_epi16(src_4, src_5);
954
955
340k
  prepare_vertical_filter_coeffs_gamma0_avx2(delta, sy4, coeffs);
956
957
1.70M
  for (k = -4; k < AOMMIN(4, p_height - i - 4); k += 2) {
958
1.36M
    __m256i res_lo, res_hi;
959
1.36M
    filter_src_pixels_vertical_avx2(horz_out, src, coeffs, &res_lo, &res_hi,
960
1.36M
                                    row);
961
1.36M
    store_vertical_filter_output_avx2(&res_lo, &res_hi, res_add_const, wt,
962
1.36M
                                      res_sub_const, round_bits_const, pred,
963
1.36M
                                      conv_params, i, j, k, reduce_bits_vert,
964
1.36M
                                      p_stride, p_width, round_bits);
965
1.36M
    src[0] = src[2];
966
1.36M
    src[2] = src[4];
967
1.36M
    src[4] = src[6];
968
1.36M
    src[1] = src[3];
969
1.36M
    src[3] = src[5];
970
1.36M
    src[5] = src[7];
971
1.36M
    row += 1;
972
1.36M
  }
973
340k
}
974
975
static inline void prepare_warp_vertical_filter_avx2(
976
    uint8_t *pred, __m256i *horz_out, ConvolveParams *conv_params,
977
    int16_t gamma, int16_t delta, int p_height, int p_stride, int p_width,
978
    int i, int j, int sy4, const int reduce_bits_vert,
979
    const __m256i *res_add_const, const int round_bits,
980
    const __m256i *res_sub_const, const __m256i *round_bits_const,
981
2.00M
    const __m256i *wt) {
982
2.00M
  if (gamma == 0 && delta == 0)
983
340k
    warp_vertical_filter_gamma0_delta0_avx2(
984
340k
        pred, horz_out, conv_params, gamma, delta, p_height, p_stride, p_width,
985
340k
        i, j, sy4, reduce_bits_vert, res_add_const, round_bits, res_sub_const,
986
340k
        round_bits_const, wt);
987
1.66M
  else if (gamma == 0 && delta != 0)
988
379k
    warp_vertical_filter_gamma0_avx2(
989
379k
        pred, horz_out, conv_params, gamma, delta, p_height, p_stride, p_width,
990
379k
        i, j, sy4, reduce_bits_vert, res_add_const, round_bits, res_sub_const,
991
379k
        round_bits_const, wt);
992
1.28M
  else if (gamma != 0 && delta == 0)
993
474k
    warp_vertical_filter_delta0_avx2(
994
474k
        pred, horz_out, conv_params, gamma, delta, p_height, p_stride, p_width,
995
474k
        i, j, sy4, reduce_bits_vert, res_add_const, round_bits, res_sub_const,
996
474k
        round_bits_const, wt);
997
811k
  else
998
811k
    warp_vertical_filter_avx2(pred, horz_out, conv_params, gamma, delta,
999
811k
                              p_height, p_stride, p_width, i, j, sy4,
1000
811k
                              reduce_bits_vert, res_add_const, round_bits,
1001
811k
                              res_sub_const, round_bits_const, wt);
1002
2.00M
}
1003
1004
static inline void prepare_warp_horizontal_filter_avx2(
1005
    const uint8_t *ref, __m256i *horz_out, int stride, int32_t ix4, int32_t iy4,
1006
    int32_t sx4, int alpha, int beta, int p_height, int height, int i,
1007
    const __m256i *round_const, const __m128i *shift,
1008
1.79M
    const __m256i *shuffle_src) {
1009
1.79M
  if (alpha == 0 && beta == 0)
1010
676k
    warp_horizontal_filter_alpha0_beta0_avx2(
1011
676k
        ref, horz_out, stride, ix4, iy4, sx4, alpha, beta, p_height, height, i,
1012
676k
        round_const, shift, shuffle_src);
1013
1.12M
  else if (alpha == 0 && beta != 0)
1014
349k
    warp_horizontal_filter_alpha0_avx2(ref, horz_out, stride, ix4, iy4, sx4,
1015
349k
                                       alpha, beta, p_height, height, i,
1016
349k
                                       round_const, shift, shuffle_src);
1017
773k
  else if (alpha != 0 && beta == 0)
1018
411k
    warp_horizontal_filter_beta0_avx2(ref, horz_out, stride, ix4, iy4, sx4,
1019
411k
                                      alpha, beta, p_height, height, i,
1020
411k
                                      round_const, shift, shuffle_src);
1021
362k
  else
1022
362k
    warp_horizontal_filter_avx2(ref, horz_out, stride, ix4, iy4, sx4, alpha,
1023
362k
                                beta, p_height, height, i, round_const, shift,
1024
362k
                                shuffle_src);
1025
1.79M
}
1026
1027
void av1_warp_affine_avx2(const int32_t *mat, const uint8_t *ref, int width,
1028
                          int height, int stride, uint8_t *pred, int p_col,
1029
                          int p_row, int p_width, int p_height, int p_stride,
1030
                          int subsampling_x, int subsampling_y,
1031
                          ConvolveParams *conv_params, int16_t alpha,
1032
213k
                          int16_t beta, int16_t gamma, int16_t delta) {
1033
213k
  __m256i horz_out[8];
1034
213k
  int i, j, k;
1035
213k
  const int bd = 8;
1036
213k
  const int reduce_bits_horiz = conv_params->round_0;
1037
213k
  const int reduce_bits_vert = conv_params->is_compound
1038
213k
                                   ? conv_params->round_1
1039
213k
                                   : 2 * FILTER_BITS - reduce_bits_horiz;
1040
213k
  const int offset_bits_horiz = bd + FILTER_BITS - 1;
1041
213k
  assert(IMPLIES(conv_params->is_compound, conv_params->dst != NULL));
1042
1043
213k
  const int offset_bits_vert = bd + 2 * FILTER_BITS - reduce_bits_horiz;
1044
213k
  const __m256i reduce_bits_vert_const =
1045
213k
      _mm256_set1_epi32(((1 << reduce_bits_vert) >> 1));
1046
213k
  const __m256i res_add_const = _mm256_set1_epi32(1 << offset_bits_vert);
1047
213k
  const int round_bits =
1048
213k
      2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;
1049
213k
  const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0;
1050
213k
  assert(IMPLIES(conv_params->do_average, conv_params->is_compound));
1051
1052
213k
  const __m256i round_const = _mm256_set1_epi16(
1053
213k
      (1 << offset_bits_horiz) + ((1 << reduce_bits_horiz) >> 1));
1054
213k
  const __m128i shift = _mm_cvtsi32_si128(reduce_bits_horiz);
1055
1056
213k
  __m256i res_sub_const, round_bits_const, wt;
1057
213k
  unpack_weights_and_set_round_const_avx2(conv_params, round_bits, offset_bits,
1058
213k
                                          &res_sub_const, &round_bits_const,
1059
213k
                                          &wt);
1060
1061
213k
  __m256i res_add_const_1;
1062
213k
  if (conv_params->is_compound == 1) {
1063
9.58k
    res_add_const_1 = _mm256_add_epi32(reduce_bits_vert_const, res_add_const);
1064
203k
  } else {
1065
203k
    res_add_const_1 = _mm256_set1_epi32(-(1 << (bd + reduce_bits_vert - 1)) +
1066
203k
                                        ((1 << reduce_bits_vert) >> 1));
1067
203k
  }
1068
213k
  const int32_t const1 = alpha * (-4) + beta * (-4) +
1069
213k
                         (1 << (WARPEDDIFF_PREC_BITS - 1)) +
1070
213k
                         (WARPEDPIXEL_PREC_SHIFTS << WARPEDDIFF_PREC_BITS);
1071
213k
  const int32_t const2 = gamma * (-4) + delta * (-4) +
1072
213k
                         (1 << (WARPEDDIFF_PREC_BITS - 1)) +
1073
213k
                         (WARPEDPIXEL_PREC_SHIFTS << WARPEDDIFF_PREC_BITS);
1074
213k
  const int32_t const3 = ((1 << WARP_PARAM_REDUCE_BITS) - 1);
1075
213k
  const int16_t const4 = (1 << (bd + FILTER_BITS - reduce_bits_horiz - 1));
1076
213k
  const int16_t const5 = (1 << (FILTER_BITS - reduce_bits_horiz));
1077
1078
213k
  __m256i shuffle_src[4];
1079
213k
  shuffle_src[0] = _mm256_load_si256((__m256i *)shuffle_src0);
1080
213k
  shuffle_src[1] = _mm256_load_si256((__m256i *)shuffle_src1);
1081
213k
  shuffle_src[2] = _mm256_load_si256((__m256i *)shuffle_src2);
1082
213k
  shuffle_src[3] = _mm256_load_si256((__m256i *)shuffle_src3);
1083
1084
682k
  for (i = 0; i < p_height; i += 8) {
1085
2.47M
    for (j = 0; j < p_width; j += 8) {
1086
2.00M
      const int32_t src_x = (p_col + j + 4) << subsampling_x;
1087
2.00M
      const int32_t src_y = (p_row + i + 4) << subsampling_y;
1088
2.00M
      const int64_t dst_x =
1089
2.00M
          (int64_t)mat[2] * src_x + (int64_t)mat[3] * src_y + (int64_t)mat[0];
1090
2.00M
      const int64_t dst_y =
1091
2.00M
          (int64_t)mat[4] * src_x + (int64_t)mat[5] * src_y + (int64_t)mat[1];
1092
2.00M
      const int64_t x4 = dst_x >> subsampling_x;
1093
2.00M
      const int64_t y4 = dst_y >> subsampling_y;
1094
1095
2.00M
      int32_t ix4 = (int32_t)(x4 >> WARPEDMODEL_PREC_BITS);
1096
2.00M
      int32_t sx4 = x4 & ((1 << WARPEDMODEL_PREC_BITS) - 1);
1097
2.00M
      int32_t iy4 = (int32_t)(y4 >> WARPEDMODEL_PREC_BITS);
1098
2.00M
      int32_t sy4 = y4 & ((1 << WARPEDMODEL_PREC_BITS) - 1);
1099
1100
      // Add in all the constant terms, including rounding and offset
1101
2.00M
      sx4 += const1;
1102
2.00M
      sy4 += const2;
1103
1104
2.00M
      sx4 &= ~const3;
1105
2.00M
      sy4 &= ~const3;
1106
1107
      // Horizontal filter
1108
      // If the block is aligned such that, after clamping, every sample
1109
      // would be taken from the leftmost/rightmost column, then we can
1110
      // skip the expensive horizontal filter.
1111
1112
2.00M
      if (ix4 <= -7) {
1113
70.0k
        int iy, row = 0;
1114
560k
        for (k = -7; k <= (AOMMIN(8, p_height - i) - 2); k += 2) {
1115
490k
          iy = iy4 + k;
1116
490k
          iy = clamp(iy, 0, height - 1);
1117
490k
          const __m256i temp_0 =
1118
490k
              _mm256_set1_epi16(const4 + ref[iy * stride] * const5);
1119
490k
          iy = iy4 + k + 1;
1120
490k
          iy = clamp(iy, 0, height - 1);
1121
490k
          const __m256i temp_1 =
1122
490k
              _mm256_set1_epi16(const4 + ref[iy * stride] * const5);
1123
490k
          horz_out[row] = _mm256_blend_epi32(temp_0, temp_1, 0xf0);
1124
490k
          row += 1;
1125
490k
        }
1126
70.0k
        iy = iy4 + k;
1127
70.0k
        iy = clamp(iy, 0, height - 1);
1128
70.0k
        horz_out[row] = _mm256_set1_epi16(const4 + ref[iy * stride] * const5);
1129
1.93M
      } else if (ix4 >= width + 6) {
1130
63.9k
        int iy, row = 0;
1131
511k
        for (k = -7; k <= (AOMMIN(8, p_height - i) - 2); k += 2) {
1132
447k
          iy = iy4 + k;
1133
447k
          iy = clamp(iy, 0, height - 1);
1134
447k
          const __m256i temp_0 = _mm256_set1_epi16(
1135
447k
              const4 + ref[iy * stride + (width - 1)] * const5);
1136
447k
          iy = iy4 + k + 1;
1137
447k
          iy = clamp(iy, 0, height - 1);
1138
447k
          const __m256i temp_1 = _mm256_set1_epi16(
1139
447k
              const4 + ref[iy * stride + (width - 1)] * const5);
1140
447k
          horz_out[row] = _mm256_blend_epi32(temp_0, temp_1, 0xf0);
1141
447k
          row += 1;
1142
447k
        }
1143
63.9k
        iy = iy4 + k;
1144
63.9k
        iy = clamp(iy, 0, height - 1);
1145
63.9k
        horz_out[row] =
1146
63.9k
            _mm256_set1_epi16(const4 + ref[iy * stride + (width - 1)] * const5);
1147
1.87M
      } else if (((ix4 - 7) < 0) || ((ix4 + 9) > width)) {
1148
82.8k
        const int out_of_boundary_left = -(ix4 - 6);
1149
82.8k
        const int out_of_boundary_right = (ix4 + 8) - width;
1150
82.8k
        int iy, sx, row = 0;
1151
648k
        for (k = -7; k <= (AOMMIN(8, p_height - i) - 2); k += 2) {
1152
565k
          iy = iy4 + k;
1153
565k
          iy = clamp(iy, 0, height - 1);
1154
565k
          __m128i src0 =
1155
565k
              _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
1156
565k
          iy = iy4 + k + 1;
1157
565k
          iy = clamp(iy, 0, height - 1);
1158
565k
          __m128i src1 =
1159
565k
              _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
1160
1161
565k
          if (out_of_boundary_left >= 0) {
1162
321k
            const __m128i shuffle_reg_left =
1163
321k
                _mm_loadu_si128((__m128i *)warp_pad_left[out_of_boundary_left]);
1164
321k
            src0 = _mm_shuffle_epi8(src0, shuffle_reg_left);
1165
321k
            src1 = _mm_shuffle_epi8(src1, shuffle_reg_left);
1166
321k
          }
1167
565k
          if (out_of_boundary_right >= 0) {
1168
278k
            const __m128i shuffle_reg_right = _mm_loadu_si128(
1169
278k
                (__m128i *)warp_pad_right[out_of_boundary_right]);
1170
278k
            src0 = _mm_shuffle_epi8(src0, shuffle_reg_right);
1171
278k
            src1 = _mm_shuffle_epi8(src1, shuffle_reg_right);
1172
278k
          }
1173
565k
          sx = sx4 + beta * (k + 4);
1174
565k
          const __m256i src_01 =
1175
565k
              _mm256_inserti128_si256(_mm256_castsi128_si256(src0), src1, 0x1);
1176
565k
          horizontal_filter_avx2(src_01, horz_out, sx, alpha, beta, row,
1177
565k
                                 shuffle_src, &round_const, &shift);
1178
565k
          row += 1;
1179
565k
        }
1180
82.8k
        iy = iy4 + k;
1181
82.8k
        iy = clamp(iy, 0, height - 1);
1182
82.8k
        __m128i src = _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
1183
82.8k
        if (out_of_boundary_left >= 0) {
1184
45.8k
          const __m128i shuffle_reg_left =
1185
45.8k
              _mm_loadu_si128((__m128i *)warp_pad_left[out_of_boundary_left]);
1186
45.8k
          src = _mm_shuffle_epi8(src, shuffle_reg_left);
1187
45.8k
        }
1188
82.8k
        if (out_of_boundary_right >= 0) {
1189
39.7k
          const __m128i shuffle_reg_right =
1190
39.7k
              _mm_loadu_si128((__m128i *)warp_pad_right[out_of_boundary_right]);
1191
39.7k
          src = _mm_shuffle_epi8(src, shuffle_reg_right);
1192
39.7k
        }
1193
82.8k
        sx = sx4 + beta * (k + 4);
1194
82.8k
        const __m256i src_01 = _mm256_castsi128_si256(src);
1195
82.8k
        __m256i coeff[4];
1196
82.8k
        prepare_horizontal_filter_coeff(alpha, sx, coeff);
1197
82.8k
        filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src,
1198
82.8k
                               &round_const, &shift, row);
1199
1.79M
      } else {
1200
1.79M
        prepare_warp_horizontal_filter_avx2(
1201
1.79M
            ref, horz_out, stride, ix4, iy4, sx4, alpha, beta, p_height, height,
1202
1.79M
            i, &round_const, &shift, shuffle_src);
1203
1.79M
      }
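The four branches above are distinguished by how much of the 16-byte load window [ix4 - 7, ix4 + 8] lies inside the picture. With eight output columns at offsets -4..3 and an 8-tap kernel, the columns actually sampled run from ix4 - 7 to ix4 + 7, so ix4 <= -7 clamps every sample to column 0 and ix4 >= width + 6 clamps every sample to column width - 1; both cases synthesize the filtered rows directly from const4/const5 as derived earlier. The partial-overlap case loads the window and repairs it with the warp_pad_left/warp_pad_right shuffle tables, which replicate the nearest valid pixel into the out-of-range lanes. A scalar sketch of that padding (pad_row_scalar is a hypothetical helper for illustration, not part of this file):

  #include <stdint.h>

  /* Build the padded 16-byte window that the shuffles above produce:
   * every out-of-range column is replaced by the nearest in-range pixel. */
  static void pad_row_scalar(const uint8_t *ref_row, int width, int ix4,
                             uint8_t out[16]) {
    for (int t = 0; t < 16; ++t) {
      int col = ix4 - 7 + t;
      if (col < 0) col = 0;                  /* left edge replication  */
      if (col > width - 1) col = width - 1;  /* right edge replication */
      out[t] = ref_row[col];
    }
  }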
1204
1205
      // Vertical filter
1206
2.00M
      prepare_warp_vertical_filter_avx2(
1207
2.00M
          pred, horz_out, conv_params, gamma, delta, p_height, p_stride,
1208
2.00M
          p_width, i, j, sy4, reduce_bits_vert, &res_add_const_1, round_bits,
1209
2.00M
          &res_sub_const, &round_bits_const, &wt);
1210
2.00M
    }
1211
468k
  }
1212
213k
}
1213
1214
#endif  // !CONFIG_HIGHWAY
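For reference, the block-position arithmetic at the top of the main loop works in Q16 fixed point (WARPEDMODEL_PREC_BITS is 16), splitting each warped coordinate into an integer pixel position (ix4/iy4) and a sub-pel fraction (sx4/sy4). A worked example with hypothetical numbers, assuming no chroma subsampling and a model that is the identity plus a 2.5-pixel horizontal translation:

  /* p_col = p_row = 0, i = j = 0, subsampling_x = subsampling_y = 0,
   * mat = { 163840, 0, 65536, 0, 0, 65536 }, i.e. x' = x + 2.5 in Q16   */
  /*   src_x = (0 + 0 + 4) << 0             = 4                          */
  /*   dst_x = 65536 * 4 + 0 * 4 + 163840   = 425984                     */
  /*   x4    = 425984 >> 0                  = 425984                     */
  /*   ix4   = 425984 >> 16                 = 6      (integer column)    */
  /*   sx4   = 425984 & 0xFFFF              = 32768  (= 0.5 sub-pel)     */

sx4 and sy4 are then biased by const1/const2 and masked with ~const3, which rounds the starting filter offsets down to multiples of 1 << WARP_PARAM_REDUCE_BITS to match the reduced precision of the per-pixel deltas (alpha, beta, gamma, delta).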