Coverage Report

Created: 2025-12-31 06:49

/src/aom/av1/common/x86/warp_plane_avx2.c
Line | Count | Source
1
/*
2
 * Copyright (c) 2019, Alliance for Open Media. All rights reserved.
3
 *
4
 * This source code is subject to the terms of the BSD 2 Clause License and
5
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
6
 * was not distributed with this source code in the LICENSE file, you can
7
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
8
 * Media Patent License 1.0 was not distributed with this source code in the
9
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
10
 */
11
12
#include <immintrin.h>
13
#include "config/av1_rtcd.h"
14
#include "av1/common/warped_motion.h"
15
#include "aom_dsp/x86/synonyms.h"
16
17
#if !CONFIG_HIGHWAY
18
19
DECLARE_ALIGNED(32, static const uint8_t, shuffle_alpha0_mask01_avx2[32]) = {
20
  0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,
21
  0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1
22
};
23
24
DECLARE_ALIGNED(32, static const uint8_t, shuffle_alpha0_mask23_avx2[32]) = {
25
  2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3,
26
  2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3
27
};
28
29
DECLARE_ALIGNED(32, static const uint8_t, shuffle_alpha0_mask45_avx2[32]) = {
30
  4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5,
31
  4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5
32
};
33
34
DECLARE_ALIGNED(32, static const uint8_t, shuffle_alpha0_mask67_avx2[32]) = {
35
  6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7,
36
  6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7
37
};
38
39
DECLARE_ALIGNED(32, static const uint8_t, shuffle_gamma0_mask0_avx2[32]) = {
40
  0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3,
41
  0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3
42
};
43
44
DECLARE_ALIGNED(32, static const uint8_t, shuffle_gamma0_mask1_avx2[32]) = {
45
  4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7,
46
  4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7
47
};
48
49
DECLARE_ALIGNED(32, static const uint8_t, shuffle_gamma0_mask2_avx2[32]) = {
50
  8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11,
51
  8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11
52
};
53
54
DECLARE_ALIGNED(32, static const uint8_t, shuffle_gamma0_mask3_avx2[32]) = {
55
  12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15,
56
  12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15
57
};
58
59
DECLARE_ALIGNED(32, static const uint8_t,
60
                shuffle_src0[32]) = { 0, 2, 2, 4, 4, 6, 6, 8, 1, 3, 3,
61
                                      5, 5, 7, 7, 9, 0, 2, 2, 4, 4, 6,
62
                                      6, 8, 1, 3, 3, 5, 5, 7, 7, 9 };
63
64
DECLARE_ALIGNED(32, static const uint8_t,
65
                shuffle_src1[32]) = { 4,  6,  6,  8,  8,  10, 10, 12, 5,  7, 7,
66
                                      9,  9,  11, 11, 13, 4,  6,  6,  8,  8, 10,
67
                                      10, 12, 5,  7,  7,  9,  9,  11, 11, 13 };
68
69
DECLARE_ALIGNED(32, static const uint8_t,
70
                shuffle_src2[32]) = { 1, 3, 3, 5, 5,  7, 7, 9, 2, 4, 4,
71
                                      6, 6, 8, 8, 10, 1, 3, 3, 5, 5, 7,
72
                                      7, 9, 2, 4, 4,  6, 6, 8, 8, 10 };
73
74
DECLARE_ALIGNED(32, static const uint8_t,
75
                shuffle_src3[32]) = { 5,  7,  7,  9,  9,  11, 11, 13, 6,  8, 8,
76
                                      10, 10, 12, 12, 14, 5,  7,  7,  9,  9, 11,
77
                                      11, 13, 6,  8,  8,  10, 10, 12, 12, 14 };
78
79
static inline void filter_src_pixels_avx2(const __m256i src, __m256i *horz_out,
80
                                          __m256i *coeff,
81
                                          const __m256i *shuffle_src,
82
                                          const __m256i *round_const,
83
6.55M
                                          const __m128i *shift, int row) {
84
6.55M
  const __m256i src_0 = _mm256_shuffle_epi8(src, shuffle_src[0]);
85
6.55M
  const __m256i src_1 = _mm256_shuffle_epi8(src, shuffle_src[1]);
86
6.55M
  const __m256i src_2 = _mm256_shuffle_epi8(src, shuffle_src[2]);
87
6.55M
  const __m256i src_3 = _mm256_shuffle_epi8(src, shuffle_src[3]);
88
89
6.55M
  const __m256i res_02 = _mm256_maddubs_epi16(src_0, coeff[0]);
90
6.55M
  const __m256i res_46 = _mm256_maddubs_epi16(src_1, coeff[1]);
91
6.55M
  const __m256i res_13 = _mm256_maddubs_epi16(src_2, coeff[2]);
92
6.55M
  const __m256i res_57 = _mm256_maddubs_epi16(src_3, coeff[3]);
93
94
6.55M
  const __m256i res_even = _mm256_add_epi16(res_02, res_46);
95
6.55M
  const __m256i res_odd = _mm256_add_epi16(res_13, res_57);
96
6.55M
  const __m256i res =
97
6.55M
      _mm256_add_epi16(_mm256_add_epi16(res_even, res_odd), *round_const);
98
6.55M
  horz_out[row] = _mm256_srl_epi16(res, *shift);
99
6.55M
}
100
101
static inline void prepare_horizontal_filter_coeff_avx2(int alpha, int beta,
102
                                                        int sx,
103
1.55M
                                                        __m256i *coeff) {
104
1.55M
  __m128i tmp_0 = _mm_loadl_epi64(
105
1.55M
      (__m128i *)&av1_filter_8bit[((unsigned)(sx + 0 * alpha)) >>
106
1.55M
                                  WARPEDDIFF_PREC_BITS]);
107
1.55M
  __m128i tmp_1 = _mm_loadl_epi64(
108
1.55M
      (__m128i *)&av1_filter_8bit[((unsigned)(sx + 1 * alpha)) >>
109
1.55M
                                  WARPEDDIFF_PREC_BITS]);
110
1.55M
  __m128i tmp_2 = _mm_loadl_epi64(
111
1.55M
      (__m128i *)&av1_filter_8bit[((unsigned)(sx + 2 * alpha)) >>
112
1.55M
                                  WARPEDDIFF_PREC_BITS]);
113
1.55M
  __m128i tmp_3 = _mm_loadl_epi64(
114
1.55M
      (__m128i *)&av1_filter_8bit[((unsigned)(sx + 3 * alpha)) >>
115
1.55M
                                  WARPEDDIFF_PREC_BITS]);
116
117
1.55M
  __m128i tmp_4 = _mm_loadl_epi64(
118
1.55M
      (__m128i *)&av1_filter_8bit[((unsigned)(sx + 4 * alpha)) >>
119
1.55M
                                  WARPEDDIFF_PREC_BITS]);
120
1.55M
  __m128i tmp_5 = _mm_loadl_epi64(
121
1.55M
      (__m128i *)&av1_filter_8bit[((unsigned)(sx + 5 * alpha)) >>
122
1.55M
                                  WARPEDDIFF_PREC_BITS]);
123
1.55M
  __m128i tmp_6 = _mm_loadl_epi64(
124
1.55M
      (__m128i *)&av1_filter_8bit[((unsigned)(sx + 6 * alpha)) >>
125
1.55M
                                  WARPEDDIFF_PREC_BITS]);
126
1.55M
  __m128i tmp_7 = _mm_loadl_epi64(
127
1.55M
      (__m128i *)&av1_filter_8bit[((unsigned)(sx + 7 * alpha)) >>
128
1.55M
                                  WARPEDDIFF_PREC_BITS]);
129
130
1.55M
  __m256i tmp0_256 = _mm256_castsi128_si256(tmp_0);
131
1.55M
  __m256i tmp2_256 = _mm256_castsi128_si256(tmp_2);
132
1.55M
  __m256i tmp1_256 = _mm256_castsi128_si256(tmp_1);
133
1.55M
  __m256i tmp3_256 = _mm256_castsi128_si256(tmp_3);
134
135
1.55M
  __m256i tmp4_256 = _mm256_castsi128_si256(tmp_4);
136
1.55M
  __m256i tmp6_256 = _mm256_castsi128_si256(tmp_6);
137
1.55M
  __m256i tmp5_256 = _mm256_castsi128_si256(tmp_5);
138
1.55M
  __m256i tmp7_256 = _mm256_castsi128_si256(tmp_7);
139
140
1.55M
  __m128i tmp_8 = _mm_loadl_epi64(
141
1.55M
      (__m128i *)&av1_filter_8bit[(unsigned)((sx + beta) + 0 * alpha) >>
142
1.55M
                                  WARPEDDIFF_PREC_BITS]);
143
1.55M
  tmp0_256 = _mm256_inserti128_si256(tmp0_256, tmp_8, 1);
144
145
1.55M
  __m128i tmp_9 = _mm_loadl_epi64(
146
1.55M
      (__m128i *)&av1_filter_8bit[(unsigned)((sx + beta) + 1 * alpha) >>
147
1.55M
                                  WARPEDDIFF_PREC_BITS]);
148
1.55M
  tmp1_256 = _mm256_inserti128_si256(tmp1_256, tmp_9, 1);
149
150
1.55M
  __m128i tmp_10 = _mm_loadl_epi64(
151
1.55M
      (__m128i *)&av1_filter_8bit[(unsigned)((sx + beta) + 2 * alpha) >>
152
1.55M
                                  WARPEDDIFF_PREC_BITS]);
153
1.55M
  tmp2_256 = _mm256_inserti128_si256(tmp2_256, tmp_10, 1);
154
155
1.55M
  __m128i tmp_11 = _mm_loadl_epi64(
156
1.55M
      (__m128i *)&av1_filter_8bit[(unsigned)((sx + beta) + 3 * alpha) >>
157
1.55M
                                  WARPEDDIFF_PREC_BITS]);
158
1.55M
  tmp3_256 = _mm256_inserti128_si256(tmp3_256, tmp_11, 1);
159
160
1.55M
  tmp_2 = _mm_loadl_epi64(
161
1.55M
      (__m128i *)&av1_filter_8bit[(unsigned)((sx + beta) + 4 * alpha) >>
162
1.55M
                                  WARPEDDIFF_PREC_BITS]);
163
1.55M
  tmp4_256 = _mm256_inserti128_si256(tmp4_256, tmp_2, 1);
164
165
1.55M
  tmp_3 = _mm_loadl_epi64(
166
1.55M
      (__m128i *)&av1_filter_8bit[(unsigned)((sx + beta) + 5 * alpha) >>
167
1.55M
                                  WARPEDDIFF_PREC_BITS]);
168
1.55M
  tmp5_256 = _mm256_inserti128_si256(tmp5_256, tmp_3, 1);
169
170
1.55M
  tmp_6 = _mm_loadl_epi64(
171
1.55M
      (__m128i *)&av1_filter_8bit[(unsigned)((sx + beta) + 6 * alpha) >>
172
1.55M
                                  WARPEDDIFF_PREC_BITS]);
173
1.55M
  tmp6_256 = _mm256_inserti128_si256(tmp6_256, tmp_6, 1);
174
175
1.55M
  tmp_7 = _mm_loadl_epi64(
176
1.55M
      (__m128i *)&av1_filter_8bit[(unsigned)((sx + beta) + 7 * alpha) >>
177
1.55M
                                  WARPEDDIFF_PREC_BITS]);
178
1.55M
  tmp7_256 = _mm256_inserti128_si256(tmp7_256, tmp_7, 1);
179
180
1.55M
  const __m256i tmp_12 = _mm256_unpacklo_epi16(tmp0_256, tmp2_256);
181
1.55M
  const __m256i tmp_13 = _mm256_unpacklo_epi16(tmp1_256, tmp3_256);
182
1.55M
  const __m256i tmp_14 = _mm256_unpacklo_epi16(tmp4_256, tmp6_256);
183
1.55M
  const __m256i tmp_15 = _mm256_unpacklo_epi16(tmp5_256, tmp7_256);
184
185
1.55M
  const __m256i res_0 = _mm256_unpacklo_epi32(tmp_12, tmp_14);
186
1.55M
  const __m256i res_1 = _mm256_unpackhi_epi32(tmp_12, tmp_14);
187
1.55M
  const __m256i res_2 = _mm256_unpacklo_epi32(tmp_13, tmp_15);
188
1.55M
  const __m256i res_3 = _mm256_unpackhi_epi32(tmp_13, tmp_15);
189
190
1.55M
  coeff[0] = _mm256_unpacklo_epi64(res_0, res_2);
191
1.55M
  coeff[1] = _mm256_unpackhi_epi64(res_0, res_2);
192
1.55M
  coeff[2] = _mm256_unpacklo_epi64(res_1, res_3);
193
1.55M
  coeff[3] = _mm256_unpackhi_epi64(res_1, res_3);
194
1.55M
}
195
196
static inline void prepare_horizontal_filter_coeff_beta0_avx2(int alpha, int sx,
197
212k
                                                              __m256i *coeff) {
198
212k
  __m128i tmp_0 = _mm_loadl_epi64(
199
212k
      (__m128i *)&av1_filter_8bit[(sx + 0 * alpha) >> WARPEDDIFF_PREC_BITS]);
200
212k
  __m128i tmp_1 = _mm_loadl_epi64(
201
212k
      (__m128i *)&av1_filter_8bit[(sx + 1 * alpha) >> WARPEDDIFF_PREC_BITS]);
202
212k
  __m128i tmp_2 = _mm_loadl_epi64(
203
212k
      (__m128i *)&av1_filter_8bit[(sx + 2 * alpha) >> WARPEDDIFF_PREC_BITS]);
204
212k
  __m128i tmp_3 = _mm_loadl_epi64(
205
212k
      (__m128i *)&av1_filter_8bit[(sx + 3 * alpha) >> WARPEDDIFF_PREC_BITS]);
206
212k
  __m128i tmp_4 = _mm_loadl_epi64(
207
212k
      (__m128i *)&av1_filter_8bit[(sx + 4 * alpha) >> WARPEDDIFF_PREC_BITS]);
208
212k
  __m128i tmp_5 = _mm_loadl_epi64(
209
212k
      (__m128i *)&av1_filter_8bit[(sx + 5 * alpha) >> WARPEDDIFF_PREC_BITS]);
210
212k
  __m128i tmp_6 = _mm_loadl_epi64(
211
212k
      (__m128i *)&av1_filter_8bit[(sx + 6 * alpha) >> WARPEDDIFF_PREC_BITS]);
212
212k
  __m128i tmp_7 = _mm_loadl_epi64(
213
212k
      (__m128i *)&av1_filter_8bit[(sx + 7 * alpha) >> WARPEDDIFF_PREC_BITS]);
214
215
212k
  tmp_0 = _mm_unpacklo_epi16(tmp_0, tmp_2);
216
212k
  tmp_1 = _mm_unpacklo_epi16(tmp_1, tmp_3);
217
212k
  tmp_4 = _mm_unpacklo_epi16(tmp_4, tmp_6);
218
212k
  tmp_5 = _mm_unpacklo_epi16(tmp_5, tmp_7);
219
220
212k
  const __m256i tmp_12 = _mm256_broadcastsi128_si256(tmp_0);
221
212k
  const __m256i tmp_13 = _mm256_broadcastsi128_si256(tmp_1);
222
212k
  const __m256i tmp_14 = _mm256_broadcastsi128_si256(tmp_4);
223
212k
  const __m256i tmp_15 = _mm256_broadcastsi128_si256(tmp_5);
224
225
212k
  const __m256i res_0 = _mm256_unpacklo_epi32(tmp_12, tmp_14);
226
212k
  const __m256i res_1 = _mm256_unpackhi_epi32(tmp_12, tmp_14);
227
212k
  const __m256i res_2 = _mm256_unpacklo_epi32(tmp_13, tmp_15);
228
212k
  const __m256i res_3 = _mm256_unpackhi_epi32(tmp_13, tmp_15);
229
230
212k
  coeff[0] = _mm256_unpacklo_epi64(res_0, res_2);
231
212k
  coeff[1] = _mm256_unpackhi_epi64(res_0, res_2);
232
212k
  coeff[2] = _mm256_unpacklo_epi64(res_1, res_3);
233
212k
  coeff[3] = _mm256_unpackhi_epi64(res_1, res_3);
234
212k
}
235
236
static inline void prepare_horizontal_filter_coeff_alpha0_avx2(int beta, int sx,
237
1.20M
                                                               __m256i *coeff) {
238
1.20M
  const __m128i tmp_0 =
239
1.20M
      _mm_loadl_epi64((__m128i *)&av1_filter_8bit[sx >> WARPEDDIFF_PREC_BITS]);
240
1.20M
  const __m128i tmp_1 = _mm_loadl_epi64(
241
1.20M
      (__m128i *)&av1_filter_8bit[(sx + beta) >> WARPEDDIFF_PREC_BITS]);
242
243
1.20M
  const __m256i res_0 =
244
1.20M
      _mm256_inserti128_si256(_mm256_castsi128_si256(tmp_0), tmp_1, 0x1);
245
246
1.20M
  coeff[0] = _mm256_shuffle_epi8(
247
1.20M
      res_0, _mm256_load_si256((__m256i *)shuffle_alpha0_mask01_avx2));
248
1.20M
  coeff[1] = _mm256_shuffle_epi8(
249
1.20M
      res_0, _mm256_load_si256((__m256i *)shuffle_alpha0_mask23_avx2));
250
1.20M
  coeff[2] = _mm256_shuffle_epi8(
251
1.20M
      res_0, _mm256_load_si256((__m256i *)shuffle_alpha0_mask45_avx2));
252
1.20M
  coeff[3] = _mm256_shuffle_epi8(
253
1.20M
      res_0, _mm256_load_si256((__m256i *)shuffle_alpha0_mask67_avx2));
254
1.20M
}
255
256
static inline void horizontal_filter_avx2(const __m256i src, __m256i *horz_out,
257
                                          int sx, int alpha, int beta, int row,
258
                                          const __m256i *shuffle_src,
259
                                          const __m256i *round_const,
260
1.55M
                                          const __m128i *shift) {
261
1.55M
  __m256i coeff[4];
262
1.55M
  prepare_horizontal_filter_coeff_avx2(alpha, beta, sx, coeff);
263
1.55M
  filter_src_pixels_avx2(src, horz_out, coeff, shuffle_src, round_const, shift,
264
1.55M
                         row);
265
1.55M
}
266
static inline void prepare_horizontal_filter_coeff(int alpha, int sx,
267
224k
                                                   __m256i *coeff) {
268
224k
  const __m128i tmp_0 = _mm_loadl_epi64(
269
224k
      (__m128i *)&av1_filter_8bit[(sx + 0 * alpha) >> WARPEDDIFF_PREC_BITS]);
270
224k
  const __m128i tmp_1 = _mm_loadl_epi64(
271
224k
      (__m128i *)&av1_filter_8bit[(sx + 1 * alpha) >> WARPEDDIFF_PREC_BITS]);
272
224k
  const __m128i tmp_2 = _mm_loadl_epi64(
273
224k
      (__m128i *)&av1_filter_8bit[(sx + 2 * alpha) >> WARPEDDIFF_PREC_BITS]);
274
224k
  const __m128i tmp_3 = _mm_loadl_epi64(
275
224k
      (__m128i *)&av1_filter_8bit[(sx + 3 * alpha) >> WARPEDDIFF_PREC_BITS]);
276
224k
  const __m128i tmp_4 = _mm_loadl_epi64(
277
224k
      (__m128i *)&av1_filter_8bit[(sx + 4 * alpha) >> WARPEDDIFF_PREC_BITS]);
278
224k
  const __m128i tmp_5 = _mm_loadl_epi64(
279
224k
      (__m128i *)&av1_filter_8bit[(sx + 5 * alpha) >> WARPEDDIFF_PREC_BITS]);
280
224k
  const __m128i tmp_6 = _mm_loadl_epi64(
281
224k
      (__m128i *)&av1_filter_8bit[(sx + 6 * alpha) >> WARPEDDIFF_PREC_BITS]);
282
224k
  const __m128i tmp_7 = _mm_loadl_epi64(
283
224k
      (__m128i *)&av1_filter_8bit[(sx + 7 * alpha) >> WARPEDDIFF_PREC_BITS]);
284
285
224k
  const __m128i tmp_8 = _mm_unpacklo_epi16(tmp_0, tmp_2);
286
224k
  const __m128i tmp_9 = _mm_unpacklo_epi16(tmp_1, tmp_3);
287
224k
  const __m128i tmp_10 = _mm_unpacklo_epi16(tmp_4, tmp_6);
288
224k
  const __m128i tmp_11 = _mm_unpacklo_epi16(tmp_5, tmp_7);
289
290
224k
  const __m128i tmp_12 = _mm_unpacklo_epi32(tmp_8, tmp_10);
291
224k
  const __m128i tmp_13 = _mm_unpackhi_epi32(tmp_8, tmp_10);
292
224k
  const __m128i tmp_14 = _mm_unpacklo_epi32(tmp_9, tmp_11);
293
224k
  const __m128i tmp_15 = _mm_unpackhi_epi32(tmp_9, tmp_11);
294
295
224k
  coeff[0] = _mm256_castsi128_si256(_mm_unpacklo_epi64(tmp_12, tmp_14));
296
224k
  coeff[1] = _mm256_castsi128_si256(_mm_unpackhi_epi64(tmp_12, tmp_14));
297
224k
  coeff[2] = _mm256_castsi128_si256(_mm_unpacklo_epi64(tmp_13, tmp_15));
298
224k
  coeff[3] = _mm256_castsi128_si256(_mm_unpackhi_epi64(tmp_13, tmp_15));
299
224k
}
300
301
static inline void warp_horizontal_filter_avx2(
302
    const uint8_t *ref, __m256i *horz_out, int stride, int32_t ix4, int32_t iy4,
303
    int32_t sx4, int alpha, int beta, int p_height, int height, int i,
304
    const __m256i *round_const, const __m128i *shift,
305
196k
    const __m256i *shuffle_src) {
306
196k
  int k, iy, sx, row = 0;
307
196k
  __m256i coeff[4];
308
1.56M
  for (k = -7; k <= (AOMMIN(8, p_height - i) - 2); k += 2) {
309
1.37M
    iy = iy4 + k;
310
1.37M
    iy = clamp(iy, 0, height - 1);
311
1.37M
    const __m128i src_0 =
312
1.37M
        _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
313
1.37M
    iy = iy4 + k + 1;
314
1.37M
    iy = clamp(iy, 0, height - 1);
315
1.37M
    const __m128i src_1 =
316
1.37M
        _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
317
1.37M
    const __m256i src_01 =
318
1.37M
        _mm256_inserti128_si256(_mm256_castsi128_si256(src_0), src_1, 0x1);
319
1.37M
    sx = sx4 + beta * (k + 4);
320
1.37M
    horizontal_filter_avx2(src_01, horz_out, sx, alpha, beta, row, shuffle_src,
321
1.37M
                           round_const, shift);
322
1.37M
    row += 1;
323
1.37M
  }
324
196k
  iy = iy4 + k;
325
196k
  iy = clamp(iy, 0, height - 1);
326
196k
  const __m256i src_01 = _mm256_castsi128_si256(
327
196k
      _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7)));
328
196k
  sx = sx4 + beta * (k + 4);
329
196k
  prepare_horizontal_filter_coeff(alpha, sx, coeff);
330
196k
  filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src, round_const,
331
196k
                         shift, row);
332
196k
}
333
334
static inline void warp_horizontal_filter_alpha0_avx2(
335
    const uint8_t *ref, __m256i *horz_out, int stride, int32_t ix4, int32_t iy4,
336
    int32_t sx4, int alpha, int beta, int p_height, int height, int i,
337
    const __m256i *round_const, const __m128i *shift,
338
116k
    const __m256i *shuffle_src) {
339
116k
  (void)alpha;
340
116k
  int k, iy, sx, row = 0;
341
116k
  __m256i coeff[4];
342
927k
  for (k = -7; k <= (AOMMIN(8, p_height - i) - 2); k += 2) {
343
811k
    iy = iy4 + k;
344
811k
    iy = clamp(iy, 0, height - 1);
345
811k
    const __m128i src_0 =
346
811k
        _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
347
811k
    iy = iy4 + k + 1;
348
811k
    iy = clamp(iy, 0, height - 1);
349
811k
    const __m128i src_1 =
350
811k
        _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
351
811k
    const __m256i src_01 =
352
811k
        _mm256_inserti128_si256(_mm256_castsi128_si256(src_0), src_1, 0x1);
353
811k
    sx = sx4 + beta * (k + 4);
354
811k
    prepare_horizontal_filter_coeff_alpha0_avx2(beta, sx, coeff);
355
811k
    filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src, round_const,
356
811k
                           shift, row);
357
811k
    row += 1;
358
811k
  }
359
116k
  iy = iy4 + k;
360
116k
  iy = clamp(iy, 0, height - 1);
361
116k
  const __m256i src_01 = _mm256_castsi128_si256(
362
116k
      _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7)));
363
116k
  sx = sx4 + beta * (k + 4);
364
116k
  prepare_horizontal_filter_coeff_alpha0_avx2(beta, sx, coeff);
365
116k
  filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src, round_const,
366
116k
                         shift, row);
367
116k
}
368
369
static inline void warp_horizontal_filter_beta0_avx2(
370
    const uint8_t *ref, __m256i *horz_out, int stride, int32_t ix4, int32_t iy4,
371
    int32_t sx4, int alpha, int beta, int p_height, int height, int i,
372
    const __m256i *round_const, const __m128i *shift,
373
212k
    const __m256i *shuffle_src) {
374
212k
  (void)beta;
375
212k
  int k, iy, row = 0;
376
212k
  __m256i coeff[4];
377
212k
  prepare_horizontal_filter_coeff_beta0_avx2(alpha, sx4, coeff);
378
1.69M
  for (k = -7; k <= (AOMMIN(8, p_height - i) - 2); k += 2) {
379
1.47M
    iy = iy4 + k;
380
1.47M
    iy = clamp(iy, 0, height - 1);
381
1.47M
    const __m128i src_0 =
382
1.47M
        _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
383
1.47M
    iy = iy4 + k + 1;
384
1.47M
    iy = clamp(iy, 0, height - 1);
385
1.47M
    const __m128i src_1 =
386
1.47M
        _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
387
1.47M
    const __m256i src_01 =
388
1.47M
        _mm256_inserti128_si256(_mm256_castsi128_si256(src_0), src_1, 0x1);
389
1.47M
    filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src, round_const,
390
1.47M
                           shift, row);
391
1.47M
    row += 1;
392
1.47M
  }
393
212k
  iy = iy4 + k;
394
212k
  iy = clamp(iy, 0, height - 1);
395
212k
  const __m256i src_01 = _mm256_castsi128_si256(
396
212k
      _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7)));
397
212k
  filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src, round_const,
398
212k
                         shift, row);
399
212k
}
400
401
static inline void warp_horizontal_filter_alpha0_beta0_avx2(
402
    const uint8_t *ref, __m256i *horz_out, int stride, int32_t ix4, int32_t iy4,
403
    int32_t sx4, int alpha, int beta, int p_height, int height, int i,
404
    const __m256i *round_const, const __m128i *shift,
405
279k
    const __m256i *shuffle_src) {
406
279k
  (void)alpha;
407
279k
  int k, iy, row = 0;
408
279k
  __m256i coeff[4];
409
279k
  prepare_horizontal_filter_coeff_alpha0_avx2(beta, sx4, coeff);
410
2.22M
  for (k = -7; k <= (AOMMIN(8, p_height - i) - 2); k += 2) {
411
1.94M
    iy = iy4 + k;
412
1.94M
    iy = clamp(iy, 0, height - 1);
413
1.94M
    const __m128i src0 =
414
1.94M
        _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
415
1.94M
    iy = iy4 + k + 1;
416
1.94M
    iy = clamp(iy, 0, height - 1);
417
1.94M
    const __m128i src1 =
418
1.94M
        _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
419
1.94M
    const __m256i src_01 =
420
1.94M
        _mm256_inserti128_si256(_mm256_castsi128_si256(src0), src1, 0x1);
421
1.94M
    filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src, round_const,
422
1.94M
                           shift, row);
423
1.94M
    row += 1;
424
1.94M
  }
425
279k
  iy = iy4 + k;
426
279k
  iy = clamp(iy, 0, height - 1);
427
279k
  const __m256i src_01 = _mm256_castsi128_si256(
428
279k
      _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7)));
429
279k
  filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src, round_const,
430
279k
                         shift, row);
431
279k
}
432
433
static inline void unpack_weights_and_set_round_const_avx2(
434
    ConvolveParams *conv_params, const int round_bits, const int offset_bits,
435
99.6k
    __m256i *res_sub_const, __m256i *round_bits_const, __m256i *wt) {
436
99.6k
  *res_sub_const =
437
99.6k
      _mm256_set1_epi16(-(1 << (offset_bits - conv_params->round_1)) -
438
99.6k
                        (1 << (offset_bits - conv_params->round_1 - 1)));
439
99.6k
  *round_bits_const = _mm256_set1_epi16(((1 << round_bits) >> 1));
440
441
99.6k
  const int w0 = conv_params->fwd_offset;
442
99.6k
  const int w1 = conv_params->bck_offset;
443
99.6k
  const __m256i wt0 = _mm256_set1_epi16((short)w0);
444
99.6k
  const __m256i wt1 = _mm256_set1_epi16((short)w1);
445
99.6k
  *wt = _mm256_unpacklo_epi16(wt0, wt1);
446
99.6k
}
447
448
static inline void prepare_vertical_filter_coeffs_avx2(int gamma, int delta,
449
                                                       int sy,
450
1.40M
                                                       __m256i *coeffs) {
451
1.40M
  __m128i filt_00 =
452
1.40M
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
453
1.40M
                                  ((sy + 0 * gamma) >> WARPEDDIFF_PREC_BITS)));
454
1.40M
  __m128i filt_01 =
455
1.40M
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
456
1.40M
                                  ((sy + 2 * gamma) >> WARPEDDIFF_PREC_BITS)));
457
1.40M
  __m128i filt_02 =
458
1.40M
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
459
1.40M
                                  ((sy + 4 * gamma) >> WARPEDDIFF_PREC_BITS)));
460
1.40M
  __m128i filt_03 =
461
1.40M
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
462
1.40M
                                  ((sy + 6 * gamma) >> WARPEDDIFF_PREC_BITS)));
463
464
1.40M
  __m128i filt_10 = _mm_loadu_si128(
465
1.40M
      (__m128i *)(av1_warped_filter +
466
1.40M
                  (((sy + delta) + 0 * gamma) >> WARPEDDIFF_PREC_BITS)));
467
1.40M
  __m128i filt_11 = _mm_loadu_si128(
468
1.40M
      (__m128i *)(av1_warped_filter +
469
1.40M
                  (((sy + delta) + 2 * gamma) >> WARPEDDIFF_PREC_BITS)));
470
1.40M
  __m128i filt_12 = _mm_loadu_si128(
471
1.40M
      (__m128i *)(av1_warped_filter +
472
1.40M
                  (((sy + delta) + 4 * gamma) >> WARPEDDIFF_PREC_BITS)));
473
1.40M
  __m128i filt_13 = _mm_loadu_si128(
474
1.40M
      (__m128i *)(av1_warped_filter +
475
1.40M
                  (((sy + delta) + 6 * gamma) >> WARPEDDIFF_PREC_BITS)));
476
477
1.40M
  __m256i filt_0 =
478
1.40M
      _mm256_inserti128_si256(_mm256_castsi128_si256(filt_00), filt_10, 0x1);
479
1.40M
  __m256i filt_1 =
480
1.40M
      _mm256_inserti128_si256(_mm256_castsi128_si256(filt_01), filt_11, 0x1);
481
1.40M
  __m256i filt_2 =
482
1.40M
      _mm256_inserti128_si256(_mm256_castsi128_si256(filt_02), filt_12, 0x1);
483
1.40M
  __m256i filt_3 =
484
1.40M
      _mm256_inserti128_si256(_mm256_castsi128_si256(filt_03), filt_13, 0x1);
485
486
1.40M
  __m256i res_0 = _mm256_unpacklo_epi32(filt_0, filt_1);
487
1.40M
  __m256i res_1 = _mm256_unpacklo_epi32(filt_2, filt_3);
488
1.40M
  __m256i res_2 = _mm256_unpackhi_epi32(filt_0, filt_1);
489
1.40M
  __m256i res_3 = _mm256_unpackhi_epi32(filt_2, filt_3);
490
491
1.40M
  coeffs[0] = _mm256_unpacklo_epi64(res_0, res_1);
492
1.40M
  coeffs[1] = _mm256_unpackhi_epi64(res_0, res_1);
493
1.40M
  coeffs[2] = _mm256_unpacklo_epi64(res_2, res_3);
494
1.40M
  coeffs[3] = _mm256_unpackhi_epi64(res_2, res_3);
495
496
1.40M
  filt_00 =
497
1.40M
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
498
1.40M
                                  ((sy + 1 * gamma) >> WARPEDDIFF_PREC_BITS)));
499
1.40M
  filt_01 =
500
1.40M
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
501
1.40M
                                  ((sy + 3 * gamma) >> WARPEDDIFF_PREC_BITS)));
502
1.40M
  filt_02 =
503
1.40M
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
504
1.40M
                                  ((sy + 5 * gamma) >> WARPEDDIFF_PREC_BITS)));
505
1.40M
  filt_03 =
506
1.40M
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
507
1.40M
                                  ((sy + 7 * gamma) >> WARPEDDIFF_PREC_BITS)));
508
509
1.40M
  filt_10 = _mm_loadu_si128(
510
1.40M
      (__m128i *)(av1_warped_filter +
511
1.40M
                  (((sy + delta) + 1 * gamma) >> WARPEDDIFF_PREC_BITS)));
512
1.40M
  filt_11 = _mm_loadu_si128(
513
1.40M
      (__m128i *)(av1_warped_filter +
514
1.40M
                  (((sy + delta) + 3 * gamma) >> WARPEDDIFF_PREC_BITS)));
515
1.40M
  filt_12 = _mm_loadu_si128(
516
1.40M
      (__m128i *)(av1_warped_filter +
517
1.40M
                  (((sy + delta) + 5 * gamma) >> WARPEDDIFF_PREC_BITS)));
518
1.40M
  filt_13 = _mm_loadu_si128(
519
1.40M
      (__m128i *)(av1_warped_filter +
520
1.40M
                  (((sy + delta) + 7 * gamma) >> WARPEDDIFF_PREC_BITS)));
521
522
1.40M
  filt_0 =
523
1.40M
      _mm256_inserti128_si256(_mm256_castsi128_si256(filt_00), filt_10, 0x1);
524
1.40M
  filt_1 =
525
1.40M
      _mm256_inserti128_si256(_mm256_castsi128_si256(filt_01), filt_11, 0x1);
526
1.40M
  filt_2 =
527
1.40M
      _mm256_inserti128_si256(_mm256_castsi128_si256(filt_02), filt_12, 0x1);
528
1.40M
  filt_3 =
529
1.40M
      _mm256_inserti128_si256(_mm256_castsi128_si256(filt_03), filt_13, 0x1);
530
531
1.40M
  res_0 = _mm256_unpacklo_epi32(filt_0, filt_1);
532
1.40M
  res_1 = _mm256_unpacklo_epi32(filt_2, filt_3);
533
1.40M
  res_2 = _mm256_unpackhi_epi32(filt_0, filt_1);
534
1.40M
  res_3 = _mm256_unpackhi_epi32(filt_2, filt_3);
535
536
1.40M
  coeffs[4] = _mm256_unpacklo_epi64(res_0, res_1);
537
1.40M
  coeffs[5] = _mm256_unpackhi_epi64(res_0, res_1);
538
1.40M
  coeffs[6] = _mm256_unpacklo_epi64(res_2, res_3);
539
1.40M
  coeffs[7] = _mm256_unpackhi_epi64(res_2, res_3);
540
1.40M
}
541
542
static inline void prepare_vertical_filter_coeffs_delta0_avx2(int gamma, int sy,
543
169k
                                                              __m256i *coeffs) {
544
169k
  __m128i filt_00 =
545
169k
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
546
169k
                                  ((sy + 0 * gamma) >> WARPEDDIFF_PREC_BITS)));
547
169k
  __m128i filt_01 =
548
169k
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
549
169k
                                  ((sy + 2 * gamma) >> WARPEDDIFF_PREC_BITS)));
550
169k
  __m128i filt_02 =
551
169k
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
552
169k
                                  ((sy + 4 * gamma) >> WARPEDDIFF_PREC_BITS)));
553
169k
  __m128i filt_03 =
554
169k
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
555
169k
                                  ((sy + 6 * gamma) >> WARPEDDIFF_PREC_BITS)));
556
557
169k
  __m256i filt_0 = _mm256_broadcastsi128_si256(filt_00);
558
169k
  __m256i filt_1 = _mm256_broadcastsi128_si256(filt_01);
559
169k
  __m256i filt_2 = _mm256_broadcastsi128_si256(filt_02);
560
169k
  __m256i filt_3 = _mm256_broadcastsi128_si256(filt_03);
561
562
169k
  __m256i res_0 = _mm256_unpacklo_epi32(filt_0, filt_1);
563
169k
  __m256i res_1 = _mm256_unpacklo_epi32(filt_2, filt_3);
564
169k
  __m256i res_2 = _mm256_unpackhi_epi32(filt_0, filt_1);
565
169k
  __m256i res_3 = _mm256_unpackhi_epi32(filt_2, filt_3);
566
567
169k
  coeffs[0] = _mm256_unpacklo_epi64(res_0, res_1);
568
169k
  coeffs[1] = _mm256_unpackhi_epi64(res_0, res_1);
569
169k
  coeffs[2] = _mm256_unpacklo_epi64(res_2, res_3);
570
169k
  coeffs[3] = _mm256_unpackhi_epi64(res_2, res_3);
571
572
169k
  filt_00 =
573
169k
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
574
169k
                                  ((sy + 1 * gamma) >> WARPEDDIFF_PREC_BITS)));
575
169k
  filt_01 =
576
169k
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
577
169k
                                  ((sy + 3 * gamma) >> WARPEDDIFF_PREC_BITS)));
578
169k
  filt_02 =
579
169k
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
580
169k
                                  ((sy + 5 * gamma) >> WARPEDDIFF_PREC_BITS)));
581
169k
  filt_03 =
582
169k
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
583
169k
                                  ((sy + 7 * gamma) >> WARPEDDIFF_PREC_BITS)));
584
585
169k
  filt_0 = _mm256_broadcastsi128_si256(filt_00);
586
169k
  filt_1 = _mm256_broadcastsi128_si256(filt_01);
587
169k
  filt_2 = _mm256_broadcastsi128_si256(filt_02);
588
169k
  filt_3 = _mm256_broadcastsi128_si256(filt_03);
589
590
169k
  res_0 = _mm256_unpacklo_epi32(filt_0, filt_1);
591
169k
  res_1 = _mm256_unpacklo_epi32(filt_2, filt_3);
592
169k
  res_2 = _mm256_unpackhi_epi32(filt_0, filt_1);
593
169k
  res_3 = _mm256_unpackhi_epi32(filt_2, filt_3);
594
595
169k
  coeffs[4] = _mm256_unpacklo_epi64(res_0, res_1);
596
169k
  coeffs[5] = _mm256_unpackhi_epi64(res_0, res_1);
597
169k
  coeffs[6] = _mm256_unpacklo_epi64(res_2, res_3);
598
169k
  coeffs[7] = _mm256_unpackhi_epi64(res_2, res_3);
599
169k
}
600
601
static inline void prepare_vertical_filter_coeffs_gamma0_avx2(int delta, int sy,
602
892k
                                                              __m256i *coeffs) {
603
892k
  const __m128i filt_0 = _mm_loadu_si128(
604
892k
      (__m128i *)(av1_warped_filter + (sy >> WARPEDDIFF_PREC_BITS)));
605
892k
  const __m128i filt_1 = _mm_loadu_si128(
606
892k
      (__m128i *)(av1_warped_filter + ((sy + delta) >> WARPEDDIFF_PREC_BITS)));
607
608
892k
  __m256i res_0 =
609
892k
      _mm256_inserti128_si256(_mm256_castsi128_si256(filt_0), filt_1, 0x1);
610
611
892k
  coeffs[0] = _mm256_shuffle_epi8(
612
892k
      res_0, _mm256_load_si256((__m256i *)shuffle_gamma0_mask0_avx2));
613
892k
  coeffs[1] = _mm256_shuffle_epi8(
614
892k
      res_0, _mm256_load_si256((__m256i *)shuffle_gamma0_mask1_avx2));
615
892k
  coeffs[2] = _mm256_shuffle_epi8(
616
892k
      res_0, _mm256_load_si256((__m256i *)shuffle_gamma0_mask2_avx2));
617
892k
  coeffs[3] = _mm256_shuffle_epi8(
618
892k
      res_0, _mm256_load_si256((__m256i *)shuffle_gamma0_mask3_avx2));
619
620
892k
  coeffs[4] = coeffs[0];
621
892k
  coeffs[5] = coeffs[1];
622
892k
  coeffs[6] = coeffs[2];
623
892k
  coeffs[7] = coeffs[3];
624
892k
}
625
626
static inline void filter_src_pixels_vertical_avx2(__m256i *horz_out,
627
                                                   __m256i *src,
628
                                                   __m256i *coeffs,
629
                                                   __m256i *res_lo,
630
3.46M
                                                   __m256i *res_hi, int row) {
631
3.46M
  const __m256i src_6 = horz_out[row + 3];
632
3.46M
  const __m256i src_7 =
633
3.46M
      _mm256_permute2x128_si256(horz_out[row + 3], horz_out[row + 4], 0x21);
634
635
3.46M
  src[6] = _mm256_unpacklo_epi16(src_6, src_7);
636
637
3.46M
  const __m256i res_0 = _mm256_madd_epi16(src[0], coeffs[0]);
638
3.46M
  const __m256i res_2 = _mm256_madd_epi16(src[2], coeffs[1]);
639
3.46M
  const __m256i res_4 = _mm256_madd_epi16(src[4], coeffs[2]);
640
3.46M
  const __m256i res_6 = _mm256_madd_epi16(src[6], coeffs[3]);
641
642
3.46M
  const __m256i res_even = _mm256_add_epi32(_mm256_add_epi32(res_0, res_2),
643
3.46M
                                            _mm256_add_epi32(res_4, res_6));
644
645
3.46M
  src[7] = _mm256_unpackhi_epi16(src_6, src_7);
646
647
3.46M
  const __m256i res_1 = _mm256_madd_epi16(src[1], coeffs[4]);
648
3.46M
  const __m256i res_3 = _mm256_madd_epi16(src[3], coeffs[5]);
649
3.46M
  const __m256i res_5 = _mm256_madd_epi16(src[5], coeffs[6]);
650
3.46M
  const __m256i res_7 = _mm256_madd_epi16(src[7], coeffs[7]);
651
652
3.46M
  const __m256i res_odd = _mm256_add_epi32(_mm256_add_epi32(res_1, res_3),
653
3.46M
                                           _mm256_add_epi32(res_5, res_7));
654
655
  // Rearrange pixels back into the order 0 ... 7
656
3.46M
  *res_lo = _mm256_unpacklo_epi32(res_even, res_odd);
657
3.46M
  *res_hi = _mm256_unpackhi_epi32(res_even, res_odd);
658
3.46M
}
659
660
static inline void store_vertical_filter_output_avx2(
661
    const __m256i *res_lo, const __m256i *res_hi, const __m256i *res_add_const,
662
    const __m256i *wt, const __m256i *res_sub_const,
663
    const __m256i *round_bits_const, uint8_t *pred, ConvolveParams *conv_params,
664
    int i, int j, int k, const int reduce_bits_vert, int p_stride, int p_width,
665
3.46M
    const int round_bits) {
666
3.46M
  __m256i res_lo_1 = *res_lo;
667
3.46M
  __m256i res_hi_1 = *res_hi;
668
669
3.46M
  if (conv_params->is_compound) {
670
109k
    __m128i *const p_0 =
671
109k
        (__m128i *)&conv_params->dst[(i + k + 4) * conv_params->dst_stride + j];
672
109k
    __m128i *const p_1 =
673
109k
        (__m128i *)&conv_params
674
109k
            ->dst[(i + (k + 1) + 4) * conv_params->dst_stride + j];
675
676
109k
    res_lo_1 = _mm256_srai_epi32(_mm256_add_epi32(res_lo_1, *res_add_const),
677
109k
                                 reduce_bits_vert);
678
679
109k
    const __m256i temp_lo_16 = _mm256_packus_epi32(res_lo_1, res_lo_1);
680
109k
    __m256i res_lo_16;
681
109k
    if (conv_params->do_average) {
682
61.7k
      __m128i *const dst8_0 = (__m128i *)&pred[(i + k + 4) * p_stride + j];
683
61.7k
      __m128i *const dst8_1 =
684
61.7k
          (__m128i *)&pred[(i + (k + 1) + 4) * p_stride + j];
685
61.7k
      const __m128i p_16_0 = _mm_loadl_epi64(p_0);
686
61.7k
      const __m128i p_16_1 = _mm_loadl_epi64(p_1);
687
61.7k
      const __m256i p_16 =
688
61.7k
          _mm256_inserti128_si256(_mm256_castsi128_si256(p_16_0), p_16_1, 1);
689
61.7k
      if (conv_params->use_dist_wtd_comp_avg) {
690
30.0k
        const __m256i p_16_lo = _mm256_unpacklo_epi16(p_16, temp_lo_16);
691
30.0k
        const __m256i wt_res_lo = _mm256_madd_epi16(p_16_lo, *wt);
692
30.0k
        const __m256i shifted_32 =
693
30.0k
            _mm256_srai_epi32(wt_res_lo, DIST_PRECISION_BITS);
694
30.0k
        res_lo_16 = _mm256_packus_epi32(shifted_32, shifted_32);
695
31.6k
      } else {
696
31.6k
        res_lo_16 = _mm256_srai_epi16(_mm256_add_epi16(p_16, temp_lo_16), 1);
697
31.6k
      }
698
61.7k
      res_lo_16 = _mm256_add_epi16(res_lo_16, *res_sub_const);
699
61.7k
      res_lo_16 = _mm256_srai_epi16(
700
61.7k
          _mm256_add_epi16(res_lo_16, *round_bits_const), round_bits);
701
61.7k
      const __m256i res_8_lo = _mm256_packus_epi16(res_lo_16, res_lo_16);
702
61.7k
      const __m128i res_8_lo_0 = _mm256_castsi256_si128(res_8_lo);
703
61.7k
      const __m128i res_8_lo_1 = _mm256_extracti128_si256(res_8_lo, 1);
704
61.7k
      *(int *)dst8_0 = _mm_cvtsi128_si32(res_8_lo_0);
705
61.7k
      *(int *)dst8_1 = _mm_cvtsi128_si32(res_8_lo_1);
706
61.7k
    } else {
707
47.6k
      const __m128i temp_lo_16_0 = _mm256_castsi256_si128(temp_lo_16);
708
47.6k
      const __m128i temp_lo_16_1 = _mm256_extracti128_si256(temp_lo_16, 1);
709
47.6k
      _mm_storel_epi64(p_0, temp_lo_16_0);
710
47.6k
      _mm_storel_epi64(p_1, temp_lo_16_1);
711
47.6k
    }
712
109k
    if (p_width > 4) {
713
109k
      __m128i *const p4_0 =
714
109k
          (__m128i *)&conv_params
715
109k
              ->dst[(i + k + 4) * conv_params->dst_stride + j + 4];
716
109k
      __m128i *const p4_1 =
717
109k
          (__m128i *)&conv_params
718
109k
              ->dst[(i + (k + 1) + 4) * conv_params->dst_stride + j + 4];
719
109k
      res_hi_1 = _mm256_srai_epi32(_mm256_add_epi32(res_hi_1, *res_add_const),
720
109k
                                   reduce_bits_vert);
721
109k
      const __m256i temp_hi_16 = _mm256_packus_epi32(res_hi_1, res_hi_1);
722
109k
      __m256i res_hi_16;
723
109k
      if (conv_params->do_average) {
724
61.7k
        __m128i *const dst8_4_0 =
725
61.7k
            (__m128i *)&pred[(i + k + 4) * p_stride + j + 4];
726
61.7k
        __m128i *const dst8_4_1 =
727
61.7k
            (__m128i *)&pred[(i + (k + 1) + 4) * p_stride + j + 4];
728
61.7k
        const __m128i p4_16_0 = _mm_loadl_epi64(p4_0);
729
61.7k
        const __m128i p4_16_1 = _mm_loadl_epi64(p4_1);
730
61.7k
        const __m256i p4_16 = _mm256_inserti128_si256(
731
61.7k
            _mm256_castsi128_si256(p4_16_0), p4_16_1, 1);
732
61.7k
        if (conv_params->use_dist_wtd_comp_avg) {
733
30.0k
          const __m256i p_16_hi = _mm256_unpacklo_epi16(p4_16, temp_hi_16);
734
30.0k
          const __m256i wt_res_hi = _mm256_madd_epi16(p_16_hi, *wt);
735
30.0k
          const __m256i shifted_32 =
736
30.0k
              _mm256_srai_epi32(wt_res_hi, DIST_PRECISION_BITS);
737
30.0k
          res_hi_16 = _mm256_packus_epi32(shifted_32, shifted_32);
738
31.6k
        } else {
739
31.6k
          res_hi_16 = _mm256_srai_epi16(_mm256_add_epi16(p4_16, temp_hi_16), 1);
740
31.6k
        }
741
61.7k
        res_hi_16 = _mm256_add_epi16(res_hi_16, *res_sub_const);
742
61.7k
        res_hi_16 = _mm256_srai_epi16(
743
61.7k
            _mm256_add_epi16(res_hi_16, *round_bits_const), round_bits);
744
61.7k
        __m256i res_8_hi = _mm256_packus_epi16(res_hi_16, res_hi_16);
745
61.7k
        const __m128i res_8_hi_0 = _mm256_castsi256_si128(res_8_hi);
746
61.7k
        const __m128i res_8_hi_1 = _mm256_extracti128_si256(res_8_hi, 1);
747
61.7k
        *(int *)dst8_4_0 = _mm_cvtsi128_si32(res_8_hi_0);
748
61.7k
        *(int *)dst8_4_1 = _mm_cvtsi128_si32(res_8_hi_1);
749
61.7k
      } else {
750
47.6k
        const __m128i temp_hi_16_0 = _mm256_castsi256_si128(temp_hi_16);
751
47.6k
        const __m128i temp_hi_16_1 = _mm256_extracti128_si256(temp_hi_16, 1);
752
47.6k
        _mm_storel_epi64(p4_0, temp_hi_16_0);
753
47.6k
        _mm_storel_epi64(p4_1, temp_hi_16_1);
754
47.6k
      }
755
109k
    }
756
3.35M
  } else {
757
3.35M
    const __m256i res_lo_round = _mm256_srai_epi32(
758
3.35M
        _mm256_add_epi32(res_lo_1, *res_add_const), reduce_bits_vert);
759
3.35M
    const __m256i res_hi_round = _mm256_srai_epi32(
760
3.35M
        _mm256_add_epi32(res_hi_1, *res_add_const), reduce_bits_vert);
761
762
3.35M
    const __m256i res_16bit = _mm256_packs_epi32(res_lo_round, res_hi_round);
763
3.35M
    const __m256i res_8bit = _mm256_packus_epi16(res_16bit, res_16bit);
764
3.35M
    const __m128i res_8bit0 = _mm256_castsi256_si128(res_8bit);
765
3.35M
    const __m128i res_8bit1 = _mm256_extracti128_si256(res_8bit, 1);
766
767
    // Store, blending with 'pred' if needed
768
3.35M
    __m128i *const p = (__m128i *)&pred[(i + k + 4) * p_stride + j];
769
3.35M
    __m128i *const p1 = (__m128i *)&pred[(i + (k + 1) + 4) * p_stride + j];
770
771
3.35M
    if (p_width == 4) {
772
0
      *(int *)p = _mm_cvtsi128_si32(res_8bit0);
773
0
      *(int *)p1 = _mm_cvtsi128_si32(res_8bit1);
774
3.35M
    } else {
775
3.35M
      _mm_storel_epi64(p, res_8bit0);
776
3.35M
      _mm_storel_epi64(p1, res_8bit1);
777
3.35M
    }
778
3.35M
  }
779
3.46M
}
780
781
static inline void warp_vertical_filter_avx2(
782
    uint8_t *pred, __m256i *horz_out, ConvolveParams *conv_params,
783
    int16_t gamma, int16_t delta, int p_height, int p_stride, int p_width,
784
    int i, int j, int sy4, const int reduce_bits_vert,
785
    const __m256i *res_add_const, const int round_bits,
786
    const __m256i *res_sub_const, const __m256i *round_bits_const,
787
351k
    const __m256i *wt) {
788
351k
  int k, row = 0;
789
351k
  __m256i src[8];
790
351k
  const __m256i src_0 = horz_out[0];
791
351k
  const __m256i src_1 =
792
351k
      _mm256_permute2x128_si256(horz_out[0], horz_out[1], 0x21);
793
351k
  const __m256i src_2 = horz_out[1];
794
351k
  const __m256i src_3 =
795
351k
      _mm256_permute2x128_si256(horz_out[1], horz_out[2], 0x21);
796
351k
  const __m256i src_4 = horz_out[2];
797
351k
  const __m256i src_5 =
798
351k
      _mm256_permute2x128_si256(horz_out[2], horz_out[3], 0x21);
799
800
351k
  src[0] = _mm256_unpacklo_epi16(src_0, src_1);
801
351k
  src[2] = _mm256_unpacklo_epi16(src_2, src_3);
802
351k
  src[4] = _mm256_unpacklo_epi16(src_4, src_5);
803
804
351k
  src[1] = _mm256_unpackhi_epi16(src_0, src_1);
805
351k
  src[3] = _mm256_unpackhi_epi16(src_2, src_3);
806
351k
  src[5] = _mm256_unpackhi_epi16(src_4, src_5);
807
808
1.75M
  for (k = -4; k < AOMMIN(4, p_height - i - 4); k += 2) {
809
1.40M
    int sy = sy4 + delta * (k + 4);
810
1.40M
    __m256i coeffs[8];
811
1.40M
    prepare_vertical_filter_coeffs_avx2(gamma, delta, sy, coeffs);
812
1.40M
    __m256i res_lo, res_hi;
813
1.40M
    filter_src_pixels_vertical_avx2(horz_out, src, coeffs, &res_lo, &res_hi,
814
1.40M
                                    row);
815
1.40M
    store_vertical_filter_output_avx2(&res_lo, &res_hi, res_add_const, wt,
816
1.40M
                                      res_sub_const, round_bits_const, pred,
817
1.40M
                                      conv_params, i, j, k, reduce_bits_vert,
818
1.40M
                                      p_stride, p_width, round_bits);
819
1.40M
    src[0] = src[2];
820
1.40M
    src[2] = src[4];
821
1.40M
    src[4] = src[6];
822
1.40M
    src[1] = src[3];
823
1.40M
    src[3] = src[5];
824
1.40M
    src[5] = src[7];
825
826
1.40M
    row += 1;
827
1.40M
  }
828
351k
}
829
830
static inline void warp_vertical_filter_gamma0_avx2(
831
    uint8_t *pred, __m256i *horz_out, ConvolveParams *conv_params,
832
    int16_t gamma, int16_t delta, int p_height, int p_stride, int p_width,
833
    int i, int j, int sy4, const int reduce_bits_vert,
834
    const __m256i *res_add_const, const int round_bits,
835
    const __m256i *res_sub_const, const __m256i *round_bits_const,
836
181k
    const __m256i *wt) {
837
181k
  (void)gamma;
838
181k
  int k, row = 0;
839
181k
  __m256i src[8];
840
181k
  const __m256i src_0 = horz_out[0];
841
181k
  const __m256i src_1 =
842
181k
      _mm256_permute2x128_si256(horz_out[0], horz_out[1], 0x21);
843
181k
  const __m256i src_2 = horz_out[1];
844
181k
  const __m256i src_3 =
845
181k
      _mm256_permute2x128_si256(horz_out[1], horz_out[2], 0x21);
846
181k
  const __m256i src_4 = horz_out[2];
847
181k
  const __m256i src_5 =
848
181k
      _mm256_permute2x128_si256(horz_out[2], horz_out[3], 0x21);
849
850
181k
  src[0] = _mm256_unpacklo_epi16(src_0, src_1);
851
181k
  src[2] = _mm256_unpacklo_epi16(src_2, src_3);
852
181k
  src[4] = _mm256_unpacklo_epi16(src_4, src_5);
853
854
181k
  src[1] = _mm256_unpackhi_epi16(src_0, src_1);
855
181k
  src[3] = _mm256_unpackhi_epi16(src_2, src_3);
856
181k
  src[5] = _mm256_unpackhi_epi16(src_4, src_5);
857
858
907k
  for (k = -4; k < AOMMIN(4, p_height - i - 4); k += 2) {
859
725k
    int sy = sy4 + delta * (k + 4);
860
725k
    __m256i coeffs[8];
861
725k
    prepare_vertical_filter_coeffs_gamma0_avx2(delta, sy, coeffs);
862
725k
    __m256i res_lo, res_hi;
863
725k
    filter_src_pixels_vertical_avx2(horz_out, src, coeffs, &res_lo, &res_hi,
864
725k
                                    row);
865
725k
    store_vertical_filter_output_avx2(&res_lo, &res_hi, res_add_const, wt,
866
725k
                                      res_sub_const, round_bits_const, pred,
867
725k
                                      conv_params, i, j, k, reduce_bits_vert,
868
725k
                                      p_stride, p_width, round_bits);
869
725k
    src[0] = src[2];
870
725k
    src[2] = src[4];
871
725k
    src[4] = src[6];
872
725k
    src[1] = src[3];
873
725k
    src[3] = src[5];
874
725k
    src[5] = src[7];
875
725k
    row += 1;
876
725k
  }
877
181k
}
878
879
static inline void warp_vertical_filter_delta0_avx2(
880
    uint8_t *pred, __m256i *horz_out, ConvolveParams *conv_params,
881
    int16_t gamma, int16_t delta, int p_height, int p_stride, int p_width,
882
    int i, int j, int sy4, const int reduce_bits_vert,
883
    const __m256i *res_add_const, const int round_bits,
884
    const __m256i *res_sub_const, const __m256i *round_bits_const,
885
169k
    const __m256i *wt) {
886
169k
  (void)delta;
887
169k
  int k, row = 0;
888
169k
  __m256i src[8], coeffs[8];
889
169k
  const __m256i src_0 = horz_out[0];
890
169k
  const __m256i src_1 =
891
169k
      _mm256_permute2x128_si256(horz_out[0], horz_out[1], 0x21);
892
169k
  const __m256i src_2 = horz_out[1];
893
169k
  const __m256i src_3 =
894
169k
      _mm256_permute2x128_si256(horz_out[1], horz_out[2], 0x21);
895
169k
  const __m256i src_4 = horz_out[2];
896
169k
  const __m256i src_5 =
897
169k
      _mm256_permute2x128_si256(horz_out[2], horz_out[3], 0x21);
898
899
169k
  src[0] = _mm256_unpacklo_epi16(src_0, src_1);
900
169k
  src[2] = _mm256_unpacklo_epi16(src_2, src_3);
901
169k
  src[4] = _mm256_unpacklo_epi16(src_4, src_5);
902
903
169k
  src[1] = _mm256_unpackhi_epi16(src_0, src_1);
904
169k
  src[3] = _mm256_unpackhi_epi16(src_2, src_3);
905
169k
  src[5] = _mm256_unpackhi_epi16(src_4, src_5);
906
907
169k
  prepare_vertical_filter_coeffs_delta0_avx2(gamma, sy4, coeffs);
908
909
843k
  for (k = -4; k < AOMMIN(4, p_height - i - 4); k += 2) {
910
673k
    __m256i res_lo, res_hi;
911
673k
    filter_src_pixels_vertical_avx2(horz_out, src, coeffs, &res_lo, &res_hi,
912
673k
                                    row);
913
673k
    store_vertical_filter_output_avx2(&res_lo, &res_hi, res_add_const, wt,
914
673k
                                      res_sub_const, round_bits_const, pred,
915
673k
                                      conv_params, i, j, k, reduce_bits_vert,
916
673k
                                      p_stride, p_width, round_bits);
917
673k
    src[0] = src[2];
918
673k
    src[2] = src[4];
919
673k
    src[4] = src[6];
920
673k
    src[1] = src[3];
921
673k
    src[3] = src[5];
922
673k
    src[5] = src[7];
923
673k
    row += 1;
924
673k
  }
925
169k
}
926
927
static inline void warp_vertical_filter_gamma0_delta0_avx2(
928
    uint8_t *pred, __m256i *horz_out, ConvolveParams *conv_params,
929
    int16_t gamma, int16_t delta, int p_height, int p_stride, int p_width,
930
    int i, int j, int sy4, const int reduce_bits_vert,
931
    const __m256i *res_add_const, const int round_bits,
932
    const __m256i *res_sub_const, const __m256i *round_bits_const,
933
166k
    const __m256i *wt) {
934
166k
  (void)gamma;
935
166k
  int k, row = 0;
936
166k
  __m256i src[8], coeffs[8];
937
166k
  const __m256i src_0 = horz_out[0];
938
166k
  const __m256i src_1 =
939
166k
      _mm256_permute2x128_si256(horz_out[0], horz_out[1], 0x21);
940
166k
  const __m256i src_2 = horz_out[1];
941
166k
  const __m256i src_3 =
942
166k
      _mm256_permute2x128_si256(horz_out[1], horz_out[2], 0x21);
943
166k
  const __m256i src_4 = horz_out[2];
944
166k
  const __m256i src_5 =
945
166k
      _mm256_permute2x128_si256(horz_out[2], horz_out[3], 0x21);
946
947
166k
  src[0] = _mm256_unpacklo_epi16(src_0, src_1);
948
166k
  src[2] = _mm256_unpacklo_epi16(src_2, src_3);
949
166k
  src[4] = _mm256_unpacklo_epi16(src_4, src_5);
950
951
166k
  src[1] = _mm256_unpackhi_epi16(src_0, src_1);
952
166k
  src[3] = _mm256_unpackhi_epi16(src_2, src_3);
953
166k
  src[5] = _mm256_unpackhi_epi16(src_4, src_5);
954
955
166k
  prepare_vertical_filter_coeffs_gamma0_avx2(delta, sy4, coeffs);
956
957
833k
  for (k = -4; k < AOMMIN(4, p_height - i - 4); k += 2) {
958
666k
    __m256i res_lo, res_hi;
959
666k
    filter_src_pixels_vertical_avx2(horz_out, src, coeffs, &res_lo, &res_hi,
960
666k
                                    row);
961
666k
    store_vertical_filter_output_avx2(&res_lo, &res_hi, res_add_const, wt,
962
666k
                                      res_sub_const, round_bits_const, pred,
963
666k
                                      conv_params, i, j, k, reduce_bits_vert,
964
666k
                                      p_stride, p_width, round_bits);
965
666k
    src[0] = src[2];
966
666k
    src[2] = src[4];
967
666k
    src[4] = src[6];
968
666k
    src[1] = src[3];
969
666k
    src[3] = src[5];
970
666k
    src[5] = src[7];
971
666k
    row += 1;
972
666k
  }
973
166k
}
974
975
static inline void prepare_warp_vertical_filter_avx2(
976
    uint8_t *pred, __m256i *horz_out, ConvolveParams *conv_params,
977
    int16_t gamma, int16_t delta, int p_height, int p_stride, int p_width,
978
    int i, int j, int sy4, const int reduce_bits_vert,
979
    const __m256i *res_add_const, const int round_bits,
980
    const __m256i *res_sub_const, const __m256i *round_bits_const,
981
869k
    const __m256i *wt) {
982
869k
  if (gamma == 0 && delta == 0)
983
166k
    warp_vertical_filter_gamma0_delta0_avx2(
984
166k
        pred, horz_out, conv_params, gamma, delta, p_height, p_stride, p_width,
985
166k
        i, j, sy4, reduce_bits_vert, res_add_const, round_bits, res_sub_const,
986
166k
        round_bits_const, wt);
987
702k
  else if (gamma == 0 && delta != 0)
988
181k
    warp_vertical_filter_gamma0_avx2(
989
181k
        pred, horz_out, conv_params, gamma, delta, p_height, p_stride, p_width,
990
181k
        i, j, sy4, reduce_bits_vert, res_add_const, round_bits, res_sub_const,
991
181k
        round_bits_const, wt);
992
520k
  else if (gamma != 0 && delta == 0)
993
169k
    warp_vertical_filter_delta0_avx2(
994
169k
        pred, horz_out, conv_params, gamma, delta, p_height, p_stride, p_width,
995
169k
        i, j, sy4, reduce_bits_vert, res_add_const, round_bits, res_sub_const,
996
169k
        round_bits_const, wt);
997
351k
  else
998
351k
    warp_vertical_filter_avx2(pred, horz_out, conv_params, gamma, delta,
999
351k
                              p_height, p_stride, p_width, i, j, sy4,
1000
351k
                              reduce_bits_vert, res_add_const, round_bits,
1001
351k
                              res_sub_const, round_bits_const, wt);
1002
869k
}
1003
1004
static inline void prepare_warp_horizontal_filter_avx2(
1005
    const uint8_t *ref, __m256i *horz_out, int stride, int32_t ix4, int32_t iy4,
1006
    int32_t sx4, int alpha, int beta, int p_height, int height, int i,
1007
    const __m256i *round_const, const __m128i *shift,
1008
804k
    const __m256i *shuffle_src) {
1009
804k
  if (alpha == 0 && beta == 0)
1010
279k
    warp_horizontal_filter_alpha0_beta0_avx2(
1011
279k
        ref, horz_out, stride, ix4, iy4, sx4, alpha, beta, p_height, height, i,
1012
279k
        round_const, shift, shuffle_src);
1013
525k
  else if (alpha == 0 && beta != 0)
1014
116k
    warp_horizontal_filter_alpha0_avx2(ref, horz_out, stride, ix4, iy4, sx4,
1015
116k
                                       alpha, beta, p_height, height, i,
1016
116k
                                       round_const, shift, shuffle_src);
1017
408k
  else if (alpha != 0 && beta == 0)
1018
212k
    warp_horizontal_filter_beta0_avx2(ref, horz_out, stride, ix4, iy4, sx4,
1019
212k
                                      alpha, beta, p_height, height, i,
1020
212k
                                      round_const, shift, shuffle_src);
1021
196k
  else
1022
196k
    warp_horizontal_filter_avx2(ref, horz_out, stride, ix4, iy4, sx4, alpha,
1023
196k
                                beta, p_height, height, i, round_const, shift,
1024
196k
                                shuffle_src);
1025
804k
}
1026
1027
void av1_warp_affine_avx2(const int32_t *mat, const uint8_t *ref, int width,
1028
                          int height, int stride, uint8_t *pred, int p_col,
1029
                          int p_row, int p_width, int p_height, int p_stride,
1030
                          int subsampling_x, int subsampling_y,
1031
                          ConvolveParams *conv_params, int16_t alpha,
1032
99.6k
                          int16_t beta, int16_t gamma, int16_t delta) {
1033
99.6k
  __m256i horz_out[8];
1034
99.6k
  int i, j, k;
1035
99.6k
  const int bd = 8;
1036
99.6k
  const int reduce_bits_horiz = conv_params->round_0;
1037
99.6k
  const int reduce_bits_vert = conv_params->is_compound
1038
99.6k
                                   ? conv_params->round_1
1039
99.6k
                                   : 2 * FILTER_BITS - reduce_bits_horiz;
1040
99.6k
  const int offset_bits_horiz = bd + FILTER_BITS - 1;
1041
99.6k
  assert(IMPLIES(conv_params->is_compound, conv_params->dst != NULL));
1042
1043
99.6k
  const int offset_bits_vert = bd + 2 * FILTER_BITS - reduce_bits_horiz;
1044
99.6k
  const __m256i reduce_bits_vert_const =
1045
99.6k
      _mm256_set1_epi32(((1 << reduce_bits_vert) >> 1));
1046
99.6k
  const __m256i res_add_const = _mm256_set1_epi32(1 << offset_bits_vert);
1047
99.6k
  const int round_bits =
1048
99.6k
      2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;
1049
99.6k
  const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0;
1050
99.6k
  assert(IMPLIES(conv_params->do_average, conv_params->is_compound));
1051
1052
99.6k
  const __m256i round_const = _mm256_set1_epi16(
1053
99.6k
      (1 << offset_bits_horiz) + ((1 << reduce_bits_horiz) >> 1));
1054
99.6k
  const __m128i shift = _mm_cvtsi32_si128(reduce_bits_horiz);
1055
1056
99.6k
  __m256i res_sub_const, round_bits_const, wt;
1057
99.6k
  unpack_weights_and_set_round_const_avx2(conv_params, round_bits, offset_bits,
1058
99.6k
                                          &res_sub_const, &round_bits_const,
1059
99.6k
                                          &wt);
1060
1061
99.6k
  __m256i res_add_const_1;
1062
99.6k
  if (conv_params->is_compound == 1) {
1063
5.16k
    res_add_const_1 = _mm256_add_epi32(reduce_bits_vert_const, res_add_const);
1064
94.4k
  } else {
1065
94.4k
    res_add_const_1 = _mm256_set1_epi32(-(1 << (bd + reduce_bits_vert - 1)) +
1066
94.4k
                                        ((1 << reduce_bits_vert) >> 1));
1067
94.4k
  }
1068
99.6k
  const int32_t const1 = alpha * (-4) + beta * (-4) +
1069
99.6k
                         (1 << (WARPEDDIFF_PREC_BITS - 1)) +
1070
99.6k
                         (WARPEDPIXEL_PREC_SHIFTS << WARPEDDIFF_PREC_BITS);
1071
99.6k
  const int32_t const2 = gamma * (-4) + delta * (-4) +
1072
99.6k
                         (1 << (WARPEDDIFF_PREC_BITS - 1)) +
1073
99.6k
                         (WARPEDPIXEL_PREC_SHIFTS << WARPEDDIFF_PREC_BITS);
1074
99.6k
  const int32_t const3 = ((1 << WARP_PARAM_REDUCE_BITS) - 1);
1075
99.6k
  const int16_t const4 = (1 << (bd + FILTER_BITS - reduce_bits_horiz - 1));
1076
99.6k
  const int16_t const5 = (1 << (FILTER_BITS - reduce_bits_horiz));
1077
1078
99.6k
  __m256i shuffle_src[4];
1079
99.6k
  shuffle_src[0] = _mm256_load_si256((__m256i *)shuffle_src0);
1080
99.6k
  shuffle_src[1] = _mm256_load_si256((__m256i *)shuffle_src1);
1081
99.6k
  shuffle_src[2] = _mm256_load_si256((__m256i *)shuffle_src2);
1082
99.6k
  shuffle_src[3] = _mm256_load_si256((__m256i *)shuffle_src3);
1083
1084
320k
  for (i = 0; i < p_height; i += 8) {
1085
1.09M
    for (j = 0; j < p_width; j += 8) {
1086
871k
      const int32_t src_x = (p_col + j + 4) << subsampling_x;
1087
871k
      const int32_t src_y = (p_row + i + 4) << subsampling_y;
1088
871k
      const int64_t dst_x =
1089
871k
          (int64_t)mat[2] * src_x + (int64_t)mat[3] * src_y + (int64_t)mat[0];
1090
871k
      const int64_t dst_y =
1091
871k
          (int64_t)mat[4] * src_x + (int64_t)mat[5] * src_y + (int64_t)mat[1];
1092
871k
      const int64_t x4 = dst_x >> subsampling_x;
1093
871k
      const int64_t y4 = dst_y >> subsampling_y;
1094
1095
871k
      int32_t ix4 = (int32_t)(x4 >> WARPEDMODEL_PREC_BITS);
1096
871k
      int32_t sx4 = x4 & ((1 << WARPEDMODEL_PREC_BITS) - 1);
1097
871k
      int32_t iy4 = (int32_t)(y4 >> WARPEDMODEL_PREC_BITS);
1098
871k
      int32_t sy4 = y4 & ((1 << WARPEDMODEL_PREC_BITS) - 1);
1099
1100
      // Add in all the constant terms, including rounding and offset
1101
871k
      sx4 += const1;
1102
871k
      sy4 += const2;
1103
1104
871k
      sx4 &= ~const3;
1105
871k
      sy4 &= ~const3;
1106
1107
      // Horizontal filter
1108
      // If the block is aligned such that, after clamping, every sample
1109
      // would be taken from the leftmost/rightmost column, then we can
1110
      // skip the expensive horizontal filter.
1111
1112
871k
      if (ix4 <= -7) {
1113
14.2k
        int iy, row = 0;
1114
113k
        for (k = -7; k <= (AOMMIN(8, p_height - i) - 2); k += 2) {
1115
99.5k
          iy = iy4 + k;
1116
99.5k
          iy = clamp(iy, 0, height - 1);
1117
99.5k
          const __m256i temp_0 =
1118
99.5k
              _mm256_set1_epi16(const4 + ref[iy * stride] * const5);
1119
99.5k
          iy = iy4 + k + 1;
1120
99.5k
          iy = clamp(iy, 0, height - 1);
1121
99.5k
          const __m256i temp_1 =
1122
99.5k
              _mm256_set1_epi16(const4 + ref[iy * stride] * const5);
1123
99.5k
          horz_out[row] = _mm256_blend_epi32(temp_0, temp_1, 0xf0);
1124
99.5k
          row += 1;
1125
99.5k
        }
1126
14.2k
        iy = iy4 + k;
1127
14.2k
        iy = clamp(iy, 0, height - 1);
1128
14.2k
        horz_out[row] = _mm256_set1_epi16(const4 + ref[iy * stride] * const5);
1129
857k
      } else if (ix4 >= width + 6) {
1130
34.3k
        int iy, row = 0;
1131
274k
        for (k = -7; k <= (AOMMIN(8, p_height - i) - 2); k += 2) {
1132
240k
          iy = iy4 + k;
1133
240k
          iy = clamp(iy, 0, height - 1);
1134
240k
          const __m256i temp_0 = _mm256_set1_epi16(
1135
240k
              const4 + ref[iy * stride + (width - 1)] * const5);
1136
240k
          iy = iy4 + k + 1;
1137
240k
          iy = clamp(iy, 0, height - 1);
1138
240k
          const __m256i temp_1 = _mm256_set1_epi16(
1139
240k
              const4 + ref[iy * stride + (width - 1)] * const5);
1140
240k
          horz_out[row] = _mm256_blend_epi32(temp_0, temp_1, 0xf0);
1141
240k
          row += 1;
1142
240k
        }
1143
34.3k
        iy = iy4 + k;
1144
34.3k
        iy = clamp(iy, 0, height - 1);
1145
34.3k
        horz_out[row] =
1146
34.3k
            _mm256_set1_epi16(const4 + ref[iy * stride + (width - 1)] * const5);
1147
823k
      } else if (((ix4 - 7) < 0) || ((ix4 + 9) > width)) {
1148
28.1k
        const int out_of_boundary_left = -(ix4 - 6);
1149
28.1k
        const int out_of_boundary_right = (ix4 + 8) - width;
1150
28.1k
        int iy, sx, row = 0;
1151
223k
        for (k = -7; k <= (AOMMIN(8, p_height - i) - 2); k += 2) {
1152
194k
          iy = iy4 + k;
1153
194k
          iy = clamp(iy, 0, height - 1);
1154
194k
          __m128i src0 =
1155
194k
              _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
1156
194k
          iy = iy4 + k + 1;
1157
194k
          iy = clamp(iy, 0, height - 1);
1158
194k
          __m128i src1 =
1159
194k
              _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
1160
1161
194k
          if (out_of_boundary_left >= 0) {
1162
99.1k
            const __m128i shuffle_reg_left =
1163
99.1k
                _mm_loadu_si128((__m128i *)warp_pad_left[out_of_boundary_left]);
1164
99.1k
            src0 = _mm_shuffle_epi8(src0, shuffle_reg_left);
1165
99.1k
            src1 = _mm_shuffle_epi8(src1, shuffle_reg_left);
1166
99.1k
          }
1167
194k
          if (out_of_boundary_right >= 0) {
1168
100k
            const __m128i shuffle_reg_right = _mm_loadu_si128(
1169
100k
                (__m128i *)warp_pad_right[out_of_boundary_right]);
1170
100k
            src0 = _mm_shuffle_epi8(src0, shuffle_reg_right);
1171
100k
            src1 = _mm_shuffle_epi8(src1, shuffle_reg_right);
1172
100k
          }
1173
194k
          sx = sx4 + beta * (k + 4);
1174
194k
          const __m256i src_01 =
1175
194k
              _mm256_inserti128_si256(_mm256_castsi128_si256(src0), src1, 0x1);
1176
194k
          horizontal_filter_avx2(src_01, horz_out, sx, alpha, beta, row,
1177
194k
                                 shuffle_src, &round_const, &shift);
1178
194k
          row += 1;
1179
194k
        }
1180
28.1k
        iy = iy4 + k;
1181
28.1k
        iy = clamp(iy, 0, height - 1);
1182
28.1k
        __m128i src = _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
1183
28.1k
        if (out_of_boundary_left >= 0) {
1184
14.1k
          const __m128i shuffle_reg_left =
1185
14.1k
              _mm_loadu_si128((__m128i *)warp_pad_left[out_of_boundary_left]);
1186
14.1k
          src = _mm_shuffle_epi8(src, shuffle_reg_left);
1187
14.1k
        }
1188
28.1k
        if (out_of_boundary_right >= 0) {
1189
14.3k
          const __m128i shuffle_reg_right =
1190
14.3k
              _mm_loadu_si128((__m128i *)warp_pad_right[out_of_boundary_right]);
1191
14.3k
          src = _mm_shuffle_epi8(src, shuffle_reg_right);
1192
14.3k
        }
1193
28.1k
        sx = sx4 + beta * (k + 4);
1194
28.1k
        const __m256i src_01 = _mm256_castsi128_si256(src);
1195
28.1k
        __m256i coeff[4];
1196
28.1k
        prepare_horizontal_filter_coeff(alpha, sx, coeff);
1197
28.1k
        filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src,
1198
28.1k
                               &round_const, &shift, row);
1199
795k
      } else {
1200
795k
        prepare_warp_horizontal_filter_avx2(
1201
795k
            ref, horz_out, stride, ix4, iy4, sx4, alpha, beta, p_height, height,
1202
795k
            i, &round_const, &shift, shuffle_src);
1203
795k
      }
1204
1205
      // Vertical filter
1206
871k
      prepare_warp_vertical_filter_avx2(
1207
871k
          pred, horz_out, conv_params, gamma, delta, p_height, p_stride,
1208
871k
          p_width, i, j, sy4, reduce_bits_vert, &res_add_const_1, round_bits,
1209
871k
          &res_sub_const, &round_bits_const, &wt);
1210
871k
    }
1211
220k
  }
1212
99.6k
}
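// ---------------------------------------------------------------------------
// Illustrative sketch only, not part of the measured source above: the
// ix4 <= -7 and ix4 >= width + 6 branches store const4 + ref[...] * const5
// because horizontally filtering a row whose edge-clamped samples all equal a
// single pixel value p has a closed form, assuming the warp filter taps sum to
// 1 << FILTER_BITS. The helper name below is hypothetical.
static inline int16_t constant_row_horiz_filter_sketch(uint8_t p, int bd,
                                                       int reduce_bits_horiz) {
  // ((1 << offset_bits_horiz) + p * (1 << FILTER_BITS)) >> reduce_bits_horiz,
  // with offset_bits_horiz = bd + FILTER_BITS - 1. Both terms are exact
  // multiples of 1 << reduce_bits_horiz, so the shift distributes and the
  // result equals const4 + p * const5 as computed in av1_warp_affine_avx2.
  return (int16_t)((1 << (bd + FILTER_BITS - reduce_bits_horiz - 1)) +
                   (p << (FILTER_BITS - reduce_bits_horiz)));
}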
1213
1214
#endif  // !CONFIG_HIGHWAY
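
The per-block coordinate setup at lines 1086-1098 above is the fixed-point core of the warp: the centre of each 8x8 output block is pushed through the affine model and the result is split into an integer sample position (ix4, iy4) and a sub-pel remainder (sx4, sy4) that later selects the filter phase. A minimal scalar sketch of that split for the x axis, with chroma subsampling omitted for brevity and assuming the libaom convention that mat[0..1] hold the translation, mat[2..5] the 2x2 matrix, and WARPEDMODEL_PREC_BITS the number of fractional bits (the helper name is hypothetical):

static inline void warp_block_origin_sketch(const int32_t *mat, int32_t src_x,
                                            int32_t src_y, int32_t *ix4,
                                            int32_t *sx4) {
  // 64-bit accumulation mirrors the listing: the products can exceed 32 bits.
  const int64_t dst_x =
      (int64_t)mat[2] * src_x + (int64_t)mat[3] * src_y + (int64_t)mat[0];
  // Integer part: which reference column the filter window is anchored on.
  *ix4 = (int32_t)(dst_x >> WARPEDMODEL_PREC_BITS);
  // Fractional part: later biased by const1 and masked with ~const3 to pick
  // one of the precomputed warp filter phases.
  *sx4 = (int32_t)(dst_x & ((1 << WARPEDMODEL_PREC_BITS) - 1));
}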
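
The boundary handling in the horizontal filter (lines 1112-1198) implements one rule in three ways: a tap that falls outside the picture reads the edge-replicated pixel of its row, either by synthesising a fully constant row (the ix4 <= -7 and ix4 >= width + 6 branches) or by repairing the 16-byte load with the warp_pad_left / warp_pad_right shuffles. A scalar reference for what the padded load must contain, assuming edge clamping as in the generic C warp path and reusing the clamp() helper already used in the listing (the function name is hypothetical):

static inline void load_padded_row_sketch(const uint8_t *ref, int stride,
                                          int iy, int ix4, int width,
                                          uint8_t out[16]) {
  // The 16 bytes loaded for this row cover columns ix4 - 7 .. ix4 + 8;
  // out-of-picture columns replicate the first/last pixel of the row.
  for (int t = 0; t < 16; ++t) {
    const int ic = clamp(ix4 - 7 + t, 0, width - 1);
    out[t] = ref[iy * stride + ic];
  }
}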