Coverage Report

Created: 2025-11-16 07:09

/src/aom/av1/common/x86/warp_plane_avx2.c
Line | Count | Source
1
/*
2
 * Copyright (c) 2019, Alliance for Open Media. All rights reserved.
3
 *
4
 * This source code is subject to the terms of the BSD 2 Clause License and
5
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
6
 * was not distributed with this source code in the LICENSE file, you can
7
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
8
 * Media Patent License 1.0 was not distributed with this source code in the
9
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
10
 */
11
12
#include <immintrin.h>
13
#include "config/av1_rtcd.h"
14
#include "av1/common/warped_motion.h"
15
#include "aom_dsp/x86/synonyms.h"
16
17
#if !CONFIG_HIGHWAY
18
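// Byte-shuffle masks for the AVX2 warp filters below. The alpha0/gamma0
// masks broadcast a single pair of filter taps to every position (used when
// the filter does not change across the row/column); shuffle_src0..3 pair up
// neighbouring source pixels for _mm256_maddubs_epi16.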
19
DECLARE_ALIGNED(32, static const uint8_t, shuffle_alpha0_mask01_avx2[32]) = {
20
  0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,
21
  0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1
22
};
23
24
DECLARE_ALIGNED(32, static const uint8_t, shuffle_alpha0_mask23_avx2[32]) = {
25
  2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3,
26
  2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3
27
};
28
29
DECLARE_ALIGNED(32, static const uint8_t, shuffle_alpha0_mask45_avx2[32]) = {
30
  4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5,
31
  4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5
32
};
33
34
DECLARE_ALIGNED(32, static const uint8_t, shuffle_alpha0_mask67_avx2[32]) = {
35
  6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7,
36
  6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7
37
};
38
39
DECLARE_ALIGNED(32, static const uint8_t, shuffle_gamma0_mask0_avx2[32]) = {
40
  0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3,
41
  0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3
42
};
43
44
DECLARE_ALIGNED(32, static const uint8_t, shuffle_gamma0_mask1_avx2[32]) = {
45
  4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7,
46
  4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7
47
};
48
49
DECLARE_ALIGNED(32, static const uint8_t, shuffle_gamma0_mask2_avx2[32]) = {
50
  8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11,
51
  8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11
52
};
53
54
DECLARE_ALIGNED(32, static const uint8_t, shuffle_gamma0_mask3_avx2[32]) = {
55
  12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15,
56
  12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15
57
};
58
59
DECLARE_ALIGNED(32, static const uint8_t,
60
                shuffle_src0[32]) = { 0, 2, 2, 4, 4, 6, 6, 8, 1, 3, 3,
61
                                      5, 5, 7, 7, 9, 0, 2, 2, 4, 4, 6,
62
                                      6, 8, 1, 3, 3, 5, 5, 7, 7, 9 };
63
64
DECLARE_ALIGNED(32, static const uint8_t,
65
                shuffle_src1[32]) = { 4,  6,  6,  8,  8,  10, 10, 12, 5,  7, 7,
66
                                      9,  9,  11, 11, 13, 4,  6,  6,  8,  8, 10,
67
                                      10, 12, 5,  7,  7,  9,  9,  11, 11, 13 };
68
69
DECLARE_ALIGNED(32, static const uint8_t,
70
                shuffle_src2[32]) = { 1, 3, 3, 5, 5,  7, 7, 9, 2, 4, 4,
71
                                      6, 6, 8, 8, 10, 1, 3, 3, 5, 5, 7,
72
                                      7, 9, 2, 4, 4,  6, 6, 8, 8, 10 };
73
74
DECLARE_ALIGNED(32, static const uint8_t,
75
                shuffle_src3[32]) = { 5,  7,  7,  9,  9,  11, 11, 13, 6,  8, 8,
76
                                      10, 10, 12, 12, 14, 5,  7,  7,  9,  9, 11,
77
                                      11, 13, 6,  8,  8,  10, 10, 12, 12, 14 };
78
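// Horizontal pass for two rows at once: shuffle the 8-bit source pixels into
// adjacent pairs, multiply them by the packed filter taps with maddubs, sum
// the even/odd phases, round and shift, and store the 16-bit intermediate
// result for this output row.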
79
static inline void filter_src_pixels_avx2(const __m256i src, __m256i *horz_out,
80
                                          __m256i *coeff,
81
                                          const __m256i *shuffle_src,
82
                                          const __m256i *round_const,
83
22.8M
                                          const __m128i *shift, int row) {
84
22.8M
  const __m256i src_0 = _mm256_shuffle_epi8(src, shuffle_src[0]);
85
22.8M
  const __m256i src_1 = _mm256_shuffle_epi8(src, shuffle_src[1]);
86
22.8M
  const __m256i src_2 = _mm256_shuffle_epi8(src, shuffle_src[2]);
87
22.8M
  const __m256i src_3 = _mm256_shuffle_epi8(src, shuffle_src[3]);
88
89
22.8M
  const __m256i res_02 = _mm256_maddubs_epi16(src_0, coeff[0]);
90
22.8M
  const __m256i res_46 = _mm256_maddubs_epi16(src_1, coeff[1]);
91
22.8M
  const __m256i res_13 = _mm256_maddubs_epi16(src_2, coeff[2]);
92
22.8M
  const __m256i res_57 = _mm256_maddubs_epi16(src_3, coeff[3]);
93
94
22.8M
  const __m256i res_even = _mm256_add_epi16(res_02, res_46);
95
22.8M
  const __m256i res_odd = _mm256_add_epi16(res_13, res_57);
96
22.8M
  const __m256i res =
97
22.8M
      _mm256_add_epi16(_mm256_add_epi16(res_even, res_odd), *round_const);
98
22.8M
  horz_out[row] = _mm256_srl_epi16(res, *shift);
99
22.8M
}
100
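// Load the 8-tap horizontal filters for eight output pixels of two rows
// (offsets sx and sx + beta) and transpose them into four registers of
// packed tap pairs, ready for maddubs in filter_src_pixels_avx2().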
101
static inline void prepare_horizontal_filter_coeff_avx2(int alpha, int beta,
102
                                                        int sx,
103
4.58M
                                                        __m256i *coeff) {
104
4.58M
  __m128i tmp_0 = _mm_loadl_epi64(
105
4.58M
      (__m128i *)&av1_filter_8bit[((unsigned)(sx + 0 * alpha)) >>
106
4.58M
                                  WARPEDDIFF_PREC_BITS]);
107
4.58M
  __m128i tmp_1 = _mm_loadl_epi64(
108
4.58M
      (__m128i *)&av1_filter_8bit[((unsigned)(sx + 1 * alpha)) >>
109
4.58M
                                  WARPEDDIFF_PREC_BITS]);
110
4.58M
  __m128i tmp_2 = _mm_loadl_epi64(
111
4.58M
      (__m128i *)&av1_filter_8bit[((unsigned)(sx + 2 * alpha)) >>
112
4.58M
                                  WARPEDDIFF_PREC_BITS]);
113
4.58M
  __m128i tmp_3 = _mm_loadl_epi64(
114
4.58M
      (__m128i *)&av1_filter_8bit[((unsigned)(sx + 3 * alpha)) >>
115
4.58M
                                  WARPEDDIFF_PREC_BITS]);
116
117
4.58M
  __m128i tmp_4 = _mm_loadl_epi64(
118
4.58M
      (__m128i *)&av1_filter_8bit[((unsigned)(sx + 4 * alpha)) >>
119
4.58M
                                  WARPEDDIFF_PREC_BITS]);
120
4.58M
  __m128i tmp_5 = _mm_loadl_epi64(
121
4.58M
      (__m128i *)&av1_filter_8bit[((unsigned)(sx + 5 * alpha)) >>
122
4.58M
                                  WARPEDDIFF_PREC_BITS]);
123
4.58M
  __m128i tmp_6 = _mm_loadl_epi64(
124
4.58M
      (__m128i *)&av1_filter_8bit[((unsigned)(sx + 6 * alpha)) >>
125
4.58M
                                  WARPEDDIFF_PREC_BITS]);
126
4.58M
  __m128i tmp_7 = _mm_loadl_epi64(
127
4.58M
      (__m128i *)&av1_filter_8bit[((unsigned)(sx + 7 * alpha)) >>
128
4.58M
                                  WARPEDDIFF_PREC_BITS]);
129
130
4.58M
  __m256i tmp0_256 = _mm256_castsi128_si256(tmp_0);
131
4.58M
  __m256i tmp2_256 = _mm256_castsi128_si256(tmp_2);
132
4.58M
  __m256i tmp1_256 = _mm256_castsi128_si256(tmp_1);
133
4.58M
  __m256i tmp3_256 = _mm256_castsi128_si256(tmp_3);
134
135
4.58M
  __m256i tmp4_256 = _mm256_castsi128_si256(tmp_4);
136
4.58M
  __m256i tmp6_256 = _mm256_castsi128_si256(tmp_6);
137
4.58M
  __m256i tmp5_256 = _mm256_castsi128_si256(tmp_5);
138
4.58M
  __m256i tmp7_256 = _mm256_castsi128_si256(tmp_7);
139
140
4.58M
  __m128i tmp_8 = _mm_loadl_epi64(
141
4.58M
      (__m128i *)&av1_filter_8bit[(unsigned)((sx + beta) + 0 * alpha) >>
142
4.58M
                                  WARPEDDIFF_PREC_BITS]);
143
4.58M
  tmp0_256 = _mm256_inserti128_si256(tmp0_256, tmp_8, 1);
144
145
4.58M
  __m128i tmp_9 = _mm_loadl_epi64(
146
4.58M
      (__m128i *)&av1_filter_8bit[(unsigned)((sx + beta) + 1 * alpha) >>
147
4.58M
                                  WARPEDDIFF_PREC_BITS]);
148
4.58M
  tmp1_256 = _mm256_inserti128_si256(tmp1_256, tmp_9, 1);
149
150
4.58M
  __m128i tmp_10 = _mm_loadl_epi64(
151
4.58M
      (__m128i *)&av1_filter_8bit[(unsigned)((sx + beta) + 2 * alpha) >>
152
4.58M
                                  WARPEDDIFF_PREC_BITS]);
153
4.58M
  tmp2_256 = _mm256_inserti128_si256(tmp2_256, tmp_10, 1);
154
155
4.58M
  __m128i tmp_11 = _mm_loadl_epi64(
156
4.58M
      (__m128i *)&av1_filter_8bit[(unsigned)((sx + beta) + 3 * alpha) >>
157
4.58M
                                  WARPEDDIFF_PREC_BITS]);
158
4.58M
  tmp3_256 = _mm256_inserti128_si256(tmp3_256, tmp_11, 1);
159
160
4.58M
  tmp_2 = _mm_loadl_epi64(
161
4.58M
      (__m128i *)&av1_filter_8bit[(unsigned)((sx + beta) + 4 * alpha) >>
162
4.58M
                                  WARPEDDIFF_PREC_BITS]);
163
4.58M
  tmp4_256 = _mm256_inserti128_si256(tmp4_256, tmp_2, 1);
164
165
4.58M
  tmp_3 = _mm_loadl_epi64(
166
4.58M
      (__m128i *)&av1_filter_8bit[(unsigned)((sx + beta) + 5 * alpha) >>
167
4.58M
                                  WARPEDDIFF_PREC_BITS]);
168
4.58M
  tmp5_256 = _mm256_inserti128_si256(tmp5_256, tmp_3, 1);
169
170
4.58M
  tmp_6 = _mm_loadl_epi64(
171
4.58M
      (__m128i *)&av1_filter_8bit[(unsigned)((sx + beta) + 6 * alpha) >>
172
4.58M
                                  WARPEDDIFF_PREC_BITS]);
173
4.58M
  tmp6_256 = _mm256_inserti128_si256(tmp6_256, tmp_6, 1);
174
175
4.58M
  tmp_7 = _mm_loadl_epi64(
176
4.58M
      (__m128i *)&av1_filter_8bit[(unsigned)((sx + beta) + 7 * alpha) >>
177
4.58M
                                  WARPEDDIFF_PREC_BITS]);
178
4.58M
  tmp7_256 = _mm256_inserti128_si256(tmp7_256, tmp_7, 1);
179
180
4.58M
  const __m256i tmp_12 = _mm256_unpacklo_epi16(tmp0_256, tmp2_256);
181
4.58M
  const __m256i tmp_13 = _mm256_unpacklo_epi16(tmp1_256, tmp3_256);
182
4.58M
  const __m256i tmp_14 = _mm256_unpacklo_epi16(tmp4_256, tmp6_256);
183
4.58M
  const __m256i tmp_15 = _mm256_unpacklo_epi16(tmp5_256, tmp7_256);
184
185
4.58M
  const __m256i res_0 = _mm256_unpacklo_epi32(tmp_12, tmp_14);
186
4.58M
  const __m256i res_1 = _mm256_unpackhi_epi32(tmp_12, tmp_14);
187
4.58M
  const __m256i res_2 = _mm256_unpacklo_epi32(tmp_13, tmp_15);
188
4.58M
  const __m256i res_3 = _mm256_unpackhi_epi32(tmp_13, tmp_15);
189
190
4.58M
  coeff[0] = _mm256_unpacklo_epi64(res_0, res_2);
191
4.58M
  coeff[1] = _mm256_unpackhi_epi64(res_0, res_2);
192
4.58M
  coeff[2] = _mm256_unpacklo_epi64(res_1, res_3);
193
4.58M
  coeff[3] = _mm256_unpackhi_epi64(res_1, res_3);
194
4.58M
}
195
196
static inline void prepare_horizontal_filter_coeff_beta0_avx2(int alpha, int sx,
197
533k
                                                              __m256i *coeff) {
198
533k
  __m128i tmp_0 = _mm_loadl_epi64(
199
533k
      (__m128i *)&av1_filter_8bit[(sx + 0 * alpha) >> WARPEDDIFF_PREC_BITS]);
200
533k
  __m128i tmp_1 = _mm_loadl_epi64(
201
533k
      (__m128i *)&av1_filter_8bit[(sx + 1 * alpha) >> WARPEDDIFF_PREC_BITS]);
202
533k
  __m128i tmp_2 = _mm_loadl_epi64(
203
533k
      (__m128i *)&av1_filter_8bit[(sx + 2 * alpha) >> WARPEDDIFF_PREC_BITS]);
204
533k
  __m128i tmp_3 = _mm_loadl_epi64(
205
533k
      (__m128i *)&av1_filter_8bit[(sx + 3 * alpha) >> WARPEDDIFF_PREC_BITS]);
206
533k
  __m128i tmp_4 = _mm_loadl_epi64(
207
533k
      (__m128i *)&av1_filter_8bit[(sx + 4 * alpha) >> WARPEDDIFF_PREC_BITS]);
208
533k
  __m128i tmp_5 = _mm_loadl_epi64(
209
533k
      (__m128i *)&av1_filter_8bit[(sx + 5 * alpha) >> WARPEDDIFF_PREC_BITS]);
210
533k
  __m128i tmp_6 = _mm_loadl_epi64(
211
533k
      (__m128i *)&av1_filter_8bit[(sx + 6 * alpha) >> WARPEDDIFF_PREC_BITS]);
212
533k
  __m128i tmp_7 = _mm_loadl_epi64(
213
533k
      (__m128i *)&av1_filter_8bit[(sx + 7 * alpha) >> WARPEDDIFF_PREC_BITS]);
214
215
533k
  tmp_0 = _mm_unpacklo_epi16(tmp_0, tmp_2);
216
533k
  tmp_1 = _mm_unpacklo_epi16(tmp_1, tmp_3);
217
533k
  tmp_4 = _mm_unpacklo_epi16(tmp_4, tmp_6);
218
533k
  tmp_5 = _mm_unpacklo_epi16(tmp_5, tmp_7);
219
220
533k
  const __m256i tmp_12 = _mm256_broadcastsi128_si256(tmp_0);
221
533k
  const __m256i tmp_13 = _mm256_broadcastsi128_si256(tmp_1);
222
533k
  const __m256i tmp_14 = _mm256_broadcastsi128_si256(tmp_4);
223
533k
  const __m256i tmp_15 = _mm256_broadcastsi128_si256(tmp_5);
224
225
533k
  const __m256i res_0 = _mm256_unpacklo_epi32(tmp_12, tmp_14);
226
533k
  const __m256i res_1 = _mm256_unpackhi_epi32(tmp_12, tmp_14);
227
533k
  const __m256i res_2 = _mm256_unpacklo_epi32(tmp_13, tmp_15);
228
533k
  const __m256i res_3 = _mm256_unpackhi_epi32(tmp_13, tmp_15);
229
230
533k
  coeff[0] = _mm256_unpacklo_epi64(res_0, res_2);
231
533k
  coeff[1] = _mm256_unpackhi_epi64(res_0, res_2);
232
533k
  coeff[2] = _mm256_unpacklo_epi64(res_1, res_3);
233
533k
  coeff[3] = _mm256_unpackhi_epi64(res_1, res_3);
234
533k
}
235
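// alpha == 0 special case: the same 8-tap filter applies to every output
// pixel of a row, so a single load per row is broadcast to all positions
// using the shuffle_alpha0 masks.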
236
static inline void prepare_horizontal_filter_coeff_alpha0_avx2(int beta, int sx,
237
6.87M
                                                               __m256i *coeff) {
238
6.87M
  const __m128i tmp_0 =
239
6.87M
      _mm_loadl_epi64((__m128i *)&av1_filter_8bit[sx >> WARPEDDIFF_PREC_BITS]);
240
6.87M
  const __m128i tmp_1 = _mm_loadl_epi64(
241
6.87M
      (__m128i *)&av1_filter_8bit[(sx + beta) >> WARPEDDIFF_PREC_BITS]);
242
243
6.87M
  const __m256i res_0 =
244
6.87M
      _mm256_inserti128_si256(_mm256_castsi128_si256(tmp_0), tmp_1, 0x1);
245
246
6.87M
  coeff[0] = _mm256_shuffle_epi8(
247
6.87M
      res_0, _mm256_load_si256((__m256i *)shuffle_alpha0_mask01_avx2));
248
6.87M
  coeff[1] = _mm256_shuffle_epi8(
249
6.87M
      res_0, _mm256_load_si256((__m256i *)shuffle_alpha0_mask23_avx2));
250
6.87M
  coeff[2] = _mm256_shuffle_epi8(
251
6.87M
      res_0, _mm256_load_si256((__m256i *)shuffle_alpha0_mask45_avx2));
252
6.87M
  coeff[3] = _mm256_shuffle_epi8(
253
6.87M
      res_0, _mm256_load_si256((__m256i *)shuffle_alpha0_mask67_avx2));
254
6.87M
}
255
256
static inline void horizontal_filter_avx2(const __m256i src, __m256i *horz_out,
257
                                          int sx, int alpha, int beta, int row,
258
                                          const __m256i *shuffle_src,
259
                                          const __m256i *round_const,
260
4.58M
                                          const __m128i *shift) {
261
4.58M
  __m256i coeff[4];
262
4.58M
  prepare_horizontal_filter_coeff_avx2(alpha, beta, sx, coeff);
263
4.58M
  filter_src_pixels_avx2(src, horz_out, coeff, shuffle_src, round_const, shift,
264
4.58M
                         row);
265
4.58M
}
266
static inline void prepare_horizontal_filter_coeff(int alpha, int sx,
267
658k
                                                   __m256i *coeff) {
268
658k
  const __m128i tmp_0 = _mm_loadl_epi64(
269
658k
      (__m128i *)&av1_filter_8bit[(sx + 0 * alpha) >> WARPEDDIFF_PREC_BITS]);
270
658k
  const __m128i tmp_1 = _mm_loadl_epi64(
271
658k
      (__m128i *)&av1_filter_8bit[(sx + 1 * alpha) >> WARPEDDIFF_PREC_BITS]);
272
658k
  const __m128i tmp_2 = _mm_loadl_epi64(
273
658k
      (__m128i *)&av1_filter_8bit[(sx + 2 * alpha) >> WARPEDDIFF_PREC_BITS]);
274
658k
  const __m128i tmp_3 = _mm_loadl_epi64(
275
658k
      (__m128i *)&av1_filter_8bit[(sx + 3 * alpha) >> WARPEDDIFF_PREC_BITS]);
276
658k
  const __m128i tmp_4 = _mm_loadl_epi64(
277
658k
      (__m128i *)&av1_filter_8bit[(sx + 4 * alpha) >> WARPEDDIFF_PREC_BITS]);
278
658k
  const __m128i tmp_5 = _mm_loadl_epi64(
279
658k
      (__m128i *)&av1_filter_8bit[(sx + 5 * alpha) >> WARPEDDIFF_PREC_BITS]);
280
658k
  const __m128i tmp_6 = _mm_loadl_epi64(
281
658k
      (__m128i *)&av1_filter_8bit[(sx + 6 * alpha) >> WARPEDDIFF_PREC_BITS]);
282
658k
  const __m128i tmp_7 = _mm_loadl_epi64(
283
658k
      (__m128i *)&av1_filter_8bit[(sx + 7 * alpha) >> WARPEDDIFF_PREC_BITS]);
284
285
658k
  const __m128i tmp_8 = _mm_unpacklo_epi16(tmp_0, tmp_2);
286
658k
  const __m128i tmp_9 = _mm_unpacklo_epi16(tmp_1, tmp_3);
287
658k
  const __m128i tmp_10 = _mm_unpacklo_epi16(tmp_4, tmp_6);
288
658k
  const __m128i tmp_11 = _mm_unpacklo_epi16(tmp_5, tmp_7);
289
290
658k
  const __m128i tmp_12 = _mm_unpacklo_epi32(tmp_8, tmp_10);
291
658k
  const __m128i tmp_13 = _mm_unpackhi_epi32(tmp_8, tmp_10);
292
658k
  const __m128i tmp_14 = _mm_unpacklo_epi32(tmp_9, tmp_11);
293
658k
  const __m128i tmp_15 = _mm_unpackhi_epi32(tmp_9, tmp_11);
294
295
658k
  coeff[0] = _mm256_castsi128_si256(_mm_unpacklo_epi64(tmp_12, tmp_14));
296
658k
  coeff[1] = _mm256_castsi128_si256(_mm_unpackhi_epi64(tmp_12, tmp_14));
297
658k
  coeff[2] = _mm256_castsi128_si256(_mm_unpacklo_epi64(tmp_13, tmp_15));
298
658k
  coeff[3] = _mm256_castsi128_si256(_mm_unpackhi_epi64(tmp_13, tmp_15));
299
658k
}
300
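// General horizontal filter for an 8x8 block: for each pair of rows, load 16
// reference pixels (with vertical clamping), build the per-row coefficients
// and filter them; the final odd row is handled after the loop.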
301
static inline void warp_horizontal_filter_avx2(
302
    const uint8_t *ref, __m256i *horz_out, int stride, int32_t ix4, int32_t iy4,
303
    int32_t sx4, int alpha, int beta, int p_height, int height, int i,
304
    const __m256i *round_const, const __m128i *shift,
305
530k
    const __m256i *shuffle_src) {
306
530k
  int k, iy, sx, row = 0;
307
530k
  __m256i coeff[4];
308
4.23M
  for (k = -7; k <= (AOMMIN(8, p_height - i) - 2); k += 2) {
309
3.70M
    iy = iy4 + k;
310
3.70M
    iy = clamp(iy, 0, height - 1);
311
3.70M
    const __m128i src_0 =
312
3.70M
        _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
313
3.70M
    iy = iy4 + k + 1;
314
3.70M
    iy = clamp(iy, 0, height - 1);
315
3.70M
    const __m128i src_1 =
316
3.70M
        _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
317
3.70M
    const __m256i src_01 =
318
3.70M
        _mm256_inserti128_si256(_mm256_castsi128_si256(src_0), src_1, 0x1);
319
3.70M
    sx = sx4 + beta * (k + 4);
320
3.70M
    horizontal_filter_avx2(src_01, horz_out, sx, alpha, beta, row, shuffle_src,
321
3.70M
                           round_const, shift);
322
3.70M
    row += 1;
323
3.70M
  }
324
530k
  iy = iy4 + k;
325
530k
  iy = clamp(iy, 0, height - 1);
326
530k
  const __m256i src_01 = _mm256_castsi128_si256(
327
530k
      _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7)));
328
530k
  sx = sx4 + beta * (k + 4);
329
530k
  prepare_horizontal_filter_coeff(alpha, sx, coeff);
330
530k
  filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src, round_const,
331
530k
                         shift, row);
332
530k
}
333
334
static inline void warp_horizontal_filter_alpha0_avx2(
335
    const uint8_t *ref, __m256i *horz_out, int stride, int32_t ix4, int32_t iy4,
336
    int32_t sx4, int alpha, int beta, int p_height, int height, int i,
337
    const __m256i *round_const, const __m128i *shift,
338
745k
    const __m256i *shuffle_src) {
339
745k
  (void)alpha;
340
745k
  int k, iy, sx, row = 0;
341
745k
  __m256i coeff[4];
342
5.89M
  for (k = -7; k <= (AOMMIN(8, p_height - i) - 2); k += 2) {
343
5.14M
    iy = iy4 + k;
344
5.14M
    iy = clamp(iy, 0, height - 1);
345
5.14M
    const __m128i src_0 =
346
5.14M
        _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
347
5.14M
    iy = iy4 + k + 1;
348
5.14M
    iy = clamp(iy, 0, height - 1);
349
5.14M
    const __m128i src_1 =
350
5.14M
        _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
351
5.14M
    const __m256i src_01 =
352
5.14M
        _mm256_inserti128_si256(_mm256_castsi128_si256(src_0), src_1, 0x1);
353
5.14M
    sx = sx4 + beta * (k + 4);
354
5.14M
    prepare_horizontal_filter_coeff_alpha0_avx2(beta, sx, coeff);
355
5.14M
    filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src, round_const,
356
5.14M
                           shift, row);
357
5.14M
    row += 1;
358
5.14M
  }
359
745k
  iy = iy4 + k;
360
745k
  iy = clamp(iy, 0, height - 1);
361
745k
  const __m256i src_01 = _mm256_castsi128_si256(
362
745k
      _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7)));
363
745k
  sx = sx4 + beta * (k + 4);
364
745k
  prepare_horizontal_filter_coeff_alpha0_avx2(beta, sx, coeff);
365
745k
  filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src, round_const,
366
745k
                         shift, row);
367
745k
}
368
369
static inline void warp_horizontal_filter_beta0_avx2(
370
    const uint8_t *ref, __m256i *horz_out, int stride, int32_t ix4, int32_t iy4,
371
    int32_t sx4, int alpha, int beta, int p_height, int height, int i,
372
    const __m256i *round_const, const __m128i *shift,
373
533k
    const __m256i *shuffle_src) {
374
533k
  (void)beta;
375
533k
  int k, iy, row = 0;
376
533k
  __m256i coeff[4];
377
533k
  prepare_horizontal_filter_coeff_beta0_avx2(alpha, sx4, coeff);
378
4.25M
  for (k = -7; k <= (AOMMIN(8, p_height - i) - 2); k += 2) {
379
3.72M
    iy = iy4 + k;
380
3.72M
    iy = clamp(iy, 0, height - 1);
381
3.72M
    const __m128i src_0 =
382
3.72M
        _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
383
3.72M
    iy = iy4 + k + 1;
384
3.72M
    iy = clamp(iy, 0, height - 1);
385
3.72M
    const __m128i src_1 =
386
3.72M
        _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
387
3.72M
    const __m256i src_01 =
388
3.72M
        _mm256_inserti128_si256(_mm256_castsi128_si256(src_0), src_1, 0x1);
389
3.72M
    filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src, round_const,
390
3.72M
                           shift, row);
391
3.72M
    row += 1;
392
3.72M
  }
393
533k
  iy = iy4 + k;
394
533k
  iy = clamp(iy, 0, height - 1);
395
533k
  const __m256i src_01 = _mm256_castsi128_si256(
396
533k
      _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7)));
397
533k
  filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src, round_const,
398
533k
                         shift, row);
399
533k
}
400
401
static inline void warp_horizontal_filter_alpha0_beta0_avx2(
402
    const uint8_t *ref, __m256i *horz_out, int stride, int32_t ix4, int32_t iy4,
403
    int32_t sx4, int alpha, int beta, int p_height, int height, int i,
404
    const __m256i *round_const, const __m128i *shift,
405
1.00M
    const __m256i *shuffle_src) {
406
1.00M
  (void)alpha;
407
1.00M
  int k, iy, row = 0;
408
1.00M
  __m256i coeff[4];
409
1.00M
  prepare_horizontal_filter_coeff_alpha0_avx2(beta, sx4, coeff);
410
7.95M
  for (k = -7; k <= (AOMMIN(8, p_height - i) - 2); k += 2) {
411
6.94M
    iy = iy4 + k;
412
6.94M
    iy = clamp(iy, 0, height - 1);
413
6.94M
    const __m128i src0 =
414
6.94M
        _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
415
6.94M
    iy = iy4 + k + 1;
416
6.94M
    iy = clamp(iy, 0, height - 1);
417
6.94M
    const __m128i src1 =
418
6.94M
        _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
419
6.94M
    const __m256i src_01 =
420
6.94M
        _mm256_inserti128_si256(_mm256_castsi128_si256(src0), src1, 0x1);
421
6.94M
    filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src, round_const,
422
6.94M
                           shift, row);
423
6.94M
    row += 1;
424
6.94M
  }
425
1.00M
  iy = iy4 + k;
426
1.00M
  iy = clamp(iy, 0, height - 1);
427
1.00M
  const __m256i src_01 = _mm256_castsi128_si256(
428
1.00M
      _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7)));
429
1.00M
  filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src, round_const,
430
1.00M
                         shift, row);
431
1.00M
}
432
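// Precompute the compound-average constants: the offset-removal constant,
// the final rounding bias, and the forward/backward distance weights
// interleaved for madd_epi16.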
433
static inline void unpack_weights_and_set_round_const_avx2(
434
    ConvolveParams *conv_params, const int round_bits, const int offset_bits,
435
280k
    __m256i *res_sub_const, __m256i *round_bits_const, __m256i *wt) {
436
280k
  *res_sub_const =
437
280k
      _mm256_set1_epi16(-(1 << (offset_bits - conv_params->round_1)) -
438
280k
                        (1 << (offset_bits - conv_params->round_1 - 1)));
439
280k
  *round_bits_const = _mm256_set1_epi16(((1 << round_bits) >> 1));
440
441
280k
  const int w0 = conv_params->fwd_offset;
442
280k
  const int w1 = conv_params->bck_offset;
443
280k
  const __m256i wt0 = _mm256_set1_epi16((short)w0);
444
280k
  const __m256i wt1 = _mm256_set1_epi16((short)w1);
445
280k
  *wt = _mm256_unpacklo_epi16(wt0, wt1);
446
280k
}
447
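// Load the 16-bit 8-tap vertical filters for eight columns of two rows
// (offsets sy and sy + delta) and transpose them into eight registers of
// packed tap pairs for madd_epi16.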
448
static inline void prepare_vertical_filter_coeffs_avx2(int gamma, int delta,
449
                                                       int sy,
450
6.03M
                                                       __m256i *coeffs) {
451
6.03M
  __m128i filt_00 =
452
6.03M
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
453
6.03M
                                  ((sy + 0 * gamma) >> WARPEDDIFF_PREC_BITS)));
454
6.03M
  __m128i filt_01 =
455
6.03M
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
456
6.03M
                                  ((sy + 2 * gamma) >> WARPEDDIFF_PREC_BITS)));
457
6.03M
  __m128i filt_02 =
458
6.03M
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
459
6.03M
                                  ((sy + 4 * gamma) >> WARPEDDIFF_PREC_BITS)));
460
6.03M
  __m128i filt_03 =
461
6.03M
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
462
6.03M
                                  ((sy + 6 * gamma) >> WARPEDDIFF_PREC_BITS)));
463
464
6.03M
  __m128i filt_10 = _mm_loadu_si128(
465
6.03M
      (__m128i *)(av1_warped_filter +
466
6.03M
                  (((sy + delta) + 0 * gamma) >> WARPEDDIFF_PREC_BITS)));
467
6.03M
  __m128i filt_11 = _mm_loadu_si128(
468
6.03M
      (__m128i *)(av1_warped_filter +
469
6.03M
                  (((sy + delta) + 2 * gamma) >> WARPEDDIFF_PREC_BITS)));
470
6.03M
  __m128i filt_12 = _mm_loadu_si128(
471
6.03M
      (__m128i *)(av1_warped_filter +
472
6.03M
                  (((sy + delta) + 4 * gamma) >> WARPEDDIFF_PREC_BITS)));
473
6.03M
  __m128i filt_13 = _mm_loadu_si128(
474
6.03M
      (__m128i *)(av1_warped_filter +
475
6.03M
                  (((sy + delta) + 6 * gamma) >> WARPEDDIFF_PREC_BITS)));
476
477
6.03M
  __m256i filt_0 =
478
6.03M
      _mm256_inserti128_si256(_mm256_castsi128_si256(filt_00), filt_10, 0x1);
479
6.03M
  __m256i filt_1 =
480
6.03M
      _mm256_inserti128_si256(_mm256_castsi128_si256(filt_01), filt_11, 0x1);
481
6.03M
  __m256i filt_2 =
482
6.03M
      _mm256_inserti128_si256(_mm256_castsi128_si256(filt_02), filt_12, 0x1);
483
6.03M
  __m256i filt_3 =
484
6.03M
      _mm256_inserti128_si256(_mm256_castsi128_si256(filt_03), filt_13, 0x1);
485
486
6.03M
  __m256i res_0 = _mm256_unpacklo_epi32(filt_0, filt_1);
487
6.03M
  __m256i res_1 = _mm256_unpacklo_epi32(filt_2, filt_3);
488
6.03M
  __m256i res_2 = _mm256_unpackhi_epi32(filt_0, filt_1);
489
6.03M
  __m256i res_3 = _mm256_unpackhi_epi32(filt_2, filt_3);
490
491
6.03M
  coeffs[0] = _mm256_unpacklo_epi64(res_0, res_1);
492
6.03M
  coeffs[1] = _mm256_unpackhi_epi64(res_0, res_1);
493
6.03M
  coeffs[2] = _mm256_unpacklo_epi64(res_2, res_3);
494
6.03M
  coeffs[3] = _mm256_unpackhi_epi64(res_2, res_3);
495
496
6.03M
  filt_00 =
497
6.03M
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
498
6.03M
                                  ((sy + 1 * gamma) >> WARPEDDIFF_PREC_BITS)));
499
6.03M
  filt_01 =
500
6.03M
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
501
6.03M
                                  ((sy + 3 * gamma) >> WARPEDDIFF_PREC_BITS)));
502
6.03M
  filt_02 =
503
6.03M
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
504
6.03M
                                  ((sy + 5 * gamma) >> WARPEDDIFF_PREC_BITS)));
505
6.03M
  filt_03 =
506
6.03M
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
507
6.03M
                                  ((sy + 7 * gamma) >> WARPEDDIFF_PREC_BITS)));
508
509
6.03M
  filt_10 = _mm_loadu_si128(
510
6.03M
      (__m128i *)(av1_warped_filter +
511
6.03M
                  (((sy + delta) + 1 * gamma) >> WARPEDDIFF_PREC_BITS)));
512
6.03M
  filt_11 = _mm_loadu_si128(
513
6.03M
      (__m128i *)(av1_warped_filter +
514
6.03M
                  (((sy + delta) + 3 * gamma) >> WARPEDDIFF_PREC_BITS)));
515
6.03M
  filt_12 = _mm_loadu_si128(
516
6.03M
      (__m128i *)(av1_warped_filter +
517
6.03M
                  (((sy + delta) + 5 * gamma) >> WARPEDDIFF_PREC_BITS)));
518
6.03M
  filt_13 = _mm_loadu_si128(
519
6.03M
      (__m128i *)(av1_warped_filter +
520
6.03M
                  (((sy + delta) + 7 * gamma) >> WARPEDDIFF_PREC_BITS)));
521
522
6.03M
  filt_0 =
523
6.03M
      _mm256_inserti128_si256(_mm256_castsi128_si256(filt_00), filt_10, 0x1);
524
6.03M
  filt_1 =
525
6.03M
      _mm256_inserti128_si256(_mm256_castsi128_si256(filt_01), filt_11, 0x1);
526
6.03M
  filt_2 =
527
6.03M
      _mm256_inserti128_si256(_mm256_castsi128_si256(filt_02), filt_12, 0x1);
528
6.03M
  filt_3 =
529
6.03M
      _mm256_inserti128_si256(_mm256_castsi128_si256(filt_03), filt_13, 0x1);
530
531
6.03M
  res_0 = _mm256_unpacklo_epi32(filt_0, filt_1);
532
6.03M
  res_1 = _mm256_unpacklo_epi32(filt_2, filt_3);
533
6.03M
  res_2 = _mm256_unpackhi_epi32(filt_0, filt_1);
534
6.03M
  res_3 = _mm256_unpackhi_epi32(filt_2, filt_3);
535
536
6.03M
  coeffs[4] = _mm256_unpacklo_epi64(res_0, res_1);
537
6.03M
  coeffs[5] = _mm256_unpackhi_epi64(res_0, res_1);
538
6.03M
  coeffs[6] = _mm256_unpacklo_epi64(res_2, res_3);
539
6.03M
  coeffs[7] = _mm256_unpackhi_epi64(res_2, res_3);
540
6.03M
}
541
542
static inline void prepare_vertical_filter_coeffs_delta0_avx2(int gamma, int sy,
543
677k
                                                              __m256i *coeffs) {
544
677k
  __m128i filt_00 =
545
677k
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
546
677k
                                  ((sy + 0 * gamma) >> WARPEDDIFF_PREC_BITS)));
547
677k
  __m128i filt_01 =
548
677k
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
549
677k
                                  ((sy + 2 * gamma) >> WARPEDDIFF_PREC_BITS)));
550
677k
  __m128i filt_02 =
551
677k
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
552
677k
                                  ((sy + 4 * gamma) >> WARPEDDIFF_PREC_BITS)));
553
677k
  __m128i filt_03 =
554
677k
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
555
677k
                                  ((sy + 6 * gamma) >> WARPEDDIFF_PREC_BITS)));
556
557
677k
  __m256i filt_0 = _mm256_broadcastsi128_si256(filt_00);
558
677k
  __m256i filt_1 = _mm256_broadcastsi128_si256(filt_01);
559
677k
  __m256i filt_2 = _mm256_broadcastsi128_si256(filt_02);
560
677k
  __m256i filt_3 = _mm256_broadcastsi128_si256(filt_03);
561
562
677k
  __m256i res_0 = _mm256_unpacklo_epi32(filt_0, filt_1);
563
677k
  __m256i res_1 = _mm256_unpacklo_epi32(filt_2, filt_3);
564
677k
  __m256i res_2 = _mm256_unpackhi_epi32(filt_0, filt_1);
565
677k
  __m256i res_3 = _mm256_unpackhi_epi32(filt_2, filt_3);
566
567
677k
  coeffs[0] = _mm256_unpacklo_epi64(res_0, res_1);
568
677k
  coeffs[1] = _mm256_unpackhi_epi64(res_0, res_1);
569
677k
  coeffs[2] = _mm256_unpacklo_epi64(res_2, res_3);
570
677k
  coeffs[3] = _mm256_unpackhi_epi64(res_2, res_3);
571
572
677k
  filt_00 =
573
677k
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
574
677k
                                  ((sy + 1 * gamma) >> WARPEDDIFF_PREC_BITS)));
575
677k
  filt_01 =
576
677k
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
577
677k
                                  ((sy + 3 * gamma) >> WARPEDDIFF_PREC_BITS)));
578
677k
  filt_02 =
579
677k
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
580
677k
                                  ((sy + 5 * gamma) >> WARPEDDIFF_PREC_BITS)));
581
677k
  filt_03 =
582
677k
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
583
677k
                                  ((sy + 7 * gamma) >> WARPEDDIFF_PREC_BITS)));
584
585
677k
  filt_0 = _mm256_broadcastsi128_si256(filt_00);
586
677k
  filt_1 = _mm256_broadcastsi128_si256(filt_01);
587
677k
  filt_2 = _mm256_broadcastsi128_si256(filt_02);
588
677k
  filt_3 = _mm256_broadcastsi128_si256(filt_03);
589
590
677k
  res_0 = _mm256_unpacklo_epi32(filt_0, filt_1);
591
677k
  res_1 = _mm256_unpacklo_epi32(filt_2, filt_3);
592
677k
  res_2 = _mm256_unpackhi_epi32(filt_0, filt_1);
593
677k
  res_3 = _mm256_unpackhi_epi32(filt_2, filt_3);
594
595
677k
  coeffs[4] = _mm256_unpacklo_epi64(res_0, res_1);
596
677k
  coeffs[5] = _mm256_unpackhi_epi64(res_0, res_1);
597
677k
  coeffs[6] = _mm256_unpacklo_epi64(res_2, res_3);
598
677k
  coeffs[7] = _mm256_unpackhi_epi64(res_2, res_3);
599
677k
}
600
601
static inline void prepare_vertical_filter_coeffs_gamma0_avx2(int delta, int sy,
602
2.23M
                                                              __m256i *coeffs) {
603
2.23M
  const __m128i filt_0 = _mm_loadu_si128(
604
2.23M
      (__m128i *)(av1_warped_filter + (sy >> WARPEDDIFF_PREC_BITS)));
605
2.23M
  const __m128i filt_1 = _mm_loadu_si128(
606
2.23M
      (__m128i *)(av1_warped_filter + ((sy + delta) >> WARPEDDIFF_PREC_BITS)));
607
608
2.23M
  __m256i res_0 =
609
2.23M
      _mm256_inserti128_si256(_mm256_castsi128_si256(filt_0), filt_1, 0x1);
610
611
2.23M
  coeffs[0] = _mm256_shuffle_epi8(
612
2.23M
      res_0, _mm256_load_si256((__m256i *)shuffle_gamma0_mask0_avx2));
613
2.23M
  coeffs[1] = _mm256_shuffle_epi8(
614
2.23M
      res_0, _mm256_load_si256((__m256i *)shuffle_gamma0_mask1_avx2));
615
2.23M
  coeffs[2] = _mm256_shuffle_epi8(
616
2.23M
      res_0, _mm256_load_si256((__m256i *)shuffle_gamma0_mask2_avx2));
617
2.23M
  coeffs[3] = _mm256_shuffle_epi8(
618
2.23M
      res_0, _mm256_load_si256((__m256i *)shuffle_gamma0_mask3_avx2));
619
620
2.23M
  coeffs[4] = coeffs[0];
621
2.23M
  coeffs[5] = coeffs[1];
622
2.23M
  coeffs[6] = coeffs[2];
623
2.23M
  coeffs[7] = coeffs[3];
624
2.23M
}
625
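// Vertical pass for two rows: multiply the interleaved 16-bit intermediate
// rows by the vertical tap pairs with madd_epi16, accumulate the even and
// odd phases, and return the 32-bit results split into low/high halves.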
626
static inline void filter_src_pixels_vertical_avx2(__m256i *horz_out,
627
                                                   __m256i *src,
628
                                                   __m256i *coeffs,
629
                                                   __m256i *res_lo,
630
12.2M
                                                   __m256i *res_hi, int row) {
631
12.2M
  const __m256i src_6 = horz_out[row + 3];
632
12.2M
  const __m256i src_7 =
633
12.2M
      _mm256_permute2x128_si256(horz_out[row + 3], horz_out[row + 4], 0x21);
634
635
12.2M
  src[6] = _mm256_unpacklo_epi16(src_6, src_7);
636
637
12.2M
  const __m256i res_0 = _mm256_madd_epi16(src[0], coeffs[0]);
638
12.2M
  const __m256i res_2 = _mm256_madd_epi16(src[2], coeffs[1]);
639
12.2M
  const __m256i res_4 = _mm256_madd_epi16(src[4], coeffs[2]);
640
12.2M
  const __m256i res_6 = _mm256_madd_epi16(src[6], coeffs[3]);
641
642
12.2M
  const __m256i res_even = _mm256_add_epi32(_mm256_add_epi32(res_0, res_2),
643
12.2M
                                            _mm256_add_epi32(res_4, res_6));
644
645
12.2M
  src[7] = _mm256_unpackhi_epi16(src_6, src_7);
646
647
12.2M
  const __m256i res_1 = _mm256_madd_epi16(src[1], coeffs[4]);
648
12.2M
  const __m256i res_3 = _mm256_madd_epi16(src[3], coeffs[5]);
649
12.2M
  const __m256i res_5 = _mm256_madd_epi16(src[5], coeffs[6]);
650
12.2M
  const __m256i res_7 = _mm256_madd_epi16(src[7], coeffs[7]);
651
652
12.2M
  const __m256i res_odd = _mm256_add_epi32(_mm256_add_epi32(res_1, res_3),
653
12.2M
                                           _mm256_add_epi32(res_5, res_7));
654
655
  // Rearrange pixels back into the order 0 ... 7
656
12.2M
  *res_lo = _mm256_unpacklo_epi32(res_even, res_odd);
657
12.2M
  *res_hi = _mm256_unpackhi_epi32(res_even, res_odd);
658
12.2M
}
659
660
static inline void store_vertical_filter_output_avx2(
661
    const __m256i *res_lo, const __m256i *res_hi, const __m256i *res_add_const,
662
    const __m256i *wt, const __m256i *res_sub_const,
663
    const __m256i *round_bits_const, uint8_t *pred, ConvolveParams *conv_params,
664
    int i, int j, int k, const int reduce_bits_vert, int p_stride, int p_width,
665
12.2M
    const int round_bits) {
666
12.2M
  __m256i res_lo_1 = *res_lo;
667
12.2M
  __m256i res_hi_1 = *res_hi;
668
669
12.2M
  if (conv_params->is_compound) {
670
276k
    __m128i *const p_0 =
671
276k
        (__m128i *)&conv_params->dst[(i + k + 4) * conv_params->dst_stride + j];
672
276k
    __m128i *const p_1 =
673
276k
        (__m128i *)&conv_params
674
276k
            ->dst[(i + (k + 1) + 4) * conv_params->dst_stride + j];
675
676
276k
    res_lo_1 = _mm256_srai_epi32(_mm256_add_epi32(res_lo_1, *res_add_const),
677
276k
                                 reduce_bits_vert);
678
679
276k
    const __m256i temp_lo_16 = _mm256_packus_epi32(res_lo_1, res_lo_1);
680
276k
    __m256i res_lo_16;
681
276k
    if (conv_params->do_average) {
682
129k
      __m128i *const dst8_0 = (__m128i *)&pred[(i + k + 4) * p_stride + j];
683
129k
      __m128i *const dst8_1 =
684
129k
          (__m128i *)&pred[(i + (k + 1) + 4) * p_stride + j];
685
129k
      const __m128i p_16_0 = _mm_loadl_epi64(p_0);
686
129k
      const __m128i p_16_1 = _mm_loadl_epi64(p_1);
687
129k
      const __m256i p_16 =
688
129k
          _mm256_inserti128_si256(_mm256_castsi128_si256(p_16_0), p_16_1, 1);
689
129k
      if (conv_params->use_dist_wtd_comp_avg) {
690
67.4k
        const __m256i p_16_lo = _mm256_unpacklo_epi16(p_16, temp_lo_16);
691
67.4k
        const __m256i wt_res_lo = _mm256_madd_epi16(p_16_lo, *wt);
692
67.4k
        const __m256i shifted_32 =
693
67.4k
            _mm256_srai_epi32(wt_res_lo, DIST_PRECISION_BITS);
694
67.4k
        res_lo_16 = _mm256_packus_epi32(shifted_32, shifted_32);
695
67.4k
      } else {
696
61.6k
        res_lo_16 = _mm256_srai_epi16(_mm256_add_epi16(p_16, temp_lo_16), 1);
697
61.6k
      }
698
129k
      res_lo_16 = _mm256_add_epi16(res_lo_16, *res_sub_const);
699
129k
      res_lo_16 = _mm256_srai_epi16(
700
129k
          _mm256_add_epi16(res_lo_16, *round_bits_const), round_bits);
701
129k
      const __m256i res_8_lo = _mm256_packus_epi16(res_lo_16, res_lo_16);
702
129k
      const __m128i res_8_lo_0 = _mm256_castsi256_si128(res_8_lo);
703
129k
      const __m128i res_8_lo_1 = _mm256_extracti128_si256(res_8_lo, 1);
704
129k
      *(int *)dst8_0 = _mm_cvtsi128_si32(res_8_lo_0);
705
129k
      *(int *)dst8_1 = _mm_cvtsi128_si32(res_8_lo_1);
706
147k
    } else {
707
147k
      const __m128i temp_lo_16_0 = _mm256_castsi256_si128(temp_lo_16);
708
147k
      const __m128i temp_lo_16_1 = _mm256_extracti128_si256(temp_lo_16, 1);
709
147k
      _mm_storel_epi64(p_0, temp_lo_16_0);
710
147k
      _mm_storel_epi64(p_1, temp_lo_16_1);
711
147k
    }
712
276k
    if (p_width > 4) {
713
276k
      __m128i *const p4_0 =
714
276k
          (__m128i *)&conv_params
715
276k
              ->dst[(i + k + 4) * conv_params->dst_stride + j + 4];
716
276k
      __m128i *const p4_1 =
717
276k
          (__m128i *)&conv_params
718
276k
              ->dst[(i + (k + 1) + 4) * conv_params->dst_stride + j + 4];
719
276k
      res_hi_1 = _mm256_srai_epi32(_mm256_add_epi32(res_hi_1, *res_add_const),
720
276k
                                   reduce_bits_vert);
721
276k
      const __m256i temp_hi_16 = _mm256_packus_epi32(res_hi_1, res_hi_1);
722
276k
      __m256i res_hi_16;
723
276k
      if (conv_params->do_average) {
724
129k
        __m128i *const dst8_4_0 =
725
129k
            (__m128i *)&pred[(i + k + 4) * p_stride + j + 4];
726
129k
        __m128i *const dst8_4_1 =
727
129k
            (__m128i *)&pred[(i + (k + 1) + 4) * p_stride + j + 4];
728
129k
        const __m128i p4_16_0 = _mm_loadl_epi64(p4_0);
729
129k
        const __m128i p4_16_1 = _mm_loadl_epi64(p4_1);
730
129k
        const __m256i p4_16 = _mm256_inserti128_si256(
731
129k
            _mm256_castsi128_si256(p4_16_0), p4_16_1, 1);
732
129k
        if (conv_params->use_dist_wtd_comp_avg) {
733
67.4k
          const __m256i p_16_hi = _mm256_unpacklo_epi16(p4_16, temp_hi_16);
734
67.4k
          const __m256i wt_res_hi = _mm256_madd_epi16(p_16_hi, *wt);
735
67.4k
          const __m256i shifted_32 =
736
67.4k
              _mm256_srai_epi32(wt_res_hi, DIST_PRECISION_BITS);
737
67.4k
          res_hi_16 = _mm256_packus_epi32(shifted_32, shifted_32);
738
67.4k
        } else {
739
61.6k
          res_hi_16 = _mm256_srai_epi16(_mm256_add_epi16(p4_16, temp_hi_16), 1);
740
61.6k
        }
741
129k
        res_hi_16 = _mm256_add_epi16(res_hi_16, *res_sub_const);
742
129k
        res_hi_16 = _mm256_srai_epi16(
743
129k
            _mm256_add_epi16(res_hi_16, *round_bits_const), round_bits);
744
129k
        __m256i res_8_hi = _mm256_packus_epi16(res_hi_16, res_hi_16);
745
129k
        const __m128i res_8_hi_0 = _mm256_castsi256_si128(res_8_hi);
746
129k
        const __m128i res_8_hi_1 = _mm256_extracti128_si256(res_8_hi, 1);
747
129k
        *(int *)dst8_4_0 = _mm_cvtsi128_si32(res_8_hi_0);
748
129k
        *(int *)dst8_4_1 = _mm_cvtsi128_si32(res_8_hi_1);
749
147k
      } else {
750
147k
        const __m128i temp_hi_16_0 = _mm256_castsi256_si128(temp_hi_16);
751
147k
        const __m128i temp_hi_16_1 = _mm256_extracti128_si256(temp_hi_16, 1);
752
147k
        _mm_storel_epi64(p4_0, temp_hi_16_0);
753
147k
        _mm_storel_epi64(p4_1, temp_hi_16_1);
754
147k
      }
755
276k
    }
756
11.9M
  } else {
757
11.9M
    const __m256i res_lo_round = _mm256_srai_epi32(
758
11.9M
        _mm256_add_epi32(res_lo_1, *res_add_const), reduce_bits_vert);
759
11.9M
    const __m256i res_hi_round = _mm256_srai_epi32(
760
11.9M
        _mm256_add_epi32(res_hi_1, *res_add_const), reduce_bits_vert);
761
762
11.9M
    const __m256i res_16bit = _mm256_packs_epi32(res_lo_round, res_hi_round);
763
11.9M
    const __m256i res_8bit = _mm256_packus_epi16(res_16bit, res_16bit);
764
11.9M
    const __m128i res_8bit0 = _mm256_castsi256_si128(res_8bit);
765
11.9M
    const __m128i res_8bit1 = _mm256_extracti128_si256(res_8bit, 1);
766
767
    // Store, blending with 'pred' if needed
768
11.9M
    __m128i *const p = (__m128i *)&pred[(i + k + 4) * p_stride + j];
769
11.9M
    __m128i *const p1 = (__m128i *)&pred[(i + (k + 1) + 4) * p_stride + j];
770
771
11.9M
    if (p_width == 4) {
772
0
      *(int *)p = _mm_cvtsi128_si32(res_8bit0);
773
0
      *(int *)p1 = _mm_cvtsi128_si32(res_8bit1);
774
11.9M
    } else {
775
11.9M
      _mm_storel_epi64(p, res_8bit0);
776
11.9M
      _mm_storel_epi64(p1, res_8bit1);
777
11.9M
    }
778
11.9M
  }
779
12.2M
}
780
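// General vertical filter for an 8x8 block: interleave the intermediate rows
// from the horizontal pass, then for each pair of output rows rebuild the
// coefficients (both gamma and delta vary), filter and store them, and slide
// the source window down by two rows.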
781
static inline void warp_vertical_filter_avx2(
782
    uint8_t *pred, __m256i *horz_out, ConvolveParams *conv_params,
783
    int16_t gamma, int16_t delta, int p_height, int p_stride, int p_width,
784
    int i, int j, int sy4, const int reduce_bits_vert,
785
    const __m256i *res_add_const, const int round_bits,
786
    const __m256i *res_sub_const, const __m256i *round_bits_const,
787
1.51M
    const __m256i *wt) {
788
1.51M
  int k, row = 0;
789
1.51M
  __m256i src[8];
790
1.51M
  const __m256i src_0 = horz_out[0];
791
1.51M
  const __m256i src_1 =
792
1.51M
      _mm256_permute2x128_si256(horz_out[0], horz_out[1], 0x21);
793
1.51M
  const __m256i src_2 = horz_out[1];
794
1.51M
  const __m256i src_3 =
795
1.51M
      _mm256_permute2x128_si256(horz_out[1], horz_out[2], 0x21);
796
1.51M
  const __m256i src_4 = horz_out[2];
797
1.51M
  const __m256i src_5 =
798
1.51M
      _mm256_permute2x128_si256(horz_out[2], horz_out[3], 0x21);
799
800
1.51M
  src[0] = _mm256_unpacklo_epi16(src_0, src_1);
801
1.51M
  src[2] = _mm256_unpacklo_epi16(src_2, src_3);
802
1.51M
  src[4] = _mm256_unpacklo_epi16(src_4, src_5);
803
804
1.51M
  src[1] = _mm256_unpackhi_epi16(src_0, src_1);
805
1.51M
  src[3] = _mm256_unpackhi_epi16(src_2, src_3);
806
1.51M
  src[5] = _mm256_unpackhi_epi16(src_4, src_5);
807
808
7.55M
  for (k = -4; k < AOMMIN(4, p_height - i - 4); k += 2) {
809
6.03M
    int sy = sy4 + delta * (k + 4);
810
6.03M
    __m256i coeffs[8];
811
6.03M
    prepare_vertical_filter_coeffs_avx2(gamma, delta, sy, coeffs);
812
6.03M
    __m256i res_lo, res_hi;
813
6.03M
    filter_src_pixels_vertical_avx2(horz_out, src, coeffs, &res_lo, &res_hi,
814
6.03M
                                    row);
815
6.03M
    store_vertical_filter_output_avx2(&res_lo, &res_hi, res_add_const, wt,
816
6.03M
                                      res_sub_const, round_bits_const, pred,
817
6.03M
                                      conv_params, i, j, k, reduce_bits_vert,
818
6.03M
                                      p_stride, p_width, round_bits);
819
6.03M
    src[0] = src[2];
820
6.03M
    src[2] = src[4];
821
6.03M
    src[4] = src[6];
822
6.03M
    src[1] = src[3];
823
6.03M
    src[3] = src[5];
824
6.03M
    src[5] = src[7];
825
826
6.03M
    row += 1;
827
6.03M
  }
828
1.51M
}
829
830
static inline void warp_vertical_filter_gamma0_avx2(
831
    uint8_t *pred, __m256i *horz_out, ConvolveParams *conv_params,
832
    int16_t gamma, int16_t delta, int p_height, int p_stride, int p_width,
833
    int i, int j, int sy4, const int reduce_bits_vert,
834
    const __m256i *res_add_const, const int round_bits,
835
    const __m256i *res_sub_const, const __m256i *round_bits_const,
836
450k
    const __m256i *wt) {
837
450k
  (void)gamma;
838
450k
  int k, row = 0;
839
450k
  __m256i src[8];
840
450k
  const __m256i src_0 = horz_out[0];
841
450k
  const __m256i src_1 =
842
450k
      _mm256_permute2x128_si256(horz_out[0], horz_out[1], 0x21);
843
450k
  const __m256i src_2 = horz_out[1];
844
450k
  const __m256i src_3 =
845
450k
      _mm256_permute2x128_si256(horz_out[1], horz_out[2], 0x21);
846
450k
  const __m256i src_4 = horz_out[2];
847
450k
  const __m256i src_5 =
848
450k
      _mm256_permute2x128_si256(horz_out[2], horz_out[3], 0x21);
849
850
450k
  src[0] = _mm256_unpacklo_epi16(src_0, src_1);
851
450k
  src[2] = _mm256_unpacklo_epi16(src_2, src_3);
852
450k
  src[4] = _mm256_unpacklo_epi16(src_4, src_5);
853
854
450k
  src[1] = _mm256_unpackhi_epi16(src_0, src_1);
855
450k
  src[3] = _mm256_unpackhi_epi16(src_2, src_3);
856
450k
  src[5] = _mm256_unpackhi_epi16(src_4, src_5);
857
858
2.25M
  for (k = -4; k < AOMMIN(4, p_height - i - 4); k += 2) {
859
1.80M
    int sy = sy4 + delta * (k + 4);
860
1.80M
    __m256i coeffs[8];
861
1.80M
    prepare_vertical_filter_coeffs_gamma0_avx2(delta, sy, coeffs);
862
1.80M
    __m256i res_lo, res_hi;
863
1.80M
    filter_src_pixels_vertical_avx2(horz_out, src, coeffs, &res_lo, &res_hi,
864
1.80M
                                    row);
865
1.80M
    store_vertical_filter_output_avx2(&res_lo, &res_hi, res_add_const, wt,
866
1.80M
                                      res_sub_const, round_bits_const, pred,
867
1.80M
                                      conv_params, i, j, k, reduce_bits_vert,
868
1.80M
                                      p_stride, p_width, round_bits);
869
1.80M
    src[0] = src[2];
870
1.80M
    src[2] = src[4];
871
1.80M
    src[4] = src[6];
872
1.80M
    src[1] = src[3];
873
1.80M
    src[3] = src[5];
874
1.80M
    src[5] = src[7];
875
1.80M
    row += 1;
876
1.80M
  }
877
450k
}
878
879
static inline void warp_vertical_filter_delta0_avx2(
880
    uint8_t *pred, __m256i *horz_out, ConvolveParams *conv_params,
881
    int16_t gamma, int16_t delta, int p_height, int p_stride, int p_width,
882
    int i, int j, int sy4, const int reduce_bits_vert,
883
    const __m256i *res_add_const, const int round_bits,
884
    const __m256i *res_sub_const, const __m256i *round_bits_const,
885
677k
    const __m256i *wt) {
886
677k
  (void)delta;
887
677k
  int k, row = 0;
888
677k
  __m256i src[8], coeffs[8];
889
677k
  const __m256i src_0 = horz_out[0];
890
677k
  const __m256i src_1 =
891
677k
      _mm256_permute2x128_si256(horz_out[0], horz_out[1], 0x21);
892
677k
  const __m256i src_2 = horz_out[1];
893
677k
  const __m256i src_3 =
894
677k
      _mm256_permute2x128_si256(horz_out[1], horz_out[2], 0x21);
895
677k
  const __m256i src_4 = horz_out[2];
896
677k
  const __m256i src_5 =
897
677k
      _mm256_permute2x128_si256(horz_out[2], horz_out[3], 0x21);
898
899
677k
  src[0] = _mm256_unpacklo_epi16(src_0, src_1);
900
677k
  src[2] = _mm256_unpacklo_epi16(src_2, src_3);
901
677k
  src[4] = _mm256_unpacklo_epi16(src_4, src_5);
902
903
677k
  src[1] = _mm256_unpackhi_epi16(src_0, src_1);
904
677k
  src[3] = _mm256_unpackhi_epi16(src_2, src_3);
905
677k
  src[5] = _mm256_unpackhi_epi16(src_4, src_5);
906
907
677k
  prepare_vertical_filter_coeffs_delta0_avx2(gamma, sy4, coeffs);
908
909
3.35M
  for (k = -4; k < AOMMIN(4, p_height - i - 4); k += 2) {
910
2.67M
    __m256i res_lo, res_hi;
911
2.67M
    filter_src_pixels_vertical_avx2(horz_out, src, coeffs, &res_lo, &res_hi,
912
2.67M
                                    row);
913
2.67M
    store_vertical_filter_output_avx2(&res_lo, &res_hi, res_add_const, wt,
914
2.67M
                                      res_sub_const, round_bits_const, pred,
915
2.67M
                                      conv_params, i, j, k, reduce_bits_vert,
916
2.67M
                                      p_stride, p_width, round_bits);
917
2.67M
    src[0] = src[2];
918
2.67M
    src[2] = src[4];
919
2.67M
    src[4] = src[6];
920
2.67M
    src[1] = src[3];
921
2.67M
    src[3] = src[5];
922
2.67M
    src[5] = src[7];
923
2.67M
    row += 1;
924
2.67M
  }
925
677k
}
926
927
static inline void warp_vertical_filter_gamma0_delta0_avx2(
928
    uint8_t *pred, __m256i *horz_out, ConvolveParams *conv_params,
929
    int16_t gamma, int16_t delta, int p_height, int p_stride, int p_width,
930
    int i, int j, int sy4, const int reduce_bits_vert,
931
    const __m256i *res_add_const, const int round_bits,
932
    const __m256i *res_sub_const, const __m256i *round_bits_const,
933
431k
    const __m256i *wt) {
934
431k
  (void)gamma;
935
431k
  int k, row = 0;
936
431k
  __m256i src[8], coeffs[8];
937
431k
  const __m256i src_0 = horz_out[0];
938
431k
  const __m256i src_1 =
939
431k
      _mm256_permute2x128_si256(horz_out[0], horz_out[1], 0x21);
940
431k
  const __m256i src_2 = horz_out[1];
941
431k
  const __m256i src_3 =
942
431k
      _mm256_permute2x128_si256(horz_out[1], horz_out[2], 0x21);
943
431k
  const __m256i src_4 = horz_out[2];
944
431k
  const __m256i src_5 =
945
431k
      _mm256_permute2x128_si256(horz_out[2], horz_out[3], 0x21);
946
947
431k
  src[0] = _mm256_unpacklo_epi16(src_0, src_1);
948
431k
  src[2] = _mm256_unpacklo_epi16(src_2, src_3);
949
431k
  src[4] = _mm256_unpacklo_epi16(src_4, src_5);
950
951
431k
  src[1] = _mm256_unpackhi_epi16(src_0, src_1);
952
431k
  src[3] = _mm256_unpackhi_epi16(src_2, src_3);
953
431k
  src[5] = _mm256_unpackhi_epi16(src_4, src_5);
954
955
431k
  prepare_vertical_filter_coeffs_gamma0_avx2(delta, sy4, coeffs);
956
957
2.15M
  for (k = -4; k < AOMMIN(4, p_height - i - 4); k += 2) {
958
1.72M
    __m256i res_lo, res_hi;
959
1.72M
    filter_src_pixels_vertical_avx2(horz_out, src, coeffs, &res_lo, &res_hi,
960
1.72M
                                    row);
961
1.72M
    store_vertical_filter_output_avx2(&res_lo, &res_hi, res_add_const, wt,
962
1.72M
                                      res_sub_const, round_bits_const, pred,
963
1.72M
                                      conv_params, i, j, k, reduce_bits_vert,
964
1.72M
                                      p_stride, p_width, round_bits);
965
1.72M
    src[0] = src[2];
966
1.72M
    src[2] = src[4];
967
1.72M
    src[4] = src[6];
968
1.72M
    src[1] = src[3];
969
1.72M
    src[3] = src[5];
970
1.72M
    src[5] = src[7];
971
1.72M
    row += 1;
972
1.72M
  }
973
431k
}
974
975
static inline void prepare_warp_vertical_filter_avx2(
976
    uint8_t *pred, __m256i *horz_out, ConvolveParams *conv_params,
977
    int16_t gamma, int16_t delta, int p_height, int p_stride, int p_width,
978
    int i, int j, int sy4, const int reduce_bits_vert,
979
    const __m256i *res_add_const, const int round_bits,
980
    const __m256i *res_sub_const, const __m256i *round_bits_const,
981
3.07M
    const __m256i *wt) {
982
3.07M
  if (gamma == 0 && delta == 0)
983
431k
    warp_vertical_filter_gamma0_delta0_avx2(
984
431k
        pred, horz_out, conv_params, gamma, delta, p_height, p_stride, p_width,
985
431k
        i, j, sy4, reduce_bits_vert, res_add_const, round_bits, res_sub_const,
986
431k
        round_bits_const, wt);
987
2.64M
  else if (gamma == 0 && delta != 0)
988
450k
    warp_vertical_filter_gamma0_avx2(
989
450k
        pred, horz_out, conv_params, gamma, delta, p_height, p_stride, p_width,
990
450k
        i, j, sy4, reduce_bits_vert, res_add_const, round_bits, res_sub_const,
991
450k
        round_bits_const, wt);
992
2.19M
  else if (gamma != 0 && delta == 0)
993
677k
    warp_vertical_filter_delta0_avx2(
994
677k
        pred, horz_out, conv_params, gamma, delta, p_height, p_stride, p_width,
995
677k
        i, j, sy4, reduce_bits_vert, res_add_const, round_bits, res_sub_const,
996
677k
        round_bits_const, wt);
997
1.51M
  else
998
1.51M
    warp_vertical_filter_avx2(pred, horz_out, conv_params, gamma, delta,
999
1.51M
                              p_height, p_stride, p_width, i, j, sy4,
1000
1.51M
                              reduce_bits_vert, res_add_const, round_bits,
1001
1.51M
                              res_sub_const, round_bits_const, wt);
1002
3.07M
}
1003
1004
static inline void prepare_warp_horizontal_filter_avx2(
1005
    const uint8_t *ref, __m256i *horz_out, int stride, int32_t ix4, int32_t iy4,
1006
    int32_t sx4, int alpha, int beta, int p_height, int height, int i,
1007
    const __m256i *round_const, const __m128i *shift,
1008
2.81M
    const __m256i *shuffle_src) {
1009
2.81M
  if (alpha == 0 && beta == 0)
1010
1.00M
    warp_horizontal_filter_alpha0_beta0_avx2(
1011
1.00M
        ref, horz_out, stride, ix4, iy4, sx4, alpha, beta, p_height, height, i,
1012
1.00M
        round_const, shift, shuffle_src);
1013
1.80M
  else if (alpha == 0 && beta != 0)
1014
743k
    warp_horizontal_filter_alpha0_avx2(ref, horz_out, stride, ix4, iy4, sx4,
1015
743k
                                       alpha, beta, p_height, height, i,
1016
743k
                                       round_const, shift, shuffle_src);
1017
1.06M
  else if (alpha != 0 && beta == 0)
1018
533k
    warp_horizontal_filter_beta0_avx2(ref, horz_out, stride, ix4, iy4, sx4,
1019
533k
                                      alpha, beta, p_height, height, i,
1020
533k
                                      round_const, shift, shuffle_src);
1021
531k
  else
1022
531k
    warp_horizontal_filter_avx2(ref, horz_out, stride, ix4, iy4, sx4, alpha,
1023
531k
                                beta, p_height, height, i, round_const, shift,
1024
531k
                                shuffle_src);
1025
2.81M
}
1026
1027
void av1_warp_affine_avx2(const int32_t *mat, const uint8_t *ref, int width,
1028
                          int height, int stride, uint8_t *pred, int p_col,
1029
                          int p_row, int p_width, int p_height, int p_stride,
1030
                          int subsampling_x, int subsampling_y,
1031
                          ConvolveParams *conv_params, int16_t alpha,
1032
280k
                          int16_t beta, int16_t gamma, int16_t delta) {
1033
280k
  __m256i horz_out[8];
1034
280k
  int i, j, k;
1035
280k
  const int bd = 8;
1036
280k
  const int reduce_bits_horiz = conv_params->round_0;
1037
280k
  const int reduce_bits_vert = conv_params->is_compound
1038
280k
                                   ? conv_params->round_1
1039
280k
                                   : 2 * FILTER_BITS - reduce_bits_horiz;
1040
280k
  const int offset_bits_horiz = bd + FILTER_BITS - 1;
1041
280k
  assert(IMPLIES(conv_params->is_compound, conv_params->dst != NULL));
1042
1043
280k
  const int offset_bits_vert = bd + 2 * FILTER_BITS - reduce_bits_horiz;
1044
280k
  const __m256i reduce_bits_vert_const =
1045
280k
      _mm256_set1_epi32(((1 << reduce_bits_vert) >> 1));
1046
280k
  const __m256i res_add_const = _mm256_set1_epi32(1 << offset_bits_vert);
1047
280k
  const int round_bits =
1048
280k
      2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;
1049
280k
  const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0;
1050
280k
  assert(IMPLIES(conv_params->do_average, conv_params->is_compound));
1051
1052
280k
  const __m256i round_const = _mm256_set1_epi16(
1053
280k
      (1 << offset_bits_horiz) + ((1 << reduce_bits_horiz) >> 1));
1054
280k
  const __m128i shift = _mm_cvtsi32_si128(reduce_bits_horiz);
1055
1056
280k
  __m256i res_sub_const, round_bits_const, wt;
1057
280k
  unpack_weights_and_set_round_const_avx2(conv_params, round_bits, offset_bits,
1058
280k
                                          &res_sub_const, &round_bits_const,
1059
280k
                                          &wt);
1060
1061
280k
  __m256i res_add_const_1;
1062
280k
  if (conv_params->is_compound == 1) {
1063
10.5k
    res_add_const_1 = _mm256_add_epi32(reduce_bits_vert_const, res_add_const);
1064
270k
  } else {
1065
270k
    res_add_const_1 = _mm256_set1_epi32(-(1 << (bd + reduce_bits_vert - 1)) +
1066
270k
                                        ((1 << reduce_bits_vert) >> 1));
1067
270k
  }
1068
280k
  const int32_t const1 = alpha * (-4) + beta * (-4) +
1069
280k
                         (1 << (WARPEDDIFF_PREC_BITS - 1)) +
1070
280k
                         (WARPEDPIXEL_PREC_SHIFTS << WARPEDDIFF_PREC_BITS);
1071
280k
  const int32_t const2 = gamma * (-4) + delta * (-4) +
1072
280k
                         (1 << (WARPEDDIFF_PREC_BITS - 1)) +
1073
280k
                         (WARPEDPIXEL_PREC_SHIFTS << WARPEDDIFF_PREC_BITS);
1074
280k
  const int32_t const3 = ((1 << WARP_PARAM_REDUCE_BITS) - 1);
1075
280k
  const int16_t const4 = (1 << (bd + FILTER_BITS - reduce_bits_horiz - 1));
1076
280k
  const int16_t const5 = (1 << (FILTER_BITS - reduce_bits_horiz));
1077
1078
280k
  __m256i shuffle_src[4];
1079
280k
  shuffle_src[0] = _mm256_load_si256((__m256i *)shuffle_src0);
1080
280k
  shuffle_src[1] = _mm256_load_si256((__m256i *)shuffle_src1);
1081
280k
  shuffle_src[2] = _mm256_load_si256((__m256i *)shuffle_src2);
1082
280k
  shuffle_src[3] = _mm256_load_si256((__m256i *)shuffle_src3);
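  // The shuffle_src masks pair adjacent source bytes so the horizontal filter
  // can apply two taps per 16-bit product via _mm256_maddubs_epi16.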
1083
1084
967k
  for (i = 0; i < p_height; i += 8) {
1085
3.78M
    for (j = 0; j < p_width; j += 8) {
1086
3.09M
      const int32_t src_x = (p_col + j + 4) << subsampling_x;
1087
3.09M
      const int32_t src_y = (p_row + i + 4) << subsampling_y;
1088
3.09M
      const int64_t dst_x =
1089
3.09M
          (int64_t)mat[2] * src_x + (int64_t)mat[3] * src_y + (int64_t)mat[0];
1090
3.09M
      const int64_t dst_y =
1091
3.09M
          (int64_t)mat[4] * src_x + (int64_t)mat[5] * src_y + (int64_t)mat[1];
1092
3.09M
      const int64_t x4 = dst_x >> subsampling_x;
1093
3.09M
      const int64_t y4 = dst_y >> subsampling_y;
1094
1095
3.09M
      int32_t ix4 = (int32_t)(x4 >> WARPEDMODEL_PREC_BITS);
1096
3.09M
      int32_t sx4 = x4 & ((1 << WARPEDMODEL_PREC_BITS) - 1);
1097
3.09M
      int32_t iy4 = (int32_t)(y4 >> WARPEDMODEL_PREC_BITS);
1098
3.09M
      int32_t sy4 = y4 & ((1 << WARPEDMODEL_PREC_BITS) - 1);
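      // (ix4, iy4) is the integer source position of the tile centre after
      // the affine mapping; (sx4, sy4) is the sub-pel remainder in
      // WARPEDMODEL_PREC_BITS precision.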
1099
1100
      // Add in all the constant terms, including rounding and offset
1101
3.09M
      sx4 += const1;
1102
3.09M
      sy4 += const2;
1103
1104
3.09M
      sx4 &= ~const3;
1105
3.09M
      sy4 &= ~const3;
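      // Clearing the low WARP_PARAM_REDUCE_BITS bits rounds sx4/sy4 to the
      // reduced precision at which the warp filter coefficients are tabulated.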
1106
1107
      // Horizontal filter
1108
      // If the block is aligned such that, after clamping, every sample
1109
      // would be taken from the leftmost/rightmost column, then we can
1110
      // skip the expensive horizontal filter.
1111
1112
3.09M
      if (ix4 <= -7) {
1113
172k
        int iy, row = 0;
1114
1.38M
        for (k = -7; k <= (AOMMIN(8, p_height - i) - 2); k += 2) {
1115
1.21M
          iy = iy4 + k;
1116
1.21M
          iy = clamp(iy, 0, height - 1);
1117
1.21M
          const __m256i temp_0 =
1118
1.21M
              _mm256_set1_epi16(const4 + ref[iy * stride] * const5);
1119
1.21M
          iy = iy4 + k + 1;
1120
1.21M
          iy = clamp(iy, 0, height - 1);
1121
1.21M
          const __m256i temp_1 =
1122
1.21M
              _mm256_set1_epi16(const4 + ref[iy * stride] * const5);
1123
1.21M
          horz_out[row] = _mm256_blend_epi32(temp_0, temp_1, 0xf0);
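          // Rows k and k+1 collapse to per-row constants (every tap reads
          // column 0); the blend packs them into the low/high 128-bit lanes,
          // the two-rows-per-register layout the vertical pass expects.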
1124
1.21M
          row += 1;
1125
1.21M
        }
1126
172k
        iy = iy4 + k;
1127
172k
        iy = clamp(iy, 0, height - 1);
1128
172k
        horz_out[row] = _mm256_set1_epi16(const4 + ref[iy * stride] * const5);
1129
2.92M
      } else if (ix4 >= width + 6) {
1130
66.4k
        int iy, row = 0;
1131
531k
        for (k = -7; k <= (AOMMIN(8, p_height - i) - 2); k += 2) {
1132
464k
          iy = iy4 + k;
1133
464k
          iy = clamp(iy, 0, height - 1);
1134
464k
          const __m256i temp_0 = _mm256_set1_epi16(
1135
464k
              const4 + ref[iy * stride + (width - 1)] * const5);
1136
464k
          iy = iy4 + k + 1;
1137
464k
          iy = clamp(iy, 0, height - 1);
1138
464k
          const __m256i temp_1 = _mm256_set1_epi16(
1139
464k
              const4 + ref[iy * stride + (width - 1)] * const5);
1140
464k
          horz_out[row] = _mm256_blend_epi32(temp_0, temp_1, 0xf0);
1141
464k
          row += 1;
1142
464k
        }
1143
66.4k
        iy = iy4 + k;
1144
66.4k
        iy = clamp(iy, 0, height - 1);
1145
66.4k
        horz_out[row] =
1146
66.4k
            _mm256_set1_epi16(const4 + ref[iy * stride + (width - 1)] * const5);
1147
2.85M
      } else if (((ix4 - 7) < 0) || ((ix4 + 9) > width)) {
1148
128k
        const int out_of_boundary_left = -(ix4 - 6);
1149
128k
        const int out_of_boundary_right = (ix4 + 8) - width;
1150
128k
        int iy, sx, row = 0;
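        // Block straddles the left/right picture edge: load 16 bytes starting
        // at ix4 - 7 and use the warp_pad_left / warp_pad_right shuffles to
        // replicate the edge pixel into the out-of-range positions.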
1151
1.01M
        for (k = -7; k <= (AOMMIN(8, p_height - i) - 2); k += 2) {
1152
888k
          iy = iy4 + k;
1153
888k
          iy = clamp(iy, 0, height - 1);
1154
888k
          __m128i src0 =
1155
888k
              _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
1156
888k
          iy = iy4 + k + 1;
1157
888k
          iy = clamp(iy, 0, height - 1);
1158
888k
          __m128i src1 =
1159
888k
              _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
1160
1161
888k
          if (out_of_boundary_left >= 0) {
1162
568k
            const __m128i shuffle_reg_left =
1163
568k
                _mm_loadu_si128((__m128i *)warp_pad_left[out_of_boundary_left]);
1164
568k
            src0 = _mm_shuffle_epi8(src0, shuffle_reg_left);
1165
568k
            src1 = _mm_shuffle_epi8(src1, shuffle_reg_left);
1166
568k
          }
1167
888k
          if (out_of_boundary_right >= 0) {
1168
354k
            const __m128i shuffle_reg_right = _mm_loadu_si128(
1169
354k
                (__m128i *)warp_pad_right[out_of_boundary_right]);
1170
354k
            src0 = _mm_shuffle_epi8(src0, shuffle_reg_right);
1171
354k
            src1 = _mm_shuffle_epi8(src1, shuffle_reg_right);
1172
354k
          }
1173
888k
          sx = sx4 + beta * (k + 4);
1174
888k
          const __m256i src_01 =
1175
888k
              _mm256_inserti128_si256(_mm256_castsi128_si256(src0), src1, 0x1);
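          // Row k goes in the low 128-bit lane and row k+1 in the high lane,
          // so one call filters two source rows.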
1176
888k
          horizontal_filter_avx2(src_01, horz_out, sx, alpha, beta, row,
1177
888k
                                 shuffle_src, &round_const, &shift);
1178
888k
          row += 1;
1179
888k
        }
1180
128k
        iy = iy4 + k;
1181
128k
        iy = clamp(iy, 0, height - 1);
1182
128k
        __m128i src = _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
1183
128k
        if (out_of_boundary_left >= 0) {
1184
81.1k
          const __m128i shuffle_reg_left =
1185
81.1k
              _mm_loadu_si128((__m128i *)warp_pad_left[out_of_boundary_left]);
1186
81.1k
          src = _mm_shuffle_epi8(src, shuffle_reg_left);
1187
81.1k
        }
1188
128k
        if (out_of_boundary_right >= 0) {
1189
50.5k
          const __m128i shuffle_reg_right =
1190
50.5k
              _mm_loadu_si128((__m128i *)warp_pad_right[out_of_boundary_right]);
1191
50.5k
          src = _mm_shuffle_epi8(src, shuffle_reg_right);
1192
50.5k
        }
1193
128k
        sx = sx4 + beta * (k + 4);
1194
128k
        const __m256i src_01 = _mm256_castsi128_si256(src);
1195
128k
        __m256i coeff[4];
1196
128k
        prepare_horizontal_filter_coeff(alpha, sx, coeff);
1197
128k
        filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src,
1198
128k
                               &round_const, &shift, row);
1199
2.73M
      } else {
1200
2.73M
        prepare_warp_horizontal_filter_avx2(
1201
2.73M
            ref, horz_out, stride, ix4, iy4, sx4, alpha, beta, p_height, height,
1202
2.73M
            i, &round_const, &shift, shuffle_src);
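        // Fully in-bounds tile: rows are loaded directly, and the helper picks
        // a cheaper filter variant when alpha and/or beta are zero.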
1203
2.73M
      }
1204
1205
      // Vertical filter
1206
3.09M
      prepare_warp_vertical_filter_avx2(
1207
3.09M
          pred, horz_out, conv_params, gamma, delta, p_height, p_stride,
1208
3.09M
          p_width, i, j, sy4, reduce_bits_vert, &res_add_const_1, round_bits,
1209
3.09M
          &res_sub_const, &round_bits_const, &wt);
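      // Vertical 8-tap pass over horz_out: stores 16-bit values to the
      // compound buffer (with optional averaging) or rounded 8-bit pixels to
      // pred.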
1210
3.09M
    }
1211
686k
  }
1212
280k
}
1213
1214
#endif  // !CONFIG_HIGHWAY