Coverage Report

Created: 2025-07-23 06:32

/src/aom/av1/common/x86/warp_plane_avx2.c
Line
Count
Source
1
/*
2
 * Copyright (c) 2019, Alliance for Open Media. All rights reserved.
3
 *
4
 * This source code is subject to the terms of the BSD 2 Clause License and
5
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
6
 * was not distributed with this source code in the LICENSE file, you can
7
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
8
 * Media Patent License 1.0 was not distributed with this source code in the
9
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
10
 */
11
12
#include <immintrin.h>
13
#include "config/av1_rtcd.h"
14
#include "av1/common/warped_motion.h"
15
#include "aom_dsp/x86/synonyms.h"
16
17
DECLARE_ALIGNED(32, static const uint8_t, shuffle_alpha0_mask01_avx2[32]) = {
18
  0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,
19
  0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1
20
};
21
22
DECLARE_ALIGNED(32, static const uint8_t, shuffle_alpha0_mask23_avx2[32]) = {
23
  2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3,
24
  2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3
25
};
26
27
DECLARE_ALIGNED(32, static const uint8_t, shuffle_alpha0_mask45_avx2[32]) = {
28
  4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5,
29
  4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5
30
};
31
32
DECLARE_ALIGNED(32, static const uint8_t, shuffle_alpha0_mask67_avx2[32]) = {
33
  6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7,
34
  6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7
35
};
36
37
DECLARE_ALIGNED(32, static const uint8_t, shuffle_gamma0_mask0_avx2[32]) = {
38
  0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3,
39
  0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3
40
};
41
42
DECLARE_ALIGNED(32, static const uint8_t, shuffle_gamma0_mask1_avx2[32]) = {
43
  4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7,
44
  4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7
45
};
46
47
DECLARE_ALIGNED(32, static const uint8_t, shuffle_gamma0_mask2_avx2[32]) = {
48
  8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11,
49
  8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11
50
};
51
52
DECLARE_ALIGNED(32, static const uint8_t, shuffle_gamma0_mask3_avx2[32]) = {
53
  12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15,
54
  12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15
55
};
56
57
DECLARE_ALIGNED(32, static const uint8_t,
58
                shuffle_src0[32]) = { 0, 2, 2, 4, 4, 6, 6, 8, 1, 3, 3,
59
                                      5, 5, 7, 7, 9, 0, 2, 2, 4, 4, 6,
60
                                      6, 8, 1, 3, 3, 5, 5, 7, 7, 9 };
61
62
DECLARE_ALIGNED(32, static const uint8_t,
63
                shuffle_src1[32]) = { 4,  6,  6,  8,  8,  10, 10, 12, 5,  7, 7,
64
                                      9,  9,  11, 11, 13, 4,  6,  6,  8,  8, 10,
65
                                      10, 12, 5,  7,  7,  9,  9,  11, 11, 13 };
66
67
DECLARE_ALIGNED(32, static const uint8_t,
68
                shuffle_src2[32]) = { 1, 3, 3, 5, 5,  7, 7, 9, 2, 4, 4,
69
                                      6, 6, 8, 8, 10, 1, 3, 3, 5, 5, 7,
70
                                      7, 9, 2, 4, 4,  6, 6, 8, 8, 10 };
71
72
DECLARE_ALIGNED(32, static const uint8_t,
73
                shuffle_src3[32]) = { 5,  7,  7,  9,  9,  11, 11, 13, 6,  8, 8,
74
                                      10, 10, 12, 12, 14, 5,  7,  7,  9,  9, 11,
75
                                      11, 13, 6,  8,  8,  10, 10, 12, 12, 14 };
76
77
static inline void filter_src_pixels_avx2(const __m256i src, __m256i *horz_out,
78
                                          __m256i *coeff,
79
                                          const __m256i *shuffle_src,
80
                                          const __m256i *round_const,
81
15.5M
                                          const __m128i *shift, int row) {
82
15.5M
  const __m256i src_0 = _mm256_shuffle_epi8(src, shuffle_src[0]);
83
15.5M
  const __m256i src_1 = _mm256_shuffle_epi8(src, shuffle_src[1]);
84
15.5M
  const __m256i src_2 = _mm256_shuffle_epi8(src, shuffle_src[2]);
85
15.5M
  const __m256i src_3 = _mm256_shuffle_epi8(src, shuffle_src[3]);
86
87
15.5M
  const __m256i res_02 = _mm256_maddubs_epi16(src_0, coeff[0]);
88
15.5M
  const __m256i res_46 = _mm256_maddubs_epi16(src_1, coeff[1]);
89
15.5M
  const __m256i res_13 = _mm256_maddubs_epi16(src_2, coeff[2]);
90
15.5M
  const __m256i res_57 = _mm256_maddubs_epi16(src_3, coeff[3]);
91
92
15.5M
  const __m256i res_even = _mm256_add_epi16(res_02, res_46);
93
15.5M
  const __m256i res_odd = _mm256_add_epi16(res_13, res_57);
94
15.5M
  const __m256i res =
95
15.5M
      _mm256_add_epi16(_mm256_add_epi16(res_even, res_odd), *round_const);
96
15.5M
  horz_out[row] = _mm256_srl_epi16(res, *shift);
97
15.5M
}
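
filter_src_pixels_avx2 above evaluates the 8-tap horizontal filter for two rows of eight output pixels at once: the shuffle masks pair adjacent source bytes, _mm256_maddubs_epi16 multiplies each pair by the matching pair of 8-bit taps in coeff[0..3], and the four partial sums are added, offset by *round_const, and shifted by *shift. A minimal scalar sketch of the same per-pixel computation (warp_horiz_ref is an illustrative helper, not part of the library):

#include <stdint.h>

// One horizontal output sample: eight signed 8-bit taps applied to eight
// unsigned source bytes, plus the offset/rounding term built into round_const
// above ((1 << offset_bits_horiz) + ((1 << reduce_bits_horiz) >> 1)), then a
// right shift by reduce_bits_horiz.
static int16_t warp_horiz_ref(const uint8_t *src, const int8_t *taps,
                              int32_t round_const, int reduce_bits_horiz) {
  int32_t sum = round_const;
  for (int t = 0; t < 8; ++t) sum += (int32_t)taps[t] * src[t];
  return (int16_t)(sum >> reduce_bits_horiz);
}
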
98
99
static inline void prepare_horizontal_filter_coeff_avx2(int alpha, int beta,
100
                                                        int sx,
101
3.68M
                                                        __m256i *coeff) {
102
3.68M
  __m128i tmp_0 = _mm_loadl_epi64(
103
3.68M
      (__m128i *)&av1_filter_8bit[((unsigned)(sx + 0 * alpha)) >>
104
3.68M
                                  WARPEDDIFF_PREC_BITS]);
105
3.68M
  __m128i tmp_1 = _mm_loadl_epi64(
106
3.68M
      (__m128i *)&av1_filter_8bit[((unsigned)(sx + 1 * alpha)) >>
107
3.68M
                                  WARPEDDIFF_PREC_BITS]);
108
3.68M
  __m128i tmp_2 = _mm_loadl_epi64(
109
3.68M
      (__m128i *)&av1_filter_8bit[((unsigned)(sx + 2 * alpha)) >>
110
3.68M
                                  WARPEDDIFF_PREC_BITS]);
111
3.68M
  __m128i tmp_3 = _mm_loadl_epi64(
112
3.68M
      (__m128i *)&av1_filter_8bit[((unsigned)(sx + 3 * alpha)) >>
113
3.68M
                                  WARPEDDIFF_PREC_BITS]);
114
115
3.68M
  __m128i tmp_4 = _mm_loadl_epi64(
116
3.68M
      (__m128i *)&av1_filter_8bit[((unsigned)(sx + 4 * alpha)) >>
117
3.68M
                                  WARPEDDIFF_PREC_BITS]);
118
3.68M
  __m128i tmp_5 = _mm_loadl_epi64(
119
3.68M
      (__m128i *)&av1_filter_8bit[((unsigned)(sx + 5 * alpha)) >>
120
3.68M
                                  WARPEDDIFF_PREC_BITS]);
121
3.68M
  __m128i tmp_6 = _mm_loadl_epi64(
122
3.68M
      (__m128i *)&av1_filter_8bit[((unsigned)(sx + 6 * alpha)) >>
123
3.68M
                                  WARPEDDIFF_PREC_BITS]);
124
3.68M
  __m128i tmp_7 = _mm_loadl_epi64(
125
3.68M
      (__m128i *)&av1_filter_8bit[((unsigned)(sx + 7 * alpha)) >>
126
3.68M
                                  WARPEDDIFF_PREC_BITS]);
127
128
3.68M
  __m256i tmp0_256 = _mm256_castsi128_si256(tmp_0);
129
3.68M
  __m256i tmp2_256 = _mm256_castsi128_si256(tmp_2);
130
3.68M
  __m256i tmp1_256 = _mm256_castsi128_si256(tmp_1);
131
3.68M
  __m256i tmp3_256 = _mm256_castsi128_si256(tmp_3);
132
133
3.68M
  __m256i tmp4_256 = _mm256_castsi128_si256(tmp_4);
134
3.68M
  __m256i tmp6_256 = _mm256_castsi128_si256(tmp_6);
135
3.68M
  __m256i tmp5_256 = _mm256_castsi128_si256(tmp_5);
136
3.68M
  __m256i tmp7_256 = _mm256_castsi128_si256(tmp_7);
137
138
3.68M
  __m128i tmp_8 = _mm_loadl_epi64(
139
3.68M
      (__m128i *)&av1_filter_8bit[(unsigned)((sx + beta) + 0 * alpha) >>
140
3.68M
                                  WARPEDDIFF_PREC_BITS]);
141
3.68M
  tmp0_256 = _mm256_inserti128_si256(tmp0_256, tmp_8, 1);
142
143
3.68M
  __m128i tmp_9 = _mm_loadl_epi64(
144
3.68M
      (__m128i *)&av1_filter_8bit[(unsigned)((sx + beta) + 1 * alpha) >>
145
3.68M
                                  WARPEDDIFF_PREC_BITS]);
146
3.68M
  tmp1_256 = _mm256_inserti128_si256(tmp1_256, tmp_9, 1);
147
148
3.68M
  __m128i tmp_10 = _mm_loadl_epi64(
149
3.68M
      (__m128i *)&av1_filter_8bit[(unsigned)((sx + beta) + 2 * alpha) >>
150
3.68M
                                  WARPEDDIFF_PREC_BITS]);
151
3.68M
  tmp2_256 = _mm256_inserti128_si256(tmp2_256, tmp_10, 1);
152
153
3.68M
  __m128i tmp_11 = _mm_loadl_epi64(
154
3.68M
      (__m128i *)&av1_filter_8bit[(unsigned)((sx + beta) + 3 * alpha) >>
155
3.68M
                                  WARPEDDIFF_PREC_BITS]);
156
3.68M
  tmp3_256 = _mm256_inserti128_si256(tmp3_256, tmp_11, 1);
157
158
3.68M
  tmp_2 = _mm_loadl_epi64(
159
3.68M
      (__m128i *)&av1_filter_8bit[(unsigned)((sx + beta) + 4 * alpha) >>
160
3.68M
                                  WARPEDDIFF_PREC_BITS]);
161
3.68M
  tmp4_256 = _mm256_inserti128_si256(tmp4_256, tmp_2, 1);
162
163
3.68M
  tmp_3 = _mm_loadl_epi64(
164
3.68M
      (__m128i *)&av1_filter_8bit[(unsigned)((sx + beta) + 5 * alpha) >>
165
3.68M
                                  WARPEDDIFF_PREC_BITS]);
166
3.68M
  tmp5_256 = _mm256_inserti128_si256(tmp5_256, tmp_3, 1);
167
168
3.68M
  tmp_6 = _mm_loadl_epi64(
169
3.68M
      (__m128i *)&av1_filter_8bit[(unsigned)((sx + beta) + 6 * alpha) >>
170
3.68M
                                  WARPEDDIFF_PREC_BITS]);
171
3.68M
  tmp6_256 = _mm256_inserti128_si256(tmp6_256, tmp_6, 1);
172
173
3.68M
  tmp_7 = _mm_loadl_epi64(
174
3.68M
      (__m128i *)&av1_filter_8bit[(unsigned)((sx + beta) + 7 * alpha) >>
175
3.68M
                                  WARPEDDIFF_PREC_BITS]);
176
3.68M
  tmp7_256 = _mm256_inserti128_si256(tmp7_256, tmp_7, 1);
177
178
3.68M
  const __m256i tmp_12 = _mm256_unpacklo_epi16(tmp0_256, tmp2_256);
179
3.68M
  const __m256i tmp_13 = _mm256_unpacklo_epi16(tmp1_256, tmp3_256);
180
3.68M
  const __m256i tmp_14 = _mm256_unpacklo_epi16(tmp4_256, tmp6_256);
181
3.68M
  const __m256i tmp_15 = _mm256_unpacklo_epi16(tmp5_256, tmp7_256);
182
183
3.68M
  const __m256i res_0 = _mm256_unpacklo_epi32(tmp_12, tmp_14);
184
3.68M
  const __m256i res_1 = _mm256_unpackhi_epi32(tmp_12, tmp_14);
185
3.68M
  const __m256i res_2 = _mm256_unpacklo_epi32(tmp_13, tmp_15);
186
3.68M
  const __m256i res_3 = _mm256_unpackhi_epi32(tmp_13, tmp_15);
187
188
3.68M
  coeff[0] = _mm256_unpacklo_epi64(res_0, res_2);
189
3.68M
  coeff[1] = _mm256_unpackhi_epi64(res_0, res_2);
190
3.68M
  coeff[2] = _mm256_unpacklo_epi64(res_1, res_3);
191
3.68M
  coeff[3] = _mm256_unpackhi_epi64(res_1, res_3);
192
3.68M
}
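
prepare_horizontal_filter_coeff_avx2 gathers one 8-tap filter per output pixel (phase (sx + k * alpha) >> WARPEDDIFF_PREC_BITS for pixel k of the first row, and the same lookups at sx + beta for the second row in the upper 128-bit lane), then transposes the eight filters with the unpack sequence so that each coeff[i] register carries one tap pair from every filter, in the layout _mm256_maddubs_epi16 expects. A hypothetical helper spelling out the table indexing (phase_taps is illustrative only; av1_filter_8bit is assumed to be the per-phase table of eight signed 8-bit taps from av1/common/warped_motion.h, as the maddubs usage implies):

#include "av1/common/warped_motion.h"

// Taps used for output pixel `px` of row `r` (r = 0 or 1), mirroring the
// _mm_loadl_epi64 lookups above.
static const int8_t *phase_taps(int sx, int alpha, int beta, int px, int r) {
  const unsigned phase = (unsigned)(sx + r * beta + px * alpha);
  return av1_filter_8bit[phase >> WARPEDDIFF_PREC_BITS];
}
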
193
194
static inline void prepare_horizontal_filter_coeff_beta0_avx2(int alpha, int sx,
195
524k
                                                              __m256i *coeff) {
196
524k
  __m128i tmp_0 = _mm_loadl_epi64(
197
524k
      (__m128i *)&av1_filter_8bit[(sx + 0 * alpha) >> WARPEDDIFF_PREC_BITS]);
198
524k
  __m128i tmp_1 = _mm_loadl_epi64(
199
524k
      (__m128i *)&av1_filter_8bit[(sx + 1 * alpha) >> WARPEDDIFF_PREC_BITS]);
200
524k
  __m128i tmp_2 = _mm_loadl_epi64(
201
524k
      (__m128i *)&av1_filter_8bit[(sx + 2 * alpha) >> WARPEDDIFF_PREC_BITS]);
202
524k
  __m128i tmp_3 = _mm_loadl_epi64(
203
524k
      (__m128i *)&av1_filter_8bit[(sx + 3 * alpha) >> WARPEDDIFF_PREC_BITS]);
204
524k
  __m128i tmp_4 = _mm_loadl_epi64(
205
524k
      (__m128i *)&av1_filter_8bit[(sx + 4 * alpha) >> WARPEDDIFF_PREC_BITS]);
206
524k
  __m128i tmp_5 = _mm_loadl_epi64(
207
524k
      (__m128i *)&av1_filter_8bit[(sx + 5 * alpha) >> WARPEDDIFF_PREC_BITS]);
208
524k
  __m128i tmp_6 = _mm_loadl_epi64(
209
524k
      (__m128i *)&av1_filter_8bit[(sx + 6 * alpha) >> WARPEDDIFF_PREC_BITS]);
210
524k
  __m128i tmp_7 = _mm_loadl_epi64(
211
524k
      (__m128i *)&av1_filter_8bit[(sx + 7 * alpha) >> WARPEDDIFF_PREC_BITS]);
212
213
524k
  tmp_0 = _mm_unpacklo_epi16(tmp_0, tmp_2);
214
524k
  tmp_1 = _mm_unpacklo_epi16(tmp_1, tmp_3);
215
524k
  tmp_4 = _mm_unpacklo_epi16(tmp_4, tmp_6);
216
524k
  tmp_5 = _mm_unpacklo_epi16(tmp_5, tmp_7);
217
218
524k
  const __m256i tmp_12 = _mm256_broadcastsi128_si256(tmp_0);
219
524k
  const __m256i tmp_13 = _mm256_broadcastsi128_si256(tmp_1);
220
524k
  const __m256i tmp_14 = _mm256_broadcastsi128_si256(tmp_4);
221
524k
  const __m256i tmp_15 = _mm256_broadcastsi128_si256(tmp_5);
222
223
524k
  const __m256i res_0 = _mm256_unpacklo_epi32(tmp_12, tmp_14);
224
524k
  const __m256i res_1 = _mm256_unpackhi_epi32(tmp_12, tmp_14);
225
524k
  const __m256i res_2 = _mm256_unpacklo_epi32(tmp_13, tmp_15);
226
524k
  const __m256i res_3 = _mm256_unpackhi_epi32(tmp_13, tmp_15);
227
228
524k
  coeff[0] = _mm256_unpacklo_epi64(res_0, res_2);
229
524k
  coeff[1] = _mm256_unpackhi_epi64(res_0, res_2);
230
524k
  coeff[2] = _mm256_unpacklo_epi64(res_1, res_3);
231
524k
  coeff[3] = _mm256_unpackhi_epi64(res_1, res_3);
232
524k
}
233
234
static inline void prepare_horizontal_filter_coeff_alpha0_avx2(int beta, int sx,
235
3.03M
                                                               __m256i *coeff) {
236
3.03M
  const __m128i tmp_0 =
237
3.03M
      _mm_loadl_epi64((__m128i *)&av1_filter_8bit[sx >> WARPEDDIFF_PREC_BITS]);
238
3.03M
  const __m128i tmp_1 = _mm_loadl_epi64(
239
3.03M
      (__m128i *)&av1_filter_8bit[(sx + beta) >> WARPEDDIFF_PREC_BITS]);
240
241
3.03M
  const __m256i res_0 =
242
3.03M
      _mm256_inserti128_si256(_mm256_castsi128_si256(tmp_0), tmp_1, 0x1);
243
244
3.03M
  coeff[0] = _mm256_shuffle_epi8(
245
3.03M
      res_0, _mm256_load_si256((__m256i *)shuffle_alpha0_mask01_avx2));
246
3.03M
  coeff[1] = _mm256_shuffle_epi8(
247
3.03M
      res_0, _mm256_load_si256((__m256i *)shuffle_alpha0_mask23_avx2));
248
3.03M
  coeff[2] = _mm256_shuffle_epi8(
249
3.03M
      res_0, _mm256_load_si256((__m256i *)shuffle_alpha0_mask45_avx2));
250
3.03M
  coeff[3] = _mm256_shuffle_epi8(
251
3.03M
      res_0, _mm256_load_si256((__m256i *)shuffle_alpha0_mask67_avx2));
252
3.03M
}
253
254
static inline void horizontal_filter_avx2(const __m256i src, __m256i *horz_out,
255
                                          int sx, int alpha, int beta, int row,
256
                                          const __m256i *shuffle_src,
257
                                          const __m256i *round_const,
258
3.68M
                                          const __m128i *shift) {
259
3.68M
  __m256i coeff[4];
260
3.68M
  prepare_horizontal_filter_coeff_avx2(alpha, beta, sx, coeff);
261
3.68M
  filter_src_pixels_avx2(src, horz_out, coeff, shuffle_src, round_const, shift,
262
3.68M
                         row);
263
3.68M
}
264
static inline void prepare_horizontal_filter_coeff(int alpha, int sx,
265
534k
                                                   __m256i *coeff) {
266
534k
  const __m128i tmp_0 = _mm_loadl_epi64(
267
534k
      (__m128i *)&av1_filter_8bit[(sx + 0 * alpha) >> WARPEDDIFF_PREC_BITS]);
268
534k
  const __m128i tmp_1 = _mm_loadl_epi64(
269
534k
      (__m128i *)&av1_filter_8bit[(sx + 1 * alpha) >> WARPEDDIFF_PREC_BITS]);
270
534k
  const __m128i tmp_2 = _mm_loadl_epi64(
271
534k
      (__m128i *)&av1_filter_8bit[(sx + 2 * alpha) >> WARPEDDIFF_PREC_BITS]);
272
534k
  const __m128i tmp_3 = _mm_loadl_epi64(
273
534k
      (__m128i *)&av1_filter_8bit[(sx + 3 * alpha) >> WARPEDDIFF_PREC_BITS]);
274
534k
  const __m128i tmp_4 = _mm_loadl_epi64(
275
534k
      (__m128i *)&av1_filter_8bit[(sx + 4 * alpha) >> WARPEDDIFF_PREC_BITS]);
276
534k
  const __m128i tmp_5 = _mm_loadl_epi64(
277
534k
      (__m128i *)&av1_filter_8bit[(sx + 5 * alpha) >> WARPEDDIFF_PREC_BITS]);
278
534k
  const __m128i tmp_6 = _mm_loadl_epi64(
279
534k
      (__m128i *)&av1_filter_8bit[(sx + 6 * alpha) >> WARPEDDIFF_PREC_BITS]);
280
534k
  const __m128i tmp_7 = _mm_loadl_epi64(
281
534k
      (__m128i *)&av1_filter_8bit[(sx + 7 * alpha) >> WARPEDDIFF_PREC_BITS]);
282
283
534k
  const __m128i tmp_8 = _mm_unpacklo_epi16(tmp_0, tmp_2);
284
534k
  const __m128i tmp_9 = _mm_unpacklo_epi16(tmp_1, tmp_3);
285
534k
  const __m128i tmp_10 = _mm_unpacklo_epi16(tmp_4, tmp_6);
286
534k
  const __m128i tmp_11 = _mm_unpacklo_epi16(tmp_5, tmp_7);
287
288
534k
  const __m128i tmp_12 = _mm_unpacklo_epi32(tmp_8, tmp_10);
289
534k
  const __m128i tmp_13 = _mm_unpackhi_epi32(tmp_8, tmp_10);
290
534k
  const __m128i tmp_14 = _mm_unpacklo_epi32(tmp_9, tmp_11);
291
534k
  const __m128i tmp_15 = _mm_unpackhi_epi32(tmp_9, tmp_11);
292
293
534k
  coeff[0] = _mm256_castsi128_si256(_mm_unpacklo_epi64(tmp_12, tmp_14));
294
534k
  coeff[1] = _mm256_castsi128_si256(_mm_unpackhi_epi64(tmp_12, tmp_14));
295
534k
  coeff[2] = _mm256_castsi128_si256(_mm_unpacklo_epi64(tmp_13, tmp_15));
296
534k
  coeff[3] = _mm256_castsi128_si256(_mm_unpackhi_epi64(tmp_13, tmp_15));
297
534k
}
298
299
static inline void warp_horizontal_filter_avx2(
300
    const uint8_t *ref, __m256i *horz_out, int stride, int32_t ix4, int32_t iy4,
301
    int32_t sx4, int alpha, int beta, int p_height, int height, int i,
302
    const __m256i *round_const, const __m128i *shift,
303
446k
    const __m256i *shuffle_src) {
304
446k
  int k, iy, sx, row = 0;
305
446k
  __m256i coeff[4];
306
3.51M
  for (k = -7; k <= (AOMMIN(8, p_height - i) - 2); k += 2) {
307
3.06M
    iy = iy4 + k;
308
3.06M
    iy = clamp(iy, 0, height - 1);
309
3.06M
    const __m128i src_0 =
310
3.06M
        _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
311
3.06M
    iy = iy4 + k + 1;
312
3.06M
    iy = clamp(iy, 0, height - 1);
313
3.06M
    const __m128i src_1 =
314
3.06M
        _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
315
3.06M
    const __m256i src_01 =
316
3.06M
        _mm256_inserti128_si256(_mm256_castsi128_si256(src_0), src_1, 0x1);
317
3.06M
    sx = sx4 + beta * (k + 4);
318
3.06M
    horizontal_filter_avx2(src_01, horz_out, sx, alpha, beta, row, shuffle_src,
319
3.06M
                           round_const, shift);
320
3.06M
    row += 1;
321
3.06M
  }
322
446k
  iy = iy4 + k;
323
446k
  iy = clamp(iy, 0, height - 1);
324
446k
  const __m256i src_01 = _mm256_castsi128_si256(
325
446k
      _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7)));
326
446k
  sx = sx4 + beta * (k + 4);
327
446k
  prepare_horizontal_filter_coeff(alpha, sx, coeff);
328
446k
  filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src, round_const,
329
446k
                         shift, row);
330
446k
}
331
332
static inline void warp_horizontal_filter_alpha0_avx2(
333
    const uint8_t *ref, __m256i *horz_out, int stride, int32_t ix4, int32_t iy4,
334
    int32_t sx4, int alpha, int beta, int p_height, int height, int i,
335
    const __m256i *round_const, const __m128i *shift,
336
304k
    const __m256i *shuffle_src) {
337
304k
  (void)alpha;
338
304k
  int k, iy, sx, row = 0;
339
304k
  __m256i coeff[4];
340
2.40M
  for (k = -7; k <= (AOMMIN(8, p_height - i) - 2); k += 2) {
341
2.10M
    iy = iy4 + k;
342
2.10M
    iy = clamp(iy, 0, height - 1);
343
2.10M
    const __m128i src_0 =
344
2.10M
        _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
345
2.10M
    iy = iy4 + k + 1;
346
2.10M
    iy = clamp(iy, 0, height - 1);
347
2.10M
    const __m128i src_1 =
348
2.10M
        _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
349
2.10M
    const __m256i src_01 =
350
2.10M
        _mm256_inserti128_si256(_mm256_castsi128_si256(src_0), src_1, 0x1);
351
2.10M
    sx = sx4 + beta * (k + 4);
352
2.10M
    prepare_horizontal_filter_coeff_alpha0_avx2(beta, sx, coeff);
353
2.10M
    filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src, round_const,
354
2.10M
                           shift, row);
355
2.10M
    row += 1;
356
2.10M
  }
357
304k
  iy = iy4 + k;
358
304k
  iy = clamp(iy, 0, height - 1);
359
304k
  const __m256i src_01 = _mm256_castsi128_si256(
360
304k
      _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7)));
361
304k
  sx = sx4 + beta * (k + 4);
362
304k
  prepare_horizontal_filter_coeff_alpha0_avx2(beta, sx, coeff);
363
304k
  filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src, round_const,
364
304k
                         shift, row);
365
304k
}
366
367
static inline void warp_horizontal_filter_beta0_avx2(
368
    const uint8_t *ref, __m256i *horz_out, int stride, int32_t ix4, int32_t iy4,
369
    int32_t sx4, int alpha, int beta, int p_height, int height, int i,
370
    const __m256i *round_const, const __m128i *shift,
371
524k
    const __m256i *shuffle_src) {
372
524k
  (void)beta;
373
524k
  int k, iy, row = 0;
374
524k
  __m256i coeff[4];
375
524k
  prepare_horizontal_filter_coeff_beta0_avx2(alpha, sx4, coeff);
376
4.14M
  for (k = -7; k <= (AOMMIN(8, p_height - i) - 2); k += 2) {
377
3.62M
    iy = iy4 + k;
378
3.62M
    iy = clamp(iy, 0, height - 1);
379
3.62M
    const __m128i src_0 =
380
3.62M
        _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
381
3.62M
    iy = iy4 + k + 1;
382
3.62M
    iy = clamp(iy, 0, height - 1);
383
3.62M
    const __m128i src_1 =
384
3.62M
        _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
385
3.62M
    const __m256i src_01 =
386
3.62M
        _mm256_inserti128_si256(_mm256_castsi128_si256(src_0), src_1, 0x1);
387
3.62M
    filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src, round_const,
388
3.62M
                           shift, row);
389
3.62M
    row += 1;
390
3.62M
  }
391
524k
  iy = iy4 + k;
392
524k
  iy = clamp(iy, 0, height - 1);
393
524k
  const __m256i src_01 = _mm256_castsi128_si256(
394
524k
      _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7)));
395
524k
  filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src, round_const,
396
524k
                         shift, row);
397
524k
}
398
399
static inline void warp_horizontal_filter_alpha0_beta0_avx2(
400
    const uint8_t *ref, __m256i *horz_out, int stride, int32_t ix4, int32_t iy4,
401
    int32_t sx4, int alpha, int beta, int p_height, int height, int i,
402
    const __m256i *round_const, const __m128i *shift,
403
641k
    const __m256i *shuffle_src) {
404
641k
  (void)alpha;
405
641k
  int k, iy, row = 0;
406
641k
  __m256i coeff[4];
407
641k
  prepare_horizontal_filter_coeff_alpha0_avx2(beta, sx4, coeff);
408
5.05M
  for (k = -7; k <= (AOMMIN(8, p_height - i) - 2); k += 2) {
409
4.41M
    iy = iy4 + k;
410
4.41M
    iy = clamp(iy, 0, height - 1);
411
4.41M
    const __m128i src0 =
412
4.41M
        _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
413
4.41M
    iy = iy4 + k + 1;
414
4.41M
    iy = clamp(iy, 0, height - 1);
415
4.41M
    const __m128i src1 =
416
4.41M
        _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
417
4.41M
    const __m256i src_01 =
418
4.41M
        _mm256_inserti128_si256(_mm256_castsi128_si256(src0), src1, 0x1);
419
4.41M
    filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src, round_const,
420
4.41M
                           shift, row);
421
4.41M
    row += 1;
422
4.41M
  }
423
641k
  iy = iy4 + k;
424
641k
  iy = clamp(iy, 0, height - 1);
425
641k
  const __m256i src_01 = _mm256_castsi128_si256(
426
641k
      _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7)));
427
641k
  filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src, round_const,
428
641k
                         shift, row);
429
641k
}
430
431
static inline void unpack_weights_and_set_round_const_avx2(
432
    ConvolveParams *conv_params, const int round_bits, const int offset_bits,
433
210k
    __m256i *res_sub_const, __m256i *round_bits_const, __m256i *wt) {
434
210k
  *res_sub_const =
435
210k
      _mm256_set1_epi16(-(1 << (offset_bits - conv_params->round_1)) -
436
210k
                        (1 << (offset_bits - conv_params->round_1 - 1)));
437
210k
  *round_bits_const = _mm256_set1_epi16(((1 << round_bits) >> 1));
438
439
210k
  const int w0 = conv_params->fwd_offset;
440
210k
  const int w1 = conv_params->bck_offset;
441
210k
  const __m256i wt0 = _mm256_set1_epi16((short)w0);
442
210k
  const __m256i wt1 = _mm256_set1_epi16((short)w1);
443
210k
  *wt = _mm256_unpacklo_epi16(wt0, wt1);
444
210k
}
445
446
static inline void prepare_vertical_filter_coeffs_avx2(int gamma, int delta,
447
                                                       int sy,
448
3.39M
                                                       __m256i *coeffs) {
449
3.39M
  __m128i filt_00 =
450
3.39M
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
451
3.39M
                                  ((sy + 0 * gamma) >> WARPEDDIFF_PREC_BITS)));
452
3.39M
  __m128i filt_01 =
453
3.39M
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
454
3.39M
                                  ((sy + 2 * gamma) >> WARPEDDIFF_PREC_BITS)));
455
3.39M
  __m128i filt_02 =
456
3.39M
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
457
3.39M
                                  ((sy + 4 * gamma) >> WARPEDDIFF_PREC_BITS)));
458
3.39M
  __m128i filt_03 =
459
3.39M
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
460
3.39M
                                  ((sy + 6 * gamma) >> WARPEDDIFF_PREC_BITS)));
461
462
3.39M
  __m128i filt_10 = _mm_loadu_si128(
463
3.39M
      (__m128i *)(av1_warped_filter +
464
3.39M
                  (((sy + delta) + 0 * gamma) >> WARPEDDIFF_PREC_BITS)));
465
3.39M
  __m128i filt_11 = _mm_loadu_si128(
466
3.39M
      (__m128i *)(av1_warped_filter +
467
3.39M
                  (((sy + delta) + 2 * gamma) >> WARPEDDIFF_PREC_BITS)));
468
3.39M
  __m128i filt_12 = _mm_loadu_si128(
469
3.39M
      (__m128i *)(av1_warped_filter +
470
3.39M
                  (((sy + delta) + 4 * gamma) >> WARPEDDIFF_PREC_BITS)));
471
3.39M
  __m128i filt_13 = _mm_loadu_si128(
472
3.39M
      (__m128i *)(av1_warped_filter +
473
3.39M
                  (((sy + delta) + 6 * gamma) >> WARPEDDIFF_PREC_BITS)));
474
475
3.39M
  __m256i filt_0 =
476
3.39M
      _mm256_inserti128_si256(_mm256_castsi128_si256(filt_00), filt_10, 0x1);
477
3.39M
  __m256i filt_1 =
478
3.39M
      _mm256_inserti128_si256(_mm256_castsi128_si256(filt_01), filt_11, 0x1);
479
3.39M
  __m256i filt_2 =
480
3.39M
      _mm256_inserti128_si256(_mm256_castsi128_si256(filt_02), filt_12, 0x1);
481
3.39M
  __m256i filt_3 =
482
3.39M
      _mm256_inserti128_si256(_mm256_castsi128_si256(filt_03), filt_13, 0x1);
483
484
3.39M
  __m256i res_0 = _mm256_unpacklo_epi32(filt_0, filt_1);
485
3.39M
  __m256i res_1 = _mm256_unpacklo_epi32(filt_2, filt_3);
486
3.39M
  __m256i res_2 = _mm256_unpackhi_epi32(filt_0, filt_1);
487
3.39M
  __m256i res_3 = _mm256_unpackhi_epi32(filt_2, filt_3);
488
489
3.39M
  coeffs[0] = _mm256_unpacklo_epi64(res_0, res_1);
490
3.39M
  coeffs[1] = _mm256_unpackhi_epi64(res_0, res_1);
491
3.39M
  coeffs[2] = _mm256_unpacklo_epi64(res_2, res_3);
492
3.39M
  coeffs[3] = _mm256_unpackhi_epi64(res_2, res_3);
493
494
3.39M
  filt_00 =
495
3.39M
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
496
3.39M
                                  ((sy + 1 * gamma) >> WARPEDDIFF_PREC_BITS)));
497
3.39M
  filt_01 =
498
3.39M
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
499
3.39M
                                  ((sy + 3 * gamma) >> WARPEDDIFF_PREC_BITS)));
500
3.39M
  filt_02 =
501
3.39M
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
502
3.39M
                                  ((sy + 5 * gamma) >> WARPEDDIFF_PREC_BITS)));
503
3.39M
  filt_03 =
504
3.39M
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
505
3.39M
                                  ((sy + 7 * gamma) >> WARPEDDIFF_PREC_BITS)));
506
507
3.39M
  filt_10 = _mm_loadu_si128(
508
3.39M
      (__m128i *)(av1_warped_filter +
509
3.39M
                  (((sy + delta) + 1 * gamma) >> WARPEDDIFF_PREC_BITS)));
510
3.39M
  filt_11 = _mm_loadu_si128(
511
3.39M
      (__m128i *)(av1_warped_filter +
512
3.39M
                  (((sy + delta) + 3 * gamma) >> WARPEDDIFF_PREC_BITS)));
513
3.39M
  filt_12 = _mm_loadu_si128(
514
3.39M
      (__m128i *)(av1_warped_filter +
515
3.39M
                  (((sy + delta) + 5 * gamma) >> WARPEDDIFF_PREC_BITS)));
516
3.39M
  filt_13 = _mm_loadu_si128(
517
3.39M
      (__m128i *)(av1_warped_filter +
518
3.39M
                  (((sy + delta) + 7 * gamma) >> WARPEDDIFF_PREC_BITS)));
519
520
3.39M
  filt_0 =
521
3.39M
      _mm256_inserti128_si256(_mm256_castsi128_si256(filt_00), filt_10, 0x1);
522
3.39M
  filt_1 =
523
3.39M
      _mm256_inserti128_si256(_mm256_castsi128_si256(filt_01), filt_11, 0x1);
524
3.39M
  filt_2 =
525
3.39M
      _mm256_inserti128_si256(_mm256_castsi128_si256(filt_02), filt_12, 0x1);
526
3.39M
  filt_3 =
527
3.39M
      _mm256_inserti128_si256(_mm256_castsi128_si256(filt_03), filt_13, 0x1);
528
529
3.39M
  res_0 = _mm256_unpacklo_epi32(filt_0, filt_1);
530
3.39M
  res_1 = _mm256_unpacklo_epi32(filt_2, filt_3);
531
3.39M
  res_2 = _mm256_unpackhi_epi32(filt_0, filt_1);
532
3.39M
  res_3 = _mm256_unpackhi_epi32(filt_2, filt_3);
533
534
3.39M
  coeffs[4] = _mm256_unpacklo_epi64(res_0, res_1);
535
3.39M
  coeffs[5] = _mm256_unpackhi_epi64(res_0, res_1);
536
3.39M
  coeffs[6] = _mm256_unpacklo_epi64(res_2, res_3);
537
3.39M
  coeffs[7] = _mm256_unpackhi_epi64(res_2, res_3);
538
3.39M
}
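
The vertical coefficients come from av1_warped_filter, which stores 16-bit taps (hence the 16-byte _mm_loadu_si128 per phase). prepare_vertical_filter_coeffs_avx2 fetches the filters for the even columns into coeffs[0..3] and for the odd columns into coeffs[4..7], interleaved as tap pairs for _mm256_madd_epi16, with the second output row's filters (at sy + delta) in the upper 128-bit lane. A hypothetical lookup helper analogous to the horizontal case (illustrative only; av1_warped_filter is assumed to be a per-phase table of eight int16_t taps):

#include "av1/common/warped_motion.h"

// Taps used for column `px` of output row `r` (r = 0 or 1).
static const int16_t *vert_phase_taps(int sy, int gamma, int delta, int px,
                                      int r) {
  return av1_warped_filter[(sy + r * delta + px * gamma) >> WARPEDDIFF_PREC_BITS];
}
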
539
540
static inline void prepare_vertical_filter_coeffs_delta0_avx2(int gamma, int sy,
541
421k
                                                              __m256i *coeffs) {
542
421k
  __m128i filt_00 =
543
421k
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
544
421k
                                  ((sy + 0 * gamma) >> WARPEDDIFF_PREC_BITS)));
545
421k
  __m128i filt_01 =
546
421k
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
547
421k
                                  ((sy + 2 * gamma) >> WARPEDDIFF_PREC_BITS)));
548
421k
  __m128i filt_02 =
549
421k
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
550
421k
                                  ((sy + 4 * gamma) >> WARPEDDIFF_PREC_BITS)));
551
421k
  __m128i filt_03 =
552
421k
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
553
421k
                                  ((sy + 6 * gamma) >> WARPEDDIFF_PREC_BITS)));
554
555
421k
  __m256i filt_0 = _mm256_broadcastsi128_si256(filt_00);
556
421k
  __m256i filt_1 = _mm256_broadcastsi128_si256(filt_01);
557
421k
  __m256i filt_2 = _mm256_broadcastsi128_si256(filt_02);
558
421k
  __m256i filt_3 = _mm256_broadcastsi128_si256(filt_03);
559
560
421k
  __m256i res_0 = _mm256_unpacklo_epi32(filt_0, filt_1);
561
421k
  __m256i res_1 = _mm256_unpacklo_epi32(filt_2, filt_3);
562
421k
  __m256i res_2 = _mm256_unpackhi_epi32(filt_0, filt_1);
563
421k
  __m256i res_3 = _mm256_unpackhi_epi32(filt_2, filt_3);
564
565
421k
  coeffs[0] = _mm256_unpacklo_epi64(res_0, res_1);
566
421k
  coeffs[1] = _mm256_unpackhi_epi64(res_0, res_1);
567
421k
  coeffs[2] = _mm256_unpacklo_epi64(res_2, res_3);
568
421k
  coeffs[3] = _mm256_unpackhi_epi64(res_2, res_3);
569
570
421k
  filt_00 =
571
421k
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
572
421k
                                  ((sy + 1 * gamma) >> WARPEDDIFF_PREC_BITS)));
573
421k
  filt_01 =
574
421k
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
575
421k
                                  ((sy + 3 * gamma) >> WARPEDDIFF_PREC_BITS)));
576
421k
  filt_02 =
577
421k
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
578
421k
                                  ((sy + 5 * gamma) >> WARPEDDIFF_PREC_BITS)));
579
421k
  filt_03 =
580
421k
      _mm_loadu_si128((__m128i *)(av1_warped_filter +
581
421k
                                  ((sy + 7 * gamma) >> WARPEDDIFF_PREC_BITS)));
582
583
421k
  filt_0 = _mm256_broadcastsi128_si256(filt_00);
584
421k
  filt_1 = _mm256_broadcastsi128_si256(filt_01);
585
421k
  filt_2 = _mm256_broadcastsi128_si256(filt_02);
586
421k
  filt_3 = _mm256_broadcastsi128_si256(filt_03);
587
588
421k
  res_0 = _mm256_unpacklo_epi32(filt_0, filt_1);
589
421k
  res_1 = _mm256_unpacklo_epi32(filt_2, filt_3);
590
421k
  res_2 = _mm256_unpackhi_epi32(filt_0, filt_1);
591
421k
  res_3 = _mm256_unpackhi_epi32(filt_2, filt_3);
592
593
421k
  coeffs[4] = _mm256_unpacklo_epi64(res_0, res_1);
594
421k
  coeffs[5] = _mm256_unpackhi_epi64(res_0, res_1);
595
421k
  coeffs[6] = _mm256_unpacklo_epi64(res_2, res_3);
596
421k
  coeffs[7] = _mm256_unpackhi_epi64(res_2, res_3);
597
421k
}
598
599
static inline void prepare_vertical_filter_coeffs_gamma0_avx2(int delta, int sy,
600
2.62M
                                                              __m256i *coeffs) {
601
2.62M
  const __m128i filt_0 = _mm_loadu_si128(
602
2.62M
      (__m128i *)(av1_warped_filter + (sy >> WARPEDDIFF_PREC_BITS)));
603
2.62M
  const __m128i filt_1 = _mm_loadu_si128(
604
2.62M
      (__m128i *)(av1_warped_filter + ((sy + delta) >> WARPEDDIFF_PREC_BITS)));
605
606
2.62M
  __m256i res_0 =
607
2.62M
      _mm256_inserti128_si256(_mm256_castsi128_si256(filt_0), filt_1, 0x1);
608
609
2.62M
  coeffs[0] = _mm256_shuffle_epi8(
610
2.62M
      res_0, _mm256_load_si256((__m256i *)shuffle_gamma0_mask0_avx2));
611
2.62M
  coeffs[1] = _mm256_shuffle_epi8(
612
2.62M
      res_0, _mm256_load_si256((__m256i *)shuffle_gamma0_mask1_avx2));
613
2.62M
  coeffs[2] = _mm256_shuffle_epi8(
614
2.62M
      res_0, _mm256_load_si256((__m256i *)shuffle_gamma0_mask2_avx2));
615
2.62M
  coeffs[3] = _mm256_shuffle_epi8(
616
2.62M
      res_0, _mm256_load_si256((__m256i *)shuffle_gamma0_mask3_avx2));
617
618
2.62M
  coeffs[4] = coeffs[0];
619
2.62M
  coeffs[5] = coeffs[1];
620
2.62M
  coeffs[6] = coeffs[2];
621
2.62M
  coeffs[7] = coeffs[3];
622
2.62M
}
623
624
static inline void filter_src_pixels_vertical_avx2(__m256i *horz_out,
625
                                                   __m256i *src,
626
                                                   __m256i *coeffs,
627
                                                   __m256i *res_lo,
628
8.84M
                                                   __m256i *res_hi, int row) {
629
8.84M
  const __m256i src_6 = horz_out[row + 3];
630
8.84M
  const __m256i src_7 =
631
8.84M
      _mm256_permute2x128_si256(horz_out[row + 3], horz_out[row + 4], 0x21);
632
633
8.84M
  src[6] = _mm256_unpacklo_epi16(src_6, src_7);
634
635
8.84M
  const __m256i res_0 = _mm256_madd_epi16(src[0], coeffs[0]);
636
8.84M
  const __m256i res_2 = _mm256_madd_epi16(src[2], coeffs[1]);
637
8.84M
  const __m256i res_4 = _mm256_madd_epi16(src[4], coeffs[2]);
638
8.84M
  const __m256i res_6 = _mm256_madd_epi16(src[6], coeffs[3]);
639
640
8.84M
  const __m256i res_even = _mm256_add_epi32(_mm256_add_epi32(res_0, res_2),
641
8.84M
                                            _mm256_add_epi32(res_4, res_6));
642
643
8.84M
  src[7] = _mm256_unpackhi_epi16(src_6, src_7);
644
645
8.84M
  const __m256i res_1 = _mm256_madd_epi16(src[1], coeffs[4]);
646
8.84M
  const __m256i res_3 = _mm256_madd_epi16(src[3], coeffs[5]);
647
8.84M
  const __m256i res_5 = _mm256_madd_epi16(src[5], coeffs[6]);
648
8.84M
  const __m256i res_7 = _mm256_madd_epi16(src[7], coeffs[7]);
649
650
8.84M
  const __m256i res_odd = _mm256_add_epi32(_mm256_add_epi32(res_1, res_3),
651
8.84M
                                           _mm256_add_epi32(res_5, res_7));
652
653
  // Rearrange pixels back into the order 0 ... 7
654
8.84M
  *res_lo = _mm256_unpacklo_epi32(res_even, res_odd);
655
8.84M
  *res_hi = _mm256_unpackhi_epi32(res_even, res_odd);
656
8.84M
}
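
filter_src_pixels_vertical_avx2 applies the 8-tap vertical filter to the 16-bit horizontal outputs: src[0..7] hold vertically adjacent samples interleaved in pairs, _mm256_madd_epi16 produces 32-bit two-tap partial sums, and the final unpacks restore the natural pixel order. A minimal scalar sketch of one vertical output sample (warp_vert_ref is an illustrative helper; rounding and the shift by reduce_bits_vert happen later, in store_vertical_filter_output_avx2):

#include <stdint.h>
#include <stddef.h>

// Eight signed 16-bit taps applied to eight vertically adjacent intermediate
// samples of one column; `stride` is the row stride of the intermediate block.
static int32_t warp_vert_ref(const int16_t *col, ptrdiff_t stride,
                             const int16_t *taps) {
  int32_t sum = 0;
  for (int t = 0; t < 8; ++t) sum += (int32_t)taps[t] * col[t * stride];
  return sum;
}
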
657
658
static inline void store_vertical_filter_output_avx2(
659
    const __m256i *res_lo, const __m256i *res_hi, const __m256i *res_add_const,
660
    const __m256i *wt, const __m256i *res_sub_const,
661
    const __m256i *round_bits_const, uint8_t *pred, ConvolveParams *conv_params,
662
    int i, int j, int k, const int reduce_bits_vert, int p_stride, int p_width,
663
8.84M
    const int round_bits) {
664
8.84M
  __m256i res_lo_1 = *res_lo;
665
8.84M
  __m256i res_hi_1 = *res_hi;
666
667
8.84M
  if (conv_params->is_compound) {
668
208k
    __m128i *const p_0 =
669
208k
        (__m128i *)&conv_params->dst[(i + k + 4) * conv_params->dst_stride + j];
670
208k
    __m128i *const p_1 =
671
208k
        (__m128i *)&conv_params
672
208k
            ->dst[(i + (k + 1) + 4) * conv_params->dst_stride + j];
673
674
208k
    res_lo_1 = _mm256_srai_epi32(_mm256_add_epi32(res_lo_1, *res_add_const),
675
208k
                                 reduce_bits_vert);
676
677
208k
    const __m256i temp_lo_16 = _mm256_packus_epi32(res_lo_1, res_lo_1);
678
208k
    __m256i res_lo_16;
679
208k
    if (conv_params->do_average) {
680
101k
      __m128i *const dst8_0 = (__m128i *)&pred[(i + k + 4) * p_stride + j];
681
101k
      __m128i *const dst8_1 =
682
101k
          (__m128i *)&pred[(i + (k + 1) + 4) * p_stride + j];
683
101k
      const __m128i p_16_0 = _mm_loadl_epi64(p_0);
684
101k
      const __m128i p_16_1 = _mm_loadl_epi64(p_1);
685
101k
      const __m256i p_16 =
686
101k
          _mm256_inserti128_si256(_mm256_castsi128_si256(p_16_0), p_16_1, 1);
687
101k
      if (conv_params->use_dist_wtd_comp_avg) {
688
46.6k
        const __m256i p_16_lo = _mm256_unpacklo_epi16(p_16, temp_lo_16);
689
46.6k
        const __m256i wt_res_lo = _mm256_madd_epi16(p_16_lo, *wt);
690
46.6k
        const __m256i shifted_32 =
691
46.6k
            _mm256_srai_epi32(wt_res_lo, DIST_PRECISION_BITS);
692
46.6k
        res_lo_16 = _mm256_packus_epi32(shifted_32, shifted_32);
693
54.9k
      } else {
694
54.9k
        res_lo_16 = _mm256_srai_epi16(_mm256_add_epi16(p_16, temp_lo_16), 1);
695
54.9k
      }
696
101k
      res_lo_16 = _mm256_add_epi16(res_lo_16, *res_sub_const);
697
101k
      res_lo_16 = _mm256_srai_epi16(
698
101k
          _mm256_add_epi16(res_lo_16, *round_bits_const), round_bits);
699
101k
      const __m256i res_8_lo = _mm256_packus_epi16(res_lo_16, res_lo_16);
700
101k
      const __m128i res_8_lo_0 = _mm256_castsi256_si128(res_8_lo);
701
101k
      const __m128i res_8_lo_1 = _mm256_extracti128_si256(res_8_lo, 1);
702
101k
      *(int *)dst8_0 = _mm_cvtsi128_si32(res_8_lo_0);
703
101k
      *(int *)dst8_1 = _mm_cvtsi128_si32(res_8_lo_1);
704
106k
    } else {
705
106k
      const __m128i temp_lo_16_0 = _mm256_castsi256_si128(temp_lo_16);
706
106k
      const __m128i temp_lo_16_1 = _mm256_extracti128_si256(temp_lo_16, 1);
707
106k
      _mm_storel_epi64(p_0, temp_lo_16_0);
708
106k
      _mm_storel_epi64(p_1, temp_lo_16_1);
709
106k
    }
710
208k
    if (p_width > 4) {
711
208k
      __m128i *const p4_0 =
712
208k
          (__m128i *)&conv_params
713
208k
              ->dst[(i + k + 4) * conv_params->dst_stride + j + 4];
714
208k
      __m128i *const p4_1 =
715
208k
          (__m128i *)&conv_params
716
208k
              ->dst[(i + (k + 1) + 4) * conv_params->dst_stride + j + 4];
717
208k
      res_hi_1 = _mm256_srai_epi32(_mm256_add_epi32(res_hi_1, *res_add_const),
718
208k
                                   reduce_bits_vert);
719
208k
      const __m256i temp_hi_16 = _mm256_packus_epi32(res_hi_1, res_hi_1);
720
208k
      __m256i res_hi_16;
721
208k
      if (conv_params->do_average) {
722
101k
        __m128i *const dst8_4_0 =
723
101k
            (__m128i *)&pred[(i + k + 4) * p_stride + j + 4];
724
101k
        __m128i *const dst8_4_1 =
725
101k
            (__m128i *)&pred[(i + (k + 1) + 4) * p_stride + j + 4];
726
101k
        const __m128i p4_16_0 = _mm_loadl_epi64(p4_0);
727
101k
        const __m128i p4_16_1 = _mm_loadl_epi64(p4_1);
728
101k
        const __m256i p4_16 = _mm256_inserti128_si256(
729
101k
            _mm256_castsi128_si256(p4_16_0), p4_16_1, 1);
730
101k
        if (conv_params->use_dist_wtd_comp_avg) {
731
46.6k
          const __m256i p_16_hi = _mm256_unpacklo_epi16(p4_16, temp_hi_16);
732
46.6k
          const __m256i wt_res_hi = _mm256_madd_epi16(p_16_hi, *wt);
733
46.6k
          const __m256i shifted_32 =
734
46.6k
              _mm256_srai_epi32(wt_res_hi, DIST_PRECISION_BITS);
735
46.6k
          res_hi_16 = _mm256_packus_epi32(shifted_32, shifted_32);
736
54.9k
        } else {
737
54.9k
          res_hi_16 = _mm256_srai_epi16(_mm256_add_epi16(p4_16, temp_hi_16), 1);
738
54.9k
        }
739
101k
        res_hi_16 = _mm256_add_epi16(res_hi_16, *res_sub_const);
740
101k
        res_hi_16 = _mm256_srai_epi16(
741
101k
            _mm256_add_epi16(res_hi_16, *round_bits_const), round_bits);
742
101k
        __m256i res_8_hi = _mm256_packus_epi16(res_hi_16, res_hi_16);
743
101k
        const __m128i res_8_hi_0 = _mm256_castsi256_si128(res_8_hi);
744
101k
        const __m128i res_8_hi_1 = _mm256_extracti128_si256(res_8_hi, 1);
745
101k
        *(int *)dst8_4_0 = _mm_cvtsi128_si32(res_8_hi_0);
746
101k
        *(int *)dst8_4_1 = _mm_cvtsi128_si32(res_8_hi_1);
747
106k
      } else {
748
106k
        const __m128i temp_hi_16_0 = _mm256_castsi256_si128(temp_hi_16);
749
106k
        const __m128i temp_hi_16_1 = _mm256_extracti128_si256(temp_hi_16, 1);
750
106k
        _mm_storel_epi64(p4_0, temp_hi_16_0);
751
106k
        _mm_storel_epi64(p4_1, temp_hi_16_1);
752
106k
      }
753
208k
    }
754
8.64M
  } else {
755
8.64M
    const __m256i res_lo_round = _mm256_srai_epi32(
756
8.64M
        _mm256_add_epi32(res_lo_1, *res_add_const), reduce_bits_vert);
757
8.64M
    const __m256i res_hi_round = _mm256_srai_epi32(
758
8.64M
        _mm256_add_epi32(res_hi_1, *res_add_const), reduce_bits_vert);
759
760
8.64M
    const __m256i res_16bit = _mm256_packs_epi32(res_lo_round, res_hi_round);
761
8.64M
    const __m256i res_8bit = _mm256_packus_epi16(res_16bit, res_16bit);
762
8.64M
    const __m128i res_8bit0 = _mm256_castsi256_si128(res_8bit);
763
8.64M
    const __m128i res_8bit1 = _mm256_extracti128_si256(res_8bit, 1);
764
765
    // Store, blending with 'pred' if needed
766
8.64M
    __m128i *const p = (__m128i *)&pred[(i + k + 4) * p_stride + j];
767
8.64M
    __m128i *const p1 = (__m128i *)&pred[(i + (k + 1) + 4) * p_stride + j];
768
769
8.64M
    if (p_width == 4) {
770
0
      *(int *)p = _mm_cvtsi128_si32(res_8bit0);
771
0
      *(int *)p1 = _mm_cvtsi128_si32(res_8bit1);
772
8.64M
    } else {
773
8.64M
      _mm_storel_epi64(p, res_8bit0);
774
8.64M
      _mm_storel_epi64(p1, res_8bit1);
775
8.64M
    }
776
8.64M
  }
777
8.84M
}
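
In the compound branch above, the rounded 16-bit sums are written to conv_params->dst, and when do_average is set they are instead blended with the first-pass values already stored there: a simple average, or a distance-weighted one using fwd_offset and bck_offset when use_dist_wtd_comp_avg is set, followed by the res_sub_const offset and the final rounding to 8 bits. A scalar sketch of the distance-weighted blend, assuming the same ConvolveParams field names as above (illustrative only):

// prev is the first-pass value read from conv_params->dst, cur is the newly
// rounded sum; w0 and w1 are conv_params->fwd_offset / bck_offset.  The result
// still needs res_sub_const added and a final shift by round_bits, as above.
static int32_t dist_wtd_blend(int32_t prev, int32_t cur, int w0, int w1) {
  return (prev * w0 + cur * w1) >> DIST_PRECISION_BITS;
}
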
778
779
static inline void warp_vertical_filter_avx2(
780
    uint8_t *pred, __m256i *horz_out, ConvolveParams *conv_params,
781
    int16_t gamma, int16_t delta, int p_height, int p_stride, int p_width,
782
    int i, int j, int sy4, const int reduce_bits_vert,
783
    const __m256i *res_add_const, const int round_bits,
784
    const __m256i *res_sub_const, const __m256i *round_bits_const,
785
850k
    const __m256i *wt) {
786
850k
  int k, row = 0;
787
850k
  __m256i src[8];
788
850k
  const __m256i src_0 = horz_out[0];
789
850k
  const __m256i src_1 =
790
850k
      _mm256_permute2x128_si256(horz_out[0], horz_out[1], 0x21);
791
850k
  const __m256i src_2 = horz_out[1];
792
850k
  const __m256i src_3 =
793
850k
      _mm256_permute2x128_si256(horz_out[1], horz_out[2], 0x21);
794
850k
  const __m256i src_4 = horz_out[2];
795
850k
  const __m256i src_5 =
796
850k
      _mm256_permute2x128_si256(horz_out[2], horz_out[3], 0x21);
797
798
850k
  src[0] = _mm256_unpacklo_epi16(src_0, src_1);
799
850k
  src[2] = _mm256_unpacklo_epi16(src_2, src_3);
800
850k
  src[4] = _mm256_unpacklo_epi16(src_4, src_5);
801
802
850k
  src[1] = _mm256_unpackhi_epi16(src_0, src_1);
803
850k
  src[3] = _mm256_unpackhi_epi16(src_2, src_3);
804
850k
  src[5] = _mm256_unpackhi_epi16(src_4, src_5);
805
806
4.24M
  for (k = -4; k < AOMMIN(4, p_height - i - 4); k += 2) {
807
3.39M
    int sy = sy4 + delta * (k + 4);
808
3.39M
    __m256i coeffs[8];
809
3.39M
    prepare_vertical_filter_coeffs_avx2(gamma, delta, sy, coeffs);
810
3.39M
    __m256i res_lo, res_hi;
811
3.39M
    filter_src_pixels_vertical_avx2(horz_out, src, coeffs, &res_lo, &res_hi,
812
3.39M
                                    row);
813
3.39M
    store_vertical_filter_output_avx2(&res_lo, &res_hi, res_add_const, wt,
814
3.39M
                                      res_sub_const, round_bits_const, pred,
815
3.39M
                                      conv_params, i, j, k, reduce_bits_vert,
816
3.39M
                                      p_stride, p_width, round_bits);
817
3.39M
    src[0] = src[2];
818
3.39M
    src[2] = src[4];
819
3.39M
    src[4] = src[6];
820
3.39M
    src[1] = src[3];
821
3.39M
    src[3] = src[5];
822
3.39M
    src[5] = src[7];
823
824
3.39M
    row += 1;
825
3.39M
  }
826
850k
}
827
828
static inline void warp_vertical_filter_gamma0_avx2(
829
    uint8_t *pred, __m256i *horz_out, ConvolveParams *conv_params,
830
    int16_t gamma, int16_t delta, int p_height, int p_stride, int p_width,
831
    int i, int j, int sy4, const int reduce_bits_vert,
832
    const __m256i *res_add_const, const int round_bits,
833
    const __m256i *res_sub_const, const __m256i *round_bits_const,
834
560k
    const __m256i *wt) {
835
560k
  (void)gamma;
836
560k
  int k, row = 0;
837
560k
  __m256i src[8];
838
560k
  const __m256i src_0 = horz_out[0];
839
560k
  const __m256i src_1 =
840
560k
      _mm256_permute2x128_si256(horz_out[0], horz_out[1], 0x21);
841
560k
  const __m256i src_2 = horz_out[1];
842
560k
  const __m256i src_3 =
843
560k
      _mm256_permute2x128_si256(horz_out[1], horz_out[2], 0x21);
844
560k
  const __m256i src_4 = horz_out[2];
845
560k
  const __m256i src_5 =
846
560k
      _mm256_permute2x128_si256(horz_out[2], horz_out[3], 0x21);
847
848
560k
  src[0] = _mm256_unpacklo_epi16(src_0, src_1);
849
560k
  src[2] = _mm256_unpacklo_epi16(src_2, src_3);
850
560k
  src[4] = _mm256_unpacklo_epi16(src_4, src_5);
851
852
560k
  src[1] = _mm256_unpackhi_epi16(src_0, src_1);
853
560k
  src[3] = _mm256_unpackhi_epi16(src_2, src_3);
854
560k
  src[5] = _mm256_unpackhi_epi16(src_4, src_5);
855
856
2.81M
  for (k = -4; k < AOMMIN(4, p_height - i - 4); k += 2) {
857
2.24M
    int sy = sy4 + delta * (k + 4);
858
2.24M
    __m256i coeffs[8];
859
2.24M
    prepare_vertical_filter_coeffs_gamma0_avx2(delta, sy, coeffs);
860
2.24M
    __m256i res_lo, res_hi;
861
2.24M
    filter_src_pixels_vertical_avx2(horz_out, src, coeffs, &res_lo, &res_hi,
862
2.24M
                                    row);
863
2.24M
    store_vertical_filter_output_avx2(&res_lo, &res_hi, res_add_const, wt,
864
2.24M
                                      res_sub_const, round_bits_const, pred,
865
2.24M
                                      conv_params, i, j, k, reduce_bits_vert,
866
2.24M
                                      p_stride, p_width, round_bits);
867
2.24M
    src[0] = src[2];
868
2.24M
    src[2] = src[4];
869
2.24M
    src[4] = src[6];
870
2.24M
    src[1] = src[3];
871
2.24M
    src[3] = src[5];
872
2.24M
    src[5] = src[7];
873
2.24M
    row += 1;
874
2.24M
  }
875
560k
}
876
877
static inline void warp_vertical_filter_delta0_avx2(
878
    uint8_t *pred, __m256i *horz_out, ConvolveParams *conv_params,
879
    int16_t gamma, int16_t delta, int p_height, int p_stride, int p_width,
880
    int i, int j, int sy4, const int reduce_bits_vert,
881
    const __m256i *res_add_const, const int round_bits,
882
    const __m256i *res_sub_const, const __m256i *round_bits_const,
883
421k
    const __m256i *wt) {
884
421k
  (void)delta;
885
421k
  int k, row = 0;
886
421k
  __m256i src[8], coeffs[8];
887
421k
  const __m256i src_0 = horz_out[0];
888
421k
  const __m256i src_1 =
889
421k
      _mm256_permute2x128_si256(horz_out[0], horz_out[1], 0x21);
890
421k
  const __m256i src_2 = horz_out[1];
891
421k
  const __m256i src_3 =
892
421k
      _mm256_permute2x128_si256(horz_out[1], horz_out[2], 0x21);
893
421k
  const __m256i src_4 = horz_out[2];
894
421k
  const __m256i src_5 =
895
421k
      _mm256_permute2x128_si256(horz_out[2], horz_out[3], 0x21);
896
897
421k
  src[0] = _mm256_unpacklo_epi16(src_0, src_1);
898
421k
  src[2] = _mm256_unpacklo_epi16(src_2, src_3);
899
421k
  src[4] = _mm256_unpacklo_epi16(src_4, src_5);
900
901
421k
  src[1] = _mm256_unpackhi_epi16(src_0, src_1);
902
421k
  src[3] = _mm256_unpackhi_epi16(src_2, src_3);
903
421k
  src[5] = _mm256_unpackhi_epi16(src_4, src_5);
904
905
421k
  prepare_vertical_filter_coeffs_delta0_avx2(gamma, sy4, coeffs);
906
907
2.10M
  for (k = -4; k < AOMMIN(4, p_height - i - 4); k += 2) {
908
1.68M
    __m256i res_lo, res_hi;
909
1.68M
    filter_src_pixels_vertical_avx2(horz_out, src, coeffs, &res_lo, &res_hi,
910
1.68M
                                    row);
911
1.68M
    store_vertical_filter_output_avx2(&res_lo, &res_hi, res_add_const, wt,
912
1.68M
                                      res_sub_const, round_bits_const, pred,
913
1.68M
                                      conv_params, i, j, k, reduce_bits_vert,
914
1.68M
                                      p_stride, p_width, round_bits);
915
1.68M
    src[0] = src[2];
916
1.68M
    src[2] = src[4];
917
1.68M
    src[4] = src[6];
918
1.68M
    src[1] = src[3];
919
1.68M
    src[3] = src[5];
920
1.68M
    src[5] = src[7];
921
1.68M
    row += 1;
922
1.68M
  }
923
421k
}
924
925
static inline void warp_vertical_filter_gamma0_delta0_avx2(
926
    uint8_t *pred, __m256i *horz_out, ConvolveParams *conv_params,
927
    int16_t gamma, int16_t delta, int p_height, int p_stride, int p_width,
928
    int i, int j, int sy4, const int reduce_bits_vert,
929
    const __m256i *res_add_const, const int round_bits,
930
    const __m256i *res_sub_const, const __m256i *round_bits_const,
931
374k
    const __m256i *wt) {
932
374k
  (void)gamma;
933
374k
  int k, row = 0;
934
374k
  __m256i src[8], coeffs[8];
935
374k
  const __m256i src_0 = horz_out[0];
936
374k
  const __m256i src_1 =
937
374k
      _mm256_permute2x128_si256(horz_out[0], horz_out[1], 0x21);
938
374k
  const __m256i src_2 = horz_out[1];
939
374k
  const __m256i src_3 =
940
374k
      _mm256_permute2x128_si256(horz_out[1], horz_out[2], 0x21);
941
374k
  const __m256i src_4 = horz_out[2];
942
374k
  const __m256i src_5 =
943
374k
      _mm256_permute2x128_si256(horz_out[2], horz_out[3], 0x21);
944
945
374k
  src[0] = _mm256_unpacklo_epi16(src_0, src_1);
946
374k
  src[2] = _mm256_unpacklo_epi16(src_2, src_3);
947
374k
  src[4] = _mm256_unpacklo_epi16(src_4, src_5);
948
949
374k
  src[1] = _mm256_unpackhi_epi16(src_0, src_1);
950
374k
  src[3] = _mm256_unpackhi_epi16(src_2, src_3);
951
374k
  src[5] = _mm256_unpackhi_epi16(src_4, src_5);
952
953
374k
  prepare_vertical_filter_coeffs_gamma0_avx2(delta, sy4, coeffs);
954
955
1.87M
  for (k = -4; k < AOMMIN(4, p_height - i - 4); k += 2) {
956
1.50M
    __m256i res_lo, res_hi;
957
1.50M
    filter_src_pixels_vertical_avx2(horz_out, src, coeffs, &res_lo, &res_hi,
958
1.50M
                                    row);
959
1.50M
    store_vertical_filter_output_avx2(&res_lo, &res_hi, res_add_const, wt,
960
1.50M
                                      res_sub_const, round_bits_const, pred,
961
1.50M
                                      conv_params, i, j, k, reduce_bits_vert,
962
1.50M
                                      p_stride, p_width, round_bits);
963
1.50M
    src[0] = src[2];
964
1.50M
    src[2] = src[4];
965
1.50M
    src[4] = src[6];
966
1.50M
    src[1] = src[3];
967
1.50M
    src[3] = src[5];
968
1.50M
    src[5] = src[7];
969
1.50M
    row += 1;
970
1.50M
  }
971
374k
}
972
973
static inline void prepare_warp_vertical_filter_avx2(
974
    uint8_t *pred, __m256i *horz_out, ConvolveParams *conv_params,
975
    int16_t gamma, int16_t delta, int p_height, int p_stride, int p_width,
976
    int i, int j, int sy4, const int reduce_bits_vert,
977
    const __m256i *res_add_const, const int round_bits,
978
    const __m256i *res_sub_const, const __m256i *round_bits_const,
979
2.20M
    const __m256i *wt) {
980
2.20M
  if (gamma == 0 && delta == 0)
981
374k
    warp_vertical_filter_gamma0_delta0_avx2(
982
374k
        pred, horz_out, conv_params, gamma, delta, p_height, p_stride, p_width,
983
374k
        i, j, sy4, reduce_bits_vert, res_add_const, round_bits, res_sub_const,
984
374k
        round_bits_const, wt);
985
1.82M
  else if (gamma == 0 && delta != 0)
986
560k
    warp_vertical_filter_gamma0_avx2(
987
560k
        pred, horz_out, conv_params, gamma, delta, p_height, p_stride, p_width,
988
560k
        i, j, sy4, reduce_bits_vert, res_add_const, round_bits, res_sub_const,
989
560k
        round_bits_const, wt);
990
1.26M
  else if (gamma != 0 && delta == 0)
991
421k
    warp_vertical_filter_delta0_avx2(
992
421k
        pred, horz_out, conv_params, gamma, delta, p_height, p_stride, p_width,
993
421k
        i, j, sy4, reduce_bits_vert, res_add_const, round_bits, res_sub_const,
994
421k
        round_bits_const, wt);
995
846k
  else
996
846k
    warp_vertical_filter_avx2(pred, horz_out, conv_params, gamma, delta,
997
846k
                              p_height, p_stride, p_width, i, j, sy4,
998
846k
                              reduce_bits_vert, res_add_const, round_bits,
999
846k
                              res_sub_const, round_bits_const, wt);
1000
2.20M
}
1001
1002
static inline void prepare_warp_horizontal_filter_avx2(
1003
    const uint8_t *ref, __m256i *horz_out, int stride, int32_t ix4, int32_t iy4,
1004
    int32_t sx4, int alpha, int beta, int p_height, int height, int i,
1005
    const __m256i *round_const, const __m128i *shift,
1006
1.91M
    const __m256i *shuffle_src) {
1007
1.91M
  if (alpha == 0 && beta == 0)
1008
642k
    warp_horizontal_filter_alpha0_beta0_avx2(
1009
642k
        ref, horz_out, stride, ix4, iy4, sx4, alpha, beta, p_height, height, i,
1010
642k
        round_const, shift, shuffle_src);
1011
1.27M
  else if (alpha == 0 && beta != 0)
1012
305k
    warp_horizontal_filter_alpha0_avx2(ref, horz_out, stride, ix4, iy4, sx4,
1013
305k
                                       alpha, beta, p_height, height, i,
1014
305k
                                       round_const, shift, shuffle_src);
1015
970k
  else if (alpha != 0 && beta == 0)
1016
524k
    warp_horizontal_filter_beta0_avx2(ref, horz_out, stride, ix4, iy4, sx4,
1017
524k
                                      alpha, beta, p_height, height, i,
1018
524k
                                      round_const, shift, shuffle_src);
1019
446k
  else
1020
446k
    warp_horizontal_filter_avx2(ref, horz_out, stride, ix4, iy4, sx4, alpha,
1021
446k
                                beta, p_height, height, i, round_const, shift,
1022
446k
                                shuffle_src);
1023
1.91M
}
1024
1025
void av1_warp_affine_avx2(const int32_t *mat, const uint8_t *ref, int width,
1026
                          int height, int stride, uint8_t *pred, int p_col,
1027
                          int p_row, int p_width, int p_height, int p_stride,
1028
                          int subsampling_x, int subsampling_y,
1029
                          ConvolveParams *conv_params, int16_t alpha,
1030
210k
                          int16_t beta, int16_t gamma, int16_t delta) {
1031
210k
  __m256i horz_out[8];
1032
210k
  int i, j, k;
1033
210k
  const int bd = 8;
1034
210k
  const int reduce_bits_horiz = conv_params->round_0;
1035
210k
  const int reduce_bits_vert = conv_params->is_compound
1036
210k
                                   ? conv_params->round_1
1037
210k
                                   : 2 * FILTER_BITS - reduce_bits_horiz;
1038
210k
  const int offset_bits_horiz = bd + FILTER_BITS - 1;
1039
210k
  assert(IMPLIES(conv_params->is_compound, conv_params->dst != NULL));
1040
1041
210k
  const int offset_bits_vert = bd + 2 * FILTER_BITS - reduce_bits_horiz;
1042
210k
  const __m256i reduce_bits_vert_const =
1043
210k
      _mm256_set1_epi32(((1 << reduce_bits_vert) >> 1));
1044
210k
  const __m256i res_add_const = _mm256_set1_epi32(1 << offset_bits_vert);
1045
210k
  const int round_bits =
1046
210k
      2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;
1047
210k
  const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0;
1048
210k
  assert(IMPLIES(conv_params->do_average, conv_params->is_compound));
1049
1050
210k
  const __m256i round_const = _mm256_set1_epi16(
1051
210k
      (1 << offset_bits_horiz) + ((1 << reduce_bits_horiz) >> 1));
1052
210k
  const __m128i shift = _mm_cvtsi32_si128(reduce_bits_horiz);
1053
1054
210k
  __m256i res_sub_const, round_bits_const, wt;
1055
210k
  unpack_weights_and_set_round_const_avx2(conv_params, round_bits, offset_bits,
1056
210k
                                          &res_sub_const, &round_bits_const,
1057
210k
                                          &wt);
1058
1059
210k
  __m256i res_add_const_1;
1060
210k
  if (conv_params->is_compound == 1) {
1061
7.98k
    res_add_const_1 = _mm256_add_epi32(reduce_bits_vert_const, res_add_const);
1062
202k
  } else {
1063
202k
    res_add_const_1 = _mm256_set1_epi32(-(1 << (bd + reduce_bits_vert - 1)) +
1064
202k
                                        ((1 << reduce_bits_vert) >> 1));
1065
202k
  }
1066
210k
  const int32_t const1 = alpha * (-4) + beta * (-4) +
1067
210k
                         (1 << (WARPEDDIFF_PREC_BITS - 1)) +
1068
210k
                         (WARPEDPIXEL_PREC_SHIFTS << WARPEDDIFF_PREC_BITS);
1069
210k
  const int32_t const2 = gamma * (-4) + delta * (-4) +
1070
210k
                         (1 << (WARPEDDIFF_PREC_BITS - 1)) +
1071
210k
                         (WARPEDPIXEL_PREC_SHIFTS << WARPEDDIFF_PREC_BITS);
1072
210k
  const int32_t const3 = ((1 << WARP_PARAM_REDUCE_BITS) - 1);
1073
210k
  const int16_t const4 = (1 << (bd + FILTER_BITS - reduce_bits_horiz - 1));
1074
210k
  const int16_t const5 = (1 << (FILTER_BITS - reduce_bits_horiz));
1075
1076
210k
  __m256i shuffle_src[4];
1077
210k
  shuffle_src[0] = _mm256_load_si256((__m256i *)shuffle_src0);
1078
210k
  shuffle_src[1] = _mm256_load_si256((__m256i *)shuffle_src1);
1079
210k
  shuffle_src[2] = _mm256_load_si256((__m256i *)shuffle_src2);
1080
210k
  shuffle_src[3] = _mm256_load_si256((__m256i *)shuffle_src3);
1081
1082
718k
  for (i = 0; i < p_height; i += 8) {
1083
2.73M
    for (j = 0; j < p_width; j += 8) {
1084
2.23M
      const int32_t src_x = (p_col + j + 4) << subsampling_x;
1085
2.23M
      const int32_t src_y = (p_row + i + 4) << subsampling_y;
1086
2.23M
      const int64_t dst_x =
1087
2.23M
          (int64_t)mat[2] * src_x + (int64_t)mat[3] * src_y + (int64_t)mat[0];
1088
2.23M
      const int64_t dst_y =
1089
2.23M
          (int64_t)mat[4] * src_x + (int64_t)mat[5] * src_y + (int64_t)mat[1];
1090
2.23M
      const int64_t x4 = dst_x >> subsampling_x;
1091
2.23M
      const int64_t y4 = dst_y >> subsampling_y;
1092
1093
2.23M
      int32_t ix4 = (int32_t)(x4 >> WARPEDMODEL_PREC_BITS);
1094
2.23M
      int32_t sx4 = x4 & ((1 << WARPEDMODEL_PREC_BITS) - 1);
1095
2.23M
      int32_t iy4 = (int32_t)(y4 >> WARPEDMODEL_PREC_BITS);
1096
2.23M
      int32_t sy4 = y4 & ((1 << WARPEDMODEL_PREC_BITS) - 1);
1097
1098
      // Add in all the constant terms, including rounding and offset
1099
2.23M
      sx4 += const1;
1100
2.23M
      sy4 += const2;
1101
1102
2.23M
      sx4 &= ~const3;
1103
2.23M
      sy4 &= ~const3;
1104
1105
      // Horizontal filter
1106
      // If the block is aligned such that, after clamping, every sample
1107
      // would be taken from the leftmost/rightmost column, then we can
1108
      // skip the expensive horizontal filter.
1109
1110
2.23M
      if (ix4 <= -7) {
1111
87.8k
        int iy, row = 0;
1112
702k
        for (k = -7; k <= (AOMMIN(8, p_height - i) - 2); k += 2) {
1113
614k
          iy = iy4 + k;
1114
614k
          iy = clamp(iy, 0, height - 1);
1115
614k
          const __m256i temp_0 =
1116
614k
              _mm256_set1_epi16(const4 + ref[iy * stride] * const5);
1117
614k
          iy = iy4 + k + 1;
1118
614k
          iy = clamp(iy, 0, height - 1);
1119
614k
          const __m256i temp_1 =
1120
614k
              _mm256_set1_epi16(const4 + ref[iy * stride] * const5);
1121
614k
          horz_out[row] = _mm256_blend_epi32(temp_0, temp_1, 0xf0);
1122
614k
          row += 1;
1123
614k
        }
1124
87.8k
        iy = iy4 + k;
1125
87.8k
        iy = clamp(iy, 0, height - 1);
1126
87.8k
        horz_out[row] = _mm256_set1_epi16(const4 + ref[iy * stride] * const5);
1127
2.14M
      } else if (ix4 >= width + 6) {
1128
139k
        int iy, row = 0;
1129
1.11M
        for (k = -7; k <= (AOMMIN(8, p_height - i) - 2); k += 2) {
1130
975k
          iy = iy4 + k;
1131
975k
          iy = clamp(iy, 0, height - 1);
1132
975k
          const __m256i temp_0 = _mm256_set1_epi16(
1133
975k
              const4 + ref[iy * stride + (width - 1)] * const5);
1134
975k
          iy = iy4 + k + 1;
1135
975k
          iy = clamp(iy, 0, height - 1);
1136
975k
          const __m256i temp_1 = _mm256_set1_epi16(
1137
975k
              const4 + ref[iy * stride + (width - 1)] * const5);
1138
975k
          horz_out[row] = _mm256_blend_epi32(temp_0, temp_1, 0xf0);
1139
975k
          row += 1;
1140
975k
        }
1141
139k
        iy = iy4 + k;
1142
139k
        iy = clamp(iy, 0, height - 1);
1143
139k
        horz_out[row] =
1144
139k
            _mm256_set1_epi16(const4 + ref[iy * stride + (width - 1)] * const5);
1145
2.00M
      } else if (((ix4 - 7) < 0) || ((ix4 + 9) > width)) {
1146
90.4k
        const int out_of_boundary_left = -(ix4 - 6);
1147
90.4k
        const int out_of_boundary_right = (ix4 + 8) - width;
1148
90.4k
        int iy, sx, row = 0;
1149
717k
        for (k = -7; k <= (AOMMIN(8, p_height - i) - 2); k += 2) {
1150
626k
          iy = iy4 + k;
1151
626k
          iy = clamp(iy, 0, height - 1);
1152
626k
          __m128i src0 =
1153
626k
              _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
1154
626k
          iy = iy4 + k + 1;
1155
626k
          iy = clamp(iy, 0, height - 1);
1156
626k
          __m128i src1 =
1157
626k
              _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
1158
1159
626k
          if (out_of_boundary_left >= 0) {
1160
375k
            const __m128i shuffle_reg_left =
1161
375k
                _mm_loadu_si128((__m128i *)warp_pad_left[out_of_boundary_left]);
1162
375k
            src0 = _mm_shuffle_epi8(src0, shuffle_reg_left);
1163
375k
            src1 = _mm_shuffle_epi8(src1, shuffle_reg_left);
1164
375k
          }
1165
626k
          if (out_of_boundary_right >= 0) {
1166
335k
            const __m128i shuffle_reg_right = _mm_loadu_si128(
1167
335k
                (__m128i *)warp_pad_right[out_of_boundary_right]);
1168
335k
            src0 = _mm_shuffle_epi8(src0, shuffle_reg_right);
1169
335k
            src1 = _mm_shuffle_epi8(src1, shuffle_reg_right);
1170
335k
          }
1171
626k
          sx = sx4 + beta * (k + 4);
1172
626k
          const __m256i src_01 =
1173
626k
              _mm256_inserti128_si256(_mm256_castsi128_si256(src0), src1, 0x1);
1174
626k
          horizontal_filter_avx2(src_01, horz_out, sx, alpha, beta, row,
1175
626k
                                 shuffle_src, &round_const, &shift);
1176
626k
          row += 1;
1177
626k
        }
1178
90.4k
        iy = iy4 + k;
1179
90.4k
        iy = clamp(iy, 0, height - 1);
1180
90.4k
        __m128i src = _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
1181
90.4k
        if (out_of_boundary_left >= 0) {
1182
53.6k
          const __m128i shuffle_reg_left =
1183
53.6k
              _mm_loadu_si128((__m128i *)warp_pad_left[out_of_boundary_left]);
1184
53.6k
          src = _mm_shuffle_epi8(src, shuffle_reg_left);
1185
53.6k
        }
1186
90.4k
        if (out_of_boundary_right >= 0) {
1187
47.9k
          const __m128i shuffle_reg_right =
1188
47.9k
              _mm_loadu_si128((__m128i *)warp_pad_right[out_of_boundary_right]);
1189
47.9k
          src = _mm_shuffle_epi8(src, shuffle_reg_right);
1190
47.9k
        }
1191
90.4k
        sx = sx4 + beta * (k + 4);
1192
90.4k
        const __m256i src_01 = _mm256_castsi128_si256(src);
1193
90.4k
        __m256i coeff[4];
1194
90.4k
        prepare_horizontal_filter_coeff(alpha, sx, coeff);
1195
90.4k
        filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src,
1196
90.4k
                               &round_const, &shift, row);
1197
1.91M
      } else {
1198
1.91M
        prepare_warp_horizontal_filter_avx2(
1199
1.91M
            ref, horz_out, stride, ix4, iy4, sx4, alpha, beta, p_height, height,
1200
1.91M
            i, &round_const, &shift, shuffle_src);
1201
1.91M
      }
1202
1203
      // Vertical filter
1204
2.23M
      prepare_warp_vertical_filter_avx2(
1205
2.23M
          pred, horz_out, conv_params, gamma, delta, p_height, p_stride,
1206
2.23M
          p_width, i, j, sy4, reduce_bits_vert, &res_add_const_1, round_bits,
1207
2.23M
          &res_sub_const, &round_bits_const, &wt);
1208
2.23M
    }
1209
508k
  }
1210
210k
}
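
For orientation, the per-block setup at the top of the i/j loops in av1_warp_affine_avx2 maps each 8x8 output block through the affine model and splits the result into an integer sample position (ix4, iy4) and a fractional filter phase (sx4, sy4). A hypothetical helper restating that arithmetic for the column axis, ignoring subsampling and the constant-term adjustments done in the function (warp_block_col is illustrative only):

#include <stdint.h>
#include "av1/common/warped_motion.h"

// Column mapping for a block sampled at source position (x, y):
// mat[2] * x + mat[3] * y + mat[0] in WARPEDMODEL_PREC_BITS fixed point.
// ix selects the reference column; frac selects the horizontal filter phase.
static void warp_block_col(const int32_t *mat, int32_t x, int32_t y,
                           int32_t *ix, int32_t *frac) {
  const int64_t xp = (int64_t)mat[2] * x + (int64_t)mat[3] * y + mat[0];
  *ix = (int32_t)(xp >> WARPEDMODEL_PREC_BITS);
  *frac = (int32_t)(xp & ((1 << WARPEDMODEL_PREC_BITS) - 1));
}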