/src/aom/third_party/SVT-AV1/convolve_2d_avx2.h
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2017, Alliance for Open Media. All rights reserved |
3 | | * |
4 | | * This source code is subject to the terms of the BSD 2 Clause License and |
5 | | * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License |
6 | | * was not distributed with this source code in the LICENSE file, you can |
7 | | * obtain it at www.aomedia.org/license/software. If the Alliance for Open |
8 | | * Media Patent License 1.0 was not distributed with this source code in the |
9 | | * PATENTS file, you can obtain it at www.aomedia.org/license/patent. |
10 | | */ |
11 | | |
12 | | #ifndef THIRD_PARTY_SVT_AV1_CONVOLVE_2D_AVX2_H_ |
13 | | #define THIRD_PARTY_SVT_AV1_CONVOLVE_2D_AVX2_H_ |
14 | | |
15 | | #include "convolve_avx2.h" |
16 | | |
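| | /*
| |  * Overview: each convolve_2d_sr_* pair below implements AV1's two-pass
| |  * single-reference 2D convolution. A horizontal kernel filters source
| |  * pixels into the 16-bit intermediate im_block, and a vertical kernel
| |  * filters im_block into output pixels. Both passes are specialized by
| |  * filter tap count (2/4/6/8) and by block width, with SSE2/SSSE3 paths
| |  * for narrow blocks and AVX2 paths for wide ones.
| |  */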
17 | | static void convolve_2d_sr_hor_2tap_avx2( |
18 | | const uint8_t *const src, const int32_t src_stride, const int32_t w, |
19 | | const int32_t h, const InterpFilterParams *const filter_params_x, |
20 | 79.6k | const int32_t subpel_x_q4, int16_t *const im_block) { |
21 | 79.6k | const uint8_t *src_ptr = src; |
22 | 79.6k | int32_t y = h; |
23 | 79.6k | int16_t *im = im_block; |
24 | | |
25 | 79.6k | if (w <= 8) { |
26 | 63.3k | __m128i coeffs_128; |
27 | | |
28 | 63.3k | prepare_half_coeffs_2tap_ssse3(filter_params_x, subpel_x_q4, &coeffs_128); |
29 | | |
30 | 63.3k | if (w == 2) { |
31 | 31.9k | do { |
32 | 31.9k | const __m128i r = |
33 | 31.9k | x_convolve_2tap_2x2_sse4_1(src_ptr, src_stride, &coeffs_128); |
34 | 31.9k | xy_x_round_store_2x2_sse2(r, im); |
35 | 31.9k | src_ptr += 2 * src_stride; |
36 | 31.9k | im += 2 * 2; |
37 | 31.9k | y -= 2; |
38 | 31.9k | } while (y); |
39 | 52.3k | } else if (w == 4) { |
40 | 119k | do { |
41 | 119k | const __m128i r = |
42 | 119k | x_convolve_2tap_4x2_ssse3(src_ptr, src_stride, &coeffs_128); |
43 | 119k | xy_x_round_store_4x2_sse2(r, im); |
44 | 119k | src_ptr += 2 * src_stride; |
45 | 119k | im += 2 * 4; |
46 | 119k | y -= 2; |
47 | 119k | } while (y); |
48 | 30.4k | } else { |
49 | 21.9k | assert(w == 8); |
50 | | |
51 | 99.5k | do { |
52 | 99.5k | __m128i r[2]; |
53 | | |
54 | 99.5k | x_convolve_2tap_8x2_ssse3(src_ptr, src_stride, &coeffs_128, r); |
55 | 99.5k | xy_x_round_store_8x2_sse2(r, im); |
56 | 99.5k | src_ptr += 2 * src_stride; |
57 | 99.5k | im += 2 * 8; |
58 | 99.5k | y -= 2; |
59 | 99.5k | } while (y); |
60 | 21.9k | } |
61 | 63.3k | } else { |
62 | 16.3k | __m256i coeffs_256; |
63 | | |
64 | 16.3k | prepare_half_coeffs_2tap_avx2(filter_params_x, subpel_x_q4, &coeffs_256); |
65 | | |
66 | 16.3k | if (w == 16) { |
67 | 69.6k | do { |
68 | 69.6k | __m256i r[2]; |
69 | | |
70 | 69.6k | x_convolve_2tap_16x2_avx2(src_ptr, src_stride, &coeffs_256, r); |
71 | 69.6k | xy_x_round_store_32_avx2(r, im); |
72 | 69.6k | src_ptr += 2 * src_stride; |
73 | 69.6k | im += 2 * 16; |
74 | 69.6k | y -= 2; |
75 | 69.6k | } while (y); |
76 | 10.1k | } else if (w == 32) { |
77 | 108k | do { |
78 | 108k | xy_x_2tap_32_avx2(src_ptr, &coeffs_256, im); |
79 | 108k | src_ptr += src_stride; |
80 | 108k | im += 32; |
81 | 108k | } while (--y); |
82 | 3.84k | } else if (w == 64) { |
83 | 92.7k | do { |
84 | 92.7k | xy_x_2tap_32_avx2(src_ptr + 0 * 32, &coeffs_256, im + 0 * 32); |
85 | 92.7k | xy_x_2tap_32_avx2(src_ptr + 1 * 32, &coeffs_256, im + 1 * 32); |
86 | 92.7k | src_ptr += src_stride; |
87 | 92.7k | im += 64; |
88 | 92.7k | } while (--y); |
89 | 1.89k | } else { |
90 | 415 | assert(w == 128); |
91 | | |
92 | 37.1k | do { |
93 | 37.1k | xy_x_2tap_32_avx2(src_ptr + 0 * 32, &coeffs_256, im + 0 * 32); |
94 | 37.1k | xy_x_2tap_32_avx2(src_ptr + 1 * 32, &coeffs_256, im + 1 * 32); |
95 | 37.1k | xy_x_2tap_32_avx2(src_ptr + 2 * 32, &coeffs_256, im + 2 * 32); |
96 | 37.1k | xy_x_2tap_32_avx2(src_ptr + 3 * 32, &coeffs_256, im + 3 * 32); |
97 | 37.1k | src_ptr += src_stride; |
98 | 37.1k | im += 128; |
99 | 37.1k | } while (--y); |
100 | 415 | } |
101 | 16.3k | } |
102 | 79.6k | } |
103 | | |
104 | | static void convolve_2d_sr_hor_4tap_ssse3( |
105 | | const uint8_t *const src, const int32_t src_stride, const int32_t w, |
106 | | const int32_t h, const InterpFilterParams *const filter_params_x, |
107 | 875k | const int32_t subpel_x_q4, int16_t *const im_block) { |
108 | 875k | const uint8_t *src_ptr = src - 1; |
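| | // src is moved left by taps / 2 - 1 pixels so the filter window is
| | // centered on the output pixel: -1 here for 4-tap, -2 for 6-tap and
| | // -3 for 8-tap in the functions below.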
109 | 875k | int32_t y = h; |
110 | 875k | int16_t *im = im_block; |
111 | | |
112 | 875k | if (w <= 4) { |
113 | 822k | __m128i coeffs_128[2]; |
114 | | |
115 | 822k | prepare_half_coeffs_4tap_ssse3(filter_params_x, subpel_x_q4, coeffs_128); |
116 | 822k | if (w == 2) { |
117 | 845k | do { |
118 | 845k | const __m128i r = |
119 | 845k | x_convolve_4tap_2x2_ssse3(src_ptr, src_stride, coeffs_128); |
120 | 845k | xy_x_round_store_2x2_sse2(r, im); |
121 | 845k | src_ptr += 2 * src_stride; |
122 | 845k | im += 2 * 2; |
123 | 845k | y -= 2; |
124 | 845k | } while (y); |
125 | 658k | } else if (w == 4) { |
126 | 3.58M | do { |
127 | 3.58M | const __m128i r = |
128 | 3.58M | x_convolve_4tap_4x2_ssse3(src_ptr, src_stride, coeffs_128); |
129 | 3.58M | xy_x_round_store_4x2_sse2(r, im); |
130 | 3.58M | src_ptr += 2 * src_stride; |
131 | 3.58M | im += 2 * 4; |
132 | 3.58M | y -= 2; |
133 | 3.58M | } while (y); |
134 | 658k | } |
135 | 822k | } else { |
136 | | // TODO(chiyotsai@google.com): Add better optimization |
137 | 52.5k | __m256i coeffs_256[2], filt_256[2]; |
138 | | |
139 | 52.5k | prepare_half_coeffs_4tap_avx2(filter_params_x, subpel_x_q4, coeffs_256); |
140 | 52.5k | filt_256[0] = _mm256_load_si256((__m256i const *)filt1_global_avx2); |
141 | 52.5k | filt_256[1] = _mm256_load_si256((__m256i const *)filt2_global_avx2); |
142 | | |
143 | 52.5k | if (w == 8) { |
144 | 173k | do { |
145 | 173k | __m256i res = |
146 | 173k | x_convolve_4tap_8x2_avx2(src_ptr, src_stride, coeffs_256, filt_256); |
147 | 173k | xy_x_round_store_8x2_avx2(res, im); |
148 | | |
149 | 173k | src_ptr += 2 * src_stride; |
150 | 173k | im += 2 * 8; |
151 | 173k | y -= 2; |
152 | 173k | } while (y); |
153 | 30.3k | } else if (w == 16) { |
154 | 117k | do { |
155 | 117k | __m256i r[2]; |
156 | | |
157 | 117k | x_convolve_4tap_16x2_avx2(src_ptr, src_stride, coeffs_256, filt_256, r); |
158 | 117k | xy_x_round_store_32_avx2(r, im); |
159 | 117k | src_ptr += 2 * src_stride; |
160 | 117k | im += 2 * 16; |
161 | 117k | y -= 2; |
162 | 117k | } while (y); |
163 | 15.4k | } else if (w == 32) { |
164 | 116k | do { |
165 | 116k | xy_x_4tap_32_avx2(src_ptr, coeffs_256, filt_256, im); |
166 | | |
167 | 116k | src_ptr += src_stride; |
168 | 116k | im += 32; |
169 | 116k | } while (--y); |
170 | 4.14k | } else if (w == 64) { |
171 | 123k | do { |
172 | 123k | xy_x_4tap_32_avx2(src_ptr, coeffs_256, filt_256, im); |
173 | 123k | xy_x_4tap_32_avx2(src_ptr + 32, coeffs_256, filt_256, im + 32); |
174 | 123k | src_ptr += src_stride; |
175 | 123k | im += 64; |
176 | 123k | } while (--y); |
177 | 1.97k | } else { |
178 | 612 | assert(w == 128); |
179 | | |
180 | 83.0k | do { |
181 | 83.0k | xy_x_4tap_32_avx2(src_ptr, coeffs_256, filt_256, im); |
182 | 83.0k | xy_x_4tap_32_avx2(src_ptr + 32, coeffs_256, filt_256, im + 32); |
183 | 83.0k | xy_x_4tap_32_avx2(src_ptr + 64, coeffs_256, filt_256, im + 64); |
184 | 83.0k | xy_x_4tap_32_avx2(src_ptr + 96, coeffs_256, filt_256, im + 96); |
185 | 83.0k | src_ptr += src_stride; |
186 | 83.0k | im += 128; |
187 | 83.0k | } while (--y); |
188 | 639 | } |
189 | 52.5k | } |
190 | 875k | } |
191 | | |
192 | | static void convolve_2d_sr_hor_6tap_avx2( |
193 | | const uint8_t *const src, const int32_t src_stride, const int32_t w, |
194 | | const int32_t h, const InterpFilterParams *const filter_params_x, |
195 | 922k | const int32_t subpel_x_q4, int16_t *const im_block) { |
196 | 922k | const uint8_t *src_ptr = src - 2; |
197 | 922k | int32_t y = h; |
198 | 922k | int16_t *im = im_block; |
199 | | |
200 | 922k | if (w <= 4) { |
201 | 0 | __m128i coeffs_128[3]; |
202 | |
203 | 0 | prepare_half_coeffs_6tap_ssse3(filter_params_x, subpel_x_q4, coeffs_128); |
204 | 0 | if (w == 2) { |
205 | 0 | do { |
206 | 0 | const __m128i r = |
207 | 0 | x_convolve_6tap_2x2_ssse3(src_ptr, src_stride, coeffs_128); |
208 | 0 | xy_x_round_store_2x2_sse2(r, im); |
209 | 0 | src_ptr += 2 * src_stride; |
210 | 0 | im += 2 * 2; |
211 | 0 | y -= 2; |
212 | 0 | } while (y); |
213 | 0 | } else if (w == 4) { |
214 | 0 | do { |
215 | 0 | const __m128i r = |
216 | 0 | x_convolve_6tap_4x2_ssse3(src_ptr, src_stride, coeffs_128); |
217 | 0 | xy_x_round_store_4x2_sse2(r, im); |
218 | 0 | src_ptr += 2 * src_stride; |
219 | 0 | im += 2 * 4; |
220 | 0 | y -= 2; |
221 | 0 | } while (y); |
222 | 0 | } |
223 | 922k | } else { |
224 | 922k | __m256i coeffs_256[3], filt_256[3]; |
225 | | |
226 | 922k | filt_256[0] = _mm256_loadu_si256((__m256i const *)filt1_global_avx2); |
227 | 922k | filt_256[1] = _mm256_loadu_si256((__m256i const *)filt2_global_avx2); |
228 | 922k | filt_256[2] = _mm256_loadu_si256((__m256i const *)filt3_global_avx2); |
229 | | |
230 | 922k | prepare_half_coeffs_6tap_avx2(filter_params_x, subpel_x_q4, coeffs_256); |
231 | | |
232 | 922k | if (w == 8) { |
233 | 3.28M | do { |
234 | 3.28M | const __m256i res = |
235 | 3.28M | x_convolve_6tap_8x2_avx2(src_ptr, src_stride, coeffs_256, filt_256); |
236 | 3.28M | xy_x_round_store_8x2_avx2(res, im); |
237 | | |
238 | 3.28M | src_ptr += 2 * src_stride; |
239 | 3.28M | im += 2 * 8; |
240 | 3.28M | y -= 2; |
241 | 3.28M | } while (y); |
242 | 557k | } else if (w == 16) { |
243 | 2.19M | do { |
244 | 2.19M | __m256i r[2]; |
245 | | |
246 | 2.19M | x_convolve_6tap_16x2_avx2(src_ptr, src_stride, coeffs_256, filt_256, r); |
247 | 2.19M | xy_x_round_store_32_avx2(r, im); |
248 | 2.19M | src_ptr += 2 * src_stride; |
249 | 2.19M | im += 2 * 16; |
250 | 2.19M | y -= 2; |
251 | 2.19M | } while (y); |
252 | 291k | } else if (w == 32) { |
253 | 1.61M | do { |
254 | 1.61M | xy_x_6tap_32_avx2(src_ptr, coeffs_256, filt_256, im); |
255 | 1.61M | src_ptr += src_stride; |
256 | 1.61M | im += 32; |
257 | 1.61M | } while (--y); |
258 | 62.4k | } else if (w == 64) { |
259 | 556k | do { |
260 | 556k | xy_x_6tap_32_avx2(src_ptr, coeffs_256, filt_256, im); |
261 | 556k | xy_x_6tap_32_avx2(src_ptr + 32, coeffs_256, filt_256, im + 32); |
262 | 556k | src_ptr += src_stride; |
263 | 556k | im += 64; |
264 | 556k | } while (--y); |
265 | 10.0k | } else { |
266 | 1.15k | assert(w == 128); |
267 | | |
268 | 160k | do { |
269 | 160k | xy_x_6tap_32_avx2(src_ptr, coeffs_256, filt_256, im); |
270 | 160k | xy_x_6tap_32_avx2(src_ptr + 32, coeffs_256, filt_256, im + 32); |
271 | 160k | xy_x_6tap_32_avx2(src_ptr + 64, coeffs_256, filt_256, im + 64); |
272 | 160k | xy_x_6tap_32_avx2(src_ptr + 96, coeffs_256, filt_256, im + 96); |
273 | 160k | src_ptr += src_stride; |
274 | 160k | im += 128; |
275 | 160k | } while (--y); |
276 | 1.35k | } |
277 | 922k | } |
278 | 922k | } |
279 | | |
280 | | static void convolve_2d_sr_hor_8tap_avx2( |
281 | | const uint8_t *const src, const int32_t src_stride, const int32_t w, |
282 | | const int32_t h, const InterpFilterParams *const filter_params_x, |
283 | 71.0k | const int32_t subpel_x_q4, int16_t *const im_block) { |
284 | 71.0k | const uint8_t *src_ptr = src - 3; |
285 | 71.0k | int32_t y = h; |
286 | 71.0k | int16_t *im = im_block; |
287 | 71.0k | __m256i coeffs_256[4], filt_256[4]; |
288 | | |
289 | 71.0k | filt_256[0] = _mm256_loadu_si256((__m256i const *)filt1_global_avx2); |
290 | 71.0k | filt_256[1] = _mm256_loadu_si256((__m256i const *)filt2_global_avx2); |
291 | 71.0k | filt_256[2] = _mm256_loadu_si256((__m256i const *)filt3_global_avx2); |
292 | 71.0k | filt_256[3] = _mm256_loadu_si256((__m256i const *)filt4_global_avx2); |
293 | | |
294 | 71.0k | prepare_half_coeffs_8tap_avx2(filter_params_x, subpel_x_q4, coeffs_256); |
295 | | |
296 | 71.0k | if (w == 8) { |
297 | 203k | do { |
298 | 203k | const __m256i res = |
299 | 203k | x_convolve_8tap_8x2_avx2(src_ptr, src_stride, coeffs_256, filt_256); |
300 | 203k | xy_x_round_store_8x2_avx2(res, im); |
301 | 203k | src_ptr += 2 * src_stride; |
302 | 203k | im += 2 * 8; |
303 | 203k | y -= 2; |
304 | 203k | } while (y); |
305 | 38.8k | } else if (w == 16) { |
306 | 129k | do { |
307 | 129k | __m256i r[2]; |
308 | | |
309 | 129k | x_convolve_8tap_16x2_avx2(src_ptr, src_stride, coeffs_256, filt_256, r); |
310 | 129k | xy_x_round_store_32_avx2(r, im); |
311 | 129k | src_ptr += 2 * src_stride; |
312 | 129k | im += 2 * 16; |
313 | 129k | y -= 2; |
314 | 129k | } while (y); |
315 | 23.6k | } else if (w == 32) { |
316 | 409k | do { |
317 | 409k | xy_x_8tap_32_avx2(src_ptr, coeffs_256, filt_256, im); |
318 | 409k | src_ptr += src_stride; |
319 | 409k | im += 32; |
320 | 409k | } while (--y); |
321 | 16.0k | } else if (w == 64) { |
322 | 316k | do { |
323 | 316k | xy_x_8tap_32_avx2(src_ptr, coeffs_256, filt_256, im); |
324 | 316k | xy_x_8tap_32_avx2(src_ptr + 32, coeffs_256, filt_256, im + 32); |
325 | 316k | src_ptr += src_stride; |
326 | 316k | im += 64; |
327 | 316k | } while (--y); |
328 | 7.26k | } else { |
329 | 281 | assert(w == 128); |
330 | | |
331 | 33.3k | do { |
332 | 33.3k | xy_x_8tap_32_avx2(src_ptr, coeffs_256, filt_256, im); |
333 | 33.3k | xy_x_8tap_32_avx2(src_ptr + 32, coeffs_256, filt_256, im + 32); |
334 | 33.3k | xy_x_8tap_32_avx2(src_ptr + 64, coeffs_256, filt_256, im + 64); |
335 | 33.3k | xy_x_8tap_32_avx2(src_ptr + 96, coeffs_256, filt_256, im + 96); |
336 | 33.3k | src_ptr += src_stride; |
337 | 33.3k | im += 128; |
338 | 33.3k | } while (--y); |
339 | 281 | } |
340 | 71.0k | } |
341 | | |
342 | | static void convolve_2d_sr_ver_2tap_avx2( |
343 | | const int16_t *const im_block, const int32_t w, const int32_t h, |
344 | | const InterpFilterParams *const filter_params_y, const int32_t subpel_y_q4, |
345 | 57.8k | uint8_t *dst, const int32_t dst_stride) { |
346 | 57.8k | const int16_t *im = im_block; |
347 | 57.8k | int32_t y = h; |
348 | | |
349 | 57.8k | if (w <= 4) { |
350 | 30.6k | __m128i coeffs_128; |
351 | | |
352 | 30.6k | prepare_coeffs_2tap_sse2(filter_params_y, subpel_y_q4, &coeffs_128); |
353 | | |
354 | 30.6k | if (w == 2) { |
355 | 8.29k | __m128i s_32[2]; |
356 | | |
357 | 8.29k | s_32[0] = _mm_cvtsi32_si128(*(int32_t *)im); |
358 | | |
359 | 15.7k | do { |
360 | 15.7k | const __m128i res = xy_y_convolve_2tap_2x2_sse2(im, s_32, &coeffs_128); |
361 | 15.7k | xy_y_round_store_2x2_sse2(res, dst, dst_stride); |
362 | 15.7k | im += 2 * 2; |
363 | 15.7k | dst += 2 * dst_stride; |
364 | 15.7k | y -= 2; |
365 | 15.7k | } while (y); |
366 | 22.3k | } else { |
367 | 22.3k | __m128i s_64[2], r[2]; |
368 | | |
369 | 22.3k | assert(w == 4); |
370 | | |
371 | 22.3k | s_64[0] = _mm_loadl_epi64((__m128i *)im); |
372 | | |
373 | 66.2k | do { |
374 | 66.2k | xy_y_convolve_2tap_4x2_sse2(im, s_64, &coeffs_128, r); |
375 | 66.2k | r[0] = xy_y_round_sse2(r[0]); |
376 | 66.2k | r[1] = xy_y_round_sse2(r[1]); |
377 | 66.2k | const __m128i rr = _mm_packs_epi32(r[0], r[1]); |
378 | 66.2k | pack_store_4x2_sse2(rr, dst, dst_stride); |
379 | 66.2k | im += 2 * 4; |
380 | 66.2k | dst += 2 * dst_stride; |
381 | 66.2k | y -= 2; |
382 | 66.2k | } while (y); |
383 | 22.3k | } |
384 | 30.6k | } else { |
385 | 27.2k | __m256i coeffs_256; |
386 | | |
387 | 27.2k | prepare_coeffs_2tap_avx2(filter_params_y, subpel_y_q4, &coeffs_256); |
388 | | |
389 | 27.2k | if (w == 8) { |
390 | 16.0k | __m128i s_128[2]; |
391 | 16.0k | __m256i r[2]; |
392 | | |
393 | 16.0k | s_128[0] = _mm_loadu_si128((__m128i *)im); |
394 | | |
395 | 56.3k | do { |
396 | 56.3k | xy_y_convolve_2tap_8x2_avx2(im, s_128, &coeffs_256, r); |
397 | 56.3k | xy_y_round_store_8x2_avx2(r, dst, dst_stride); |
398 | 56.3k | im += 2 * 8; |
399 | 56.3k | dst += 2 * dst_stride; |
400 | 56.3k | y -= 2; |
401 | 56.3k | } while (y); |
402 | 16.0k | } else if (w == 16) { |
403 | 7.24k | __m256i s_256[2], r[4]; |
404 | | |
405 | 7.24k | s_256[0] = _mm256_loadu_si256((__m256i *)im); |
406 | | |
407 | 40.9k | do { |
408 | 40.9k | xy_y_convolve_2tap_16x2_avx2(im, s_256, &coeffs_256, r); |
409 | 40.9k | xy_y_round_store_16x2_avx2(r, dst, dst_stride); |
410 | 40.9k | im += 2 * 16; |
411 | 40.9k | dst += 2 * dst_stride; |
412 | 40.9k | y -= 2; |
413 | 40.9k | } while (y); |
414 | 7.24k | } else if (w == 32) { |
415 | 2.45k | __m256i s_256[2][2]; |
416 | | |
417 | 2.45k | s_256[0][0] = _mm256_loadu_si256((__m256i *)(im + 0 * 16)); |
418 | 2.45k | s_256[0][1] = _mm256_loadu_si256((__m256i *)(im + 1 * 16)); |
419 | | |
420 | 31.7k | do { |
421 | 31.7k | xy_y_convolve_2tap_32_all_avx2(im + 32, s_256[0], s_256[1], &coeffs_256, |
422 | 31.7k | dst); |
423 | 31.7k | im += 2 * 32; |
424 | 31.7k | xy_y_convolve_2tap_32_all_avx2(im, s_256[1], s_256[0], &coeffs_256, |
425 | 31.7k | dst + dst_stride); |
426 | 31.7k | dst += 2 * dst_stride; |
427 | 31.7k | y -= 2; |
428 | 31.7k | } while (y); |
429 | 2.45k | } else if (w == 64) { |
430 | 1.28k | __m256i s_256[2][4]; |
431 | | |
432 | 1.28k | s_256[0][0] = _mm256_loadu_si256((__m256i *)(im + 0 * 16)); |
433 | 1.28k | s_256[0][1] = _mm256_loadu_si256((__m256i *)(im + 1 * 16)); |
434 | 1.28k | s_256[0][2] = _mm256_loadu_si256((__m256i *)(im + 2 * 16)); |
435 | 1.28k | s_256[0][3] = _mm256_loadu_si256((__m256i *)(im + 3 * 16)); |
436 | | |
437 | 28.9k | do { |
438 | 28.9k | xy_y_convolve_2tap_32_all_avx2(im + 64, s_256[0] + 0, s_256[1] + 0, |
439 | 28.9k | &coeffs_256, dst); |
440 | 28.9k | xy_y_convolve_2tap_32_all_avx2(im + 96, s_256[0] + 2, s_256[1] + 2, |
441 | 28.9k | &coeffs_256, dst + 32); |
442 | 28.9k | im += 2 * 64; |
443 | 28.9k | xy_y_convolve_2tap_32_all_avx2(im, s_256[1] + 0, s_256[0] + 0, |
444 | 28.9k | &coeffs_256, dst + dst_stride); |
445 | 28.9k | xy_y_convolve_2tap_32_all_avx2(im + 32, s_256[1] + 2, s_256[0] + 2, |
446 | 28.9k | &coeffs_256, dst + dst_stride + 32); |
447 | 28.9k | dst += 2 * dst_stride; |
448 | 28.9k | y -= 2; |
449 | 28.9k | } while (y); |
450 | 1.28k | } else { |
451 | 224 | __m256i s_256[2][8]; |
452 | | |
453 | 224 | assert(w == 128); |
454 | | |
455 | 224 | load_16bit_8rows_avx2(im, 16, s_256[0]); |
456 | | |
457 | 10.4k | do { |
458 | 10.4k | xy_y_convolve_2tap_32_all_avx2(im + 128, s_256[0] + 0, s_256[1] + 0, |
459 | 10.4k | &coeffs_256, dst); |
460 | 10.4k | xy_y_convolve_2tap_32_all_avx2(im + 160, s_256[0] + 2, s_256[1] + 2, |
461 | 10.4k | &coeffs_256, dst + 1 * 32); |
462 | 10.4k | xy_y_convolve_2tap_32_all_avx2(im + 192, s_256[0] + 4, s_256[1] + 4, |
463 | 10.4k | &coeffs_256, dst + 2 * 32); |
464 | 10.4k | xy_y_convolve_2tap_32_all_avx2(im + 224, s_256[0] + 6, s_256[1] + 6, |
465 | 10.4k | &coeffs_256, dst + 3 * 32); |
466 | 10.4k | im += 2 * 128; |
467 | 10.4k | xy_y_convolve_2tap_32_all_avx2(im, s_256[1] + 0, s_256[0] + 0, |
468 | 10.4k | &coeffs_256, dst + dst_stride); |
469 | 10.4k | xy_y_convolve_2tap_32_all_avx2(im + 32, s_256[1] + 2, s_256[0] + 2, |
470 | 10.4k | &coeffs_256, dst + dst_stride + 1 * 32); |
471 | 10.4k | xy_y_convolve_2tap_32_all_avx2(im + 64, s_256[1] + 4, s_256[0] + 4, |
472 | 10.4k | &coeffs_256, dst + dst_stride + 2 * 32); |
473 | 10.4k | xy_y_convolve_2tap_32_all_avx2(im + 96, s_256[1] + 6, s_256[0] + 6, |
474 | 10.4k | &coeffs_256, dst + dst_stride + 3 * 32); |
475 | 10.4k | dst += 2 * dst_stride; |
476 | 10.4k | y -= 2; |
477 | 10.4k | } while (y); |
478 | 224 | } |
479 | 27.2k | } |
480 | 57.8k | } |
481 | | |
482 | | static void convolve_2d_sr_ver_2tap_half_avx2( |
483 | | const int16_t *const im_block, const int32_t w, const int32_t h, |
484 | | const InterpFilterParams *const filter_params_y, const int32_t subpel_y_q4, |
485 | 21.8k | uint8_t *dst, const int32_t dst_stride) { |
486 | 21.8k | const int16_t *im = im_block; |
487 | 21.8k | int32_t y = h; |
488 | | |
489 | 21.8k | (void)filter_params_y; |
490 | 21.8k | (void)subpel_y_q4; |
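| | // A half-pel 2-tap filter has coefficients (1/2, 1/2), so the filter
| | // parameters are unused: the half_pel helpers below reduce to averaging
| | // two vertically adjacent rows plus the final rounding shift.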
491 | | |
492 | 21.8k | if (w == 2) { |
493 | 2.70k | __m128i s_32[2]; |
494 | | |
495 | 2.70k | s_32[0] = _mm_cvtsi32_si128(*(int32_t *)im); |
496 | | |
497 | 5.20k | do { |
498 | 5.20k | const __m128i res = xy_y_convolve_2tap_2x2_half_pel_sse2(im, s_32); |
499 | 5.20k | const __m128i r = xy_y_round_half_pel_sse2(res); |
500 | 5.20k | pack_store_2x2_sse2(r, dst, dst_stride); |
501 | 5.20k | im += 2 * 2; |
502 | 5.20k | dst += 2 * dst_stride; |
503 | 5.20k | y -= 2; |
504 | 5.20k | } while (y); |
505 | 19.1k | } else if (w == 4) { |
506 | 8.05k | __m128i s_64[2]; |
507 | | |
508 | 8.05k | s_64[0] = _mm_loadl_epi64((__m128i *)im); |
509 | | |
510 | 22.8k | do { |
511 | 22.8k | const __m128i res = xy_y_convolve_2tap_4x2_half_pel_sse2(im, s_64); |
512 | 22.8k | const __m128i r = xy_y_round_half_pel_sse2(res); |
513 | 22.8k | pack_store_4x2_sse2(r, dst, dst_stride); |
514 | 22.8k | im += 2 * 4; |
515 | 22.8k | dst += 2 * dst_stride; |
516 | 22.8k | y -= 2; |
517 | 22.8k | } while (y); |
518 | 11.0k | } else if (w == 8) { |
519 | 5.94k | __m128i s_128[2]; |
520 | | |
521 | 5.94k | s_128[0] = _mm_loadu_si128((__m128i *)im); |
522 | | |
523 | 21.3k | do { |
524 | 21.3k | const __m256i res = xy_y_convolve_2tap_8x2_half_pel_avx2(im, s_128); |
525 | 21.3k | const __m256i r = xy_y_round_half_pel_avx2(res); |
526 | 21.3k | pack_store_8x2_avx2(r, dst, dst_stride); |
527 | 21.3k | im += 2 * 8; |
528 | 21.3k | dst += 2 * dst_stride; |
529 | 21.3k | y -= 2; |
530 | 21.3k | } while (y); |
531 | 5.94k | } else if (w == 16) { |
532 | 2.92k | __m256i s_256[2], r[2]; |
533 | | |
534 | 2.92k | s_256[0] = _mm256_loadu_si256((__m256i *)im); |
535 | | |
536 | 18.5k | do { |
537 | 18.5k | xy_y_convolve_2tap_16x2_half_pel_avx2(im, s_256, r); |
538 | 18.5k | r[0] = xy_y_round_half_pel_avx2(r[0]); |
539 | 18.5k | r[1] = xy_y_round_half_pel_avx2(r[1]); |
540 | 18.5k | xy_y_pack_store_16x2_avx2(r[0], r[1], dst, dst_stride); |
541 | 18.5k | im += 2 * 16; |
542 | 18.5k | dst += 2 * dst_stride; |
543 | 18.5k | y -= 2; |
544 | 18.5k | } while (y); |
545 | 2.92k | } else if (w == 32) { |
546 | 1.39k | __m256i s_256[2][2]; |
547 | | |
548 | 1.39k | s_256[0][0] = _mm256_loadu_si256((__m256i *)(im + 0 * 16)); |
549 | 1.39k | s_256[0][1] = _mm256_loadu_si256((__m256i *)(im + 1 * 16)); |
550 | | |
551 | 18.7k | do { |
552 | 18.7k | xy_y_convolve_2tap_half_pel_32_all_avx2(im + 32, s_256[0], s_256[1], dst); |
553 | 18.7k | xy_y_convolve_2tap_half_pel_32_all_avx2(im + 2 * 32, s_256[1], s_256[0], |
554 | 18.7k | dst + dst_stride); |
555 | 18.7k | im += 2 * 32; |
556 | 18.7k | dst += 2 * dst_stride; |
557 | 18.7k | y -= 2; |
558 | 18.7k | } while (y); |
559 | 1.39k | } else if (w == 64) { |
560 | 606 | __m256i s_256[2][4]; |
561 | | |
562 | 606 | s_256[0][0] = _mm256_loadu_si256((__m256i *)(im + 0 * 16)); |
563 | 606 | s_256[0][1] = _mm256_loadu_si256((__m256i *)(im + 1 * 16)); |
564 | 606 | s_256[0][2] = _mm256_loadu_si256((__m256i *)(im + 2 * 16)); |
565 | 606 | s_256[0][3] = _mm256_loadu_si256((__m256i *)(im + 3 * 16)); |
566 | | |
567 | 15.5k | do { |
568 | 15.5k | xy_y_convolve_2tap_half_pel_32_all_avx2(im + 64, s_256[0] + 0, |
569 | 15.5k | s_256[1] + 0, dst); |
570 | 15.5k | xy_y_convolve_2tap_half_pel_32_all_avx2(im + 96, s_256[0] + 2, |
571 | 15.5k | s_256[1] + 2, dst + 32); |
572 | 15.5k | im += 2 * 64; |
573 | 15.5k | xy_y_convolve_2tap_half_pel_32_all_avx2(im, s_256[1] + 0, s_256[0] + 0, |
574 | 15.5k | dst + dst_stride); |
575 | 15.5k | xy_y_convolve_2tap_half_pel_32_all_avx2( |
576 | 15.5k | im + 32, s_256[1] + 2, s_256[0] + 2, dst + dst_stride + 32); |
577 | 15.5k | dst += 2 * dst_stride; |
578 | 15.5k | y -= 2; |
579 | 15.5k | } while (y); |
580 | 606 | } else { |
581 | 191 | __m256i s_256[2][8]; |
582 | | |
583 | 191 | assert(w == 128); |
584 | | |
585 | 191 | load_16bit_8rows_avx2(im, 16, s_256[0]); |
586 | | |
587 | 7.71k | do { |
588 | 7.71k | xy_y_convolve_2tap_half_pel_32_all_avx2(im + 128, s_256[0] + 0, |
589 | 7.71k | s_256[1] + 0, dst); |
590 | 7.71k | xy_y_convolve_2tap_half_pel_32_all_avx2(im + 160, s_256[0] + 2, |
591 | 7.71k | s_256[1] + 2, dst + 1 * 32); |
592 | 7.71k | xy_y_convolve_2tap_half_pel_32_all_avx2(im + 192, s_256[0] + 4, |
593 | 7.71k | s_256[1] + 4, dst + 2 * 32); |
594 | 7.71k | xy_y_convolve_2tap_half_pel_32_all_avx2(im + 224, s_256[0] + 6, |
595 | 7.71k | s_256[1] + 6, dst + 3 * 32); |
596 | 7.71k | im += 2 * 128; |
597 | 7.71k | xy_y_convolve_2tap_half_pel_32_all_avx2(im, s_256[1] + 0, s_256[0] + 0, |
598 | 7.71k | dst + dst_stride); |
599 | 7.71k | xy_y_convolve_2tap_half_pel_32_all_avx2( |
600 | 7.71k | im + 32, s_256[1] + 2, s_256[0] + 2, dst + dst_stride + 1 * 32); |
601 | 7.71k | xy_y_convolve_2tap_half_pel_32_all_avx2( |
602 | 7.71k | im + 64, s_256[1] + 4, s_256[0] + 4, dst + dst_stride + 2 * 32); |
603 | 7.71k | xy_y_convolve_2tap_half_pel_32_all_avx2( |
604 | 7.71k | im + 96, s_256[1] + 6, s_256[0] + 6, dst + dst_stride + 3 * 32); |
605 | 7.71k | dst += 2 * dst_stride; |
606 | 7.71k | y -= 2; |
607 | 7.71k | } while (y); |
608 | 191 | } |
609 | 21.8k | } |
610 | | |
611 | | static void convolve_2d_sr_ver_4tap_avx2( |
612 | | const int16_t *const im_block, const int32_t w, const int32_t h, |
613 | | const InterpFilterParams *const filter_params_y, const int32_t subpel_y_q4, |
614 | 1.01M | uint8_t *dst, const int32_t dst_stride) { |
615 | 1.01M | const int16_t *im = im_block; |
616 | 1.01M | int32_t y = h; |
617 | | |
618 | 1.01M | if (w == 2) { |
619 | 94.8k | __m128i coeffs_128[2], s_32[4], ss_128[2]; |
620 | | |
621 | 94.8k | prepare_coeffs_4tap_sse2(filter_params_y, subpel_y_q4, coeffs_128); |
622 | | |
623 | 94.8k | s_32[0] = _mm_cvtsi32_si128(*(int32_t *)(im + 0 * 2)); |
624 | 94.8k | s_32[1] = _mm_cvtsi32_si128(*(int32_t *)(im + 1 * 2)); |
625 | 94.8k | s_32[2] = _mm_cvtsi32_si128(*(int32_t *)(im + 2 * 2)); |
626 | | |
627 | 94.8k | const __m128i src01 = _mm_unpacklo_epi32(s_32[0], s_32[1]); |
628 | 94.8k | const __m128i src12 = _mm_unpacklo_epi32(s_32[1], s_32[2]); |
629 | | |
630 | 94.8k | ss_128[0] = _mm_unpacklo_epi16(src01, src12); |
631 | | |
632 | 161k | do { |
633 | 161k | const __m128i res = |
634 | 161k | xy_y_convolve_4tap_2x2_sse2(im, s_32, ss_128, coeffs_128); |
635 | 161k | xy_y_round_store_2x2_sse2(res, dst, dst_stride); |
636 | 161k | im += 2 * 2; |
637 | 161k | dst += 2 * dst_stride; |
638 | 161k | y -= 2; |
639 | 161k | } while (y); |
640 | 921k | } else { |
641 | 921k | __m256i coeffs_256[2]; |
642 | | |
643 | 921k | prepare_coeffs_4tap_avx2(filter_params_y, subpel_y_q4, coeffs_256); |
644 | | |
645 | 921k | if (w == 4) { |
646 | 432k | __m128i s_64[4]; |
647 | 432k | __m256i s_256[2], ss_256[2]; |
648 | | |
649 | 432k | s_64[0] = _mm_loadl_epi64((__m128i *)(im + 0 * 4)); |
650 | 432k | s_64[1] = _mm_loadl_epi64((__m128i *)(im + 1 * 4)); |
651 | 432k | s_64[2] = _mm_loadl_epi64((__m128i *)(im + 2 * 4)); |
652 | | |
653 | | // Load lines a and b. Line a to lower 128, line b to upper 128 |
654 | 432k | s_256[0] = _mm256_setr_m128i(s_64[0], s_64[1]); |
655 | 432k | s_256[1] = _mm256_setr_m128i(s_64[1], s_64[2]); |
656 | | |
657 | 432k | ss_256[0] = _mm256_unpacklo_epi16(s_256[0], s_256[1]); |
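| | // s_256[0] holds rows (0,1) and s_256[1] rows (1,2), so the unpacklo
| | // above interleaves vertically adjacent pixels, producing operand pairs
| | // for a multiply-add that computes two output rows per iteration.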
658 | | |
659 | 825k | do { |
660 | 825k | const __m256i res = |
661 | 825k | xy_y_convolve_4tap_4x2_avx2(im, s_64, ss_256, coeffs_256); |
662 | 825k | xy_y_round_store_4x2_avx2(res, dst, dst_stride); |
663 | 825k | im += 2 * 4; |
664 | 825k | dst += 2 * dst_stride; |
665 | 825k | y -= 2; |
666 | 825k | } while (y); |
667 | 488k | } else if (w == 8) { |
668 | 341k | __m256i s_256[4], r[2]; |
669 | | |
670 | 341k | s_256[0] = _mm256_loadu_si256((__m256i *)(im + 0 * 8)); |
671 | 341k | s_256[1] = _mm256_loadu_si256((__m256i *)(im + 1 * 8)); |
672 | | |
673 | 341k | if (subpel_y_q4 != 8) { |
674 | 285k | __m256i ss_256[4]; |
675 | | |
676 | 285k | ss_256[0] = _mm256_unpacklo_epi16(s_256[0], s_256[1]); |
677 | 285k | ss_256[2] = _mm256_unpackhi_epi16(s_256[0], s_256[1]); |
678 | | |
679 | 536k | do { |
680 | 536k | xy_y_convolve_4tap_8x2_avx2(im, ss_256, coeffs_256, r); |
681 | 536k | xy_y_round_store_8x2_avx2(r, dst, dst_stride); |
682 | 536k | im += 2 * 8; |
683 | 536k | dst += 2 * dst_stride; |
684 | 536k | y -= 2; |
685 | 536k | } while (y); |
686 | 285k | } else { |
687 | 98.2k | do { |
688 | 98.2k | xy_y_convolve_4tap_8x2_half_pel_avx2(im, coeffs_256, s_256, r); |
689 | 98.2k | xy_y_round_store_8x2_avx2(r, dst, dst_stride); |
690 | 98.2k | im += 2 * 8; |
691 | 98.2k | dst += 2 * dst_stride; |
692 | 98.2k | y -= 2; |
693 | 98.2k | } while (y); |
694 | 56.0k | } |
695 | 341k | } else if (w == 16) { |
696 | 139k | __m256i s_256[5]; |
697 | | |
698 | 139k | s_256[0] = _mm256_loadu_si256((__m256i *)(im + 0 * 16)); |
699 | 139k | s_256[1] = _mm256_loadu_si256((__m256i *)(im + 1 * 16)); |
700 | 139k | s_256[2] = _mm256_loadu_si256((__m256i *)(im + 2 * 16)); |
701 | | |
702 | 139k | if (subpel_y_q4 != 8) { |
703 | 110k | __m256i ss_256[4], tt_256[4], r[4]; |
704 | | |
705 | 110k | ss_256[0] = _mm256_unpacklo_epi16(s_256[0], s_256[1]); |
706 | 110k | ss_256[2] = _mm256_unpackhi_epi16(s_256[0], s_256[1]); |
707 | | |
708 | 110k | tt_256[0] = _mm256_unpacklo_epi16(s_256[1], s_256[2]); |
709 | 110k | tt_256[2] = _mm256_unpackhi_epi16(s_256[1], s_256[2]); |
710 | | |
711 | 271k | do { |
712 | 271k | xy_y_convolve_4tap_16x2_avx2(im, s_256, ss_256, tt_256, coeffs_256, |
713 | 271k | r); |
714 | 271k | xy_y_round_store_16x2_avx2(r, dst, dst_stride); |
715 | 271k | im += 2 * 16; |
716 | 271k | dst += 2 * dst_stride; |
717 | 271k | y -= 2; |
718 | 271k | } while (y); |
719 | 110k | } else { |
720 | 28.2k | __m256i r[4]; |
721 | | |
722 | 56.5k | do { |
723 | 56.5k | xy_y_convolve_4tap_16x2_half_pel_avx2(im, s_256, coeffs_256, r); |
724 | 56.5k | xy_y_round_store_16x2_avx2(r, dst, dst_stride); |
725 | 56.5k | im += 2 * 16; |
726 | 56.5k | dst += 2 * dst_stride; |
727 | 56.5k | y -= 2; |
728 | 56.5k | } while (y); |
729 | 28.2k | } |
730 | 139k | } else { |
731 | | /* This is a special case for OBMC. According to the AV1 spec, 4-tap |
732 | | filters are not used for width (w) > 16, but OBMC halves the block |
733 | | height when predicting from the above block: a 32x8 above block, |
734 | | for example, is predicted as 32x4, so w can exceed 16 here. */ |
735 | 8.24k | int32_t x = 0; |
736 | | |
737 | 8.24k | assert(!(w % 32)); |
738 | | |
739 | 8.35k | __m256i s_256[2][4], ss_256[2][4], tt_256[2][4], r0[4], r1[4]; |
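| | // Wide blocks are processed in 32-column tiles (the x loop), each tile
| | // as two independent 16-wide halves so the register state for both
| | // halves stays resident across the row loop.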
740 | 10.9k | do { |
741 | 10.9k | const int16_t *s = im + x; |
742 | 10.9k | uint8_t *d = dst + x; |
743 | | |
744 | 10.9k | loadu_unpack_16bit_3rows_avx2(s, w, s_256[0], ss_256[0], tt_256[0]); |
745 | 10.9k | loadu_unpack_16bit_3rows_avx2(s + 16, w, s_256[1], ss_256[1], |
746 | 10.9k | tt_256[1]); |
747 | | |
748 | 10.9k | y = h; |
749 | 217k | do { |
750 | 217k | xy_y_convolve_4tap_32x2_avx2(s, w, s_256[0], ss_256[0], tt_256[0], |
751 | 217k | coeffs_256, r0); |
752 | 217k | xy_y_convolve_4tap_32x2_avx2(s + 16, w, s_256[1], ss_256[1], |
753 | 217k | tt_256[1], coeffs_256, r1); |
754 | | |
755 | 217k | xy_y_round_store_32_avx2(r0 + 0, r1 + 0, d); |
756 | 217k | xy_y_round_store_32_avx2(r0 + 2, r1 + 2, d + dst_stride); |
757 | | |
758 | 217k | s += 2 * w; |
759 | 217k | d += 2 * dst_stride; |
760 | 217k | y -= 2; |
761 | 217k | } while (y); |
762 | | |
763 | 10.9k | x += 32; |
764 | 10.9k | } while (x < w); |
765 | 8.35k | } |
766 | 921k | } |
767 | 1.01M | } |
768 | | |
769 | | static void convolve_2d_sr_ver_6tap_avx2( |
770 | | const int16_t *const im_block, const int32_t w, const int32_t h, |
771 | | const InterpFilterParams *const filter_params_y, const int32_t subpel_y_q4, |
772 | 787k | uint8_t *dst, const int32_t dst_stride) { |
773 | 787k | const int16_t *im = im_block; |
774 | 787k | int32_t y; |
775 | | |
776 | 787k | if (w == 2) { |
777 | 65.4k | __m128i coeffs_128[3], s_32[6], ss_128[3]; |
778 | | |
779 | 65.4k | prepare_coeffs_6tap_ssse3(filter_params_y, subpel_y_q4, coeffs_128); |
780 | | |
781 | 65.4k | s_32[0] = _mm_cvtsi32_si128(*(int32_t *)(im + 0 * 2)); |
782 | 65.4k | s_32[1] = _mm_cvtsi32_si128(*(int32_t *)(im + 1 * 2)); |
783 | 65.4k | s_32[2] = _mm_cvtsi32_si128(*(int32_t *)(im + 2 * 2)); |
784 | 65.4k | s_32[3] = _mm_cvtsi32_si128(*(int32_t *)(im + 3 * 2)); |
785 | 65.4k | s_32[4] = _mm_cvtsi32_si128(*(int32_t *)(im + 4 * 2)); |
786 | | |
787 | 65.4k | const __m128i src01 = _mm_unpacklo_epi32(s_32[0], s_32[1]); |
788 | 65.4k | const __m128i src12 = _mm_unpacklo_epi32(s_32[1], s_32[2]); |
789 | 65.4k | const __m128i src23 = _mm_unpacklo_epi32(s_32[2], s_32[3]); |
790 | 65.4k | const __m128i src34 = _mm_unpacklo_epi32(s_32[3], s_32[4]); |
791 | | |
792 | 65.4k | ss_128[0] = _mm_unpacklo_epi16(src01, src12); |
793 | 65.4k | ss_128[1] = _mm_unpacklo_epi16(src23, src34); |
794 | | |
795 | 65.4k | y = h; |
796 | 261k | do { |
797 | 261k | const __m128i res = |
798 | 261k | xy_y_convolve_6tap_2x2_sse2(im, s_32, ss_128, coeffs_128); |
799 | 261k | xy_y_round_store_2x2_sse2(res, dst, dst_stride); |
800 | 261k | im += 2 * 2; |
801 | 261k | dst += 2 * dst_stride; |
802 | 261k | y -= 2; |
803 | 261k | } while (y); |
804 | 721k | } else { |
805 | 721k | __m256i coeffs_256[3]; |
806 | | |
807 | 721k | prepare_coeffs_6tap_avx2(filter_params_y, subpel_y_q4, coeffs_256); |
808 | | |
809 | 721k | if (w == 4) { |
810 | 211k | __m128i s_64[6]; |
811 | 211k | __m256i s_256[6], ss_256[3]; |
812 | | |
813 | 211k | s_64[0] = _mm_loadl_epi64((__m128i *)(im + 0 * 4)); |
814 | 211k | s_64[1] = _mm_loadl_epi64((__m128i *)(im + 1 * 4)); |
815 | 211k | s_64[2] = _mm_loadl_epi64((__m128i *)(im + 2 * 4)); |
816 | 211k | s_64[3] = _mm_loadl_epi64((__m128i *)(im + 3 * 4)); |
817 | 211k | s_64[4] = _mm_loadl_epi64((__m128i *)(im + 4 * 4)); |
818 | | |
819 | | // Load lines a and b. Line a to lower 128, line b to upper 128 |
820 | 211k | s_256[0] = _mm256_setr_m128i(s_64[0], s_64[1]); |
821 | 211k | s_256[1] = _mm256_setr_m128i(s_64[1], s_64[2]); |
822 | 211k | s_256[2] = _mm256_setr_m128i(s_64[2], s_64[3]); |
823 | 211k | s_256[3] = _mm256_setr_m128i(s_64[3], s_64[4]); |
824 | | |
825 | 211k | ss_256[0] = _mm256_unpacklo_epi16(s_256[0], s_256[1]); |
826 | 211k | ss_256[1] = _mm256_unpacklo_epi16(s_256[2], s_256[3]); |
827 | | |
828 | 211k | y = h; |
829 | 1.12M | do { |
830 | 1.12M | const __m256i res = |
831 | 1.12M | xy_y_convolve_6tap_4x2_avx2(im, s_64, ss_256, coeffs_256); |
832 | 1.12M | xy_y_round_store_4x2_avx2(res, dst, dst_stride); |
833 | 1.12M | im += 2 * 4; |
834 | 1.12M | dst += 2 * dst_stride; |
835 | 1.12M | y -= 2; |
836 | 1.12M | } while (y); |
837 | 510k | } else if (w == 8) { |
838 | 264k | __m256i s_256[6], r[2]; |
839 | | |
840 | 264k | s_256[0] = _mm256_loadu_si256((__m256i *)(im + 0 * 8)); |
841 | 264k | s_256[1] = _mm256_loadu_si256((__m256i *)(im + 1 * 8)); |
842 | 264k | s_256[2] = _mm256_loadu_si256((__m256i *)(im + 2 * 8)); |
843 | 264k | s_256[3] = _mm256_loadu_si256((__m256i *)(im + 3 * 8)); |
844 | 264k | y = h; |
845 | | |
846 | 264k | if (subpel_y_q4 != 8) { |
847 | 206k | __m256i ss_256[6]; |
848 | | |
849 | 206k | ss_256[0] = _mm256_unpacklo_epi16(s_256[0], s_256[1]); |
850 | 206k | ss_256[1] = _mm256_unpacklo_epi16(s_256[2], s_256[3]); |
851 | | |
852 | 206k | ss_256[3] = _mm256_unpackhi_epi16(s_256[0], s_256[1]); |
853 | 206k | ss_256[4] = _mm256_unpackhi_epi16(s_256[2], s_256[3]); |
854 | | |
855 | 1.09M | do { |
856 | 1.09M | xy_y_convolve_6tap_8x2_avx2(im, ss_256, coeffs_256, r); |
857 | 1.09M | xy_y_round_store_8x2_avx2(r, dst, dst_stride); |
858 | 1.09M | im += 2 * 8; |
859 | 1.09M | dst += 2 * dst_stride; |
860 | 1.09M | y -= 2; |
861 | 1.09M | } while (y); |
862 | 206k | } else { |
863 | 321k | do { |
864 | 321k | xy_y_convolve_6tap_8x2_half_pel_avx2(im, coeffs_256, s_256, r); |
865 | 321k | xy_y_round_store_8x2_avx2(r, dst, dst_stride); |
866 | 321k | im += 2 * 8; |
867 | 321k | dst += 2 * dst_stride; |
868 | 321k | y -= 2; |
869 | 321k | } while (y); |
870 | 57.5k | } |
871 | 264k | } else if (w == 16) { |
872 | 173k | __m256i s_256[6]; |
873 | | |
874 | 173k | s_256[0] = _mm256_loadu_si256((__m256i *)(im + 0 * 16)); |
875 | 173k | s_256[1] = _mm256_loadu_si256((__m256i *)(im + 1 * 16)); |
876 | 173k | s_256[2] = _mm256_loadu_si256((__m256i *)(im + 2 * 16)); |
877 | 173k | s_256[3] = _mm256_loadu_si256((__m256i *)(im + 3 * 16)); |
878 | 173k | s_256[4] = _mm256_loadu_si256((__m256i *)(im + 4 * 16)); |
879 | 173k | y = h; |
880 | | |
881 | 173k | if (subpel_y_q4 != 8) { |
882 | 130k | __m256i ss_256[6], tt_256[6], r[4]; |
883 | | |
884 | 130k | ss_256[0] = _mm256_unpacklo_epi16(s_256[0], s_256[1]); |
885 | 130k | ss_256[1] = _mm256_unpacklo_epi16(s_256[2], s_256[3]); |
886 | 130k | ss_256[3] = _mm256_unpackhi_epi16(s_256[0], s_256[1]); |
887 | 130k | ss_256[4] = _mm256_unpackhi_epi16(s_256[2], s_256[3]); |
888 | | |
889 | 130k | tt_256[0] = _mm256_unpacklo_epi16(s_256[1], s_256[2]); |
890 | 130k | tt_256[1] = _mm256_unpacklo_epi16(s_256[3], s_256[4]); |
891 | 130k | tt_256[3] = _mm256_unpackhi_epi16(s_256[1], s_256[2]); |
892 | 130k | tt_256[4] = _mm256_unpackhi_epi16(s_256[3], s_256[4]); |
893 | | |
894 | 911k | do { |
895 | 911k | xy_y_convolve_6tap_16x2_avx2(im, 16, s_256, ss_256, tt_256, |
896 | 911k | coeffs_256, r); |
897 | 911k | xy_y_round_store_16x2_avx2(r, dst, dst_stride); |
898 | 911k | im += 2 * 16; |
899 | 911k | dst += 2 * dst_stride; |
900 | 911k | y -= 2; |
901 | 911k | } while (y); |
902 | 130k | } else { |
903 | 43.0k | __m256i ss_256[4], r[4]; |
904 | | |
905 | 302k | do { |
906 | 302k | xy_y_convolve_6tap_16x2_half_pel_avx2(im, 16, s_256, ss_256, |
907 | 302k | coeffs_256, r); |
908 | 302k | xy_y_round_store_16x2_avx2(r, dst, dst_stride); |
909 | | |
910 | 302k | im += 2 * 16; |
911 | 302k | dst += 2 * dst_stride; |
912 | 302k | y -= 2; |
913 | 302k | } while (y); |
914 | 43.0k | } |
915 | 173k | } else { |
916 | 72.3k | int32_t x = 0; |
917 | | |
918 | 72.3k | assert(!(w % 32)); |
919 | | |
920 | 72.4k | __m256i s_256[2][6], ss_256[2][6], tt_256[2][6], r0[4], r1[4]; |
921 | | |
922 | 87.9k | do { |
923 | 87.9k | const int16_t *s = im + x; |
924 | 87.9k | uint8_t *d = dst + x; |
925 | | |
926 | 87.9k | loadu_unpack_16bit_5rows_avx2(s, w, s_256[0], ss_256[0], tt_256[0]); |
927 | 87.9k | loadu_unpack_16bit_5rows_avx2(s + 16, w, s_256[1], ss_256[1], |
928 | 87.9k | tt_256[1]); |
929 | | |
930 | 87.9k | y = h; |
931 | 1.52M | do { |
932 | 1.52M | xy_y_convolve_6tap_16x2_avx2(s, w, s_256[0], ss_256[0], tt_256[0], |
933 | 1.52M | coeffs_256, r0); |
934 | 1.52M | xy_y_convolve_6tap_16x2_avx2(s + 16, w, s_256[1], ss_256[1], |
935 | 1.52M | tt_256[1], coeffs_256, r1); |
936 | | |
937 | 1.52M | xy_y_round_store_32_avx2(r0 + 0, r1 + 0, d); |
938 | 1.52M | xy_y_round_store_32_avx2(r0 + 2, r1 + 2, d + dst_stride); |
939 | | |
940 | 1.52M | s += 2 * w; |
941 | 1.52M | d += 2 * dst_stride; |
942 | 1.52M | y -= 2; |
943 | 1.52M | } while (y); |
944 | | |
945 | 87.9k | x += 32; |
946 | 87.9k | } while (x < w); |
947 | 72.4k | } |
948 | 721k | } |
949 | 787k | } |
950 | | |
951 | | static void convolve_2d_sr_ver_8tap_avx2( |
952 | | const int16_t *const im_block, const int32_t w, const int32_t h, |
953 | | const InterpFilterParams *const filter_params_y, const int32_t subpel_y_q4, |
954 | 65.1k | uint8_t *dst, const int32_t dst_stride) { |
955 | 65.1k | const int16_t *im = im_block; |
956 | 65.1k | int32_t y; |
957 | | |
958 | 65.1k | if (w == 2) { |
959 | 4.50k | __m128i coeffs_128[4], s_32[8], ss_128[4]; |
960 | | |
961 | 4.50k | prepare_coeffs_8tap_sse2(filter_params_y, subpel_y_q4, coeffs_128); |
962 | | |
963 | 4.50k | s_32[0] = _mm_cvtsi32_si128(*(int32_t *)(im + 0 * 2)); |
964 | 4.50k | s_32[1] = _mm_cvtsi32_si128(*(int32_t *)(im + 1 * 2)); |
965 | 4.50k | s_32[2] = _mm_cvtsi32_si128(*(int32_t *)(im + 2 * 2)); |
966 | 4.50k | s_32[3] = _mm_cvtsi32_si128(*(int32_t *)(im + 3 * 2)); |
967 | 4.50k | s_32[4] = _mm_cvtsi32_si128(*(int32_t *)(im + 4 * 2)); |
968 | 4.50k | s_32[5] = _mm_cvtsi32_si128(*(int32_t *)(im + 5 * 2)); |
969 | 4.50k | s_32[6] = _mm_cvtsi32_si128(*(int32_t *)(im + 6 * 2)); |
970 | | |
971 | 4.50k | const __m128i src01 = _mm_unpacklo_epi32(s_32[0], s_32[1]); |
972 | 4.50k | const __m128i src12 = _mm_unpacklo_epi32(s_32[1], s_32[2]); |
973 | 4.50k | const __m128i src23 = _mm_unpacklo_epi32(s_32[2], s_32[3]); |
974 | 4.50k | const __m128i src34 = _mm_unpacklo_epi32(s_32[3], s_32[4]); |
975 | 4.50k | const __m128i src45 = _mm_unpacklo_epi32(s_32[4], s_32[5]); |
976 | 4.50k | const __m128i src56 = _mm_unpacklo_epi32(s_32[5], s_32[6]); |
977 | | |
978 | 4.50k | ss_128[0] = _mm_unpacklo_epi16(src01, src12); |
979 | 4.50k | ss_128[1] = _mm_unpacklo_epi16(src23, src34); |
980 | 4.50k | ss_128[2] = _mm_unpacklo_epi16(src45, src56); |
981 | | |
982 | 4.50k | y = h; |
983 | 18.0k | do { |
984 | 18.0k | const __m128i res = |
985 | 18.0k | xy_y_convolve_8tap_2x2_sse2(im, s_32, ss_128, coeffs_128); |
986 | 18.0k | xy_y_round_store_2x2_sse2(res, dst, dst_stride); |
987 | 18.0k | im += 2 * 2; |
988 | 18.0k | dst += 2 * dst_stride; |
989 | 18.0k | y -= 2; |
990 | 18.0k | } while (y); |
991 | 60.6k | } else { |
992 | 60.6k | __m256i coeffs_256[4]; |
993 | | |
994 | 60.6k | prepare_coeffs_8tap_avx2(filter_params_y, subpel_y_q4, coeffs_256); |
995 | | |
996 | 60.6k | if (w == 4) { |
997 | 14.2k | __m128i s_64[8]; |
998 | 14.2k | __m256i s_256[8], ss_256[4]; |
999 | | |
1000 | 14.2k | s_64[0] = _mm_loadl_epi64((__m128i *)(im + 0 * 4)); |
1001 | 14.2k | s_64[1] = _mm_loadl_epi64((__m128i *)(im + 1 * 4)); |
1002 | 14.2k | s_64[2] = _mm_loadl_epi64((__m128i *)(im + 2 * 4)); |
1003 | 14.2k | s_64[3] = _mm_loadl_epi64((__m128i *)(im + 3 * 4)); |
1004 | 14.2k | s_64[4] = _mm_loadl_epi64((__m128i *)(im + 4 * 4)); |
1005 | 14.2k | s_64[5] = _mm_loadl_epi64((__m128i *)(im + 5 * 4)); |
1006 | 14.2k | s_64[6] = _mm_loadl_epi64((__m128i *)(im + 6 * 4)); |
1007 | | |
1008 | | // Load lines a and b. Line a to lower 128, line b to upper 128 |
1009 | 14.2k | s_256[0] = _mm256_setr_m128i(s_64[0], s_64[1]); |
1010 | 14.2k | s_256[1] = _mm256_setr_m128i(s_64[1], s_64[2]); |
1011 | 14.2k | s_256[2] = _mm256_setr_m128i(s_64[2], s_64[3]); |
1012 | 14.2k | s_256[3] = _mm256_setr_m128i(s_64[3], s_64[4]); |
1013 | 14.2k | s_256[4] = _mm256_setr_m128i(s_64[4], s_64[5]); |
1014 | 14.2k | s_256[5] = _mm256_setr_m128i(s_64[5], s_64[6]); |
1015 | | |
1016 | 14.2k | ss_256[0] = _mm256_unpacklo_epi16(s_256[0], s_256[1]); |
1017 | 14.2k | ss_256[1] = _mm256_unpacklo_epi16(s_256[2], s_256[3]); |
1018 | 14.2k | ss_256[2] = _mm256_unpacklo_epi16(s_256[4], s_256[5]); |
1019 | | |
1020 | 14.2k | y = h; |
1021 | 74.7k | do { |
1022 | 74.7k | const __m256i res = |
1023 | 74.7k | xy_y_convolve_8tap_4x2_avx2(im, s_64, ss_256, coeffs_256); |
1024 | 74.7k | xy_y_round_store_4x2_avx2(res, dst, dst_stride); |
1025 | 74.7k | im += 2 * 4; |
1026 | 74.7k | dst += 2 * dst_stride; |
1027 | 74.7k | y -= 2; |
1028 | 74.7k | } while (y); |
1029 | 46.4k | } else if (w == 8) { |
1030 | 13.8k | __m256i s_256[8], r[2]; |
1031 | | |
1032 | 13.8k | s_256[0] = _mm256_loadu_si256((__m256i *)(im + 0 * 8)); |
1033 | 13.8k | s_256[1] = _mm256_loadu_si256((__m256i *)(im + 1 * 8)); |
1034 | 13.8k | s_256[2] = _mm256_loadu_si256((__m256i *)(im + 2 * 8)); |
1035 | 13.8k | s_256[3] = _mm256_loadu_si256((__m256i *)(im + 3 * 8)); |
1036 | 13.8k | s_256[4] = _mm256_loadu_si256((__m256i *)(im + 4 * 8)); |
1037 | 13.8k | s_256[5] = _mm256_loadu_si256((__m256i *)(im + 5 * 8)); |
1038 | 13.8k | y = h; |
1039 | | |
1040 | 13.8k | if (subpel_y_q4 != 8) { |
1041 | 9.14k | __m256i ss_256[8]; |
1042 | | |
1043 | 9.14k | convolve_8tap_unpack_avx2(s_256, ss_256); |
1044 | | |
1045 | 50.5k | do { |
1046 | 50.5k | xy_y_convolve_8tap_8x2_avx2(im, ss_256, coeffs_256, r); |
1047 | 50.5k | xy_y_round_store_8x2_avx2(r, dst, dst_stride); |
1048 | 50.5k | im += 2 * 8; |
1049 | 50.5k | dst += 2 * dst_stride; |
1050 | 50.5k | y -= 2; |
1051 | 50.5k | } while (y); |
1052 | 9.14k | } else { |
1053 | 28.2k | do { |
1054 | 28.2k | xy_y_convolve_8tap_8x2_half_pel_avx2(im, coeffs_256, s_256, r); |
1055 | 28.2k | xy_y_round_store_8x2_avx2(r, dst, dst_stride); |
1056 | 28.2k | im += 2 * 8; |
1057 | 28.2k | dst += 2 * dst_stride; |
1058 | 28.2k | y -= 2; |
1059 | 28.2k | } while (y); |
1060 | 4.68k | } |
1061 | 32.6k | } else if (w == 16) { |
1062 | 9.18k | __m256i s_256[8], r[4]; |
1063 | | |
1064 | 9.18k | load_16bit_7rows_avx2(im, 16, s_256); |
1065 | 9.18k | y = h; |
1066 | | |
1067 | 9.18k | if (subpel_y_q4 != 8) { |
1068 | 5.70k | __m256i ss_256[8], tt_256[8]; |
1069 | | |
1070 | 5.70k | convolve_8tap_unpack_avx2(s_256, ss_256); |
1071 | 5.70k | convolve_8tap_unpack_avx2(s_256 + 1, tt_256); |
1072 | | |
1073 | 42.3k | do { |
1074 | 42.3k | xy_y_convolve_8tap_16x2_avx2(im, 16, coeffs_256, s_256, ss_256, |
1075 | 42.3k | tt_256, r); |
1076 | 42.3k | xy_y_round_store_16x2_avx2(r, dst, dst_stride); |
1077 | | |
1078 | 42.3k | im += 2 * 16; |
1079 | 42.3k | dst += 2 * dst_stride; |
1080 | 42.3k | y -= 2; |
1081 | 42.3k | } while (y); |
1082 | 5.70k | } else { |
1083 | 24.8k | do { |
1084 | 24.8k | xy_y_convolve_8tap_16x2_half_pel_avx2(im, 16, coeffs_256, s_256, r); |
1085 | 24.8k | xy_y_round_store_16x2_avx2(r, dst, dst_stride); |
1086 | | |
1087 | 24.8k | im += 2 * 16; |
1088 | 24.8k | dst += 2 * dst_stride; |
1089 | 24.8k | y -= 2; |
1090 | 24.8k | } while (y); |
1091 | 3.48k | } |
1092 | 23.4k | } else { |
1093 | 23.4k | int32_t x = 0; |
1094 | 23.4k | __m256i s_256[2][8], r0[4], r1[4]; |
1095 | | |
1096 | 23.4k | assert(!(w % 32)); |
1097 | | |
1098 | 23.4k | __m256i ss_256[2][8], tt_256[2][8]; |
1099 | | |
1100 | 31.5k | do { |
1101 | 31.5k | const int16_t *s = im + x; |
1102 | 31.5k | uint8_t *d = dst + x; |
1103 | | |
1104 | 31.5k | load_16bit_7rows_avx2(s, w, s_256[0]); |
1105 | 31.5k | convolve_8tap_unpack_avx2(s_256[0], ss_256[0]); |
1106 | 31.5k | convolve_8tap_unpack_avx2(s_256[0] + 1, tt_256[0]); |
1107 | | |
1108 | 31.5k | load_16bit_7rows_avx2(s + 16, w, s_256[1]); |
1109 | 31.5k | convolve_8tap_unpack_avx2(s_256[1], ss_256[1]); |
1110 | 31.5k | convolve_8tap_unpack_avx2(s_256[1] + 1, tt_256[1]); |
1111 | | |
1112 | 31.5k | y = h; |
1113 | 460k | do { |
1114 | 460k | xy_y_convolve_8tap_16x2_avx2(s, w, coeffs_256, s_256[0], ss_256[0], |
1115 | 460k | tt_256[0], r0); |
1116 | 460k | xy_y_convolve_8tap_16x2_avx2(s + 16, w, coeffs_256, s_256[1], |
1117 | 460k | ss_256[1], tt_256[1], r1); |
1118 | 460k | xy_y_round_store_32_avx2(r0 + 0, r1 + 0, d); |
1119 | 460k | xy_y_round_store_32_avx2(r0 + 2, r1 + 2, d + dst_stride); |
1120 | | |
1121 | 460k | s += 2 * w; |
1122 | 460k | d += 2 * dst_stride; |
1123 | 460k | y -= 2; |
1124 | 460k | } while (y); |
1125 | | |
1126 | 31.5k | x += 32; |
1127 | 31.5k | } while (x < w); |
1128 | 23.4k | } |
1129 | 60.6k | } |
1130 | 65.1k | } |
1131 | | |
1132 | | typedef void (*Convolve2dSrHorTapFunc)( |
1133 | | const uint8_t *const src, const int32_t src_stride, const int32_t w, |
1134 | | const int32_t h, const InterpFilterParams *const filter_params_x, |
1135 | | const int32_t subpel_x_q4, int16_t *const im_block); |
1136 | | |
1137 | | typedef void (*Convolve2dSrVerTapFunc)( |
1138 | | const int16_t *const im_block, const int32_t w, const int32_t h, |
1139 | | const InterpFilterParams *const filter_params_y, const int32_t subpel_y_q4, |
1140 | | uint8_t *dst, const int32_t dst_stride); |
1141 | | |
1142 | | static AOM_FORCE_INLINE void av1_convolve_2d_sr_specialized_avx2( |
1143 | | const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, |
1144 | | int32_t w, int32_t h, const InterpFilterParams *filter_params_x, |
1145 | | const InterpFilterParams *filter_params_y, const int32_t subpel_x_q4, |
1146 | 1.94M | const int32_t subpel_y_q4, ConvolveParams *conv_params) { |
1147 | 1.94M | static const Convolve2dSrHorTapFunc |
1148 | 1.94M | convolve_2d_sr_hor_tap_func_table[MAX_FILTER_TAP + 1] = { |
1149 | 1.94M | NULL, |
1150 | 1.94M | NULL, |
1151 | 1.94M | convolve_2d_sr_hor_2tap_avx2, |
1152 | 1.94M | NULL, |
1153 | 1.94M | convolve_2d_sr_hor_4tap_ssse3, |
1154 | 1.94M | NULL, |
1155 | 1.94M | convolve_2d_sr_hor_6tap_avx2, |
1156 | 1.94M | NULL, |
1157 | 1.94M | convolve_2d_sr_hor_8tap_avx2 |
1158 | 1.94M | }; |
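| | // Odd tap counts never occur for the horizontal pass (get_filter_tap
| | // returns 2, 4, 6 or 8 here, with 12 asserted away below), so those
| | // slots stay NULL.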
1159 | 1.94M | static const Convolve2dSrVerTapFunc |
1160 | 1.94M | convolve_2d_sr_ver_tap_func_table[MAX_FILTER_TAP + 1] = { |
1161 | 1.94M | NULL, |
1162 | 1.94M | convolve_2d_sr_ver_2tap_half_avx2, |
1163 | 1.94M | convolve_2d_sr_ver_2tap_avx2, |
1164 | 1.94M | convolve_2d_sr_ver_4tap_avx2, |
1165 | 1.94M | convolve_2d_sr_ver_4tap_avx2, |
1166 | 1.94M | convolve_2d_sr_ver_6tap_avx2, |
1167 | 1.94M | convolve_2d_sr_ver_6tap_avx2, |
1168 | 1.94M | convolve_2d_sr_ver_8tap_avx2, |
1169 | 1.94M | convolve_2d_sr_ver_8tap_avx2 |
1170 | 1.94M | }; |
1171 | 1.94M | const int32_t tap_x = get_filter_tap(filter_params_x, subpel_x_q4); |
1172 | 1.94M | const int32_t tap_y = get_filter_tap(filter_params_y, subpel_y_q4); |
1173 | | |
1174 | 1.94M | assert(tap_x != 12 && tap_y != 12); |
1175 | | |
1176 | 1.94M | const uint8_t *src_ptr = src - ((tap_y >> 1) - 1) * src_stride; |
1177 | | // Note: im_block is 8-pixel interlaced for width 32 and up, to avoid data |
1178 | | // permutation. |
1179 | 1.94M | DECLARE_ALIGNED(32, int16_t, |
1180 | 1.94M | im_block[(MAX_SB_SIZE + MAX_FILTER_TAP) * MAX_SB_SIZE]); |
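| | // im_block receives up to h + tap_y intermediate rows (h + tap_y - 1
| | // are strictly needed; the extra row keeps the count even for the
| | // kernels that emit two rows per iteration), each w 16-bit samples wide.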
1181 | | |
1182 | 1.94M | (void)conv_params; |
1183 | | |
1184 | 1.94M | assert(conv_params->round_0 == 3); |
1185 | 1.94M | assert(conv_params->round_1 == 11); |
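| | // conv_params is otherwise unused; the xy_x/xy_y rounding helpers assume
| | // exactly these two shift amounts, which the asserts above pin down.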
1186 | | |
1187 | | // horizontal filter |
1188 | 1.94M | int32_t hh = h + tap_y; |
1189 | 1.94M | assert(!(hh % 2)); |
1190 | | |
1191 | 1.94M | convolve_2d_sr_hor_tap_func_table[tap_x]( |
1192 | 1.94M | src_ptr, src_stride, w, hh, filter_params_x, subpel_x_q4, im_block); |
1193 | | |
1194 | | // vertical filter |
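| | // Index trick: for a 2-tap half-pel filter, tap_y - (subpel_y_q4 == 8)
| | // selects slot 1, the dedicated convolve_2d_sr_ver_2tap_half_avx2
| | // kernel. For 4/6/8 taps the adjacent table slots point at the same
| | // function, which branches on subpel_y_q4 internally.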
1195 | 1.94M | convolve_2d_sr_ver_tap_func_table[tap_y - (subpel_y_q4 == 8)]( |
1196 | 1.94M | im_block, w, h, filter_params_y, subpel_y_q4, dst, dst_stride); |
1197 | 1.94M | } |
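| | /*
| |  * For reference, a scalar sketch of the arithmetic the kernels above
| |  * vectorize (illustrative only; the bit-depth offset terms that keep
| |  * intermediates non-negative are omitted, and fo_x/fo_y denote
| |  * taps / 2 - 1 for each pass):
| |  *
| |  *   im[y][x]  = (sum_k fx[k] * src[y][x + k - fo_x] + 4) >> 3;
| |  *   dst[y][x] = clip_pixel(
| |  *       (sum_k fy[k] * im[y + k - fo_y][x] + 1024) >> 11);
| |  */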
1198 | | |
1199 | | #endif // THIRD_PARTY_SVT_AV1_CONVOLVE_2D_AVX2_H_ |