/src/aom/third_party/SVT-AV1/convolve_2d_avx2.h
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * Copyright (c) 2017, Alliance for Open Media. All rights reserved |
3 | | * |
4 | | * This source code is subject to the terms of the BSD 2 Clause License and |
5 | | * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License |
6 | | * was not distributed with this source code in the LICENSE file, you can |
7 | | * obtain it at www.aomedia.org/license/software. If the Alliance for Open |
8 | | * Media Patent License 1.0 was not distributed with this source code in the |
9 | | * PATENTS file, you can obtain it at www.aomedia.org/license/patent. |
10 | | */ |
11 | | |
12 | | #ifndef THIRD_PARTY_SVT_AV1_CONVOLVE_2D_AVX2_H_ |
13 | | #define THIRD_PARTY_SVT_AV1_CONVOLVE_2D_AVX2_H_ |
14 | | |
15 | | #include "convolve_avx2.h" |
16 | | |
// Horizontal (first) pass of the separable 2-D convolution for a 2-tap
// x-filter. Filters rows of 8-bit source pixels and stores the rounded
// 16-bit intermediate results into im_block for the vertical pass.
// - src/src_stride: source pixels (no left adjustment needed for 2 taps).
// - w/h: block width and height; w is a power of two in [2, 128] and the
//   row loops assume h is even (rows are processed in pairs) — presumed
//   guaranteed by the caller.
// - filter_params_x/subpel_x_q4: select the 2-tap horizontal filter phase.
// - im_block: w * h int16_t intermediate buffer, written in raster order.
static void convolve_2d_sr_hor_2tap_avx2(
    const uint8_t *const src, const int32_t src_stride, const int32_t w,
    const int32_t h, const InterpFilterParams *const filter_params_x,
    const int32_t subpel_x_q4, int16_t *const im_block) {
  const uint8_t *src_ptr = src;
  int32_t y = h;
  int16_t *im = im_block;

  if (w <= 8) {
    // Narrow widths: 128-bit (SSE) path.
    __m128i coeffs_128;

    prepare_half_coeffs_2tap_ssse3(filter_params_x, subpel_x_q4, &coeffs_128);

    if (w == 2) {
      // Two 2-pixel rows per iteration.
      do {
        const __m128i r =
            x_convolve_2tap_2x2_sse4_1(src_ptr, src_stride, &coeffs_128);
        xy_x_round_store_2x2_sse2(r, im);
        src_ptr += 2 * src_stride;
        im += 2 * 2;
        y -= 2;
      } while (y);
    } else if (w == 4) {
      do {
        const __m128i r =
            x_convolve_2tap_4x2_ssse3(src_ptr, src_stride, &coeffs_128);
        xy_x_round_store_4x2_sse2(r, im);
        src_ptr += 2 * src_stride;
        im += 2 * 4;
        y -= 2;
      } while (y);
    } else {
      assert(w == 8);

      do {
        __m128i r[2];

        x_convolve_2tap_8x2_ssse3(src_ptr, src_stride, &coeffs_128, r);
        xy_x_round_store_8x2_sse2(r, im);
        src_ptr += 2 * src_stride;
        im += 2 * 8;
        y -= 2;
      } while (y);
    }
  } else {
    // Wide widths: 256-bit (AVX2) path.
    __m256i coeffs_256;

    prepare_half_coeffs_2tap_avx2(filter_params_x, subpel_x_q4, &coeffs_256);

    if (w == 16) {
      do {
        __m256i r[2];

        x_convolve_2tap_16x2_avx2(src_ptr, src_stride, &coeffs_256, r);
        xy_x_round_store_32_avx2(r, im);
        src_ptr += 2 * src_stride;
        im += 2 * 16;
        y -= 2;
      } while (y);
    } else if (w == 32) {
      // From 32 wide on, one row per iteration in 32-pixel chunks.
      do {
        xy_x_2tap_32_avx2(src_ptr, &coeffs_256, im);
        src_ptr += src_stride;
        im += 32;
      } while (--y);
    } else if (w == 64) {
      do {
        xy_x_2tap_32_avx2(src_ptr + 0 * 32, &coeffs_256, im + 0 * 32);
        xy_x_2tap_32_avx2(src_ptr + 1 * 32, &coeffs_256, im + 1 * 32);
        src_ptr += src_stride;
        im += 64;
      } while (--y);
    } else {
      assert(w == 128);

      do {
        xy_x_2tap_32_avx2(src_ptr + 0 * 32, &coeffs_256, im + 0 * 32);
        xy_x_2tap_32_avx2(src_ptr + 1 * 32, &coeffs_256, im + 1 * 32);
        xy_x_2tap_32_avx2(src_ptr + 2 * 32, &coeffs_256, im + 2 * 32);
        xy_x_2tap_32_avx2(src_ptr + 3 * 32, &coeffs_256, im + 3 * 32);
        src_ptr += src_stride;
        im += 128;
      } while (--y);
    }
  }
}
103 | | |
// Horizontal (first) pass for a 4-tap x-filter. Despite the "_ssse3" name,
// only the w <= 4 paths are SSE; wider paths use AVX2 (see TODO below).
// src is backed up by 1 column so the 4-tap window is centered on the pixel.
// Writes rounded 16-bit intermediates to im_block (w * h, raster order).
// Row loops assume h is even — presumed guaranteed by the caller.
static void convolve_2d_sr_hor_4tap_ssse3(
    const uint8_t *const src, const int32_t src_stride, const int32_t w,
    const int32_t h, const InterpFilterParams *const filter_params_x,
    const int32_t subpel_x_q4, int16_t *const im_block) {
  const uint8_t *src_ptr = src - 1;  // center the 4-tap filter
  int32_t y = h;
  int16_t *im = im_block;

  if (w <= 4) {
    __m128i coeffs_128[2];

    prepare_half_coeffs_4tap_ssse3(filter_params_x, subpel_x_q4, coeffs_128);
    if (w == 2) {
      do {
        const __m128i r =
            x_convolve_4tap_2x2_ssse3(src_ptr, src_stride, coeffs_128);
        xy_x_round_store_2x2_sse2(r, im);
        src_ptr += 2 * src_stride;
        im += 2 * 2;
        y -= 2;
      } while (y);
    } else if (w == 4) {
      do {
        const __m128i r =
            x_convolve_4tap_4x2_ssse3(src_ptr, src_stride, coeffs_128);
        xy_x_round_store_4x2_sse2(r, im);
        src_ptr += 2 * src_stride;
        im += 2 * 4;
        y -= 2;
      } while (y);
    }
  } else {
    // TODO(chiyotsai@google.com): Add better optimization
    __m256i coeffs_256[2], filt_256[2];

    prepare_half_coeffs_4tap_avx2(filter_params_x, subpel_x_q4, coeffs_256);
    // NOTE(review): aligned loads here vs. loadu in the 6/8-tap variants —
    // presumably the filt tables are 32-byte aligned; verify at declaration.
    filt_256[0] = _mm256_load_si256((__m256i const *)filt1_global_avx2);
    filt_256[1] = _mm256_load_si256((__m256i const *)filt2_global_avx2);

    if (w == 8) {
      do {
        __m256i res =
            x_convolve_4tap_8x2_avx2(src_ptr, src_stride, coeffs_256, filt_256);
        xy_x_round_store_8x2_avx2(res, im);

        src_ptr += 2 * src_stride;
        im += 2 * 8;
        y -= 2;
      } while (y);
    } else if (w == 16) {
      do {
        __m256i r[2];

        x_convolve_4tap_16x2_avx2(src_ptr, src_stride, coeffs_256, filt_256, r);
        xy_x_round_store_32_avx2(r, im);
        src_ptr += 2 * src_stride;
        im += 2 * 16;
        y -= 2;
      } while (y);
    } else if (w == 32) {
      // One row per iteration in 32-pixel chunks from here on.
      do {
        xy_x_4tap_32_avx2(src_ptr, coeffs_256, filt_256, im);

        src_ptr += src_stride;
        im += 32;
      } while (--y);
    } else if (w == 64) {
      do {
        xy_x_4tap_32_avx2(src_ptr, coeffs_256, filt_256, im);
        xy_x_4tap_32_avx2(src_ptr + 32, coeffs_256, filt_256, im + 32);
        src_ptr += src_stride;
        im += 64;
      } while (--y);
    } else {
      assert(w == 128);

      do {
        xy_x_4tap_32_avx2(src_ptr, coeffs_256, filt_256, im);
        xy_x_4tap_32_avx2(src_ptr + 32, coeffs_256, filt_256, im + 32);
        xy_x_4tap_32_avx2(src_ptr + 64, coeffs_256, filt_256, im + 64);
        xy_x_4tap_32_avx2(src_ptr + 96, coeffs_256, filt_256, im + 96);
        src_ptr += src_stride;
        im += 128;
      } while (--y);
    }
  }
}
191 | | |
// Horizontal (first) pass for a 6-tap x-filter. src is backed up by
// 2 columns to center the 6-tap window. Writes rounded 16-bit
// intermediates to im_block (w * h, raster order). Row loops assume h is
// even — presumed guaranteed by the caller.
// The w <= 4 branch shows zero coverage counts; presumably small widths
// are routed to the 4-tap kernel by the dispatcher — verify against caller.
static void convolve_2d_sr_hor_6tap_avx2(
    const uint8_t *const src, const int32_t src_stride, const int32_t w,
    const int32_t h, const InterpFilterParams *const filter_params_x,
    const int32_t subpel_x_q4, int16_t *const im_block) {
  const uint8_t *src_ptr = src - 2;  // center the 6-tap filter
  int32_t y = h;
  int16_t *im = im_block;

  if (w <= 4) {
    __m128i coeffs_128[3];

    prepare_half_coeffs_6tap_ssse3(filter_params_x, subpel_x_q4, coeffs_128);
    if (w == 2) {
      do {
        const __m128i r =
            x_convolve_6tap_2x2_ssse3(src_ptr, src_stride, coeffs_128);
        xy_x_round_store_2x2_sse2(r, im);
        src_ptr += 2 * src_stride;
        im += 2 * 2;
        y -= 2;
      } while (y);
    } else if (w == 4) {
      do {
        const __m128i r =
            x_convolve_6tap_4x2_ssse3(src_ptr, src_stride, coeffs_128);
        xy_x_round_store_4x2_sse2(r, im);
        src_ptr += 2 * src_stride;
        im += 2 * 4;
        y -= 2;
      } while (y);
    }
  } else {
    __m256i coeffs_256[3], filt_256[3];

    // Shuffle masks for extracting the three tap pairs from each row.
    filt_256[0] = _mm256_loadu_si256((__m256i const *)filt1_global_avx2);
    filt_256[1] = _mm256_loadu_si256((__m256i const *)filt2_global_avx2);
    filt_256[2] = _mm256_loadu_si256((__m256i const *)filt3_global_avx2);

    prepare_half_coeffs_6tap_avx2(filter_params_x, subpel_x_q4, coeffs_256);

    if (w == 8) {
      do {
        const __m256i res =
            x_convolve_6tap_8x2_avx2(src_ptr, src_stride, coeffs_256, filt_256);
        xy_x_round_store_8x2_avx2(res, im);

        src_ptr += 2 * src_stride;
        im += 2 * 8;
        y -= 2;
      } while (y);
    } else if (w == 16) {
      do {
        __m256i r[2];

        x_convolve_6tap_16x2_avx2(src_ptr, src_stride, coeffs_256, filt_256, r);
        xy_x_round_store_32_avx2(r, im);
        src_ptr += 2 * src_stride;
        im += 2 * 16;
        y -= 2;
      } while (y);
    } else if (w == 32) {
      // One row per iteration in 32-pixel chunks from here on.
      do {
        xy_x_6tap_32_avx2(src_ptr, coeffs_256, filt_256, im);
        src_ptr += src_stride;
        im += 32;
      } while (--y);
    } else if (w == 64) {
      do {
        xy_x_6tap_32_avx2(src_ptr, coeffs_256, filt_256, im);
        xy_x_6tap_32_avx2(src_ptr + 32, coeffs_256, filt_256, im + 32);
        src_ptr += src_stride;
        im += 64;
      } while (--y);
    } else {
      assert(w == 128);

      do {
        xy_x_6tap_32_avx2(src_ptr, coeffs_256, filt_256, im);
        xy_x_6tap_32_avx2(src_ptr + 32, coeffs_256, filt_256, im + 32);
        xy_x_6tap_32_avx2(src_ptr + 64, coeffs_256, filt_256, im + 64);
        xy_x_6tap_32_avx2(src_ptr + 96, coeffs_256, filt_256, im + 96);
        src_ptr += src_stride;
        im += 128;
      } while (--y);
    }
  }
}
279 | | |
280 | | static void convolve_2d_sr_hor_8tap_avx2( |
281 | | const uint8_t *const src, const int32_t src_stride, const int32_t w, |
282 | | const int32_t h, const InterpFilterParams *const filter_params_x, |
283 | 94.7k | const int32_t subpel_x_q4, int16_t *const im_block) { |
284 | 94.7k | const uint8_t *src_ptr = src - 3; |
285 | 94.7k | int32_t y = h; |
286 | 94.7k | int16_t *im = im_block; |
287 | 94.7k | __m256i coeffs_256[4], filt_256[4]; |
288 | | |
289 | 94.7k | filt_256[0] = _mm256_loadu_si256((__m256i const *)filt1_global_avx2); |
290 | 94.7k | filt_256[1] = _mm256_loadu_si256((__m256i const *)filt2_global_avx2); |
291 | 94.7k | filt_256[2] = _mm256_loadu_si256((__m256i const *)filt3_global_avx2); |
292 | 94.7k | filt_256[3] = _mm256_loadu_si256((__m256i const *)filt4_global_avx2); |
293 | | |
294 | 94.7k | prepare_half_coeffs_8tap_avx2(filter_params_x, subpel_x_q4, coeffs_256); |
295 | | |
296 | 94.7k | if (w == 8) { |
297 | 361k | do { |
298 | 361k | const __m256i res = |
299 | 361k | x_convolve_8tap_8x2_avx2(src_ptr, src_stride, coeffs_256, filt_256); |
300 | 361k | xy_x_round_store_8x2_avx2(res, im); |
301 | 361k | src_ptr += 2 * src_stride; |
302 | 361k | im += 2 * 8; |
303 | 361k | y -= 2; |
304 | 361k | } while (y); |
305 | 52.5k | } else if (w == 16) { |
306 | 213k | do { |
307 | 213k | __m256i r[2]; |
308 | | |
309 | 213k | x_convolve_8tap_16x2_avx2(src_ptr, src_stride, coeffs_256, filt_256, r); |
310 | 213k | xy_x_round_store_32_avx2(r, im); |
311 | 213k | src_ptr += 2 * src_stride; |
312 | 213k | im += 2 * 16; |
313 | 213k | y -= 2; |
314 | 213k | } while (y); |
315 | 23.0k | } else if (w == 32) { |
316 | 360k | do { |
317 | 360k | xy_x_8tap_32_avx2(src_ptr, coeffs_256, filt_256, im); |
318 | 360k | src_ptr += src_stride; |
319 | 360k | im += 32; |
320 | 360k | } while (--y); |
321 | 13.3k | } else if (w == 64) { |
322 | 248k | do { |
323 | 248k | xy_x_8tap_32_avx2(src_ptr, coeffs_256, filt_256, im); |
324 | 248k | xy_x_8tap_32_avx2(src_ptr + 32, coeffs_256, filt_256, im + 32); |
325 | 248k | src_ptr += src_stride; |
326 | 248k | im += 64; |
327 | 248k | } while (--y); |
328 | 5.48k | } else { |
329 | 343 | assert(w == 128); |
330 | | |
331 | 36.8k | do { |
332 | 36.8k | xy_x_8tap_32_avx2(src_ptr, coeffs_256, filt_256, im); |
333 | 36.8k | xy_x_8tap_32_avx2(src_ptr + 32, coeffs_256, filt_256, im + 32); |
334 | 36.8k | xy_x_8tap_32_avx2(src_ptr + 64, coeffs_256, filt_256, im + 64); |
335 | 36.8k | xy_x_8tap_32_avx2(src_ptr + 96, coeffs_256, filt_256, im + 96); |
336 | 36.8k | src_ptr += src_stride; |
337 | 36.8k | im += 128; |
338 | 36.8k | } while (--y); |
339 | 343 | } |
340 | 94.7k | } |
341 | | |
// Vertical (second) pass for a 2-tap y-filter. Reads the 16-bit
// intermediate buffer produced by the horizontal pass, filters vertically,
// rounds, packs to 8-bit and stores into dst.
// - im_block: w * (h + 1) int16_t intermediates (2 taps need one extra row)
//   — presumed; verify against the horizontal pass / dispatcher.
// - Loops process two output rows per iteration, so h is assumed even.
// The s_32/s_64/s_128/s_256 arrays carry the previous row(s) across loop
// iterations so each intermediate row is loaded only once.
static void convolve_2d_sr_ver_2tap_avx2(
    const int16_t *const im_block, const int32_t w, const int32_t h,
    const InterpFilterParams *const filter_params_y, const int32_t subpel_y_q4,
    uint8_t *dst, const int32_t dst_stride) {
  const int16_t *im = im_block;
  int32_t y = h;

  if (w <= 4) {
    __m128i coeffs_128;

    prepare_coeffs_2tap_sse2(filter_params_y, subpel_y_q4, &coeffs_128);

    if (w == 2) {
      __m128i s_32[2];

      // Preload row 0; the loop keeps the ping-pong state in s_32.
      s_32[0] = _mm_cvtsi32_si128(*(int32_t *)im);

      do {
        const __m128i res = xy_y_convolve_2tap_2x2_sse2(im, s_32, &coeffs_128);
        xy_y_round_store_2x2_sse2(res, dst, dst_stride);
        im += 2 * 2;
        dst += 2 * dst_stride;
        y -= 2;
      } while (y);
    } else {
      __m128i s_64[2], r[2];

      assert(w == 4);

      s_64[0] = _mm_loadl_epi64((__m128i *)im);

      do {
        xy_y_convolve_2tap_4x2_sse2(im, s_64, &coeffs_128, r);
        r[0] = xy_y_round_sse2(r[0]);
        r[1] = xy_y_round_sse2(r[1]);
        const __m128i rr = _mm_packs_epi32(r[0], r[1]);
        pack_store_4x2_sse2(rr, dst, dst_stride);
        im += 2 * 4;
        dst += 2 * dst_stride;
        y -= 2;
      } while (y);
    }
  } else {
    __m256i coeffs_256;

    prepare_coeffs_2tap_avx2(filter_params_y, subpel_y_q4, &coeffs_256);

    if (w == 8) {
      __m128i s_128[2];
      __m256i r[2];

      s_128[0] = _mm_loadu_si128((__m128i *)im);

      do {
        xy_y_convolve_2tap_8x2_avx2(im, s_128, &coeffs_256, r);
        xy_y_round_store_8x2_avx2(r, dst, dst_stride);
        im += 2 * 8;
        dst += 2 * dst_stride;
        y -= 2;
      } while (y);
    } else if (w == 16) {
      __m256i s_256[2], r[4];

      s_256[0] = _mm256_loadu_si256((__m256i *)im);

      do {
        xy_y_convolve_2tap_16x2_avx2(im, s_256, &coeffs_256, r);
        xy_y_round_store_16x2_avx2(r, dst, dst_stride);
        im += 2 * 16;
        dst += 2 * dst_stride;
        y -= 2;
      } while (y);
    } else if (w == 32) {
      __m256i s_256[2][2];

      s_256[0][0] = _mm256_loadu_si256((__m256i *)(im + 0 * 16));
      s_256[0][1] = _mm256_loadu_si256((__m256i *)(im + 1 * 16));

      // Alternate s_256[0]/s_256[1] as previous/current row state.
      do {
        xy_y_convolve_2tap_32_all_avx2(im + 32, s_256[0], s_256[1], &coeffs_256,
                                       dst);
        im += 2 * 32;
        xy_y_convolve_2tap_32_all_avx2(im, s_256[1], s_256[0], &coeffs_256,
                                       dst + dst_stride);
        dst += 2 * dst_stride;
        y -= 2;
      } while (y);
    } else if (w == 64) {
      __m256i s_256[2][4];

      s_256[0][0] = _mm256_loadu_si256((__m256i *)(im + 0 * 16));
      s_256[0][1] = _mm256_loadu_si256((__m256i *)(im + 1 * 16));
      s_256[0][2] = _mm256_loadu_si256((__m256i *)(im + 2 * 16));
      s_256[0][3] = _mm256_loadu_si256((__m256i *)(im + 3 * 16));

      do {
        xy_y_convolve_2tap_32_all_avx2(im + 64, s_256[0] + 0, s_256[1] + 0,
                                       &coeffs_256, dst);
        xy_y_convolve_2tap_32_all_avx2(im + 96, s_256[0] + 2, s_256[1] + 2,
                                       &coeffs_256, dst + 32);
        im += 2 * 64;
        xy_y_convolve_2tap_32_all_avx2(im, s_256[1] + 0, s_256[0] + 0,
                                       &coeffs_256, dst + dst_stride);
        xy_y_convolve_2tap_32_all_avx2(im + 32, s_256[1] + 2, s_256[0] + 2,
                                       &coeffs_256, dst + dst_stride + 32);
        dst += 2 * dst_stride;
        y -= 2;
      } while (y);
    } else {
      __m256i s_256[2][8];

      assert(w == 128);

      load_16bit_8rows_avx2(im, 16, s_256[0]);

      do {
        xy_y_convolve_2tap_32_all_avx2(im + 128, s_256[0] + 0, s_256[1] + 0,
                                       &coeffs_256, dst);
        xy_y_convolve_2tap_32_all_avx2(im + 160, s_256[0] + 2, s_256[1] + 2,
                                       &coeffs_256, dst + 1 * 32);
        xy_y_convolve_2tap_32_all_avx2(im + 192, s_256[0] + 4, s_256[1] + 4,
                                       &coeffs_256, dst + 2 * 32);
        xy_y_convolve_2tap_32_all_avx2(im + 224, s_256[0] + 6, s_256[1] + 6,
                                       &coeffs_256, dst + 3 * 32);
        im += 2 * 128;
        xy_y_convolve_2tap_32_all_avx2(im, s_256[1] + 0, s_256[0] + 0,
                                       &coeffs_256, dst + dst_stride);
        xy_y_convolve_2tap_32_all_avx2(im + 32, s_256[1] + 2, s_256[0] + 2,
                                       &coeffs_256, dst + dst_stride + 1 * 32);
        xy_y_convolve_2tap_32_all_avx2(im + 64, s_256[1] + 4, s_256[0] + 4,
                                       &coeffs_256, dst + dst_stride + 2 * 32);
        xy_y_convolve_2tap_32_all_avx2(im + 96, s_256[1] + 6, s_256[0] + 6,
                                       &coeffs_256, dst + dst_stride + 3 * 32);
        dst += 2 * dst_stride;
        y -= 2;
      } while (y);
    }
  }
}
481 | | |
// Vertical (second) pass for the 2-tap half-pel case: both taps are equal,
// so the filter reduces to an average and no coefficient multiply is
// needed. filter_params_y and subpel_y_q4 are therefore unused (voided).
// Reads the 16-bit intermediates from im_block, averages adjacent rows,
// rounds, packs to 8-bit and stores into dst. Loops emit two output rows
// per iteration, so h is assumed even. The s_* arrays carry the previous
// row across iterations so each row is loaded once.
static void convolve_2d_sr_ver_2tap_half_avx2(
    const int16_t *const im_block, const int32_t w, const int32_t h,
    const InterpFilterParams *const filter_params_y, const int32_t subpel_y_q4,
    uint8_t *dst, const int32_t dst_stride) {
  const int16_t *im = im_block;
  int32_t y = h;

  (void)filter_params_y;  // half-pel: taps are implicit
  (void)subpel_y_q4;

  if (w == 2) {
    __m128i s_32[2];

    s_32[0] = _mm_cvtsi32_si128(*(int32_t *)im);

    do {
      const __m128i res = xy_y_convolve_2tap_2x2_half_pel_sse2(im, s_32);
      const __m128i r = xy_y_round_half_pel_sse2(res);
      pack_store_2x2_sse2(r, dst, dst_stride);
      im += 2 * 2;
      dst += 2 * dst_stride;
      y -= 2;
    } while (y);
  } else if (w == 4) {
    __m128i s_64[2];

    s_64[0] = _mm_loadl_epi64((__m128i *)im);

    do {
      const __m128i res = xy_y_convolve_2tap_4x2_half_pel_sse2(im, s_64);
      const __m128i r = xy_y_round_half_pel_sse2(res);
      pack_store_4x2_sse2(r, dst, dst_stride);
      im += 2 * 4;
      dst += 2 * dst_stride;
      y -= 2;
    } while (y);
  } else if (w == 8) {
    __m128i s_128[2];

    s_128[0] = _mm_loadu_si128((__m128i *)im);

    do {
      const __m256i res = xy_y_convolve_2tap_8x2_half_pel_avx2(im, s_128);
      const __m256i r = xy_y_round_half_pel_avx2(res);
      pack_store_8x2_avx2(r, dst, dst_stride);
      im += 2 * 8;
      dst += 2 * dst_stride;
      y -= 2;
    } while (y);
  } else if (w == 16) {
    __m256i s_256[2], r[2];

    s_256[0] = _mm256_loadu_si256((__m256i *)im);

    do {
      xy_y_convolve_2tap_16x2_half_pel_avx2(im, s_256, r);
      r[0] = xy_y_round_half_pel_avx2(r[0]);
      r[1] = xy_y_round_half_pel_avx2(r[1]);
      xy_y_pack_store_16x2_avx2(r[0], r[1], dst, dst_stride);
      im += 2 * 16;
      dst += 2 * dst_stride;
      y -= 2;
    } while (y);
  } else if (w == 32) {
    __m256i s_256[2][2];

    s_256[0][0] = _mm256_loadu_si256((__m256i *)(im + 0 * 16));
    s_256[0][1] = _mm256_loadu_si256((__m256i *)(im + 1 * 16));

    // Alternate s_256[0]/s_256[1] as previous/current row state.
    do {
      xy_y_convolve_2tap_half_pel_32_all_avx2(im + 32, s_256[0], s_256[1], dst);
      xy_y_convolve_2tap_half_pel_32_all_avx2(im + 2 * 32, s_256[1], s_256[0],
                                              dst + dst_stride);
      im += 2 * 32;
      dst += 2 * dst_stride;
      y -= 2;
    } while (y);
  } else if (w == 64) {
    __m256i s_256[2][4];

    s_256[0][0] = _mm256_loadu_si256((__m256i *)(im + 0 * 16));
    s_256[0][1] = _mm256_loadu_si256((__m256i *)(im + 1 * 16));
    s_256[0][2] = _mm256_loadu_si256((__m256i *)(im + 2 * 16));
    s_256[0][3] = _mm256_loadu_si256((__m256i *)(im + 3 * 16));

    do {
      xy_y_convolve_2tap_half_pel_32_all_avx2(im + 64, s_256[0] + 0,
                                              s_256[1] + 0, dst);
      xy_y_convolve_2tap_half_pel_32_all_avx2(im + 96, s_256[0] + 2,
                                              s_256[1] + 2, dst + 32);
      im += 2 * 64;
      xy_y_convolve_2tap_half_pel_32_all_avx2(im, s_256[1] + 0, s_256[0] + 0,
                                              dst + dst_stride);
      xy_y_convolve_2tap_half_pel_32_all_avx2(
          im + 32, s_256[1] + 2, s_256[0] + 2, dst + dst_stride + 32);
      dst += 2 * dst_stride;
      y -= 2;
    } while (y);
  } else {
    __m256i s_256[2][8];

    assert(w == 128);

    load_16bit_8rows_avx2(im, 16, s_256[0]);

    do {
      xy_y_convolve_2tap_half_pel_32_all_avx2(im + 128, s_256[0] + 0,
                                              s_256[1] + 0, dst);
      xy_y_convolve_2tap_half_pel_32_all_avx2(im + 160, s_256[0] + 2,
                                              s_256[1] + 2, dst + 1 * 32);
      xy_y_convolve_2tap_half_pel_32_all_avx2(im + 192, s_256[0] + 4,
                                              s_256[1] + 4, dst + 2 * 32);
      xy_y_convolve_2tap_half_pel_32_all_avx2(im + 224, s_256[0] + 6,
                                              s_256[1] + 6, dst + 3 * 32);
      im += 2 * 128;
      xy_y_convolve_2tap_half_pel_32_all_avx2(im, s_256[1] + 0, s_256[0] + 0,
                                              dst + dst_stride);
      xy_y_convolve_2tap_half_pel_32_all_avx2(
          im + 32, s_256[1] + 2, s_256[0] + 2, dst + dst_stride + 1 * 32);
      xy_y_convolve_2tap_half_pel_32_all_avx2(
          im + 64, s_256[1] + 4, s_256[0] + 4, dst + dst_stride + 2 * 32);
      xy_y_convolve_2tap_half_pel_32_all_avx2(
          im + 96, s_256[1] + 6, s_256[0] + 6, dst + dst_stride + 3 * 32);
      dst += 2 * dst_stride;
      y -= 2;
    } while (y);
  }
}
610 | | |
// Vertical (second) pass for a 4-tap y-filter. Reads the 16-bit
// intermediates in im_block (w * (h + 3) rows — presumed, 4 taps need 3
// extra rows; verify against the horizontal pass), filters vertically,
// rounds, packs to 8-bit and stores to dst. Two output rows per loop
// iteration, so h is assumed even.
// Before each loop the first taps-1 rows are preloaded and pre-unpacked
// (unpacklo/unpackhi pairs) so the inner loop only loads one new row pair.
// subpel_y_q4 == 8 selects the cheaper half-pel kernels for w == 8/16.
static void convolve_2d_sr_ver_4tap_avx2(
    const int16_t *const im_block, const int32_t w, const int32_t h,
    const InterpFilterParams *const filter_params_y, const int32_t subpel_y_q4,
    uint8_t *dst, const int32_t dst_stride) {
  const int16_t *im = im_block;
  int32_t y = h;

  if (w == 2) {
    __m128i coeffs_128[2], s_32[4], ss_128[2];

    prepare_coeffs_4tap_sse2(filter_params_y, subpel_y_q4, coeffs_128);

    // Preload the first 3 rows (2 pixels each).
    s_32[0] = _mm_cvtsi32_si128(*(int32_t *)(im + 0 * 2));
    s_32[1] = _mm_cvtsi32_si128(*(int32_t *)(im + 1 * 2));
    s_32[2] = _mm_cvtsi32_si128(*(int32_t *)(im + 2 * 2));

    const __m128i src01 = _mm_unpacklo_epi32(s_32[0], s_32[1]);
    const __m128i src12 = _mm_unpacklo_epi32(s_32[1], s_32[2]);

    ss_128[0] = _mm_unpacklo_epi16(src01, src12);

    do {
      const __m128i res =
          xy_y_convolve_4tap_2x2_sse2(im, s_32, ss_128, coeffs_128);
      xy_y_round_store_2x2_sse2(res, dst, dst_stride);
      im += 2 * 2;
      dst += 2 * dst_stride;
      y -= 2;
    } while (y);
  } else {
    __m256i coeffs_256[2];

    prepare_coeffs_4tap_avx2(filter_params_y, subpel_y_q4, coeffs_256);

    if (w == 4) {
      __m128i s_64[4];
      __m256i s_256[2], ss_256[2];

      s_64[0] = _mm_loadl_epi64((__m128i *)(im + 0 * 4));
      s_64[1] = _mm_loadl_epi64((__m128i *)(im + 1 * 4));
      s_64[2] = _mm_loadl_epi64((__m128i *)(im + 2 * 4));

      // Load lines a and b. Line a to lower 128, line b to upper 128
      s_256[0] = _mm256_setr_m128i(s_64[0], s_64[1]);
      s_256[1] = _mm256_setr_m128i(s_64[1], s_64[2]);

      ss_256[0] = _mm256_unpacklo_epi16(s_256[0], s_256[1]);

      do {
        const __m256i res =
            xy_y_convolve_4tap_4x2_avx2(im, s_64, ss_256, coeffs_256);
        xy_y_round_store_4x2_avx2(res, dst, dst_stride);
        im += 2 * 4;
        dst += 2 * dst_stride;
        y -= 2;
      } while (y);
    } else if (w == 8) {
      __m256i s_256[4], r[2];

      s_256[0] = _mm256_loadu_si256((__m256i *)(im + 0 * 8));
      s_256[1] = _mm256_loadu_si256((__m256i *)(im + 1 * 8));

      if (subpel_y_q4 != 8) {
        __m256i ss_256[4];

        ss_256[0] = _mm256_unpacklo_epi16(s_256[0], s_256[1]);
        ss_256[2] = _mm256_unpackhi_epi16(s_256[0], s_256[1]);

        do {
          xy_y_convolve_4tap_8x2_avx2(im, ss_256, coeffs_256, r);
          xy_y_round_store_8x2_avx2(r, dst, dst_stride);
          im += 2 * 8;
          dst += 2 * dst_stride;
          y -= 2;
        } while (y);
      } else {
        // Half-pel: symmetric taps allow a cheaper kernel.
        do {
          xy_y_convolve_4tap_8x2_half_pel_avx2(im, coeffs_256, s_256, r);
          xy_y_round_store_8x2_avx2(r, dst, dst_stride);
          im += 2 * 8;
          dst += 2 * dst_stride;
          y -= 2;
        } while (y);
      }
    } else if (w == 16) {
      __m256i s_256[5];

      s_256[0] = _mm256_loadu_si256((__m256i *)(im + 0 * 16));
      s_256[1] = _mm256_loadu_si256((__m256i *)(im + 1 * 16));
      s_256[2] = _mm256_loadu_si256((__m256i *)(im + 2 * 16));

      if (subpel_y_q4 != 8) {
        __m256i ss_256[4], tt_256[4], r[4];

        // ss_256 holds rows (0,1); tt_256 holds rows (1,2) for the
        // even/odd output rows respectively.
        ss_256[0] = _mm256_unpacklo_epi16(s_256[0], s_256[1]);
        ss_256[2] = _mm256_unpackhi_epi16(s_256[0], s_256[1]);

        tt_256[0] = _mm256_unpacklo_epi16(s_256[1], s_256[2]);
        tt_256[2] = _mm256_unpackhi_epi16(s_256[1], s_256[2]);

        do {
          xy_y_convolve_4tap_16x2_avx2(im, s_256, ss_256, tt_256, coeffs_256,
                                       r);
          xy_y_round_store_16x2_avx2(r, dst, dst_stride);
          im += 2 * 16;
          dst += 2 * dst_stride;
          y -= 2;
        } while (y);
      } else {
        __m256i r[4];

        do {
          xy_y_convolve_4tap_16x2_half_pelavx2(im, s_256, coeffs_256, r);
          xy_y_round_store_16x2_avx2(r, dst, dst_stride);
          im += 2 * 16;
          dst += 2 * dst_stride;
          y -= 2;
        } while (y);
      }
    } else {
      /*It's a special condition for OBMC. A/c to Av1 spec 4-tap won't
      support for width(w)>16, but for OBMC while predicting above block
      it reduces size block to Wx(h/2), for example, if above block size
      is 32x8, we get block size as 32x4 for OBMC.*/
      int32_t x = 0;

      assert(!(w % 32));

      // Process the block in 32-pixel wide columns; each column keeps its
      // own preloaded/unpacked row state.
      __m256i s_256[2][4], ss_256[2][4], tt_256[2][4], r0[4], r1[4];
      do {
        const int16_t *s = im + x;
        uint8_t *d = dst + x;

        loadu_unpack_16bit_3rows_avx2(s, w, s_256[0], ss_256[0], tt_256[0]);
        loadu_unpack_16bit_3rows_avx2(s + 16, w, s_256[1], ss_256[1],
                                      tt_256[1]);

        y = h;
        do {
          xy_y_convolve_4tap_32x2_avx2(s, w, s_256[0], ss_256[0], tt_256[0],
                                       coeffs_256, r0);
          xy_y_convolve_4tap_32x2_avx2(s + 16, w, s_256[1], ss_256[1],
                                       tt_256[1], coeffs_256, r1);

          xy_y_round_store_32_avx2(r0 + 0, r1 + 0, d);
          xy_y_round_store_32_avx2(r0 + 2, r1 + 2, d + dst_stride);

          s += 2 * w;
          d += 2 * dst_stride;
          y -= 2;
        } while (y);

        x += 32;
      } while (x < w);
    }
  }
}
768 | | |
// Vertical (second) pass of the 2D single-reference convolution with a 6-tap
// y filter. Reads 16-bit horizontal-pass results from im_block and writes
// rounded 8-bit pixels to dst. Every width branch produces two output rows
// per loop iteration (y -= 2), so h must be even. Rows in im_block are w
// elements apart for w <= 16; for w >= 32 the caller stores the buffer
// 8-pixel interlaced and this routine walks it in 32-wide columns.
static void convolve_2d_sr_ver_6tap_avx2(
    const int16_t *const im_block, const int32_t w, const int32_t h,
    const InterpFilterParams *const filter_params_y, const int32_t subpel_y_q4,
    uint8_t *dst, const int32_t dst_stride) {
  const int16_t *im = im_block;
  int32_t y;

  if (w == 2) {
    // 2-wide: 128-bit SSE path. Keep 5 rows of 2 pixels in s_32 and their
    // pairwise 16-bit interleavings in ss_128; the kernel updates both as it
    // slides down the column.
    __m128i coeffs_128[3], s_32[6], ss_128[3];

    prepare_coeffs_6tap_ssse3(filter_params_y, subpel_y_q4, coeffs_128);

    // NOTE(review): these 32-bit loads type-pun the int16_t buffer; they
    // assume the compiler tolerates the aliasing (common in this codebase).
    s_32[0] = _mm_cvtsi32_si128(*(int32_t *)(im + 0 * 2));
    s_32[1] = _mm_cvtsi32_si128(*(int32_t *)(im + 1 * 2));
    s_32[2] = _mm_cvtsi32_si128(*(int32_t *)(im + 2 * 2));
    s_32[3] = _mm_cvtsi32_si128(*(int32_t *)(im + 3 * 2));
    s_32[4] = _mm_cvtsi32_si128(*(int32_t *)(im + 4 * 2));

    const __m128i src01 = _mm_unpacklo_epi32(s_32[0], s_32[1]);
    const __m128i src12 = _mm_unpacklo_epi32(s_32[1], s_32[2]);
    const __m128i src23 = _mm_unpacklo_epi32(s_32[2], s_32[3]);
    const __m128i src34 = _mm_unpacklo_epi32(s_32[3], s_32[4]);

    ss_128[0] = _mm_unpacklo_epi16(src01, src12);
    ss_128[1] = _mm_unpacklo_epi16(src23, src34);

    y = h;
    do {
      const __m128i res =
          xy_y_convolve_6tap_2x2_sse2(im, s_32, ss_128, coeffs_128);
      xy_y_round_store_2x2_sse2(res, dst, dst_stride);
      im += 2 * 2;
      dst += 2 * dst_stride;
      y -= 2;
    } while (y);
  } else {
    __m256i coeffs_256[3];

    prepare_coeffs_6tap_avx2(filter_params_y, subpel_y_q4, coeffs_256);

    if (w == 4) {
      // 4-wide: pack two consecutive 4-pixel rows into one 256-bit register
      // (row a in the low 128 bits, row b in the high 128 bits).
      __m128i s_64[6];
      __m256i s_256[6], ss_256[3];

      s_64[0] = _mm_loadl_epi64((__m128i *)(im + 0 * 4));
      s_64[1] = _mm_loadl_epi64((__m128i *)(im + 1 * 4));
      s_64[2] = _mm_loadl_epi64((__m128i *)(im + 2 * 4));
      s_64[3] = _mm_loadl_epi64((__m128i *)(im + 3 * 4));
      s_64[4] = _mm_loadl_epi64((__m128i *)(im + 4 * 4));

      // Load lines a and b. Line a to lower 128, line b to upper 128
      s_256[0] = _mm256_setr_m128i(s_64[0], s_64[1]);
      s_256[1] = _mm256_setr_m128i(s_64[1], s_64[2]);
      s_256[2] = _mm256_setr_m128i(s_64[2], s_64[3]);
      s_256[3] = _mm256_setr_m128i(s_64[3], s_64[4]);

      ss_256[0] = _mm256_unpacklo_epi16(s_256[0], s_256[1]);
      ss_256[1] = _mm256_unpacklo_epi16(s_256[2], s_256[3]);

      y = h;
      do {
        const __m256i res =
            xy_y_convolve_6tap_4x2_avx2(im, s_64, ss_256, coeffs_256);
        xy_y_round_store_4x2_avx2(res, dst, dst_stride);
        im += 2 * 4;
        dst += 2 * dst_stride;
        y -= 2;
      } while (y);
    } else if (w == 8) {
      __m256i s_256[6], r[2];

      s_256[0] = _mm256_loadu_si256((__m256i *)(im + 0 * 8));
      s_256[1] = _mm256_loadu_si256((__m256i *)(im + 1 * 8));
      s_256[2] = _mm256_loadu_si256((__m256i *)(im + 2 * 8));
      s_256[3] = _mm256_loadu_si256((__m256i *)(im + 3 * 8));
      y = h;

      if (subpel_y_q4 != 8) {
        // General subpel: pre-interleave low/high halves of adjacent rows.
        // Slots [2] and [5] are filled by the kernel as it advances.
        __m256i ss_256[6];

        ss_256[0] = _mm256_unpacklo_epi16(s_256[0], s_256[1]);
        ss_256[1] = _mm256_unpacklo_epi16(s_256[2], s_256[3]);

        ss_256[3] = _mm256_unpackhi_epi16(s_256[0], s_256[1]);
        ss_256[4] = _mm256_unpackhi_epi16(s_256[2], s_256[3]);

        do {
          xy_y_convolve_6tap_8x2_avx2(im, ss_256, coeffs_256, r);
          xy_y_round_store_8x2_avx2(r, dst, dst_stride);
          im += 2 * 8;
          dst += 2 * dst_stride;
          y -= 2;
        } while (y);
      } else {
        // subpel_y_q4 == 8 (half-pel) takes a dedicated kernel.
        do {
          xy_y_convolve_6tap_8x2_half_pel_avx2(im, coeffs_256, s_256, r);
          xy_y_round_store_8x2_avx2(r, dst, dst_stride);
          im += 2 * 8;
          dst += 2 * dst_stride;
          y -= 2;
        } while (y);
      }
    } else if (w == 16) {
      __m256i s_256[6];

      s_256[0] = _mm256_loadu_si256((__m256i *)(im + 0 * 16));
      s_256[1] = _mm256_loadu_si256((__m256i *)(im + 1 * 16));
      s_256[2] = _mm256_loadu_si256((__m256i *)(im + 2 * 16));
      s_256[3] = _mm256_loadu_si256((__m256i *)(im + 3 * 16));
      s_256[4] = _mm256_loadu_si256((__m256i *)(im + 4 * 16));
      y = h;

      if (subpel_y_q4 != 8) {
        // ss_256 holds the even-row pairings, tt_256 the odd-row pairings;
        // the 16x2 kernel rotates both each iteration. Slots [2]/[5] are
        // filled inside the kernel.
        __m256i ss_256[6], tt_256[6], r[4];

        ss_256[0] = _mm256_unpacklo_epi16(s_256[0], s_256[1]);
        ss_256[1] = _mm256_unpacklo_epi16(s_256[2], s_256[3]);
        ss_256[3] = _mm256_unpackhi_epi16(s_256[0], s_256[1]);
        ss_256[4] = _mm256_unpackhi_epi16(s_256[2], s_256[3]);

        tt_256[0] = _mm256_unpacklo_epi16(s_256[1], s_256[2]);
        tt_256[1] = _mm256_unpacklo_epi16(s_256[3], s_256[4]);
        tt_256[3] = _mm256_unpackhi_epi16(s_256[1], s_256[2]);
        tt_256[4] = _mm256_unpackhi_epi16(s_256[3], s_256[4]);

        do {
          xy_y_convolve_6tap_16x2_avx2(im, 16, s_256, ss_256, tt_256,
                                       coeffs_256, r);
          xy_y_round_store_16x2_avx2(r, dst, dst_stride);
          im += 2 * 16;
          dst += 2 * dst_stride;
          y -= 2;
        } while (y);
      } else {
        __m256i ss_256[4], r[4];

        do {
          xy_y_convolve_6tap_16x2_half_pel_avx2(im, 16, s_256, ss_256,
                                                coeffs_256, r);
          xy_y_round_store_16x2_avx2(r, dst, dst_stride);

          im += 2 * 16;
          dst += 2 * dst_stride;
          y -= 2;
        } while (y);
      }
    } else {
      // w >= 32: process the frame in 32-pixel-wide columns, two 16-wide
      // lanes per column, reusing the 16x2 kernel for each lane.
      int32_t x = 0;

      assert(!(w % 32));

      __m256i s_256[2][6], ss_256[2][6], tt_256[2][6], r0[4], r1[4];

      do {
        const int16_t *s = im + x;
        uint8_t *d = dst + x;

        loadu_unpack_16bit_5rows_avx2(s, w, s_256[0], ss_256[0], tt_256[0]);
        loadu_unpack_16bit_5rows_avx2(s + 16, w, s_256[1], ss_256[1],
                                      tt_256[1]);

        y = h;
        do {
          xy_y_convolve_6tap_16x2_avx2(s, w, s_256[0], ss_256[0], tt_256[0],
                                       coeffs_256, r0);
          xy_y_convolve_6tap_16x2_avx2(s + 16, w, s_256[1], ss_256[1],
                                       tt_256[1], coeffs_256, r1);

          xy_y_round_store_32_avx2(r0 + 0, r1 + 0, d);
          xy_y_round_store_32_avx2(r0 + 2, r1 + 2, d + dst_stride);

          s += 2 * w;
          d += 2 * dst_stride;
          y -= 2;
        } while (y);

        x += 32;
      } while (x < w);
    }
  }
}
950 | | |
// Vertical (second) pass of the 2D single-reference convolution with an
// 8-tap y filter. Mirrors convolve_2d_sr_ver_6tap_avx2 but carries 7 rows of
// state (8-tap support) per column. Reads 16-bit values from im_block,
// writes rounded 8-bit pixels to dst; two output rows per iteration, so h
// must be even.
static void convolve_2d_sr_ver_8tap_avx2(
    const int16_t *const im_block, const int32_t w, const int32_t h,
    const InterpFilterParams *const filter_params_y, const int32_t subpel_y_q4,
    uint8_t *dst, const int32_t dst_stride) {
  const int16_t *im = im_block;
  int32_t y;

  if (w == 2) {
    // 2-wide: SSE path, seven 2-pixel rows interleaved pairwise into ss_128.
    __m128i coeffs_128[4], s_32[8], ss_128[4];

    prepare_coeffs_8tap_sse2(filter_params_y, subpel_y_q4, coeffs_128);

    // NOTE(review): 32-bit loads type-pun the int16_t buffer (aliasing
    // assumption shared with the 6-tap path).
    s_32[0] = _mm_cvtsi32_si128(*(int32_t *)(im + 0 * 2));
    s_32[1] = _mm_cvtsi32_si128(*(int32_t *)(im + 1 * 2));
    s_32[2] = _mm_cvtsi32_si128(*(int32_t *)(im + 2 * 2));
    s_32[3] = _mm_cvtsi32_si128(*(int32_t *)(im + 3 * 2));
    s_32[4] = _mm_cvtsi32_si128(*(int32_t *)(im + 4 * 2));
    s_32[5] = _mm_cvtsi32_si128(*(int32_t *)(im + 5 * 2));
    s_32[6] = _mm_cvtsi32_si128(*(int32_t *)(im + 6 * 2));

    const __m128i src01 = _mm_unpacklo_epi32(s_32[0], s_32[1]);
    const __m128i src12 = _mm_unpacklo_epi32(s_32[1], s_32[2]);
    const __m128i src23 = _mm_unpacklo_epi32(s_32[2], s_32[3]);
    const __m128i src34 = _mm_unpacklo_epi32(s_32[3], s_32[4]);
    const __m128i src45 = _mm_unpacklo_epi32(s_32[4], s_32[5]);
    const __m128i src56 = _mm_unpacklo_epi32(s_32[5], s_32[6]);

    ss_128[0] = _mm_unpacklo_epi16(src01, src12);
    ss_128[1] = _mm_unpacklo_epi16(src23, src34);
    ss_128[2] = _mm_unpacklo_epi16(src45, src56);

    y = h;
    do {
      const __m128i res =
          xy_y_convolve_8tap_2x2_sse2(im, s_32, ss_128, coeffs_128);
      xy_y_round_store_2x2_sse2(res, dst, dst_stride);
      im += 2 * 2;
      dst += 2 * dst_stride;
      y -= 2;
    } while (y);
  } else {
    __m256i coeffs_256[4];

    prepare_coeffs_8tap_avx2(filter_params_y, subpel_y_q4, coeffs_256);

    if (w == 4) {
      // 4-wide: two consecutive rows per 256-bit register (a low, b high).
      __m128i s_64[8];
      __m256i s_256[8], ss_256[4];

      s_64[0] = _mm_loadl_epi64((__m128i *)(im + 0 * 4));
      s_64[1] = _mm_loadl_epi64((__m128i *)(im + 1 * 4));
      s_64[2] = _mm_loadl_epi64((__m128i *)(im + 2 * 4));
      s_64[3] = _mm_loadl_epi64((__m128i *)(im + 3 * 4));
      s_64[4] = _mm_loadl_epi64((__m128i *)(im + 4 * 4));
      s_64[5] = _mm_loadl_epi64((__m128i *)(im + 5 * 4));
      s_64[6] = _mm_loadl_epi64((__m128i *)(im + 6 * 4));

      // Load lines a and b. Line a to lower 128, line b to upper 128
      s_256[0] = _mm256_setr_m128i(s_64[0], s_64[1]);
      s_256[1] = _mm256_setr_m128i(s_64[1], s_64[2]);
      s_256[2] = _mm256_setr_m128i(s_64[2], s_64[3]);
      s_256[3] = _mm256_setr_m128i(s_64[3], s_64[4]);
      s_256[4] = _mm256_setr_m128i(s_64[4], s_64[5]);
      s_256[5] = _mm256_setr_m128i(s_64[5], s_64[6]);

      ss_256[0] = _mm256_unpacklo_epi16(s_256[0], s_256[1]);
      ss_256[1] = _mm256_unpacklo_epi16(s_256[2], s_256[3]);
      ss_256[2] = _mm256_unpacklo_epi16(s_256[4], s_256[5]);

      y = h;
      do {
        const __m256i res =
            xy_y_convolve_8tap_4x2_avx2(im, s_64, ss_256, coeffs_256);
        xy_y_round_store_4x2_avx2(res, dst, dst_stride);
        im += 2 * 4;
        dst += 2 * dst_stride;
        y -= 2;
      } while (y);
    } else if (w == 8) {
      __m256i s_256[8], r[2];

      s_256[0] = _mm256_loadu_si256((__m256i *)(im + 0 * 8));
      s_256[1] = _mm256_loadu_si256((__m256i *)(im + 1 * 8));
      s_256[2] = _mm256_loadu_si256((__m256i *)(im + 2 * 8));
      s_256[3] = _mm256_loadu_si256((__m256i *)(im + 3 * 8));
      s_256[4] = _mm256_loadu_si256((__m256i *)(im + 4 * 8));
      s_256[5] = _mm256_loadu_si256((__m256i *)(im + 5 * 8));
      y = h;

      if (subpel_y_q4 != 8) {
        // General subpel: pre-interleave adjacent-row pairs for the kernel.
        __m256i ss_256[8];

        convolve_8tap_unpack_avx2(s_256, ss_256);

        do {
          xy_y_convolve_8tap_8x2_avx2(im, ss_256, coeffs_256, r);
          xy_y_round_store_8x2_avx2(r, dst, dst_stride);
          im += 2 * 8;
          dst += 2 * dst_stride;
          y -= 2;
        } while (y);
      } else {
        // subpel_y_q4 == 8 (half-pel) takes a dedicated kernel.
        do {
          xy_y_convolve_8tap_8x2_half_pel_avx2(im, coeffs_256, s_256, r);
          xy_y_round_store_8x2_avx2(r, dst, dst_stride);
          im += 2 * 8;
          dst += 2 * dst_stride;
          y -= 2;
        } while (y);
      }
    } else if (w == 16) {
      __m256i s_256[8], r[4];

      load_16bit_7rows_avx2(im, 16, s_256);
      y = h;

      if (subpel_y_q4 != 8) {
        // ss_256: pairings starting at row 0; tt_256: pairings starting at
        // row 1. The 16x2 kernel rotates both as it advances two rows.
        __m256i ss_256[8], tt_256[8];

        convolve_8tap_unpack_avx2(s_256, ss_256);
        convolve_8tap_unpack_avx2(s_256 + 1, tt_256);

        do {
          xy_y_convolve_8tap_16x2_avx2(im, 16, coeffs_256, s_256, ss_256,
                                       tt_256, r);
          xy_y_round_store_16x2_avx2(r, dst, dst_stride);

          im += 2 * 16;
          dst += 2 * dst_stride;
          y -= 2;
        } while (y);
      } else {
        do {
          xy_y_convolve_8tap_16x2_half_pel_avx2(im, 16, coeffs_256, s_256, r);
          xy_y_round_store_16x2_avx2(r, dst, dst_stride);

          im += 2 * 16;
          dst += 2 * dst_stride;
          y -= 2;
        } while (y);
      }
    } else {
      // w >= 32: 32-wide columns, two 16-wide lanes each, reusing the 16x2
      // kernel per lane.
      int32_t x = 0;
      __m256i s_256[2][8], r0[4], r1[4];

      assert(!(w % 32));

      __m256i ss_256[2][8], tt_256[2][8];

      do {
        const int16_t *s = im + x;
        uint8_t *d = dst + x;

        load_16bit_7rows_avx2(s, w, s_256[0]);
        convolve_8tap_unpack_avx2(s_256[0], ss_256[0]);
        convolve_8tap_unpack_avx2(s_256[0] + 1, tt_256[0]);

        load_16bit_7rows_avx2(s + 16, w, s_256[1]);
        convolve_8tap_unpack_avx2(s_256[1], ss_256[1]);
        convolve_8tap_unpack_avx2(s_256[1] + 1, tt_256[1]);

        y = h;
        do {
          xy_y_convolve_8tap_16x2_avx2(s, w, coeffs_256, s_256[0], ss_256[0],
                                       tt_256[0], r0);
          xy_y_convolve_8tap_16x2_avx2(s + 16, w, coeffs_256, s_256[1],
                                       ss_256[1], tt_256[1], r1);
          xy_y_round_store_32_avx2(r0 + 0, r1 + 0, d);
          xy_y_round_store_32_avx2(r0 + 2, r1 + 2, d + dst_stride);

          s += 2 * w;
          d += 2 * dst_stride;
          y -= 2;
        } while (y);

        x += 32;
      } while (x < w);
    }
  }
}
1131 | | |
// Signature of a horizontal (first-pass) 2D convolve kernel: filters a
// w x h region from 8-bit src into the 16-bit intermediate buffer im_block.
typedef void (*Convolve2dSrHorTapFunc)(
    const uint8_t *const src, const int32_t src_stride, const int32_t w,
    const int32_t h, const InterpFilterParams *const filter_params_x,
    const int32_t subpel_x_q4, int16_t *const im_block);
1136 | | |
// Signature of a vertical (second-pass) 2D convolve kernel: filters the
// 16-bit intermediate block down to rounded 8-bit output in dst.
typedef void (*Convolve2dSrVerTapFunc)(
    const int16_t *const im_block, const int32_t w, const int32_t h,
    const InterpFilterParams *const filter_params_y, const int32_t subpel_y_q4,
    uint8_t *dst, const int32_t dst_stride);
1141 | | |
1142 | | static AOM_FORCE_INLINE void av1_convolve_2d_sr_specialized_avx2( |
1143 | | const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, |
1144 | | int32_t w, int32_t h, const InterpFilterParams *filter_params_x, |
1145 | | const InterpFilterParams *filter_params_y, const int32_t subpel_x_q4, |
1146 | 3.87M | const int32_t subpel_y_q4, ConvolveParams *conv_params) { |
1147 | 3.87M | static const Convolve2dSrHorTapFunc |
1148 | 3.87M | convolve_2d_sr_hor_tap_func_table[MAX_FILTER_TAP + 1] = { |
1149 | 3.87M | NULL, |
1150 | 3.87M | NULL, |
1151 | 3.87M | convolve_2d_sr_hor_2tap_avx2, |
1152 | 3.87M | NULL, |
1153 | 3.87M | convolve_2d_sr_hor_4tap_ssse3, |
1154 | 3.87M | NULL, |
1155 | 3.87M | convolve_2d_sr_hor_6tap_avx2, |
1156 | 3.87M | NULL, |
1157 | 3.87M | convolve_2d_sr_hor_8tap_avx2 |
1158 | 3.87M | }; |
1159 | 3.87M | static const Convolve2dSrVerTapFunc |
1160 | 3.87M | convolve_2d_sr_ver_tap_func_table[MAX_FILTER_TAP + 1] = { |
1161 | 3.87M | NULL, |
1162 | 3.87M | convolve_2d_sr_ver_2tap_half_avx2, |
1163 | 3.87M | convolve_2d_sr_ver_2tap_avx2, |
1164 | 3.87M | convolve_2d_sr_ver_4tap_avx2, |
1165 | 3.87M | convolve_2d_sr_ver_4tap_avx2, |
1166 | 3.87M | convolve_2d_sr_ver_6tap_avx2, |
1167 | 3.87M | convolve_2d_sr_ver_6tap_avx2, |
1168 | 3.87M | convolve_2d_sr_ver_8tap_avx2, |
1169 | 3.87M | convolve_2d_sr_ver_8tap_avx2 |
1170 | 3.87M | }; |
1171 | 3.87M | const int32_t tap_x = get_filter_tap(filter_params_x, subpel_x_q4); |
1172 | 3.87M | const int32_t tap_y = get_filter_tap(filter_params_y, subpel_y_q4); |
1173 | | |
1174 | 3.87M | assert(tap_x != 12 && tap_y != 12); |
1175 | | |
1176 | 0 | const uint8_t *src_ptr = src - ((tap_y >> 1) - 1) * src_stride; |
1177 | | // Note: im_block is 8-pixel interlaced for width 32 and up, to avoid data |
1178 | | // permutation. |
1179 | 3.87M | DECLARE_ALIGNED(32, int16_t, |
1180 | 3.87M | im_block[(MAX_SB_SIZE + MAX_FILTER_TAP) * MAX_SB_SIZE]); |
1181 | | |
1182 | 3.87M | (void)conv_params; |
1183 | | |
1184 | 3.87M | assert(conv_params->round_0 == 3); |
1185 | 0 | assert(conv_params->round_1 == 11); |
1186 | | |
1187 | | // horizontal filter |
1188 | 0 | int32_t hh = h + tap_y; |
1189 | 3.87M | assert(!(hh % 2)); |
1190 | | |
1191 | 0 | convolve_2d_sr_hor_tap_func_table[tap_x]( |
1192 | 3.87M | src_ptr, src_stride, w, hh, filter_params_x, subpel_x_q4, im_block); |
1193 | | |
1194 | | // vertical filter |
1195 | 3.87M | convolve_2d_sr_ver_tap_func_table[tap_y - (subpel_y_q4 == 8)]( |
1196 | 3.87M | im_block, w, h, filter_params_y, subpel_y_q4, dst, dst_stride); |
1197 | 3.87M | } |
1198 | | |
1199 | | #endif // THIRD_PARTY_SVT_AV1_CONVOLVE_2D_AVX2_H_ |