/src/aom/third_party/SVT-AV1/convolve_2d_avx2.h
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2017, Alliance for Open Media. All rights reserved |
3 | | * |
4 | | * This source code is subject to the terms of the BSD 2 Clause License and |
5 | | * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License |
6 | | * was not distributed with this source code in the LICENSE file, you can |
7 | | * obtain it at www.aomedia.org/license/software. If the Alliance for Open |
8 | | * Media Patent License 1.0 was not distributed with this source code in the |
9 | | * PATENTS file, you can obtain it at www.aomedia.org/license/patent. |
10 | | */ |
11 | | |
12 | | #ifndef THIRD_PARTY_SVT_AV1_CONVOLVE_2D_AVX2_H_ |
13 | | #define THIRD_PARTY_SVT_AV1_CONVOLVE_2D_AVX2_H_ |
14 | | |
15 | | #include "convolve_avx2.h" |
16 | | |
17 | | static void convolve_2d_sr_hor_2tap_avx2( |
18 | | const uint8_t *const src, const int32_t src_stride, const int32_t w, |
19 | | const int32_t h, const InterpFilterParams *const filter_params_x, |
20 | 45.4k | const int32_t subpel_x_q4, int16_t *const im_block) { |
21 | 45.4k | const uint8_t *src_ptr = src; |
22 | 45.4k | int32_t y = h; |
23 | 45.4k | int16_t *im = im_block; |
24 | | |
25 | 45.4k | if (w <= 8) { |
26 | 36.7k | __m128i coeffs_128; |
27 | | |
28 | 36.7k | prepare_half_coeffs_2tap_ssse3(filter_params_x, subpel_x_q4, &coeffs_128); |
29 | | |
30 | 36.7k | if (w == 2) { |
31 | 17.7k | do { |
32 | 17.7k | const __m128i r = |
33 | 17.7k | x_convolve_2tap_2x2_sse4_1(src_ptr, src_stride, &coeffs_128); |
34 | 17.7k | xy_x_round_store_2x2_sse2(r, im); |
35 | 17.7k | src_ptr += 2 * src_stride; |
36 | 17.7k | im += 2 * 2; |
37 | 17.7k | y -= 2; |
38 | 17.7k | } while (y); |
39 | 30.6k | } else if (w == 4) { |
40 | 68.2k | do { |
41 | 68.2k | const __m128i r = |
42 | 68.2k | x_convolve_2tap_4x2_ssse3(src_ptr, src_stride, &coeffs_128); |
43 | 68.2k | xy_x_round_store_4x2_sse2(r, im); |
44 | 68.2k | src_ptr += 2 * src_stride; |
45 | 68.2k | im += 2 * 4; |
46 | 68.2k | y -= 2; |
47 | 68.2k | } while (y); |
48 | 17.6k | } else { |
49 | 12.9k | assert(w == 8); |
50 | | |
51 | 56.6k | do { |
52 | 56.6k | __m128i r[2]; |
53 | | |
54 | 56.6k | x_convolve_2tap_8x2_ssse3(src_ptr, src_stride, &coeffs_128, r); |
55 | 56.6k | xy_x_round_store_8x2_sse2(r, im); |
56 | 56.6k | src_ptr += 2 * src_stride; |
57 | 56.6k | im += 2 * 8; |
58 | 56.6k | y -= 2; |
59 | 56.6k | } while (y); |
60 | 12.9k | } |
61 | 36.7k | } else { |
62 | 8.67k | __m256i coeffs_256; |
63 | | |
64 | 8.67k | prepare_half_coeffs_2tap_avx2(filter_params_x, subpel_x_q4, &coeffs_256); |
65 | | |
66 | 8.67k | if (w == 16) { |
67 | 38.0k | do { |
68 | 38.0k | __m256i r[2]; |
69 | | |
70 | 38.0k | x_convolve_2tap_16x2_avx2(src_ptr, src_stride, &coeffs_256, r); |
71 | 38.0k | xy_x_round_store_32_avx2(r, im); |
72 | 38.0k | src_ptr += 2 * src_stride; |
73 | 38.0k | im += 2 * 16; |
74 | 38.0k | y -= 2; |
75 | 38.0k | } while (y); |
76 | 5.70k | } else if (w == 32) { |
77 | 55.4k | do { |
78 | 55.4k | xy_x_2tap_32_avx2(src_ptr, &coeffs_256, im); |
79 | 55.4k | src_ptr += src_stride; |
80 | 55.4k | im += 32; |
81 | 55.4k | } while (--y); |
82 | 1.96k | } else if (w == 64) { |
83 | 39.4k | do { |
84 | 39.4k | xy_x_2tap_32_avx2(src_ptr + 0 * 32, &coeffs_256, im + 0 * 32); |
85 | 39.4k | xy_x_2tap_32_avx2(src_ptr + 1 * 32, &coeffs_256, im + 1 * 32); |
86 | 39.4k | src_ptr += src_stride; |
87 | 39.4k | im += 64; |
88 | 39.4k | } while (--y); |
89 | 831 | } else { |
90 | 170 | assert(w == 128); |
91 | | |
92 | 15.7k | do { |
93 | 15.7k | xy_x_2tap_32_avx2(src_ptr + 0 * 32, &coeffs_256, im + 0 * 32); |
94 | 15.7k | xy_x_2tap_32_avx2(src_ptr + 1 * 32, &coeffs_256, im + 1 * 32); |
95 | 15.7k | xy_x_2tap_32_avx2(src_ptr + 2 * 32, &coeffs_256, im + 2 * 32); |
96 | 15.7k | xy_x_2tap_32_avx2(src_ptr + 3 * 32, &coeffs_256, im + 3 * 32); |
97 | 15.7k | src_ptr += src_stride; |
98 | 15.7k | im += 128; |
99 | 15.7k | } while (--y); |
100 | 170 | } |
101 | 8.67k | } |
102 | 45.4k | } |
103 | | |
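For reference, a minimal scalar sketch of the horizontal stage these AVX2 kernels vectorize, mirroring libaom's generic av1_convolve_2d_sr_c (assumptions: 8-bit input, FILTER_BITS == 7, and conv_params->round_0 == 3 as asserted at the bottom of this header; helper names are illustrative, not part of this file):

    #include <stdint.h>

    /* Rounding right shift, as in aom's ROUND_POWER_OF_TWO(). */
    static int16_t round_shift(int32_t v, int bits) {
      return (int16_t)((v + (1 << (bits - 1))) >> bits);
    }

    /* One n-tap horizontal pass into the 16-bit im_block. src must already
     * be backed off by (ntaps / 2 - 1) columns, as the callers do. */
    static void hor_stage_sketch(const uint8_t *src, int src_stride, int w,
                                 int h, const int16_t *filter, int ntaps,
                                 int16_t *im) {
      const int32_t offset = 1 << (8 + 7 - 1);  // bd + FILTER_BITS - 1
      for (int y = 0; y < h; ++y) {
        for (int x = 0; x < w; ++x) {
          int32_t sum = offset;
          for (int k = 0; k < ntaps; ++k) sum += filter[k] * src[x + k];
          im[x] = round_shift(sum, 3);  // conv_params->round_0
        }
        src += src_stride;
        im += w;
      }
    }
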
104 | | static void convolve_2d_sr_hor_4tap_ssse3( |
105 | | const uint8_t *const src, const int32_t src_stride, const int32_t w, |
106 | | const int32_t h, const InterpFilterParams *const filter_params_x, |
107 | 484k | const int32_t subpel_x_q4, int16_t *const im_block) { |
108 | 484k | const uint8_t *src_ptr = src - 1; |
109 | 484k | int32_t y = h; |
110 | 484k | int16_t *im = im_block; |
111 | | |
112 | 484k | if (w <= 4) { |
113 | 445k | __m128i coeffs_128[2]; |
114 | | |
115 | 445k | prepare_half_coeffs_4tap_ssse3(filter_params_x, subpel_x_q4, coeffs_128); |
116 | 445k | if (w == 2) { |
117 | 448k | do { |
118 | 448k | const __m128i r = |
119 | 448k | x_convolve_4tap_2x2_ssse3(src_ptr, src_stride, coeffs_128); |
120 | 448k | xy_x_round_store_2x2_sse2(r, im); |
121 | 448k | src_ptr += 2 * src_stride; |
122 | 448k | im += 2 * 2; |
123 | 448k | y -= 2; |
124 | 448k | } while (y); |
125 | 355k | } else if (w == 4) { |
126 | 1.93M | do { |
127 | 1.93M | const __m128i r = |
128 | 1.93M | x_convolve_4tap_4x2_ssse3(src_ptr, src_stride, coeffs_128); |
129 | 1.93M | xy_x_round_store_4x2_sse2(r, im); |
130 | 1.93M | src_ptr += 2 * src_stride; |
131 | 1.93M | im += 2 * 4; |
132 | 1.93M | y -= 2; |
133 | 1.93M | } while (y); |
134 | 355k | } |
135 | 445k | } else { |
136 | | // TODO(chiyotsai@google.com): Add better optimization |
137 | 38.2k | __m256i coeffs_256[2], filt_256[2]; |
138 | | |
139 | 38.2k | prepare_half_coeffs_4tap_avx2(filter_params_x, subpel_x_q4, coeffs_256); |
140 | 38.2k | filt_256[0] = _mm256_load_si256((__m256i const *)filt1_global_avx2); |
141 | 38.2k | filt_256[1] = _mm256_load_si256((__m256i const *)filt2_global_avx2); |
142 | | |
143 | 38.2k | if (w == 8) { |
144 | 134k | do { |
145 | 134k | __m256i res = |
146 | 134k | x_convolve_4tap_8x2_avx2(src_ptr, src_stride, coeffs_256, filt_256); |
147 | 134k | xy_x_round_store_8x2_avx2(res, im); |
148 | | |
149 | 134k | src_ptr += 2 * src_stride; |
150 | 134k | im += 2 * 8; |
151 | 134k | y -= 2; |
152 | 134k | } while (y); |
153 | 22.7k | } else if (w == 16) { |
154 | 86.6k | do { |
155 | 86.6k | __m256i r[2]; |
156 | | |
157 | 86.6k | x_convolve_4tap_16x2_avx2(src_ptr, src_stride, coeffs_256, filt_256, r); |
158 | 86.6k | xy_x_round_store_32_avx2(r, im); |
159 | 86.6k | src_ptr += 2 * src_stride; |
160 | 86.6k | im += 2 * 16; |
161 | 86.6k | y -= 2; |
162 | 86.6k | } while (y); |
163 | 11.3k | } else if (w == 32) { |
164 | 80.1k | do { |
165 | 80.1k | xy_x_4tap_32_avx2(src_ptr, coeffs_256, filt_256, im); |
166 | | |
167 | 80.1k | src_ptr += src_stride; |
168 | 80.1k | im += 32; |
169 | 80.1k | } while (--y); |
170 | 3.12k | } else if (w == 64) { |
171 | 50.4k | do { |
172 | 50.4k | xy_x_4tap_32_avx2(src_ptr, coeffs_256, filt_256, im); |
173 | 50.4k | xy_x_4tap_32_avx2(src_ptr + 32, coeffs_256, filt_256, im + 32); |
174 | 50.4k | src_ptr += src_stride; |
175 | 50.4k | im += 64; |
176 | 50.4k | } while (--y); |
177 | 903 | } else { |
178 | 159 | assert(w == 128); |
179 | | |
180 | 22.2k | do { |
181 | 22.2k | xy_x_4tap_32_avx2(src_ptr, coeffs_256, filt_256, im); |
182 | 22.2k | xy_x_4tap_32_avx2(src_ptr + 32, coeffs_256, filt_256, im + 32); |
183 | 22.2k | xy_x_4tap_32_avx2(src_ptr + 64, coeffs_256, filt_256, im + 64); |
184 | 22.2k | xy_x_4tap_32_avx2(src_ptr + 96, coeffs_256, filt_256, im + 96); |
185 | 22.2k | src_ptr += src_stride; |
186 | 22.2k | im += 128; |
187 | 22.2k | } while (--y); |
188 | 179 | } |
189 | 38.2k | } |
190 | 484k | } |
191 | | |
192 | | static void convolve_2d_sr_hor_6tap_avx2( |
193 | | const uint8_t *const src, const int32_t src_stride, const int32_t w, |
194 | | const int32_t h, const InterpFilterParams *const filter_params_x, |
195 | 431k | const int32_t subpel_x_q4, int16_t *const im_block) { |
196 | 431k | const uint8_t *src_ptr = src - 2; |
197 | 431k | int32_t y = h; |
198 | 431k | int16_t *im = im_block; |
199 | | |
200 | 431k | if (w <= 4) { |
201 | 0 | __m128i coeffs_128[3]; |
202 | | |
203 | 0 | prepare_half_coeffs_6tap_ssse3(filter_params_x, subpel_x_q4, coeffs_128); |
204 | 0 | if (w == 2) { |
205 | 0 | do { |
206 | 0 | const __m128i r = |
207 | 0 | x_convolve_6tap_2x2_ssse3(src_ptr, src_stride, coeffs_128); |
208 | 0 | xy_x_round_store_2x2_sse2(r, im); |
209 | 0 | src_ptr += 2 * src_stride; |
210 | 0 | im += 2 * 2; |
211 | 0 | y -= 2; |
212 | 0 | } while (y); |
213 | 0 | } else if (w == 4) { |
214 | 0 | do { |
215 | 0 | const __m128i r = |
216 | 0 | x_convolve_6tap_4x2_ssse3(src_ptr, src_stride, coeffs_128); |
217 | 0 | xy_x_round_store_4x2_sse2(r, im); |
218 | 0 | src_ptr += 2 * src_stride; |
219 | 0 | im += 2 * 4; |
220 | 0 | y -= 2; |
221 | 0 | } while (y); |
222 | 0 | } |
223 | 431k | } else { |
224 | 431k | __m256i coeffs_256[3], filt_256[3]; |
225 | | |
226 | 431k | filt_256[0] = _mm256_loadu_si256((__m256i const *)filt1_global_avx2); |
227 | 431k | filt_256[1] = _mm256_loadu_si256((__m256i const *)filt2_global_avx2); |
228 | 431k | filt_256[2] = _mm256_loadu_si256((__m256i const *)filt3_global_avx2); |
229 | | |
230 | 431k | prepare_half_coeffs_6tap_avx2(filter_params_x, subpel_x_q4, coeffs_256); |
231 | | |
232 | 431k | if (w == 8) { |
233 | 1.59M | do { |
234 | 1.59M | const __m256i res = |
235 | 1.59M | x_convolve_6tap_8x2_avx2(src_ptr, src_stride, coeffs_256, filt_256); |
236 | 1.59M | xy_x_round_store_8x2_avx2(res, im); |
237 | | |
238 | 1.59M | src_ptr += 2 * src_stride; |
239 | 1.59M | im += 2 * 8; |
240 | 1.59M | y -= 2; |
241 | 1.59M | } while (y); |
242 | 261k | } else if (w == 16) { |
243 | 1.02M | do { |
244 | 1.02M | __m256i r[2]; |
245 | | |
246 | 1.02M | x_convolve_6tap_16x2_avx2(src_ptr, src_stride, coeffs_256, filt_256, r); |
247 | 1.02M | xy_x_round_store_32_avx2(r, im); |
248 | 1.02M | src_ptr += 2 * src_stride; |
249 | 1.02M | im += 2 * 16; |
250 | 1.02M | y -= 2; |
251 | 1.02M | } while (y); |
252 | 131k | } else if (w == 32) { |
253 | 799k | do { |
254 | 799k | xy_x_6tap_32_avx2(src_ptr, coeffs_256, filt_256, im); |
255 | 799k | src_ptr += src_stride; |
256 | 799k | im += 32; |
257 | 799k | } while (--y); |
258 | 31.4k | } else if (w == 64) { |
259 | 320k | do { |
260 | 320k | xy_x_6tap_32_avx2(src_ptr, coeffs_256, filt_256, im); |
261 | 320k | xy_x_6tap_32_avx2(src_ptr + 32, coeffs_256, filt_256, im + 32); |
262 | 320k | src_ptr += src_stride; |
263 | 320k | im += 64; |
264 | 320k | } while (--y); |
265 | 5.75k | } else { |
266 | 821 | assert(w == 128); |
267 | | |
268 | 114k | do { |
269 | 114k | xy_x_6tap_32_avx2(src_ptr, coeffs_256, filt_256, im); |
270 | 114k | xy_x_6tap_32_avx2(src_ptr + 32, coeffs_256, filt_256, im + 32); |
271 | 114k | xy_x_6tap_32_avx2(src_ptr + 64, coeffs_256, filt_256, im + 64); |
272 | 114k | xy_x_6tap_32_avx2(src_ptr + 96, coeffs_256, filt_256, im + 96); |
273 | 114k | src_ptr += src_stride; |
274 | 114k | im += 128; |
275 | 114k | } while (--y); |
276 | 941 | } |
277 | 431k | } |
278 | 431k | } |
279 | | |
280 | | static void convolve_2d_sr_hor_8tap_avx2( |
281 | | const uint8_t *const src, const int32_t src_stride, const int32_t w, |
282 | | const int32_t h, const InterpFilterParams *const filter_params_x, |
283 | 29.3k | const int32_t subpel_x_q4, int16_t *const im_block) { |
284 | 29.3k | const uint8_t *src_ptr = src - 3; |
285 | 29.3k | int32_t y = h; |
286 | 29.3k | int16_t *im = im_block; |
287 | 29.3k | __m256i coeffs_256[4], filt_256[4]; |
288 | | |
289 | 29.3k | filt_256[0] = _mm256_loadu_si256((__m256i const *)filt1_global_avx2); |
290 | 29.3k | filt_256[1] = _mm256_loadu_si256((__m256i const *)filt2_global_avx2); |
291 | 29.3k | filt_256[2] = _mm256_loadu_si256((__m256i const *)filt3_global_avx2); |
292 | 29.3k | filt_256[3] = _mm256_loadu_si256((__m256i const *)filt4_global_avx2); |
293 | | |
294 | 29.3k | prepare_half_coeffs_8tap_avx2(filter_params_x, subpel_x_q4, coeffs_256); |
295 | | |
296 | 29.3k | if (w == 8) { |
297 | 88.7k | do { |
298 | 88.7k | const __m256i res = |
299 | 88.7k | x_convolve_8tap_8x2_avx2(src_ptr, src_stride, coeffs_256, filt_256); |
300 | 88.7k | xy_x_round_store_8x2_avx2(res, im); |
301 | 88.7k | src_ptr += 2 * src_stride; |
302 | 88.7k | im += 2 * 8; |
303 | 88.7k | y -= 2; |
304 | 88.7k | } while (y); |
305 | 16.1k | } else if (w == 16) { |
306 | 61.6k | do { |
307 | 61.6k | __m256i r[2]; |
308 | | |
309 | 61.6k | x_convolve_8tap_16x2_avx2(src_ptr, src_stride, coeffs_256, filt_256, r); |
310 | 61.6k | xy_x_round_store_32_avx2(r, im); |
311 | 61.6k | src_ptr += 2 * src_stride; |
312 | 61.6k | im += 2 * 16; |
313 | 61.6k | y -= 2; |
314 | 61.6k | } while (y); |
315 | 9.14k | } else if (w == 32) { |
316 | 167k | do { |
317 | 167k | xy_x_8tap_32_avx2(src_ptr, coeffs_256, filt_256, im); |
318 | 167k | src_ptr += src_stride; |
319 | 167k | im += 32; |
320 | 167k | } while (--y); |
321 | 6.40k | } else if (w == 64) { |
322 | 114k | do { |
323 | 114k | xy_x_8tap_32_avx2(src_ptr, coeffs_256, filt_256, im); |
324 | 114k | xy_x_8tap_32_avx2(src_ptr + 32, coeffs_256, filt_256, im + 32); |
325 | 114k | src_ptr += src_stride; |
326 | 114k | im += 64; |
327 | 114k | } while (--y); |
328 | 2.60k | } else { |
329 | 130 | assert(w == 128); |
330 | | |
331 | 15.1k | do { |
332 | 15.1k | xy_x_8tap_32_avx2(src_ptr, coeffs_256, filt_256, im); |
333 | 15.1k | xy_x_8tap_32_avx2(src_ptr + 32, coeffs_256, filt_256, im + 32); |
334 | 15.1k | xy_x_8tap_32_avx2(src_ptr + 64, coeffs_256, filt_256, im + 64); |
335 | 15.1k | xy_x_8tap_32_avx2(src_ptr + 96, coeffs_256, filt_256, im + 96); |
336 | 15.1k | src_ptr += src_stride; |
337 | 15.1k | im += 128; |
338 | 15.1k | } while (--y); |
339 | 131 | } |
340 | 29.3k | } |
341 | | |
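All four horizontal entry points share one pointer adjustment: 0 columns of back-off for 2-tap, 1 for 4-tap, 2 for 6-tap, 3 for 8-tap. A one-liner capturing the pattern (hypothetical helper, shown only to make the pattern explicit):

    #include <stdint.h>

    /* Back the source pointer off so the filter window is centered on the
     * output pixel: src - (ntaps / 2 - 1). */
    static const uint8_t *hor_src_origin_sketch(const uint8_t *src,
                                                int ntaps) {
      return src - (ntaps / 2 - 1);
    }
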
342 | | static void convolve_2d_sr_ver_2tap_avx2( |
343 | | const int16_t *const im_block, const int32_t w, const int32_t h, |
344 | | const InterpFilterParams *const filter_params_y, const int32_t subpel_y_q4, |
345 | 34.4k | uint8_t *dst, const int32_t dst_stride) { |
346 | 34.4k | const int16_t *im = im_block; |
347 | 34.4k | int32_t y = h; |
348 | | |
349 | 34.4k | if (w <= 4) { |
350 | 18.4k | __m128i coeffs_128; |
351 | | |
352 | 18.4k | prepare_coeffs_2tap_sse2(filter_params_y, subpel_y_q4, &coeffs_128); |
353 | | |
354 | 18.4k | if (w == 2) { |
355 | 4.63k | __m128i s_32[2]; |
356 | | |
357 | 4.63k | s_32[0] = _mm_cvtsi32_si128(*(int32_t *)im); |
358 | | |
359 | 8.66k | do { |
360 | 8.66k | const __m128i res = xy_y_convolve_2tap_2x2_sse2(im, s_32, &coeffs_128); |
361 | 8.66k | xy_y_round_store_2x2_sse2(res, dst, dst_stride); |
362 | 8.66k | im += 2 * 2; |
363 | 8.66k | dst += 2 * dst_stride; |
364 | 8.66k | y -= 2; |
365 | 8.66k | } while (y); |
366 | 13.7k | } else { |
367 | 13.7k | __m128i s_64[2], r[2]; |
368 | | |
369 | 13.7k | assert(w == 4); |
370 | | |
371 | 13.7k | s_64[0] = _mm_loadl_epi64((__m128i *)im); |
372 | | |
373 | 39.0k | do { |
374 | 39.0k | xy_y_convolve_2tap_4x2_sse2(im, s_64, &coeffs_128, r); |
375 | 39.0k | r[0] = xy_y_round_sse2(r[0]); |
376 | 39.0k | r[1] = xy_y_round_sse2(r[1]); |
377 | 39.0k | const __m128i rr = _mm_packs_epi32(r[0], r[1]); |
378 | 39.0k | pack_store_4x2_sse2(rr, dst, dst_stride); |
379 | 39.0k | im += 2 * 4; |
380 | 39.0k | dst += 2 * dst_stride; |
381 | 39.0k | y -= 2; |
382 | 39.0k | } while (y); |
383 | 13.7k | } |
384 | 18.4k | } else { |
385 | 16.0k | __m256i coeffs_256; |
386 | | |
387 | 16.0k | prepare_coeffs_2tap_avx2(filter_params_y, subpel_y_q4, &coeffs_256); |
388 | | |
389 | 16.0k | if (w == 8) { |
390 | 9.89k | __m128i s_128[2]; |
391 | 9.89k | __m256i r[2]; |
392 | | |
393 | 9.89k | s_128[0] = _mm_loadu_si128((__m128i *)im); |
394 | | |
395 | 31.9k | do { |
396 | 31.9k | xy_y_convolve_2tap_8x2_avx2(im, s_128, &coeffs_256, r); |
397 | 31.9k | xy_y_round_store_8x2_avx2(r, dst, dst_stride); |
398 | 31.9k | im += 2 * 8; |
399 | 31.9k | dst += 2 * dst_stride; |
400 | 31.9k | y -= 2; |
401 | 31.9k | } while (y); |
402 | 9.89k | } else if (w == 16) { |
403 | 4.27k | __m256i s_256[2], r[4]; |
404 | | |
405 | 4.27k | s_256[0] = _mm256_loadu_si256((__m256i *)im); |
406 | | |
407 | 23.0k | do { |
408 | 23.0k | xy_y_convolve_2tap_16x2_avx2(im, s_256, &coeffs_256, r); |
409 | 23.0k | xy_y_round_store_16x2_avx2(r, dst, dst_stride); |
410 | 23.0k | im += 2 * 16; |
411 | 23.0k | dst += 2 * dst_stride; |
412 | 23.0k | y -= 2; |
413 | 23.0k | } while (y); |
414 | 4.27k | } else if (w == 32) { |
415 | 1.23k | __m256i s_256[2][2]; |
416 | | |
417 | 1.23k | s_256[0][0] = _mm256_loadu_si256((__m256i *)(im + 0 * 16)); |
418 | 1.23k | s_256[0][1] = _mm256_loadu_si256((__m256i *)(im + 1 * 16)); |
419 | | |
420 | 16.0k | do { |
421 | 16.0k | xy_y_convolve_2tap_32_all_avx2(im + 32, s_256[0], s_256[1], &coeffs_256, |
422 | 16.0k | dst); |
423 | 16.0k | im += 2 * 32; |
424 | 16.0k | xy_y_convolve_2tap_32_all_avx2(im, s_256[1], s_256[0], &coeffs_256, |
425 | 16.0k | dst + dst_stride); |
426 | 16.0k | dst += 2 * dst_stride; |
427 | 16.0k | y -= 2; |
428 | 16.0k | } while (y); |
429 | 1.23k | } else if (w == 64) { |
430 | 563 | __m256i s_256[2][4]; |
431 | | |
432 | 563 | s_256[0][0] = _mm256_loadu_si256((__m256i *)(im + 0 * 16)); |
433 | 563 | s_256[0][1] = _mm256_loadu_si256((__m256i *)(im + 1 * 16)); |
434 | 563 | s_256[0][2] = _mm256_loadu_si256((__m256i *)(im + 2 * 16)); |
435 | 563 | s_256[0][3] = _mm256_loadu_si256((__m256i *)(im + 3 * 16)); |
436 | | |
437 | 13.0k | do { |
438 | 13.0k | xy_y_convolve_2tap_32_all_avx2(im + 64, s_256[0] + 0, s_256[1] + 0, |
439 | 13.0k | &coeffs_256, dst); |
440 | 13.0k | xy_y_convolve_2tap_32_all_avx2(im + 96, s_256[0] + 2, s_256[1] + 2, |
441 | 13.0k | &coeffs_256, dst + 32); |
442 | 13.0k | im += 2 * 64; |
443 | 13.0k | xy_y_convolve_2tap_32_all_avx2(im, s_256[1] + 0, s_256[0] + 0, |
444 | 13.0k | &coeffs_256, dst + dst_stride); |
445 | 13.0k | xy_y_convolve_2tap_32_all_avx2(im + 32, s_256[1] + 2, s_256[0] + 2, |
446 | 13.0k | &coeffs_256, dst + dst_stride + 32); |
447 | 13.0k | dst += 2 * dst_stride; |
448 | 13.0k | y -= 2; |
449 | 13.0k | } while (y); |
450 | 563 | } else { |
451 | 98 | __m256i s_256[2][8]; |
452 | | |
453 | 98 | assert(w == 128); |
454 | | |
455 | 98 | load_16bit_8rows_avx2(im, 16, s_256[0]); |
456 | | |
457 | 4.60k | do { |
458 | 4.60k | xy_y_convolve_2tap_32_all_avx2(im + 128, s_256[0] + 0, s_256[1] + 0, |
459 | 4.60k | &coeffs_256, dst); |
460 | 4.60k | xy_y_convolve_2tap_32_all_avx2(im + 160, s_256[0] + 2, s_256[1] + 2, |
461 | 4.60k | &coeffs_256, dst + 1 * 32); |
462 | 4.60k | xy_y_convolve_2tap_32_all_avx2(im + 192, s_256[0] + 4, s_256[1] + 4, |
463 | 4.60k | &coeffs_256, dst + 2 * 32); |
464 | 4.60k | xy_y_convolve_2tap_32_all_avx2(im + 224, s_256[0] + 6, s_256[1] + 6, |
465 | 4.60k | &coeffs_256, dst + 3 * 32); |
466 | 4.60k | im += 2 * 128; |
467 | 4.60k | xy_y_convolve_2tap_32_all_avx2(im, s_256[1] + 0, s_256[0] + 0, |
468 | 4.60k | &coeffs_256, dst + dst_stride); |
469 | 4.60k | xy_y_convolve_2tap_32_all_avx2(im + 32, s_256[1] + 2, s_256[0] + 2, |
470 | 4.60k | &coeffs_256, dst + dst_stride + 1 * 32); |
471 | 4.60k | xy_y_convolve_2tap_32_all_avx2(im + 64, s_256[1] + 4, s_256[0] + 4, |
472 | 4.60k | &coeffs_256, dst + dst_stride + 2 * 32); |
473 | 4.60k | xy_y_convolve_2tap_32_all_avx2(im + 96, s_256[1] + 6, s_256[0] + 6, |
474 | 4.60k | &coeffs_256, dst + dst_stride + 3 * 32); |
475 | 4.60k | dst += 2 * dst_stride; |
476 | 4.60k | y -= 2; |
477 | 4.60k | } while (y); |
478 | 98 | } |
479 | 16.0k | } |
480 | 34.4k | } |
481 | | |
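For reference, a scalar sketch of the vertical stage the kernels above and below vectorize, again mirroring av1_convolve_2d_sr_c with the round_0 == 3 / round_1 == 11 values asserted in this header (the final shift, 2 * FILTER_BITS - round_0 - round_1, is 0, so the corrected sum is clipped directly; names are illustrative):

    #include <stdint.h>

    /* One n-tap vertical pass from the 16-bit im_block to 8-bit dst. im is
     * assumed row-major with stride w (the w >= 32 AVX2 paths use an
     * interleaved layout instead). The subtracted correction undoes the
     * offsets added in the horizontal stage before clipping to [0, 255]. */
    static void ver_stage_sketch(const int16_t *im, int w, int h,
                                 const int16_t *filter, int ntaps,
                                 uint8_t *dst, int dst_stride) {
      const int offset_bits = 8 + 2 * 7 - 3;  // bd + 2 * FILTER_BITS - round_0
      const int32_t correction =
          (1 << (offset_bits - 11)) + (1 << (offset_bits - 11 - 1));
      for (int y = 0; y < h; ++y) {
        for (int x = 0; x < w; ++x) {
          int32_t sum = 1 << offset_bits;
          for (int k = 0; k < ntaps; ++k) sum += filter[k] * im[k * w + x];
          const int32_t res = ((sum + (1 << 10)) >> 11) - correction;
          dst[x] = (uint8_t)(res < 0 ? 0 : (res > 255 ? 255 : res));
        }
        im += w;
        dst += dst_stride;
      }
    }
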
482 | | static void convolve_2d_sr_ver_2tap_half_avx2( |
483 | | const int16_t *const im_block, const int32_t w, const int32_t h, |
484 | | const InterpFilterParams *const filter_params_y, const int32_t subpel_y_q4, |
485 | 10.9k | uint8_t *dst, const int32_t dst_stride) { |
486 | 10.9k | const int16_t *im = im_block; |
487 | 10.9k | int32_t y = h; |
488 | | |
489 | 10.9k | (void)filter_params_y; |
490 | 10.9k | (void)subpel_y_q4; |
491 | | |
492 | 10.9k | if (w == 2) { |
493 | 1.50k | __m128i s_32[2]; |
494 | | |
495 | 1.50k | s_32[0] = _mm_cvtsi32_si128(*(int32_t *)im); |
496 | | |
497 | 2.96k | do { |
498 | 2.96k | const __m128i res = xy_y_convolve_2tap_2x2_half_pel_sse2(im, s_32); |
499 | 2.96k | const __m128i r = xy_y_round_half_pel_sse2(res); |
500 | 2.96k | pack_store_2x2_sse2(r, dst, dst_stride); |
501 | 2.96k | im += 2 * 2; |
502 | 2.96k | dst += 2 * dst_stride; |
503 | 2.96k | y -= 2; |
504 | 2.96k | } while (y); |
505 | 9.48k | } else if (w == 4) { |
506 | 3.89k | __m128i s_64[2]; |
507 | | |
508 | 3.89k | s_64[0] = _mm_loadl_epi64((__m128i *)im); |
509 | | |
510 | 11.5k | do { |
511 | 11.5k | const __m128i res = xy_y_convolve_2tap_4x2_half_pel_sse2(im, s_64); |
512 | 11.5k | const __m128i r = xy_y_round_half_pel_sse2(res); |
513 | 11.5k | pack_store_4x2_sse2(r, dst, dst_stride); |
514 | 11.5k | im += 2 * 4; |
515 | 11.5k | dst += 2 * dst_stride; |
516 | 11.5k | y -= 2; |
517 | 11.5k | } while (y); |
518 | 5.59k | } else if (w == 8) { |
519 | 3.08k | __m128i s_128[2]; |
520 | | |
521 | 3.08k | s_128[0] = _mm_loadu_si128((__m128i *)im); |
522 | | |
523 | 11.7k | do { |
524 | 11.7k | const __m256i res = xy_y_convolve_2tap_8x2_half_pel_avx2(im, s_128); |
525 | 11.7k | const __m256i r = xy_y_round_half_pel_avx2(res); |
526 | 11.7k | pack_store_8x2_avx2(r, dst, dst_stride); |
527 | 11.7k | im += 2 * 8; |
528 | 11.7k | dst += 2 * dst_stride; |
529 | 11.7k | y -= 2; |
530 | 11.7k | } while (y); |
531 | 3.08k | } else if (w == 16) { |
532 | 1.43k | __m256i s_256[2], r[2]; |
533 | | |
534 | 1.43k | s_256[0] = _mm256_loadu_si256((__m256i *)im); |
535 | | |
536 | 9.33k | do { |
537 | 9.33k | xy_y_convolve_2tap_16x2_half_pel_avx2(im, s_256, r); |
538 | 9.33k | r[0] = xy_y_round_half_pel_avx2(r[0]); |
539 | 9.33k | r[1] = xy_y_round_half_pel_avx2(r[1]); |
540 | 9.33k | xy_y_pack_store_16x2_avx2(r[0], r[1], dst, dst_stride); |
541 | 9.33k | im += 2 * 16; |
542 | 9.33k | dst += 2 * dst_stride; |
543 | 9.33k | y -= 2; |
544 | 9.33k | } while (y); |
545 | 1.43k | } else if (w == 32) { |
546 | 730 | __m256i s_256[2][2]; |
547 | | |
548 | 730 | s_256[0][0] = _mm256_loadu_si256((__m256i *)(im + 0 * 16)); |
549 | 730 | s_256[0][1] = _mm256_loadu_si256((__m256i *)(im + 1 * 16)); |
550 | | |
551 | 9.68k | do { |
552 | 9.68k | xy_y_convolve_2tap_half_pel_32_all_avx2(im + 32, s_256[0], s_256[1], dst); |
553 | 9.68k | xy_y_convolve_2tap_half_pel_32_all_avx2(im + 2 * 32, s_256[1], s_256[0], |
554 | 9.68k | dst + dst_stride); |
555 | 9.68k | im += 2 * 32; |
556 | 9.68k | dst += 2 * dst_stride; |
557 | 9.68k | y -= 2; |
558 | 9.68k | } while (y); |
559 | 730 | } else if (w == 64) { |
560 | 268 | __m256i s_256[2][4]; |
561 | | |
562 | 268 | s_256[0][0] = _mm256_loadu_si256((__m256i *)(im + 0 * 16)); |
563 | 268 | s_256[0][1] = _mm256_loadu_si256((__m256i *)(im + 1 * 16)); |
564 | 268 | s_256[0][2] = _mm256_loadu_si256((__m256i *)(im + 2 * 16)); |
565 | 268 | s_256[0][3] = _mm256_loadu_si256((__m256i *)(im + 3 * 16)); |
566 | | |
567 | 5.79k | do { |
568 | 5.79k | xy_y_convolve_2tap_half_pel_32_all_avx2(im + 64, s_256[0] + 0, |
569 | 5.79k | s_256[1] + 0, dst); |
570 | 5.79k | xy_y_convolve_2tap_half_pel_32_all_avx2(im + 96, s_256[0] + 2, |
571 | 5.79k | s_256[1] + 2, dst + 32); |
572 | 5.79k | im += 2 * 64; |
573 | 5.79k | xy_y_convolve_2tap_half_pel_32_all_avx2(im, s_256[1] + 0, s_256[0] + 0, |
574 | 5.79k | dst + dst_stride); |
575 | 5.79k | xy_y_convolve_2tap_half_pel_32_all_avx2( |
576 | 5.79k | im + 32, s_256[1] + 2, s_256[0] + 2, dst + dst_stride + 32); |
577 | 5.79k | dst += 2 * dst_stride; |
578 | 5.79k | y -= 2; |
579 | 5.79k | } while (y); |
580 | 268 | } else { |
581 | 72 | __m256i s_256[2][8]; |
582 | | |
583 | 72 | assert(w == 128); |
584 | | |
585 | 72 | load_16bit_8rows_avx2(im, 16, s_256[0]); |
586 | | |
587 | 3.10k | do { |
588 | 3.10k | xy_y_convolve_2tap_half_pel_32_all_avx2(im + 128, s_256[0] + 0, |
589 | 3.10k | s_256[1] + 0, dst); |
590 | 3.10k | xy_y_convolve_2tap_half_pel_32_all_avx2(im + 160, s_256[0] + 2, |
591 | 3.10k | s_256[1] + 2, dst + 1 * 32); |
592 | 3.10k | xy_y_convolve_2tap_half_pel_32_all_avx2(im + 192, s_256[0] + 4, |
593 | 3.10k | s_256[1] + 4, dst + 2 * 32); |
594 | 3.10k | xy_y_convolve_2tap_half_pel_32_all_avx2(im + 224, s_256[0] + 6, |
595 | 3.10k | s_256[1] + 6, dst + 3 * 32); |
596 | 3.10k | im += 2 * 128; |
597 | 3.10k | xy_y_convolve_2tap_half_pel_32_all_avx2(im, s_256[1] + 0, s_256[0] + 0, |
598 | 3.10k | dst + dst_stride); |
599 | 3.10k | xy_y_convolve_2tap_half_pel_32_all_avx2( |
600 | 3.10k | im + 32, s_256[1] + 2, s_256[0] + 2, dst + dst_stride + 1 * 32); |
601 | 3.10k | xy_y_convolve_2tap_half_pel_32_all_avx2( |
602 | 3.10k | im + 64, s_256[1] + 4, s_256[0] + 4, dst + dst_stride + 2 * 32); |
603 | 3.10k | xy_y_convolve_2tap_half_pel_32_all_avx2( |
604 | 3.10k | im + 96, s_256[1] + 6, s_256[0] + 6, dst + dst_stride + 3 * 32); |
605 | 3.10k | dst += 2 * dst_stride; |
606 | 3.10k | y -= 2; |
607 | 3.10k | } while (y); |
608 | 72 | } |
609 | 10.9k | } |
610 | | |
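Why a dedicated half-pel path exists: at subpel_y_q4 == 8 both bilinear taps are 64, so the 2-tap vertical filter degenerates to an average of two im_block rows and the multiplies can be dropped. A scalar equivalent for one output sample, under the same reference arithmetic as the sketches above (name illustrative):

    #include <stdint.h>

    static uint8_t ver_2tap_half_sample_sketch(int16_t above, int16_t below) {
      const int offset_bits = 8 + 2 * 7 - 3;  // 19
      const int32_t sum = (1 << offset_bits) + 64 * (above + below);
      const int32_t res = ((sum + (1 << 10)) >> 11)  // round_1 == 11
                          - ((1 << (offset_bits - 11)) +
                             (1 << (offset_bits - 12)));
      return (uint8_t)(res < 0 ? 0 : (res > 255 ? 255 : res));
    }
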
611 | | static void convolve_2d_sr_ver_4tap_avx2( |
612 | | const int16_t *const im_block, const int32_t w, const int32_t h, |
613 | | const InterpFilterParams *const filter_params_y, const int32_t subpel_y_q4, |
614 | 522k | uint8_t *dst, const int32_t dst_stride) { |
615 | 522k | const int16_t *im = im_block; |
616 | 522k | int32_t y = h; |
617 | | |
618 | 522k | if (w == 2) { |
619 | 56.3k | __m128i coeffs_128[2], s_32[4], ss_128[2]; |
620 | | |
621 | 56.3k | prepare_coeffs_4tap_sse2(filter_params_y, subpel_y_q4, coeffs_128); |
622 | | |
623 | 56.3k | s_32[0] = _mm_cvtsi32_si128(*(int32_t *)(im + 0 * 2)); |
624 | 56.3k | s_32[1] = _mm_cvtsi32_si128(*(int32_t *)(im + 1 * 2)); |
625 | 56.3k | s_32[2] = _mm_cvtsi32_si128(*(int32_t *)(im + 2 * 2)); |
626 | | |
627 | 56.3k | const __m128i src01 = _mm_unpacklo_epi32(s_32[0], s_32[1]); |
628 | 56.3k | const __m128i src12 = _mm_unpacklo_epi32(s_32[1], s_32[2]); |
629 | | |
630 | 56.3k | ss_128[0] = _mm_unpacklo_epi16(src01, src12); |
631 | | |
632 | 98.2k | do { |
633 | 98.2k | const __m128i res = |
634 | 98.2k | xy_y_convolve_4tap_2x2_sse2(im, s_32, ss_128, coeffs_128); |
635 | 98.2k | xy_y_round_store_2x2_sse2(res, dst, dst_stride); |
636 | 98.2k | im += 2 * 2; |
637 | 98.2k | dst += 2 * dst_stride; |
638 | 98.2k | y -= 2; |
639 | 98.2k | } while (y); |
640 | 465k | } else { |
641 | 465k | __m256i coeffs_256[2]; |
642 | | |
643 | 465k | prepare_coeffs_4tap_avx2(filter_params_y, subpel_y_q4, coeffs_256); |
644 | | |
645 | 465k | if (w == 4) { |
646 | 235k | __m128i s_64[4]; |
647 | 235k | __m256i s_256[2], ss_256[2]; |
648 | | |
649 | 235k | s_64[0] = _mm_loadl_epi64((__m128i *)(im + 0 * 4)); |
650 | 235k | s_64[1] = _mm_loadl_epi64((__m128i *)(im + 1 * 4)); |
651 | 235k | s_64[2] = _mm_loadl_epi64((__m128i *)(im + 2 * 4)); |
652 | | |
653 | | // Load lines a and b: line a goes to the lower 128 bits, line b to the upper 128 bits (see the pairing sketch after this function). |
654 | 235k | s_256[0] = _mm256_setr_m128i(s_64[0], s_64[1]); |
655 | 235k | s_256[1] = _mm256_setr_m128i(s_64[1], s_64[2]); |
656 | | |
657 | 235k | ss_256[0] = _mm256_unpacklo_epi16(s_256[0], s_256[1]); |
658 | | |
659 | 462k | do { |
660 | 462k | const __m256i res = |
661 | 462k | xy_y_convolve_4tap_4x2_avx2(im, s_64, ss_256, coeffs_256); |
662 | 462k | xy_y_round_store_4x2_avx2(res, dst, dst_stride); |
663 | 462k | im += 2 * 4; |
664 | 462k | dst += 2 * dst_stride; |
665 | 462k | y -= 2; |
666 | 462k | } while (y); |
667 | 235k | } else if (w == 8) { |
668 | 159k | __m256i s_256[4], r[2]; |
669 | | |
670 | 159k | s_256[0] = _mm256_loadu_si256((__m256i *)(im + 0 * 8)); |
671 | 159k | s_256[1] = _mm256_loadu_si256((__m256i *)(im + 1 * 8)); |
672 | | |
673 | 159k | if (subpel_y_q4 != 8) { |
674 | 132k | __m256i ss_256[4]; |
675 | | |
676 | 132k | ss_256[0] = _mm256_unpacklo_epi16(s_256[0], s_256[1]); |
677 | 132k | ss_256[2] = _mm256_unpackhi_epi16(s_256[0], s_256[1]); |
678 | | |
679 | 274k | do { |
680 | 274k | xy_y_convolve_4tap_8x2_avx2(im, ss_256, coeffs_256, r); |
681 | 274k | xy_y_round_store_8x2_avx2(r, dst, dst_stride); |
682 | 274k | im += 2 * 8; |
683 | 274k | dst += 2 * dst_stride; |
684 | 274k | y -= 2; |
685 | 274k | } while (y); |
686 | 132k | } else { |
687 | 47.5k | do { |
688 | 47.5k | xy_y_convolve_4tap_8x2_half_pel_avx2(im, coeffs_256, s_256, r); |
689 | 47.5k | xy_y_round_store_8x2_avx2(r, dst, dst_stride); |
690 | 47.5k | im += 2 * 8; |
691 | 47.5k | dst += 2 * dst_stride; |
692 | 47.5k | y -= 2; |
693 | 47.5k | } while (y); |
694 | 26.7k | } |
695 | 159k | } else if (w == 16) { |
696 | 65.0k | __m256i s_256[5]; |
697 | | |
698 | 65.0k | s_256[0] = _mm256_loadu_si256((__m256i *)(im + 0 * 16)); |
699 | 65.0k | s_256[1] = _mm256_loadu_si256((__m256i *)(im + 1 * 16)); |
700 | 65.0k | s_256[2] = _mm256_loadu_si256((__m256i *)(im + 2 * 16)); |
701 | | |
702 | 65.0k | if (subpel_y_q4 != 8) { |
703 | 52.9k | __m256i ss_256[4], tt_256[4], r[4]; |
704 | | |
705 | 52.9k | ss_256[0] = _mm256_unpacklo_epi16(s_256[0], s_256[1]); |
706 | 52.9k | ss_256[2] = _mm256_unpackhi_epi16(s_256[0], s_256[1]); |
707 | | |
708 | 52.9k | tt_256[0] = _mm256_unpacklo_epi16(s_256[1], s_256[2]); |
709 | 52.9k | tt_256[2] = _mm256_unpackhi_epi16(s_256[1], s_256[2]); |
710 | | |
711 | 144k | do { |
712 | 144k | xy_y_convolve_4tap_16x2_avx2(im, s_256, ss_256, tt_256, coeffs_256, |
713 | 144k | r); |
714 | 144k | xy_y_round_store_16x2_avx2(r, dst, dst_stride); |
715 | 144k | im += 2 * 16; |
716 | 144k | dst += 2 * dst_stride; |
717 | 144k | y -= 2; |
718 | 144k | } while (y); |
719 | 52.9k | } else { |
720 | 12.1k | __m256i r[4]; |
721 | | |
722 | 24.2k | do { |
723 | 24.2k | xy_y_convolve_4tap_16x2_half_pelavx2(im, s_256, coeffs_256, r); |
724 | 24.2k | xy_y_round_store_16x2_avx2(r, dst, dst_stride); |
725 | 24.2k | im += 2 * 16; |
726 | 24.2k | dst += 2 * dst_stride; |
727 | 24.2k | y -= 2; |
728 | 24.2k | } while (y); |
729 | 12.1k | } |
730 | 65.0k | } else { |
731 | | /* This is a special case for OBMC. According to the AV1 spec, 4-tap |
732 | | filters are not used for width (w) > 16, but OBMC reduces the block |
733 | | to Wx(h/2) when predicting from the above block; e.g., a 32x8 block |
734 | | becomes 32x4 for OBMC, so w > 16 can reach this 4-tap path. */ |
735 | 5.60k | int32_t x = 0; |
736 | | |
737 | 5.60k | assert(!(w % 32)); |
738 | | |
739 | 5.66k | __m256i s_256[2][4], ss_256[2][4], tt_256[2][4], r0[4], r1[4]; |
740 | 6.90k | do { |
741 | 6.90k | const int16_t *s = im + x; |
742 | 6.90k | uint8_t *d = dst + x; |
743 | | |
744 | 6.90k | loadu_unpack_16bit_3rows_avx2(s, w, s_256[0], ss_256[0], tt_256[0]); |
745 | 6.90k | loadu_unpack_16bit_3rows_avx2(s + 16, w, s_256[1], ss_256[1], |
746 | 6.90k | tt_256[1]); |
747 | | |
748 | 6.90k | y = h; |
749 | 104k | do { |
750 | 104k | xy_y_convolve_4tap_32x2_avx2(s, w, s_256[0], ss_256[0], tt_256[0], |
751 | 104k | coeffs_256, r0); |
752 | 104k | xy_y_convolve_4tap_32x2_avx2(s + 16, w, s_256[1], ss_256[1], |
753 | 104k | tt_256[1], coeffs_256, r1); |
754 | | |
755 | 104k | xy_y_round_store_32_avx2(r0 + 0, r1 + 0, d); |
756 | 104k | xy_y_round_store_32_avx2(r0 + 2, r1 + 2, d + dst_stride); |
757 | | |
758 | 104k | s += 2 * w; |
759 | 104k | d += 2 * dst_stride; |
760 | 104k | y -= 2; |
761 | 104k | } while (y); |
762 | | |
763 | 6.90k | x += 32; |
764 | 6.90k | } while (x < w); |
765 | 5.66k | } |
766 | 465k | } |
767 | 522k | } |
768 | | |
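The "line a / line b" comment above refers to this pairing trick: the 4-wide vertical kernels keep two output rows in flight per 256-bit register, so one multiply-add covers both rows. A sketch of the pairing built with _mm256_setr_m128i (helper name illustrative):

    #include <immintrin.h>
    #include <stdint.h>

    static inline __m256i pair_rows_4x16_sketch(const int16_t *row_a,
                                                const int16_t *row_b) {
      const __m128i a = _mm_loadl_epi64((const __m128i *)row_a);  // 4 x int16
      const __m128i b = _mm_loadl_epi64((const __m128i *)row_b);  // 4 x int16
      return _mm256_setr_m128i(a, b);  // a -> lanes 0..3, b -> lanes 8..11
    }
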
769 | | static void convolve_2d_sr_ver_6tap_avx2( |
770 | | const int16_t *const im_block, const int32_t w, const int32_t h, |
771 | | const InterpFilterParams *const filter_params_y, const int32_t subpel_y_q4, |
772 | 395k | uint8_t *dst, const int32_t dst_stride) { |
773 | 395k | const int16_t *im = im_block; |
774 | 395k | int32_t y; |
775 | | |
776 | 395k | if (w == 2) { |
777 | 32.0k | __m128i coeffs_128[3], s_32[6], ss_128[3]; |
778 | | |
779 | 32.0k | prepare_coeffs_6tap_ssse3(filter_params_y, subpel_y_q4, coeffs_128); |
780 | | |
781 | 32.0k | s_32[0] = _mm_cvtsi32_si128(*(int32_t *)(im + 0 * 2)); |
782 | 32.0k | s_32[1] = _mm_cvtsi32_si128(*(int32_t *)(im + 1 * 2)); |
783 | 32.0k | s_32[2] = _mm_cvtsi32_si128(*(int32_t *)(im + 2 * 2)); |
784 | 32.0k | s_32[3] = _mm_cvtsi32_si128(*(int32_t *)(im + 3 * 2)); |
785 | 32.0k | s_32[4] = _mm_cvtsi32_si128(*(int32_t *)(im + 4 * 2)); |
786 | | |
787 | 32.0k | const __m128i src01 = _mm_unpacklo_epi32(s_32[0], s_32[1]); |
788 | 32.0k | const __m128i src12 = _mm_unpacklo_epi32(s_32[1], s_32[2]); |
789 | 32.0k | const __m128i src23 = _mm_unpacklo_epi32(s_32[2], s_32[3]); |
790 | 32.0k | const __m128i src34 = _mm_unpacklo_epi32(s_32[3], s_32[4]); |
791 | | |
792 | 32.0k | ss_128[0] = _mm_unpacklo_epi16(src01, src12); |
793 | 32.0k | ss_128[1] = _mm_unpacklo_epi16(src23, src34); |
794 | | |
795 | 32.0k | y = h; |
796 | 128k | do { |
797 | 128k | const __m128i res = |
798 | 128k | xy_y_convolve_6tap_2x2_sse2(im, s_32, ss_128, coeffs_128); |
799 | 128k | xy_y_round_store_2x2_sse2(res, dst, dst_stride); |
800 | 128k | im += 2 * 2; |
801 | 128k | dst += 2 * dst_stride; |
802 | 128k | y -= 2; |
803 | 128k | } while (y); |
804 | 363k | } else { |
805 | 363k | __m256i coeffs_256[3]; |
806 | | |
807 | 363k | prepare_coeffs_6tap_avx2(filter_params_y, subpel_y_q4, coeffs_256); |
808 | | |
809 | 363k | if (w == 4) { |
810 | 114k | __m128i s_64[6]; |
811 | 114k | __m256i s_256[6], ss_256[3]; |
812 | | |
813 | 114k | s_64[0] = _mm_loadl_epi64((__m128i *)(im + 0 * 4)); |
814 | 114k | s_64[1] = _mm_loadl_epi64((__m128i *)(im + 1 * 4)); |
815 | 114k | s_64[2] = _mm_loadl_epi64((__m128i *)(im + 2 * 4)); |
816 | 114k | s_64[3] = _mm_loadl_epi64((__m128i *)(im + 3 * 4)); |
817 | 114k | s_64[4] = _mm_loadl_epi64((__m128i *)(im + 4 * 4)); |
818 | | |
819 | | // Load lines a and b. Line a to lower 128, line b to upper 128 |
820 | 114k | s_256[0] = _mm256_setr_m128i(s_64[0], s_64[1]); |
821 | 114k | s_256[1] = _mm256_setr_m128i(s_64[1], s_64[2]); |
822 | 114k | s_256[2] = _mm256_setr_m128i(s_64[2], s_64[3]); |
823 | 114k | s_256[3] = _mm256_setr_m128i(s_64[3], s_64[4]); |
824 | | |
825 | 114k | ss_256[0] = _mm256_unpacklo_epi16(s_256[0], s_256[1]); |
826 | 114k | ss_256[1] = _mm256_unpacklo_epi16(s_256[2], s_256[3]); |
827 | | |
828 | 114k | y = h; |
829 | 601k | do { |
830 | 601k | const __m256i res = |
831 | 601k | xy_y_convolve_6tap_4x2_avx2(im, s_64, ss_256, coeffs_256); |
832 | 601k | xy_y_round_store_4x2_avx2(res, dst, dst_stride); |
833 | 601k | im += 2 * 4; |
834 | 601k | dst += 2 * dst_stride; |
835 | 601k | y -= 2; |
836 | 601k | } while (y); |
837 | 248k | } else if (w == 8) { |
838 | 131k | __m256i s_256[6], r[2]; |
839 | | |
840 | 131k | s_256[0] = _mm256_loadu_si256((__m256i *)(im + 0 * 8)); |
841 | 131k | s_256[1] = _mm256_loadu_si256((__m256i *)(im + 1 * 8)); |
842 | 131k | s_256[2] = _mm256_loadu_si256((__m256i *)(im + 2 * 8)); |
843 | 131k | s_256[3] = _mm256_loadu_si256((__m256i *)(im + 3 * 8)); |
844 | 131k | y = h; |
845 | | |
846 | 131k | if (subpel_y_q4 != 8) { |
847 | 101k | __m256i ss_256[6]; |
848 | | |
849 | 101k | ss_256[0] = _mm256_unpacklo_epi16(s_256[0], s_256[1]); |
850 | 101k | ss_256[1] = _mm256_unpacklo_epi16(s_256[2], s_256[3]); |
851 | | |
852 | 101k | ss_256[3] = _mm256_unpackhi_epi16(s_256[0], s_256[1]); |
853 | 101k | ss_256[4] = _mm256_unpackhi_epi16(s_256[2], s_256[3]); |
854 | | |
855 | 552k | do { |
856 | 552k | xy_y_convolve_6tap_8x2_avx2(im, ss_256, coeffs_256, r); |
857 | 552k | xy_y_round_store_8x2_avx2(r, dst, dst_stride); |
858 | 552k | im += 2 * 8; |
859 | 552k | dst += 2 * dst_stride; |
860 | 552k | y -= 2; |
861 | 552k | } while (y); |
862 | 101k | } else { |
863 | 171k | do { |
864 | 171k | xy_y_convolve_6tap_8x2_half_pel_avx2(im, coeffs_256, s_256, r); |
865 | 171k | xy_y_round_store_8x2_avx2(r, dst, dst_stride); |
866 | 171k | im += 2 * 8; |
867 | 171k | dst += 2 * dst_stride; |
868 | 171k | y -= 2; |
869 | 171k | } while (y); |
870 | 30.4k | } |
871 | 131k | } else if (w == 16) { |
872 | 80.4k | __m256i s_256[6]; |
873 | | |
874 | 80.4k | s_256[0] = _mm256_loadu_si256((__m256i *)(im + 0 * 16)); |
875 | 80.4k | s_256[1] = _mm256_loadu_si256((__m256i *)(im + 1 * 16)); |
876 | 80.4k | s_256[2] = _mm256_loadu_si256((__m256i *)(im + 2 * 16)); |
877 | 80.4k | s_256[3] = _mm256_loadu_si256((__m256i *)(im + 3 * 16)); |
878 | 80.4k | s_256[4] = _mm256_loadu_si256((__m256i *)(im + 4 * 16)); |
879 | 80.4k | y = h; |
880 | | |
881 | 80.4k | if (subpel_y_q4 != 8) { |
882 | 60.1k | __m256i ss_256[6], tt_256[6], r[4]; |
883 | | |
884 | 60.1k | ss_256[0] = _mm256_unpacklo_epi16(s_256[0], s_256[1]); |
885 | 60.1k | ss_256[1] = _mm256_unpacklo_epi16(s_256[2], s_256[3]); |
886 | 60.1k | ss_256[3] = _mm256_unpackhi_epi16(s_256[0], s_256[1]); |
887 | 60.1k | ss_256[4] = _mm256_unpackhi_epi16(s_256[2], s_256[3]); |
888 | | |
889 | 60.1k | tt_256[0] = _mm256_unpacklo_epi16(s_256[1], s_256[2]); |
890 | 60.1k | tt_256[1] = _mm256_unpacklo_epi16(s_256[3], s_256[4]); |
891 | 60.1k | tt_256[3] = _mm256_unpackhi_epi16(s_256[1], s_256[2]); |
892 | 60.1k | tt_256[4] = _mm256_unpackhi_epi16(s_256[3], s_256[4]); |
893 | | |
894 | 432k | do { |
895 | 432k | xy_y_convolve_6tap_16x2_avx2(im, 16, s_256, ss_256, tt_256, |
896 | 432k | coeffs_256, r); |
897 | 432k | xy_y_round_store_16x2_avx2(r, dst, dst_stride); |
898 | 432k | im += 2 * 16; |
899 | 432k | dst += 2 * dst_stride; |
900 | 432k | y -= 2; |
901 | 432k | } while (y); |
902 | 60.1k | } else { |
903 | 20.2k | __m256i ss_256[4], r[4]; |
904 | | |
905 | 148k | do { |
906 | 148k | xy_y_convolve_6tap_16x2_half_pel_avx2(im, 16, s_256, ss_256, |
907 | 148k | coeffs_256, r); |
908 | 148k | xy_y_round_store_16x2_avx2(r, dst, dst_stride); |
909 | | |
910 | 148k | im += 2 * 16; |
911 | 148k | dst += 2 * dst_stride; |
912 | 148k | y -= 2; |
913 | 148k | } while (y); |
914 | 20.2k | } |
915 | 80.4k | } else { |
916 | 36.7k | int32_t x = 0; |
917 | | |
918 | 36.7k | assert(!(w % 32)); |
919 | | |
920 | 36.8k | __m256i s_256[2][6], ss_256[2][6], tt_256[2][6], r0[4], r1[4]; |
921 | | |
922 | 45.5k | do { |
923 | 45.5k | const int16_t *s = im + x; |
924 | 45.5k | uint8_t *d = dst + x; |
925 | | |
926 | 45.5k | loadu_unpack_16bit_5rows_avx2(s, w, s_256[0], ss_256[0], tt_256[0]); |
927 | 45.5k | loadu_unpack_16bit_5rows_avx2(s + 16, w, s_256[1], ss_256[1], |
928 | 45.5k | tt_256[1]); |
929 | | |
930 | 45.5k | y = h; |
931 | 825k | do { |
932 | 825k | xy_y_convolve_6tap_16x2_avx2(s, w, s_256[0], ss_256[0], tt_256[0], |
933 | 825k | coeffs_256, r0); |
934 | 825k | xy_y_convolve_6tap_16x2_avx2(s + 16, w, s_256[1], ss_256[1], |
935 | 825k | tt_256[1], coeffs_256, r1); |
936 | | |
937 | 825k | xy_y_round_store_32_avx2(r0 + 0, r1 + 0, d); |
938 | 825k | xy_y_round_store_32_avx2(r0 + 2, r1 + 2, d + dst_stride); |
939 | | |
940 | 825k | s += 2 * w; |
941 | 825k | d += 2 * dst_stride; |
942 | 825k | y -= 2; |
943 | 825k | } while (y); |
944 | | |
945 | 45.5k | x += 32; |
946 | 45.5k | } while (x < w); |
947 | 36.8k | } |
948 | 363k | } |
949 | 395k | } |
950 | | |
951 | | static void convolve_2d_sr_ver_8tap_avx2( |
952 | | const int16_t *const im_block, const int32_t w, const int32_t h, |
953 | | const InterpFilterParams *const filter_params_y, const int32_t subpel_y_q4, |
954 | 27.3k | uint8_t *dst, const int32_t dst_stride) { |
955 | 27.3k | const int16_t *im = im_block; |
956 | 27.3k | int32_t y; |
957 | | |
958 | 27.3k | if (w == 2) { |
959 | 1.65k | __m128i coeffs_128[4], s_32[8], ss_128[4]; |
960 | | |
961 | 1.65k | prepare_coeffs_8tap_sse2(filter_params_y, subpel_y_q4, coeffs_128); |
962 | | |
963 | 1.65k | s_32[0] = _mm_cvtsi32_si128(*(int32_t *)(im + 0 * 2)); |
964 | 1.65k | s_32[1] = _mm_cvtsi32_si128(*(int32_t *)(im + 1 * 2)); |
965 | 1.65k | s_32[2] = _mm_cvtsi32_si128(*(int32_t *)(im + 2 * 2)); |
966 | 1.65k | s_32[3] = _mm_cvtsi32_si128(*(int32_t *)(im + 3 * 2)); |
967 | 1.65k | s_32[4] = _mm_cvtsi32_si128(*(int32_t *)(im + 4 * 2)); |
968 | 1.65k | s_32[5] = _mm_cvtsi32_si128(*(int32_t *)(im + 5 * 2)); |
969 | 1.65k | s_32[6] = _mm_cvtsi32_si128(*(int32_t *)(im + 6 * 2)); |
970 | | |
971 | 1.65k | const __m128i src01 = _mm_unpacklo_epi32(s_32[0], s_32[1]); |
972 | 1.65k | const __m128i src12 = _mm_unpacklo_epi32(s_32[1], s_32[2]); |
973 | 1.65k | const __m128i src23 = _mm_unpacklo_epi32(s_32[2], s_32[3]); |
974 | 1.65k | const __m128i src34 = _mm_unpacklo_epi32(s_32[3], s_32[4]); |
975 | 1.65k | const __m128i src45 = _mm_unpacklo_epi32(s_32[4], s_32[5]); |
976 | 1.65k | const __m128i src56 = _mm_unpacklo_epi32(s_32[5], s_32[6]); |
977 | | |
978 | 1.65k | ss_128[0] = _mm_unpacklo_epi16(src01, src12); |
979 | 1.65k | ss_128[1] = _mm_unpacklo_epi16(src23, src34); |
980 | 1.65k | ss_128[2] = _mm_unpacklo_epi16(src45, src56); |
981 | | |
982 | 1.65k | y = h; |
983 | 6.60k | do { |
984 | 6.60k | const __m128i res = |
985 | 6.60k | xy_y_convolve_8tap_2x2_sse2(im, s_32, ss_128, coeffs_128); |
986 | 6.60k | xy_y_round_store_2x2_sse2(res, dst, dst_stride); |
987 | 6.60k | im += 2 * 2; |
988 | 6.60k | dst += 2 * dst_stride; |
989 | 6.60k | y -= 2; |
990 | 6.60k | } while (y); |
991 | 25.7k | } else { |
992 | 25.7k | __m256i coeffs_256[4]; |
993 | | |
994 | 25.7k | prepare_coeffs_8tap_avx2(filter_params_y, subpel_y_q4, coeffs_256); |
995 | | |
996 | 25.7k | if (w == 4) { |
997 | 6.00k | __m128i s_64[8]; |
998 | 6.00k | __m256i s_256[8], ss_256[4]; |
999 | | |
1000 | 6.00k | s_64[0] = _mm_loadl_epi64((__m128i *)(im + 0 * 4)); |
1001 | 6.00k | s_64[1] = _mm_loadl_epi64((__m128i *)(im + 1 * 4)); |
1002 | 6.00k | s_64[2] = _mm_loadl_epi64((__m128i *)(im + 2 * 4)); |
1003 | 6.00k | s_64[3] = _mm_loadl_epi64((__m128i *)(im + 3 * 4)); |
1004 | 6.00k | s_64[4] = _mm_loadl_epi64((__m128i *)(im + 4 * 4)); |
1005 | 6.00k | s_64[5] = _mm_loadl_epi64((__m128i *)(im + 5 * 4)); |
1006 | 6.00k | s_64[6] = _mm_loadl_epi64((__m128i *)(im + 6 * 4)); |
1007 | | |
1008 | | // Load lines a and b: line a goes to the lower 128 bits, line b to the upper 128 bits. |
1009 | 6.00k | s_256[0] = _mm256_setr_m128i(s_64[0], s_64[1]); |
1010 | 6.00k | s_256[1] = _mm256_setr_m128i(s_64[1], s_64[2]); |
1011 | 6.00k | s_256[2] = _mm256_setr_m128i(s_64[2], s_64[3]); |
1012 | 6.00k | s_256[3] = _mm256_setr_m128i(s_64[3], s_64[4]); |
1013 | 6.00k | s_256[4] = _mm256_setr_m128i(s_64[4], s_64[5]); |
1014 | 6.00k | s_256[5] = _mm256_setr_m128i(s_64[5], s_64[6]); |
1015 | | |
1016 | 6.00k | ss_256[0] = _mm256_unpacklo_epi16(s_256[0], s_256[1]); |
1017 | 6.00k | ss_256[1] = _mm256_unpacklo_epi16(s_256[2], s_256[3]); |
1018 | 6.00k | ss_256[2] = _mm256_unpacklo_epi16(s_256[4], s_256[5]); |
1019 | | |
1020 | 6.00k | y = h; |
1021 | 32.0k | do { |
1022 | 32.0k | const __m256i res = |
1023 | 32.0k | xy_y_convolve_8tap_4x2_avx2(im, s_64, ss_256, coeffs_256); |
1024 | 32.0k | xy_y_round_store_4x2_avx2(res, dst, dst_stride); |
1025 | 32.0k | im += 2 * 4; |
1026 | 32.0k | dst += 2 * dst_stride; |
1027 | 32.0k | y -= 2; |
1028 | 32.0k | } while (y); |
1029 | 19.7k | } else if (w == 8) { |
1030 | 6.42k | __m256i s_256[8], r[2]; |
1031 | | |
1032 | 6.42k | s_256[0] = _mm256_loadu_si256((__m256i *)(im + 0 * 8)); |
1033 | 6.42k | s_256[1] = _mm256_loadu_si256((__m256i *)(im + 1 * 8)); |
1034 | 6.42k | s_256[2] = _mm256_loadu_si256((__m256i *)(im + 2 * 8)); |
1035 | 6.42k | s_256[3] = _mm256_loadu_si256((__m256i *)(im + 3 * 8)); |
1036 | 6.42k | s_256[4] = _mm256_loadu_si256((__m256i *)(im + 4 * 8)); |
1037 | 6.42k | s_256[5] = _mm256_loadu_si256((__m256i *)(im + 5 * 8)); |
1038 | 6.42k | y = h; |
1039 | | |
1040 | 6.42k | if (subpel_y_q4 != 8) { |
1041 | 4.50k | __m256i ss_256[8]; |
1042 | | |
1043 | 4.50k | convolve_8tap_unpack_avx2(s_256, ss_256); |
1044 | | |
1045 | 25.9k | do { |
1046 | 25.9k | xy_y_convolve_8tap_8x2_avx2(im, ss_256, coeffs_256, r); |
1047 | 25.9k | xy_y_round_store_8x2_avx2(r, dst, dst_stride); |
1048 | 25.9k | im += 2 * 8; |
1049 | 25.9k | dst += 2 * dst_stride; |
1050 | 25.9k | y -= 2; |
1051 | 25.9k | } while (y); |
1052 | 4.50k | } else { |
1053 | 11.2k | do { |
1054 | 11.2k | xy_y_convolve_8tap_8x2_half_pel_avx2(im, coeffs_256, s_256, r); |
1055 | 11.2k | xy_y_round_store_8x2_avx2(r, dst, dst_stride); |
1056 | 11.2k | im += 2 * 8; |
1057 | 11.2k | dst += 2 * dst_stride; |
1058 | 11.2k | y -= 2; |
1059 | 11.2k | } while (y); |
1060 | 1.92k | } |
1061 | 13.2k | } else if (w == 16) { |
1062 | 4.26k | __m256i s_256[8], r[4]; |
1063 | | |
1064 | 4.26k | load_16bit_7rows_avx2(im, 16, s_256); |
1065 | 4.26k | y = h; |
1066 | | |
1067 | 4.26k | if (subpel_y_q4 != 8) { |
1068 | 2.76k | __m256i ss_256[8], tt_256[8]; |
1069 | | |
1070 | 2.76k | convolve_8tap_unpack_avx2(s_256, ss_256); |
1071 | 2.76k | convolve_8tap_unpack_avx2(s_256 + 1, tt_256); |
1072 | | |
1073 | 19.9k | do { |
1074 | 19.9k | xy_y_convolve_8tap_16x2_avx2(im, 16, coeffs_256, s_256, ss_256, |
1075 | 19.9k | tt_256, r); |
1076 | 19.9k | xy_y_round_store_16x2_avx2(r, dst, dst_stride); |
1077 | | |
1078 | 19.9k | im += 2 * 16; |
1079 | 19.9k | dst += 2 * dst_stride; |
1080 | 19.9k | y -= 2; |
1081 | 19.9k | } while (y); |
1082 | 2.76k | } else { |
1083 | 13.2k | do { |
1084 | 13.2k | xy_y_convolve_8tap_16x2_half_pel_avx2(im, 16, coeffs_256, s_256, r); |
1085 | 13.2k | xy_y_round_store_16x2_avx2(r, dst, dst_stride); |
1086 | | |
1087 | 13.2k | im += 2 * 16; |
1088 | 13.2k | dst += 2 * dst_stride; |
1089 | 13.2k | y -= 2; |
1090 | 13.2k | } while (y); |
1091 | 1.49k | } |
1092 | 9.03k | } else { |
1093 | 9.03k | int32_t x = 0; |
1094 | 9.03k | __m256i s_256[2][8], r0[4], r1[4]; |
1095 | | |
1096 | 9.03k | assert(!(w % 32)); |
1097 | | |
1098 | 9.03k | __m256i ss_256[2][8], tt_256[2][8]; |
1099 | | |
1100 | 12.0k | do { |
1101 | 12.0k | const int16_t *s = im + x; |
1102 | 12.0k | uint8_t *d = dst + x; |
1103 | | |
1104 | 12.0k | load_16bit_7rows_avx2(s, w, s_256[0]); |
1105 | 12.0k | convolve_8tap_unpack_avx2(s_256[0], ss_256[0]); |
1106 | 12.0k | convolve_8tap_unpack_avx2(s_256[0] + 1, tt_256[0]); |
1107 | | |
1108 | 12.0k | load_16bit_7rows_avx2(s + 16, w, s_256[1]); |
1109 | 12.0k | convolve_8tap_unpack_avx2(s_256[1], ss_256[1]); |
1110 | 12.0k | convolve_8tap_unpack_avx2(s_256[1] + 1, tt_256[1]); |
1111 | | |
1112 | 12.0k | y = h; |
1113 | 180k | do { |
1114 | 180k | xy_y_convolve_8tap_16x2_avx2(s, w, coeffs_256, s_256[0], ss_256[0], |
1115 | 180k | tt_256[0], r0); |
1116 | 180k | xy_y_convolve_8tap_16x2_avx2(s + 16, w, coeffs_256, s_256[1], |
1117 | 180k | ss_256[1], tt_256[1], r1); |
1118 | 180k | xy_y_round_store_32_avx2(r0 + 0, r1 + 0, d); |
1119 | 180k | xy_y_round_store_32_avx2(r0 + 2, r1 + 2, d + dst_stride); |
1120 | | |
1121 | 180k | s += 2 * w; |
1122 | 180k | d += 2 * dst_stride; |
1123 | 180k | y -= 2; |
1124 | 180k | } while (y); |
1125 | | |
1126 | 12.0k | x += 32; |
1127 | 12.0k | } while (x < w); |
1128 | 9.03k | } |
1129 | 25.7k | } |
1130 | 27.3k | } |
1131 | | |
1132 | | typedef void (*Convolve2dSrHorTapFunc)( |
1133 | | const uint8_t *const src, const int32_t src_stride, const int32_t w, |
1134 | | const int32_t h, const InterpFilterParams *const filter_params_x, |
1135 | | const int32_t subpel_x_q4, int16_t *const im_block); |
1136 | | |
1137 | | typedef void (*Convolve2dSrVerTapFunc)( |
1138 | | const int16_t *const im_block, const int32_t w, const int32_t h, |
1139 | | const InterpFilterParams *const filter_params_y, const int32_t subpel_y_q4, |
1140 | | uint8_t *dst, const int32_t dst_stride); |
1141 | | |
1142 | | static AOM_FORCE_INLINE void av1_convolve_2d_sr_specialized_avx2( |
1143 | | const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, |
1144 | | int32_t w, int32_t h, const InterpFilterParams *filter_params_x, |
1145 | | const InterpFilterParams *filter_params_y, const int32_t subpel_x_q4, |
1146 | 989k | const int32_t subpel_y_q4, ConvolveParams *conv_params) { |
1147 | 989k | static const Convolve2dSrHorTapFunc |
1148 | 989k | convolve_2d_sr_hor_tap_func_table[MAX_FILTER_TAP + 1] = { |
1149 | 989k | NULL, |
1150 | 989k | NULL, |
1151 | 989k | convolve_2d_sr_hor_2tap_avx2, |
1152 | 989k | NULL, |
1153 | 989k | convolve_2d_sr_hor_4tap_ssse3, |
1154 | 989k | NULL, |
1155 | 989k | convolve_2d_sr_hor_6tap_avx2, |
1156 | 989k | NULL, |
1157 | 989k | convolve_2d_sr_hor_8tap_avx2 |
1158 | 989k | }; |
1159 | 989k | static const Convolve2dSrVerTapFunc |
1160 | 989k | convolve_2d_sr_ver_tap_func_table[MAX_FILTER_TAP + 1] = { |
1161 | 989k | NULL, |
1162 | 989k | convolve_2d_sr_ver_2tap_half_avx2, |
1163 | 989k | convolve_2d_sr_ver_2tap_avx2, |
1164 | 989k | convolve_2d_sr_ver_4tap_avx2, |
1165 | 989k | convolve_2d_sr_ver_4tap_avx2, |
1166 | 989k | convolve_2d_sr_ver_6tap_avx2, |
1167 | 989k | convolve_2d_sr_ver_6tap_avx2, |
1168 | 989k | convolve_2d_sr_ver_8tap_avx2, |
1169 | 989k | convolve_2d_sr_ver_8tap_avx2 |
1170 | 989k | }; |
1171 | 989k | const int32_t tap_x = get_filter_tap(filter_params_x, subpel_x_q4); |
1172 | 989k | const int32_t tap_y = get_filter_tap(filter_params_y, subpel_y_q4); |
1173 | | |
1174 | 989k | assert(tap_x != 12 && tap_y != 12); |
1175 | | |
1176 | 989k | const uint8_t *src_ptr = src - ((tap_y >> 1) - 1) * src_stride; |
1177 | | // Note: im_block is 8-pixel interlaced for width 32 and up, to avoid data |
1178 | | // permutation. |
1179 | 989k | DECLARE_ALIGNED(32, int16_t, |
1180 | 989k | im_block[(MAX_SB_SIZE + MAX_FILTER_TAP) * MAX_SB_SIZE]); |
1181 | | |
1182 | 989k | (void)conv_params; |
1183 | | |
1184 | 989k | assert(conv_params->round_0 == 3); |
1185 | 989k | assert(conv_params->round_1 == 11); |
1186 | | |
1187 | | // horizontal filter |
1188 | 989k | int32_t hh = h + tap_y; |
1189 | 989k | assert(!(hh % 2)); |
1190 | | |
1191 | 989k | convolve_2d_sr_hor_tap_func_table[tap_x]( |
1192 | 989k | src_ptr, src_stride, w, hh, filter_params_x, subpel_x_q4, im_block); |
1193 | | |
1194 | | // vertical filter |
1195 | 989k | convolve_2d_sr_ver_tap_func_table[tap_y - (subpel_y_q4 == 8)]( |
1196 | 989k | im_block, w, h, filter_params_y, subpel_y_q4, dst, dst_stride); |
1197 | 989k | } |
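
How the vertical table is indexed: get_filter_tap() returns 2, 4, 6 or 8, and subtracting (subpel_y_q4 == 8) only changes the outcome for 2-tap, steering it to convolve_2d_sr_ver_2tap_half_avx2 at index 1; for 4/6/8 taps the odd slot aliases the same function as the even slot, and those kernels branch on subpel_y_q4 internally. Equivalent index computation (illustrative):

    static int ver_table_index_sketch(int tap_y, int subpel_y_q4) {
      return tap_y - (subpel_y_q4 == 8);
    }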
1198 | | |
1199 | | #endif // THIRD_PARTY_SVT_AV1_CONVOLVE_2D_AVX2_H_ |