/src/aom/av1/common/x86/warp_plane_avx2.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2019, Alliance for Open Media. All rights reserved. |
3 | | * |
4 | | * This source code is subject to the terms of the BSD 2 Clause License and |
5 | | * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License |
6 | | * was not distributed with this source code in the LICENSE file, you can |
7 | | * obtain it at www.aomedia.org/license/software. If the Alliance for Open |
8 | | * Media Patent License 1.0 was not distributed with this source code in the |
9 | | * PATENTS file, you can obtain it at www.aomedia.org/license/patent. |
10 | | */ |
11 | | |
12 | | #include <immintrin.h> |
13 | | #include "config/av1_rtcd.h" |
14 | | #include "av1/common/warped_motion.h" |
15 | | #include "aom_dsp/x86/synonyms.h" |
16 | | |
17 | | DECLARE_ALIGNED(32, static const uint8_t, shuffle_alpha0_mask01_avx2[32]) = { |
18 | | 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, |
19 | | 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 |
20 | | }; |
21 | | |
22 | | DECLARE_ALIGNED(32, static const uint8_t, shuffle_alpha0_mask23_avx2[32]) = { |
23 | | 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, |
24 | | 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3 |
25 | | }; |
26 | | |
27 | | DECLARE_ALIGNED(32, static const uint8_t, shuffle_alpha0_mask45_avx2[32]) = { |
28 | | 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, |
29 | | 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5 |
30 | | }; |
31 | | |
32 | | DECLARE_ALIGNED(32, static const uint8_t, shuffle_alpha0_mask67_avx2[32]) = { |
33 | | 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, |
34 | | 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7 |
35 | | }; |
36 | | |
37 | | DECLARE_ALIGNED(32, static const uint8_t, shuffle_gamma0_mask0_avx2[32]) = { |
38 | | 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, |
39 | | 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3 |
40 | | }; |
41 | | |
42 | | DECLARE_ALIGNED(32, static const uint8_t, shuffle_gamma0_mask1_avx2[32]) = { |
43 | | 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, |
44 | | 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7 |
45 | | }; |
46 | | |
47 | | DECLARE_ALIGNED(32, static const uint8_t, shuffle_gamma0_mask2_avx2[32]) = { |
48 | | 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, |
49 | | 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11 |
50 | | }; |
51 | | |
52 | | DECLARE_ALIGNED(32, static const uint8_t, shuffle_gamma0_mask3_avx2[32]) = { |
53 | | 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, |
54 | | 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15 |
55 | | }; |
56 | | |
57 | | DECLARE_ALIGNED(32, static const uint8_t, |
58 | | shuffle_src0[32]) = { 0, 2, 2, 4, 4, 6, 6, 8, 1, 3, 3, |
59 | | 5, 5, 7, 7, 9, 0, 2, 2, 4, 4, 6, |
60 | | 6, 8, 1, 3, 3, 5, 5, 7, 7, 9 }; |
61 | | |
62 | | DECLARE_ALIGNED(32, static const uint8_t, |
63 | | shuffle_src1[32]) = { 4, 6, 6, 8, 8, 10, 10, 12, 5, 7, 7, |
64 | | 9, 9, 11, 11, 13, 4, 6, 6, 8, 8, 10, |
65 | | 10, 12, 5, 7, 7, 9, 9, 11, 11, 13 }; |
66 | | |
67 | | DECLARE_ALIGNED(32, static const uint8_t, |
68 | | shuffle_src2[32]) = { 1, 3, 3, 5, 5, 7, 7, 9, 2, 4, 4, |
69 | | 6, 6, 8, 8, 10, 1, 3, 3, 5, 5, 7, |
70 | | 7, 9, 2, 4, 4, 6, 6, 8, 8, 10 }; |
71 | | |
72 | | DECLARE_ALIGNED(32, static const uint8_t, |
73 | | shuffle_src3[32]) = { 5, 7, 7, 9, 9, 11, 11, 13, 6, 8, 8, |
74 | | 10, 10, 12, 12, 14, 5, 7, 7, 9, 9, 11, |
75 | | 11, 13, 6, 8, 8, 10, 10, 12, 12, 14 }; |
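 | |
 | | // The shuffle_alpha0_* masks broadcast one 16-bit coefficient pair and
 | | // the shuffle_gamma0_* masks one 32-bit pair across a 256-bit register,
 | | // for the degenerate cases where every pixel of a row (alpha == 0) or
 | | // column (gamma == 0) uses the same filter. The shuffle_src* masks
 | | // interleave neighboring source bytes so that _mm256_maddubs_epi16 can
 | | // form the pixel/coefficient products for two taps at a time.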
76 | | |
77 | | static inline void filter_src_pixels_avx2(const __m256i src, __m256i *horz_out, |
78 | | __m256i *coeff, |
79 | | const __m256i *shuffle_src, |
80 | | const __m256i *round_const, |
81 | 14.0M | const __m128i *shift, int row) { |
82 | 14.0M | const __m256i src_0 = _mm256_shuffle_epi8(src, shuffle_src[0]); |
83 | 14.0M | const __m256i src_1 = _mm256_shuffle_epi8(src, shuffle_src[1]); |
84 | 14.0M | const __m256i src_2 = _mm256_shuffle_epi8(src, shuffle_src[2]); |
85 | 14.0M | const __m256i src_3 = _mm256_shuffle_epi8(src, shuffle_src[3]); |
86 | | |
87 | 14.0M | const __m256i res_02 = _mm256_maddubs_epi16(src_0, coeff[0]); |
88 | 14.0M | const __m256i res_46 = _mm256_maddubs_epi16(src_1, coeff[1]); |
89 | 14.0M | const __m256i res_13 = _mm256_maddubs_epi16(src_2, coeff[2]); |
90 | 14.0M | const __m256i res_57 = _mm256_maddubs_epi16(src_3, coeff[3]); |
91 | | |
92 | 14.0M | const __m256i res_even = _mm256_add_epi16(res_02, res_46); |
93 | 14.0M | const __m256i res_odd = _mm256_add_epi16(res_13, res_57); |
94 | 14.0M | const __m256i res = |
95 | 14.0M | _mm256_add_epi16(_mm256_add_epi16(res_even, res_odd), *round_const); |
96 | 14.0M | horz_out[row] = _mm256_srl_epi16(res, *shift); |
97 | 14.0M | } |
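 | |
 | | // A scalar sketch of the sum formed above for one output pixel x (same
 | | // arithmetic, not the bit-exact SIMD evaluation order):
 | | //   int32_t sum = round_const;
 | | //   for (int tap = 0; tap < 8; ++tap) sum += src[x + tap] * coeff[tap];
 | | //   horz_out[row][x] = sum >> shift;  // logical shift; the offset in
 | | //                                     // round_const keeps sum >= 0
 | | // The taps are split into the (0,2)/(4,6)/(1,3)/(5,7) pairs so each
 | | // _mm256_maddubs_epi16 result stays within its saturating 16-bit range.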
98 | | |
99 | | static inline void prepare_horizontal_filter_coeff_avx2(int alpha, int beta, |
100 | | int sx, |
101 | 3.87M | __m256i *coeff) { |
102 | 3.87M | __m128i tmp_0 = _mm_loadl_epi64( |
103 | 3.87M | (__m128i *)&av1_filter_8bit[((unsigned)(sx + 0 * alpha)) >> |
104 | 3.87M | WARPEDDIFF_PREC_BITS]); |
105 | 3.87M | __m128i tmp_1 = _mm_loadl_epi64( |
106 | 3.87M | (__m128i *)&av1_filter_8bit[((unsigned)(sx + 1 * alpha)) >> |
107 | 3.87M | WARPEDDIFF_PREC_BITS]); |
108 | 3.87M | __m128i tmp_2 = _mm_loadl_epi64( |
109 | 3.87M | (__m128i *)&av1_filter_8bit[((unsigned)(sx + 2 * alpha)) >> |
110 | 3.87M | WARPEDDIFF_PREC_BITS]); |
111 | 3.87M | __m128i tmp_3 = _mm_loadl_epi64( |
112 | 3.87M | (__m128i *)&av1_filter_8bit[((unsigned)(sx + 3 * alpha)) >> |
113 | 3.87M | WARPEDDIFF_PREC_BITS]); |
114 | | |
115 | 3.87M | __m128i tmp_4 = _mm_loadl_epi64( |
116 | 3.87M | (__m128i *)&av1_filter_8bit[((unsigned)(sx + 4 * alpha)) >> |
117 | 3.87M | WARPEDDIFF_PREC_BITS]); |
118 | 3.87M | __m128i tmp_5 = _mm_loadl_epi64( |
119 | 3.87M | (__m128i *)&av1_filter_8bit[((unsigned)(sx + 5 * alpha)) >> |
120 | 3.87M | WARPEDDIFF_PREC_BITS]); |
121 | 3.87M | __m128i tmp_6 = _mm_loadl_epi64( |
122 | 3.87M | (__m128i *)&av1_filter_8bit[((unsigned)(sx + 6 * alpha)) >> |
123 | 3.87M | WARPEDDIFF_PREC_BITS]); |
124 | 3.87M | __m128i tmp_7 = _mm_loadl_epi64( |
125 | 3.87M | (__m128i *)&av1_filter_8bit[((unsigned)(sx + 7 * alpha)) >> |
126 | 3.87M | WARPEDDIFF_PREC_BITS]); |
127 | | |
128 | 3.87M | __m256i tmp0_256 = _mm256_castsi128_si256(tmp_0); |
129 | 3.87M | __m256i tmp2_256 = _mm256_castsi128_si256(tmp_2); |
130 | 3.87M | __m256i tmp1_256 = _mm256_castsi128_si256(tmp_1); |
131 | 3.87M | __m256i tmp3_256 = _mm256_castsi128_si256(tmp_3); |
132 | | |
133 | 3.87M | __m256i tmp4_256 = _mm256_castsi128_si256(tmp_4); |
134 | 3.87M | __m256i tmp6_256 = _mm256_castsi128_si256(tmp_6); |
135 | 3.87M | __m256i tmp5_256 = _mm256_castsi128_si256(tmp_5); |
136 | 3.87M | __m256i tmp7_256 = _mm256_castsi128_si256(tmp_7); |
137 | | |
138 | 3.87M | __m128i tmp_8 = _mm_loadl_epi64( |
139 | 3.87M | (__m128i *)&av1_filter_8bit[(unsigned)((sx + beta) + 0 * alpha) >> |
140 | 3.87M | WARPEDDIFF_PREC_BITS]); |
141 | 3.87M | tmp0_256 = _mm256_inserti128_si256(tmp0_256, tmp_8, 1); |
142 | | |
143 | 3.87M | __m128i tmp_9 = _mm_loadl_epi64( |
144 | 3.87M | (__m128i *)&av1_filter_8bit[(unsigned)((sx + beta) + 1 * alpha) >> |
145 | 3.87M | WARPEDDIFF_PREC_BITS]); |
146 | 3.87M | tmp1_256 = _mm256_inserti128_si256(tmp1_256, tmp_9, 1); |
147 | | |
148 | 3.87M | __m128i tmp_10 = _mm_loadl_epi64( |
149 | 3.87M | (__m128i *)&av1_filter_8bit[(unsigned)((sx + beta) + 2 * alpha) >> |
150 | 3.87M | WARPEDDIFF_PREC_BITS]); |
151 | 3.87M | tmp2_256 = _mm256_inserti128_si256(tmp2_256, tmp_10, 1); |
152 | | |
153 | 3.87M | __m128i tmp_11 = _mm_loadl_epi64( |
154 | 3.87M | (__m128i *)&av1_filter_8bit[(unsigned)((sx + beta) + 3 * alpha) >> |
155 | 3.87M | WARPEDDIFF_PREC_BITS]); |
156 | 3.87M | tmp3_256 = _mm256_inserti128_si256(tmp3_256, tmp_11, 1); |
157 | | |
158 | 3.87M | tmp_2 = _mm_loadl_epi64( |
159 | 3.87M | (__m128i *)&av1_filter_8bit[(unsigned)((sx + beta) + 4 * alpha) >> |
160 | 3.87M | WARPEDDIFF_PREC_BITS]); |
161 | 3.87M | tmp4_256 = _mm256_inserti128_si256(tmp4_256, tmp_2, 1); |
162 | | |
163 | 3.87M | tmp_3 = _mm_loadl_epi64( |
164 | 3.87M | (__m128i *)&av1_filter_8bit[(unsigned)((sx + beta) + 5 * alpha) >> |
165 | 3.87M | WARPEDDIFF_PREC_BITS]); |
166 | 3.87M | tmp5_256 = _mm256_inserti128_si256(tmp5_256, tmp_3, 1); |
167 | | |
168 | 3.87M | tmp_6 = _mm_loadl_epi64( |
169 | 3.87M | (__m128i *)&av1_filter_8bit[(unsigned)((sx + beta) + 6 * alpha) >> |
170 | 3.87M | WARPEDDIFF_PREC_BITS]); |
171 | 3.87M | tmp6_256 = _mm256_inserti128_si256(tmp6_256, tmp_6, 1); |
172 | | |
173 | 3.87M | tmp_7 = _mm_loadl_epi64( |
174 | 3.87M | (__m128i *)&av1_filter_8bit[(unsigned)((sx + beta) + 7 * alpha) >> |
175 | 3.87M | WARPEDDIFF_PREC_BITS]); |
176 | 3.87M | tmp7_256 = _mm256_inserti128_si256(tmp7_256, tmp_7, 1); |
177 | | |
178 | 3.87M | const __m256i tmp_12 = _mm256_unpacklo_epi16(tmp0_256, tmp2_256); |
179 | 3.87M | const __m256i tmp_13 = _mm256_unpacklo_epi16(tmp1_256, tmp3_256); |
180 | 3.87M | const __m256i tmp_14 = _mm256_unpacklo_epi16(tmp4_256, tmp6_256); |
181 | 3.87M | const __m256i tmp_15 = _mm256_unpacklo_epi16(tmp5_256, tmp7_256); |
182 | | |
183 | 3.87M | const __m256i res_0 = _mm256_unpacklo_epi32(tmp_12, tmp_14); |
184 | 3.87M | const __m256i res_1 = _mm256_unpackhi_epi32(tmp_12, tmp_14); |
185 | 3.87M | const __m256i res_2 = _mm256_unpacklo_epi32(tmp_13, tmp_15); |
186 | 3.87M | const __m256i res_3 = _mm256_unpackhi_epi32(tmp_13, tmp_15); |
187 | | |
188 | 3.87M | coeff[0] = _mm256_unpacklo_epi64(res_0, res_2); |
189 | 3.87M | coeff[1] = _mm256_unpackhi_epi64(res_0, res_2); |
190 | 3.87M | coeff[2] = _mm256_unpacklo_epi64(res_1, res_3); |
191 | 3.87M | coeff[3] = _mm256_unpackhi_epi64(res_1, res_3); |
192 | 3.87M | } |
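 | |
 | | // The eight 64-bit loads above fetch one 8-tap filter per output pixel
 | | // (sx stepping by alpha across the row, with the second row at sx + beta
 | | // placed in the upper 128-bit lane). The unpack epi16/epi32/epi64 ladder
 | | // then transposes them so that coeff[0..3] hold the tap pairs consumed
 | | // as res_02, res_46, res_13 and res_57 in filter_src_pixels_avx2.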
193 | | |
194 | | static inline void prepare_horizontal_filter_coeff_beta0_avx2(int alpha, int sx, |
195 | 416k | __m256i *coeff) { |
196 | 416k | __m128i tmp_0 = _mm_loadl_epi64( |
197 | 416k | (__m128i *)&av1_filter_8bit[(sx + 0 * alpha) >> WARPEDDIFF_PREC_BITS]); |
198 | 416k | __m128i tmp_1 = _mm_loadl_epi64( |
199 | 416k | (__m128i *)&av1_filter_8bit[(sx + 1 * alpha) >> WARPEDDIFF_PREC_BITS]); |
200 | 416k | __m128i tmp_2 = _mm_loadl_epi64( |
201 | 416k | (__m128i *)&av1_filter_8bit[(sx + 2 * alpha) >> WARPEDDIFF_PREC_BITS]); |
202 | 416k | __m128i tmp_3 = _mm_loadl_epi64( |
203 | 416k | (__m128i *)&av1_filter_8bit[(sx + 3 * alpha) >> WARPEDDIFF_PREC_BITS]); |
204 | 416k | __m128i tmp_4 = _mm_loadl_epi64( |
205 | 416k | (__m128i *)&av1_filter_8bit[(sx + 4 * alpha) >> WARPEDDIFF_PREC_BITS]); |
206 | 416k | __m128i tmp_5 = _mm_loadl_epi64( |
207 | 416k | (__m128i *)&av1_filter_8bit[(sx + 5 * alpha) >> WARPEDDIFF_PREC_BITS]); |
208 | 416k | __m128i tmp_6 = _mm_loadl_epi64( |
209 | 416k | (__m128i *)&av1_filter_8bit[(sx + 6 * alpha) >> WARPEDDIFF_PREC_BITS]); |
210 | 416k | __m128i tmp_7 = _mm_loadl_epi64( |
211 | 416k | (__m128i *)&av1_filter_8bit[(sx + 7 * alpha) >> WARPEDDIFF_PREC_BITS]); |
212 | | |
213 | 416k | tmp_0 = _mm_unpacklo_epi16(tmp_0, tmp_2); |
214 | 416k | tmp_1 = _mm_unpacklo_epi16(tmp_1, tmp_3); |
215 | 416k | tmp_4 = _mm_unpacklo_epi16(tmp_4, tmp_6); |
216 | 416k | tmp_5 = _mm_unpacklo_epi16(tmp_5, tmp_7); |
217 | | |
218 | 416k | const __m256i tmp_12 = _mm256_broadcastsi128_si256(tmp_0); |
219 | 416k | const __m256i tmp_13 = _mm256_broadcastsi128_si256(tmp_1); |
220 | 416k | const __m256i tmp_14 = _mm256_broadcastsi128_si256(tmp_4); |
221 | 416k | const __m256i tmp_15 = _mm256_broadcastsi128_si256(tmp_5); |
222 | | |
223 | 416k | const __m256i res_0 = _mm256_unpacklo_epi32(tmp_12, tmp_14); |
224 | 416k | const __m256i res_1 = _mm256_unpackhi_epi32(tmp_12, tmp_14); |
225 | 416k | const __m256i res_2 = _mm256_unpacklo_epi32(tmp_13, tmp_15); |
226 | 416k | const __m256i res_3 = _mm256_unpackhi_epi32(tmp_13, tmp_15); |
227 | | |
228 | 416k | coeff[0] = _mm256_unpacklo_epi64(res_0, res_2); |
229 | 416k | coeff[1] = _mm256_unpackhi_epi64(res_0, res_2); |
230 | 416k | coeff[2] = _mm256_unpacklo_epi64(res_1, res_3); |
231 | 416k | coeff[3] = _mm256_unpackhi_epi64(res_1, res_3); |
232 | 416k | } |
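 | |
 | | // beta == 0: the two rows of the pair share the same sx, so the eight
 | | // filters are gathered once and broadcast into both 128-bit lanes
 | | // before the final transpose steps.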
233 | | |
234 | | static inline void prepare_horizontal_filter_coeff_alpha0_avx2(int beta, int sx, |
235 | 3.67M | __m256i *coeff) { |
236 | 3.67M | const __m128i tmp_0 = |
237 | 3.67M | _mm_loadl_epi64((__m128i *)&av1_filter_8bit[sx >> WARPEDDIFF_PREC_BITS]); |
238 | 3.67M | const __m128i tmp_1 = _mm_loadl_epi64( |
239 | 3.67M | (__m128i *)&av1_filter_8bit[(sx + beta) >> WARPEDDIFF_PREC_BITS]); |
240 | | |
241 | 3.67M | const __m256i res_0 = |
242 | 3.67M | _mm256_inserti128_si256(_mm256_castsi128_si256(tmp_0), tmp_1, 0x1); |
243 | | |
244 | 3.67M | coeff[0] = _mm256_shuffle_epi8( |
245 | 3.67M | res_0, _mm256_load_si256((__m256i *)shuffle_alpha0_mask01_avx2)); |
246 | 3.67M | coeff[1] = _mm256_shuffle_epi8( |
247 | 3.67M | res_0, _mm256_load_si256((__m256i *)shuffle_alpha0_mask23_avx2)); |
248 | 3.67M | coeff[2] = _mm256_shuffle_epi8( |
249 | 3.67M | res_0, _mm256_load_si256((__m256i *)shuffle_alpha0_mask45_avx2)); |
250 | 3.67M | coeff[3] = _mm256_shuffle_epi8( |
251 | 3.67M | res_0, _mm256_load_si256((__m256i *)shuffle_alpha0_mask67_avx2)); |
252 | 3.67M | } |
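 | |
 | | // alpha == 0: every pixel of a row shares one 8-tap filter, so a single
 | | // 64-bit load per row (sx and sx + beta in the two lanes) is splatted
 | | // across the register by the shuffle_alpha0_* masks.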
253 | | |
254 | | static inline void horizontal_filter_avx2(const __m256i src, __m256i *horz_out, |
255 | | int sx, int alpha, int beta, int row, |
256 | | const __m256i *shuffle_src, |
257 | | const __m256i *round_const, |
258 | 3.87M | const __m128i *shift) { |
259 | 3.87M | __m256i coeff[4]; |
260 | 3.87M | prepare_horizontal_filter_coeff_avx2(alpha, beta, sx, coeff); |
261 | 3.87M | filter_src_pixels_avx2(src, horz_out, coeff, shuffle_src, round_const, shift, |
262 | 3.87M | row); |
263 | 3.87M | } |
264 | | static inline void prepare_horizontal_filter_coeff(int alpha, int sx, |
265 | 558k | __m256i *coeff) { |
266 | 558k | const __m128i tmp_0 = _mm_loadl_epi64( |
267 | 558k | (__m128i *)&av1_filter_8bit[(sx + 0 * alpha) >> WARPEDDIFF_PREC_BITS]); |
268 | 558k | const __m128i tmp_1 = _mm_loadl_epi64( |
269 | 558k | (__m128i *)&av1_filter_8bit[(sx + 1 * alpha) >> WARPEDDIFF_PREC_BITS]); |
270 | 558k | const __m128i tmp_2 = _mm_loadl_epi64( |
271 | 558k | (__m128i *)&av1_filter_8bit[(sx + 2 * alpha) >> WARPEDDIFF_PREC_BITS]); |
272 | 558k | const __m128i tmp_3 = _mm_loadl_epi64( |
273 | 558k | (__m128i *)&av1_filter_8bit[(sx + 3 * alpha) >> WARPEDDIFF_PREC_BITS]); |
274 | 558k | const __m128i tmp_4 = _mm_loadl_epi64( |
275 | 558k | (__m128i *)&av1_filter_8bit[(sx + 4 * alpha) >> WARPEDDIFF_PREC_BITS]); |
276 | 558k | const __m128i tmp_5 = _mm_loadl_epi64( |
277 | 558k | (__m128i *)&av1_filter_8bit[(sx + 5 * alpha) >> WARPEDDIFF_PREC_BITS]); |
278 | 558k | const __m128i tmp_6 = _mm_loadl_epi64( |
279 | 558k | (__m128i *)&av1_filter_8bit[(sx + 6 * alpha) >> WARPEDDIFF_PREC_BITS]); |
280 | 558k | const __m128i tmp_7 = _mm_loadl_epi64( |
281 | 558k | (__m128i *)&av1_filter_8bit[(sx + 7 * alpha) >> WARPEDDIFF_PREC_BITS]); |
282 | | |
283 | 558k | const __m128i tmp_8 = _mm_unpacklo_epi16(tmp_0, tmp_2); |
284 | 558k | const __m128i tmp_9 = _mm_unpacklo_epi16(tmp_1, tmp_3); |
285 | 558k | const __m128i tmp_10 = _mm_unpacklo_epi16(tmp_4, tmp_6); |
286 | 558k | const __m128i tmp_11 = _mm_unpacklo_epi16(tmp_5, tmp_7); |
287 | | |
288 | 558k | const __m128i tmp_12 = _mm_unpacklo_epi32(tmp_8, tmp_10); |
289 | 558k | const __m128i tmp_13 = _mm_unpackhi_epi32(tmp_8, tmp_10); |
290 | 558k | const __m128i tmp_14 = _mm_unpacklo_epi32(tmp_9, tmp_11); |
291 | 558k | const __m128i tmp_15 = _mm_unpackhi_epi32(tmp_9, tmp_11); |
292 | | |
293 | 558k | coeff[0] = _mm256_castsi128_si256(_mm_unpacklo_epi64(tmp_12, tmp_14)); |
294 | 558k | coeff[1] = _mm256_castsi128_si256(_mm_unpackhi_epi64(tmp_12, tmp_14)); |
295 | 558k | coeff[2] = _mm256_castsi128_si256(_mm_unpacklo_epi64(tmp_13, tmp_15)); |
296 | 558k | coeff[3] = _mm256_castsi128_si256(_mm_unpackhi_epi64(tmp_13, tmp_15)); |
297 | 558k | } |
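 | |
 | | // 128-bit variant of the coefficient transpose, used for the odd
 | | // trailing source row, where only the low lane of the horz_out entry
 | | // is consumed by the vertical pass.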
298 | | |
299 | | static inline void warp_horizontal_filter_avx2( |
300 | | const uint8_t *ref, __m256i *horz_out, int stride, int32_t ix4, int32_t iy4, |
301 | | int32_t sx4, int alpha, int beta, int p_height, int height, int i, |
302 | | const __m256i *round_const, const __m128i *shift, |
303 | 480k | const __m256i *shuffle_src) { |
304 | 480k | int k, iy, sx, row = 0; |
305 | 480k | __m256i coeff[4]; |
306 | 3.81M | for (k = -7; k <= (AOMMIN(8, p_height - i) - 2); k += 2) { |
307 | 3.32M | iy = iy4 + k; |
308 | 3.32M | iy = clamp(iy, 0, height - 1); |
309 | 3.32M | const __m128i src_0 = |
310 | 3.32M | _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7)); |
311 | 3.32M | iy = iy4 + k + 1; |
312 | 3.32M | iy = clamp(iy, 0, height - 1); |
313 | 3.32M | const __m128i src_1 = |
314 | 3.32M | _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7)); |
315 | 3.32M | const __m256i src_01 = |
316 | 3.32M | _mm256_inserti128_si256(_mm256_castsi128_si256(src_0), src_1, 0x1); |
317 | 3.32M | sx = sx4 + beta * (k + 4); |
318 | 3.32M | horizontal_filter_avx2(src_01, horz_out, sx, alpha, beta, row, shuffle_src, |
319 | 3.32M | round_const, shift); |
320 | 3.32M | row += 1; |
321 | 3.32M | } |
322 | 480k | iy = iy4 + k; |
323 | 480k | iy = clamp(iy, 0, height - 1); |
324 | 480k | const __m256i src_01 = _mm256_castsi128_si256( |
325 | 480k | _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7))); |
326 | 480k | sx = sx4 + beta * (k + 4); |
327 | 480k | prepare_horizontal_filter_coeff(alpha, sx, coeff); |
328 | 480k | filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src, round_const, |
329 | 480k | shift, row); |
330 | 480k | } |
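 | |
 | | // The horizontal pass filters the 15 source rows (iy4 - 7 .. iy4 + 7)
 | | // needed by the 8-tap vertical filter, two rows per iteration in the
 | | // two 128-bit lanes, with the odd final row handled after the loop.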
331 | | |
332 | | static inline void warp_horizontal_filter_alpha0_avx2( |
333 | | const uint8_t *ref, __m256i *horz_out, int stride, int32_t ix4, int32_t iy4, |
334 | | int32_t sx4, int alpha, int beta, int p_height, int height, int i, |
335 | | const __m256i *round_const, const __m128i *shift, |
336 | 414k | const __m256i *shuffle_src) { |
337 | 414k | (void)alpha; |
338 | 414k | int k, iy, sx, row = 0; |
339 | 414k | __m256i coeff[4]; |
340 | 3.29M | for (k = -7; k <= (AOMMIN(8, p_height - i) - 2); k += 2) { |
341 | 2.88M | iy = iy4 + k; |
342 | 2.88M | iy = clamp(iy, 0, height - 1); |
343 | 2.88M | const __m128i src_0 = |
344 | 2.88M | _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7)); |
345 | 2.88M | iy = iy4 + k + 1; |
346 | 2.88M | iy = clamp(iy, 0, height - 1); |
347 | 2.88M | const __m128i src_1 = |
348 | 2.88M | _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7)); |
349 | 2.88M | const __m256i src_01 = |
350 | 2.88M | _mm256_inserti128_si256(_mm256_castsi128_si256(src_0), src_1, 0x1); |
351 | 2.88M | sx = sx4 + beta * (k + 4); |
352 | 2.88M | prepare_horizontal_filter_coeff_alpha0_avx2(beta, sx, coeff); |
353 | 2.88M | filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src, round_const, |
354 | 2.88M | shift, row); |
355 | 2.88M | row += 1; |
356 | 2.88M | } |
357 | 414k | iy = iy4 + k; |
358 | 414k | iy = clamp(iy, 0, height - 1); |
359 | 414k | const __m256i src_01 = _mm256_castsi128_si256( |
360 | 414k | _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7))); |
361 | 414k | sx = sx4 + beta * (k + 4); |
362 | 414k | prepare_horizontal_filter_coeff_alpha0_avx2(beta, sx, coeff); |
363 | 414k | filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src, round_const, |
364 | 414k | shift, row); |
365 | 414k | } |
366 | | |
367 | | static inline void warp_horizontal_filter_beta0_avx2( |
368 | | const uint8_t *ref, __m256i *horz_out, int stride, int32_t ix4, int32_t iy4, |
369 | | int32_t sx4, int alpha, int beta, int p_height, int height, int i, |
370 | | const __m256i *round_const, const __m128i *shift, |
371 | 416k | const __m256i *shuffle_src) { |
372 | 416k | (void)beta; |
373 | 416k | int k, iy, row = 0; |
374 | 416k | __m256i coeff[4]; |
375 | 416k | prepare_horizontal_filter_coeff_beta0_avx2(alpha, sx4, coeff); |
376 | 3.32M | for (k = -7; k <= (AOMMIN(8, p_height - i) - 2); k += 2) { |
377 | 2.91M | iy = iy4 + k; |
378 | 2.91M | iy = clamp(iy, 0, height - 1); |
379 | 2.91M | const __m128i src_0 = |
380 | 2.91M | _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7)); |
381 | 2.91M | iy = iy4 + k + 1; |
382 | 2.91M | iy = clamp(iy, 0, height - 1); |
383 | 2.91M | const __m128i src_1 = |
384 | 2.91M | _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7)); |
385 | 2.91M | const __m256i src_01 = |
386 | 2.91M | _mm256_inserti128_si256(_mm256_castsi128_si256(src_0), src_1, 0x1); |
387 | 2.91M | filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src, round_const, |
388 | 2.91M | shift, row); |
389 | 2.91M | row += 1; |
390 | 2.91M | } |
391 | 416k | iy = iy4 + k; |
392 | 416k | iy = clamp(iy, 0, height - 1); |
393 | 416k | const __m256i src_01 = _mm256_castsi128_si256( |
394 | 416k | _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7))); |
395 | 416k | filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src, round_const, |
396 | 416k | shift, row); |
397 | 416k | } |
398 | | |
399 | | static inline void warp_horizontal_filter_alpha0_beta0_avx2( |
400 | | const uint8_t *ref, __m256i *horz_out, int stride, int32_t ix4, int32_t iy4, |
401 | | int32_t sx4, int alpha, int beta, int p_height, int height, int i, |
402 | | const __m256i *round_const, const __m128i *shift, |
403 | 383k | const __m256i *shuffle_src) { |
404 | 383k | (void)alpha; |
405 | 383k | int k, iy, row = 0; |
406 | 383k | __m256i coeff[4]; |
407 | 383k | prepare_horizontal_filter_coeff_alpha0_avx2(beta, sx4, coeff); |
408 | 3.02M | for (k = -7; k <= (AOMMIN(8, p_height - i) - 2); k += 2) { |
409 | 2.64M | iy = iy4 + k; |
410 | 2.64M | iy = clamp(iy, 0, height - 1); |
411 | 2.64M | const __m128i src0 = |
412 | 2.64M | _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7)); |
413 | 2.64M | iy = iy4 + k + 1; |
414 | 2.64M | iy = clamp(iy, 0, height - 1); |
415 | 2.64M | const __m128i src1 = |
416 | 2.64M | _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7)); |
417 | 2.64M | const __m256i src_01 = |
418 | 2.64M | _mm256_inserti128_si256(_mm256_castsi128_si256(src0), src1, 0x1); |
419 | 2.64M | filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src, round_const, |
420 | 2.64M | shift, row); |
421 | 2.64M | row += 1; |
422 | 2.64M | } |
423 | 383k | iy = iy4 + k; |
424 | 383k | iy = clamp(iy, 0, height - 1); |
425 | 383k | const __m256i src_01 = _mm256_castsi128_si256( |
426 | 383k | _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7))); |
427 | 383k | filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src, round_const, |
428 | 383k | shift, row); |
429 | 383k | } |
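 | |
 | | // The alpha0/beta0 variants above differ from warp_horizontal_filter_avx2
 | | // only in coefficient preparation: whatever is invariant (across pixels
 | | // for alpha == 0, across rows for beta == 0, or both) is computed once
 | | // and reused.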
430 | | |
431 | | static inline void unpack_weights_and_set_round_const_avx2( |
432 | | ConvolveParams *conv_params, const int round_bits, const int offset_bits, |
433 | 238k | __m256i *res_sub_const, __m256i *round_bits_const, __m256i *wt) { |
434 | 238k | *res_sub_const = |
435 | 238k | _mm256_set1_epi16(-(1 << (offset_bits - conv_params->round_1)) - |
436 | 238k | (1 << (offset_bits - conv_params->round_1 - 1))); |
437 | 238k | *round_bits_const = _mm256_set1_epi16(((1 << round_bits) >> 1)); |
438 | | |
439 | 238k | const int w0 = conv_params->fwd_offset; |
440 | 238k | const int w1 = conv_params->bck_offset; |
441 | 238k | const __m256i wt0 = _mm256_set1_epi16((short)w0); |
442 | 238k | const __m256i wt1 = _mm256_set1_epi16((short)w1); |
443 | 238k | *wt = _mm256_unpacklo_epi16(wt0, wt1); |
444 | 238k | } |
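 | |
 | | // wt interleaves the forward/backward compound weights as (w0, w1)
 | | // pairs so that one _mm256_madd_epi16 against (dst, cur) pixel pairs
 | | // evaluates the distance-weighted average numerator
 | | //   w0 * dst + w1 * cur   (then >> DIST_PRECISION_BITS).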
445 | | |
446 | | static inline void prepare_vertical_filter_coeffs_avx2(int gamma, int delta, |
447 | | int sy, |
448 | 3.91M | __m256i *coeffs) { |
449 | 3.91M | __m128i filt_00 = |
450 | 3.91M | _mm_loadu_si128((__m128i *)(av1_warped_filter + |
451 | 3.91M | ((sy + 0 * gamma) >> WARPEDDIFF_PREC_BITS))); |
452 | 3.91M | __m128i filt_01 = |
453 | 3.91M | _mm_loadu_si128((__m128i *)(av1_warped_filter + |
454 | 3.91M | ((sy + 2 * gamma) >> WARPEDDIFF_PREC_BITS))); |
455 | 3.91M | __m128i filt_02 = |
456 | 3.91M | _mm_loadu_si128((__m128i *)(av1_warped_filter + |
457 | 3.91M | ((sy + 4 * gamma) >> WARPEDDIFF_PREC_BITS))); |
458 | 3.91M | __m128i filt_03 = |
459 | 3.91M | _mm_loadu_si128((__m128i *)(av1_warped_filter + |
460 | 3.91M | ((sy + 6 * gamma) >> WARPEDDIFF_PREC_BITS))); |
461 | | |
462 | 3.91M | __m128i filt_10 = _mm_loadu_si128( |
463 | 3.91M | (__m128i *)(av1_warped_filter + |
464 | 3.91M | (((sy + delta) + 0 * gamma) >> WARPEDDIFF_PREC_BITS))); |
465 | 3.91M | __m128i filt_11 = _mm_loadu_si128( |
466 | 3.91M | (__m128i *)(av1_warped_filter + |
467 | 3.91M | (((sy + delta) + 2 * gamma) >> WARPEDDIFF_PREC_BITS))); |
468 | 3.91M | __m128i filt_12 = _mm_loadu_si128( |
469 | 3.91M | (__m128i *)(av1_warped_filter + |
470 | 3.91M | (((sy + delta) + 4 * gamma) >> WARPEDDIFF_PREC_BITS))); |
471 | 3.91M | __m128i filt_13 = _mm_loadu_si128( |
472 | 3.91M | (__m128i *)(av1_warped_filter + |
473 | 3.91M | (((sy + delta) + 6 * gamma) >> WARPEDDIFF_PREC_BITS))); |
474 | | |
475 | 3.91M | __m256i filt_0 = |
476 | 3.91M | _mm256_inserti128_si256(_mm256_castsi128_si256(filt_00), filt_10, 0x1); |
477 | 3.91M | __m256i filt_1 = |
478 | 3.91M | _mm256_inserti128_si256(_mm256_castsi128_si256(filt_01), filt_11, 0x1); |
479 | 3.91M | __m256i filt_2 = |
480 | 3.91M | _mm256_inserti128_si256(_mm256_castsi128_si256(filt_02), filt_12, 0x1); |
481 | 3.91M | __m256i filt_3 = |
482 | 3.91M | _mm256_inserti128_si256(_mm256_castsi128_si256(filt_03), filt_13, 0x1); |
483 | | |
484 | 3.91M | __m256i res_0 = _mm256_unpacklo_epi32(filt_0, filt_1); |
485 | 3.91M | __m256i res_1 = _mm256_unpacklo_epi32(filt_2, filt_3); |
486 | 3.91M | __m256i res_2 = _mm256_unpackhi_epi32(filt_0, filt_1); |
487 | 3.91M | __m256i res_3 = _mm256_unpackhi_epi32(filt_2, filt_3); |
488 | | |
489 | 3.91M | coeffs[0] = _mm256_unpacklo_epi64(res_0, res_1); |
490 | 3.91M | coeffs[1] = _mm256_unpackhi_epi64(res_0, res_1); |
491 | 3.91M | coeffs[2] = _mm256_unpacklo_epi64(res_2, res_3); |
492 | 3.91M | coeffs[3] = _mm256_unpackhi_epi64(res_2, res_3); |
493 | | |
494 | 3.91M | filt_00 = |
495 | 3.91M | _mm_loadu_si128((__m128i *)(av1_warped_filter + |
496 | 3.91M | ((sy + 1 * gamma) >> WARPEDDIFF_PREC_BITS))); |
497 | 3.91M | filt_01 = |
498 | 3.91M | _mm_loadu_si128((__m128i *)(av1_warped_filter + |
499 | 3.91M | ((sy + 3 * gamma) >> WARPEDDIFF_PREC_BITS))); |
500 | 3.91M | filt_02 = |
501 | 3.91M | _mm_loadu_si128((__m128i *)(av1_warped_filter + |
502 | 3.91M | ((sy + 5 * gamma) >> WARPEDDIFF_PREC_BITS))); |
503 | 3.91M | filt_03 = |
504 | 3.91M | _mm_loadu_si128((__m128i *)(av1_warped_filter + |
505 | 3.91M | ((sy + 7 * gamma) >> WARPEDDIFF_PREC_BITS))); |
506 | | |
507 | 3.91M | filt_10 = _mm_loadu_si128( |
508 | 3.91M | (__m128i *)(av1_warped_filter + |
509 | 3.91M | (((sy + delta) + 1 * gamma) >> WARPEDDIFF_PREC_BITS))); |
510 | 3.91M | filt_11 = _mm_loadu_si128( |
511 | 3.91M | (__m128i *)(av1_warped_filter + |
512 | 3.91M | (((sy + delta) + 3 * gamma) >> WARPEDDIFF_PREC_BITS))); |
513 | 3.91M | filt_12 = _mm_loadu_si128( |
514 | 3.91M | (__m128i *)(av1_warped_filter + |
515 | 3.91M | (((sy + delta) + 5 * gamma) >> WARPEDDIFF_PREC_BITS))); |
516 | 3.91M | filt_13 = _mm_loadu_si128( |
517 | 3.91M | (__m128i *)(av1_warped_filter + |
518 | 3.91M | (((sy + delta) + 7 * gamma) >> WARPEDDIFF_PREC_BITS))); |
519 | | |
520 | 3.91M | filt_0 = |
521 | 3.91M | _mm256_inserti128_si256(_mm256_castsi128_si256(filt_00), filt_10, 0x1); |
522 | 3.91M | filt_1 = |
523 | 3.91M | _mm256_inserti128_si256(_mm256_castsi128_si256(filt_01), filt_11, 0x1); |
524 | 3.91M | filt_2 = |
525 | 3.91M | _mm256_inserti128_si256(_mm256_castsi128_si256(filt_02), filt_12, 0x1); |
526 | 3.91M | filt_3 = |
527 | 3.91M | _mm256_inserti128_si256(_mm256_castsi128_si256(filt_03), filt_13, 0x1); |
528 | | |
529 | 3.91M | res_0 = _mm256_unpacklo_epi32(filt_0, filt_1); |
530 | 3.91M | res_1 = _mm256_unpacklo_epi32(filt_2, filt_3); |
531 | 3.91M | res_2 = _mm256_unpackhi_epi32(filt_0, filt_1); |
532 | 3.91M | res_3 = _mm256_unpackhi_epi32(filt_2, filt_3); |
533 | | |
534 | 3.91M | coeffs[4] = _mm256_unpacklo_epi64(res_0, res_1); |
535 | 3.91M | coeffs[5] = _mm256_unpackhi_epi64(res_0, res_1); |
536 | 3.91M | coeffs[6] = _mm256_unpacklo_epi64(res_2, res_3); |
537 | 3.91M | coeffs[7] = _mm256_unpackhi_epi64(res_2, res_3); |
538 | 3.91M | } |
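 | |
 | | // Vertical coefficients are 16-bit (av1_warped_filter). The first batch
 | | // gathers the filters for the even output columns (sy + 0/2/4/6 * gamma)
 | | // and the second for the odd ones, each with rows sy and sy + delta in
 | | // the two lanes; the unpack ladder leaves coeffs[0..3] holding tap pairs
 | | // (0,1)/(2,3)/(4,5)/(6,7) for the even columns and coeffs[4..7] the same
 | | // pairs for the odd columns, ready for _mm256_madd_epi16.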
539 | | |
540 | | static inline void prepare_vertical_filter_coeffs_delta0_avx2(int gamma, int sy, |
541 | 172k | __m256i *coeffs) { |
542 | 172k | __m128i filt_00 = |
543 | 172k | _mm_loadu_si128((__m128i *)(av1_warped_filter + |
544 | 172k | ((sy + 0 * gamma) >> WARPEDDIFF_PREC_BITS))); |
545 | 172k | __m128i filt_01 = |
546 | 172k | _mm_loadu_si128((__m128i *)(av1_warped_filter + |
547 | 172k | ((sy + 2 * gamma) >> WARPEDDIFF_PREC_BITS))); |
548 | 172k | __m128i filt_02 = |
549 | 172k | _mm_loadu_si128((__m128i *)(av1_warped_filter + |
550 | 172k | ((sy + 4 * gamma) >> WARPEDDIFF_PREC_BITS))); |
551 | 172k | __m128i filt_03 = |
552 | 172k | _mm_loadu_si128((__m128i *)(av1_warped_filter + |
553 | 172k | ((sy + 6 * gamma) >> WARPEDDIFF_PREC_BITS))); |
554 | | |
555 | 172k | __m256i filt_0 = _mm256_broadcastsi128_si256(filt_00); |
556 | 172k | __m256i filt_1 = _mm256_broadcastsi128_si256(filt_01); |
557 | 172k | __m256i filt_2 = _mm256_broadcastsi128_si256(filt_02); |
558 | 172k | __m256i filt_3 = _mm256_broadcastsi128_si256(filt_03); |
559 | | |
560 | 172k | __m256i res_0 = _mm256_unpacklo_epi32(filt_0, filt_1); |
561 | 172k | __m256i res_1 = _mm256_unpacklo_epi32(filt_2, filt_3); |
562 | 172k | __m256i res_2 = _mm256_unpackhi_epi32(filt_0, filt_1); |
563 | 172k | __m256i res_3 = _mm256_unpackhi_epi32(filt_2, filt_3); |
564 | | |
565 | 172k | coeffs[0] = _mm256_unpacklo_epi64(res_0, res_1); |
566 | 172k | coeffs[1] = _mm256_unpackhi_epi64(res_0, res_1); |
567 | 172k | coeffs[2] = _mm256_unpacklo_epi64(res_2, res_3); |
568 | 172k | coeffs[3] = _mm256_unpackhi_epi64(res_2, res_3); |
569 | | |
570 | 172k | filt_00 = |
571 | 172k | _mm_loadu_si128((__m128i *)(av1_warped_filter + |
572 | 172k | ((sy + 1 * gamma) >> WARPEDDIFF_PREC_BITS))); |
573 | 172k | filt_01 = |
574 | 172k | _mm_loadu_si128((__m128i *)(av1_warped_filter + |
575 | 172k | ((sy + 3 * gamma) >> WARPEDDIFF_PREC_BITS))); |
576 | 172k | filt_02 = |
577 | 172k | _mm_loadu_si128((__m128i *)(av1_warped_filter + |
578 | 172k | ((sy + 5 * gamma) >> WARPEDDIFF_PREC_BITS))); |
579 | 172k | filt_03 = |
580 | 172k | _mm_loadu_si128((__m128i *)(av1_warped_filter + |
581 | 172k | ((sy + 7 * gamma) >> WARPEDDIFF_PREC_BITS))); |
582 | | |
583 | 172k | filt_0 = _mm256_broadcastsi128_si256(filt_00); |
584 | 172k | filt_1 = _mm256_broadcastsi128_si256(filt_01); |
585 | 172k | filt_2 = _mm256_broadcastsi128_si256(filt_02); |
586 | 172k | filt_3 = _mm256_broadcastsi128_si256(filt_03); |
587 | | |
588 | 172k | res_0 = _mm256_unpacklo_epi32(filt_0, filt_1); |
589 | 172k | res_1 = _mm256_unpacklo_epi32(filt_2, filt_3); |
590 | 172k | res_2 = _mm256_unpackhi_epi32(filt_0, filt_1); |
591 | 172k | res_3 = _mm256_unpackhi_epi32(filt_2, filt_3); |
592 | | |
593 | 172k | coeffs[4] = _mm256_unpacklo_epi64(res_0, res_1); |
594 | 172k | coeffs[5] = _mm256_unpackhi_epi64(res_0, res_1); |
595 | 172k | coeffs[6] = _mm256_unpacklo_epi64(res_2, res_3); |
596 | 172k | coeffs[7] = _mm256_unpackhi_epi64(res_2, res_3); |
597 | 172k | } |
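 | |
 | | // delta == 0: both row-pairs share sy, so each filter is loaded once and
 | | // _mm256_broadcastsi128_si256 fills both lanes.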
598 | | |
599 | | static inline void prepare_vertical_filter_coeffs_gamma0_avx2(int delta, int sy, |
600 | 2.21M | __m256i *coeffs) { |
601 | 2.21M | const __m128i filt_0 = _mm_loadu_si128( |
602 | 2.21M | (__m128i *)(av1_warped_filter + (sy >> WARPEDDIFF_PREC_BITS))); |
603 | 2.21M | const __m128i filt_1 = _mm_loadu_si128( |
604 | 2.21M | (__m128i *)(av1_warped_filter + ((sy + delta) >> WARPEDDIFF_PREC_BITS))); |
605 | | |
606 | 2.21M | __m256i res_0 = |
607 | 2.21M | _mm256_inserti128_si256(_mm256_castsi128_si256(filt_0), filt_1, 0x1); |
608 | | |
609 | 2.21M | coeffs[0] = _mm256_shuffle_epi8( |
610 | 2.21M | res_0, _mm256_load_si256((__m256i *)shuffle_gamma0_mask0_avx2)); |
611 | 2.21M | coeffs[1] = _mm256_shuffle_epi8( |
612 | 2.21M | res_0, _mm256_load_si256((__m256i *)shuffle_gamma0_mask1_avx2)); |
613 | 2.21M | coeffs[2] = _mm256_shuffle_epi8( |
614 | 2.21M | res_0, _mm256_load_si256((__m256i *)shuffle_gamma0_mask2_avx2)); |
615 | 2.21M | coeffs[3] = _mm256_shuffle_epi8( |
616 | 2.21M | res_0, _mm256_load_si256((__m256i *)shuffle_gamma0_mask3_avx2)); |
617 | | |
618 | 2.21M | coeffs[4] = coeffs[0]; |
619 | 2.21M | coeffs[5] = coeffs[1]; |
620 | 2.21M | coeffs[6] = coeffs[2]; |
621 | 2.21M | coeffs[7] = coeffs[3]; |
622 | 2.21M | } |
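 | |
 | | // gamma == 0: one filter per row serves every column, splatted by the
 | | // shuffle_gamma0_* masks; even and odd columns then share it, hence
 | | // coeffs[4..7] simply alias coeffs[0..3].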
623 | | |
624 | | static inline void filter_src_pixels_vertical_avx2(__m256i *horz_out, |
625 | | __m256i *src, |
626 | | __m256i *coeffs, |
627 | | __m256i *res_lo, |
628 | 8.07M | __m256i *res_hi, int row) { |
629 | 8.07M | const __m256i src_6 = horz_out[row + 3]; |
630 | 8.07M | const __m256i src_7 = |
631 | 8.07M | _mm256_permute2x128_si256(horz_out[row + 3], horz_out[row + 4], 0x21); |
632 | | |
633 | 8.07M | src[6] = _mm256_unpacklo_epi16(src_6, src_7); |
634 | | |
635 | 8.07M | const __m256i res_0 = _mm256_madd_epi16(src[0], coeffs[0]); |
636 | 8.07M | const __m256i res_2 = _mm256_madd_epi16(src[2], coeffs[1]); |
637 | 8.07M | const __m256i res_4 = _mm256_madd_epi16(src[4], coeffs[2]); |
638 | 8.07M | const __m256i res_6 = _mm256_madd_epi16(src[6], coeffs[3]); |
639 | | |
640 | 8.07M | const __m256i res_even = _mm256_add_epi32(_mm256_add_epi32(res_0, res_2), |
641 | 8.07M | _mm256_add_epi32(res_4, res_6)); |
642 | | |
643 | 8.07M | src[7] = _mm256_unpackhi_epi16(src_6, src_7); |
644 | | |
645 | 8.07M | const __m256i res_1 = _mm256_madd_epi16(src[1], coeffs[4]); |
646 | 8.07M | const __m256i res_3 = _mm256_madd_epi16(src[3], coeffs[5]); |
647 | 8.07M | const __m256i res_5 = _mm256_madd_epi16(src[5], coeffs[6]); |
648 | 8.07M | const __m256i res_7 = _mm256_madd_epi16(src[7], coeffs[7]); |
649 | | |
650 | 8.07M | const __m256i res_odd = _mm256_add_epi32(_mm256_add_epi32(res_1, res_3), |
651 | 8.07M | _mm256_add_epi32(res_5, res_7)); |
652 | | |
653 | | // Rearrange pixels back into the order 0 ... 7 |
654 | 8.07M | *res_lo = _mm256_unpacklo_epi32(res_even, res_odd); |
655 | 8.07M | *res_hi = _mm256_unpackhi_epi32(res_even, res_odd); |
656 | 8.07M | } |
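 | |
 | | // Scalar sketch of the vertical sum above for one output pixel (x, y)
 | | // (same arithmetic, not the bit-exact SIMD order):
 | | //   int32_t sum = 0;
 | | //   for (int tap = 0; tap < 8; ++tap)
 | | //     sum += coeff[tap] * horz_out[y + tap][x];
 | | // res_lo/res_hi return the even/odd column sums re-interleaved into
 | | // pixel order 0 .. 7.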
657 | | |
658 | | static inline void store_vertical_filter_output_avx2( |
659 | | const __m256i *res_lo, const __m256i *res_hi, const __m256i *res_add_const, |
660 | | const __m256i *wt, const __m256i *res_sub_const, |
661 | | const __m256i *round_bits_const, uint8_t *pred, ConvolveParams *conv_params, |
662 | | int i, int j, int k, const int reduce_bits_vert, int p_stride, int p_width, |
663 | 8.07M | const int round_bits) { |
664 | 8.07M | __m256i res_lo_1 = *res_lo; |
665 | 8.07M | __m256i res_hi_1 = *res_hi; |
666 | | |
667 | 8.07M | if (conv_params->is_compound) { |
668 | 212k | __m128i *const p_0 = |
669 | 212k | (__m128i *)&conv_params->dst[(i + k + 4) * conv_params->dst_stride + j]; |
670 | 212k | __m128i *const p_1 = |
671 | 212k | (__m128i *)&conv_params |
672 | 212k | ->dst[(i + (k + 1) + 4) * conv_params->dst_stride + j]; |
673 | | |
674 | 212k | res_lo_1 = _mm256_srai_epi32(_mm256_add_epi32(res_lo_1, *res_add_const), |
675 | 212k | reduce_bits_vert); |
676 | | |
677 | 212k | const __m256i temp_lo_16 = _mm256_packus_epi32(res_lo_1, res_lo_1); |
678 | 212k | __m256i res_lo_16; |
679 | 212k | if (conv_params->do_average) { |
680 | 91.0k | __m128i *const dst8_0 = (__m128i *)&pred[(i + k + 4) * p_stride + j]; |
681 | 91.0k | __m128i *const dst8_1 = |
682 | 91.0k | (__m128i *)&pred[(i + (k + 1) + 4) * p_stride + j]; |
683 | 91.0k | const __m128i p_16_0 = _mm_loadl_epi64(p_0); |
684 | 91.0k | const __m128i p_16_1 = _mm_loadl_epi64(p_1); |
685 | 91.0k | const __m256i p_16 = |
686 | 91.0k | _mm256_inserti128_si256(_mm256_castsi128_si256(p_16_0), p_16_1, 1); |
687 | 91.0k | if (conv_params->use_dist_wtd_comp_avg) { |
688 | 48.3k | const __m256i p_16_lo = _mm256_unpacklo_epi16(p_16, temp_lo_16); |
689 | 48.3k | const __m256i wt_res_lo = _mm256_madd_epi16(p_16_lo, *wt); |
690 | 48.3k | const __m256i shifted_32 = |
691 | 48.3k | _mm256_srai_epi32(wt_res_lo, DIST_PRECISION_BITS); |
692 | 48.3k | res_lo_16 = _mm256_packus_epi32(shifted_32, shifted_32); |
693 | 48.3k | } else { |
694 | 42.7k | res_lo_16 = _mm256_srai_epi16(_mm256_add_epi16(p_16, temp_lo_16), 1); |
695 | 42.7k | } |
696 | 91.0k | res_lo_16 = _mm256_add_epi16(res_lo_16, *res_sub_const); |
697 | 91.0k | res_lo_16 = _mm256_srai_epi16( |
698 | 91.0k | _mm256_add_epi16(res_lo_16, *round_bits_const), round_bits); |
699 | 91.0k | const __m256i res_8_lo = _mm256_packus_epi16(res_lo_16, res_lo_16); |
700 | 91.0k | const __m128i res_8_lo_0 = _mm256_castsi256_si128(res_8_lo); |
701 | 91.0k | const __m128i res_8_lo_1 = _mm256_extracti128_si256(res_8_lo, 1); |
702 | 91.0k | *(int *)dst8_0 = _mm_cvtsi128_si32(res_8_lo_0); |
703 | 91.0k | *(int *)dst8_1 = _mm_cvtsi128_si32(res_8_lo_1); |
704 | 121k | } else { |
705 | 121k | const __m128i temp_lo_16_0 = _mm256_castsi256_si128(temp_lo_16); |
706 | 121k | const __m128i temp_lo_16_1 = _mm256_extracti128_si256(temp_lo_16, 1); |
707 | 121k | _mm_storel_epi64(p_0, temp_lo_16_0); |
708 | 121k | _mm_storel_epi64(p_1, temp_lo_16_1); |
709 | 121k | } |
710 | 212k | if (p_width > 4) { |
711 | 212k | __m128i *const p4_0 = |
712 | 212k | (__m128i *)&conv_params |
713 | 212k | ->dst[(i + k + 4) * conv_params->dst_stride + j + 4]; |
714 | 212k | __m128i *const p4_1 = |
715 | 212k | (__m128i *)&conv_params |
716 | 212k | ->dst[(i + (k + 1) + 4) * conv_params->dst_stride + j + 4]; |
717 | 212k | res_hi_1 = _mm256_srai_epi32(_mm256_add_epi32(res_hi_1, *res_add_const), |
718 | 212k | reduce_bits_vert); |
719 | 212k | const __m256i temp_hi_16 = _mm256_packus_epi32(res_hi_1, res_hi_1); |
720 | 212k | __m256i res_hi_16; |
721 | 212k | if (conv_params->do_average) { |
722 | 91.0k | __m128i *const dst8_4_0 = |
723 | 91.0k | (__m128i *)&pred[(i + k + 4) * p_stride + j + 4]; |
724 | 91.0k | __m128i *const dst8_4_1 = |
725 | 91.0k | (__m128i *)&pred[(i + (k + 1) + 4) * p_stride + j + 4]; |
726 | 91.0k | const __m128i p4_16_0 = _mm_loadl_epi64(p4_0); |
727 | 91.0k | const __m128i p4_16_1 = _mm_loadl_epi64(p4_1); |
728 | 91.0k | const __m256i p4_16 = _mm256_inserti128_si256( |
729 | 91.0k | _mm256_castsi128_si256(p4_16_0), p4_16_1, 1); |
730 | 91.0k | if (conv_params->use_dist_wtd_comp_avg) { |
731 | 48.3k | const __m256i p_16_hi = _mm256_unpacklo_epi16(p4_16, temp_hi_16); |
732 | 48.3k | const __m256i wt_res_hi = _mm256_madd_epi16(p_16_hi, *wt); |
733 | 48.3k | const __m256i shifted_32 = |
734 | 48.3k | _mm256_srai_epi32(wt_res_hi, DIST_PRECISION_BITS); |
735 | 48.3k | res_hi_16 = _mm256_packus_epi32(shifted_32, shifted_32); |
736 | 48.3k | } else { |
737 | 42.7k | res_hi_16 = _mm256_srai_epi16(_mm256_add_epi16(p4_16, temp_hi_16), 1); |
738 | 42.7k | } |
739 | 91.0k | res_hi_16 = _mm256_add_epi16(res_hi_16, *res_sub_const); |
740 | 91.0k | res_hi_16 = _mm256_srai_epi16( |
741 | 91.0k | _mm256_add_epi16(res_hi_16, *round_bits_const), round_bits); |
742 | 91.0k | __m256i res_8_hi = _mm256_packus_epi16(res_hi_16, res_hi_16); |
743 | 91.0k | const __m128i res_8_hi_0 = _mm256_castsi256_si128(res_8_hi); |
744 | 91.0k | const __m128i res_8_hi_1 = _mm256_extracti128_si256(res_8_hi, 1); |
745 | 91.0k | *(int *)dst8_4_0 = _mm_cvtsi128_si32(res_8_hi_0); |
746 | 91.0k | *(int *)dst8_4_1 = _mm_cvtsi128_si32(res_8_hi_1); |
747 | 121k | } else { |
748 | 121k | const __m128i temp_hi_16_0 = _mm256_castsi256_si128(temp_hi_16); |
749 | 121k | const __m128i temp_hi_16_1 = _mm256_extracti128_si256(temp_hi_16, 1); |
750 | 121k | _mm_storel_epi64(p4_0, temp_hi_16_0); |
751 | 121k | _mm_storel_epi64(p4_1, temp_hi_16_1); |
752 | 121k | } |
753 | 212k | } |
754 | 7.86M | } else { |
755 | 7.86M | const __m256i res_lo_round = _mm256_srai_epi32( |
756 | 7.86M | _mm256_add_epi32(res_lo_1, *res_add_const), reduce_bits_vert); |
757 | 7.86M | const __m256i res_hi_round = _mm256_srai_epi32( |
758 | 7.86M | _mm256_add_epi32(res_hi_1, *res_add_const), reduce_bits_vert); |
759 | | |
760 | 7.86M | const __m256i res_16bit = _mm256_packs_epi32(res_lo_round, res_hi_round); |
761 | 7.86M | const __m256i res_8bit = _mm256_packus_epi16(res_16bit, res_16bit); |
762 | 7.86M | const __m128i res_8bit0 = _mm256_castsi256_si128(res_8bit); |
763 | 7.86M | const __m128i res_8bit1 = _mm256_extracti128_si256(res_8bit, 1); |
764 | | |
765 | | // Store, blending with 'pred' if needed |
766 | 7.86M | __m128i *const p = (__m128i *)&pred[(i + k + 4) * p_stride + j]; |
767 | 7.86M | __m128i *const p1 = (__m128i *)&pred[(i + (k + 1) + 4) * p_stride + j]; |
768 | | |
769 | 7.86M | if (p_width == 4) { |
770 | 0 | *(int *)p = _mm_cvtsi128_si32(res_8bit0); |
771 | 0 | *(int *)p1 = _mm_cvtsi128_si32(res_8bit1); |
772 | 7.86M | } else { |
773 | 7.86M | _mm_storel_epi64(p, res_8bit0); |
774 | 7.86M | _mm_storel_epi64(p1, res_8bit1); |
775 | 7.86M | } |
776 | 7.86M | } |
777 | 8.07M | } |
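 | |
 | | // In outline, the store paths above compute:
 | | //   tmp = (res + *res_add_const) >> reduce_bits_vert
 | | //   non-compound:       pred = clamp8(tmp)   (packs_epi32 + packus_epi16)
 | | //   compound, no avg:   dst  = tmp           (16-bit CONV_BUF samples)
 | | //   compound, average:  avg  = use_dist_wtd_comp_avg
 | | //                              ? (w0 * dst + w1 * tmp) >> DIST_PRECISION_BITS
 | | //                              : (dst + tmp) >> 1;
 | | //     pred = clamp8((avg + *res_sub_const + *round_bits_const) >> round_bits)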
778 | | |
779 | | static inline void warp_vertical_filter_avx2( |
780 | | uint8_t *pred, __m256i *horz_out, ConvolveParams *conv_params, |
781 | | int16_t gamma, int16_t delta, int p_height, int p_stride, int p_width, |
782 | | int i, int j, int sy4, const int reduce_bits_vert, |
783 | | const __m256i *res_add_const, const int round_bits, |
784 | | const __m256i *res_sub_const, const __m256i *round_bits_const, |
785 | 979k | const __m256i *wt) { |
786 | 979k | int k, row = 0; |
787 | 979k | __m256i src[8]; |
788 | 979k | const __m256i src_0 = horz_out[0]; |
789 | 979k | const __m256i src_1 = |
790 | 979k | _mm256_permute2x128_si256(horz_out[0], horz_out[1], 0x21); |
791 | 979k | const __m256i src_2 = horz_out[1]; |
792 | 979k | const __m256i src_3 = |
793 | 979k | _mm256_permute2x128_si256(horz_out[1], horz_out[2], 0x21); |
794 | 979k | const __m256i src_4 = horz_out[2]; |
795 | 979k | const __m256i src_5 = |
796 | 979k | _mm256_permute2x128_si256(horz_out[2], horz_out[3], 0x21); |
797 | | |
798 | 979k | src[0] = _mm256_unpacklo_epi16(src_0, src_1); |
799 | 979k | src[2] = _mm256_unpacklo_epi16(src_2, src_3); |
800 | 979k | src[4] = _mm256_unpacklo_epi16(src_4, src_5); |
801 | | |
802 | 979k | src[1] = _mm256_unpackhi_epi16(src_0, src_1); |
803 | 979k | src[3] = _mm256_unpackhi_epi16(src_2, src_3); |
804 | 979k | src[5] = _mm256_unpackhi_epi16(src_4, src_5); |
805 | | |
806 | 4.89M | for (k = -4; k < AOMMIN(4, p_height - i - 4); k += 2) { |
807 | 3.91M | int sy = sy4 + delta * (k + 4); |
808 | 3.91M | __m256i coeffs[8]; |
809 | 3.91M | prepare_vertical_filter_coeffs_avx2(gamma, delta, sy, coeffs); |
810 | 3.91M | __m256i res_lo, res_hi; |
811 | 3.91M | filter_src_pixels_vertical_avx2(horz_out, src, coeffs, &res_lo, &res_hi, |
812 | 3.91M | row); |
813 | 3.91M | store_vertical_filter_output_avx2(&res_lo, &res_hi, res_add_const, wt, |
814 | 3.91M | res_sub_const, round_bits_const, pred, |
815 | 3.91M | conv_params, i, j, k, reduce_bits_vert, |
816 | 3.91M | p_stride, p_width, round_bits); |
817 | 3.91M | src[0] = src[2]; |
818 | 3.91M | src[2] = src[4]; |
819 | 3.91M | src[4] = src[6]; |
820 | 3.91M | src[1] = src[3]; |
821 | 3.91M | src[3] = src[5]; |
822 | 3.91M | src[5] = src[7]; |
823 | | |
824 | 3.91M | row += 1; |
825 | 3.91M | } |
826 | 979k | } |
827 | | |
828 | | static inline void warp_vertical_filter_gamma0_avx2( |
829 | | uint8_t *pred, __m256i *horz_out, ConvolveParams *conv_params, |
830 | | int16_t gamma, int16_t delta, int p_height, int p_stride, int p_width, |
831 | | int i, int j, int sy4, const int reduce_bits_vert, |
832 | | const __m256i *res_add_const, const int round_bits, |
833 | | const __m256i *res_sub_const, const __m256i *round_bits_const, |
834 | 450k | const __m256i *wt) { |
835 | 450k | (void)gamma; |
836 | 450k | int k, row = 0; |
837 | 450k | __m256i src[8]; |
838 | 450k | const __m256i src_0 = horz_out[0]; |
839 | 450k | const __m256i src_1 = |
840 | 450k | _mm256_permute2x128_si256(horz_out[0], horz_out[1], 0x21); |
841 | 450k | const __m256i src_2 = horz_out[1]; |
842 | 450k | const __m256i src_3 = |
843 | 450k | _mm256_permute2x128_si256(horz_out[1], horz_out[2], 0x21); |
844 | 450k | const __m256i src_4 = horz_out[2]; |
845 | 450k | const __m256i src_5 = |
846 | 450k | _mm256_permute2x128_si256(horz_out[2], horz_out[3], 0x21); |
847 | | |
848 | 450k | src[0] = _mm256_unpacklo_epi16(src_0, src_1); |
849 | 450k | src[2] = _mm256_unpacklo_epi16(src_2, src_3); |
850 | 450k | src[4] = _mm256_unpacklo_epi16(src_4, src_5); |
851 | | |
852 | 450k | src[1] = _mm256_unpackhi_epi16(src_0, src_1); |
853 | 450k | src[3] = _mm256_unpackhi_epi16(src_2, src_3); |
854 | 450k | src[5] = _mm256_unpackhi_epi16(src_4, src_5); |
855 | | |
856 | 2.25M | for (k = -4; k < AOMMIN(4, p_height - i - 4); k += 2) { |
857 | 1.80M | int sy = sy4 + delta * (k + 4); |
858 | 1.80M | __m256i coeffs[8]; |
859 | 1.80M | prepare_vertical_filter_coeffs_gamma0_avx2(delta, sy, coeffs); |
860 | 1.80M | __m256i res_lo, res_hi; |
861 | 1.80M | filter_src_pixels_vertical_avx2(horz_out, src, coeffs, &res_lo, &res_hi, |
862 | 1.80M | row); |
863 | 1.80M | store_vertical_filter_output_avx2(&res_lo, &res_hi, res_add_const, wt, |
864 | 1.80M | res_sub_const, round_bits_const, pred, |
865 | 1.80M | conv_params, i, j, k, reduce_bits_vert, |
866 | 1.80M | p_stride, p_width, round_bits); |
867 | 1.80M | src[0] = src[2]; |
868 | 1.80M | src[2] = src[4]; |
869 | 1.80M | src[4] = src[6]; |
870 | 1.80M | src[1] = src[3]; |
871 | 1.80M | src[3] = src[5]; |
872 | 1.80M | src[5] = src[7]; |
873 | 1.80M | row += 1; |
874 | 1.80M | } |
875 | 450k | } |
876 | | |
877 | | static inline void warp_vertical_filter_delta0_avx2( |
878 | | uint8_t *pred, __m256i *horz_out, ConvolveParams *conv_params, |
879 | | int16_t gamma, int16_t delta, int p_height, int p_stride, int p_width, |
880 | | int i, int j, int sy4, const int reduce_bits_vert, |
881 | | const __m256i *res_add_const, const int round_bits, |
882 | | const __m256i *res_sub_const, const __m256i *round_bits_const, |
883 | 172k | const __m256i *wt) { |
884 | 172k | (void)delta; |
885 | 172k | int k, row = 0; |
886 | 172k | __m256i src[8], coeffs[8]; |
887 | 172k | const __m256i src_0 = horz_out[0]; |
888 | 172k | const __m256i src_1 = |
889 | 172k | _mm256_permute2x128_si256(horz_out[0], horz_out[1], 0x21); |
890 | 172k | const __m256i src_2 = horz_out[1]; |
891 | 172k | const __m256i src_3 = |
892 | 172k | _mm256_permute2x128_si256(horz_out[1], horz_out[2], 0x21); |
893 | 172k | const __m256i src_4 = horz_out[2]; |
894 | 172k | const __m256i src_5 = |
895 | 172k | _mm256_permute2x128_si256(horz_out[2], horz_out[3], 0x21); |
896 | | |
897 | 172k | src[0] = _mm256_unpacklo_epi16(src_0, src_1); |
898 | 172k | src[2] = _mm256_unpacklo_epi16(src_2, src_3); |
899 | 172k | src[4] = _mm256_unpacklo_epi16(src_4, src_5); |
900 | | |
901 | 172k | src[1] = _mm256_unpackhi_epi16(src_0, src_1); |
902 | 172k | src[3] = _mm256_unpackhi_epi16(src_2, src_3); |
903 | 172k | src[5] = _mm256_unpackhi_epi16(src_4, src_5); |
904 | | |
905 | 172k | prepare_vertical_filter_coeffs_delta0_avx2(gamma, sy4, coeffs); |
906 | | |
907 | 860k | for (k = -4; k < AOMMIN(4, p_height - i - 4); k += 2) { |
908 | 688k | __m256i res_lo, res_hi; |
909 | 688k | filter_src_pixels_vertical_avx2(horz_out, src, coeffs, &res_lo, &res_hi, |
910 | 688k | row); |
911 | 688k | store_vertical_filter_output_avx2(&res_lo, &res_hi, res_add_const, wt, |
912 | 688k | res_sub_const, round_bits_const, pred, |
913 | 688k | conv_params, i, j, k, reduce_bits_vert, |
914 | 688k | p_stride, p_width, round_bits); |
915 | 688k | src[0] = src[2]; |
916 | 688k | src[2] = src[4]; |
917 | 688k | src[4] = src[6]; |
918 | 688k | src[1] = src[3]; |
919 | 688k | src[3] = src[5]; |
920 | 688k | src[5] = src[7]; |
921 | 688k | row += 1; |
922 | 688k | } |
923 | 172k | } |
924 | | |
925 | | static inline void warp_vertical_filter_gamma0_delta0_avx2( |
926 | | uint8_t *pred, __m256i *horz_out, ConvolveParams *conv_params, |
927 | | int16_t gamma, int16_t delta, int p_height, int p_stride, int p_width, |
928 | | int i, int j, int sy4, const int reduce_bits_vert, |
929 | | const __m256i *res_add_const, const int round_bits, |
930 | | const __m256i *res_sub_const, const __m256i *round_bits_const, |
931 | 412k | const __m256i *wt) { |
932 | 412k | (void)gamma; |
933 | 412k | int k, row = 0; |
934 | 412k | __m256i src[8], coeffs[8]; |
935 | 412k | const __m256i src_0 = horz_out[0]; |
936 | 412k | const __m256i src_1 = |
937 | 412k | _mm256_permute2x128_si256(horz_out[0], horz_out[1], 0x21); |
938 | 412k | const __m256i src_2 = horz_out[1]; |
939 | 412k | const __m256i src_3 = |
940 | 412k | _mm256_permute2x128_si256(horz_out[1], horz_out[2], 0x21); |
941 | 412k | const __m256i src_4 = horz_out[2]; |
942 | 412k | const __m256i src_5 = |
943 | 412k | _mm256_permute2x128_si256(horz_out[2], horz_out[3], 0x21); |
944 | | |
945 | 412k | src[0] = _mm256_unpacklo_epi16(src_0, src_1); |
946 | 412k | src[2] = _mm256_unpacklo_epi16(src_2, src_3); |
947 | 412k | src[4] = _mm256_unpacklo_epi16(src_4, src_5); |
948 | | |
949 | 412k | src[1] = _mm256_unpackhi_epi16(src_0, src_1); |
950 | 412k | src[3] = _mm256_unpackhi_epi16(src_2, src_3); |
951 | 412k | src[5] = _mm256_unpackhi_epi16(src_4, src_5); |
952 | | |
953 | 412k | prepare_vertical_filter_coeffs_gamma0_avx2(delta, sy4, coeffs); |
954 | | |
955 | 2.07M | for (k = -4; k < AOMMIN(4, p_height - i - 4); k += 2) { |
956 | 1.66M | __m256i res_lo, res_hi; |
957 | 1.66M | filter_src_pixels_vertical_avx2(horz_out, src, coeffs, &res_lo, &res_hi, |
958 | 1.66M | row); |
959 | 1.66M | store_vertical_filter_output_avx2(&res_lo, &res_hi, res_add_const, wt, |
960 | 1.66M | res_sub_const, round_bits_const, pred, |
961 | 1.66M | conv_params, i, j, k, reduce_bits_vert, |
962 | 1.66M | p_stride, p_width, round_bits); |
963 | 1.66M | src[0] = src[2]; |
964 | 1.66M | src[2] = src[4]; |
965 | 1.66M | src[4] = src[6]; |
966 | 1.66M | src[1] = src[3]; |
967 | 1.66M | src[3] = src[5]; |
968 | 1.66M | src[5] = src[7]; |
969 | 1.66M | row += 1; |
970 | 1.66M | } |
971 | 412k | } |
972 | | |
973 | | static inline void prepare_warp_vertical_filter_avx2( |
974 | | uint8_t *pred, __m256i *horz_out, ConvolveParams *conv_params, |
975 | | int16_t gamma, int16_t delta, int p_height, int p_stride, int p_width, |
976 | | int i, int j, int sy4, const int reduce_bits_vert, |
977 | | const __m256i *res_add_const, const int round_bits, |
978 | | const __m256i *res_sub_const, const __m256i *round_bits_const, |
979 | 2.01M | const __m256i *wt) { |
980 | 2.01M | if (gamma == 0 && delta == 0) |
981 | 412k | warp_vertical_filter_gamma0_delta0_avx2( |
982 | 412k | pred, horz_out, conv_params, gamma, delta, p_height, p_stride, p_width, |
983 | 412k | i, j, sy4, reduce_bits_vert, res_add_const, round_bits, res_sub_const, |
984 | 412k | round_bits_const, wt); |
985 | 1.60M | else if (gamma == 0 && delta != 0) |
986 | 450k | warp_vertical_filter_gamma0_avx2( |
987 | 450k | pred, horz_out, conv_params, gamma, delta, p_height, p_stride, p_width, |
988 | 450k | i, j, sy4, reduce_bits_vert, res_add_const, round_bits, res_sub_const, |
989 | 450k | round_bits_const, wt); |
990 | 1.15M | else if (gamma != 0 && delta == 0) |
991 | 172k | warp_vertical_filter_delta0_avx2( |
992 | 172k | pred, horz_out, conv_params, gamma, delta, p_height, p_stride, p_width, |
993 | 172k | i, j, sy4, reduce_bits_vert, res_add_const, round_bits, res_sub_const, |
994 | 172k | round_bits_const, wt); |
995 | 977k | else |
996 | 977k | warp_vertical_filter_avx2(pred, horz_out, conv_params, gamma, delta, |
997 | 977k | p_height, p_stride, p_width, i, j, sy4, |
998 | 977k | reduce_bits_vert, res_add_const, round_bits, |
999 | 977k | res_sub_const, round_bits_const, wt); |
1000 | 2.01M | } |
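 | |
 | | // This dispatcher and prepare_warp_horizontal_filter_avx2 below pick the
 | | // specialization matching which warp parameters are zero (gamma/delta
 | | // vertically, alpha/beta horizontally), so invariant coefficients are
 | | // computed once instead of per row or per pixel.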
1001 | | |
1002 | | static inline void prepare_warp_horizontal_filter_avx2( |
1003 | | const uint8_t *ref, __m256i *horz_out, int stride, int32_t ix4, int32_t iy4, |
1004 | | int32_t sx4, int alpha, int beta, int p_height, int height, int i, |
1005 | | const __m256i *round_const, const __m128i *shift, |
1006 | 1.69M | const __m256i *shuffle_src) { |
1007 | 1.69M | if (alpha == 0 && beta == 0) |
1008 | 384k | warp_horizontal_filter_alpha0_beta0_avx2( |
1009 | 384k | ref, horz_out, stride, ix4, iy4, sx4, alpha, beta, p_height, height, i, |
1010 | 384k | round_const, shift, shuffle_src); |
1011 | 1.31M | else if (alpha == 0 && beta != 0) |
1012 | 414k | warp_horizontal_filter_alpha0_avx2(ref, horz_out, stride, ix4, iy4, sx4, |
1013 | 414k | alpha, beta, p_height, height, i, |
1014 | 414k | round_const, shift, shuffle_src); |
1015 | 897k | else if (alpha != 0 && beta == 0) |
1016 | 416k | warp_horizontal_filter_beta0_avx2(ref, horz_out, stride, ix4, iy4, sx4, |
1017 | 416k | alpha, beta, p_height, height, i, |
1018 | 416k | round_const, shift, shuffle_src); |
1019 | 480k | else |
1020 | 480k | warp_horizontal_filter_avx2(ref, horz_out, stride, ix4, iy4, sx4, alpha, |
1021 | 480k | beta, p_height, height, i, round_const, shift, |
1022 | 480k | shuffle_src); |
1023 | 1.69M | } |
1024 | | |
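 | | // Per 8x8 output tile, the driver below maps the tile center (x, y)
 | | // through the affine model (ignoring subsampling)
 | | //   dst_x = mat[2] * x + mat[3] * y + mat[0]
 | | //   dst_y = mat[4] * x + mat[5] * y + mat[1]
 | | // splitting each into an integer source position (ix4, iy4) and a
 | | // WARPEDMODEL_PREC_BITS fraction (sx4, sy4), then runs the 15-row
 | | // horizontal pass into horz_out followed by the vertical pass. Tiles
 | | // whose clamped samples all come from the leftmost or rightmost column
 | | // take constant fill paths; partially out-of-bounds rows are padded via
 | | // the warp_pad_left / warp_pad_right shuffles.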
1025 | | void av1_warp_affine_avx2(const int32_t *mat, const uint8_t *ref, int width, |
1026 | | int height, int stride, uint8_t *pred, int p_col, |
1027 | | int p_row, int p_width, int p_height, int p_stride, |
1028 | | int subsampling_x, int subsampling_y, |
1029 | | ConvolveParams *conv_params, int16_t alpha, |
1030 | 238k | int16_t beta, int16_t gamma, int16_t delta) { |
1031 | 238k | __m256i horz_out[8]; |
1032 | 238k | int i, j, k; |
1033 | 238k | const int bd = 8; |
1034 | 238k | const int reduce_bits_horiz = conv_params->round_0; |
1035 | 238k | const int reduce_bits_vert = conv_params->is_compound |
1036 | 238k | ? conv_params->round_1 |
1037 | 238k | : 2 * FILTER_BITS - reduce_bits_horiz; |
1038 | 238k | const int offset_bits_horiz = bd + FILTER_BITS - 1; |
1039 | 238k | assert(IMPLIES(conv_params->is_compound, conv_params->dst != NULL)); |
1040 | | |
1041 | 238k | const int offset_bits_vert = bd + 2 * FILTER_BITS - reduce_bits_horiz; |
1042 | 238k | const __m256i reduce_bits_vert_const = |
1043 | 238k | _mm256_set1_epi32(((1 << reduce_bits_vert) >> 1)); |
1044 | 238k | const __m256i res_add_const = _mm256_set1_epi32(1 << offset_bits_vert); |
1045 | 238k | const int round_bits = |
1046 | 238k | 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1; |
1047 | 238k | const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0; |
1048 | 238k | assert(IMPLIES(conv_params->do_average, conv_params->is_compound)); |
1049 | | |
1050 | 238k | const __m256i round_const = _mm256_set1_epi16( |
1051 | 238k | (1 << offset_bits_horiz) + ((1 << reduce_bits_horiz) >> 1)); |
1052 | 238k | const __m128i shift = _mm_cvtsi32_si128(reduce_bits_horiz); |
1053 | | |
1054 | 238k | __m256i res_sub_const, round_bits_const, wt; |
1055 | 238k | unpack_weights_and_set_round_const_avx2(conv_params, round_bits, offset_bits, |
1056 | 238k | &res_sub_const, &round_bits_const, |
1057 | 238k | &wt); |
1058 | | |
1059 | 238k | __m256i res_add_const_1; |
1060 | 238k | if (conv_params->is_compound == 1) { |
1061 | 7.74k | res_add_const_1 = _mm256_add_epi32(reduce_bits_vert_const, res_add_const); |
1062 | 230k | } else { |
1063 | 230k | res_add_const_1 = _mm256_set1_epi32(-(1 << (bd + reduce_bits_vert - 1)) + |
1064 | 230k | ((1 << reduce_bits_vert) >> 1)); |
1065 | 230k | } |
1066 | 238k | const int32_t const1 = alpha * (-4) + beta * (-4) + |
1067 | 238k | (1 << (WARPEDDIFF_PREC_BITS - 1)) + |
1068 | 238k | (WARPEDPIXEL_PREC_SHIFTS << WARPEDDIFF_PREC_BITS); |
1069 | 238k | const int32_t const2 = gamma * (-4) + delta * (-4) + |
1070 | 238k | (1 << (WARPEDDIFF_PREC_BITS - 1)) + |
1071 | 238k | (WARPEDPIXEL_PREC_SHIFTS << WARPEDDIFF_PREC_BITS); |
1072 | 238k | const int32_t const3 = ((1 << WARP_PARAM_REDUCE_BITS) - 1); |
1073 | 238k | const int16_t const4 = (1 << (bd + FILTER_BITS - reduce_bits_horiz - 1)); |
1074 | 238k | const int16_t const5 = (1 << (FILTER_BITS - reduce_bits_horiz)); |
1075 | | |
1076 | 238k | __m256i shuffle_src[4]; |
1077 | 238k | shuffle_src[0] = _mm256_load_si256((__m256i *)shuffle_src0); |
1078 | 238k | shuffle_src[1] = _mm256_load_si256((__m256i *)shuffle_src1); |
1079 | 238k | shuffle_src[2] = _mm256_load_si256((__m256i *)shuffle_src2); |
1080 | 238k | shuffle_src[3] = _mm256_load_si256((__m256i *)shuffle_src3); |
1081 | | |
1082 | 765k | for (i = 0; i < p_height; i += 8) { |
1083 | 2.55M | for (j = 0; j < p_width; j += 8) { |
1084 | 2.02M | const int32_t src_x = (p_col + j + 4) << subsampling_x; |
1085 | 2.02M | const int32_t src_y = (p_row + i + 4) << subsampling_y; |
1086 | 2.02M | const int64_t dst_x = |
1087 | 2.02M | (int64_t)mat[2] * src_x + (int64_t)mat[3] * src_y + (int64_t)mat[0]; |
1088 | 2.02M | const int64_t dst_y = |
1089 | 2.02M | (int64_t)mat[4] * src_x + (int64_t)mat[5] * src_y + (int64_t)mat[1]; |
1090 | 2.02M | const int64_t x4 = dst_x >> subsampling_x; |
1091 | 2.02M | const int64_t y4 = dst_y >> subsampling_y; |
1092 | | |
1093 | 2.02M | int32_t ix4 = (int32_t)(x4 >> WARPEDMODEL_PREC_BITS); |
1094 | 2.02M | int32_t sx4 = x4 & ((1 << WARPEDMODEL_PREC_BITS) - 1); |
1095 | 2.02M | int32_t iy4 = (int32_t)(y4 >> WARPEDMODEL_PREC_BITS); |
1096 | 2.02M | int32_t sy4 = y4 & ((1 << WARPEDMODEL_PREC_BITS) - 1); |
1097 | | |
1098 | | // Add in all the constant terms, including rounding and offset |
1099 | 2.02M | sx4 += const1; |
1100 | 2.02M | sy4 += const2; |
1101 | | |
1102 | 2.02M | sx4 &= ~const3; |
1103 | 2.02M | sy4 &= ~const3; |
1104 | | |
1105 | | // Horizontal filter |
1106 | | // If the block is aligned such that, after clamping, every sample |
1107 | | // would be taken from the leftmost/rightmost column, then we can |
1108 | | // skip the expensive horizontal filter. |
1109 | | |
1110 | 2.02M | if (ix4 <= -7) { |
1111 | 121k | int iy, row = 0; |
1112 | 971k | for (k = -7; k <= (AOMMIN(8, p_height - i) - 2); k += 2) { |
1113 | 849k | iy = iy4 + k; |
1114 | 849k | iy = clamp(iy, 0, height - 1); |
1115 | 849k | const __m256i temp_0 = |
1116 | 849k | _mm256_set1_epi16(const4 + ref[iy * stride] * const5); |
1117 | 849k | iy = iy4 + k + 1; |
1118 | 849k | iy = clamp(iy, 0, height - 1); |
1119 | 849k | const __m256i temp_1 = |
1120 | 849k | _mm256_set1_epi16(const4 + ref[iy * stride] * const5); |
1121 | 849k | horz_out[row] = _mm256_blend_epi32(temp_0, temp_1, 0xf0); |
1122 | 849k | row += 1; |
1123 | 849k | } |
1124 | 121k | iy = iy4 + k; |
1125 | 121k | iy = clamp(iy, 0, height - 1); |
1126 | 121k | horz_out[row] = _mm256_set1_epi16(const4 + ref[iy * stride] * const5); |
1127 | 1.90M | } else if (ix4 >= width + 6) { |
1128 | 130k | int iy, row = 0; |
1129 | 1.04M | for (k = -7; k <= (AOMMIN(8, p_height - i) - 2); k += 2) { |
1130 | 912k | iy = iy4 + k; |
1131 | 912k | iy = clamp(iy, 0, height - 1); |
1132 | 912k | const __m256i temp_0 = _mm256_set1_epi16( |
1133 | 912k | const4 + ref[iy * stride + (width - 1)] * const5); |
1134 | 912k | iy = iy4 + k + 1; |
1135 | 912k | iy = clamp(iy, 0, height - 1); |
1136 | 912k | const __m256i temp_1 = _mm256_set1_epi16( |
1137 | 912k | const4 + ref[iy * stride + (width - 1)] * const5); |
1138 | 912k | horz_out[row] = _mm256_blend_epi32(temp_0, temp_1, 0xf0); |
1139 | 912k | row += 1; |
1140 | 912k | } |
1141 | 130k | iy = iy4 + k; |
1142 | 130k | iy = clamp(iy, 0, height - 1); |
1143 | 130k | horz_out[row] = |
1144 | 130k | _mm256_set1_epi16(const4 + ref[iy * stride + (width - 1)] * const5); |
1145 | 1.77M | } else if (((ix4 - 7) < 0) || ((ix4 + 9) > width)) { |
1146 | 78.8k | const int out_of_boundary_left = -(ix4 - 6); |
1147 | 78.8k | const int out_of_boundary_right = (ix4 + 8) - width; |
1148 | 78.8k | int iy, sx, row = 0; |
1149 | 630k | for (k = -7; k <= (AOMMIN(8, p_height - i) - 2); k += 2) { |
1150 | 552k | iy = iy4 + k; |
1151 | 552k | iy = clamp(iy, 0, height - 1); |
1152 | 552k | __m128i src0 = |
1153 | 552k | _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7)); |
1154 | 552k | iy = iy4 + k + 1; |
1155 | 552k | iy = clamp(iy, 0, height - 1); |
1156 | 552k | __m128i src1 = |
1157 | 552k | _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7)); |
1158 | | |
1159 | 552k | if (out_of_boundary_left >= 0) { |
1160 | 396k | const __m128i shuffle_reg_left = |
1161 | 396k | _mm_loadu_si128((__m128i *)warp_pad_left[out_of_boundary_left]); |
1162 | 396k | src0 = _mm_shuffle_epi8(src0, shuffle_reg_left); |
1163 | 396k | src1 = _mm_shuffle_epi8(src1, shuffle_reg_left); |
1164 | 396k | } |
1165 | 552k | if (out_of_boundary_right >= 0) { |
1166 | 323k | const __m128i shuffle_reg_right = _mm_loadu_si128( |
1167 | 323k | (__m128i *)warp_pad_right[out_of_boundary_right]); |
1168 | 323k | src0 = _mm_shuffle_epi8(src0, shuffle_reg_right); |
1169 | 323k | src1 = _mm_shuffle_epi8(src1, shuffle_reg_right); |
1170 | 323k | } |
1171 | 552k | sx = sx4 + beta * (k + 4); |
1172 | 552k | const __m256i src_01 = |
1173 | 552k | _mm256_inserti128_si256(_mm256_castsi128_si256(src0), src1, 0x1); |
1174 | 552k | horizontal_filter_avx2(src_01, horz_out, sx, alpha, beta, row, |
1175 | 552k | shuffle_src, &round_const, &shift); |
1176 | 552k | row += 1; |
1177 | 552k | } |
1178 | 78.8k | iy = iy4 + k; |
1179 | 78.8k | iy = clamp(iy, 0, height - 1); |
1180 | 78.8k | __m128i src = _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7)); |
1181 | 78.8k | if (out_of_boundary_left >= 0) { |
1182 | 56.6k | const __m128i shuffle_reg_left = |
1183 | 56.6k | _mm_loadu_si128((__m128i *)warp_pad_left[out_of_boundary_left]); |
1184 | 56.6k | src = _mm_shuffle_epi8(src, shuffle_reg_left); |
1185 | 56.6k | } |
1186 | 78.8k | if (out_of_boundary_right >= 0) { |
1187 | 46.2k | const __m128i shuffle_reg_right = |
1188 | 46.2k | _mm_loadu_si128((__m128i *)warp_pad_right[out_of_boundary_right]); |
1189 | 46.2k | src = _mm_shuffle_epi8(src, shuffle_reg_right); |
1190 | 46.2k | } |
1191 | 78.8k | sx = sx4 + beta * (k + 4); |
1192 | 78.8k | const __m256i src_01 = _mm256_castsi128_si256(src); |
1193 | 78.8k | __m256i coeff[4]; |
1194 | 78.8k | prepare_horizontal_filter_coeff(alpha, sx, coeff); |
1195 | 78.8k | filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src, |
1196 | 78.8k | &round_const, &shift, row); |
1197 | 1.69M | } else { |
1198 | 1.69M | prepare_warp_horizontal_filter_avx2( |
1199 | 1.69M | ref, horz_out, stride, ix4, iy4, sx4, alpha, beta, p_height, height, |
1200 | 1.69M | i, &round_const, &shift, shuffle_src); |
1201 | 1.69M | } |
1202 | | |
1203 | | // Vertical filter |
1204 | 2.02M | prepare_warp_vertical_filter_avx2( |
1205 | 2.02M | pred, horz_out, conv_params, gamma, delta, p_height, p_stride, |
1206 | 2.02M | p_width, i, j, sy4, reduce_bits_vert, &res_add_const_1, round_bits, |
1207 | 2.02M | &res_sub_const, &round_bits_const, &wt); |
1208 | 2.02M | } |
1209 | 526k | } |
1210 | 238k | } |