/src/aom/aom_dsp/simd/v256_intrinsics_x86.h
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2016, Alliance for Open Media. All rights reserved. |
3 | | * |
4 | | * This source code is subject to the terms of the BSD 2 Clause License and |
5 | | * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License |
6 | | * was not distributed with this source code in the LICENSE file, you can |
7 | | * obtain it at www.aomedia.org/license/software. If the Alliance for Open |
8 | | * Media Patent License 1.0 was not distributed with this source code in the |
9 | | * PATENTS file, you can obtain it at www.aomedia.org/license/patent. |
10 | | */ |
11 | | |
12 | | #ifndef AOM_AOM_DSP_SIMD_V256_INTRINSICS_X86_H_ |
13 | | #define AOM_AOM_DSP_SIMD_V256_INTRINSICS_X86_H_ |
14 | | |
15 | | #if !defined(__AVX2__) |
16 | | |
17 | | #include "aom_dsp/simd/v256_intrinsics_v128.h" |
18 | | |
19 | | #else |
20 | | |
21 | | // The __m256i type seems to cause problems for g++'s mangling prior to
22 | | // version 5, but adding -fabi-version=0 fixes this. |
23 | | #if !defined(__clang__) && defined(__GNUC__) && __GNUC__ < 5 && \ |
24 | | defined(__AVX2__) && defined(__cplusplus) |
25 | | #pragma GCC optimize "-fabi-version=0" |
26 | | #endif |
27 | | |
28 | | #include <immintrin.h> |
29 | | |
30 | | #include "aom_dsp/simd/v128_intrinsics_x86.h" |
31 | | |
32 | | typedef __m256i v256; |
33 | | |
34 | 0 | SIMD_INLINE uint32_t v256_low_u32(v256 a) { |
35 | 0 | return (uint32_t)_mm_cvtsi128_si32(_mm256_extracti128_si256(a, 0)); |
36 | 0 | } |
37 | | |
38 | 0 | SIMD_INLINE v64 v256_low_v64(v256 a) { |
39 | 0 | return _mm_unpacklo_epi64(_mm256_extracti128_si256(a, 0), v64_zero()); |
40 | 0 | } |
41 | | |
42 | 0 | SIMD_INLINE uint64_t v256_low_u64(v256 a) { return v64_u64(v256_low_v64(a)); } |
43 | | |
44 | 80.9M | SIMD_INLINE v128 v256_low_v128(v256 a) { return _mm256_castsi256_si128(a); } |
45 | | |
46 | 36.6M | SIMD_INLINE v128 v256_high_v128(v256 a) { |
47 | 36.6M | return _mm256_extracti128_si256(a, 1); |
48 | 36.6M | } |
49 | | |
50 | 386M | SIMD_INLINE v256 v256_from_v128(v128 a, v128 b) { |
51 | | // gcc seems to be missing _mm256_set_m128i() |
52 | 386M | return _mm256_inserti128_si256(_mm256_castsi128_si256(b), a, 1); |
53 | 386M | } |
54 | | |
55 | 201M | SIMD_INLINE v256 v256_from_v64(v64 a, v64 b, v64 c, v64 d) { |
56 | 201M | return v256_from_v128(v128_from_v64(a, b), v128_from_v64(c, d)); |
57 | 201M | } |
58 | | |
59 | 0 | SIMD_INLINE v256 v256_from_64(uint64_t a, uint64_t b, uint64_t c, uint64_t d) { |
60 | 0 | return _mm256_set_epi64x((int64_t)a, (int64_t)b, (int64_t)c, (int64_t)d); |
61 | 0 | } |
62 | | |
63 | 0 | SIMD_INLINE v256 v256_load_aligned(const void *p) { |
64 | 0 | return _mm256_load_si256((const __m256i *)p); |
65 | 0 | } |
66 | | |
67 | 0 | SIMD_INLINE v256 v256_load_unaligned(const void *p) { |
68 | 0 | return _mm256_loadu_si256((const __m256i *)p); |
69 | 0 | } |
70 | | |
71 | 0 | SIMD_INLINE void v256_store_aligned(void *p, v256 a) { |
72 | 0 | _mm256_store_si256((__m256i *)p, a); |
73 | 0 | } |
74 | | |
75 | 0 | SIMD_INLINE void v256_store_unaligned(void *p, v256 a) { |
76 | 0 | _mm256_storeu_si256((__m256i *)p, a); |
77 | 0 | } |
78 | | |
79 | 124M | SIMD_INLINE v256 v256_zero(void) { return _mm256_setzero_si256(); } |
80 | | |
81 | 0 | SIMD_INLINE v256 v256_dup_8(uint8_t x) { return _mm256_set1_epi8((char)x); } |
82 | | |
83 | 463M | SIMD_INLINE v256 v256_dup_16(uint16_t x) { return _mm256_set1_epi16((short)x); } |
84 | | |
85 | 0 | SIMD_INLINE v256 v256_dup_32(uint32_t x) { return _mm256_set1_epi32((int)x); } |
86 | | |
87 | 0 | SIMD_INLINE v256 v256_dup_64(uint64_t x) { |
88 | 0 | return _mm256_set1_epi64x((int64_t)x); |
89 | 0 | } |
90 | | |
91 | 0 | SIMD_INLINE v256 v256_add_8(v256 a, v256 b) { return _mm256_add_epi8(a, b); } |
92 | | |
93 | 719M | SIMD_INLINE v256 v256_add_16(v256 a, v256 b) { return _mm256_add_epi16(a, b); } |
94 | | |
95 | 0 | SIMD_INLINE v256 v256_sadd_u8(v256 a, v256 b) { return _mm256_adds_epu8(a, b); } |
96 | | |
97 | 0 | SIMD_INLINE v256 v256_sadd_s8(v256 a, v256 b) { return _mm256_adds_epi8(a, b); } |
98 | | |
99 | 0 | SIMD_INLINE v256 v256_sadd_s16(v256 a, v256 b) { |
100 | 0 | return _mm256_adds_epi16(a, b); |
101 | 0 | } |
102 | | |
103 | 0 | SIMD_INLINE v256 v256_add_32(v256 a, v256 b) { return _mm256_add_epi32(a, b); } |
104 | | |
105 | 0 | SIMD_INLINE v256 v256_add_64(v256 a, v256 b) { return _mm256_add_epi64(a, b); } |
106 | | |
107 | 0 | SIMD_INLINE v256 v256_padd_u8(v256 a) { |
108 | 0 | return _mm256_maddubs_epi16(a, _mm256_set1_epi8(1)); |
109 | 0 | } |
110 | | |
111 | 0 | SIMD_INLINE v256 v256_padd_s16(v256 a) { |
112 | 0 | return _mm256_madd_epi16(a, _mm256_set1_epi16(1)); |
113 | 0 | } |
114 | | |
115 | 0 | SIMD_INLINE v256 v256_sub_8(v256 a, v256 b) { return _mm256_sub_epi8(a, b); } |
116 | | |
117 | 0 | SIMD_INLINE v256 v256_ssub_u8(v256 a, v256 b) { return _mm256_subs_epu8(a, b); } |
118 | | |
119 | 0 | SIMD_INLINE v256 v256_ssub_s8(v256 a, v256 b) { return _mm256_subs_epi8(a, b); } |
120 | | |
121 | 321M | SIMD_INLINE v256 v256_sub_16(v256 a, v256 b) { return _mm256_sub_epi16(a, b); } |
122 | | |
123 | 0 | SIMD_INLINE v256 v256_ssub_s16(v256 a, v256 b) { |
124 | 0 | return _mm256_subs_epi16(a, b); |
125 | 0 | } |
126 | | |
127 | 332M | SIMD_INLINE v256 v256_ssub_u16(v256 a, v256 b) { |
128 | 332M | return _mm256_subs_epu16(a, b); |
129 | 332M | } |
130 | | |
131 | 0 | SIMD_INLINE v256 v256_sub_32(v256 a, v256 b) { return _mm256_sub_epi32(a, b); } |
132 | | |
133 | 0 | SIMD_INLINE v256 v256_sub_64(v256 a, v256 b) { return _mm256_sub_epi64(a, b); } |
134 | | |
135 | 321M | SIMD_INLINE v256 v256_abs_s16(v256 a) { return _mm256_abs_epi16(a); } |
136 | | |
137 | 0 | SIMD_INLINE v256 v256_abs_s8(v256 a) { return _mm256_abs_epi8(a); } |
138 | | |
139 | | // AVX2 doesn't have direct intrinsics to zip/unzip the 8-, 16- or 32-bit
140 | | // lanes of the lower or upper half of a 256-bit vector, because the
141 | | // unpack/pack intrinsics operate on the 256-bit input vector as two
142 | | // independent 128-bit vectors.
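 | | // The _mm256_permute4x64_epi64(x, _MM_SHUFFLE(3, 1, 2, 0)) calls below swap
 | | // the two middle 64-bit words of x, so the per-lane unpacklo that follows
 | | // reads exactly the low 16 bytes of the original vector (eight from each
 | | // lane) and unpackhi reads exactly the high 16 bytes, turning the per-lane
 | | // unpack into a whole-register zip.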
143 | 0 | SIMD_INLINE v256 v256_ziplo_8(v256 a, v256 b) { |
144 | 0 | return _mm256_unpacklo_epi8( |
145 | 0 | _mm256_permute4x64_epi64(b, _MM_SHUFFLE(3, 1, 2, 0)), |
146 | 0 | _mm256_permute4x64_epi64(a, _MM_SHUFFLE(3, 1, 2, 0))); |
147 | 0 | } |
148 | | |
149 | 0 | SIMD_INLINE v256 v256_ziphi_8(v256 a, v256 b) { |
150 | 0 | return _mm256_unpackhi_epi8( |
151 | 0 | _mm256_permute4x64_epi64(b, _MM_SHUFFLE(3, 1, 2, 0)), |
152 | 0 | _mm256_permute4x64_epi64(a, _MM_SHUFFLE(3, 1, 2, 0))); |
153 | 0 | } |
154 | | |
155 | 0 | SIMD_INLINE v256 v256_ziplo_16(v256 a, v256 b) { |
156 | 0 | return _mm256_unpacklo_epi16( |
157 | 0 | _mm256_permute4x64_epi64(b, _MM_SHUFFLE(3, 1, 2, 0)), |
158 | 0 | _mm256_permute4x64_epi64(a, _MM_SHUFFLE(3, 1, 2, 0))); |
159 | 0 | } |
160 | | |
161 | 0 | SIMD_INLINE v256 v256_ziphi_16(v256 a, v256 b) { |
162 | 0 | return _mm256_unpackhi_epi16( |
163 | 0 | _mm256_permute4x64_epi64(b, _MM_SHUFFLE(3, 1, 2, 0)), |
164 | 0 | _mm256_permute4x64_epi64(a, _MM_SHUFFLE(3, 1, 2, 0))); |
165 | 0 | } |
166 | | |
167 | 0 | SIMD_INLINE v256 v256_ziplo_32(v256 a, v256 b) { |
168 | 0 | return _mm256_unpacklo_epi32( |
169 | 0 | _mm256_permute4x64_epi64(b, _MM_SHUFFLE(3, 1, 2, 0)), |
170 | 0 | _mm256_permute4x64_epi64(a, _MM_SHUFFLE(3, 1, 2, 0))); |
171 | 0 | } |
172 | | |
173 | 0 | SIMD_INLINE v256 v256_ziphi_32(v256 a, v256 b) { |
174 | 0 | return _mm256_unpackhi_epi32( |
175 | 0 | _mm256_permute4x64_epi64(b, _MM_SHUFFLE(3, 1, 2, 0)), |
176 | 0 | _mm256_permute4x64_epi64(a, _MM_SHUFFLE(3, 1, 2, 0))); |
177 | 0 | } |
178 | | |
179 | 0 | SIMD_INLINE v256 v256_ziplo_64(v256 a, v256 b) { |
180 | 0 | return _mm256_unpacklo_epi64( |
181 | 0 | _mm256_permute4x64_epi64(b, _MM_SHUFFLE(3, 1, 2, 0)), |
182 | 0 | _mm256_permute4x64_epi64(a, _MM_SHUFFLE(3, 1, 2, 0))); |
183 | 0 | } |
184 | | |
185 | 0 | SIMD_INLINE v256 v256_ziphi_64(v256 a, v256 b) { |
186 | 0 | return _mm256_unpackhi_epi64( |
187 | 0 | _mm256_permute4x64_epi64(b, _MM_SHUFFLE(3, 1, 2, 0)), |
188 | 0 | _mm256_permute4x64_epi64(a, _MM_SHUFFLE(3, 1, 2, 0))); |
189 | 0 | } |
190 | | |
191 | 0 | SIMD_INLINE v256 v256_ziplo_128(v256 a, v256 b) { |
192 | 0 | return _mm256_permute2x128_si256(a, b, 0x02); |
193 | 0 | } |
194 | | |
195 | 0 | SIMD_INLINE v256 v256_ziphi_128(v256 a, v256 b) { |
196 | 0 | return _mm256_permute2x128_si256(a, b, 0x13); |
197 | 0 | } |
198 | | |
199 | 0 | SIMD_INLINE v256 v256_zip_8(v128 a, v128 b) { |
200 | 0 | return v256_from_v128(v128_ziphi_8(a, b), v128_ziplo_8(a, b)); |
201 | 0 | } |
202 | | |
203 | 0 | SIMD_INLINE v256 v256_zip_16(v128 a, v128 b) { |
204 | 0 | return v256_from_v128(v128_ziphi_16(a, b), v128_ziplo_16(a, b)); |
205 | 0 | } |
206 | | |
207 | 0 | SIMD_INLINE v256 v256_zip_32(v128 a, v128 b) { |
208 | 0 | return v256_from_v128(v128_ziphi_32(a, b), v128_ziplo_32(a, b)); |
209 | 0 | } |
210 | | |
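 | | // The unzips keep every other element. v256_unziphi_8() keeps the
 | | // odd-indexed bytes: an arithmetic 16-bit shift right by 8 leaves each odd
 | | // byte sign-extended in its word, _mm256_packs_epi16() then narrows the
 | | // words back to bytes exactly, and the final permute restores lane order.
 | | // v256_unziplo_8() reuses this after shifting each 128-bit lane up by one
 | | // byte so that the even bytes land in the odd positions.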
211 | 0 | SIMD_INLINE v256 v256_unziphi_8(v256 a, v256 b) { |
212 | 0 | return _mm256_permute4x64_epi64( |
213 | 0 | _mm256_packs_epi16(_mm256_srai_epi16(b, 8), _mm256_srai_epi16(a, 8)), |
214 | 0 | _MM_SHUFFLE(3, 1, 2, 0)); |
215 | 0 | } |
216 | | |
217 | 0 | SIMD_INLINE v256 v256_unziplo_8(v256 a, v256 b) { |
218 | 0 | return v256_unziphi_8(_mm256_slli_si256(a, 1), _mm256_slli_si256(b, 1)); |
219 | 0 | } |
220 | | |
221 | 0 | SIMD_INLINE v256 v256_unziphi_16(v256 a, v256 b) { |
222 | 0 | return _mm256_permute4x64_epi64( |
223 | 0 | _mm256_packs_epi32(_mm256_srai_epi32(b, 16), _mm256_srai_epi32(a, 16)), |
224 | 0 | _MM_SHUFFLE(3, 1, 2, 0)); |
225 | 0 | } |
226 | | |
227 | 0 | SIMD_INLINE v256 v256_unziplo_16(v256 a, v256 b) { |
228 | 0 | return v256_unziphi_16(_mm256_slli_si256(a, 2), _mm256_slli_si256(b, 2)); |
229 | 0 | } |
230 | | |
231 | 0 | SIMD_INLINE v256 v256_unziphi_32(v256 a, v256 b) { |
232 | 0 | return _mm256_permute4x64_epi64( |
233 | 0 | _mm256_castps_si256(_mm256_shuffle_ps(_mm256_castsi256_ps(b), |
234 | 0 | _mm256_castsi256_ps(a), |
235 | 0 | _MM_SHUFFLE(3, 1, 3, 1))), |
236 | 0 | _MM_SHUFFLE(3, 1, 2, 0)); |
237 | 0 | } |
238 | | |
239 | 0 | SIMD_INLINE v256 v256_unziplo_32(v256 a, v256 b) { |
240 | 0 | return _mm256_permute4x64_epi64( |
241 | 0 | _mm256_castps_si256(_mm256_shuffle_ps(_mm256_castsi256_ps(b), |
242 | 0 | _mm256_castsi256_ps(a), |
243 | 0 | _MM_SHUFFLE(2, 0, 2, 0))), |
244 | 0 | _MM_SHUFFLE(3, 1, 2, 0)); |
245 | 0 | } |
246 | | |
247 | 0 | SIMD_INLINE v256 v256_unziphi_64(v256 a, v256 b) { |
248 | 0 | return _mm256_permute4x64_epi64( |
249 | 0 | _mm256_castpd_si256(_mm256_shuffle_pd(_mm256_castsi256_pd(b), |
250 | 0 | _mm256_castsi256_pd(a), 15)), |
251 | 0 | _MM_SHUFFLE(3, 1, 2, 0)); |
252 | 0 | } |
253 | | |
254 | 0 | SIMD_INLINE v256 v256_unziplo_64(v256 a, v256 b) { |
255 | 0 | return _mm256_permute4x64_epi64( |
256 | 0 | _mm256_castpd_si256( |
257 | 0 | _mm256_shuffle_pd(_mm256_castsi256_pd(b), _mm256_castsi256_pd(a), 0)), |
258 | 0 | _MM_SHUFFLE(3, 1, 2, 0)); |
259 | 0 | } |
260 | | |
261 | 0 | SIMD_INLINE v256 v256_unpack_u8_s16(v128 a) { return _mm256_cvtepu8_epi16(a); } |
262 | | |
263 | 0 | SIMD_INLINE v256 v256_unpacklo_u8_s16(v256 a) { |
264 | 0 | return _mm256_unpacklo_epi8( |
265 | 0 | _mm256_permute4x64_epi64(a, _MM_SHUFFLE(3, 1, 2, 0)), |
266 | 0 | _mm256_setzero_si256()); |
267 | 0 | } |
268 | | |
269 | 0 | SIMD_INLINE v256 v256_unpackhi_u8_s16(v256 a) { |
270 | 0 | return _mm256_unpackhi_epi8( |
271 | 0 | _mm256_permute4x64_epi64(a, _MM_SHUFFLE(3, 1, 2, 0)), |
272 | 0 | _mm256_setzero_si256()); |
273 | 0 | } |
274 | | |
275 | 0 | SIMD_INLINE v256 v256_unpack_s8_s16(v128 a) { |
276 | 0 | return v256_from_v128(v128_unpackhi_s8_s16(a), v128_unpacklo_s8_s16(a)); |
277 | 0 | } |
278 | | |
279 | 0 | SIMD_INLINE v256 v256_unpacklo_s8_s16(v256 a) { |
280 | 0 | return _mm256_srai_epi16( |
281 | 0 | _mm256_unpacklo_epi8( |
282 | 0 | a, _mm256_permute4x64_epi64(a, _MM_SHUFFLE(3, 1, 2, 0))), |
283 | 0 | 8); |
284 | 0 | } |
285 | | |
286 | 0 | SIMD_INLINE v256 v256_unpackhi_s8_s16(v256 a) { |
287 | 0 | return _mm256_srai_epi16( |
288 | 0 | _mm256_unpackhi_epi8( |
289 | 0 | a, _mm256_permute4x64_epi64(a, _MM_SHUFFLE(3, 1, 2, 0))), |
290 | 0 | 8); |
291 | 0 | } |
292 | | |
293 | 0 | SIMD_INLINE v256 v256_pack_s32_s16(v256 a, v256 b) { |
294 | 0 | return _mm256_permute4x64_epi64(_mm256_packs_epi32(b, a), |
295 | 0 | _MM_SHUFFLE(3, 1, 2, 0)); |
296 | 0 | } |
297 | | |
298 | 0 | SIMD_INLINE v256 v256_pack_s32_u16(v256 a, v256 b) { |
299 | 0 | return _mm256_permute4x64_epi64(_mm256_packus_epi32(b, a), |
300 | 0 | _MM_SHUFFLE(3, 1, 2, 0)); |
301 | 0 | } |
302 | | |
303 | 42.5M | SIMD_INLINE v256 v256_pack_s16_u8(v256 a, v256 b) { |
304 | 42.5M | return _mm256_permute4x64_epi64(_mm256_packus_epi16(b, a), |
305 | 42.5M | _MM_SHUFFLE(3, 1, 2, 0)); |
306 | 42.5M | } |
307 | | |
308 | 0 | SIMD_INLINE v256 v256_pack_s16_s8(v256 a, v256 b) { |
309 | 0 | return _mm256_permute4x64_epi64(_mm256_packs_epi16(b, a), |
310 | 0 | _MM_SHUFFLE(3, 1, 2, 0)); |
311 | 0 | } |
312 | | |
313 | 0 | SIMD_INLINE v256 v256_unpack_u16_s32(v128 a) { |
314 | 0 | return _mm256_cvtepu16_epi32(a); |
315 | 0 | } |
316 | | |
317 | 0 | SIMD_INLINE v256 v256_unpack_s16_s32(v128 a) { |
318 | 0 | return _mm256_cvtepi16_epi32(a); |
319 | 0 | } |
320 | | |
321 | 0 | SIMD_INLINE v256 v256_unpacklo_u16_s32(v256 a) { |
322 | 0 | return _mm256_unpacklo_epi16( |
323 | 0 | _mm256_permute4x64_epi64(a, _MM_SHUFFLE(3, 1, 2, 0)), |
324 | 0 | _mm256_setzero_si256()); |
325 | 0 | } |
326 | | |
327 | 0 | SIMD_INLINE v256 v256_unpacklo_s16_s32(v256 a) { |
328 | 0 | return _mm256_srai_epi32( |
329 | 0 | _mm256_unpacklo_epi16( |
330 | 0 | a, _mm256_permute4x64_epi64(a, _MM_SHUFFLE(3, 1, 2, 0))), |
331 | 0 | 16); |
332 | 0 | } |
333 | | |
334 | 0 | SIMD_INLINE v256 v256_unpackhi_u16_s32(v256 a) { |
335 | 0 | return _mm256_unpackhi_epi16( |
336 | 0 | _mm256_permute4x64_epi64(a, _MM_SHUFFLE(3, 1, 2, 0)), |
337 | 0 | _mm256_setzero_si256()); |
338 | 0 | } |
339 | | |
340 | 0 | SIMD_INLINE v256 v256_unpackhi_s16_s32(v256 a) { |
341 | 0 | return _mm256_srai_epi32( |
342 | 0 | _mm256_unpackhi_epi16( |
343 | 0 | a, _mm256_permute4x64_epi64(a, _MM_SHUFFLE(3, 1, 2, 0))), |
344 | 0 | 16); |
345 | 0 | } |
346 | | |
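 | | // _mm256_shuffle_epi8() only indexes within each 128-bit lane, so a full
 | | // 32-byte lookup is emulated: shuffle one copy of a with its low lane
 | | // broadcast to both lanes and one with its high lane broadcast, then blend
 | | // the two results on whether each index byte is below 16.
 | | // v256_wideshuffle_8() extends the same idea to a 64-byte table built from
 | | // two registers.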
347 | 0 | SIMD_INLINE v256 v256_shuffle_8(v256 a, v256 pattern) { |
348 | 0 | return _mm256_blendv_epi8( |
349 | 0 | _mm256_shuffle_epi8( |
350 | 0 | _mm256_permute2x128_si256(a, a, _MM_SHUFFLE(0, 1, 0, 1)), pattern), |
351 | 0 | _mm256_shuffle_epi8( |
352 | 0 | _mm256_permute2x128_si256(a, a, _MM_SHUFFLE(0, 0, 0, 0)), pattern), |
353 | 0 | _mm256_cmpgt_epi8(v256_dup_8(16), pattern)); |
354 | 0 | } |
355 | | |
356 | 0 | SIMD_INLINE v256 v256_wideshuffle_8(v256 a, v256 b, v256 pattern) { |
357 | 0 | v256 c32 = v256_dup_8(32); |
358 | 0 | v256 p32 = v256_sub_8(pattern, c32); |
359 | 0 | v256 r1 = _mm256_blendv_epi8( |
360 | 0 | _mm256_shuffle_epi8( |
361 | 0 | _mm256_permute2x128_si256(a, b, _MM_SHUFFLE(0, 1, 0, 1)), p32), |
362 | 0 | _mm256_shuffle_epi8( |
363 | 0 | _mm256_permute2x128_si256(a, b, _MM_SHUFFLE(0, 0, 0, 0)), p32), |
364 | 0 | _mm256_cmpgt_epi8(v256_dup_8(48), pattern)); |
365 | 0 | v256 r2 = _mm256_blendv_epi8( |
366 | 0 | _mm256_shuffle_epi8( |
367 | 0 | _mm256_permute2x128_si256(a, b, _MM_SHUFFLE(0, 3, 0, 3)), pattern), |
368 | 0 | _mm256_shuffle_epi8( |
369 | 0 | _mm256_permute2x128_si256(a, b, _MM_SHUFFLE(0, 2, 0, 2)), pattern), |
370 | 0 | _mm256_cmpgt_epi8(v256_dup_8(16), pattern)); |
371 | 0 | return _mm256_blendv_epi8(r1, r2, _mm256_cmpgt_epi8(c32, pattern)); |
372 | 0 | } |
373 | | |
374 | 0 | SIMD_INLINE v256 v256_pshuffle_8(v256 a, v256 pattern) { |
375 | 0 | return _mm256_shuffle_epi8(a, pattern); |
376 | 0 | } |
377 | | |
378 | 0 | SIMD_INLINE int64_t v256_dotp_su8(v256 a, v256 b) { |
379 | 0 | v256 t1 = _mm256_madd_epi16(v256_unpackhi_s8_s16(a), v256_unpackhi_u8_s16(b)); |
380 | 0 | v256 t2 = _mm256_madd_epi16(v256_unpacklo_s8_s16(a), v256_unpacklo_u8_s16(b)); |
381 | 0 | t1 = _mm256_add_epi32(t1, t2); |
382 | 0 | v128 t = _mm_add_epi32(_mm256_extracti128_si256(t1, 0), |
383 | 0 | _mm256_extracti128_si256(t1, 1)); |
384 | 0 | t = _mm_add_epi32(t, _mm_srli_si128(t, 8)); |
385 | 0 | t = _mm_add_epi32(t, _mm_srli_si128(t, 4)); |
386 | 0 | return (int32_t)v128_low_u32(t); |
387 | 0 | } |
388 | | |
389 | 0 | SIMD_INLINE int64_t v256_dotp_s16(v256 a, v256 b) { |
390 | 0 | v256 r = _mm256_madd_epi16(a, b); |
391 | 0 | #if defined(__x86_64__) |
392 | 0 | v128 t; |
393 | 0 | r = _mm256_add_epi64(_mm256_cvtepi32_epi64(v256_high_v128(r)), |
394 | 0 | _mm256_cvtepi32_epi64(v256_low_v128(r))); |
395 | 0 | t = v256_low_v128(_mm256_add_epi64( |
396 | 0 | r, _mm256_permute2x128_si256(r, r, _MM_SHUFFLE(2, 0, 0, 1)))); |
397 | 0 | return _mm_cvtsi128_si64(_mm_add_epi64(t, _mm_srli_si128(t, 8))); |
398 | 0 | #else |
399 | 0 | v128 l = v256_low_v128(r); |
400 | 0 | v128 h = v256_high_v128(r); |
401 | 0 | return (int64_t)_mm_cvtsi128_si32(l) + |
402 | 0 | (int64_t)_mm_cvtsi128_si32(_mm_srli_si128(l, 4)) + |
403 | 0 | (int64_t)_mm_cvtsi128_si32(_mm_srli_si128(l, 8)) + |
404 | 0 | (int64_t)_mm_cvtsi128_si32(_mm_srli_si128(l, 12)) + |
405 | 0 | (int64_t)_mm_cvtsi128_si32(h) + |
406 | 0 | (int64_t)_mm_cvtsi128_si32(_mm_srli_si128(h, 4)) + |
407 | 0 | (int64_t)_mm_cvtsi128_si32(_mm_srli_si128(h, 8)) + |
408 | 0 | (int64_t)_mm_cvtsi128_si32(_mm_srli_si128(h, 12)); |
409 | 0 | #endif |
410 | 0 | } |
411 | | |
412 | 0 | SIMD_INLINE int64_t v256_dotp_s32(v256 a, v256 b) { |
413 | 0 | v256 r = _mm256_mullo_epi32(a, b); |
414 | 0 | #if defined(__x86_64__) |
415 | 0 | v128 t; |
416 | 0 | r = _mm256_add_epi64(_mm256_cvtepi32_epi64(v256_high_v128(r)), |
417 | 0 | _mm256_cvtepi32_epi64(v256_low_v128(r))); |
418 | 0 | t = v256_low_v128(_mm256_add_epi64( |
419 | 0 | r, _mm256_permute2x128_si256(r, r, _MM_SHUFFLE(2, 0, 0, 1)))); |
420 | 0 | return _mm_cvtsi128_si64(_mm_add_epi64(t, _mm_srli_si128(t, 8))); |
421 | 0 | #else |
422 | 0 | v128 l = v256_low_v128(r); |
423 | 0 | v128 h = v256_high_v128(r); |
424 | 0 | return (int64_t)_mm_cvtsi128_si32(l) + |
425 | 0 | (int64_t)_mm_cvtsi128_si32(_mm_srli_si128(l, 4)) + |
426 | 0 | (int64_t)_mm_cvtsi128_si32(_mm_srli_si128(l, 8)) + |
427 | 0 | (int64_t)_mm_cvtsi128_si32(_mm_srli_si128(l, 12)) + |
428 | 0 | (int64_t)_mm_cvtsi128_si32(h) + |
429 | 0 | (int64_t)_mm_cvtsi128_si32(_mm_srli_si128(h, 4)) + |
430 | 0 | (int64_t)_mm_cvtsi128_si32(_mm_srli_si128(h, 8)) + |
431 | 0 | (int64_t)_mm_cvtsi128_si32(_mm_srli_si128(h, 12)); |
432 | 0 | #endif |
433 | 0 | } |
434 | | |
435 | 0 | SIMD_INLINE uint64_t v256_hadd_u8(v256 a) { |
436 | 0 | v256 t = _mm256_sad_epu8(a, _mm256_setzero_si256()); |
437 | 0 | v128 lo = v256_low_v128(t); |
438 | 0 | v128 hi = v256_high_v128(t); |
439 | 0 | lo = v128_add_32(lo, hi); |
440 | 0 | return v64_low_u32(v128_low_v64(lo)) + v128_low_u32(v128_high_v64(lo)); |
441 | 0 | } |
442 | | |
443 | | typedef v256 sad256_internal; |
444 | | |
445 | 0 | SIMD_INLINE sad256_internal v256_sad_u8_init(void) { |
446 | 0 | return _mm256_setzero_si256(); |
447 | 0 | } |
448 | | |
449 | | /* Implementation dependent return value. Result must be finalised with |
450 | | v256_sad_u8_sum(). |
451 | | The result for more than 32 v256_sad_u8() calls is undefined. */ |
452 | 0 | SIMD_INLINE sad256_internal v256_sad_u8(sad256_internal s, v256 a, v256 b) { |
453 | 0 | return _mm256_add_epi64(s, _mm256_sad_epu8(a, b)); |
454 | 0 | } |
455 | | |
456 | 0 | SIMD_INLINE uint32_t v256_sad_u8_sum(sad256_internal s) { |
457 | 0 | v256 t = _mm256_add_epi32(s, _mm256_unpackhi_epi64(s, s)); |
458 | 0 | return v128_low_u32(_mm_add_epi32(v256_high_v128(t), v256_low_v128(t))); |
459 | 0 | } |
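 | |
 | | /* Illustrative usage sketch only (the helper name, buffers and strides are
 | |    hypothetical, not part of the API): the SAD/SSD accumulators in this file
 | |    all follow the same init / accumulate / finalise pattern. */
 | | SIMD_INLINE uint32_t example_sad_32xh(const uint8_t *src, int src_stride,
 | |                                       const uint8_t *ref, int ref_stride,
 | |                                       int h) {
 | |   sad256_internal s = v256_sad_u8_init();
 | |   for (int i = 0; i < h; i++)  // keep h <= 32; see the note above
 | |     s = v256_sad_u8(s, v256_load_unaligned(src + i * src_stride),
 | |                     v256_load_unaligned(ref + i * ref_stride));
 | |   return v256_sad_u8_sum(s);
 | | }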
460 | | |
461 | | typedef v256 ssd256_internal; |
462 | | |
463 | 0 | SIMD_INLINE ssd256_internal v256_ssd_u8_init(void) { |
464 | 0 | return _mm256_setzero_si256(); |
465 | 0 | } |
466 | | |
467 | | /* Implementation dependent return value. Result must be finalised with |
468 | | * v256_ssd_u8_sum(). */ |
469 | 0 | SIMD_INLINE ssd256_internal v256_ssd_u8(ssd256_internal s, v256 a, v256 b) { |
470 | 0 | v256 l = _mm256_sub_epi16(_mm256_unpacklo_epi8(a, _mm256_setzero_si256()), |
471 | 0 | _mm256_unpacklo_epi8(b, _mm256_setzero_si256())); |
472 | 0 | v256 h = _mm256_sub_epi16(_mm256_unpackhi_epi8(a, _mm256_setzero_si256()), |
473 | 0 | _mm256_unpackhi_epi8(b, _mm256_setzero_si256())); |
474 | 0 | v256 rl = _mm256_madd_epi16(l, l); |
475 | 0 | v256 rh = _mm256_madd_epi16(h, h); |
476 | 0 | v128 c = _mm_cvtsi32_si128(32); |
477 | 0 | rl = _mm256_add_epi32(rl, _mm256_srli_si256(rl, 8)); |
478 | 0 | rl = _mm256_add_epi32(rl, _mm256_srli_si256(rl, 4)); |
479 | 0 | rh = _mm256_add_epi32(rh, _mm256_srli_si256(rh, 8)); |
480 | 0 | rh = _mm256_add_epi32(rh, _mm256_srli_si256(rh, 4)); |
481 | 0 | return _mm256_add_epi64( |
482 | 0 | s, |
483 | 0 | _mm256_srl_epi64(_mm256_sll_epi64(_mm256_unpacklo_epi64(rl, rh), c), c)); |
484 | 0 | } |
485 | | |
486 | 0 | SIMD_INLINE uint32_t v256_ssd_u8_sum(ssd256_internal s) { |
487 | 0 | v256 t = _mm256_add_epi32(s, _mm256_unpackhi_epi64(s, s)); |
488 | 0 | return v128_low_u32(_mm_add_epi32(v256_high_v128(t), v256_low_v128(t))); |
489 | 0 | } |
490 | | |
491 | 0 | SIMD_INLINE v256 v256_or(v256 a, v256 b) { return _mm256_or_si256(a, b); } |
492 | | |
493 | 336M | SIMD_INLINE v256 v256_xor(v256 a, v256 b) { return _mm256_xor_si256(a, b); } |
494 | | |
495 | 242M | SIMD_INLINE v256 v256_and(v256 a, v256 b) { return _mm256_and_si256(a, b); } |
496 | | |
497 | 0 | SIMD_INLINE v256 v256_andn(v256 a, v256 b) { return _mm256_andnot_si256(b, a); } |
498 | | |
499 | 0 | SIMD_INLINE v256 v256_mul_s16(v64 a, v64 b) { |
500 | 0 | v128 lo_bits = v128_mullo_s16(a, b); |
501 | 0 | v128 hi_bits = v128_mulhi_s16(a, b); |
502 | 0 | return v256_from_v128(v128_ziphi_16(hi_bits, lo_bits), |
503 | 0 | v128_ziplo_16(hi_bits, lo_bits)); |
504 | 0 | } |
505 | | |
506 | 139M | SIMD_INLINE v256 v256_mullo_s16(v256 a, v256 b) { |
507 | 139M | return _mm256_mullo_epi16(a, b); |
508 | 139M | } |
509 | | |
510 | 0 | SIMD_INLINE v256 v256_mulhi_s16(v256 a, v256 b) { |
511 | 0 | return _mm256_mulhi_epi16(a, b); |
512 | 0 | } |
513 | | |
514 | 0 | SIMD_INLINE v256 v256_mullo_s32(v256 a, v256 b) { |
515 | 0 | return _mm256_mullo_epi32(a, b); |
516 | 0 | } |
517 | | |
518 | 0 | SIMD_INLINE v256 v256_madd_s16(v256 a, v256 b) { |
519 | 0 | return _mm256_madd_epi16(a, b); |
520 | 0 | } |
521 | | |
522 | 0 | SIMD_INLINE v256 v256_madd_us8(v256 a, v256 b) { |
523 | 0 | return _mm256_maddubs_epi16(a, b); |
524 | 0 | } |
525 | | |
526 | 0 | SIMD_INLINE v256 v256_avg_u8(v256 a, v256 b) { return _mm256_avg_epu8(a, b); } |
527 | | |
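 | | // Rounding-down averages: _mm256_avg_epu8()/_mm256_avg_epu16() round up,
 | | // i.e. compute (a + b + 1) >> 1; subtracting the carry bit (a ^ b) & 1
 | | // gives the round-down average (a + b) >> 1.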
528 | 0 | SIMD_INLINE v256 v256_rdavg_u8(v256 a, v256 b) { |
529 | 0 | return _mm256_sub_epi8( |
530 | 0 | _mm256_avg_epu8(a, b), |
531 | 0 | _mm256_and_si256(_mm256_xor_si256(a, b), v256_dup_8(1))); |
532 | 0 | } |
533 | | |
534 | 0 | SIMD_INLINE v256 v256_rdavg_u16(v256 a, v256 b) { |
535 | 0 | return _mm256_sub_epi16( |
536 | 0 | _mm256_avg_epu16(a, b), |
537 | 0 | _mm256_and_si256(_mm256_xor_si256(a, b), v256_dup_16(1))); |
538 | 0 | } |
539 | | |
540 | 0 | SIMD_INLINE v256 v256_avg_u16(v256 a, v256 b) { return _mm256_avg_epu16(a, b); } |
541 | | |
542 | 0 | SIMD_INLINE v256 v256_min_u8(v256 a, v256 b) { return _mm256_min_epu8(a, b); } |
543 | | |
544 | 190M | SIMD_INLINE v256 v256_max_u8(v256 a, v256 b) { return _mm256_max_epu8(a, b); } |
545 | | |
546 | 0 | SIMD_INLINE v256 v256_min_s8(v256 a, v256 b) { return _mm256_min_epi8(a, b); } |
547 | | |
548 | 0 | SIMD_INLINE uint32_t v256_movemask_8(v256 a) { |
549 | 0 | return (uint32_t)_mm256_movemask_epi8(a); |
550 | 0 | } |
551 | | |
552 | 0 | SIMD_INLINE v256 v256_blend_8(v256 a, v256 b, v256 c) { |
553 | 0 | return _mm256_blendv_epi8(a, b, c); |
554 | 0 | } |
555 | | |
556 | 0 | SIMD_INLINE v256 v256_max_s8(v256 a, v256 b) { return _mm256_max_epi8(a, b); } |
557 | | |
558 | 490M | SIMD_INLINE v256 v256_min_s16(v256 a, v256 b) { return _mm256_min_epi16(a, b); } |
559 | | |
560 | 267M | SIMD_INLINE v256 v256_max_s16(v256 a, v256 b) { return _mm256_max_epi16(a, b); } |
561 | | |
562 | 0 | SIMD_INLINE v256 v256_min_s32(v256 a, v256 b) { return _mm256_min_epi32(a, b); } |
563 | | |
564 | 0 | SIMD_INLINE v256 v256_max_s32(v256 a, v256 b) { return _mm256_max_epi32(a, b); } |
565 | | |
566 | 0 | SIMD_INLINE v256 v256_cmpgt_s8(v256 a, v256 b) { |
567 | 0 | return _mm256_cmpgt_epi8(a, b); |
568 | 0 | } |
569 | | |
570 | 0 | SIMD_INLINE v256 v256_cmplt_s8(v256 a, v256 b) { |
571 | 0 | return _mm256_cmpgt_epi8(b, a); |
572 | 0 | } |
573 | | |
574 | 0 | SIMD_INLINE v256 v256_cmpeq_8(v256 a, v256 b) { |
575 | 0 | return _mm256_cmpeq_epi8(a, b); |
576 | 0 | } |
577 | | |
578 | 0 | SIMD_INLINE v256 v256_cmpgt_s16(v256 a, v256 b) { |
579 | 0 | return _mm256_cmpgt_epi16(a, b); |
580 | 0 | } |
581 | | |
582 | 68.2M | SIMD_INLINE v256 v256_cmplt_s16(v256 a, v256 b) { |
583 | 68.2M | return _mm256_cmpgt_epi16(b, a); |
584 | 68.2M | } |
585 | | |
586 | 0 | SIMD_INLINE v256 v256_cmpeq_16(v256 a, v256 b) { |
587 | 0 | return _mm256_cmpeq_epi16(a, b); |
588 | 0 | } |
589 | | |
590 | 0 | SIMD_INLINE v256 v256_cmpgt_s32(v256 a, v256 b) { |
591 | 0 | return _mm256_cmpgt_epi32(a, b); |
592 | 0 | } |
593 | | |
594 | 0 | SIMD_INLINE v256 v256_cmplt_s32(v256 a, v256 b) { |
595 | 0 | return _mm256_cmpgt_epi32(b, a); |
596 | 0 | } |
597 | | |
598 | 0 | SIMD_INLINE v256 v256_cmpeq_32(v256 a, v256 b) { |
599 | 0 | return _mm256_cmpeq_epi32(a, b); |
600 | 0 | } |
601 | | |
602 | 0 | SIMD_INLINE v256 v256_shl_8(v256 a, unsigned int c) { |
603 | 0 | return _mm256_and_si256(_mm256_set1_epi8((char)(0xff << c)), |
604 | 0 | _mm256_sll_epi16(a, _mm_cvtsi32_si128((int)c))); |
605 | 0 | } |
606 | | |
607 | 0 | SIMD_INLINE v256 v256_shr_u8(v256 a, unsigned int c) { |
608 | 0 | return _mm256_and_si256(_mm256_set1_epi8((char)(0xff >> c)), |
609 | 0 | _mm256_srl_epi16(a, _mm_cvtsi32_si128((int)c))); |
610 | 0 | } |
611 | | |
612 | 0 | SIMD_INLINE v256 v256_shr_s8(v256 a, unsigned int c) { |
613 | 0 | __m128i x = _mm_cvtsi32_si128((int)(c + 8)); |
614 | 0 | return _mm256_packs_epi16(_mm256_sra_epi16(_mm256_unpacklo_epi8(a, a), x), |
615 | 0 | _mm256_sra_epi16(_mm256_unpackhi_epi8(a, a), x)); |
616 | 0 | } |
617 | | |
618 | 0 | SIMD_INLINE v256 v256_shl_16(v256 a, unsigned int c) { |
619 | 0 | return _mm256_sll_epi16(a, _mm_cvtsi32_si128((int)c)); |
620 | 0 | } |
621 | | |
622 | 330M | SIMD_INLINE v256 v256_shr_u16(v256 a, unsigned int c) { |
623 | 330M | return _mm256_srl_epi16(a, _mm_cvtsi32_si128((int)c)); |
624 | 330M | } |
625 | | |
626 | 0 | SIMD_INLINE v256 v256_shr_s16(v256 a, unsigned int c) { |
627 | 0 | return _mm256_sra_epi16(a, _mm_cvtsi32_si128((int)c)); |
628 | 0 | } |
629 | | |
630 | 0 | SIMD_INLINE v256 v256_shl_32(v256 a, unsigned int c) { |
631 | 0 | return _mm256_sll_epi32(a, _mm_cvtsi32_si128((int)c)); |
632 | 0 | } |
633 | | |
634 | 0 | SIMD_INLINE v256 v256_shr_u32(v256 a, unsigned int c) { |
635 | 0 | return _mm256_srl_epi32(a, _mm_cvtsi32_si128((int)c)); |
636 | 0 | } |
637 | | |
638 | 0 | SIMD_INLINE v256 v256_shr_s32(v256 a, unsigned int c) { |
639 | 0 | return _mm256_sra_epi32(a, _mm_cvtsi32_si128((int)c)); |
640 | 0 | } |
641 | | |
642 | 0 | SIMD_INLINE v256 v256_shl_64(v256 a, unsigned int c) { |
643 | 0 | return _mm256_sll_epi64(a, _mm_cvtsi32_si128((int)c)); |
644 | 0 | } |
645 | | |
646 | 0 | SIMD_INLINE v256 v256_shr_u64(v256 a, unsigned int c) { |
647 | 0 | return _mm256_srl_epi64(a, _mm_cvtsi32_si128((int)c)); |
648 | 0 | } |
649 | | |
650 | 0 | SIMD_INLINE v256 v256_shr_s64(v256 a, unsigned int c) { |
651 | 0 | #if defined(__AVX512VL__) |
652 | 0 | return _mm256_sra_epi64(a, _mm_cvtsi32_si128((int)c)); |
653 | 0 | #else |
654 | 0 | return v256_from_v128(v128_shr_s64(v256_high_v128(a), c), |
655 | 0 | v128_shr_s64(v256_low_v128(a), c)); |
656 | 0 | #endif |
657 | 0 | } |
658 | | |
659 | | /* These intrinsics require immediate values, so we must use #defines |
660 | | to enforce that. */ |
661 | | // _mm256_slli_si256 operates on 128-bit lanes and can't be used here
662 | | #define v256_shl_n_byte(a, n) \ |
663 | | ((n) < 16 ? v256_from_v128( \ |
664 | | v128_align(v256_high_v128(a), v256_low_v128(a), 16 - (n)), \ |
665 | | v128_shl_n_byte(v256_low_v128(a), n)) \ |
666 | | : _mm256_inserti128_si256( \ |
667 | | _mm256_setzero_si256(), \ |
668 | | v128_shl_n_byte(v256_low_v128(a), (n)-16), 1)) |
669 | | |
670 | | // _mm256_srli_si256 operates on 128-bit lanes and can't be used here
671 | | #define v256_shr_n_byte(a, n) \ |
672 | | ((n) < 16 \ |
673 | | ? _mm256_alignr_epi8( \ |
674 | | _mm256_permute2x128_si256(a, a, _MM_SHUFFLE(2, 0, 0, 1)), a, n) \ |
675 | | : ((n) == 16 ? _mm256_permute2x128_si256(_mm256_setzero_si256(), a, 3) \ |
676 | | : _mm256_inserti128_si256( \ |
677 | | _mm256_setzero_si256(), \ |
678 | | v128_shr_n_byte(v256_high_v128(a), (n)-16), 0))) |
679 | | |
680 | | // _mm256_alignr_epi8 operates on two 128-bit lanes and can't be used here
681 | | #define v256_align(a, b, c) \ |
682 | | ((c) ? v256_or(v256_shr_n_byte(b, c), v256_shl_n_byte(a, 32 - (c))) : b) |
683 | | |
684 | | #define v256_shl_n_8(a, c) \ |
685 | | _mm256_and_si256(_mm256_set1_epi8((char)(0xff << (c))), \ |
686 | | _mm256_slli_epi16(a, c)) |
687 | | #define v256_shr_n_u8(a, c) \ |
688 | | _mm256_and_si256(_mm256_set1_epi8((char)(0xff >> (c))), \ |
689 | | _mm256_srli_epi16(a, c)) |
690 | | #define v256_shr_n_s8(a, c) \ |
691 | | _mm256_packs_epi16(_mm256_srai_epi16(_mm256_unpacklo_epi8(a, a), (c) + 8), \ |
692 | | _mm256_srai_epi16(_mm256_unpackhi_epi8(a, a), (c) + 8)) |
693 | | #define v256_shl_n_16(a, c) _mm256_slli_epi16(a, c) |
694 | | #define v256_shr_n_u16(a, c) _mm256_srli_epi16(a, c) |
695 | 432M | #define v256_shr_n_s16(a, c) _mm256_srai_epi16(a, c) |
696 | | #define v256_shl_n_32(a, c) _mm256_slli_epi32(a, c) |
697 | | #define v256_shr_n_u32(a, c) _mm256_srli_epi32(a, c) |
698 | | #define v256_shr_n_s32(a, c) _mm256_srai_epi32(a, c) |
699 | | #define v256_shl_n_64(a, c) _mm256_slli_epi64(a, c) |
700 | | #define v256_shr_n_u64(a, c) _mm256_srli_epi64(a, c) |
701 | | #define v256_shr_n_s64(a, c) \ |
702 | | v256_shr_s64((a), (c))  // _mm256_srai_epi64 requires AVX-512VL
703 | | #define v256_shr_n_word(a, n) v256_shr_n_byte(a, 2 * (n)) |
704 | | #define v256_shl_n_word(a, n) v256_shl_n_byte(a, 2 * (n)) |
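 | |
 | | /* Illustrative sketch only (the helper name is hypothetical): with a
 | |    compile-time count the macro forms above can be used directly; for a
 | |    run-time count use the v256_shl_16()/v256_shr_s16() style functions. */
 | | SIMD_INLINE v256 example_round_shr_s16(v256 a) {
 | |   // (x + 8) >> 4 with rounding in each 16-bit lane, using an immediate count.
 | |   return v256_shr_n_s16(v256_add_16(a, v256_dup_16(8)), 4);
 | | }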
705 | | |
706 | | typedef v256 sad256_internal_u16; |
707 | | |
708 | 0 | SIMD_INLINE sad256_internal_u16 v256_sad_u16_init(void) { return v256_zero(); } |
709 | | |
710 | | /* Implementation dependent return value. Result must be finalised with |
711 | | * v256_sad_u16_sum(). */ |
712 | | SIMD_INLINE sad256_internal_u16 v256_sad_u16(sad256_internal_u16 s, v256 a, |
713 | 0 | v256 b) { |
714 | 0 | #if defined(__SSE4_1__) |
715 | 0 | v256 t = v256_sub_16(_mm256_max_epu16(a, b), _mm256_min_epu16(a, b)); |
716 | 0 | #else |
717 | 0 | v256 t = v256_cmplt_s16(v256_xor(a, v256_dup_16(32768)), |
718 | 0 | v256_xor(b, v256_dup_16(32768))); |
719 | 0 | t = v256_sub_16(v256_or(v256_and(b, t), v256_andn(a, t)), |
720 | 0 | v256_or(v256_and(a, t), v256_andn(b, t))); |
721 | 0 | #endif |
722 | 0 | return v256_add_32( |
723 | 0 | s, v256_add_32(v256_unpackhi_u16_s32(t), v256_unpacklo_u16_s32(t))); |
724 | 0 | } |
725 | | |
726 | 0 | SIMD_INLINE uint32_t v256_sad_u16_sum(sad256_internal_u16 s) { |
727 | 0 | v128 t = v128_add_32(v256_high_v128(s), v256_low_v128(s)); |
728 | 0 | return v128_low_u32(t) + v128_low_u32(v128_shr_n_byte(t, 4)) + |
729 | 0 | v128_low_u32(v128_shr_n_byte(t, 8)) + |
730 | 0 | v128_low_u32(v128_shr_n_byte(t, 12)); |
731 | 0 | } |
732 | | |
733 | | typedef v256 ssd256_internal_s16; |
734 | | |
735 | 0 | SIMD_INLINE ssd256_internal_s16 v256_ssd_s16_init(void) { return v256_zero(); } |
736 | | |
737 | | /* Implementation dependent return value. Result must be finalised with |
738 | | * v256_ssd_s16_sum(). */ |
739 | | SIMD_INLINE ssd256_internal_s16 v256_ssd_s16(ssd256_internal_s16 s, v256 a, |
740 | 0 | v256 b) { |
741 | 0 | v256 d = v256_sub_16(a, b); |
742 | 0 | d = v256_madd_s16(d, d); |
743 | 0 | return v256_add_64(s, v256_add_64(_mm256_unpackhi_epi32(d, v256_zero()), |
744 | 0 | _mm256_unpacklo_epi32(d, v256_zero()))); |
745 | 0 | } |
746 | | |
747 | 0 | SIMD_INLINE uint64_t v256_ssd_s16_sum(ssd256_internal_s16 s) { |
748 | 0 | v128 t = v128_add_64(v256_high_v128(s), v256_low_v128(s)); |
749 | 0 | return v64_u64(v128_low_v64(t)) + v64_u64(v128_high_v64(t)); |
750 | 0 | } |
751 | | |
752 | | #endif |
753 | | |
754 | | #endif // AOM_AOM_DSP_SIMD_V256_INTRINSICS_X86_H_ |