/src/aom/aom_dsp/x86/synonyms_avx2.h
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2018, Alliance for Open Media. All rights reserved |
3 | | * |
4 | | * This source code is subject to the terms of the BSD 2 Clause License and |
5 | | * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License |
6 | | * was not distributed with this source code in the LICENSE file, you can |
7 | | * obtain it at www.aomedia.org/license/software. If the Alliance for Open |
8 | | * Media Patent License 1.0 was not distributed with this source code in the |
9 | | * PATENTS file, you can obtain it at www.aomedia.org/license/patent. |
10 | | */ |
11 | | |
12 | | #ifndef AOM_AOM_DSP_X86_SYNONYMS_AVX2_H_ |
13 | | #define AOM_AOM_DSP_X86_SYNONYMS_AVX2_H_ |
14 | | |
15 | | #include <immintrin.h> |
16 | | |
17 | | #include "config/aom_config.h" |
18 | | |
19 | | #include "aom/aom_integer.h" |
20 | | |
21 | | /** |
22 | | * Various reusable shorthands for x86 SIMD intrinsics. |
23 | | * |
24 | | * Intrinsics prefixed with xx_ operate on or return 128bit XMM registers. |
25 | | * Intrinsics prefixed with yy_ operate on or return 256bit YMM registers. |
26 | | */ |
27 | | |
28 | | // Loads and stores to do away with the tedium of casting the address |
29 | | // to the right type. |
30 | 367M | static INLINE __m256i yy_load_256(const void *a) { |
31 | 367M | return _mm256_load_si256((const __m256i *)a); |
32 | 367M | } Instantiations: selfguided_avx2.c 367M. Unexecuted: blend_a64_mask_avx2.c, jnt_convolve_avx2.c, reconinter_avx2.c, wiener_convolve_avx2.c, highbd_wiener_convolve_avx2.c.
33 | | |
34 | 2.70G | static INLINE __m256i yy_loadu_256(const void *a) { |
35 | 2.70G | return _mm256_loadu_si256((const __m256i *)a); |
36 | 2.70G | } Instantiations: blend_a64_mask_avx2.c 27.1M, reconinter_avx2.c 11.7M, selfguided_avx2.c 2.50G, highbd_wiener_convolve_avx2.c 168M. Unexecuted: jnt_convolve_avx2.c, wiener_convolve_avx2.c.
37 | | |
38 | 366M | static INLINE void yy_store_256(void *const a, const __m256i v) { |
39 | 366M | _mm256_store_si256((__m256i *)a, v); |
40 | 366M | } Instantiations: selfguided_avx2.c 366M. Unexecuted: blend_a64_mask_avx2.c, jnt_convolve_avx2.c, reconinter_avx2.c, wiener_convolve_avx2.c, highbd_wiener_convolve_avx2.c.
41 | | |
42 | 413M | static INLINE void yy_storeu_256(void *const a, const __m256i v) { |
43 | 413M | _mm256_storeu_si256((__m256i *)a, v); |
44 | 413M | } Instantiations: blend_a64_mask_avx2.c 3.00M, reconinter_avx2.c 3.06M, selfguided_avx2.c 386M, highbd_wiener_convolve_avx2.c 21.0M. Unexecuted: jnt_convolve_avx2.c, wiener_convolve_avx2.c.
45 | | |
46 | | // The _mm256_set1_epi64x() intrinsic is undefined for some Visual Studio |
47 | | // compilers. The following function is equivalent to _mm256_set1_epi64x() |
48 | | // acting on a 32-bit integer. |
49 | 0 | static INLINE __m256i yy_set1_64_from_32i(int32_t a) { |
50 | 0 | #if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900 |
51 | 0 | return _mm256_set_epi32(0, a, 0, a, 0, a, 0, a); |
52 | 0 | #else |
53 | 0 | return _mm256_set1_epi64x((uint32_t)a); |
54 | 0 | #endif |
55 | 0 | } Unexecuted instantiations: blend_a64_mask_avx2.c, jnt_convolve_avx2.c, reconinter_avx2.c, selfguided_avx2.c, wiener_convolve_avx2.c, highbd_wiener_convolve_avx2.c.
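One observation on the function above (an inference from the two code paths, not wording from the header): both branches zero-extend the 32-bit value into each 64-bit lane, so passing -1 yields lanes of 0x00000000FFFFFFFF rather than all-ones. A hypothetical illustration:

static INLINE __m256i broadcast_neg1_u32_in_u64_lanes(void) {
  // Each 64-bit lane holds 0x00000000FFFFFFFF, not 0xFFFFFFFFFFFFFFFF,
  // because the (uint32_t) cast (or the explicit zero dwords in the MSVC
  // path) avoids sign extension.
  return yy_set1_64_from_32i(-1);
}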
56 | | |
57 | | // Some compilers don't have _mm256_set_m128i defined in immintrin.h. We |
58 | | // therefore define an equivalent function using a different intrinsic. |
59 | | // ([ hi ], [ lo ]) -> [ hi ][ lo ] |
60 | 2.15M | static INLINE __m256i yy_set_m128i(__m128i hi, __m128i lo) { |
61 | 2.15M | return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1); |
62 | 2.15M | } Instantiations: blend_a64_mask_avx2.c 776k, reconinter_avx2.c 457k, highbd_wiener_convolve_avx2.c 921k. Unexecuted: jnt_convolve_avx2.c, selfguided_avx2.c, wiener_convolve_avx2.c.
63 | | |
64 | 1.23M | static INLINE __m256i yy_loadu2_128(const void *hi, const void *lo) { |
65 | 1.23M | __m128i mhi = _mm_loadu_si128((__m128i *)(hi)); |
66 | 1.23M | __m128i mlo = _mm_loadu_si128((__m128i *)(lo)); |
67 | 1.23M | return yy_set_m128i(mhi, mlo); |
68 | 1.23M | } Instantiations: blend_a64_mask_avx2.c 776k, reconinter_avx2.c 457k. Unexecuted: jnt_convolve_avx2.c, selfguided_avx2.c, wiener_convolve_avx2.c, highbd_wiener_convolve_avx2.c.
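A hedged usage sketch for the two helpers above (the helper name and stride-based layout are assumptions, not from the header): packing two independent 16-byte rows into one YMM register, with the second row in the upper lane.

static INLINE __m256i load_two_rows_128(const uint8_t *src, int stride) {
  // Row i+1 goes to the upper 128 bits, row i to the lower 128 bits,
  // mirroring the ([ hi ], [ lo ]) -> [ hi ][ lo ] convention of yy_set_m128i.
  return yy_loadu2_128(src + stride, src);
}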
69 | | |
70 | 265k | static INLINE void yy_storeu2_128(void *hi, void *lo, const __m256i a) { |
71 | 265k | _mm_storeu_si128((__m128i *)hi, _mm256_extracti128_si256(a, 1)); |
72 | 265k | _mm_storeu_si128((__m128i *)lo, _mm256_castsi256_si128(a)); |
73 | 265k | } Instantiations: blend_a64_mask_avx2.c 265k. Unexecuted: jnt_convolve_avx2.c, reconinter_avx2.c, selfguided_avx2.c, wiener_convolve_avx2.c, highbd_wiener_convolve_avx2.c.
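And the matching store, a hypothetical inverse of the load sketch above:

static INLINE void store_two_rows_128(uint8_t *dst, int stride, __m256i v) {
  // Upper 128 bits to row i+1, lower 128 bits to row i.
  yy_storeu2_128(dst + stride, dst, v);
}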
74 | | |
75 | 1.37M | static INLINE __m256i yy_roundn_epu16(__m256i v_val_w, int bits) { |
76 | 1.37M | const __m256i v_s_w = _mm256_srli_epi16(v_val_w, bits - 1); |
77 | 1.37M | return _mm256_avg_epu16(v_s_w, _mm256_setzero_si256()); |
78 | 1.37M | } Instantiations: blend_a64_mask_avx2.c 1.37M. Unexecuted: jnt_convolve_avx2.c, reconinter_avx2.c, selfguided_avx2.c, wiener_convolve_avx2.c, highbd_wiener_convolve_avx2.c.
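A worked reading of yy_roundn_epu16 (my interpretation of the intrinsics, not wording from the header): _mm256_avg_epu16(x, 0) computes (x + 1) >> 1 per 16-bit lane, so the result is ((v >> (bits - 1)) + 1) >> 1, which for bits >= 1 equals (v + (1 << (bits - 1))) >> bits, i.e. a round-half-up right shift of each unsigned 16-bit lane. A hypothetical scalar reference:

static INLINE uint16_t roundn_u16_scalar(uint16_t v, int bits) {
  // Round-to-nearest (ties round up) division by 2^bits, for 1 <= bits <= 16.
  return (uint16_t)(((uint32_t)v + (1u << (bits - 1))) >> bits);
}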
79 | | #endif // AOM_AOM_DSP_X86_SYNONYMS_AVX2_H_ |