/src/libvpx/vpx_dsp/x86/sad4d_avx512.c
/*
 *  Copyright (c) 2017 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include <immintrin.h>  // AVX512
#include "./vpx_dsp_rtcd.h"
#include "vpx/vpx_integer.h"
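
// vpx_sad64x64x4d_avx512() computes the sums of absolute differences between
// one 64x64 source block and four 64x64 reference blocks in a single pass,
// writing the four totals to sad_array[]. Batching the four candidates lets
// a motion search reuse each 64-byte source load across all four SADs.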
void vpx_sad64x64x4d_avx512(const uint8_t *src_ptr, int src_stride,
                            const uint8_t *const ref_array[4], int ref_stride,
                            uint32_t sad_array[4]) {
  __m512i src_reg, ref0_reg, ref1_reg, ref2_reg, ref3_reg;
  __m512i sum_ref0, sum_ref1, sum_ref2, sum_ref3;
  __m512i sum_mlow, sum_mhigh;
  int i;
  const uint8_t *ref0, *ref1, *ref2, *ref3;

  ref0 = ref_array[0];
  ref1 = ref_array[1];
  ref2 = ref_array[2];
  ref3 = ref_array[3];
  // zero the four SAD accumulators
  sum_ref0 = _mm512_setzero_si512();
  sum_ref1 = _mm512_setzero_si512();
  sum_ref2 = _mm512_setzero_si512();
  sum_ref3 = _mm512_setzero_si512();
  for (i = 0; i < 64; i++) {
    // load one 64-byte row of src and of each ref[]
    src_reg = _mm512_loadu_si512((const __m512i *)src_ptr);
    ref0_reg = _mm512_loadu_si512((const __m512i *)ref0);
    ref1_reg = _mm512_loadu_si512((const __m512i *)ref1);
    ref2_reg = _mm512_loadu_si512((const __m512i *)ref2);
    ref3_reg = _mm512_loadu_si512((const __m512i *)ref3);
    // sum of absolute differences between each ref[] row and the src row
    ref0_reg = _mm512_sad_epu8(ref0_reg, src_reg);
    ref1_reg = _mm512_sad_epu8(ref1_reg, src_reg);
    ref2_reg = _mm512_sad_epu8(ref2_reg, src_reg);
    ref3_reg = _mm512_sad_epu8(ref3_reg, src_reg);
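    // _mm512_sad_epu8() (vpsadbw) reduces each group of 8 byte pairs to one
    // sum: every ref*_reg now holds eight 64-bit lanes whose low 16 bits
    // carry a partial row SAD (at most 8 * 255 = 2040) and whose upper bits
    // are zero.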
    // accumulate the per-row SADs for every ref[]
    sum_ref0 = _mm512_add_epi32(sum_ref0, ref0_reg);
    sum_ref1 = _mm512_add_epi32(sum_ref1, ref1_reg);
    sum_ref2 = _mm512_add_epi32(sum_ref2, ref2_reg);
    sum_ref3 = _mm512_add_epi32(sum_ref3, ref3_reg);
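    // 32-bit adds are safe: a lane's final total is at most
    // 64 rows * 2040 = 130560, which never overflows the low 32 bits, so the
    // zeroed upper half of each 64-bit lane stays zero.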

    // step all pointers to the next row
    src_ptr += src_stride;
    ref0 += ref_stride;
    ref1 += ref_stride;
    ref2 += ref_stride;
    ref3 += ref_stride;
  }
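
  // Horizontal reduction: fold each accumulator's eight 64-bit lanes into a
  // single 32-bit total per reference, leaving the four totals packed in
  // sad_array[] order.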
  {
    __m256i sum256;
    __m128i sum128;
    // each 64-bit lane of sum_ref[] keeps its result in the low 4 bytes; the
    // other 4 bytes are zero. Shift sum_ref1 and sum_ref3 left by 4 bytes so
    // their results sit in the upper half of each lane.
    sum_ref1 = _mm512_bslli_epi128(sum_ref1, 4);
    sum_ref3 = _mm512_bslli_epi128(sum_ref3, 4);

    // merge sum_ref0 with sum_ref1 and sum_ref2 with sum_ref3: every 64-bit
    // lane now pairs two references' partial sums as 32-bit values
    sum_ref0 = _mm512_or_si512(sum_ref0, sum_ref1);
    sum_ref2 = _mm512_or_si512(sum_ref2, sum_ref3);

    // interleave the 64-bit lanes of the two merged accumulators
    sum_mlow = _mm512_unpacklo_epi64(sum_ref0, sum_ref2);
    sum_mhigh = _mm512_unpackhi_epi64(sum_ref0, sum_ref2);

    // add the interleaved halves; every 128-bit lane of sum_mlow now holds
    // partial sums in output order [ref0, ref1, ref2, ref3]
    sum_mlow = _mm512_add_epi32(sum_mlow, sum_mhigh);

    // fold 512 -> 256 -> 128 bits, summing the four 128-bit lanes
    sum256 = _mm256_add_epi32(_mm512_castsi512_si256(sum_mlow),
                              _mm512_extracti32x8_epi32(sum_mlow, 1));
    sum128 = _mm_add_epi32(_mm256_castsi256_si128(sum256),
                           _mm256_extractf128_si256(sum256, 1));

    // sum128 = { sad0, sad1, sad2, sad3 }
    _mm_storeu_si128((__m128i *)(sad_array), sum128);
  }
}
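
// Usage sketch (illustrative only; src, refs, cand0..cand3, and the strides
// are hypothetical, not part of this file): a motion search points
// ref_array[] at four candidate blocks and keeps the one with the lowest SAD.
//
//   const uint8_t *refs[4] = { cand0, cand1, cand2, cand3 };
//   uint32_t sads[4];
//   vpx_sad64x64x4d_avx512(src, src_stride, refs, ref_stride, sads);
//   // sads[i] is the 64x64 SAD of src against refs[i].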