/src/zlib-ng/arch/x86/adler32_avx512.c
Line | Count | Source
1 | | /* adler32_avx512.c -- compute the Adler-32 checksum of a data stream
2 | | * Copyright (C) 1995-2011 Mark Adler
3 | | * Authors:
4 | | * Adam Stylinski <kungfujesus06@gmail.com>
5 | | * Brian Bockelman <bockelman@gmail.com>
6 | | * For conditions of distribution and use, see copyright notice in zlib.h
7 | | */
8 | | |
9 | | #ifdef X86_AVX512 |
10 | | |
11 | | #include "zbuild.h" |
12 | | #include "adler32_p.h" |
13 | | #include "arch_functions.h" |
14 | | #include <immintrin.h> |
15 | | #include "x86_intrins.h" |
16 | | #include "adler32_avx512_p.h" |
17 | | |
18 | 0 | static inline uint32_t adler32_fold_copy_impl(uint32_t adler, uint8_t *dst, const uint8_t *src, size_t len, const int COPY) { |
19 | 0 | if (src == NULL) return 1L; |
20 | 0 | if (len == 0) return adler; |
21 | | |
22 | 0 | uint32_t adler0, adler1; |
23 | 0 | adler1 = (adler >> 16) & 0xffff; |
24 | 0 | adler0 = adler & 0xffff; |
25 | |
26 | 0 | rem_peel: |
27 | 0 | if (len < 64) { |
28 | | /* Copy the remaining bytes with a masked store, then hand the tail to the AVX2 checksum */
29 | 0 | if (COPY) { |
30 | 0 | __mmask64 storemask = (0xFFFFFFFFFFFFFFFFUL >> (64 - len)); |
31 | 0 | __m512i copy_vec = _mm512_maskz_loadu_epi8(storemask, src); |
32 | 0 | _mm512_mask_storeu_epi8(dst, storemask, copy_vec); |
33 | 0 | } |
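| | /* A worked example of the mask math above (illustrative values, not
| |  * from this file): for len == 3, storemask becomes
| |  * 0xFFFFFFFFFFFFFFFFUL >> (64 - 3) == 0x7, so the masked load/store
| |  * touches only bytes 0..2 and never reads past the end of src. */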
34 | |
35 | 0 | return adler32_avx2(adler, src, len); |
36 | 0 | } |
37 | | |
38 | 0 | __m512i vbuf, vs1_0, vs3; |
39 | |
40 | 0 | const __m512i dot2v = _mm512_set_epi8(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, |
41 | 0 | 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, |
42 | 0 | 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, |
43 | 0 | 56, 57, 58, 59, 60, 61, 62, 63, 64); |
44 | 0 | const __m512i dot3v = _mm512_set1_epi16(1); |
45 | 0 | const __m512i zero = _mm512_setzero_si512(); |
46 | 0 | size_t k; |
47 | |
48 | 0 | while (len >= 64) { |
49 | 0 | __m512i vs1 = _mm512_zextsi128_si512(_mm_cvtsi32_si128(adler0)); |
50 | 0 | __m512i vs2 = _mm512_zextsi128_si512(_mm_cvtsi32_si128(adler1)); |
51 | 0 | vs1_0 = vs1; |
52 | 0 | vs3 = _mm512_setzero_si512(); |
53 | |
54 | 0 | k = MIN(len, NMAX); |
55 | 0 | k -= k % 64; |
56 | 0 | len -= k; |
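| | /* NMAX (5552 in zlib) is the largest n such that
| |  * 255*n*(n+1)/2 + (n+1)*(BASE-1) still fits in 32 bits, so the vector
| |  * accumulators cannot overflow before the next modulo; k is then
| |  * rounded down to a whole number of 64-byte blocks. */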
57 | |
58 | 0 | while (k >= 64) { |
59 | | /*
60 | | vs1 = adler + sum(c[i])
61 | | vs2 = sum2 + 64*vs1 + sum( (64-i+1)*c[i] ), for i = 1..64
62 | | */
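| | /* Scalar equivalent of one 64-byte block (a sketch for reference,
| |  * not part of the original file):
| |  *
| |  *   for (int i = 0; i < 64; i++) {
| |  *       s1 += c[i];
| |  *       s2 += s1;   // over the block: s2 += 64*s1_old + sum((64-i)*c[i])
| |  *   }
| |  */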
63 | 0 | vbuf = _mm512_loadu_si512(src); |
64 | |
65 | 0 | if (COPY) { |
66 | 0 | _mm512_storeu_si512(dst, vbuf); |
67 | 0 | dst += 64; |
68 | 0 | } |
69 | |
70 | 0 | src += 64; |
71 | 0 | k -= 64; |
72 | |
73 | 0 | __m512i vs1_sad = _mm512_sad_epu8(vbuf, zero); |
74 | 0 | __m512i v_short_sum2 = _mm512_maddubs_epi16(vbuf, dot2v); |
75 | 0 | vs1 = _mm512_add_epi32(vs1_sad, vs1); |
76 | 0 | vs3 = _mm512_add_epi32(vs3, vs1_0); |
77 | 0 | __m512i vsum2 = _mm512_madd_epi16(v_short_sum2, dot3v); |
78 | 0 | vs2 = _mm512_add_epi32(vsum2, vs2); |
79 | 0 | vs1_0 = vs1; |
80 | 0 | } |
81 | |
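| | /* vs3 accumulated vs1_0 (the value of vs1 entering each 64-byte block)
| |  * once per inner iteration, so shifting it left by 6 multiplies by 64
| |  * and folds the deferred "64*vs1" term from the formula above into vs2
| |  * with a single shift instead of a multiply per iteration. */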
82 | 0 | vs3 = _mm512_slli_epi32(vs3, 6); |
83 | 0 | vs2 = _mm512_add_epi32(vs2, vs3); |
84 | |
85 | 0 | adler0 = partial_hsum(vs1) % BASE; |
86 | 0 | adler1 = _mm512_reduce_add_epu32(vs2) % BASE; |
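| | /* Both horizontal sums are reduced modulo BASE (65521) so adler0 and
| |  * adler1 fit back into 16 bits before the next outer iteration.
| |  * partial_hsum (from adler32_avx512_p.h) can take a shortcut because
| |  * _mm512_sad_epu8 zero-extends each 8-byte sum into a 64-bit lane,
| |  * leaving the odd 32-bit lanes of vs1 zero. */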
87 | 0 | } |
88 | |
89 | 0 | adler = adler0 | (adler1 << 16); |
90 | | |
91 | | /* Process tail (len < 64). */ |
92 | 0 | if (len) { |
93 | 0 | goto rem_peel; |
94 | 0 | } |
95 | | |
96 | 0 | return adler; |
97 | 0 | } |
98 | | |
99 | 0 | Z_INTERNAL uint32_t adler32_fold_copy_avx512(uint32_t adler, uint8_t *dst, const uint8_t *src, size_t len) { |
100 | 0 | return adler32_fold_copy_impl(adler, dst, src, len, 1); |
101 | 0 | } |
102 | | |
103 | 0 | Z_INTERNAL uint32_t adler32_avx512(uint32_t adler, const uint8_t *src, size_t len) { |
104 | 0 | return adler32_fold_copy_impl(adler, NULL, src, len, 0); |
105 | 0 | } |
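| | /* Usage sketch (hypothetical caller, not part of this file): both entry
| |  * points are streamable and seeded with 1 for an empty stream, e.g.
| |  *
| |  *   uint32_t a = 1;
| |  *   a = adler32_avx512(a, chunk1, len1);
| |  *   a = adler32_avx512(a, chunk2, len2);   // continues the checksum
| |  *
| |  * adler32_fold_copy_avx512 additionally copies src to dst while summing. */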
106 | | |
107 | | #endif |
108 | | |