/src/zlib-ng/arch/x86/adler32_sse42.c
Line | Count | Source |
1 | | /* adler32_sse42.c -- compute the Adler-32 checksum of a data stream |
2 | | * Copyright (C) 1995-2011 Mark Adler |
3 | | * Authors: |
4 | | * Adam Stylinski <kungfujesus06@gmail.com> |
5 | | * Brian Bockelman <bockelman@gmail.com> |
6 | | * For conditions of distribution and use, see copyright notice in zlib.h |
7 | | */ |
8 | | |
9 | | #include "zbuild.h" |
10 | | #include "adler32_p.h" |
11 | | #include "adler32_ssse3_p.h" |
12 | | #include <immintrin.h> |
13 | | |
14 | | #ifdef X86_SSE42 |
15 | | |
16 | 16.3k | Z_INTERNAL uint32_t adler32_fold_copy_sse42(uint32_t adler, uint8_t *dst, const uint8_t *src, size_t len) { |
17 | 16.3k | uint32_t adler0, adler1; |
18 | 16.3k | adler1 = (adler >> 16) & 0xffff; |
19 | 16.3k | adler0 = adler & 0xffff; |
20 | | |
21 | 31.6k | rem_peel: |
22 | 31.6k | if (len < 16) { |
23 | 15.2k | return adler32_copy_len_16(adler0, src, dst, len, adler1); |
24 | 15.2k | } |
25 | | |
26 | 16.3k | __m128i vbuf, vbuf_0; |
27 | 16.3k | __m128i vs1_0, vs3, vs1, vs2, vs2_0, v_sad_sum1, v_short_sum2, v_short_sum2_0, |
28 | 16.3k | v_sad_sum2, vsum2, vsum2_0; |
29 | 16.3k | __m128i zero = _mm_setzero_si128(); |
30 | 16.3k | const __m128i dot2v = _mm_setr_epi8(32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17); |
31 | 16.3k | const __m128i dot2v_0 = _mm_setr_epi8(16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1); |
32 | 16.3k | const __m128i dot3v = _mm_set1_epi16(1); |
33 | 16.3k | size_t k; |
34 | | |
35 | 32.7k | while (len >= 16) { |
36 | | |
37 | 16.3k | k = MIN(len, NMAX); |
38 | 16.3k | k -= k % 16; |
39 | 16.3k | len -= k; |
40 | | |
41 | 16.3k | vs1 = _mm_cvtsi32_si128(adler0); |
42 | 16.3k | vs2 = _mm_cvtsi32_si128(adler1); |
43 | | |
44 | 16.3k | vs3 = _mm_setzero_si128(); |
45 | 16.3k | vs2_0 = _mm_setzero_si128(); |
46 | 16.3k | vs1_0 = vs1; |
47 | | |
48 | 16.3k | while (k >= 32) { |
49 | | /* |
50 | | vs1 = adler + sum(c[i]) |
51 | | vs2 = sum2 + 16 vs1 + sum( (16-i+1) c[i] ) |
52 | | */ |
53 | 0 | vbuf = _mm_loadu_si128((__m128i*)src); |
54 | 0 | vbuf_0 = _mm_loadu_si128((__m128i*)(src + 16)); |
55 | 0 | src += 32; |
56 | 0 | k -= 32; |
57 | |
58 | 0 | v_sad_sum1 = _mm_sad_epu8(vbuf, zero); |
59 | 0 | v_sad_sum2 = _mm_sad_epu8(vbuf_0, zero); |
60 | 0 | _mm_storeu_si128((__m128i*)dst, vbuf); |
61 | 0 | _mm_storeu_si128((__m128i*)(dst + 16), vbuf_0); |
62 | 0 | dst += 32; |
63 | |
64 | 0 | v_short_sum2 = _mm_maddubs_epi16(vbuf, dot2v); |
65 | 0 | v_short_sum2_0 = _mm_maddubs_epi16(vbuf_0, dot2v_0); |
66 | |
67 | 0 | vs1 = _mm_add_epi32(v_sad_sum1, vs1); |
68 | 0 | vs3 = _mm_add_epi32(vs1_0, vs3); |
69 | |
70 | 0 | vsum2 = _mm_madd_epi16(v_short_sum2, dot3v); |
71 | 0 | vsum2_0 = _mm_madd_epi16(v_short_sum2_0, dot3v); |
72 | 0 | vs1 = _mm_add_epi32(v_sad_sum2, vs1); |
73 | 0 | vs2 = _mm_add_epi32(vsum2, vs2); |
74 | 0 | vs2_0 = _mm_add_epi32(vsum2_0, vs2_0); |
75 | 0 | vs1_0 = vs1; |
76 | 0 | } |
77 | | |
78 | 16.3k | vs2 = _mm_add_epi32(vs2_0, vs2); |
79 | 16.3k | vs3 = _mm_slli_epi32(vs3, 5); |
80 | 16.3k | vs2 = _mm_add_epi32(vs3, vs2); |
81 | 16.3k | vs3 = _mm_setzero_si128(); |
82 | | |
83 | 32.7k | while (k >= 16) { |
84 | | /* |
85 | | vs1 = adler + sum(c[i]) |
86 | | vs2 = sum2 + 16 vs1 + sum( (16-i+1) c[i] ) |
87 | | */ |
88 | 16.3k | vbuf = _mm_loadu_si128((__m128i*)src); |
89 | 16.3k | src += 16; |
90 | 16.3k | k -= 16; |
91 | | |
92 | 16.3k | v_sad_sum1 = _mm_sad_epu8(vbuf, zero); |
93 | 16.3k | v_short_sum2 = _mm_maddubs_epi16(vbuf, dot2v_0); |
94 | | |
95 | 16.3k | vs1 = _mm_add_epi32(v_sad_sum1, vs1); |
96 | 16.3k | vs3 = _mm_add_epi32(vs1_0, vs3); |
97 | 16.3k | vsum2 = _mm_madd_epi16(v_short_sum2, dot3v); |
98 | 16.3k | vs2 = _mm_add_epi32(vsum2, vs2); |
99 | 16.3k | vs1_0 = vs1; |
100 | | |
101 | 16.3k | _mm_storeu_si128((__m128i*)dst, vbuf); |
102 | 16.3k | dst += 16; |
103 | 16.3k | } |
104 | | |
105 | 16.3k | vs3 = _mm_slli_epi32(vs3, 4); |
106 | 16.3k | vs2 = _mm_add_epi32(vs2, vs3); |
107 | | |
108 | 16.3k | adler0 = partial_hsum(vs1) % BASE; |
109 | 16.3k | adler1 = hsum(vs2) % BASE; |
110 | 16.3k | } |
111 | | |
112 | | /* If this is true, fewer than 16 bytes remain */ |
113 | 16.3k | if (len) { |
114 | 15.2k | goto rem_peel; |
115 | 15.2k | } |
116 | | |
117 | 1.11k | return adler0 | (adler1 << 16); |
118 | 16.3k | } |
119 | | |
120 | | #endif |
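
For reference, the SSE4.2 routine above produces the same result as a plain scalar fold-and-copy loop: each byte is copied to dst while the low half of the checksum (s1, the running byte sum) and the high half (s2, the running sum of s1) are updated, both reduced modulo BASE (65521) before 32-bit overflow can occur. The sketch below is an illustrative scalar equivalent only, not zlib-ng code; the name adler32_fold_copy_scalar and the ADLER_BASE/ADLER_NMAX macros are assumptions standing in for BASE and NMAX from adler32_p.h.

    #include <stddef.h>
    #include <stdint.h>

    #define ADLER_BASE 65521u   /* largest prime below 2^16 (BASE in adler32_p.h) */
    #define ADLER_NMAX 5552u    /* max bytes before s2 can overflow 32 bits (NMAX) */

    /* Illustrative scalar equivalent of adler32_fold_copy_sse42(): copy src to dst
     * while folding the bytes into the running Adler-32 checksum. Assumes the
     * incoming adler value is a valid Adler-32 (both halves already < BASE). */
    static uint32_t adler32_fold_copy_scalar(uint32_t adler, uint8_t *dst,
                                             const uint8_t *src, size_t len) {
        uint32_t s1 = adler & 0xffff;          /* byte sum        (adler0 above) */
        uint32_t s2 = (adler >> 16) & 0xffff;  /* sum of s1 sums  (adler1 above) */

        while (len > 0) {
            size_t k = len < ADLER_NMAX ? len : ADLER_NMAX;
            len -= k;
            while (k--) {
                *dst++ = *src;
                s1 += *src++;   /* s1 = adler + sum(c[i]) */
                s2 += s1;       /* s2 accumulates every intermediate s1 */
            }
            s1 %= ADLER_BASE;   /* reduce before 32-bit overflow is possible */
            s2 %= ADLER_BASE;
        }
        return s1 | (s2 << 16);
    }

The vector code reaches the same s2 without adding s1 once per byte: vs3 accumulates the value of vs1 at the start of each block, and the shifts _mm_slli_epi32(vs3, 5) and _mm_slli_epi32(vs3, 4) multiply that accumulator by the block width (32 or 16 bytes), which is the vectorized form of the per-byte s2 += s1 above; the _mm_maddubs_epi16/_mm_madd_epi16 pair supplies the position-weighted byte sums for the current block.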