/src/zlib-ng/arch/x86/adler32_ssse3.c
Line | Count | Source |
1 | | /* adler32_ssse3.c -- compute the Adler-32 checksum of a data stream |
2 | | * Copyright (C) 1995-2011 Mark Adler |
3 | | * Authors: |
4 | | * Adam Stylinski <kungfujesus06@gmail.com> |
5 | | * Brian Bockelman <bockelman@gmail.com> |
6 | | * For conditions of distribution and use, see copyright notice in zlib.h |
7 | | */ |
8 | | |
9 | | #include "zbuild.h" |
10 | | #include "adler32_p.h" |
11 | | #include "adler32_ssse3_p.h" |
12 | | |
13 | | #ifdef X86_SSSE3 |
14 | | |
15 | | #include <immintrin.h> |
16 | | |
17 | 436k | Z_INTERNAL uint32_t adler32_ssse3(uint32_t adler, const uint8_t *buf, size_t len) { |
18 | 436k | uint32_t sum2; |
19 | | |
20 | | /* split Adler-32 into component sums */ |
21 | 436k | sum2 = (adler >> 16) & 0xffff; |
22 | 436k | adler &= 0xffff; |
23 | | |
24 | | /* in case user likes doing a byte at a time, keep it fast */ |
25 | 436k | if (UNLIKELY(len == 1)) |
26 | 0 | return adler32_len_1(adler, buf, sum2); |
27 | | |
28 | | /* initial Adler-32 value (deferred check for len == 1 speed) */ |
29 | 436k | if (UNLIKELY(buf == NULL)) |
30 | 0 | return 1L; |
31 | | |
32 | | /* in case short lengths are provided, keep it somewhat fast */ |
33 | 436k | if (UNLIKELY(len < 16)) |
34 | 0 | return adler32_len_16(adler, buf, len, sum2); |
35 | | |
36 | 436k | const __m128i dot2v = _mm_setr_epi8(32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17); |
37 | 436k | const __m128i dot2v_0 = _mm_setr_epi8(16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1); |
38 | 436k | const __m128i dot3v = _mm_set1_epi16(1); |
39 | 436k | const __m128i zero = _mm_setzero_si128(); |
40 | | |
41 | 436k | __m128i vbuf, vs1_0, vs3, vs1, vs2, vs2_0, v_sad_sum1, v_short_sum2, v_short_sum2_0, |
42 | 436k | vbuf_0, v_sad_sum2, vsum2, vsum2_0; |
43 | | |
44 | | /* If our buffer is unaligned (likely), decide whether there is enough |
45 | | * data left to make the scalar, aligning additions worthwhile, or |
46 | | * whether it is cheaper to just eat the cost of one unaligned load. |
47 | | * The test is simple: take the unaligned path when fewer than 16 bytes |
48 | | * would remain after aligning, i.e. when len < 16 + (16 - remainder). */ |
49 | 436k | size_t max_iters = NMAX; |
50 | 436k | size_t rem = (uintptr_t)buf & 15; |
51 | 436k | size_t align_offset = 16 - rem; |
52 | 436k | size_t k = 0; |
53 | 436k | if (rem) { |
54 | 359k | if (len < 16 + align_offset) { |
55 | | /* Eat the cost of this one unaligned load so that we don't |
56 | | * skip the vectorization entirely. One unaligned 16-byte vector |
57 | | * pass is better than doing the remaining 16 + up to 15 bytes |
58 | | * as scalar sums */ |
59 | 173k | vbuf = _mm_loadu_si128((__m128i*)buf); |
60 | 173k | len -= 16; |
61 | 173k | buf += 16; |
62 | 173k | vs1 = _mm_cvtsi32_si128(adler); |
63 | 173k | vs2 = _mm_cvtsi32_si128(sum2); |
64 | 173k | vs3 = _mm_setzero_si128(); |
65 | 173k | vs1_0 = vs1; |
66 | 173k | goto unaligned_jmp; |
67 | 173k | } |
68 | | |
69 | 1.50M | for (size_t i = 0; i < align_offset; ++i) { |
70 | 1.31M | adler += *(buf++); |
71 | 1.31M | sum2 += adler; |
72 | 1.31M | } |
73 | | |
74 | | /* reduce the NMAX iteration budget by the scalar sums already |
75 | | * done above, so sum2 cannot overflow before the next modulo */ |
76 | 186k | len -= align_offset; |
77 | 186k | max_iters -= align_offset; |
78 | 186k | } |
79 | | |
80 | | |
81 | 700k | while (len >= 16) { |
82 | 263k | vs1 = _mm_cvtsi32_si128(adler); |
83 | 263k | vs2 = _mm_cvtsi32_si128(sum2); |
84 | 263k | vs3 = _mm_setzero_si128(); |
85 | 263k | vs2_0 = _mm_setzero_si128(); |
86 | 263k | vs1_0 = vs1; |
87 | | |
88 | 263k | k = (len < max_iters ? len : max_iters); |
89 | 263k | k -= k % 16; |
90 | 263k | len -= k; |
91 | | |
92 | 263k | while (k >= 32) { |
93 | | /* |
94 | | vs1 = adler + sum(c[i]) |
95 | | vs2 = sum2 + 16 vs1 + sum( (16-i+1) c[i] ) |
96 | | */ |
97 | 0 | vbuf = _mm_load_si128((__m128i*)buf); |
98 | 0 | vbuf_0 = _mm_load_si128((__m128i*)(buf + 16)); |
99 | 0 | buf += 32; |
100 | 0 | k -= 32; |
101 | |
|
102 | 0 | v_sad_sum1 = _mm_sad_epu8(vbuf, zero); |
103 | 0 | v_sad_sum2 = _mm_sad_epu8(vbuf_0, zero); |
104 | 0 | vs1 = _mm_add_epi32(v_sad_sum1, vs1); |
105 | 0 | vs3 = _mm_add_epi32(vs1_0, vs3); |
106 | |
|
107 | 0 | vs1 = _mm_add_epi32(v_sad_sum2, vs1); |
108 | 0 | v_short_sum2 = _mm_maddubs_epi16(vbuf, dot2v); |
109 | 0 | vsum2 = _mm_madd_epi16(v_short_sum2, dot3v); |
110 | 0 | v_short_sum2_0 = _mm_maddubs_epi16(vbuf_0, dot2v_0); |
111 | 0 | vs2 = _mm_add_epi32(vsum2, vs2); |
112 | 0 | vsum2_0 = _mm_madd_epi16(v_short_sum2_0, dot3v); |
113 | 0 | vs2_0 = _mm_add_epi32(vsum2_0, vs2_0); |
114 | 0 | vs1_0 = vs1; |
115 | 0 | } |
116 | | |
117 | 263k | vs2 = _mm_add_epi32(vs2_0, vs2); |
118 | 263k | vs3 = _mm_slli_epi32(vs3, 5); |
119 | 263k | vs2 = _mm_add_epi32(vs3, vs2); |
120 | 263k | vs3 = _mm_setzero_si128(); |
121 | | |
122 | 700k | while (k >= 16) { |
123 | | /* |
124 | | vs1 = adler + sum(c[i]) |
125 | | vs2 = sum2 + 16 vs1 + sum( (16-i+1) c[i] ) |
126 | | */ |
127 | 263k | vbuf = _mm_load_si128((__m128i*)buf); |
128 | 263k | buf += 16; |
129 | 263k | k -= 16; |
130 | | |
131 | 436k | unaligned_jmp: |
132 | 436k | v_sad_sum1 = _mm_sad_epu8(vbuf, zero); |
133 | 436k | vs1 = _mm_add_epi32(v_sad_sum1, vs1); |
134 | 436k | vs3 = _mm_add_epi32(vs1_0, vs3); |
135 | 436k | v_short_sum2 = _mm_maddubs_epi16(vbuf, dot2v_0); |
136 | 436k | vsum2 = _mm_madd_epi16(v_short_sum2, dot3v); |
137 | 436k | vs2 = _mm_add_epi32(vsum2, vs2); |
138 | 436k | vs1_0 = vs1; |
139 | 436k | } |
140 | | |
141 | 436k | vs3 = _mm_slli_epi32(vs3, 4); |
142 | 436k | vs2 = _mm_add_epi32(vs2, vs3); |
143 | | |
144 | | /* We don't need a full horizontal sum here: psadbw already performs a partial |
145 | | * reduction implicitly, leaving 32-bit sums only in vector positions 0 and 2. |
146 | | * This saves some contention on the shuffle port(s); see the reduction sketch below. */ |
147 | 436k | adler = partial_hsum(vs1) % BASE; |
148 | 436k | sum2 = hsum(vs2) % BASE; |
149 | 436k | max_iters = NMAX; |
150 | 436k | } |
151 | | |
152 | | /* Process tail (len < 16). */ |
153 | 436k | return adler32_len_16(adler, buf, len, sum2); |
154 | 263k | } |
155 | | |
156 | | #endif |
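
For reference, the recurrence stated in the comments inside the two inner loops above (vs1 = adler + sum(c[i]), vs2 = sum2 + 16 vs1 + the weighted sum) collapses, for a single 16-byte block, to the scalar update sketched below. This is an illustration only: the function name is hypothetical and the code is not part of zlib-ng.

    /* Illustration only: the scalar equivalent of one 16-byte vector iteration
     * above. The function name is hypothetical and not part of zlib-ng. */
    #include <stdint.h>

    static void adler_block16_scalar(uint32_t *s1, uint32_t *s2, const uint8_t *c) {
        uint32_t sum_c = 0, weighted = 0;
        for (int i = 0; i < 16; ++i) {
            sum_c    += c[i];             /* what psadbw accumulates into vs1     */
            weighted += (16 - i) * c[i];  /* what pmaddubsw/pmaddwd feed into vs2 */
        }
        *s2 += 16 * *s1 + weighted;       /* the "16 vs1" term; vs3 << 4 above    */
        *s1 += sum_c;
    }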
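
The final reduction at lines 147-148 of the listing relies on the psadbw property noted in the comment just above it: vs1 only ever holds non-zero values in 32-bit lanes 0 and 2, so partial_hsum() can skip half the work, while vs2 needs a full four-lane sum. The real helpers live in adler32_ssse3_p.h; the sketch below is only one plausible way to write such reductions, not the actual implementation.

    /* Sketch only, assuming lanes 1 and 3 of the input are zero for the partial
     * variant (true after psadbw). The real partial_hsum()/hsum() helpers live
     * in adler32_ssse3_p.h and may be implemented differently. */
    #include <immintrin.h>
    #include <stdint.h>

    static inline uint32_t partial_hsum_sketch(__m128i v) {
        /* add lane 2 into lane 0; lanes 1 and 3 are known to be zero */
        __m128i hi = _mm_shuffle_epi32(v, _MM_SHUFFLE(3, 3, 3, 2));
        return (uint32_t)_mm_cvtsi128_si32(_mm_add_epi32(v, hi));
    }

    static inline uint32_t hsum_sketch(__m128i v) {
        /* full four-lane horizontal add: fold the halves, then fold the pairs */
        __m128i sum = _mm_add_epi32(v, _mm_shuffle_epi32(v, _MM_SHUFFLE(1, 0, 3, 2)));
        sum = _mm_add_epi32(sum, _mm_shuffle_epi32(sum, _MM_SHUFFLE(2, 3, 0, 1)));
        return (uint32_t)_mm_cvtsi128_si32(sum);
    }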