/rust/registry/src/index.crates.io-6f17d22bba15001f/simd-adler32-0.3.7/src/imp/sse2.rs
use super::Adler32Imp;

/// Resolves update implementation if CPU supports sse2 instructions.
pub fn get_imp() -> Option<Adler32Imp> {
  get_imp_inner()
}

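// `get_imp_inner` below has three mutually exclusive cfg variants:
//  * with `std` on x86/x86_64, SSE2 support is detected at runtime;
//  * without `std`, but compiled with `target_feature = "sse2"`, the SIMD path
//    is always available, so no runtime check is needed;
//  * otherwise no SSE2 implementation can be provided and `None` is returned.
//
// A caller sketch (assuming the crate-level alias
// `type Adler32Imp = fn(u16, u16, &[u8]) -> (u16, u16)`):
//
//   if let Some(update) = get_imp() {
//     let (a, b) = update(1, 0, data);
//     let adler32 = u32::from(b) << 16 | u32::from(a);
//   }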
#[inline]
#[cfg(all(feature = "std", any(target_arch = "x86", target_arch = "x86_64")))]
fn get_imp_inner() -> Option<Adler32Imp> {
  if std::is_x86_feature_detected!("sse2") {
    Some(imp::update)
  } else {
    None
  }
}

#[inline]
#[cfg(all(
  target_feature = "sse2",
  not(all(feature = "std", any(target_arch = "x86", target_arch = "x86_64")))
))]
fn get_imp_inner() -> Option<Adler32Imp> {
  Some(imp::update)
}

#[inline]
#[cfg(all(
  not(target_feature = "sse2"),
  not(all(feature = "std", any(target_arch = "x86", target_arch = "x86_64")))
))]
fn get_imp_inner() -> Option<Adler32Imp> {
  None
}

#[cfg(all(
  any(target_arch = "x86", target_arch = "x86_64"),
  any(feature = "std", target_feature = "sse2")
))]
mod imp {
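  // MOD is the Adler-32 modulus (the largest prime below 2^16). NMAX is the
  // largest number of bytes that can be added into the 32-bit accumulators
  // before they must be reduced modulo MOD to avoid overflow (the same bound
  // zlib uses). BLOCK_SIZE is the number of bytes handled per SIMD iteration,
  // and CHUNK_SIZE rounds NMAX down to a whole number of blocks.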
  const MOD: u32 = 65521;
  const NMAX: usize = 5552;
  const BLOCK_SIZE: usize = 32;
  const CHUNK_SIZE: usize = NMAX / BLOCK_SIZE * BLOCK_SIZE;

  #[cfg(target_arch = "x86")]
  use core::arch::x86::*;
  #[cfg(target_arch = "x86_64")]
  use core::arch::x86_64::*;

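  // Adler-32 keeps two running sums over the input bytes d1..dn:
  //   a = 1 + d1 + d2 + ... + dn           (mod 65521)
  //   b = n*d1 + (n-1)*d2 + ... + dn + n   (mod 65521)
  // and the checksum is b << 16 | a. For example, b"Wikipedia" gives a = 920
  // and b = 4582, i.e. 0x11E60398 (see the `wiki` test below).
  //
  // `update` continues a checksum from an existing (a, b) pair, so a fresh
  // checksum starts from (1, 0).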
  pub fn update(a: u16, b: u16, data: &[u8]) -> (u16, u16) {
    unsafe { update_imp(a, b, data) }
  }

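  // Splits the input into CHUNK_SIZE chunks so that the 32-bit accumulators
  // only need to be reduced modulo MOD once per chunk; the tail shorter than a
  // chunk is handled by `update_block`.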
  #[inline]
  #[target_feature(enable = "sse2")]
  unsafe fn update_imp(a: u16, b: u16, data: &[u8]) -> (u16, u16) {
    let mut a = a as u32;
    let mut b = b as u32;

    let chunks = data.chunks_exact(CHUNK_SIZE);
    let remainder = chunks.remainder();
    for chunk in chunks {
      update_chunk_block(&mut a, &mut b, chunk);
    }

    update_block(&mut a, &mut b, remainder);

    (a as u16, b as u16)
  }

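  // Adds exactly one full CHUNK_SIZE chunk, deferring the modulo reduction to
  // the end of the chunk.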
  unsafe fn update_chunk_block(a: &mut u32, b: &mut u32, chunk: &[u8]) {
    debug_assert_eq!(
      chunk.len(),
      CHUNK_SIZE,
      "Unexpected chunk size (expected {}, got {})",
      CHUNK_SIZE,
      chunk.len()
    );

    reduce_add_blocks(a, b, chunk);

    *a %= MOD;
    *b %= MOD;
  }

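  // Adds the final partial chunk (at most CHUNK_SIZE bytes): whole 32-byte
  // blocks go through the SIMD path, and the remaining tail bytes are folded in
  // with the scalar Adler-32 recurrence.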
  unsafe fn update_block(a: &mut u32, b: &mut u32, chunk: &[u8]) {
    debug_assert!(
      chunk.len() <= CHUNK_SIZE,
      "Unexpected chunk size (expected <= {}, got {})",
      CHUNK_SIZE,
      chunk.len()
    );

    for byte in reduce_add_blocks(a, b, chunk) {
      *a += *byte as u32;
      *b += *a;
    }

    *a %= MOD;
    *b %= MOD;
  }

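  // Vectorised core: consumes whole BLOCK_SIZE-byte blocks from `chunk` and
  // returns the unprocessed tail. Per 32-byte block (split into two 16-byte
  // halves):
  //  * `a_v` accumulates the plain byte sums via `_mm_sad_epu8`;
  //  * `b_v` accumulates position-weighted sums via `maddubs` with weights
  //    32..17 (left half) and 16..1 (right half);
  //  * `p_v` accumulates the running value of `a_v` once per block; multiplying
  //    it by 32 at the end (`_mm_slli_epi32(p_v, 5)`) accounts for bytes in
  //    earlier blocks contributing to `b` once per later byte.
  // `p_v` is seeded with `a * blocks.len()` so the incoming `a` is counted once
  // for every byte processed, and `b_v` is seeded with the incoming `b`.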
  #[inline(always)]
  unsafe fn reduce_add_blocks<'a>(a: &mut u32, b: &mut u32, chunk: &'a [u8]) -> &'a [u8] {
    if chunk.len() < BLOCK_SIZE {
      return chunk;
    }

    let blocks = chunk.chunks_exact(BLOCK_SIZE);
    let blocks_remainder = blocks.remainder();

    let zero_v = _mm_setzero_si128();
    let weight_hi_v = get_weight_hi();
    let weight_lo_v = get_weight_lo();

    let mut p_v = _mm_set_epi32(0, 0, 0, (*a * blocks.len() as u32) as _);
    let mut a_v = _mm_setzero_si128();
    let mut b_v = _mm_set_epi32(0, 0, 0, *b as _);

    for block in blocks {
      let block_ptr = block.as_ptr() as *const _;
      let left_v = _mm_loadu_si128(block_ptr);
      let right_v = _mm_loadu_si128(block_ptr.add(1));

      p_v = _mm_add_epi32(p_v, a_v);

      a_v = _mm_add_epi32(a_v, _mm_sad_epu8(left_v, zero_v));
      let mad = maddubs(left_v, weight_hi_v);
      b_v = _mm_add_epi32(b_v, mad);

      a_v = _mm_add_epi32(a_v, _mm_sad_epu8(right_v, zero_v));
      let mad = maddubs(right_v, weight_lo_v);
      b_v = _mm_add_epi32(b_v, mad);
    }

    b_v = _mm_add_epi32(b_v, _mm_slli_epi32(p_v, 5));

    *a += reduce_add(a_v);
    *b = reduce_add(b_v);

    blocks_remainder
  }

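  // Multiplies the unsigned bytes of `a` by the matching weights in `b` and
  // sums adjacent products into 32-bit lanes. This is an SSE2-only stand-in for
  // the SSSE3 `_mm_maddubs_epi16` (but producing 32-bit sums): the bytes are
  // zero-extended to 16 bits first, so `_mm_madd_epi16` cannot overflow here
  // (the largest product is 255 * 32).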
  #[inline(always)]
  unsafe fn maddubs(a: __m128i, b: __m128i) -> __m128i {
    let a_lo = _mm_unpacklo_epi8(a, _mm_setzero_si128());
    let a_hi = _mm_unpackhi_epi8(a, _mm_setzero_si128());

    let b_lo = _mm_unpacklo_epi8(b, _mm_setzero_si128());
    let b_hi = _mm_unpackhi_epi8(b, _mm_setzero_si128());

    let lo = _mm_madd_epi16(a_lo, b_lo);
    let hi = _mm_madd_epi16(a_hi, b_hi);

    _mm_add_epi32(lo, hi)
  }

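  // Horizontal sum of the four 32-bit lanes of `v`.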
  #[inline(always)]
  unsafe fn reduce_add(v: __m128i) -> u32 {
    let hi = _mm_unpackhi_epi64(v, v);
    let sum = _mm_add_epi32(hi, v);
    let hi = _mm_shuffle_epi32(sum, crate::imp::_MM_SHUFFLE(2, 3, 0, 1));

    let sum = _mm_add_epi32(sum, hi);

    _mm_cvtsi128_si32(sum) as _
  }

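  // Per-byte weights for `maddubs`. Within a 32-byte block the byte at offset i
  // must be counted (32 - i) times in `b`, so the first loaded byte gets weight
  // 32 and the last gets weight 1. Note that `_mm_set_epi8` lists elements from
  // the most significant down, which is why the literals appear in ascending
  // order here.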
  #[inline(always)]
  unsafe fn get_weight_lo() -> __m128i {
    _mm_set_epi8(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)
  }

  #[inline(always)]
  unsafe fn get_weight_hi() -> __m128i {
    _mm_set_epi8(
      17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
    )
  }
}

#[cfg(test)]
mod tests {
  use rand::Rng;

  #[test]
  fn zeroes() {
    assert_sum_eq(&[]);
    assert_sum_eq(&[0]);
    assert_sum_eq(&[0, 0]);
    assert_sum_eq(&[0; 100]);
    assert_sum_eq(&[0; 1024]);
    assert_sum_eq(&[0; 1024 * 1024]);
  }

  #[test]
  fn ones() {
    assert_sum_eq(&[]);
    assert_sum_eq(&[1]);
    assert_sum_eq(&[1, 1]);
    assert_sum_eq(&[1; 100]);
    assert_sum_eq(&[1; 1024]);
    assert_sum_eq(&[1; 1024 * 1024]);
  }

  #[test]
  fn random() {
    let mut random = [0; 1024 * 1024];
    rand::thread_rng().fill(&mut random[..]);

    assert_sum_eq(&random[..1]);
    assert_sum_eq(&random[..100]);
    assert_sum_eq(&random[..1024]);
    assert_sum_eq(&random[..1024 * 1024]);
  }

  /// Example calculation from https://en.wikipedia.org/wiki/Adler-32.
  #[test]
  fn wiki() {
    assert_sum_eq(b"Wikipedia");
  }

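  /// Checks the SSE2 implementation against the reference `adler` crate; this
  /// is a no-op on targets where no SSE2 implementation can be resolved.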
  fn assert_sum_eq(data: &[u8]) {
    if let Some(update) = super::get_imp() {
      let (a, b) = update(1, 0, data);
      let left = u32::from(b) << 16 | u32::from(a);
      let right = adler::adler32_slice(data);

      assert_eq!(left, right, "len({})", data.len());
    }
  }
}