/rust/registry/src/index.crates.io-1949cf8c6b5b557f/foldhash-0.1.5/src/lib.rs
//! This crate provides foldhash, a fast, non-cryptographic, minimally
//! DoS-resistant hashing algorithm designed for computational uses such as
//! hashmaps, bloom filters, count sketching, etc.
//!
//! When should you **not** use foldhash:
//!
//! - You are afraid of people studying your long-running program's behavior
//!   to reverse engineer its internal random state and using this knowledge to
//!   create many colliding inputs for computational complexity attacks.
//!
//! - You expect foldhash to have a consistent output across versions or
//!   platforms, such as for persistent file formats or communication protocols.
//!
//! - You are relying on foldhash's properties for any kind of security.
//!   Foldhash is **not appropriate for any cryptographic purpose**.
//!
//! Foldhash has two variants, one optimized for speed which is ideal for data
//! structures such as hash maps and bloom filters, and one optimized for
//! statistical quality which is ideal for algorithms such as
//! [HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog) and
//! [MinHash](https://en.wikipedia.org/wiki/MinHash).
//!
//! Foldhash can be used in a `#![no_std]` environment by disabling its default
//! `"std"` feature.
//!
//! # Usage
//!
//! The easiest way to use this crate with the standard library [`HashMap`] or
//! [`HashSet`] is to import them from `foldhash` instead, along with the
//! extension traits to make [`HashMap::new`] and [`HashMap::with_capacity`]
//! work out-of-the-box:
//!
//! ```rust
//! use foldhash::{HashMap, HashMapExt};
//!
//! let mut hm = HashMap::new();
//! hm.insert(42, "hello");
//! ```
//!
//! You can also avoid the convenience types and do it manually by initializing
//! a [`RandomState`](fast::RandomState), for example if you are using a different hash map
//! implementation like [`hashbrown`](https://docs.rs/hashbrown/):
//!
//! ```rust
//! use hashbrown::HashMap;
//! use foldhash::fast::RandomState;
//!
//! let mut hm = HashMap::with_hasher(RandomState::default());
//! hm.insert("foo", "bar");
//! ```
//!
//! The above methods are the recommended way to use foldhash, as they
//! automatically generate a randomly seeded hasher instance for you. If you
//! absolutely must have determinism you can use [`FixedState`](fast::FixedState)
//! instead, but note that this makes you trivially vulnerable to HashDoS
//! attacks and might lead to quadratic runtime when moving data from one
//! hashmap/set into another:
//!
//! ```rust
//! use std::collections::HashSet;
//! use foldhash::fast::FixedState;
//!
//! let mut hm = HashSet::with_hasher(FixedState::with_seed(42));
//! hm.insert([1, 10, 100]);
//! ```
//!
//! If you rely on statistical properties of the hash for the correctness of
//! your algorithm, such as in [HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog),
//! it is suggested to use the [`RandomState`](quality::RandomState)
//! or [`FixedState`](quality::FixedState) from the [`quality`] module instead
//! of the [`fast`] module. The latter is optimized purely for speed in hash
//! tables and has known statistical imperfections.
//!
//! Finally, you can also directly use the [`RandomState`](quality::RandomState)
//! or [`FixedState`](quality::FixedState) to manually hash items using the
//! [`BuildHasher`](std::hash::BuildHasher) trait:
//! ```rust
//! use std::hash::BuildHasher;
//! use foldhash::quality::RandomState;
//!
//! let random_state = RandomState::default();
//! let hash = random_state.hash_one("hello world");
//! ```
//!
//! ## Seeding
//!
//! Foldhash relies on a single 8-byte per-hasher seed, which ideally should
//! differ from instance to instance, as well as a larger [`SharedSeed`] which
//! may be shared by many different instances.
//!
//! To reduce overhead, this [`SharedSeed`] is typically initialized once and
//! stored. To avoid each hashmap unnecessarily storing a reference to this
//! value, foldhash provides three kinds of
//! [`BuildHasher`](core::hash::BuildHasher)s (both for [`fast`] and [`quality`]):
//! 1. [`RandomState`](fast::RandomState), which always generates a
//!    random per-hasher seed and implicitly stores a reference to [`SharedSeed::global_random`].
//! 2. [`FixedState`](fast::FixedState), which by default uses a fixed
//!    per-hasher seed and implicitly stores a reference to [`SharedSeed::global_fixed`].
//! 3. [`SeedableRandomState`](fast::SeedableRandomState), which works like
//!    [`RandomState`](fast::RandomState) by default but can be seeded in any manner.
//!    This state must include an explicit reference to a [`SharedSeed`], and thus
//!    this struct is 16 bytes as opposed to just 8 bytes for the previous two.
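//!
//! For example (restricting to the seeding behavior described above): two
//! [`FixedState`](fast::FixedState) instances built with the same per-hasher
//! seed hash identically, while independently created
//! [`RandomState`](fast::RandomState) instances will almost certainly not.
//!
//! ```rust
//! use std::hash::BuildHasher;
//! use foldhash::fast::{FixedState, RandomState};
//!
//! // Same fixed per-hasher seed: the hashes agree.
//! let a = FixedState::with_seed(7).hash_one("foldhash");
//! let b = FixedState::with_seed(7).hash_one("foldhash");
//! assert_eq!(a, b);
//!
//! // Independently seeded states: the hashes will almost certainly differ,
//! // which is what makes precomputing collisions impractical.
//! let _c = RandomState::default().hash_one("foldhash");
//! let _d = RandomState::default().hash_one("foldhash");
//! ```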

#![cfg_attr(all(not(test), not(feature = "std")), no_std)]
#![warn(missing_docs)]

pub mod fast;
pub mod quality;
mod seed;
pub use seed::SharedSeed;

#[cfg(feature = "std")]
mod convenience;
#[cfg(feature = "std")]
pub use convenience::*;

// Arbitrary constants with high entropy. Hexadecimal digits of pi were used.
const ARBITRARY0: u64 = 0x243f6a8885a308d3;
const ARBITRARY1: u64 = 0x13198a2e03707344;
const ARBITRARY2: u64 = 0xa4093822299f31d0;
const ARBITRARY3: u64 = 0x082efa98ec4e6c89;
const ARBITRARY4: u64 = 0x452821e638d01377;
const ARBITRARY5: u64 = 0xbe5466cf34e90c6c;
const ARBITRARY6: u64 = 0xc0ac29b7c97c50dd;
const ARBITRARY7: u64 = 0x3f84d5b5b5470917;
const ARBITRARY8: u64 = 0x9216d5d98979fb1b;
const ARBITRARY9: u64 = 0xd1310ba698dfb5ac;

#[inline(always)]
const fn folded_multiply(x: u64, y: u64) -> u64 {
    // The following code path is only fast if 64-bit to 128-bit widening
    // multiplication is supported by the architecture. Most 64-bit
    // architectures except SPARC64 and Wasm64 support it. However, the target
    // pointer width doesn't always indicate that we are dealing with a 64-bit
    // architecture, as there are ABIs that reduce the pointer width, especially
    // on AArch64 and x86-64. WebAssembly (regardless of pointer width) supports
    // 64-bit to 128-bit widening multiplication with the `wide-arithmetic`
    // proposal.
    #[cfg(any(
        all(
            target_pointer_width = "64",
            not(any(target_arch = "sparc64", target_arch = "wasm64")),
        ),
        target_arch = "aarch64",
        target_arch = "x86_64",
        all(target_family = "wasm", target_feature = "wide-arithmetic"),
    ))]
    {
        // We compute the full u64 x u64 -> u128 product; this is a single mul
        // instruction on x86-64, and one mul plus one mulhi on ARM64.
        let full = (x as u128).wrapping_mul(y as u128);
        let lo = full as u64;
        let hi = (full >> 64) as u64;

        // The middle bits of the full product fluctuate the most with small
        // changes in the input. These are the top bits of lo and the bottom
        // bits of hi. We can thus make the entire output fluctuate with small
        // changes to the input by XOR'ing these two halves.
        lo ^ hi
    }

    #[cfg(not(any(
        all(
            target_pointer_width = "64",
            not(any(target_arch = "sparc64", target_arch = "wasm64")),
        ),
        target_arch = "aarch64",
        target_arch = "x86_64",
        all(target_family = "wasm", target_feature = "wide-arithmetic"),
    )))]
    {
        // u64 x u64 -> u128 product is quite expensive on 32-bit.
        // We approximate it by expanding the multiplication and eliminating
        // carries by replacing additions with XORs:
        //     (2^32 hx + lx)*(2^32 hy + ly) =
        //     2^64 hx*hy + 2^32 (hx*ly + lx*hy) + lx*ly ~=
        //     2^64 hx*hy ^ 2^32 (hx*ly ^ lx*hy) ^ lx*ly
        // Which when folded becomes:
        //     (hx*hy ^ lx*ly) ^ (hx*ly ^ lx*hy).rotate_right(32)

        let lx = x as u32;
        let ly = y as u32;
        let hx = (x >> 32) as u32;
        let hy = (y >> 32) as u32;

        let ll = (lx as u64).wrapping_mul(ly as u64);
        let lh = (lx as u64).wrapping_mul(hy as u64);
        let hl = (hx as u64).wrapping_mul(ly as u64);
        let hh = (hx as u64).wrapping_mul(hy as u64);

        (hh ^ ll) ^ (hl ^ lh).rotate_right(32)
    }
}

#[inline(always)]
const fn rotate_right(x: u64, r: u32) -> u64 {
    #[cfg(any(
        target_pointer_width = "64",
        target_arch = "aarch64",
        target_arch = "x86_64",
        target_family = "wasm",
    ))]
    {
        x.rotate_right(r)
    }

    #[cfg(not(any(
        target_pointer_width = "64",
        target_arch = "aarch64",
        target_arch = "x86_64",
        target_family = "wasm",
    )))]
    {
        // On platforms without 64-bit arithmetic, rotation can be slow; rotate
        // each 32-bit half independently instead.
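        // Note that this is not an exact 64-bit rotation, only a cheap
        // bijective mixing of each half on its own.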
        let lo = (x as u32).rotate_right(r);
        let hi = ((x >> 32) as u32).rotate_right(r);
        ((hi as u64) << 32) | lo as u64
    }
}

/// Hashes strings >= 16 bytes; behavior is unspecified when `bytes.len() < 16`.
fn hash_bytes_medium(bytes: &[u8], mut s0: u64, mut s1: u64, fold_seed: u64) -> u64 {
    // Process 32 bytes per iteration, 16 bytes from the start, 16 bytes from
    // the end. On the last iteration these two chunks can overlap, but that is
    // perfectly fine.
    let left_to_right = bytes.chunks_exact(16);
    let mut right_to_left = bytes.rchunks_exact(16);
    for lo in left_to_right {
        let hi = right_to_left.next().unwrap();
        let unconsumed_start = lo.as_ptr();
        let unconsumed_end = hi.as_ptr_range().end;
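        // Once the forward chunk starts at or beyond the end of the backward
        // chunk, every byte has already been folded in, so we can stop.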
        if unconsumed_start >= unconsumed_end {
            break;
        }

        let a = u64::from_ne_bytes(lo[0..8].try_into().unwrap());
        let b = u64::from_ne_bytes(lo[8..16].try_into().unwrap());
        let c = u64::from_ne_bytes(hi[0..8].try_into().unwrap());
        let d = u64::from_ne_bytes(hi[8..16].try_into().unwrap());
        s0 = folded_multiply(a ^ s0, c ^ fold_seed);
        s1 = folded_multiply(b ^ s1, d ^ fold_seed);
    }

    s0 ^ s1
}

/// Hashes strings >= 16 bytes; behavior is unspecified when `bytes.len() < 16`.
#[cold]
#[inline(never)]
fn hash_bytes_long(
    bytes: &[u8],
    mut s0: u64,
    mut s1: u64,
    mut s2: u64,
    mut s3: u64,
    fold_seed: u64,
) -> u64 {
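    // Process 64 bytes per iteration, split across four independent
    // accumulators (s0..s3) so the folded multiplies can overlap in the CPU.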
    let chunks = bytes.chunks_exact(64);
    let remainder = chunks.remainder().len();
    for chunk in chunks {
        let a = u64::from_ne_bytes(chunk[0..8].try_into().unwrap());
        let b = u64::from_ne_bytes(chunk[8..16].try_into().unwrap());
        let c = u64::from_ne_bytes(chunk[16..24].try_into().unwrap());
        let d = u64::from_ne_bytes(chunk[24..32].try_into().unwrap());
        let e = u64::from_ne_bytes(chunk[32..40].try_into().unwrap());
        let f = u64::from_ne_bytes(chunk[40..48].try_into().unwrap());
        let g = u64::from_ne_bytes(chunk[48..56].try_into().unwrap());
        let h = u64::from_ne_bytes(chunk[56..64].try_into().unwrap());
        s0 = folded_multiply(a ^ s0, e ^ fold_seed);
        s1 = folded_multiply(b ^ s1, f ^ fold_seed);
        s2 = folded_multiply(c ^ s2, g ^ fold_seed);
        s3 = folded_multiply(d ^ s3, h ^ fold_seed);
    }
    s0 ^= s2;
    s1 ^= s3;

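    // Any tail shorter than 64 bytes is handed to hash_bytes_medium, which
    // needs at least 16 bytes, so re-hash at least the last 16 bytes (this may
    // overlap bytes already processed above, which is fine).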
    if remainder > 0 {
        hash_bytes_medium(&bytes[bytes.len() - remainder.max(16)..], s0, s1, fold_seed)
    } else {
        s0 ^ s1
    }
}
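
// An illustrative sanity-check sketch: on targets where the widening
// multiplication path above is taken (e.g. x86-64 and AArch64),
// `folded_multiply` is exactly the XOR of the low and high halves of the
// full 128-bit product.
#[cfg(all(test, any(target_arch = "x86_64", target_arch = "aarch64")))]
mod folded_multiply_sketch {
    use super::folded_multiply;

    #[test]
    fn folds_the_full_product() {
        let x = 0x0123_4567_89ab_cdef_u64;
        let y = 0xfedc_ba98_7654_3210_u64;
        let full = (x as u128) * (y as u128);
        let expected = (full as u64) ^ ((full >> 64) as u64);
        assert_eq!(folded_multiply(x, y), expected);
    }
}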