/rust/registry/src/index.crates.io-6f17d22bba15001f/sha2-0.10.9/src/sha512/soft.rs
#![allow(clippy::many_single_char_names)]
use crate::consts::{BLOCK_LEN, K64X2};
use core::convert::TryInto;

fn add(a: [u64; 2], b: [u64; 2]) -> [u64; 2] {
    [a[0].wrapping_add(b[0]), a[1].wrapping_add(b[1])]
}

/// Not an intrinsic, but works like an unaligned load.
fn sha512load(v0: [u64; 2], v1: [u64; 2]) -> [u64; 2] {
    [v1[1], v0[0]]
}

/// Performs 2 rounds of the SHA-512 message schedule update.
pub fn sha512_schedule_x2(v0: [u64; 2], v1: [u64; 2], v4to5: [u64; 2], v7: [u64; 2]) -> [u64; 2] {
    // sigma 0
    fn sigma0(x: u64) -> u64 {
        ((x << 63) | (x >> 1)) ^ ((x << 56) | (x >> 8)) ^ (x >> 7)
    }

    // sigma 1
    fn sigma1(x: u64) -> u64 {
        ((x << 45) | (x >> 19)) ^ ((x << 3) | (x >> 61)) ^ (x >> 6)
    }

    let [w1, w0] = v0;
    let [_, w2] = v1;
    let [w10, w9] = v4to5;
    let [w15, w14] = v7;

    let w16 = sigma1(w14)
        .wrapping_add(w9)
        .wrapping_add(sigma0(w1))
        .wrapping_add(w0);
    let w17 = sigma1(w15)
        .wrapping_add(w10)
        .wrapping_add(sigma0(w2))
        .wrapping_add(w1);

    [w17, w16]
}
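
// A minimal sketch (not part of the upstream crate) of how the packed-lane
// convention could be exercised: each argument carries two adjacent schedule
// words in [w_{i+1}, w_i] order, and the result is [w17, w16]. The sigma
// functions are re-derived here with `rotate_right` purely for comparison;
// the module name and sample values are illustrative assumptions.
#[cfg(test)]
mod schedule_sketch {
    use super::sha512_schedule_x2;

    fn small_sigma0(x: u64) -> u64 {
        x.rotate_right(1) ^ x.rotate_right(8) ^ (x >> 7)
    }

    fn small_sigma1(x: u64) -> u64 {
        x.rotate_right(19) ^ x.rotate_right(61) ^ (x >> 6)
    }

    #[test]
    fn packs_two_new_schedule_words() {
        // Arbitrary stand-ins for w0, w1, w2, w9, w10, w14, w15.
        let (w0, w1, w2) = (0x0123_4567_89ab_cdef_u64, 0xfedc_ba98_7654_3210, 0x0f1e_2d3c_4b5a_6978);
        let (w9, w10) = (0x1111_2222_3333_4444_u64, 0x5555_6666_7777_8888);
        let (w14, w15) = (0x9999_aaaa_bbbb_cccc_u64, 0xdddd_eeee_ffff_0000);

        // Expected new schedule words per the FIPS 180-4 recurrence.
        let w16 = small_sigma1(w14)
            .wrapping_add(w9)
            .wrapping_add(small_sigma0(w1))
            .wrapping_add(w0);
        let w17 = small_sigma1(w15)
            .wrapping_add(w10)
            .wrapping_add(small_sigma0(w2))
            .wrapping_add(w1);

        // Only the low lane of v1 (w2) is read, so its high lane can be anything.
        assert_eq!(
            sha512_schedule_x2([w1, w0], [0, w2], [w10, w9], [w15, w14]),
            [w17, w16]
        );
    }
}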

/// Performs one round of the SHA-512 message block digest.
pub fn sha512_digest_round(
    ae: [u64; 2],
    bf: [u64; 2],
    cg: [u64; 2],
    dh: [u64; 2],
    wk0: u64,
) -> [u64; 2] {
    macro_rules! big_sigma0 {
        ($a:expr) => {
            ($a.rotate_right(28) ^ $a.rotate_right(34) ^ $a.rotate_right(39))
        };
    }
    macro_rules! big_sigma1 {
        ($a:expr) => {
            ($a.rotate_right(14) ^ $a.rotate_right(18) ^ $a.rotate_right(41))
        };
    }
    macro_rules! bool3ary_202 {
        ($a:expr, $b:expr, $c:expr) => {
            $c ^ ($a & ($b ^ $c))
        };
    } // Choose, MD5F, SHA1C
    macro_rules! bool3ary_232 {
        ($a:expr, $b:expr, $c:expr) => {
            ($a & $b) ^ ($a & $c) ^ ($b & $c)
        };
    } // Majority, SHA1M

    let [a0, e0] = ae;
    let [b0, f0] = bf;
    let [c0, g0] = cg;
    let [d0, h0] = dh;

    // a round
    let x0 = big_sigma1!(e0)
        .wrapping_add(bool3ary_202!(e0, f0, g0))
        .wrapping_add(wk0)
        .wrapping_add(h0);
    let y0 = big_sigma0!(a0).wrapping_add(bool3ary_232!(a0, b0, c0));
    let (a1, _, _, _, e1, _, _, _) = (
        x0.wrapping_add(y0),
        a0,
        b0,
        c0,
        x0.wrapping_add(d0),
        e0,
        f0,
        g0,
    );

    [a1, e1]
}
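
// A sketch (not part of the upstream crate) relating one call to the textbook
// FIPS 180-4 round function: with wk0 = W_t + K_t, the call computes
// T1 = h + BigSigma1(e) + Ch(e, f, g) + wk0 and returns
// [a', e'] = [T1 + BigSigma0(a) + Maj(a, b, c), d + T1].
// The module name and the sample inputs below are illustrative assumptions.
#[cfg(test)]
mod digest_round_sketch {
    use super::sha512_digest_round;

    #[test]
    fn matches_fips_round_formulas() {
        // Arbitrary working variables a..h and a combined W_t + K_t word.
        let (a, b, c, d) = (0x0123_4567_89ab_cdef_u64, 0x1122_3344_5566_7788, 0x99aa_bbcc_ddee_ff00, 0x0f1e_2d3c_4b5a_6978);
        let (e, f, g, h) = (0x8877_6655_4433_2211_u64, 0xfedc_ba98_7654_3210, 0x1357_9bdf_0246_8ace, 0xcafe_babe_dead_beef);
        let wk: u64 = 0x2468_ace0_1357_9bdf;

        // Choose, majority, and the two "big sigma" rotations, written out directly.
        let ch = g ^ (e & (f ^ g));
        let maj = (a & b) ^ (a & c) ^ (b & c);
        let bs0 = a.rotate_right(28) ^ a.rotate_right(34) ^ a.rotate_right(39);
        let bs1 = e.rotate_right(14) ^ e.rotate_right(18) ^ e.rotate_right(41);

        let t1 = h.wrapping_add(bs1).wrapping_add(ch).wrapping_add(wk);
        let expected = [t1.wrapping_add(bs0).wrapping_add(maj), d.wrapping_add(t1)];

        assert_eq!(sha512_digest_round([a, e], [b, f], [c, g], [d, h], wk), expected);
    }
}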

/// Process a block with the SHA-512 algorithm.
pub fn sha512_digest_block_u64(state: &mut [u64; 8], block: &[u64; 16]) {
    let k = &K64X2;

    macro_rules! schedule {
        ($v0:expr, $v1:expr, $v4:expr, $v5:expr, $v7:expr) => {
            sha512_schedule_x2($v0, $v1, sha512load($v4, $v5), $v7)
        };
    }

    macro_rules! rounds4 {
        ($ae:ident, $bf:ident, $cg:ident, $dh:ident, $wk0:expr, $wk1:expr) => {{
            let [u, t] = $wk0;
            let [w, v] = $wk1;

            $dh = sha512_digest_round($ae, $bf, $cg, $dh, t);
            $cg = sha512_digest_round($dh, $ae, $bf, $cg, u);
            $bf = sha512_digest_round($cg, $dh, $ae, $bf, v);
            $ae = sha512_digest_round($bf, $cg, $dh, $ae, w);
        }};
    }

    let mut ae = [state[0], state[4]];
    let mut bf = [state[1], state[5]];
    let mut cg = [state[2], state[6]];
    let mut dh = [state[3], state[7]];

    // Rounds 0..20
    let (mut w1, mut w0) = ([block[3], block[2]], [block[1], block[0]]);
    rounds4!(ae, bf, cg, dh, add(k[0], w0), add(k[1], w1));
    let (mut w3, mut w2) = ([block[7], block[6]], [block[5], block[4]]);
    rounds4!(ae, bf, cg, dh, add(k[2], w2), add(k[3], w3));
    let (mut w5, mut w4) = ([block[11], block[10]], [block[9], block[8]]);
    rounds4!(ae, bf, cg, dh, add(k[4], w4), add(k[5], w5));
    let (mut w7, mut w6) = ([block[15], block[14]], [block[13], block[12]]);
    rounds4!(ae, bf, cg, dh, add(k[6], w6), add(k[7], w7));
    let mut w8 = schedule!(w0, w1, w4, w5, w7);
    let mut w9 = schedule!(w1, w2, w5, w6, w8);
    rounds4!(ae, bf, cg, dh, add(k[8], w8), add(k[9], w9));

    // Rounds 20..40
    w0 = schedule!(w2, w3, w6, w7, w9);
    w1 = schedule!(w3, w4, w7, w8, w0);
    rounds4!(ae, bf, cg, dh, add(k[10], w0), add(k[11], w1));
    w2 = schedule!(w4, w5, w8, w9, w1);
    w3 = schedule!(w5, w6, w9, w0, w2);
    rounds4!(ae, bf, cg, dh, add(k[12], w2), add(k[13], w3));
    w4 = schedule!(w6, w7, w0, w1, w3);
    w5 = schedule!(w7, w8, w1, w2, w4);
    rounds4!(ae, bf, cg, dh, add(k[14], w4), add(k[15], w5));
    w6 = schedule!(w8, w9, w2, w3, w5);
    w7 = schedule!(w9, w0, w3, w4, w6);
    rounds4!(ae, bf, cg, dh, add(k[16], w6), add(k[17], w7));
    w8 = schedule!(w0, w1, w4, w5, w7);
    w9 = schedule!(w1, w2, w5, w6, w8);
    rounds4!(ae, bf, cg, dh, add(k[18], w8), add(k[19], w9));

    // Rounds 40..60
    w0 = schedule!(w2, w3, w6, w7, w9);
    w1 = schedule!(w3, w4, w7, w8, w0);
    rounds4!(ae, bf, cg, dh, add(k[20], w0), add(k[21], w1));
    w2 = schedule!(w4, w5, w8, w9, w1);
    w3 = schedule!(w5, w6, w9, w0, w2);
    rounds4!(ae, bf, cg, dh, add(k[22], w2), add(k[23], w3));
    w4 = schedule!(w6, w7, w0, w1, w3);
    w5 = schedule!(w7, w8, w1, w2, w4);
    rounds4!(ae, bf, cg, dh, add(k[24], w4), add(k[25], w5));
    w6 = schedule!(w8, w9, w2, w3, w5);
    w7 = schedule!(w9, w0, w3, w4, w6);
    rounds4!(ae, bf, cg, dh, add(k[26], w6), add(k[27], w7));
    w8 = schedule!(w0, w1, w4, w5, w7);
    w9 = schedule!(w1, w2, w5, w6, w8);
    rounds4!(ae, bf, cg, dh, add(k[28], w8), add(k[29], w9));

    // Rounds 60..80
    w0 = schedule!(w2, w3, w6, w7, w9);
    w1 = schedule!(w3, w4, w7, w8, w0);
    rounds4!(ae, bf, cg, dh, add(k[30], w0), add(k[31], w1));
    w2 = schedule!(w4, w5, w8, w9, w1);
    w3 = schedule!(w5, w6, w9, w0, w2);
    rounds4!(ae, bf, cg, dh, add(k[32], w2), add(k[33], w3));
    w4 = schedule!(w6, w7, w0, w1, w3);
    w5 = schedule!(w7, w8, w1, w2, w4);
    rounds4!(ae, bf, cg, dh, add(k[34], w4), add(k[35], w5));
    w6 = schedule!(w8, w9, w2, w3, w5);
    w7 = schedule!(w9, w0, w3, w4, w6);
    rounds4!(ae, bf, cg, dh, add(k[36], w6), add(k[37], w7));
    w8 = schedule!(w0, w1, w4, w5, w7);
    w9 = schedule!(w1, w2, w5, w6, w8);
    rounds4!(ae, bf, cg, dh, add(k[38], w8), add(k[39], w9));

    let [a, e] = ae;
    let [b, f] = bf;
    let [c, g] = cg;
    let [d, h] = dh;

    state[0] = state[0].wrapping_add(a);
    state[1] = state[1].wrapping_add(b);
    state[2] = state[2].wrapping_add(c);
    state[3] = state[3].wrapping_add(d);
    state[4] = state[4].wrapping_add(e);
    state[5] = state[5].wrapping_add(f);
    state[6] = state[6].wrapping_add(g);
    state[7] = state[7].wrapping_add(h);
}

pub fn compress(state: &mut [u64; 8], blocks: &[[u8; 128]]) {
    let mut block_u64 = [0u64; BLOCK_LEN];
    // LLVM cannot yet reason well about aliasing here; without this local
    // copy it emits unnecessary stores back to `state` on every round.
    let mut state_cpy = *state;
    for block in blocks {
        for (o, chunk) in block_u64.iter_mut().zip(block.chunks_exact(8)) {
            *o = u64::from_be_bytes(chunk.try_into().unwrap());
        }
        sha512_digest_block_u64(&mut state_cpy, &block_u64);
    }
    *state = state_cpy;
}
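
// Usage sketch (not part of the upstream crate): compressing a single padded
// block. The starting state below is the SHA-512 initial hash value from
// FIPS 180-4, and the block is the padding of the empty message (a 0x80 byte
// followed by zero bytes, with a zero 128-bit length field). Serializing the
// resulting state big-endian should give SHA-512(""), which begins with
// cf83e1357eefb8bd. The module name is an illustrative assumption.
#[cfg(test)]
mod compress_sketch {
    use super::compress;

    #[test]
    fn compresses_the_padded_empty_message() {
        let mut state: [u64; 8] = [
            0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1,
            0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179,
        ];

        // Padded empty message: mandatory 0x80 bit, then zeros (length is 0 bits).
        let mut block = [0u8; 128];
        block[0] = 0x80;

        compress(&mut state, &[block]);

        // Serialize the state words big-endian to obtain the digest bytes.
        let mut digest = [0u8; 64];
        for (chunk, word) in digest.chunks_exact_mut(8).zip(state.iter()) {
            chunk.copy_from_slice(&word.to_be_bytes());
        }
        assert_eq!(&digest[..8], &[0xcf, 0x83, 0xe1, 0x35, 0x7e, 0xef, 0xb8, 0xbd]);
    }
}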