Coverage Report

Created: 2021-10-13 08:49

/src/botan/src/lib/hash/sha1/sha1_sse2/sha1_sse2.cpp
Line   Count  Source
   1          /*
   2           * SHA-1 using SSE2
   3           * Based on public domain code by Dean Gaudet
   4           *    (http://arctic.org/~dean/crypto/sha1.html)
   5           * (C) 2009-2011 Jack Lloyd
   6           *
   7           * Botan is released under the Simplified BSD License (see license.txt)
   8           */
   9
  10          #include <botan/internal/sha160.h>
  11          #include <botan/internal/rotate.h>
  12          #include <botan/internal/bit_ops.h>
  13          #include <emmintrin.h>
  14
  15          namespace Botan {
  16
  17          namespace SHA1_SSE2_F {
  18
  19          namespace {
  20
  21          /*
  22           * The first 16 words just need byte swapping. Preparing just means
  23           * adding in the round constants.
  24           */
  25
  26          #define prep00_15(P, W)                                      \
  27    771k     do {                                                      \
  28    771k        W = _mm_shufflehi_epi16(W, _MM_SHUFFLE(2, 3, 0, 1));   \
  29    771k        W = _mm_shufflelo_epi16(W, _MM_SHUFFLE(2, 3, 0, 1));   \
  30    771k        W = _mm_or_si128(_mm_slli_epi16(W, 8),                 \
  31    771k                         _mm_srli_epi16(W, 8));                \
  32    771k        P.u128 = _mm_add_epi32(W, K00_19);                     \
  33    771k     } while(0)
  34
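The two 16-bit shuffles swap the halves of each 32-bit lane, and the shift/or pair swaps the bytes within each half, so the macro performs a full 32-bit byte swap per lane before pre-adding the round constant. A scalar model of what prep00_15 computes per word (a sketch for illustration; prep00_15_scalar is a hypothetical helper, not part of this file):

   // Byte-swap one big-endian input word, then pre-add K for rounds 0-19.
   inline uint32_t prep00_15_scalar(uint32_t w)
      {
      const uint32_t swapped = (w << 24) |
                               ((w & 0x0000FF00) << 8) |
                               ((w >> 8) & 0x0000FF00) |
                               (w >> 24);
      return swapped + 0x5A827999;
      }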
  35          /*
  36          For each multiple of 4, t, we want to calculate this:
  37
  38          W[t+0] = rol(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1);
  39          W[t+1] = rol(W[t-2] ^ W[t-7] ^ W[t-13] ^ W[t-15], 1);
  40          W[t+2] = rol(W[t-1] ^ W[t-6] ^ W[t-12] ^ W[t-14], 1);
  41          W[t+3] = rol(W[t]   ^ W[t-5] ^ W[t-11] ^ W[t-13], 1);
  42
  43          we'll actually calculate this:
  44
  45          W[t+0] = rol(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1);
  46          W[t+1] = rol(W[t-2] ^ W[t-7] ^ W[t-13] ^ W[t-15], 1);
  47          W[t+2] = rol(W[t-1] ^ W[t-6] ^ W[t-12] ^ W[t-14], 1);
  48          W[t+3] = rol(  0    ^ W[t-5] ^ W[t-11] ^ W[t-13], 1);
  49          W[t+3] ^= rol(W[t+0], 1);
  50
  51          the parameters are:
  52
  53          W0 = &W[t-16];
  54          W1 = &W[t-12];
  55          W2 = &W[t- 8];
  56          W3 = &W[t- 4];
  57
  58          and on output:
  59          prepared = W0 + K
  60          W0 = W[t]..W[t+3]
  61          */
  62
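This works because rotation distributes over XOR: the true W[t+3] depends on W[t+0], which is produced in the same vector step, so lane 3 is computed with a zero placeholder and then patched with rol(W[t+0], 1). A scalar model of one four-word step (illustrative only; rol1 and expand4 are not names from this file):

   // Rotate left by one bit.
   inline uint32_t rol1(uint32_t x) { return (x << 1) | (x >> 31); }

   // One expansion step for t a multiple of 4, t >= 16, mirroring the
   // comment above: lane 3 starts from 0, then is patched with rol1(W[t+0]).
   void expand4(uint32_t W[], size_t t)
      {
      W[t+0] = rol1(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16]);
      W[t+1] = rol1(W[t-2] ^ W[t-7] ^ W[t-13] ^ W[t-15]);
      W[t+2] = rol1(W[t-1] ^ W[t-6] ^ W[t-12] ^ W[t-14]);
      W[t+3] = rol1(  0    ^ W[t-5] ^ W[t-11] ^ W[t-13]);
      W[t+3] ^= rol1(W[t+0]);
      }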
  63          /* Note that there is a step here where I want to do a rol by 1, which
  64           * normally would look like this:
  65           *
  66           * r1 = psrld r0,$31
  67           * r0 = pslld r0,$1
  68           * r0 = por r0,r1
  69           *
  70           * but instead I do this:
  71           *
  72           * r1 = pcmpltd r0,zero
  73           * r0 = paddd r0,r0
  74           * r0 = psubd r0,r1
  75           *
  76           * because pcmpltd and paddd are available in both MMX units on
  77           * Efficeon, Pentium-M, and Opteron, but shifts are available in
  78           * only one unit.
  79           */
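The compare/add/subtract form is a rotate-left-by-1 because comparing a lane against zero yields -1 (all ones) exactly when its top bit is set; adding a lane to itself shifts it left by one, and subtracting -1 then re-inserts the carried-out bit as bit 0. A minimal sketch of both formulations (illustrative helpers, not part of this file):

   #include <emmintrin.h>

   // Conventional rotate-left-by-1 of each 32-bit lane.
   static inline __m128i rol1_shift(__m128i r0)
      {
      return _mm_or_si128(_mm_slli_epi32(r0, 1), _mm_srli_epi32(r0, 31));
      }

   // Compare/add/subtract variant used by prep() below: the mask is -1 in
   // lanes with the sign bit set, so r0 + r0 - (-1) == (r0 << 1) | 1 there.
   static inline __m128i rol1_cmp(__m128i r0)
      {
      const __m128i r1 = _mm_cmplt_epi32(r0, _mm_setzero_si128());
      r0 = _mm_add_epi32(r0, r0);
      return _mm_sub_epi32(r0, r1);
      }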
  80          #define prep(prep, XW0, XW1, XW2, XW3, K)                               \
  81   3.08M     do {                                                                 \
  82   3.08M        __m128i r0, r1, r2, r3;                                           \
  83   3.08M                                                                          \
  84   3.08M        /* load W[t-4] 16-byte aligned, and shift */                      \
  85   3.08M        r3 = _mm_srli_si128((XW3), 4);                                    \
  86   3.08M        r0 = (XW0);                                                       \
  87   3.08M        /* get high 64-bits of XW0 into low 64-bits */                    \
  88   3.08M        r1 = _mm_shuffle_epi32((XW0), _MM_SHUFFLE(1,0,3,2));              \
  89   3.08M        /* load high 64-bits of r1 */                                     \
  90   3.08M        r1 = _mm_unpacklo_epi64(r1, (XW1));                               \
  91   3.08M        r2 = (XW2);                                                       \
  92   3.08M                                                                          \
  93   3.08M        r0 = _mm_xor_si128(r1, r0);                                       \
  94   3.08M        r2 = _mm_xor_si128(r3, r2);                                       \
  95   3.08M        r0 = _mm_xor_si128(r2, r0);                                       \
  96   3.08M        /* unrotated W[t]..W[t+2] in r0 ... still need W[t+3] */          \
  97   3.08M                                                                          \
  98   3.08M        r2 = _mm_slli_si128(r0, 12);                                      \
  99   3.08M        r1 = _mm_cmplt_epi32(r0, _mm_setzero_si128());                    \
 100   3.08M        r0 = _mm_add_epi32(r0, r0);   /* shift left by 1 */               \
 101   3.08M        r0 = _mm_sub_epi32(r0, r1);   /* r0 has W[t]..W[t+2] */           \
 102   3.08M                                                                          \
 103   3.08M        r3 = _mm_srli_epi32(r2, 30);                                      \
 104   3.08M        r2 = _mm_slli_epi32(r2, 2);                                       \
 105   3.08M                                                                          \
 106   3.08M        r0 = _mm_xor_si128(r0, r3);                                       \
 107   3.08M        r0 = _mm_xor_si128(r0, r2);   /* r0 now has W[t+3] */             \
 108   3.08M                                                                          \
 109   3.08M        (XW0) = r0;                                                       \
 110   3.08M        (prep).u128 = _mm_add_epi32(r0, K);                               \
 111   3.08M     } while(0)
 112
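The macro's contract: on exit XW0 holds the four new words W[t]..W[t+3], and the prep argument holds the same words with K pre-added. A hedged sanity check against the scalar model sketched earlier (expand4 is the illustrative helper from above, and v4si_check a local stand-in for the union used later; neither is part of this file):

   // Compare prep() with the scalar recurrence for one step (t == 16).
   union v4si_check { uint32_t u32[4]; __m128i u128; };

   bool prep_matches_scalar(const uint32_t W[16])
      {
      uint32_t ref[20];
      std::memcpy(ref, W, 16 * sizeof(uint32_t));   // needs <cstring>
      expand4(ref, 16);

      __m128i XW0 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(W + 0));
      __m128i XW1 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(W + 4));
      __m128i XW2 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(W + 8));
      __m128i XW3 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(W + 12));

      const __m128i K = _mm_set1_epi32(0x5A827999);
      v4si_check P;
      prep(P, XW0, XW1, XW2, XW3, K);

      v4si_check out;
      out.u128 = XW0;
      for(size_t i = 0; i != 4; ++i)
         if(out.u32[i] != ref[16 + i] ||
            P.u32[i] != ref[16 + i] + 0x5A827999)
            return false;
      return true;
      }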
 113          /*
 114           * SHA-160 F1 Function
 115           */
 116          inline void F1(uint32_t A, uint32_t& B, uint32_t C, uint32_t D, uint32_t& E, uint32_t msg)
 117   3.85M     {
 118   3.85M     E += choose(B, C, D) + msg + rotl<5>(A);
 119   3.85M     B  = rotl<30>(B);
 120   3.85M     }
 121
 122          /*
 123           * SHA-160 F2 Function
 124           */
 125          inline void F2(uint32_t A, uint32_t& B, uint32_t C, uint32_t D, uint32_t& E, uint32_t msg)
 126   3.85M     {
 127   3.85M     E += (B ^ C ^ D) + msg + rotl<5>(A);
 128   3.85M     B  = rotl<30>(B);
 129   3.85M     }
 130
 131          /*
 132           * SHA-160 F3 Function
 133           */
 134          inline void F3(uint32_t A, uint32_t& B, uint32_t C, uint32_t D, uint32_t& E, uint32_t msg)
 135   3.85M     {
 136   3.85M     E += majority(B, C, D) + msg + rotl<5>(A);
 137   3.85M     B  = rotl<30>(B);
 138   3.85M     }
 139
 140          /*
 141           * SHA-160 F4 Function
 142           */
 143          inline void F4(uint32_t A, uint32_t& B, uint32_t C, uint32_t D, uint32_t& E, uint32_t msg)
 144   3.85M     {
 145   3.85M     E += (B ^ C ^ D) + msg + rotl<5>(A);
 146   3.85M     B  = rotl<30>(B);
 147   3.85M     }
 148
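F2 and F4 share the same parity function, as SHA-1 specifies for rounds 20-39 and 60-79; choose() and majority() come from bit_ops.h. Their standard definitions, as a scalar sketch (Botan's versions may be expressed differently but compute the same values):

   // Select bits of a where mask is 1, bits of b where mask is 0;
   // equivalent to (mask & a) | (~mask & b).
   inline uint32_t choose(uint32_t mask, uint32_t a, uint32_t b)
      {
      return b ^ (mask & (a ^ b));
      }

   // 1 in each bit position where at least two of a, b, c are 1.
   inline uint32_t majority(uint32_t a, uint32_t b, uint32_t c)
      {
      return (a & b) | (a & c) | (b & c);
      }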
 149          }
 150
 151          }
 152
 153          /*
 154           * SHA-160 Compression Function using SSE for message expansion
 155           */
 156          //static
 157          BOTAN_FUNC_ISA("sse2")
 158          void SHA_160::sse2_compress_n(secure_vector<uint32_t>& digest, const uint8_t input[], size_t blocks)
 159   40.9k     {
 160   40.9k     using namespace SHA1_SSE2_F;
 161
 162   40.9k     const __m128i K00_19 = _mm_set1_epi32(0x5A827999);
 163   40.9k     const __m128i K20_39 = _mm_set1_epi32(0x6ED9EBA1);
 164   40.9k     const __m128i K40_59 = _mm_set1_epi32(0x8F1BBCDC);
 165   40.9k     const __m128i K60_79 = _mm_set1_epi32(0xCA62C1D6);
 166
 167   40.9k     uint32_t A = digest[0],
 168   40.9k              B = digest[1],
 169   40.9k              C = digest[2],
 170   40.9k              D = digest[3],
 171   40.9k              E = digest[4];
 172
 173   40.9k     const __m128i* input_mm = reinterpret_cast<const __m128i*>(input);
 174
 175    233k     for(size_t i = 0; i != blocks; ++i)
 176    192k        {
 177    192k        union v4si {
 178    192k           uint32_t u32[4];
 179    192k           __m128i u128;
 180    192k           };
 181
 182    192k        v4si P0, P1, P2, P3;
 183
 184    192k        __m128i W0 = _mm_loadu_si128(&input_mm[0]);
 185    192k        prep00_15(P0, W0);
 186
 187    192k        __m128i W1 = _mm_loadu_si128(&input_mm[1]);
 188    192k        prep00_15(P1, W1);
 189
 190    192k        __m128i W2 = _mm_loadu_si128(&input_mm[2]);
 191    192k        prep00_15(P2, W2);
 192
 193    192k        __m128i W3 = _mm_loadu_si128(&input_mm[3]);
 194    192k        prep00_15(P3, W3);
 195
 196                /*
 197                Using SSE4; slower on Core2 and Nehalem
 198                #define GET_P_32(P, i) _mm_extract_epi32(P.u128, i)
 199
 200                Much slower on all tested platforms
 201                #define GET_P_32(P,i) _mm_cvtsi128_si32(_mm_srli_si128(P.u128, i*4))
 202                */
 203
 204   15.4M  #define GET_P_32(P, i) P.u32[i]
 205
 206    192k        F1(A, B, C, D, E, GET_P_32(P0, 0));
 207    192k        F1(E, A, B, C, D, GET_P_32(P0, 1));
 208    192k        F1(D, E, A, B, C, GET_P_32(P0, 2));
 209    192k        F1(C, D, E, A, B, GET_P_32(P0, 3));
 210    192k        prep(P0, W0, W1, W2, W3, K00_19);
 211
 212    192k        F1(B, C, D, E, A, GET_P_32(P1, 0));
 213    192k        F1(A, B, C, D, E, GET_P_32(P1, 1));
 214    192k        F1(E, A, B, C, D, GET_P_32(P1, 2));
 215    192k        F1(D, E, A, B, C, GET_P_32(P1, 3));
 216    192k        prep(P1, W1, W2, W3, W0, K20_39);
 217
 218    192k        F1(C, D, E, A, B, GET_P_32(P2, 0));
 219    192k        F1(B, C, D, E, A, GET_P_32(P2, 1));
 220    192k        F1(A, B, C, D, E, GET_P_32(P2, 2));
 221    192k        F1(E, A, B, C, D, GET_P_32(P2, 3));
 222    192k        prep(P2, W2, W3, W0, W1, K20_39);
 223
 224    192k        F1(D, E, A, B, C, GET_P_32(P3, 0));
 225    192k        F1(C, D, E, A, B, GET_P_32(P3, 1));
 226    192k        F1(B, C, D, E, A, GET_P_32(P3, 2));
 227    192k        F1(A, B, C, D, E, GET_P_32(P3, 3));
 228    192k        prep(P3, W3, W0, W1, W2, K20_39);
 229
 230    192k        F1(E, A, B, C, D, GET_P_32(P0, 0));
 231    192k        F1(D, E, A, B, C, GET_P_32(P0, 1));
 232    192k        F1(C, D, E, A, B, GET_P_32(P0, 2));
 233    192k        F1(B, C, D, E, A, GET_P_32(P0, 3));
 234    192k        prep(P0, W0, W1, W2, W3, K20_39);
 235
 236    192k        F2(A, B, C, D, E, GET_P_32(P1, 0));
 237    192k        F2(E, A, B, C, D, GET_P_32(P1, 1));
 238    192k        F2(D, E, A, B, C, GET_P_32(P1, 2));
 239    192k        F2(C, D, E, A, B, GET_P_32(P1, 3));
 240    192k        prep(P1, W1, W2, W3, W0, K20_39);
 241
 242    192k        F2(B, C, D, E, A, GET_P_32(P2, 0));
 243    192k        F2(A, B, C, D, E, GET_P_32(P2, 1));
 244    192k        F2(E, A, B, C, D, GET_P_32(P2, 2));
 245    192k        F2(D, E, A, B, C, GET_P_32(P2, 3));
 246    192k        prep(P2, W2, W3, W0, W1, K40_59);
 247
 248    192k        F2(C, D, E, A, B, GET_P_32(P3, 0));
 249    192k        F2(B, C, D, E, A, GET_P_32(P3, 1));
 250    192k        F2(A, B, C, D, E, GET_P_32(P3, 2));
 251    192k        F2(E, A, B, C, D, GET_P_32(P3, 3));
 252    192k        prep(P3, W3, W0, W1, W2, K40_59);
 253
 254    192k        F2(D, E, A, B, C, GET_P_32(P0, 0));
 255    192k        F2(C, D, E, A, B, GET_P_32(P0, 1));
 256    192k        F2(B, C, D, E, A, GET_P_32(P0, 2));
 257    192k        F2(A, B, C, D, E, GET_P_32(P0, 3));
 258    192k        prep(P0, W0, W1, W2, W3, K40_59);
 259
 260    192k        F2(E, A, B, C, D, GET_P_32(P1, 0));
 261    192k        F2(D, E, A, B, C, GET_P_32(P1, 1));
 262    192k        F2(C, D, E, A, B, GET_P_32(P1, 2));
 263    192k        F2(B, C, D, E, A, GET_P_32(P1, 3));
 264    192k        prep(P1, W1, W2, W3, W0, K40_59);
 265
 266    192k        F3(A, B, C, D, E, GET_P_32(P2, 0));
 267    192k        F3(E, A, B, C, D, GET_P_32(P2, 1));
 268    192k        F3(D, E, A, B, C, GET_P_32(P2, 2));
 269    192k        F3(C, D, E, A, B, GET_P_32(P2, 3));
 270    192k        prep(P2, W2, W3, W0, W1, K40_59);
 271
 272    192k        F3(B, C, D, E, A, GET_P_32(P3, 0));
 273    192k        F3(A, B, C, D, E, GET_P_32(P3, 1));
 274    192k        F3(E, A, B, C, D, GET_P_32(P3, 2));
 275    192k        F3(D, E, A, B, C, GET_P_32(P3, 3));
 276    192k        prep(P3, W3, W0, W1, W2, K60_79);
 277
 278    192k        F3(C, D, E, A, B, GET_P_32(P0, 0));
 279    192k        F3(B, C, D, E, A, GET_P_32(P0, 1));
 280    192k        F3(A, B, C, D, E, GET_P_32(P0, 2));
 281    192k        F3(E, A, B, C, D, GET_P_32(P0, 3));
 282    192k        prep(P0, W0, W1, W2, W3, K60_79);
 283
 284    192k        F3(D, E, A, B, C, GET_P_32(P1, 0));
 285    192k        F3(C, D, E, A, B, GET_P_32(P1, 1));
 286    192k        F3(B, C, D, E, A, GET_P_32(P1, 2));
 287    192k        F3(A, B, C, D, E, GET_P_32(P1, 3));
 288    192k        prep(P1, W1, W2, W3, W0, K60_79);
 289
 290    192k        F3(E, A, B, C, D, GET_P_32(P2, 0));
 291    192k        F3(D, E, A, B, C, GET_P_32(P2, 1));
 292    192k        F3(C, D, E, A, B, GET_P_32(P2, 2));
 293    192k        F3(B, C, D, E, A, GET_P_32(P2, 3));
 294    192k        prep(P2, W2, W3, W0, W1, K60_79);
 295
 296    192k        F4(A, B, C, D, E, GET_P_32(P3, 0));
 297    192k        F4(E, A, B, C, D, GET_P_32(P3, 1));
 298    192k        F4(D, E, A, B, C, GET_P_32(P3, 2));
 299    192k        F4(C, D, E, A, B, GET_P_32(P3, 3));
 300    192k        prep(P3, W3, W0, W1, W2, K60_79);
 301
 302    192k        F4(B, C, D, E, A, GET_P_32(P0, 0));
 303    192k        F4(A, B, C, D, E, GET_P_32(P0, 1));
 304    192k        F4(E, A, B, C, D, GET_P_32(P0, 2));
 305    192k        F4(D, E, A, B, C, GET_P_32(P0, 3));
 306
 307    192k        F4(C, D, E, A, B, GET_P_32(P1, 0));
 308    192k        F4(B, C, D, E, A, GET_P_32(P1, 1));
 309    192k        F4(A, B, C, D, E, GET_P_32(P1, 2));
 310    192k        F4(E, A, B, C, D, GET_P_32(P1, 3));
 311
 312    192k        F4(D, E, A, B, C, GET_P_32(P2, 0));
 313    192k        F4(C, D, E, A, B, GET_P_32(P2, 1));
 314    192k        F4(B, C, D, E, A, GET_P_32(P2, 2));
 315    192k        F4(A, B, C, D, E, GET_P_32(P2, 3));
 316
 317    192k        F4(E, A, B, C, D, GET_P_32(P3, 0));
 318    192k        F4(D, E, A, B, C, GET_P_32(P3, 1));
 319    192k        F4(C, D, E, A, B, GET_P_32(P3, 2));
 320    192k        F4(B, C, D, E, A, GET_P_32(P3, 3));
 321
 322    192k        A = (digest[0] += A);
 323    192k        B = (digest[1] += B);
 324    192k        C = (digest[2] += C);
 325    192k        D = (digest[3] += D);
 326    192k        E = (digest[4] += E);
 327
 328    192k        input_mm += (64 / 16);
 329    192k        }
 330
 331   40.9k  #undef GET_P_32
 332   40.9k     }
 333
 334          #undef prep00_15
 335          #undef prep
 336
 337          }
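For context, this routine is exercised through Botan's public hash API; the library selects the SSE2 compression path at runtime when the CPU supports it. A minimal caller, sketched with the public HashFunction interface:

   #include <botan/hash.h>
   #include <botan/hex.h>
   #include <iostream>

   int main()
      {
      // On SSE2-capable x86, SHA-1 dispatches to sse2_compress_n internally.
      auto sha1 = Botan::HashFunction::create_or_throw("SHA-1");
      sha1->update("abc");
      std::cout << Botan::hex_encode(sha1->final()) << "\n";
      return 0;
      }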