Coverage Report

Created: 2021-02-21 07:20

/src/botan/src/lib/hash/sha1/sha1_sse2/sha1_sse2.cpp
Line | Count | Source
-----+-------+--------------------------------------------------------------------------------
   1 |       | /*
   2 |       | * SHA-1 using SSE2
   3 |       | * Based on public domain code by Dean Gaudet
   4 |       | *    (http://arctic.org/~dean/crypto/sha1.html)
   5 |       | * (C) 2009-2011 Jack Lloyd
   6 |       | *
   7 |       | * Botan is released under the Simplified BSD License (see license.txt)
   8 |       | */
   9 |       |
  10 |       | #include <botan/internal/sha160.h>
  11 |       | #include <botan/internal/rotate.h>
  12 |       | #include <botan/internal/bit_ops.h>
  13 |       | #include <emmintrin.h>
  14 |       |
  15 |       | namespace Botan {
  16 |       |
  17 |       | namespace SHA1_SSE2_F {
  18 |       |
  19 |       | namespace {
  20 |       |
  21 |       | /*
  22 |       | * First 16 bytes just need byte swapping. Preparing just means
  23 |       | * adding in the round constants.
  24 |       | */
  25 |       |
  26 |       | #define prep00_15(P, W)                                      \
  27 |  761k |    do {                                                      \
  28 |  761k |       W = _mm_shufflehi_epi16(W, _MM_SHUFFLE(2, 3, 0, 1));   \
  29 |  761k |       W = _mm_shufflelo_epi16(W, _MM_SHUFFLE(2, 3, 0, 1));   \
  30 |  761k |       W = _mm_or_si128(_mm_slli_epi16(W, 8),                 \
  31 |  761k |                        _mm_srli_epi16(W, 8));                \
  32 |  761k |       P.u128 = _mm_add_epi32(W, K00_19);                     \
  33 |  761k |    } while(0)
  34 |       |
  35 |       | /*
  36 |       | For each multiple of 4, t, we want to calculate this:
  37 |       |
  38 |       | W[t+0] = rol(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1);
  39 |       | W[t+1] = rol(W[t-2] ^ W[t-7] ^ W[t-13] ^ W[t-15], 1);
  40 |       | W[t+2] = rol(W[t-1] ^ W[t-6] ^ W[t-12] ^ W[t-14], 1);
  41 |       | W[t+3] = rol(W[t]   ^ W[t-5] ^ W[t-11] ^ W[t-13], 1);
  42 |       |
  43 |       | we'll actually calculate this:
  44 |       |
  45 |       | W[t+0] = rol(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1);
  46 |       | W[t+1] = rol(W[t-2] ^ W[t-7] ^ W[t-13] ^ W[t-15], 1);
  47 |       | W[t+2] = rol(W[t-1] ^ W[t-6] ^ W[t-12] ^ W[t-14], 1);
  48 |       | W[t+3] = rol(  0    ^ W[t-5] ^ W[t-11] ^ W[t-13], 1);
  49 |       | W[t+3] ^= rol(W[t+0], 1);
  50 |       |
  51 |       | the parameters are:
  52 |       |
  53 |       | W0 = &W[t-16];
  54 |       | W1 = &W[t-12];
  55 |       | W2 = &W[t- 8];
  56 |       | W3 = &W[t- 4];
  57 |       |
  58 |       | and on output:
  59 |       | prepared = W0 + K
  60 |       | W0 = W[t]..W[t+3]
  61 |       | */
  62 |       |
  63 |       | /* note that there is a step here where i want to do a rol by 1, which
  64 |       | * normally would look like this:
  65 |       | *
  66 |       | * r1 = psrld r0,$31
  67 |       | * r0 = pslld r0,$1
  68 |       | * r0 = por r0,r1
  69 |       | *
  70 |       | * but instead i do this:
  71 |       | *
  72 |       | * r1 = pcmpltd r0,zero
  73 |       | * r0 = paddd r0,r0
  74 |       | * r0 = psub r0,r1
  75 |       | *
  76 |       | * because pcmpltd and paddd are available in both MMX units on
  77 |       | * efficeon, pentium-m, and opteron but shifts are available in
  78 |       | * only one unit.
  79 |       | */
  80 |       | #define prep(prep, XW0, XW1, XW2, XW3, K)                               \
  81 | 3.04M |    do {                                                                 \
  82 | 3.04M |       __m128i r0, r1, r2, r3;                                           \
  83 | 3.04M |                                                                         \
  84 | 3.04M |       /* load W[t-4] 16-byte aligned, and shift */                      \
  85 | 3.04M |       r3 = _mm_srli_si128((XW3), 4);                                    \
  86 | 3.04M |       r0 = (XW0);                                                       \
  87 | 3.04M |       /* get high 64-bits of XW0 into low 64-bits */                    \
  88 | 3.04M |       r1 = _mm_shuffle_epi32((XW0), _MM_SHUFFLE(1,0,3,2));              \
  89 | 3.04M |       /* load high 64-bits of r1 */                                     \
  90 | 3.04M |       r1 = _mm_unpacklo_epi64(r1, (XW1));                               \
  91 | 3.04M |       r2 = (XW2);                                                       \
  92 | 3.04M |                                                                         \
  93 | 3.04M |       r0 = _mm_xor_si128(r1, r0);                                       \
  94 | 3.04M |       r2 = _mm_xor_si128(r3, r2);                                       \
  95 | 3.04M |       r0 = _mm_xor_si128(r2, r0);                                       \
  96 | 3.04M |       /* unrotated W[t]..W[t+2] in r0 ... still need W[t+3] */          \
  97 | 3.04M |                                                                         \
  98 | 3.04M |       r2 = _mm_slli_si128(r0, 12);                                      \
  99 | 3.04M |       r1 = _mm_cmplt_epi32(r0, _mm_setzero_si128());                    \
 100 | 3.04M |       r0 = _mm_add_epi32(r0, r0);   /* shift left by 1 */               \
 101 | 3.04M |       r0 = _mm_sub_epi32(r0, r1);   /* r0 has W[t]..W[t+2] */           \
 102 | 3.04M |                                                                         \
 103 | 3.04M |       r3 = _mm_srli_epi32(r2, 30);                                      \
 104 | 3.04M |       r2 = _mm_slli_epi32(r2, 2);                                       \
 105 | 3.04M |                                                                         \
 106 | 3.04M |       r0 = _mm_xor_si128(r0, r3);                                       \
 107 | 3.04M |       r0 = _mm_xor_si128(r0, r2);   /* r0 now has W[t+3] */             \
 108 | 3.04M |                                                                         \
 109 | 3.04M |       (XW0) = r0;                                                       \
 110 | 3.04M |       (prep).u128 = _mm_add_epi32(r0, K);                               \
 111 | 3.04M |    } while(0)
 112 |       |
 113 |       | /*
 114 |       | * SHA-160 F1 Function
 115 |       | */
 116 |       | inline void F1(uint32_t A, uint32_t& B, uint32_t C, uint32_t D, uint32_t& E, uint32_t msg)
 117 | 3.80M |    {
 118 | 3.80M |    E += choose(B, C, D) + msg + rotl<5>(A);
 119 | 3.80M |    B  = rotl<30>(B);
 120 | 3.80M |    }
 121 |       |
 122 |       | /*
 123 |       | * SHA-160 F2 Function
 124 |       | */
 125 |       | inline void F2(uint32_t A, uint32_t& B, uint32_t C, uint32_t D, uint32_t& E, uint32_t msg)
 126 | 3.80M |    {
 127 | 3.80M |    E += (B ^ C ^ D) + msg + rotl<5>(A);
 128 | 3.80M |    B  = rotl<30>(B);
 129 | 3.80M |    }
 130 |       |
 131 |       | /*
 132 |       | * SHA-160 F3 Function
 133 |       | */
 134 |       | inline void F3(uint32_t A, uint32_t& B, uint32_t C, uint32_t D, uint32_t& E, uint32_t msg)
 135 | 3.80M |    {
 136 | 3.80M |    E += majority(B, C, D) + msg + rotl<5>(A);
 137 | 3.80M |    B  = rotl<30>(B);
 138 | 3.80M |    }
 139 |       |
 140 |       | /*
 141 |       | * SHA-160 F4 Function
 142 |       | */
 143 |       | inline void F4(uint32_t A, uint32_t& B, uint32_t C, uint32_t D, uint32_t& E, uint32_t msg)
 144 | 3.80M |    {
 145 | 3.80M |    E += (B ^ C ^ D) + msg + rotl<5>(A);
 146 | 3.80M |    B  = rotl<30>(B);
 147 | 3.80M |    }
 148 |       |
 149 |       | }
 150 |       |
 151 |       | }
 152 |       |
 153 |       | /*
 154 |       | * SHA-160 Compression Function using SSE for message expansion
 155 |       | */
 156 |       | //static
 157 |       | BOTAN_FUNC_ISA("sse2")
 158 |       | void SHA_160::sse2_compress_n(secure_vector<uint32_t>& digest, const uint8_t input[], size_t blocks)
 159 | 50.3k |    {
 160 | 50.3k |    using namespace SHA1_SSE2_F;
 161 |       |
 162 | 50.3k |    const __m128i K00_19 = _mm_set1_epi32(0x5A827999);
 163 | 50.3k |    const __m128i K20_39 = _mm_set1_epi32(0x6ED9EBA1);
 164 | 50.3k |    const __m128i K40_59 = _mm_set1_epi32(0x8F1BBCDC);
 165 | 50.3k |    const __m128i K60_79 = _mm_set1_epi32(0xCA62C1D6);
 166 |       |
 167 | 50.3k |    uint32_t A = digest[0],
 168 | 50.3k |           B = digest[1],
 169 | 50.3k |           C = digest[2],
 170 | 50.3k |           D = digest[3],
 171 | 50.3k |           E = digest[4];
 172 |       |
 173 | 50.3k |    const __m128i* input_mm = reinterpret_cast<const __m128i*>(input);
 174 |       |
 175 |  240k |    for(size_t i = 0; i != blocks; ++i)
 176 |  190k |       {
 177 |  190k |       union v4si {
 178 |  190k |          uint32_t u32[4];
 179 |  190k |          __m128i u128;
 180 |  190k |          };
 181 |       |
 182 |  190k |       v4si P0, P1, P2, P3;
 183 |       |
 184 |  190k |       __m128i W0 = _mm_loadu_si128(&input_mm[0]);
 185 |  190k |       prep00_15(P0, W0);
 186 |       |
 187 |  190k |       __m128i W1 = _mm_loadu_si128(&input_mm[1]);
 188 |  190k |       prep00_15(P1, W1);
 189 |       |
 190 |  190k |       __m128i W2 = _mm_loadu_si128(&input_mm[2]);
 191 |  190k |       prep00_15(P2, W2);
 192 |       |
 193 |  190k |       __m128i W3 = _mm_loadu_si128(&input_mm[3]);
 194 |  190k |       prep00_15(P3, W3);
 195 |       |
 196 |       |       /*
 197 |       |       Using SSE4; slower on Core2 and Nehalem
 198 |       |       #define GET_P_32(P, i) _mm_extract_epi32(P.u128, i)
 199 |       |
 200 |       |       Much slower on all tested platforms
 201 |       |       #define GET_P_32(P,i) _mm_cvtsi128_si32(_mm_srli_si128(P.u128, i*4))
 202 |       |       */
 203 |       |
 204 | 15.2M | #define GET_P_32(P, i) P.u32[i]
 205 |       |
 206 |  190k |       F1(A, B, C, D, E, GET_P_32(P0, 0));
 207 |  190k |       F1(E, A, B, C, D, GET_P_32(P0, 1));
 208 |  190k |       F1(D, E, A, B, C, GET_P_32(P0, 2));
 209 |  190k |       F1(C, D, E, A, B, GET_P_32(P0, 3));
 210 |  190k |       prep(P0, W0, W1, W2, W3, K00_19);
 211 |       |
 212 |  190k |       F1(B, C, D, E, A, GET_P_32(P1, 0));
 213 |  190k |       F1(A, B, C, D, E, GET_P_32(P1, 1));
 214 |  190k |       F1(E, A, B, C, D, GET_P_32(P1, 2));
 215 |  190k |       F1(D, E, A, B, C, GET_P_32(P1, 3));
 216 |  190k |       prep(P1, W1, W2, W3, W0, K20_39);
 217 |       |
 218 |  190k |       F1(C, D, E, A, B, GET_P_32(P2, 0));
 219 |  190k |       F1(B, C, D, E, A, GET_P_32(P2, 1));
 220 |  190k |       F1(A, B, C, D, E, GET_P_32(P2, 2));
 221 |  190k |       F1(E, A, B, C, D, GET_P_32(P2, 3));
 222 |  190k |       prep(P2, W2, W3, W0, W1, K20_39);
 223 |       |
 224 |  190k |       F1(D, E, A, B, C, GET_P_32(P3, 0));
 225 |  190k |       F1(C, D, E, A, B, GET_P_32(P3, 1));
 226 |  190k |       F1(B, C, D, E, A, GET_P_32(P3, 2));
 227 |  190k |       F1(A, B, C, D, E, GET_P_32(P3, 3));
 228 |  190k |       prep(P3, W3, W0, W1, W2, K20_39);
 229 |       |
 230 |  190k |       F1(E, A, B, C, D, GET_P_32(P0, 0));
 231 |  190k |       F1(D, E, A, B, C, GET_P_32(P0, 1));
 232 |  190k |       F1(C, D, E, A, B, GET_P_32(P0, 2));
 233 |  190k |       F1(B, C, D, E, A, GET_P_32(P0, 3));
 234 |  190k |       prep(P0, W0, W1, W2, W3, K20_39);
 235 |       |
 236 |  190k |       F2(A, B, C, D, E, GET_P_32(P1, 0));
 237 |  190k |       F2(E, A, B, C, D, GET_P_32(P1, 1));
 238 |  190k |       F2(D, E, A, B, C, GET_P_32(P1, 2));
 239 |  190k |       F2(C, D, E, A, B, GET_P_32(P1, 3));
 240 |  190k |       prep(P1, W1, W2, W3, W0, K20_39);
 241 |       |
 242 |  190k |       F2(B, C, D, E, A, GET_P_32(P2, 0));
 243 |  190k |       F2(A, B, C, D, E, GET_P_32(P2, 1));
 244 |  190k |       F2(E, A, B, C, D, GET_P_32(P2, 2));
 245 |  190k |       F2(D, E, A, B, C, GET_P_32(P2, 3));
 246 |  190k |       prep(P2, W2, W3, W0, W1, K40_59);
 247 |       |
 248 |  190k |       F2(C, D, E, A, B, GET_P_32(P3, 0));
 249 |  190k |       F2(B, C, D, E, A, GET_P_32(P3, 1));
 250 |  190k |       F2(A, B, C, D, E, GET_P_32(P3, 2));
 251 |  190k |       F2(E, A, B, C, D, GET_P_32(P3, 3));
 252 |  190k |       prep(P3, W3, W0, W1, W2, K40_59);
 253 |       |
 254 |  190k |       F2(D, E, A, B, C, GET_P_32(P0, 0));
 255 |  190k |       F2(C, D, E, A, B, GET_P_32(P0, 1));
 256 |  190k |       F2(B, C, D, E, A, GET_P_32(P0, 2));
 257 |  190k |       F2(A, B, C, D, E, GET_P_32(P0, 3));
 258 |  190k |       prep(P0, W0, W1, W2, W3, K40_59);
 259 |       |
 260 |  190k |       F2(E, A, B, C, D, GET_P_32(P1, 0));
 261 |  190k |       F2(D, E, A, B, C, GET_P_32(P1, 1));
 262 |  190k |       F2(C, D, E, A, B, GET_P_32(P1, 2));
 263 |  190k |       F2(B, C, D, E, A, GET_P_32(P1, 3));
 264 |  190k |       prep(P1, W1, W2, W3, W0, K40_59);
 265 |       |
 266 |  190k |       F3(A, B, C, D, E, GET_P_32(P2, 0));
 267 |  190k |       F3(E, A, B, C, D, GET_P_32(P2, 1));
 268 |  190k |       F3(D, E, A, B, C, GET_P_32(P2, 2));
 269 |  190k |       F3(C, D, E, A, B, GET_P_32(P2, 3));
 270 |  190k |       prep(P2, W2, W3, W0, W1, K40_59);
 271 |       |
 272 |  190k |       F3(B, C, D, E, A, GET_P_32(P3, 0));
 273 |  190k |       F3(A, B, C, D, E, GET_P_32(P3, 1));
 274 |  190k |       F3(E, A, B, C, D, GET_P_32(P3, 2));
 275 |  190k |       F3(D, E, A, B, C, GET_P_32(P3, 3));
 276 |  190k |       prep(P3, W3, W0, W1, W2, K60_79);
 277 |       |
 278 |  190k |       F3(C, D, E, A, B, GET_P_32(P0, 0));
 279 |  190k |       F3(B, C, D, E, A, GET_P_32(P0, 1));
 280 |  190k |       F3(A, B, C, D, E, GET_P_32(P0, 2));
 281 |  190k |       F3(E, A, B, C, D, GET_P_32(P0, 3));
 282 |  190k |       prep(P0, W0, W1, W2, W3, K60_79);
 283 |       |
 284 |  190k |       F3(D, E, A, B, C, GET_P_32(P1, 0));
 285 |  190k |       F3(C, D, E, A, B, GET_P_32(P1, 1));
 286 |  190k |       F3(B, C, D, E, A, GET_P_32(P1, 2));
 287 |  190k |       F3(A, B, C, D, E, GET_P_32(P1, 3));
 288 |  190k |       prep(P1, W1, W2, W3, W0, K60_79);
 289 |       |
 290 |  190k |       F3(E, A, B, C, D, GET_P_32(P2, 0));
 291 |  190k |       F3(D, E, A, B, C, GET_P_32(P2, 1));
 292 |  190k |       F3(C, D, E, A, B, GET_P_32(P2, 2));
 293 |  190k |       F3(B, C, D, E, A, GET_P_32(P2, 3));
 294 |  190k |       prep(P2, W2, W3, W0, W1, K60_79);
 295 |       |
 296 |  190k |       F4(A, B, C, D, E, GET_P_32(P3, 0));
 297 |  190k |       F4(E, A, B, C, D, GET_P_32(P3, 1));
 298 |  190k |       F4(D, E, A, B, C, GET_P_32(P3, 2));
 299 |  190k |       F4(C, D, E, A, B, GET_P_32(P3, 3));
 300 |  190k |       prep(P3, W3, W0, W1, W2, K60_79);
 301 |       |
 302 |  190k |       F4(B, C, D, E, A, GET_P_32(P0, 0));
 303 |  190k |       F4(A, B, C, D, E, GET_P_32(P0, 1));
 304 |  190k |       F4(E, A, B, C, D, GET_P_32(P0, 2));
 305 |  190k |       F4(D, E, A, B, C, GET_P_32(P0, 3));
 306 |       |
 307 |  190k |       F4(C, D, E, A, B, GET_P_32(P1, 0));
 308 |  190k |       F4(B, C, D, E, A, GET_P_32(P1, 1));
 309 |  190k |       F4(A, B, C, D, E, GET_P_32(P1, 2));
 310 |  190k |       F4(E, A, B, C, D, GET_P_32(P1, 3));
 311 |       |
 312 |  190k |       F4(D, E, A, B, C, GET_P_32(P2, 0));
 313 |  190k |       F4(C, D, E, A, B, GET_P_32(P2, 1));
 314 |  190k |       F4(B, C, D, E, A, GET_P_32(P2, 2));
 315 |  190k |       F4(A, B, C, D, E, GET_P_32(P2, 3));
 316 |       |
 317 |  190k |       F4(E, A, B, C, D, GET_P_32(P3, 0));
 318 |  190k |       F4(D, E, A, B, C, GET_P_32(P3, 1));
 319 |  190k |       F4(C, D, E, A, B, GET_P_32(P3, 2));
 320 |  190k |       F4(B, C, D, E, A, GET_P_32(P3, 3));
 321 |       |
 322 |  190k |       A = (digest[0] += A);
 323 |  190k |       B = (digest[1] += B);
 324 |  190k |       C = (digest[2] += C);
 325 |  190k |       D = (digest[3] += D);
 326 |  190k |       E = (digest[4] += E);
 327 |       |
 328 |  190k |       input_mm += (64 / 16);
 329 |  190k |       }
 330 |       |
 331 | 50.3k | #undef GET_P_32
 332 | 50.3k |    }
 333 |       |
 334 |       | #undef prep00_15
 335 |       | #undef prep
 336 |       |
 337 |       | }
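
Notes on the listing above.

The prep() macro (source lines 80-111) computes the SHA-1 message schedule four words at a time. The recurrence it parallelizes is the standard W[t] = rol(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1); the comment at source lines 35-49 shows why the W[t+3] lane is first computed with a zero in place of the not-yet-available W[t] and then patched with rol(W[t+0], 1).

The comment at source lines 63-79 describes replacing the usual shift/or rotate-by-1 with a compare/add/subtract sequence. Below is a minimal scalar sketch of that identity for checking it by hand; the helper names are hypothetical and not part of the Botan source. The compare yields an all-ones mask exactly when the top bit of x is set, doubling x drops that top bit, and subtracting the all-ones mask (i.e. adding 1 modulo 2^32) re-inserts it at bit 0.

   #include <cassert>
   #include <cstdint>

   // Conventional rotate-left-by-1 of a 32-bit word.
   static uint32_t rotl1_shift(uint32_t x)
      {
      return (x << 1) | (x >> 31);
      }

   // Same result via the pcmpltd/paddd/psubd style used in prep():
   // mask is all-ones iff the sign bit of x is set, and
   // (x + x) - mask == (x << 1) + (x >> 31) modulo 2^32.
   static uint32_t rotl1_cmp_add(uint32_t x)
      {
      const uint32_t mask = (static_cast<int32_t>(x) < 0) ? 0xFFFFFFFF : 0;
      return (x + x) - mask;
      }

   int main()
      {
      for(uint32_t x : { 0u, 1u, 0x7FFFFFFFu, 0x80000000u, 0xDEADBEEFu })
         assert(rotl1_shift(x) == rotl1_cmp_add(x));
      return 0;
      }

As the source comment notes, the motivation for the compare/add/subtract form is instruction scheduling on the CPUs of that era (Efficeon, Pentium M, Opteron), where packed compares and adds could issue on more execution units than packed shifts.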