Coverage Report

Created: 2020-06-30 13:58

/src/botan/src/lib/hash/sha1/sha1_sse2/sha1_sse2.cpp
Line | Count | Source
   1 |       | /*
   2 |       | * SHA-1 using SSE2
   3 |       | * Based on public domain code by Dean Gaudet
   4 |       | *    (http://arctic.org/~dean/crypto/sha1.html)
   5 |       | * (C) 2009-2011 Jack Lloyd
   6 |       | *
   7 |       | * Botan is released under the Simplified BSD License (see license.txt)
   8 |       | */
   9 |       |
  10 |       | #include <botan/sha160.h>
  11 |       | #include <botan/rotate.h>
  12 |       | #include <emmintrin.h>
  13 |       |
  14 |       | namespace Botan {
  15 |       |
  16 |       | namespace SHA1_SSE2_F {
  17 |       |
  18 |       | namespace {
  19 |       |
  20 |       | /*
  21 |       | * First 16 bytes just need byte swapping. Preparing just means
  22 |       | * adding in the round constants.
  23 |       | */
  24 |       |
  25 |       | #define prep00_15(P, W)                                      \
  26 |  765k |    do {                                                      \
  27 |  765k |       W = _mm_shufflehi_epi16(W, _MM_SHUFFLE(2, 3, 0, 1));   \
  28 |  765k |       W = _mm_shufflelo_epi16(W, _MM_SHUFFLE(2, 3, 0, 1));   \
  29 |  765k |       W = _mm_or_si128(_mm_slli_epi16(W, 8),                 \
  30 |  765k |                        _mm_srli_epi16(W, 8));                \
  31 |  765k |       P.u128 = _mm_add_epi32(W, K00_19);                     \
  32 |  765k |    } while(0)
  33 |       |
  34 |       | /*
  35 |       | For each multiple of 4, t, we want to calculate this:
  36 |       |
  37 |       | W[t+0] = rol(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1);
  38 |       | W[t+1] = rol(W[t-2] ^ W[t-7] ^ W[t-13] ^ W[t-15], 1);
  39 |       | W[t+2] = rol(W[t-1] ^ W[t-6] ^ W[t-12] ^ W[t-14], 1);
  40 |       | W[t+3] = rol(W[t]   ^ W[t-5] ^ W[t-11] ^ W[t-13], 1);
  41 |       |
  42 |       | we'll actually calculate this:
  43 |       |
  44 |       | W[t+0] = rol(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1);
  45 |       | W[t+1] = rol(W[t-2] ^ W[t-7] ^ W[t-13] ^ W[t-15], 1);
  46 |       | W[t+2] = rol(W[t-1] ^ W[t-6] ^ W[t-12] ^ W[t-14], 1);
  47 |       | W[t+3] = rol(  0    ^ W[t-5] ^ W[t-11] ^ W[t-13], 1);
  48 |       | W[t+3] ^= rol(W[t+0], 1);
  49 |       |
  50 |       | the parameters are:
  51 |       |
  52 |       | W0 = &W[t-16];
  53 |       | W1 = &W[t-12];
  54 |       | W2 = &W[t- 8];
  55 |       | W3 = &W[t- 4];
  56 |       |
  57 |       | and on output:
  58 |       | prepared = W0 + K
  59 |       | W0 = W[t]..W[t+3]
  60 |       | */
  61 |       |
  62 |       | /* note that there is a step here where i want to do a rol by 1, which
  63 |       | * normally would look like this:
  64 |       | *
  65 |       | * r1 = psrld r0,$31
  66 |       | * r0 = pslld r0,$1
  67 |       | * r0 = por r0,r1
  68 |       | *
  69 |       | * but instead i do this:
  70 |       | *
  71 |       | * r1 = pcmpltd r0,zero
  72 |       | * r0 = paddd r0,r0
  73 |       | * r0 = psub r0,r1
  74 |       | *
  75 |       | * because pcmpltd and paddd are available in both MMX units on
  76 |       | * efficeon, pentium-m, and opteron but shifts are available in
  77 |       | * only one unit.
  78 |       | */
  79 |       | #define prep(prep, XW0, XW1, XW2, XW3, K)                               \
  80 | 3.06M |    do {                                                                 \
  81 | 3.06M |       __m128i r0, r1, r2, r3;                                           \
  82 | 3.06M |                                                                         \
  83 | 3.06M |       /* load W[t-4] 16-byte aligned, and shift */                      \
  84 | 3.06M |       r3 = _mm_srli_si128((XW3), 4);                                    \
  85 | 3.06M |       r0 = (XW0);                                                       \
  86 | 3.06M |       /* get high 64-bits of XW0 into low 64-bits */                    \
  87 | 3.06M |       r1 = _mm_shuffle_epi32((XW0), _MM_SHUFFLE(1,0,3,2));              \
  88 | 3.06M |       /* load high 64-bits of r1 */                                     \
  89 | 3.06M |       r1 = _mm_unpacklo_epi64(r1, (XW1));                               \
  90 | 3.06M |       r2 = (XW2);                                                       \
  91 | 3.06M |                                                                         \
  92 | 3.06M |       r0 = _mm_xor_si128(r1, r0);                                       \
  93 | 3.06M |       r2 = _mm_xor_si128(r3, r2);                                       \
  94 | 3.06M |       r0 = _mm_xor_si128(r2, r0);                                       \
  95 | 3.06M |       /* unrotated W[t]..W[t+2] in r0 ... still need W[t+3] */          \
  96 | 3.06M |                                                                         \
  97 | 3.06M |       r2 = _mm_slli_si128(r0, 12);                                      \
  98 | 3.06M |       r1 = _mm_cmplt_epi32(r0, _mm_setzero_si128());                    \
  99 | 3.06M |       r0 = _mm_add_epi32(r0, r0);   /* shift left by 1 */               \
 100 | 3.06M |       r0 = _mm_sub_epi32(r0, r1);   /* r0 has W[t]..W[t+2] */           \
 101 | 3.06M |                                                                         \
 102 | 3.06M |       r3 = _mm_srli_epi32(r2, 30);                                      \
 103 | 3.06M |       r2 = _mm_slli_epi32(r2, 2);                                       \
 104 | 3.06M |                                                                         \
 105 | 3.06M |       r0 = _mm_xor_si128(r0, r3);                                       \
 106 | 3.06M |       r0 = _mm_xor_si128(r0, r2);   /* r0 now has W[t+3] */             \
 107 | 3.06M |                                                                         \
 108 | 3.06M |       (XW0) = r0;                                                       \
 109 | 3.06M |       (prep).u128 = _mm_add_epi32(r0, K);                               \
 110 | 3.06M |    } while(0)
 111 |       |
 112 |       | /*
 113 |       | * SHA-160 F1 Function
 114 |       | */
 115 |       | inline void F1(uint32_t A, uint32_t& B, uint32_t C, uint32_t D, uint32_t& E, uint32_t msg)
 116 | 3.82M |    {
 117 | 3.82M |    E += (D ^ (B & (C ^ D))) + msg + rotl<5>(A);
 118 | 3.82M |    B  = rotl<30>(B);
 119 | 3.82M |    }
 120 |       |
 121 |       | /*
 122 |       | * SHA-160 F2 Function
 123 |       | */
 124 |       | inline void F2(uint32_t A, uint32_t& B, uint32_t C, uint32_t D, uint32_t& E, uint32_t msg)
 125 | 3.82M |    {
 126 | 3.82M |    E += (B ^ C ^ D) + msg + rotl<5>(A);
 127 | 3.82M |    B  = rotl<30>(B);
 128 | 3.82M |    }
 129 |       |
 130 |       | /*
 131 |       | * SHA-160 F3 Function
 132 |       | */
 133 |       | inline void F3(uint32_t A, uint32_t& B, uint32_t C, uint32_t D, uint32_t& E, uint32_t msg)
 134 | 3.82M |    {
 135 | 3.82M |    E += ((B & C) | ((B | C) & D)) + msg + rotl<5>(A);
 136 | 3.82M |    B  = rotl<30>(B);
 137 | 3.82M |    }
 138 |       |
 139 |       | /*
 140 |       | * SHA-160 F4 Function
 141 |       | */
 142 |       | inline void F4(uint32_t A, uint32_t& B, uint32_t C, uint32_t D, uint32_t& E, uint32_t msg)
 143 | 3.82M |    {
 144 | 3.82M |    E += (B ^ C ^ D) + msg + rotl<5>(A);
 145 | 3.82M |    B  = rotl<30>(B);
 146 | 3.82M |    }
 147 |       |
 148 |       | }
 149 |       |
 150 |       | }
 151 |       |
 152 |       | /*
 153 |       | * SHA-160 Compression Function using SSE for message expansion
 154 |       | */
 155 |       | //static
 156 |       | BOTAN_FUNC_ISA("sse2")
 157 |       | void SHA_160::sse2_compress_n(secure_vector<uint32_t>& digest, const uint8_t input[], size_t blocks)
 158 | 51.0k |    {
 159 | 51.0k |    using namespace SHA1_SSE2_F;
 160 | 51.0k |
 161 | 51.0k |    const __m128i K00_19 = _mm_set1_epi32(0x5A827999);
 162 | 51.0k |    const __m128i K20_39 = _mm_set1_epi32(0x6ED9EBA1);
 163 | 51.0k |    const __m128i K40_59 = _mm_set1_epi32(0x8F1BBCDC);
 164 | 51.0k |    const __m128i K60_79 = _mm_set1_epi32(0xCA62C1D6);
 165 | 51.0k |
 166 | 51.0k |    uint32_t A = digest[0],
 167 | 51.0k |           B = digest[1],
 168 | 51.0k |           C = digest[2],
 169 | 51.0k |           D = digest[3],
 170 | 51.0k |           E = digest[4];
 171 | 51.0k |
 172 | 51.0k |    const __m128i* input_mm = reinterpret_cast<const __m128i*>(input);
 173 | 51.0k |
 174 |  242k |    for(size_t i = 0; i != blocks; ++i)
 175 |  191k |       {
 176 |  191k |       union v4si {
 177 |  191k |          uint32_t u32[4];
 178 |  191k |          __m128i u128;
 179 |  191k |          };
 180 |  191k |
 181 |  191k |       v4si P0, P1, P2, P3;
 182 |  191k |
 183 |  191k |       __m128i W0 = _mm_loadu_si128(&input_mm[0]);
 184 |  191k |       prep00_15(P0, W0);
 185 |  191k |
 186 |  191k |       __m128i W1 = _mm_loadu_si128(&input_mm[1]);
 187 |  191k |       prep00_15(P1, W1);
 188 |  191k |
 189 |  191k |       __m128i W2 = _mm_loadu_si128(&input_mm[2]);
 190 |  191k |       prep00_15(P2, W2);
 191 |  191k |
 192 |  191k |       __m128i W3 = _mm_loadu_si128(&input_mm[3]);
 193 |  191k |       prep00_15(P3, W3);
 194 |  191k |
 195 |  191k |       /*
 196 |  191k |       Using SSE4; slower on Core2 and Nehalem
 197 |  191k |       #define GET_P_32(P, i) _mm_extract_epi32(P.u128, i)
 198 |  191k |
 199 |  191k |       Much slower on all tested platforms
 200 |  191k |       #define GET_P_32(P,i) _mm_cvtsi128_si32(_mm_srli_si128(P.u128, i*4))
 201 |  191k |       */
 202 |  191k |
 203 | 15.3M | #define GET_P_32(P, i) P.u32[i]
 204 |  191k |
 205 |  191k |       F1(A, B, C, D, E, GET_P_32(P0, 0));
 206 |  191k |       F1(E, A, B, C, D, GET_P_32(P0, 1));
 207 |  191k |       F1(D, E, A, B, C, GET_P_32(P0, 2));
 208 |  191k |       F1(C, D, E, A, B, GET_P_32(P0, 3));
 209 |  191k |       prep(P0, W0, W1, W2, W3, K00_19);
 210 |  191k |
 211 |  191k |       F1(B, C, D, E, A, GET_P_32(P1, 0));
 212 |  191k |       F1(A, B, C, D, E, GET_P_32(P1, 1));
 213 |  191k |       F1(E, A, B, C, D, GET_P_32(P1, 2));
 214 |  191k |       F1(D, E, A, B, C, GET_P_32(P1, 3));
 215 |  191k |       prep(P1, W1, W2, W3, W0, K20_39);
 216 |  191k |
 217 |  191k |       F1(C, D, E, A, B, GET_P_32(P2, 0));
 218 |  191k |       F1(B, C, D, E, A, GET_P_32(P2, 1));
 219 |  191k |       F1(A, B, C, D, E, GET_P_32(P2, 2));
 220 |  191k |       F1(E, A, B, C, D, GET_P_32(P2, 3));
 221 |  191k |       prep(P2, W2, W3, W0, W1, K20_39);
 222 |  191k |
 223 |  191k |       F1(D, E, A, B, C, GET_P_32(P3, 0));
 224 |  191k |       F1(C, D, E, A, B, GET_P_32(P3, 1));
 225 |  191k |       F1(B, C, D, E, A, GET_P_32(P3, 2));
 226 |  191k |       F1(A, B, C, D, E, GET_P_32(P3, 3));
 227 |  191k |       prep(P3, W3, W0, W1, W2, K20_39);
 228 |  191k |
 229 |  191k |       F1(E, A, B, C, D, GET_P_32(P0, 0));
 230 |  191k |       F1(D, E, A, B, C, GET_P_32(P0, 1));
 231 |  191k |       F1(C, D, E, A, B, GET_P_32(P0, 2));
 232 |  191k |       F1(B, C, D, E, A, GET_P_32(P0, 3));
 233 |  191k |       prep(P0, W0, W1, W2, W3, K20_39);
 234 |  191k |
 235 |  191k |       F2(A, B, C, D, E, GET_P_32(P1, 0));
 236 |  191k |       F2(E, A, B, C, D, GET_P_32(P1, 1));
 237 |  191k |       F2(D, E, A, B, C, GET_P_32(P1, 2));
 238 |  191k |       F2(C, D, E, A, B, GET_P_32(P1, 3));
 239 |  191k |       prep(P1, W1, W2, W3, W0, K20_39);
 240 |  191k |
 241 |  191k |       F2(B, C, D, E, A, GET_P_32(P2, 0));
 242 |  191k |       F2(A, B, C, D, E, GET_P_32(P2, 1));
 243 |  191k |       F2(E, A, B, C, D, GET_P_32(P2, 2));
 244 |  191k |       F2(D, E, A, B, C, GET_P_32(P2, 3));
 245 |  191k |       prep(P2, W2, W3, W0, W1, K40_59);
 246 |  191k |
 247 |  191k |       F2(C, D, E, A, B, GET_P_32(P3, 0));
 248 |  191k |       F2(B, C, D, E, A, GET_P_32(P3, 1));
 249 |  191k |       F2(A, B, C, D, E, GET_P_32(P3, 2));
 250 |  191k |       F2(E, A, B, C, D, GET_P_32(P3, 3));
 251 |  191k |       prep(P3, W3, W0, W1, W2, K40_59);
 252 |  191k |
 253 |  191k |       F2(D, E, A, B, C, GET_P_32(P0, 0));
 254 |  191k |       F2(C, D, E, A, B, GET_P_32(P0, 1));
 255 |  191k |       F2(B, C, D, E, A, GET_P_32(P0, 2));
 256 |  191k |       F2(A, B, C, D, E, GET_P_32(P0, 3));
 257 |  191k |       prep(P0, W0, W1, W2, W3, K40_59);
 258 |  191k |
 259 |  191k |       F2(E, A, B, C, D, GET_P_32(P1, 0));
 260 |  191k |       F2(D, E, A, B, C, GET_P_32(P1, 1));
 261 |  191k |       F2(C, D, E, A, B, GET_P_32(P1, 2));
 262 |  191k |       F2(B, C, D, E, A, GET_P_32(P1, 3));
 263 |  191k |       prep(P1, W1, W2, W3, W0, K40_59);
 264 |  191k |
 265 |  191k |       F3(A, B, C, D, E, GET_P_32(P2, 0));
 266 |  191k |       F3(E, A, B, C, D, GET_P_32(P2, 1));
 267 |  191k |       F3(D, E, A, B, C, GET_P_32(P2, 2));
 268 |  191k |       F3(C, D, E, A, B, GET_P_32(P2, 3));
 269 |  191k |       prep(P2, W2, W3, W0, W1, K40_59);
 270 |  191k |
 271 |  191k |       F3(B, C, D, E, A, GET_P_32(P3, 0));
 272 |  191k |       F3(A, B, C, D, E, GET_P_32(P3, 1));
 273 |  191k |       F3(E, A, B, C, D, GET_P_32(P3, 2));
 274 |  191k |       F3(D, E, A, B, C, GET_P_32(P3, 3));
 275 |  191k |       prep(P3, W3, W0, W1, W2, K60_79);
 276 |  191k |
 277 |  191k |       F3(C, D, E, A, B, GET_P_32(P0, 0));
 278 |  191k |       F3(B, C, D, E, A, GET_P_32(P0, 1));
 279 |  191k |       F3(A, B, C, D, E, GET_P_32(P0, 2));
 280 |  191k |       F3(E, A, B, C, D, GET_P_32(P0, 3));
 281 |  191k |       prep(P0, W0, W1, W2, W3, K60_79);
 282 |  191k |
 283 |  191k |       F3(D, E, A, B, C, GET_P_32(P1, 0));
 284 |  191k |       F3(C, D, E, A, B, GET_P_32(P1, 1));
 285 |  191k |       F3(B, C, D, E, A, GET_P_32(P1, 2));
 286 |  191k |       F3(A, B, C, D, E, GET_P_32(P1, 3));
 287 |  191k |       prep(P1, W1, W2, W3, W0, K60_79);
 288 |  191k |
 289 |  191k |       F3(E, A, B, C, D, GET_P_32(P2, 0));
 290 |  191k |       F3(D, E, A, B, C, GET_P_32(P2, 1));
 291 |  191k |       F3(C, D, E, A, B, GET_P_32(P2, 2));
 292 |  191k |       F3(B, C, D, E, A, GET_P_32(P2, 3));
 293 |  191k |       prep(P2, W2, W3, W0, W1, K60_79);
 294 |  191k |
 295 |  191k |       F4(A, B, C, D, E, GET_P_32(P3, 0));
 296 |  191k |       F4(E, A, B, C, D, GET_P_32(P3, 1));
 297 |  191k |       F4(D, E, A, B, C, GET_P_32(P3, 2));
 298 |  191k |       F4(C, D, E, A, B, GET_P_32(P3, 3));
 299 |  191k |       prep(P3, W3, W0, W1, W2, K60_79);
 300 |  191k |
 301 |  191k |       F4(B, C, D, E, A, GET_P_32(P0, 0));
 302 |  191k |       F4(A, B, C, D, E, GET_P_32(P0, 1));
 303 |  191k |       F4(E, A, B, C, D, GET_P_32(P0, 2));
 304 |  191k |       F4(D, E, A, B, C, GET_P_32(P0, 3));
 305 |  191k |
 306 |  191k |       F4(C, D, E, A, B, GET_P_32(P1, 0));
 307 |  191k |       F4(B, C, D, E, A, GET_P_32(P1, 1));
 308 |  191k |       F4(A, B, C, D, E, GET_P_32(P1, 2));
 309 |  191k |       F4(E, A, B, C, D, GET_P_32(P1, 3));
 310 |  191k |
 311 |  191k |       F4(D, E, A, B, C, GET_P_32(P2, 0));
 312 |  191k |       F4(C, D, E, A, B, GET_P_32(P2, 1));
 313 |  191k |       F4(B, C, D, E, A, GET_P_32(P2, 2));
 314 |  191k |       F4(A, B, C, D, E, GET_P_32(P2, 3));
 315 |  191k |
 316 |  191k |       F4(E, A, B, C, D, GET_P_32(P3, 0));
 317 |  191k |       F4(D, E, A, B, C, GET_P_32(P3, 1));
 318 |  191k |       F4(C, D, E, A, B, GET_P_32(P3, 2));
 319 |  191k |       F4(B, C, D, E, A, GET_P_32(P3, 3));
 320 |  191k |
 321 |  191k |       A = (digest[0] += A);
 322 |  191k |       B = (digest[1] += B);
 323 |  191k |       C = (digest[2] += C);
 324 |  191k |       D = (digest[3] += D);
 325 |  191k |       E = (digest[4] += E);
 326 |  191k |
 327 |  191k |       input_mm += (64 / 16);
 328 |  191k |       }
 329 | 51.0k |
 330 | 51.0k | #undef GET_P_32
 331 | 51.0k |    }
 332 |       |
 333 |       | #undef prep00_15
 334 |       | #undef prep
 335 |       |
 336 |       | }
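
Notes

The counts above are internally consistent with the structure of SHA-1: sse2_compress_n was entered about 51.0k times and the block loop body ran about 191k times (roughly 3.75 blocks per call), so the loop condition on line 174 was evaluated 191k + 51.0k ≈ 242k times. Per block the code performs:

   4 prep00_15 expansions:      4 × 191k ≈ 765k
   16 prep expansions:         16 × 191k ≈ 3.06M
   20 rounds of each F1..F4:   20 × 191k ≈ 3.82M
   80 GET_P_32 reads:          80 × 191k ≈ 15.3M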
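
The comment at lines 20-23 notes that the first 16 schedule words only need byte swapping plus the round constant. Per 32-bit lane, the shufflehi/shufflelo swaps and the 8-bit shift/or in prep00_15 amount to an ordinary byte swap followed by an add of K00_19 = 0x5A827999. The following is a minimal scalar sketch, not code from the Botan source; the helper names byteswap32 and prep00_15_lane are made up for the example.

#include <cassert>
#include <cstdint>

static uint32_t byteswap32(uint32_t x)
   {
   // shufflehi/shufflelo with _MM_SHUFFLE(2, 3, 0, 1) swap the two 16-bit
   // halves of each 32-bit lane; the 8-bit shift/or then swaps the bytes
   // inside each half. Together that is a plain 32-bit byte swap.
   const uint32_t half_swapped = (x << 16) | (x >> 16);
   return ((half_swapped & 0x00FF00FFu) << 8) | ((half_swapped & 0xFF00FF00u) >> 8);
   }

static uint32_t prep00_15_lane(uint32_t w)
   {
   return byteswap32(w) + 0x5A827999; // byte-swapped word plus K00_19
   }

int main()
   {
   assert(byteswap32(0x01020304) == 0x04030201);
   assert(prep00_15_lane(0x01020304) == 0x04030201 + 0x5A827999);
   return 0;
   }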
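
The comment at lines 34-60 explains why the expansion runs four words at a time with a zero standing in for W[t+0]: W[t+3] depends on W[t+0], which is produced in the same vector, so the prep macro patches W[t+3] with rol(W[t+0], 1) afterwards. Because rotation distributes over XOR, the patched value equals the standard recurrence. A minimal scalar sketch (not Botan code; rol1 is a made-up helper) that checks the equivalence:

#include <cassert>
#include <cstddef>
#include <cstdint>

static uint32_t rol1(uint32_t x) { return (x << 1) | (x >> 31); }

int main()
   {
   // Arbitrary test data for W[0..15]
   uint32_t W[16];
   for(size_t i = 0; i != 16; ++i)
      W[i] = static_cast<uint32_t>(0x9E3779B9u * (i + 1));

   // Reference: the standard SHA-1 recurrence, one word at a time
   uint32_t ref[20];
   for(size_t i = 0; i != 16; ++i)
      ref[i] = W[i];
   for(size_t i = 16; i != 20; ++i)
      ref[i] = rol1(ref[i-3] ^ ref[i-8] ^ ref[i-14] ^ ref[i-16]);

   // Vector-style: compute W[t+3] with a zero in place of W[t+0], then patch
   const size_t t = 16;
   uint32_t out[4];
   out[0] = rol1(W[t-3] ^ W[t-8]  ^ W[t-14] ^ W[t-16]);
   out[1] = rol1(W[t-2] ^ W[t-7]  ^ W[t-13] ^ W[t-15]);
   out[2] = rol1(W[t-1] ^ W[t-6]  ^ W[t-12] ^ W[t-14]);
   out[3] = rol1(  0    ^ W[t-5]  ^ W[t-11] ^ W[t-13]);
   out[3] ^= rol1(out[0]);

   for(size_t j = 0; j != 4; ++j)
      assert(out[j] == ref[t + j]);
   return 0;
   }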
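
The comment at lines 62-78 describes replacing the usual shift/shift/or rotate-by-1 with a compare/add/subtract sequence. Per 32-bit lane, the comparison against zero yields an all-ones mask (numerically -1) exactly when the top bit is set, so (x + x) - mask re-inserts the carried-out bit as bit 0, which is a left rotation by one. A minimal scalar sketch (not Botan code; the function names are made up) of that equivalence:

#include <cassert>
#include <cstdint>

static uint32_t rol1_shifts(uint32_t x)
   {
   // The conventional psrld/pslld/por form
   return (x << 1) | (x >> 31);
   }

static uint32_t rol1_compare_add(uint32_t x)
   {
   // The compare/add/subtract form used inside the prep macro
   const uint32_t mask = (static_cast<int32_t>(x) < 0) ? 0xFFFFFFFFu : 0u;
   return (x + x) - mask;
   }

int main()
   {
   const uint32_t tests[] = { 0u, 1u, 0x7FFFFFFFu, 0x80000000u, 0xDEADBEEFu };
   for(uint32_t x : tests)
      assert(rol1_shifts(x) == rol1_compare_add(x));
   return 0;
   }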