Coverage Report

Created: 2020-02-14 15:38

/src/botan/src/lib/hash/sha1/sha1_sse2/sha1_sse2.cpp
Line |  Count | Source
   1 |        | /*
   2 |        | * SHA-1 using SSE2
   3 |        | * Based on public domain code by Dean Gaudet
   4 |        | *    (http://arctic.org/~dean/crypto/sha1.html)
   5 |        | * (C) 2009-2011 Jack Lloyd
   6 |        | *
   7 |        | * Botan is released under the Simplified BSD License (see license.txt)
   8 |        | */
   9 |        |
  10 |        | #include <botan/sha160.h>
  11 |        | #include <botan/rotate.h>
  12 |        | #include <emmintrin.h>
  13 |        |
  14 |        | namespace Botan {
  15 |        |
  16 |        | namespace SHA1_SSE2_F {
  17 |        |
  18 |        | namespace {
  19 |        |
  20 |        | /*
  21 |        | * First 16 bytes just need byte swapping. Preparing just means
  22 |        | * adding in the round constants.
  23 |        | */
  24 |        |
  25 |        | #define prep00_15(P, W)                                      \
  26 |   773k |    do {                                                      \
  27 |   773k |       W = _mm_shufflehi_epi16(W, _MM_SHUFFLE(2, 3, 0, 1));   \
  28 |   773k |       W = _mm_shufflelo_epi16(W, _MM_SHUFFLE(2, 3, 0, 1));   \
  29 |   773k |       W = _mm_or_si128(_mm_slli_epi16(W, 8),                 \
  30 |   773k |                        _mm_srli_epi16(W, 8));                \
  31 |   773k |       P.u128 = _mm_add_epi32(W, K00_19);                     \
  32 |   773k |    } while(0)
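
The prep00_15 macro above is a vectorized form of a very simple per-word step: byte-swap each 32-bit word of the big-endian message block and pre-add the round constant for rounds 0-19. A minimal scalar sketch of the same step (the helper names below are illustrative, not taken from the Botan sources):

   #include <cstdint>

   inline uint32_t byteswap32(uint32_t x)
      {
      return (x << 24) | ((x << 8) & 0x00FF0000) |
             ((x >> 8) & 0x0000FF00) | (x >> 24);
      }

   // Scalar equivalent of prep00_15: W holds four big-endian message words
   void prep00_15_scalar(uint32_t prepared[4], uint32_t W[4])
      {
      const uint32_t K00_19 = 0x5A827999;
      for(int i = 0; i != 4; ++i)
         {
         W[i] = byteswap32(W[i]);       // same effect as the shuffle/shift sequence
         prepared[i] = W[i] + K00_19;   // "preparing" = adding the round constant
         }
      }
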
  33 |        |
  34 |        | /*
  35 |        | For each multiple of 4, t, we want to calculate this:
  36 |        |
  37 |        | W[t+0] = rol(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1);
  38 |        | W[t+1] = rol(W[t-2] ^ W[t-7] ^ W[t-13] ^ W[t-15], 1);
  39 |        | W[t+2] = rol(W[t-1] ^ W[t-6] ^ W[t-12] ^ W[t-14], 1);
  40 |        | W[t+3] = rol(W[t]   ^ W[t-5] ^ W[t-11] ^ W[t-13], 1);
  41 |        |
  42 |        | we'll actually calculate this:
  43 |        |
  44 |        | W[t+0] = rol(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1);
  45 |        | W[t+1] = rol(W[t-2] ^ W[t-7] ^ W[t-13] ^ W[t-15], 1);
  46 |        | W[t+2] = rol(W[t-1] ^ W[t-6] ^ W[t-12] ^ W[t-14], 1);
  47 |        | W[t+3] = rol(  0    ^ W[t-5] ^ W[t-11] ^ W[t-13], 1);
  48 |        | W[t+3] ^= rol(W[t+0], 1);
  49 |        |
  50 |        | the parameters are:
  51 |        |
  52 |        | W0 = &W[t-16];
  53 |        | W1 = &W[t-12];
  54 |        | W2 = &W[t- 8];
  55 |        | W3 = &W[t- 4];
  56 |        |
  57 |        | and on output:
  58 |        | prepared = W0 + K
  59 |        | W0 = W[t]..W[t+3]
  60 |        | */
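
Spelled out as plain C++, the four-at-a-time expansion described in this comment looks like the sketch below (W and expand4 are illustrative names; the prep() macro that follows does the same work on a single XMM register):

   #include <cstdint>

   inline uint32_t rol1(uint32_t x) { return (x << 1) | (x >> 31); }

   // One group of four W values; t is a multiple of 4 with 16 <= t <= 76
   void expand4(uint32_t W[80], int t)
      {
      W[t+0] = rol1(W[t-3] ^ W[t-8]  ^ W[t-14] ^ W[t-16]);
      W[t+1] = rol1(W[t-2] ^ W[t-7]  ^ W[t-13] ^ W[t-15]);
      W[t+2] = rol1(W[t-1] ^ W[t-6]  ^ W[t-12] ^ W[t-14]);
      // W[t+3] depends on W[t+0], computed in this same group, so the missing
      // term is zeroed here and XORed in afterwards; this is valid because
      // rol(a ^ b, 1) == rol(a, 1) ^ rol(b, 1)
      W[t+3] = rol1(0      ^ W[t-5]  ^ W[t-11] ^ W[t-13]);
      W[t+3] ^= rol1(W[t+0]);
      }
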
  61 |        |
  62 |        | /* note that there is a step here where i want to do a rol by 1, which
  63 |        | * normally would look like this:
  64 |        | *
  65 |        | * r1 = psrld r0,$31
  66 |        | * r0 = pslld r0,$1
  67 |        | * r0 = por r0,r1
  68 |        | *
  69 |        | * but instead i do this:
  70 |        | *
  71 |        | * r1 = pcmpltd r0,zero
  72 |        | * r0 = paddd r0,r0
  73 |        | * r0 = psub r0,r1
  74 |        | *
  75 |        | * because pcmpltd and paddd are available in both MMX units on
  76 |        | * efficeon, pentium-m, and opteron but shifts are available in
  77 |        | * only one unit.
  78 |        | */
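
The equivalence this comment relies on can be written out with SSE2 intrinsics as follows; both helpers rotate every 32-bit lane left by one bit (rol1_shift and rol1_cmp are illustrative names, not part of this file):

   #include <emmintrin.h>

   inline __m128i rol1_shift(__m128i r0)      // the "normal" shift/shift/or form
      {
      __m128i r1 = _mm_srli_epi32(r0, 31);    // top bit of each lane
      r0 = _mm_slli_epi32(r0, 1);
      return _mm_or_si128(r0, r1);            // (x << 1) | (x >> 31)
      }

   inline __m128i rol1_cmp(__m128i r0)        // the compare/add/subtract form used in prep()
      {
      __m128i r1 = _mm_cmplt_epi32(r0, _mm_setzero_si128());  // -1 where the sign bit is set
      r0 = _mm_add_epi32(r0, r0);             // x + x == x << 1
      return _mm_sub_epi32(r0, r1);           // subtracting -1 re-inserts the carried-out bit
      }
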
  79 |        | #define prep(prep, XW0, XW1, XW2, XW3, K)                               \
  80 |  3.09M |    do {                                                                 \
  81 |  3.09M |       __m128i r0, r1, r2, r3;                                           \
  82 |  3.09M |                                                                         \
  83 |  3.09M |       /* load W[t-4] 16-byte aligned, and shift */                      \
  84 |  3.09M |       r3 = _mm_srli_si128((XW3), 4);                                    \
  85 |  3.09M |       r0 = (XW0);                                                       \
  86 |  3.09M |       /* get high 64-bits of XW0 into low 64-bits */                    \
  87 |  3.09M |       r1 = _mm_shuffle_epi32((XW0), _MM_SHUFFLE(1,0,3,2));              \
  88 |  3.09M |       /* load high 64-bits of r1 */                                     \
  89 |  3.09M |       r1 = _mm_unpacklo_epi64(r1, (XW1));                               \
  90 |  3.09M |       r2 = (XW2);                                                       \
  91 |  3.09M |                                                                         \
  92 |  3.09M |       r0 = _mm_xor_si128(r1, r0);                                       \
  93 |  3.09M |       r2 = _mm_xor_si128(r3, r2);                                       \
  94 |  3.09M |       r0 = _mm_xor_si128(r2, r0);                                       \
  95 |  3.09M |       /* unrotated W[t]..W[t+2] in r0 ... still need W[t+3] */          \
  96 |  3.09M |                                                                         \
  97 |  3.09M |       r2 = _mm_slli_si128(r0, 12);                                      \
  98 |  3.09M |       r1 = _mm_cmplt_epi32(r0, _mm_setzero_si128());                    \
  99 |  3.09M |       r0 = _mm_add_epi32(r0, r0);   /* shift left by 1 */               \
 100 |  3.09M |       r0 = _mm_sub_epi32(r0, r1);   /* r0 has W[t]..W[t+2] */           \
 101 |  3.09M |                                                                         \
 102 |  3.09M |       r3 = _mm_srli_epi32(r2, 30);                                      \
 103 |  3.09M |       r2 = _mm_slli_epi32(r2, 2);                                       \
 104 |  3.09M |                                                                         \
 105 |  3.09M |       r0 = _mm_xor_si128(r0, r3);                                       \
 106 |  3.09M |       r0 = _mm_xor_si128(r0, r2);   /* r0 now has W[t+3] */             \
 107 |  3.09M |                                                                         \
 108 |  3.09M |       (XW0) = r0;                                                       \
 109 |  3.09M |       (prep).u128 = _mm_add_epi32(r0, K);                               \
 110 |  3.09M |    } while(0)
 111 |        |
 112 |        | /*
 113 |        | * SHA-160 F1 Function
 114 |        | */
 115 |        | inline void F1(uint32_t A, uint32_t& B, uint32_t C, uint32_t D, uint32_t& E, uint32_t msg)
 116 |  3.86M |    {
 117 |  3.86M |    E += (D ^ (B & (C ^ D))) + msg + rotl<5>(A);
 118 |  3.86M |    B  = rotl<30>(B);
 119 |  3.86M |    }
 120 |        |
 121 |        | /*
 122 |        | * SHA-160 F2 Function
 123 |        | */
 124 |        | inline void F2(uint32_t A, uint32_t& B, uint32_t C, uint32_t D, uint32_t& E, uint32_t msg)
 125 |  3.86M |    {
 126 |  3.86M |    E += (B ^ C ^ D) + msg + rotl<5>(A);
 127 |  3.86M |    B  = rotl<30>(B);
 128 |  3.86M |    }
 129 |        |
 130 |        | /*
 131 |        | * SHA-160 F3 Function
 132 |        | */
 133 |        | inline void F3(uint32_t A, uint32_t& B, uint32_t C, uint32_t D, uint32_t& E, uint32_t msg)
 134 |  3.86M |    {
 135 |  3.86M |    E += ((B & C) | ((B | C) & D)) + msg + rotl<5>(A);
 136 |  3.86M |    B  = rotl<30>(B);
 137 |  3.86M |    }
 138 |        |
 139 |        | /*
 140 |        | * SHA-160 F4 Function
 141 |        | */
 142 |        | inline void F4(uint32_t A, uint32_t& B, uint32_t C, uint32_t D, uint32_t& E, uint32_t msg)
 143 |  3.86M |    {
 144 |  3.86M |    E += (B ^ C ^ D) + msg + rotl<5>(A);
 145 |  3.86M |    B  = rotl<30>(B);
 146 |  3.86M |    }
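
One detail worth noting about F1..F4: the usual SHA-1 state rotation (E = D, D = C, C = rol(B, 30), B = A, A = tmp) never happens explicitly. Instead, successive calls pass the state words in a rotated order and the new "A" is accumulated into whichever variable is bound to E. A hypothetical scalar illustration, with F1 copied from the listing (two_rounds and the local rotl are illustrative names only):

   #include <cstdint>

   template<int R> inline uint32_t rotl(uint32_t x) { return (x << R) | (x >> (32 - R)); }

   inline void F1(uint32_t A, uint32_t& B, uint32_t C, uint32_t D, uint32_t& E, uint32_t msg)
      {
      E += (D ^ (B & (C ^ D))) + msg + rotl<5>(A);   // E receives the new "A"
      B  = rotl<30>(B);                              // B becomes the new "C"
      }

   // Two consecutive rounds; msg0/msg1 already have the round constant folded in
   void two_rounds(uint32_t& A, uint32_t& B, uint32_t& C, uint32_t& D, uint32_t& E,
                   uint32_t msg0, uint32_t msg1)
      {
      F1(A, B, C, D, E, msg0);   // round t
      F1(E, A, B, C, D, msg1);   // round t+1: same update, arguments rotated one place
      }
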
 147 |        |
 148 |        | }
 149 |        |
 150 |        | }
 151 |        |
 152 |        | /*
 153 |        | * SHA-160 Compression Function using SSE for message expansion
 154 |        | */
 155 |        | //static
 156 |        | BOTAN_FUNC_ISA("sse2")
 157 |        | void SHA_160::sse2_compress_n(secure_vector<uint32_t>& digest, const uint8_t input[], size_t blocks)
 158 |  51.4k |    {
 159 |  51.4k |    using namespace SHA1_SSE2_F;
 160 |  51.4k |
 161 |  51.4k |    const __m128i K00_19 = _mm_set1_epi32(0x5A827999);
 162 |  51.4k |    const __m128i K20_39 = _mm_set1_epi32(0x6ED9EBA1);
 163 |  51.4k |    const __m128i K40_59 = _mm_set1_epi32(0x8F1BBCDC);
 164 |  51.4k |    const __m128i K60_79 = _mm_set1_epi32(0xCA62C1D6);
 165 |  51.4k |
 166 |  51.4k |    uint32_t A = digest[0],
 167 |  51.4k |           B = digest[1],
 168 |  51.4k |           C = digest[2],
 169 |  51.4k |           D = digest[3],
 170 |  51.4k |           E = digest[4];
 171 |  51.4k |
 172 |  51.4k |    const __m128i* input_mm = reinterpret_cast<const __m128i*>(input);
 173 |  51.4k |
 174 |   244k |    for(size_t i = 0; i != blocks; ++i)
 175 |   193k |       {
 176 |   193k |       union v4si {
 177 |   193k |          uint32_t u32[4];
 178 |   193k |          __m128i u128;
 179 |   193k |          };
 180 |   193k |
 181 |   193k |       v4si P0, P1, P2, P3;
 182 |   193k |
 183 |   193k |       __m128i W0 = _mm_loadu_si128(&input_mm[0]);
 184 |   193k |       prep00_15(P0, W0);
 185 |   193k |
 186 |   193k |       __m128i W1 = _mm_loadu_si128(&input_mm[1]);
 187 |   193k |       prep00_15(P1, W1);
 188 |   193k |
 189 |   193k |       __m128i W2 = _mm_loadu_si128(&input_mm[2]);
 190 |   193k |       prep00_15(P2, W2);
 191 |   193k |
 192 |   193k |       __m128i W3 = _mm_loadu_si128(&input_mm[3]);
 193 |   193k |       prep00_15(P3, W3);
 194 |   193k |
 195 |   193k |       /*
 196 |   193k |       Using SSE4; slower on Core2 and Nehalem
 197 |   193k |       #define GET_P_32(P, i) _mm_extract_epi32(P.u128, i)
 198 |   193k |
 199 |   193k |       Much slower on all tested platforms
 200 |   193k |       #define GET_P_32(P,i) _mm_cvtsi128_si32(_mm_srli_si128(P.u128, i*4))
 201 |   193k |       */
 202 |   193k |
 203 |  15.4M | #define GET_P_32(P, i) P.u32[i]
 204 |   193k |
 205 |   193k |       F1(A, B, C, D, E, GET_P_32(P0, 0));
 206 |   193k |       F1(E, A, B, C, D, GET_P_32(P0, 1));
 207 |   193k |       F1(D, E, A, B, C, GET_P_32(P0, 2));
 208 |   193k |       F1(C, D, E, A, B, GET_P_32(P0, 3));
 209 |   193k |       prep(P0, W0, W1, W2, W3, K00_19);
 210 |   193k |
 211 |   193k |       F1(B, C, D, E, A, GET_P_32(P1, 0));
 212 |   193k |       F1(A, B, C, D, E, GET_P_32(P1, 1));
 213 |   193k |       F1(E, A, B, C, D, GET_P_32(P1, 2));
 214 |   193k |       F1(D, E, A, B, C, GET_P_32(P1, 3));
 215 |   193k |       prep(P1, W1, W2, W3, W0, K20_39);
 216 |   193k |
 217 |   193k |       F1(C, D, E, A, B, GET_P_32(P2, 0));
 218 |   193k |       F1(B, C, D, E, A, GET_P_32(P2, 1));
 219 |   193k |       F1(A, B, C, D, E, GET_P_32(P2, 2));
 220 |   193k |       F1(E, A, B, C, D, GET_P_32(P2, 3));
 221 |   193k |       prep(P2, W2, W3, W0, W1, K20_39);
 222 |   193k |
 223 |   193k |       F1(D, E, A, B, C, GET_P_32(P3, 0));
 224 |   193k |       F1(C, D, E, A, B, GET_P_32(P3, 1));
 225 |   193k |       F1(B, C, D, E, A, GET_P_32(P3, 2));
 226 |   193k |       F1(A, B, C, D, E, GET_P_32(P3, 3));
 227 |   193k |       prep(P3, W3, W0, W1, W2, K20_39);
 228 |   193k |
 229 |   193k |       F1(E, A, B, C, D, GET_P_32(P0, 0));
 230 |   193k |       F1(D, E, A, B, C, GET_P_32(P0, 1));
 231 |   193k |       F1(C, D, E, A, B, GET_P_32(P0, 2));
 232 |   193k |       F1(B, C, D, E, A, GET_P_32(P0, 3));
 233 |   193k |       prep(P0, W0, W1, W2, W3, K20_39);
 234 |   193k |
 235 |   193k |       F2(A, B, C, D, E, GET_P_32(P1, 0));
 236 |   193k |       F2(E, A, B, C, D, GET_P_32(P1, 1));
 237 |   193k |       F2(D, E, A, B, C, GET_P_32(P1, 2));
 238 |   193k |       F2(C, D, E, A, B, GET_P_32(P1, 3));
 239 |   193k |       prep(P1, W1, W2, W3, W0, K20_39);
 240 |   193k |
 241 |   193k |       F2(B, C, D, E, A, GET_P_32(P2, 0));
 242 |   193k |       F2(A, B, C, D, E, GET_P_32(P2, 1));
 243 |   193k |       F2(E, A, B, C, D, GET_P_32(P2, 2));
 244 |   193k |       F2(D, E, A, B, C, GET_P_32(P2, 3));
 245 |   193k |       prep(P2, W2, W3, W0, W1, K40_59);
 246 |   193k |
 247 |   193k |       F2(C, D, E, A, B, GET_P_32(P3, 0));
 248 |   193k |       F2(B, C, D, E, A, GET_P_32(P3, 1));
 249 |   193k |       F2(A, B, C, D, E, GET_P_32(P3, 2));
 250 |   193k |       F2(E, A, B, C, D, GET_P_32(P3, 3));
 251 |   193k |       prep(P3, W3, W0, W1, W2, K40_59);
 252 |   193k |
 253 |   193k |       F2(D, E, A, B, C, GET_P_32(P0, 0));
 254 |   193k |       F2(C, D, E, A, B, GET_P_32(P0, 1));
 255 |   193k |       F2(B, C, D, E, A, GET_P_32(P0, 2));
 256 |   193k |       F2(A, B, C, D, E, GET_P_32(P0, 3));
 257 |   193k |       prep(P0, W0, W1, W2, W3, K40_59);
 258 |   193k |
 259 |   193k |       F2(E, A, B, C, D, GET_P_32(P1, 0));
 260 |   193k |       F2(D, E, A, B, C, GET_P_32(P1, 1));
 261 |   193k |       F2(C, D, E, A, B, GET_P_32(P1, 2));
 262 |   193k |       F2(B, C, D, E, A, GET_P_32(P1, 3));
 263 |   193k |       prep(P1, W1, W2, W3, W0, K40_59);
 264 |   193k |
 265 |   193k |       F3(A, B, C, D, E, GET_P_32(P2, 0));
 266 |   193k |       F3(E, A, B, C, D, GET_P_32(P2, 1));
 267 |   193k |       F3(D, E, A, B, C, GET_P_32(P2, 2));
 268 |   193k |       F3(C, D, E, A, B, GET_P_32(P2, 3));
 269 |   193k |       prep(P2, W2, W3, W0, W1, K40_59);
 270 |   193k |
 271 |   193k |       F3(B, C, D, E, A, GET_P_32(P3, 0));
 272 |   193k |       F3(A, B, C, D, E, GET_P_32(P3, 1));
 273 |   193k |       F3(E, A, B, C, D, GET_P_32(P3, 2));
 274 |   193k |       F3(D, E, A, B, C, GET_P_32(P3, 3));
 275 |   193k |       prep(P3, W3, W0, W1, W2, K60_79);
 276 |   193k |
 277 |   193k |       F3(C, D, E, A, B, GET_P_32(P0, 0));
 278 |   193k |       F3(B, C, D, E, A, GET_P_32(P0, 1));
 279 |   193k |       F3(A, B, C, D, E, GET_P_32(P0, 2));
 280 |   193k |       F3(E, A, B, C, D, GET_P_32(P0, 3));
 281 |   193k |       prep(P0, W0, W1, W2, W3, K60_79);
 282 |   193k |
 283 |   193k |       F3(D, E, A, B, C, GET_P_32(P1, 0));
 284 |   193k |       F3(C, D, E, A, B, GET_P_32(P1, 1));
 285 |   193k |       F3(B, C, D, E, A, GET_P_32(P1, 2));
 286 |   193k |       F3(A, B, C, D, E, GET_P_32(P1, 3));
 287 |   193k |       prep(P1, W1, W2, W3, W0, K60_79);
 288 |   193k |
 289 |   193k |       F3(E, A, B, C, D, GET_P_32(P2, 0));
 290 |   193k |       F3(D, E, A, B, C, GET_P_32(P2, 1));
 291 |   193k |       F3(C, D, E, A, B, GET_P_32(P2, 2));
 292 |   193k |       F3(B, C, D, E, A, GET_P_32(P2, 3));
 293 |   193k |       prep(P2, W2, W3, W0, W1, K60_79);
 294 |   193k |
 295 |   193k |       F4(A, B, C, D, E, GET_P_32(P3, 0));
 296 |   193k |       F4(E, A, B, C, D, GET_P_32(P3, 1));
 297 |   193k |       F4(D, E, A, B, C, GET_P_32(P3, 2));
 298 |   193k |       F4(C, D, E, A, B, GET_P_32(P3, 3));
 299 |   193k |       prep(P3, W3, W0, W1, W2, K60_79);
 300 |   193k |
 301 |   193k |       F4(B, C, D, E, A, GET_P_32(P0, 0));
 302 |   193k |       F4(A, B, C, D, E, GET_P_32(P0, 1));
 303 |   193k |       F4(E, A, B, C, D, GET_P_32(P0, 2));
 304 |   193k |       F4(D, E, A, B, C, GET_P_32(P0, 3));
 305 |   193k |
 306 |   193k |       F4(C, D, E, A, B, GET_P_32(P1, 0));
 307 |   193k |       F4(B, C, D, E, A, GET_P_32(P1, 1));
 308 |   193k |       F4(A, B, C, D, E, GET_P_32(P1, 2));
 309 |   193k |       F4(E, A, B, C, D, GET_P_32(P1, 3));
 310 |   193k |
 311 |   193k |       F4(D, E, A, B, C, GET_P_32(P2, 0));
 312 |   193k |       F4(C, D, E, A, B, GET_P_32(P2, 1));
 313 |   193k |       F4(B, C, D, E, A, GET_P_32(P2, 2));
 314 |   193k |       F4(A, B, C, D, E, GET_P_32(P2, 3));
 315 |   193k |
 316 |   193k |       F4(E, A, B, C, D, GET_P_32(P3, 0));
 317 |   193k |       F4(D, E, A, B, C, GET_P_32(P3, 1));
 318 |   193k |       F4(C, D, E, A, B, GET_P_32(P3, 2));
 319 |   193k |       F4(B, C, D, E, A, GET_P_32(P3, 3));
 320 |   193k |
 321 |   193k |       A = (digest[0] += A);
 322 |   193k |       B = (digest[1] += B);
 323 |   193k |       C = (digest[2] += C);
 324 |   193k |       D = (digest[3] += D);
 325 |   193k |       E = (digest[4] += E);
 326 |   193k |
 327 |   193k |       input_mm += (64 / 16);
 328 |   193k |       }
 329 |  51.4k |
 330 |  51.4k | #undef GET_P_32
 331 |  51.4k |    }
 332 |        |
 333 |        | #undef prep00_15
 334 |        | #undef prep
 335 |        |
 336 |        | }