Coverage Report

Created: 2022-09-23 06:05

/src/botan/src/lib/hash/sha2_32/sha2_32_x86/sha2_32_x86.cpp
Line | Count | Source
   1 |       | /*
   2 |       | * Support for SHA-256 x86 intrinsic
   3 |       | * Based on public domain code by Sean Gulley
   4 |       | *    (https://github.com/mitls/hacl-star/tree/master/experimental/hash)
   5 |       | *
   6 |       | * Botan is released under the Simplified BSD License (see license.txt)
   7 |       | */
   8 |       |
   9 |       | #include <botan/internal/sha2_32.h>
  10 |       | #include <immintrin.h>
  11 |       |
  12 |       | namespace Botan {
  13 |       |
  14 |       | // called from sha2_32.cpp
  15 |       | #if defined(BOTAN_HAS_SHA2_32_X86)
  16 |       | BOTAN_FUNC_ISA("sha,sse4.1,ssse3")
  17 |       | void SHA_256::compress_digest_x86(secure_vector<uint32_t>& digest, const uint8_t input[], size_t blocks)
  18 |     0 |    {
  19 |     0 |    alignas(64) static const uint32_t K[] = {
  20 |     0 |       0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5,
  21 |     0 |       0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
  22 |     0 |       0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3,
  23 |     0 |       0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
  24 |     0 |       0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC,
  25 |     0 |       0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
  26 |     0 |       0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7,
  27 |     0 |       0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
  28 |     0 |       0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13,
  29 |     0 |       0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
  30 |     0 |       0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3,
  31 |     0 |       0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
  32 |     0 |       0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5,
  33 |     0 |       0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
  34 |     0 |       0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208,
  35 |     0 |       0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
  36 |     0 |    };
  37 |       |
  38 |     0 |    const __m128i* K_mm = reinterpret_cast<const __m128i*>(K);
  39 |       |
  40 |     0 |    uint32_t* state = &digest[0];
  41 |       |
  42 |     0 |    const __m128i* input_mm = reinterpret_cast<const __m128i*>(input);
  43 |     0 |    const __m128i MASK = _mm_set_epi64x(0x0c0d0e0f08090a0b, 0x0405060700010203);
  44 |       |
  45 |       |    // Load initial values
  46 |     0 |    __m128i STATE0 = _mm_loadu_si128(reinterpret_cast<__m128i*>(&state[0]));
  47 |     0 |    __m128i STATE1 = _mm_loadu_si128(reinterpret_cast<__m128i*>(&state[4]));
  48 |       |
  49 |     0 |    STATE0 = _mm_shuffle_epi32(STATE0, 0xB1); // CDAB
  50 |     0 |    STATE1 = _mm_shuffle_epi32(STATE1, 0x1B); // EFGH
  51 |       |
  52 |     0 |    __m128i TMP = _mm_alignr_epi8(STATE0, STATE1, 8); // ABEF
  53 |     0 |    STATE1 = _mm_blend_epi16(STATE1, STATE0, 0xF0); // CDGH
  54 |     0 |    STATE0 = TMP;
  55 |       |
  56 |     0 |    while(blocks > 0)
  57 |     0 |       {
  58 |       |       // Save current state
  59 |     0 |       const __m128i ABEF_SAVE = STATE0;
  60 |     0 |       const __m128i CDGH_SAVE = STATE1;
  61 |       |
  62 |     0 |       __m128i MSG;
  63 |       |
  64 |     0 |       __m128i TMSG0 = _mm_shuffle_epi8(_mm_loadu_si128(input_mm), MASK);
  65 |     0 |       __m128i TMSG1 = _mm_shuffle_epi8(_mm_loadu_si128(input_mm + 1), MASK);
  66 |     0 |       __m128i TMSG2 = _mm_shuffle_epi8(_mm_loadu_si128(input_mm + 2), MASK);
  67 |     0 |       __m128i TMSG3 = _mm_shuffle_epi8(_mm_loadu_si128(input_mm + 3), MASK);
  68 |       |
  69 |       |       // Rounds 0-3
  70 |     0 |       MSG = _mm_add_epi32(TMSG0, _mm_load_si128(K_mm));
  71 |     0 |       STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
  72 |     0 |       STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, _mm_shuffle_epi32(MSG, 0x0E));
  73 |       |
  74 |       |       // Rounds 4-7
  75 |     0 |       MSG = _mm_add_epi32(TMSG1, _mm_load_si128(K_mm + 1));
  76 |     0 |       STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
  77 |     0 |       STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, _mm_shuffle_epi32(MSG, 0x0E));
  78 |       |
  79 |     0 |       TMSG0 = _mm_sha256msg1_epu32(TMSG0, TMSG1);
  80 |       |
  81 |       |       // Rounds 8-11
  82 |     0 |       MSG = _mm_add_epi32(TMSG2, _mm_load_si128(K_mm + 2));
  83 |     0 |       STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
  84 |     0 |       STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, _mm_shuffle_epi32(MSG, 0x0E));
  85 |       |
  86 |     0 |       TMSG1 = _mm_sha256msg1_epu32(TMSG1, TMSG2);
  87 |       |
  88 |       |       // Rounds 12-15
  89 |     0 |       MSG = _mm_add_epi32(TMSG3, _mm_load_si128(K_mm + 3));
  90 |     0 |       STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
  91 |     0 |       STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, _mm_shuffle_epi32(MSG, 0x0E));
  92 |       |
  93 |     0 |       TMSG0 = _mm_add_epi32(TMSG0, _mm_alignr_epi8(TMSG3, TMSG2, 4));
  94 |     0 |       TMSG0 = _mm_sha256msg2_epu32(TMSG0, TMSG3);
  95 |     0 |       TMSG2 = _mm_sha256msg1_epu32(TMSG2, TMSG3);
  96 |       |
  97 |       |       // Rounds 16-19
  98 |     0 |       MSG = _mm_add_epi32(TMSG0, _mm_load_si128(K_mm + 4));
  99 |     0 |       STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
 100 |     0 |       STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, _mm_shuffle_epi32(MSG, 0x0E));
 101 |       |
 102 |     0 |       TMSG1 = _mm_add_epi32(TMSG1, _mm_alignr_epi8(TMSG0, TMSG3, 4));
 103 |     0 |       TMSG1 = _mm_sha256msg2_epu32(TMSG1, TMSG0);
 104 |     0 |       TMSG3 = _mm_sha256msg1_epu32(TMSG3, TMSG0);
 105 |       |
 106 |       |       // Rounds 20-23
 107 |     0 |       MSG = _mm_add_epi32(TMSG1, _mm_load_si128(K_mm + 5));
 108 |     0 |       STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
 109 |     0 |       STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, _mm_shuffle_epi32(MSG, 0x0E));
 110 |       |
 111 |     0 |       TMSG2 = _mm_add_epi32(TMSG2, _mm_alignr_epi8(TMSG1, TMSG0, 4));
 112 |     0 |       TMSG2 = _mm_sha256msg2_epu32(TMSG2, TMSG1);
 113 |     0 |       TMSG0 = _mm_sha256msg1_epu32(TMSG0, TMSG1);
 114 |       |
 115 |       |       // Rounds 24-27
 116 |     0 |       MSG = _mm_add_epi32(TMSG2, _mm_load_si128(K_mm + 6));
 117 |     0 |       STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
 118 |     0 |       STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, _mm_shuffle_epi32(MSG, 0x0E));
 119 |       |
 120 |     0 |       TMSG3 = _mm_add_epi32(TMSG3, _mm_alignr_epi8(TMSG2, TMSG1, 4));
 121 |     0 |       TMSG3 = _mm_sha256msg2_epu32(TMSG3, TMSG2);
 122 |     0 |       TMSG1 = _mm_sha256msg1_epu32(TMSG1, TMSG2);
 123 |       |
 124 |       |       // Rounds 28-31
 125 |     0 |       MSG = _mm_add_epi32(TMSG3, _mm_load_si128(K_mm + 7));
 126 |     0 |       STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
 127 |     0 |       STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, _mm_shuffle_epi32(MSG, 0x0E));
 128 |       |
 129 |     0 |       TMSG0 = _mm_add_epi32(TMSG0, _mm_alignr_epi8(TMSG3, TMSG2, 4));
 130 |     0 |       TMSG0 = _mm_sha256msg2_epu32(TMSG0, TMSG3);
 131 |     0 |       TMSG2 = _mm_sha256msg1_epu32(TMSG2, TMSG3);
 132 |       |
 133 |       |       // Rounds 32-35
 134 |     0 |       MSG = _mm_add_epi32(TMSG0, _mm_load_si128(K_mm + 8));
 135 |     0 |       STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
 136 |     0 |       STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, _mm_shuffle_epi32(MSG, 0x0E));
 137 |       |
 138 |     0 |       TMSG1 = _mm_add_epi32(TMSG1, _mm_alignr_epi8(TMSG0, TMSG3, 4));
 139 |     0 |       TMSG1 = _mm_sha256msg2_epu32(TMSG1, TMSG0);
 140 |     0 |       TMSG3 = _mm_sha256msg1_epu32(TMSG3, TMSG0);
 141 |       |
 142 |       |       // Rounds 36-39
 143 |     0 |       MSG = _mm_add_epi32(TMSG1, _mm_load_si128(K_mm + 9));
 144 |     0 |       STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
 145 |     0 |       STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, _mm_shuffle_epi32(MSG, 0x0E));
 146 |       |
 147 |     0 |       TMSG2 = _mm_add_epi32(TMSG2, _mm_alignr_epi8(TMSG1, TMSG0, 4));
 148 |     0 |       TMSG2 = _mm_sha256msg2_epu32(TMSG2, TMSG1);
 149 |     0 |       TMSG0 = _mm_sha256msg1_epu32(TMSG0, TMSG1);
 150 |       |
 151 |       |       // Rounds 40-43
 152 |     0 |       MSG = _mm_add_epi32(TMSG2, _mm_load_si128(K_mm + 10));
 153 |     0 |       STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
 154 |     0 |       STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, _mm_shuffle_epi32(MSG, 0x0E));
 155 |       |
 156 |     0 |       TMSG3 = _mm_add_epi32(TMSG3, _mm_alignr_epi8(TMSG2, TMSG1, 4));
 157 |     0 |       TMSG3 = _mm_sha256msg2_epu32(TMSG3, TMSG2);
 158 |     0 |       TMSG1 = _mm_sha256msg1_epu32(TMSG1, TMSG2);
 159 |       |
 160 |       |       // Rounds 44-47
 161 |     0 |       MSG = _mm_add_epi32(TMSG3, _mm_load_si128(K_mm + 11));
 162 |     0 |       STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
 163 |     0 |       STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, _mm_shuffle_epi32(MSG, 0x0E));
 164 |       |
 165 |     0 |       TMSG0 = _mm_add_epi32(TMSG0, _mm_alignr_epi8(TMSG3, TMSG2, 4));
 166 |     0 |       TMSG0 = _mm_sha256msg2_epu32(TMSG0, TMSG3);
 167 |     0 |       TMSG2 = _mm_sha256msg1_epu32(TMSG2, TMSG3);
 168 |       |
 169 |       |       // Rounds 48-51
 170 |     0 |       MSG = _mm_add_epi32(TMSG0, _mm_load_si128(K_mm + 12));
 171 |     0 |       STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
 172 |     0 |       STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, _mm_shuffle_epi32(MSG, 0x0E));
 173 |       |
 174 |     0 |       TMSG1 = _mm_add_epi32(TMSG1, _mm_alignr_epi8(TMSG0, TMSG3, 4));
 175 |     0 |       TMSG1 = _mm_sha256msg2_epu32(TMSG1, TMSG0);
 176 |     0 |       TMSG3 = _mm_sha256msg1_epu32(TMSG3, TMSG0);
 177 |       |
 178 |       |       // Rounds 52-55
 179 |     0 |       MSG = _mm_add_epi32(TMSG1, _mm_load_si128(K_mm + 13));
 180 |     0 |       STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
 181 |     0 |       STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, _mm_shuffle_epi32(MSG, 0x0E));
 182 |       |
 183 |     0 |       TMSG2 = _mm_add_epi32(TMSG2, _mm_alignr_epi8(TMSG1, TMSG0, 4));
 184 |     0 |       TMSG2 = _mm_sha256msg2_epu32(TMSG2, TMSG1);
 185 |       |
 186 |       |       // Rounds 56-59
 187 |     0 |       MSG = _mm_add_epi32(TMSG2, _mm_load_si128(K_mm + 14));
 188 |     0 |       STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
 189 |     0 |       STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, _mm_shuffle_epi32(MSG, 0x0E));
 190 |       |
 191 |     0 |       TMSG3 = _mm_add_epi32(TMSG3, _mm_alignr_epi8(TMSG2, TMSG1, 4));
 192 |     0 |       TMSG3 = _mm_sha256msg2_epu32(TMSG3, TMSG2);
 193 |       |
 194 |       |       // Rounds 60-63
 195 |     0 |       MSG = _mm_add_epi32(TMSG3, _mm_load_si128(K_mm + 15));
 196 |     0 |       STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
 197 |     0 |       STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, _mm_shuffle_epi32(MSG, 0x0E));
 198 |       |
 199 |       |       // Add values back to state
 200 |     0 |       STATE0 = _mm_add_epi32(STATE0, ABEF_SAVE);
 201 |     0 |       STATE1 = _mm_add_epi32(STATE1, CDGH_SAVE);
 202 |       |
 203 |     0 |       input_mm += 4;
 204 |     0 |       blocks--;
 205 |     0 |       }
 206 |       |
 207 |     0 |    STATE0 = _mm_shuffle_epi32(STATE0, 0x1B); // FEBA
 208 |     0 |    STATE1 = _mm_shuffle_epi32(STATE1, 0xB1); // DCHG
 209 |       |
 210 |       |    // Save state
 211 |     0 |    _mm_storeu_si128(reinterpret_cast<__m128i*>(&state[0]), _mm_blend_epi16(STATE0, STATE1, 0xF0)); // DCBA
 212 |     0 |    _mm_storeu_si128(reinterpret_cast<__m128i*>(&state[4]), _mm_alignr_epi8(STATE1, STATE0, 8)); // ABEF
 213 |     0 |    }
 214 |       | #endif
 215 |       |
 216 |       | }
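
For readers following the listing: each _mm_sha256rnds2_epu32 call above advances the hash state by two SHA-256 rounds, with the round constants already folded into the message words (the _mm_add_epi32 of a TMSG register with K_mm, and _mm_shuffle_epi32(MSG, 0x0E) exposing the next pair). The scalar sketch below is written against FIPS 180-4 rather than taken from Botan; the names sha256_round and rotr are illustrative. It shows the single round that each such pair is built from.

#include <cstdint>

// Rotate right by n bits (0 < n < 32)
static inline uint32_t rotr(uint32_t x, int n)
   {
   return (x >> n) | (x << (32 - n));
   }

// One SHA-256 round per FIPS 180-4; k_plus_w is K[t] + W[t], matching how the
// listing above pre-adds the constants to the message schedule words.
static void sha256_round(uint32_t s[8], uint32_t k_plus_w)
   {
   const uint32_t a = s[0], b = s[1], c = s[2], d = s[3];
   const uint32_t e = s[4], f = s[5], g = s[6], h = s[7];

   const uint32_t Ch  = (e & f) ^ (~e & g);
   const uint32_t Maj = (a & b) ^ (a & c) ^ (b & c);
   const uint32_t S0  = rotr(a, 2) ^ rotr(a, 13) ^ rotr(a, 22);
   const uint32_t S1  = rotr(e, 6) ^ rotr(e, 11) ^ rotr(e, 25);

   const uint32_t T1 = h + S1 + Ch + k_plus_w;
   const uint32_t T2 = S0 + Maj;

   // Shift the working variables and inject T1/T2
   s[7] = g; s[6] = f; s[5] = e; s[4] = d + T1;
   s[3] = c; s[2] = b; s[1] = a; s[0] = T1 + T2;
   }

The ABEF/CDGH register split seen in the listing is simply the packing the SHA instructions expect; the arithmetic per round is the same as above.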
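
Every executable line in this file reports a count of 0, i.e. the SHA-NI path was never entered during this run. As a hedged illustration (not part of the report), the sketch below shows one way the routine can be exercised end to end: hash data through Botan's public HashFunction API on a build that includes this module and on a CPU that advertises the SHA extensions, since compress_digest_x86 is only reached via the dispatch in sha2_32.cpp (see the comment on source line 14) when that support is available. The expected digest is the FIPS 180-4 known answer for SHA-256("abc"); everything else in the sketch is illustrative.

#include <botan/hash.h>
#include <botan/hex.h>
#include <cassert>
#include <string>

int main()
   {
   // Runtime lookup of SHA-256; returns nullptr if the algorithm was not
   // compiled into this build of the library.
   auto sha256 = Botan::HashFunction::create("SHA-256");
   if(!sha256)
      return 1;

   // On SHA-capable x86 hardware, compressing the padded block may be
   // carried out by SHA_256::compress_digest_x86.
   sha256->update(std::string("abc"));

   // Known answer from FIPS 180-4 for SHA-256("abc")
   const std::string expected =
      "BA7816BF8F01CFEA414140DE5DAE2223B00361A396177A9CB410FF61F20015AD";

   assert(Botan::hex_encode(sha256->final()) == expected);
   return 0;
   }

Whether running such a program turns these counts non-zero still depends on the measured binary: it must be built with BOTAN_HAS_SHA2_32_X86 defined and executed on hardware where Botan's CPU feature detection selects this implementation; otherwise another SHA-256 backend is measured instead.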