/src/openssl/include/crypto/md32_common.h
Line | Count | Source |
1 | | /* |
2 | | * Copyright 1999-2026 The OpenSSL Project Authors. All Rights Reserved. |
3 | | * |
4 | | * Licensed under the Apache License 2.0 (the "License"). You may not use |
5 | | * this file except in compliance with the License. You can obtain a copy |
6 | | * in the file LICENSE in the source distribution or at |
7 | | * https://www.openssl.org/source/license.html |
8 | | */ |
9 | | |
10 | | /*- |
11 | | * This is a generic 32 bit "collector" for message digest algorithms. |
12 | | * Whenever needed it collects input character stream into chunks of |
13 | | * 32 bit values and invokes a block function that performs actual hash |
14 | | * calculations. |
15 | | * |
16 | | * Porting guide. |
17 | | * |
18 | | * Obligatory macros: |
19 | | * |
20 | | * DATA_ORDER_IS_BIG_ENDIAN or DATA_ORDER_IS_LITTLE_ENDIAN |
21 | | * this macro defines byte order of input stream. |
22 | | * HASH_CBLOCK |
23 | | * size of a unit chunk HASH_BLOCK operates on. |
24 | | * HASH_LONG |
25 | | * has to be at least 32 bit wide. |
26 | | * HASH_CTX |
27 | | * context structure that at least contains following |
28 | | * members: |
29 | | * typedef struct { |
30 | | * ... |
31 | | * HASH_LONG Nl,Nh; |
32 | | * either { |
33 | | * HASH_LONG data[HASH_LBLOCK]; |
34 | | * unsigned char data[HASH_CBLOCK]; |
35 | | * }; |
36 | | * unsigned int num; |
37 | | * ... |
38 | | * } HASH_CTX; |
39 | | * data[] vector is expected to be zeroed upon first call to |
40 | | * HASH_UPDATE. |
41 | | * HASH_UPDATE |
42 | | * name of "Update" function, implemented here. |
43 | | * HASH_TRANSFORM |
44 | | * name of "Transform" function, implemented here. |
45 | | * HASH_FINAL |
46 | | * name of "Final" function, implemented here. |
47 | | * HASH_BLOCK_DATA_ORDER |
48 | | * name of "block" function capable of treating *unaligned* input |
49 | | * message in original (data) byte order, implemented externally. |
50 | | * HASH_MAKE_STRING |
 51 |         | * macro that stores the computed digest bytes from the context variables into the caller's output buffer (it emits raw bytes, not an ASCII string). |
52 | | * |
53 | | * MD5 example: |
54 | | * |
55 | | * #define DATA_ORDER_IS_LITTLE_ENDIAN |
56 | | * |
57 | | * #define HASH_LONG MD5_LONG |
58 | | * #define HASH_CTX MD5_CTX |
59 | | * #define HASH_CBLOCK MD5_CBLOCK |
60 | | * #define HASH_UPDATE MD5_Update |
61 | | * #define HASH_TRANSFORM MD5_Transform |
62 | | * #define HASH_FINAL MD5_Final |
63 | | * #define HASH_BLOCK_DATA_ORDER md5_block_data_order |
64 | | */ |
65 | | |
#ifndef OSSL_CRYPTO_MD32_COMMON_H
#define OSSL_CRYPTO_MD32_COMMON_H
#pragma once

#include <openssl/crypto.h>
/*
 * For ossl_(un)likely
 */
#include <internal/common.h>

/*
 * Sanity checks: the including translation unit must supply every
 * obligatory macro listed in the porting guide at the top of this file
 * before including it.
 */
#if !defined(DATA_ORDER_IS_BIG_ENDIAN) && !defined(DATA_ORDER_IS_LITTLE_ENDIAN)
#error "DATA_ORDER must be defined!"
#endif

#ifndef HASH_CBLOCK
#error "HASH_CBLOCK must be defined!"
#endif
#ifndef HASH_LONG
#error "HASH_LONG must be defined!"
#endif
#ifndef HASH_CTX
#error "HASH_CTX must be defined!"
#endif

#ifndef HASH_UPDATE
#error "HASH_UPDATE must be defined!"
#endif
#ifndef HASH_TRANSFORM
#error "HASH_TRANSFORM must be defined!"
#endif
#ifndef HASH_FINAL
#error "HASH_FINAL must be defined!"
#endif

#ifndef HASH_BLOCK_DATA_ORDER
#error "HASH_BLOCK_DATA_ORDER must be defined!"
#endif

/*
 * Rotate the low 32 bits of 'a' left by 'n' bit positions.  The
 * "& 0xffffffff" mask keeps the result correct when the operand type is
 * wider than 32 bits (e.g. a 64-bit MD32_REG_T).  Callers must pass
 * 0 < n < 32: for n == 0 the right shift count would be 32, which is
 * undefined behaviour for a 32-bit operand.
 */
#define ROTATE(a, n) (((a) << (n)) | (((a) & 0xffffffff) >> (32 - (n))))

#ifndef PEDANTIC
#if defined(__GNUC__) && __GNUC__ >= 2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
/*
 * RISC-V with the Zbb or Zbkb extension has native rotate instructions.
 * Only rotate-right immediates exist ("roriw" acts on the low 32 bits of
 * an RV64 register, "rori" on a full RV32 register), so a left rotate by
 * n is encoded as a right rotate by 32 - n.
 */
#if defined(__riscv_zbb) || defined(__riscv_zbkb)
#if __riscv_xlen == 64
#undef ROTATE
#define ROTATE(x, n) ({ MD32_REG_T ret; \
                        asm ("roriw %0, %1, %2" \
                             : "=r"(ret) \
                             : "r"(x), "i"(32 - (n))); ret; })
#endif
#if __riscv_xlen == 32
#undef ROTATE
#define ROTATE(x, n) ({ MD32_REG_T ret; \
                        asm ("rori %0, %1, %2" \
                             : "=r"(ret) \
                             : "r"(x), "i"(32 - (n))); ret; })
#endif
#endif
#endif
#endif

#if defined(DATA_ORDER_IS_BIG_ENDIAN)

/*
 * HOST_c2l(c, l): load four bytes from 'c' (most significant byte first)
 * into the 32-bit value 'l', advancing the pointer 'c' by four.
 * HOST_l2c(l, c): the inverse store.  Both macros evaluate their
 * arguments several times, so 'c' and 'l' must be plain lvalues without
 * side effects.
 */
#define HOST_c2l(c, l) (l = (((unsigned long)(*((c)++))) << 24), \
                        l |= (((unsigned long)(*((c)++))) << 16), \
                        l |= (((unsigned long)(*((c)++))) << 8), \
                        l |= (((unsigned long)(*((c)++)))))
#define HOST_l2c(l, c) (*((c)++) = (unsigned char)(((l) >> 24) & 0xff), \
                        *((c)++) = (unsigned char)(((l) >> 16) & 0xff), \
                        *((c)++) = (unsigned char)(((l) >> 8) & 0xff), \
                        *((c)++) = (unsigned char)(((l)) & 0xff), \
                        l)

#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)

/* Little-endian counterparts: the least significant byte travels first. */
#define HOST_c2l(c, l) (l = (((unsigned long)(*((c)++)))), \
                        l |= (((unsigned long)(*((c)++))) << 8), \
                        l |= (((unsigned long)(*((c)++))) << 16), \
                        l |= (((unsigned long)(*((c)++))) << 24))
#define HOST_l2c(l, c) (*((c)++) = (unsigned char)(((l)) & 0xff), \
                        *((c)++) = (unsigned char)(((l) >> 8) & 0xff), \
                        *((c)++) = (unsigned char)(((l) >> 16) & 0xff), \
                        *((c)++) = (unsigned char)(((l) >> 24) & 0xff), \
                        l)

#endif
152 | | |
153 | | /* |
154 | | * Time for some action :-) |
155 | | */ |
156 | | |
/*
 * HASH_UPDATE - absorb 'len' bytes of message data into the context.
 *
 * Maintains the 64-bit running bit count in c->Nh:c->Nl, buffers partial
 * chunks in c->data and hands every complete HASH_CBLOCK-sized chunk to
 * HASH_BLOCK_DATA_ORDER.  Always returns 1 (the public EVP-era contract
 * keeps an int return for API compatibility).
 *
 * When HASH_UPDATE_THUNK is defined the function is emitted with a
 * void-pointer context parameter so it can be used directly as a generic
 * callback; the pointer is cast back to HASH_CTX * on entry.
 */
#ifdef HASH_UPDATE_THUNK
int HASH_UPDATE(void *cp, const unsigned char *data_, size_t len);
int HASH_UPDATE(void *cp, const unsigned char *data_, size_t len)
#else
int HASH_UPDATE(HASH_CTX *c, const void *data_, size_t len)
#endif
{
#ifdef HASH_UPDATE_THUNK
    HASH_CTX *c = (HASH_CTX *)cp;
#endif
    const unsigned char *data = data_;
    unsigned char *p;
    HASH_LONG l;
    size_t n;

    if (ossl_unlikely(len == 0))
        return 1;

    /*
     * Add len * 8 to the 64-bit message bit count Nh:Nl.  The low word is
     * computed modulo 2^32; a wrap (l < old Nl) carries into Nh, and the
     * bits shifted out of "len << 3" are added via "len >> 29".
     */
    l = (c->Nl + (((HASH_LONG)len) << 3)) & 0xffffffffUL;
    if (ossl_unlikely(l < c->Nl)) /* overflow */
        c->Nh++;
    c->Nh += (HASH_LONG)(len >> 29); /* might cause compiler warning on
                                      * 16-bit */
    c->Nl = l;

    n = c->num;
    if (ossl_likely(n != 0)) {
        /* Gets here if we already have buffered input data */
        p = (unsigned char *)c->data;

        if (len >= HASH_CBLOCK || len + n >= HASH_CBLOCK) {
            /*
             * If there is enough input to fill the buffer then fill the
             * buffer and process a single chunk.
             */
            memcpy(p + n, data, HASH_CBLOCK - n);
            HASH_BLOCK_DATA_ORDER(c, p, 1);
            n = HASH_CBLOCK - n;   /* n is now the number of bytes consumed */
            data += n;
            len -= n;
            c->num = 0;
            /*
             * We use memset rather than OPENSSL_cleanse() here deliberately.
             * Using OPENSSL_cleanse() here could be a performance issue. It
             * will get properly cleansed on finalisation so this isn't a
             * security problem.
             */
            memset(p, 0, HASH_CBLOCK); /* keep it zeroed */
        } else {
            /* Otherwise just keep filling the buffer */
            memcpy(p + n, data, len);
            c->num += (unsigned int)len;
            return 1;
        }
    }

    n = len / HASH_CBLOCK;      /* Get number of input chunks (e.g. multiple of 512 bits for SHA256) */
    if (n > 0) {
        /* Process chunks */
        HASH_BLOCK_DATA_ORDER(c, data, n);
        n *= HASH_CBLOCK;
        data += n;
        len -= n;
    }
    /* Buffer any left over data */
    if (len != 0) {
        p = (unsigned char *)c->data;
        c->num = (unsigned int)len;
        memcpy(p, data, len);
    }
    return 1;
}
229 | | |
/*
 * HASH_TRANSFORM - compress exactly one HASH_CBLOCK-sized chunk, given
 * in input (data) byte order, directly into the context state.  This
 * bypasses the buffering and bit-count bookkeeping that HASH_UPDATE
 * performs, so it is only suitable for callers managing those
 * themselves.
 */
void HASH_TRANSFORM(HASH_CTX *c, const unsigned char *data)
{
    HASH_BLOCK_DATA_ORDER(c, data, 1); /* Process a single chunk */
}
234 | | |
/*
 * HASH_FINAL - finish the digest computation.
 *
 * Applies the standard Merkle-Damgard strengthening: a single 0x80 byte,
 * as many zero bytes as needed to reach 8 bytes short of a chunk
 * boundary, then the 64-bit message bit length in the digest's byte
 * order.  The final chunk(s) are compressed, the (possibly secret)
 * buffered data is wiped with OPENSSL_cleanse(), and the digest is
 * written to 'md' via HASH_MAKE_STRING.  Always returns 1.
 */
int HASH_FINAL(unsigned char *md, HASH_CTX *c)
{
    unsigned char *p = (unsigned char *)c->data;
    size_t n = c->num;

    /*
     * Pad the input by adding a 1 bit + K zero bits + input length (L)
     * as a 64 bit value. K must align the data to a chunk boundary.
     */
    p[n] = 0x80;                /* there is always room for one */
    n++;

    if (n > (HASH_CBLOCK - 8)) {
        /*
         * If there is not enough room in the buffer to add L, then fill the
         * current buffer with zeros, and process the chunk
         */
        memset(p + n, 0, HASH_CBLOCK - n);
        n = 0;
        HASH_BLOCK_DATA_ORDER(c, p, 1);
    }
    /* Add zero padding - but leave enough room for L */
    memset(p + n, 0, HASH_CBLOCK - 8 - n);

    /* Add the 64 bit L value to the end of the buffer */
    p += HASH_CBLOCK - 8;
#if defined(DATA_ORDER_IS_BIG_ENDIAN)
    (void)HOST_l2c(c->Nh, p);
    (void)HOST_l2c(c->Nl, p);
#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
    (void)HOST_l2c(c->Nl, p);
    (void)HOST_l2c(c->Nh, p);
#endif
    p -= HASH_CBLOCK;           /* HOST_l2c advanced p; rewind to chunk start */
    /* Process the final padded chunk */
    HASH_BLOCK_DATA_ORDER(c, p, 1);
    c->num = 0;
    /* Securely wipe the buffered message data (unlike the plain memset
     * in HASH_UPDATE, this one must not be optimised away). */
    OPENSSL_cleanse(p, HASH_CBLOCK);

#ifndef HASH_MAKE_STRING
#error "HASH_MAKE_STRING must be defined!"
#else
    HASH_MAKE_STRING(c, md);
#endif

    return 1;
}
282 | | |
283 | | #ifndef MD32_REG_T |
284 | | #if defined(__alpha) || defined(__sparcv9) || defined(__mips) |
285 | | #define MD32_REG_T long |
286 | | /* |
287 | | * This comment was originally written for MD5, which is why it |
288 | | * discusses A-D. But it basically applies to all 32-bit digests, |
289 | | * which is why it was moved to common header file. |
290 | | * |
291 | | * In case you wonder why A-D are declared as long and not |
292 | | * as MD5_LONG. Doing so results in slight performance |
293 | | * boost on LP64 architectures. The catch is we don't |
294 | | * really care if 32 MSBs of a 64-bit register get polluted |
295 | | * with eventual overflows as we *save* only 32 LSBs in |
296 | | * *either* case. Now declaring 'em long excuses the compiler |
297 | | * from keeping 32 MSBs zeroed resulting in 13% performance |
298 | | * improvement under SPARC Solaris7/64 and 5% under AlphaLinux. |
299 | | * Well, to be honest it should say that this *prevents* |
300 | | * performance degradation. |
301 | | */ |
302 | | #else |
303 | | /* |
304 | | * Above is not absolute and there are LP64 compilers that |
305 | | * generate better code if MD32_REG_T is defined int. The above |
306 | | * pre-processor condition reflects the circumstances under which |
307 | | * the conclusion was made and is subject to further extension. |
308 | | */ |
309 | | #define MD32_REG_T int |
310 | | #endif |
311 | | #endif |
312 | | |
313 | | #endif |