/src/nss-nspr/nss/lib/freebl/verified/Hacl_Ed25519.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* MIT License |
2 | | * |
3 | | * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation |
4 | | * Copyright (c) 2022-2023 HACL* Contributors |
5 | | * |
6 | | * Permission is hereby granted, free of charge, to any person obtaining a copy |
7 | | * of this software and associated documentation files (the "Software"), to deal |
8 | | * in the Software without restriction, including without limitation the rights |
9 | | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell |
10 | | * copies of the Software, and to permit persons to whom the Software is |
11 | | * furnished to do so, subject to the following conditions: |
12 | | * |
13 | | * The above copyright notice and this permission notice shall be included in all |
14 | | * copies or substantial portions of the Software. |
15 | | * |
16 | | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
17 | | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
18 | | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
19 | | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
20 | | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
21 | | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
22 | | * SOFTWARE. |
23 | | */ |
24 | | |
25 | | #include "internal/Hacl_Ed25519.h" |
26 | | |
27 | | #include "internal/Hacl_Krmllib.h" |
28 | | #include "internal/Hacl_Ed25519_PrecompTable.h" |
29 | | #include "internal/Hacl_Curve25519_51.h" |
30 | | #include "internal/Hacl_Bignum_Base.h" |
31 | | #include "internal/Hacl_Bignum25519_51.h" |
32 | | |
33 | | #include "../Hacl_Hash_SHA2_shim.h" |
34 | | |
/* Field addition in GF(2^255 - 19): out = x + y (51-bit limb representation).
 * Thin alias over the shared Curve25519 field arithmetic. */
static inline void
fsum(uint64_t *out, uint64_t *x, uint64_t *y)
{
    Hacl_Impl_Curve25519_Field51_fadd(out, x, y);
}
40 | | |
/* Field subtraction in GF(2^255 - 19): out = x - y (51-bit limb representation).
 * Thin alias over the shared Curve25519 field arithmetic. */
static inline void
fdifference(uint64_t *out, uint64_t *x, uint64_t *y)
{
    Hacl_Impl_Curve25519_Field51_fsub(out, x, y);
}
46 | | |
/* Carry-propagate a field element in place so every limb fits in 51 bits
 * (plus a small excess folded back via 19, since 2^255 == 19 mod p).
 * The result is reduced to < 2^255 + small, not necessarily < p; use
 * reduce() for a canonical representative. */
void
Hacl_Bignum25519_reduce_513(uint64_t *a)
{
    uint64_t mask51 = (uint64_t)0x7ffffffffffffU;
    uint64_t limbs[5U];
    uint64_t carry = (uint64_t)0U;
    /* One pass of carry propagation across the five limbs. */
    for (uint32_t i = (uint32_t)0U; i < (uint32_t)5U; i++) {
        uint64_t v = a[i] + carry;
        limbs[i] = v & mask51;
        carry = v >> (uint32_t)51U;
    }
    /* Fold the top carry back into limb 0: 2^255 == 19 (mod p),
     * then let the resulting carry spill into limb 1. */
    uint64_t v0 = limbs[0U] + carry * (uint64_t)19U;
    a[0U] = v0 & mask51;
    a[1U] = limbs[1U] + (v0 >> (uint32_t)51U);
    a[2U] = limbs[2U];
    a[3U] = limbs[3U];
    a[4U] = limbs[4U];
}
79 | | |
80 | | static inline void |
81 | | fmul0(uint64_t *output, uint64_t *input, uint64_t *input2) |
82 | 0 | { |
83 | 0 | FStar_UInt128_uint128 tmp[10U]; |
84 | 0 | for (uint32_t _i = 0U; _i < (uint32_t)10U; ++_i) |
85 | 0 | tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U); |
86 | 0 | Hacl_Impl_Curve25519_Field51_fmul(output, input, input2, tmp); |
87 | 0 | } |
88 | | |
/* out = 2 * a, limb-wise (no carry propagation; limbs may grow by one bit). */
static inline void
times_2(uint64_t *out, uint64_t *a)
{
    uint64_t doubled[5U];
    /* Read all limbs before writing, so out may alias a. */
    for (uint32_t i = (uint32_t)0U; i < (uint32_t)5U; i++) {
        doubled[i] = (uint64_t)2U * a[i];
    }
    for (uint32_t i = (uint32_t)0U; i < (uint32_t)5U; i++) {
        out[i] = doubled[i];
    }
}
108 | | |
/* out = d * a, where d is the Edwards curve constant of ed25519
 * (-121665/121666 mod p) in 51-bit limb form. */
static inline void
times_d(uint64_t *out, uint64_t *a)
{
    uint64_t d[5U] = {
        (uint64_t)0x00034dca135978a3U,
        (uint64_t)0x0001a8283b156ebdU,
        (uint64_t)0x0005e7a26001c029U,
        (uint64_t)0x000739c663a03cbbU,
        (uint64_t)0x00052036cee2b6ffU
    };
    fmul0(out, d, a);
}
120 | | |
/* out = 2d * a, where 2d is twice the ed25519 Edwards constant,
 * precomputed in 51-bit limb form (used by the point-addition formulas). */
static inline void
times_2d(uint64_t *out, uint64_t *a)
{
    uint64_t two_d[5U] = {
        (uint64_t)0x00069b9426b2f159U,
        (uint64_t)0x00035050762add7aU,
        (uint64_t)0x0003cf44c0038052U,
        (uint64_t)0x0006738cc7407977U,
        (uint64_t)0x0002406d9dc56dffU
    };
    fmul0(out, two_d, a);
}
132 | | |
133 | | static inline void |
134 | | fsquare(uint64_t *out, uint64_t *a) |
135 | 0 | { |
136 | 0 | FStar_UInt128_uint128 tmp[5U]; |
137 | 0 | for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i) |
138 | 0 | tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U); |
139 | 0 | Hacl_Impl_Curve25519_Field51_fsqr(out, a, tmp); |
140 | 0 | } |
141 | | |
142 | | static inline void |
143 | | fsquare_times(uint64_t *output, uint64_t *input, uint32_t count) |
144 | 0 | { |
145 | 0 | FStar_UInt128_uint128 tmp[5U]; |
146 | 0 | for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i) |
147 | 0 | tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U); |
148 | 0 | Hacl_Curve25519_51_fsquare_times(output, input, tmp, count); |
149 | 0 | } |
150 | | |
151 | | static inline void |
152 | | fsquare_times_inplace(uint64_t *output, uint32_t count) |
153 | 0 | { |
154 | 0 | FStar_UInt128_uint128 tmp[5U]; |
155 | 0 | for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i) |
156 | 0 | tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U); |
157 | 0 | Hacl_Curve25519_51_fsquare_times(output, output, tmp, count); |
158 | 0 | } |
159 | | |
160 | | void |
161 | | Hacl_Bignum25519_inverse(uint64_t *out, uint64_t *a) |
162 | 0 | { |
163 | 0 | FStar_UInt128_uint128 tmp[10U]; |
164 | 0 | for (uint32_t _i = 0U; _i < (uint32_t)10U; ++_i) |
165 | 0 | tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U); |
166 | 0 | Hacl_Curve25519_51_finv(out, a, tmp); |
167 | 0 | } |
168 | | |
/* Fully reduce a field element in place to its canonical representative
 * in [0, p), p = 2^255 - 19: first a carry-propagation pass (identical in
 * structure to Hacl_Bignum25519_reduce_513), then a constant-time
 * conditional subtraction of p using branchless masks. */
static inline void
reduce(uint64_t *out)
{
    uint64_t o0 = out[0U];
    uint64_t o1 = out[1U];
    uint64_t o2 = out[2U];
    uint64_t o3 = out[3U];
    uint64_t o4 = out[4U];
    /* Carry chain: keep 51 bits per limb, push the excess upward. */
    uint64_t l_ = o0 + (uint64_t)0U;
    uint64_t tmp0 = l_ & (uint64_t)0x7ffffffffffffU;
    uint64_t c0 = l_ >> (uint32_t)51U;
    uint64_t l_0 = o1 + c0;
    uint64_t tmp1 = l_0 & (uint64_t)0x7ffffffffffffU;
    uint64_t c1 = l_0 >> (uint32_t)51U;
    uint64_t l_1 = o2 + c1;
    uint64_t tmp2 = l_1 & (uint64_t)0x7ffffffffffffU;
    uint64_t c2 = l_1 >> (uint32_t)51U;
    uint64_t l_2 = o3 + c2;
    uint64_t tmp3 = l_2 & (uint64_t)0x7ffffffffffffU;
    uint64_t c3 = l_2 >> (uint32_t)51U;
    uint64_t l_3 = o4 + c3;
    uint64_t tmp4 = l_3 & (uint64_t)0x7ffffffffffffU;
    uint64_t c4 = l_3 >> (uint32_t)51U;
    /* Fold the top carry back in: 2^255 == 19 (mod p). */
    uint64_t l_4 = tmp0 + c4 * (uint64_t)19U;
    uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU;
    uint64_t c5 = l_4 >> (uint32_t)51U;
    uint64_t f0 = tmp0_;
    uint64_t f1 = tmp1 + c5;
    uint64_t f2 = tmp2;
    uint64_t f3 = tmp3;
    uint64_t f4 = tmp4;
    /* mask is all-ones exactly when f >= p, i.e. f0 >= 2^51 - 19 and the
     * four upper limbs are all 2^51 - 1. The mask helpers are branchless
     * (constant-time), so no secret-dependent branch is taken. */
    uint64_t m0 = FStar_UInt64_gte_mask(f0, (uint64_t)0x7ffffffffffedU);
    uint64_t m1 = FStar_UInt64_eq_mask(f1, (uint64_t)0x7ffffffffffffU);
    uint64_t m2 = FStar_UInt64_eq_mask(f2, (uint64_t)0x7ffffffffffffU);
    uint64_t m3 = FStar_UInt64_eq_mask(f3, (uint64_t)0x7ffffffffffffU);
    uint64_t m4 = FStar_UInt64_eq_mask(f4, (uint64_t)0x7ffffffffffffU);
    uint64_t mask = (((m0 & m1) & m2) & m3) & m4;
    /* Conditionally subtract p limb-wise (no-op when mask == 0). */
    uint64_t f0_ = f0 - (mask & (uint64_t)0x7ffffffffffedU);
    uint64_t f1_ = f1 - (mask & (uint64_t)0x7ffffffffffffU);
    uint64_t f2_ = f2 - (mask & (uint64_t)0x7ffffffffffffU);
    uint64_t f3_ = f3 - (mask & (uint64_t)0x7ffffffffffffU);
    uint64_t f4_ = f4 - (mask & (uint64_t)0x7ffffffffffffU);
    uint64_t f01 = f0_;
    uint64_t f11 = f1_;
    uint64_t f21 = f2_;
    uint64_t f31 = f3_;
    uint64_t f41 = f4_;
    out[0U] = f01;
    out[1U] = f11;
    out[2U] = f21;
    out[3U] = f31;
    out[4U] = f41;
}
222 | | |
/* Deserialize a 32-byte little-endian field element into five 51-bit limbs.
 * The top bit of the input (bit 255) is masked off, matching the RFC 8032
 * convention that it carries the x-coordinate sign, not part of y. */
void
Hacl_Bignum25519_load_51(uint64_t *output, uint8_t *input)
{
    uint64_t words[4U] = { 0U };
    for (uint32_t i = (uint32_t)0U; i < (uint32_t)4U; i++) {
        words[i] = load64_le(input + i * (uint32_t)8U);
    }
    /* Drop bit 255 before re-packing into 51-bit limbs. */
    words[3U] = words[3U] & (uint64_t)0x7fffffffffffffffU;
    output[0U] = words[0U] & (uint64_t)0x7ffffffffffffU;
    output[1U] = words[0U] >> (uint32_t)51U | (words[1U] & (uint64_t)0x3fffffffffU) << (uint32_t)13U;
    output[2U] = words[1U] >> (uint32_t)38U | (words[2U] & (uint64_t)0x1ffffffU) << (uint32_t)26U;
    output[3U] = words[2U] >> (uint32_t)25U | (words[3U] & (uint64_t)0xfffU) << (uint32_t)39U;
    output[4U] = words[3U] >> (uint32_t)12U;
}
245 | | |
/* Serialize a field element (five 51-bit limbs) to 32 little-endian bytes.
 * store_felem packs the limbs into four 64-bit words (performing the final
 * canonical reduction); we then emit each word little-endian. */
void
Hacl_Bignum25519_store_51(uint8_t *output, uint64_t *input)
{
    uint64_t words[4U] = { 0U };
    Hacl_Impl_Curve25519_Field51_store_felem(words, input);
    for (uint32_t i = (uint32_t)0U; i < (uint32_t)4U; i++) {
        store64_le(output + i * (uint32_t)8U, words[i]);
    }
}
257 | | |
/* Point doubling on the twisted Edwards curve in extended coordinates
 * (X : Y : Z : T). Computes out = 2 * p using the standard a = -1 doubling
 * formulas: with A = x1^2, B = y1^2, C = 2*z1^2, H = A + B, G = A - B,
 * E = H - (x1 + y1)^2 and F = C + G, the result is
 * X3 = E*F, Y3 = G*H, T3 = E*H, Z3 = F*G.
 * out and p may alias (all inputs are consumed into tmp first). */
void
Hacl_Impl_Ed25519_PointDouble_point_double(uint64_t *out, uint64_t *p)
{
    uint64_t tmp[20U] = { 0U };
    uint64_t *tmp1 = tmp;
    uint64_t *tmp20 = tmp + (uint32_t)5U;
    uint64_t *tmp30 = tmp + (uint32_t)10U;
    uint64_t *tmp40 = tmp + (uint32_t)15U;
    uint64_t *x10 = p;
    uint64_t *y10 = p + (uint32_t)5U;
    uint64_t *z1 = p + (uint32_t)10U;
    fsquare(tmp1, x10);  /* tmp1 = A = x1^2 */
    fsquare(tmp20, y10); /* tmp20 = B = y1^2 */
    fsum(tmp30, tmp1, tmp20);        /* tmp30 = H = A + B */
    fdifference(tmp40, tmp1, tmp20); /* tmp40 = G = A - B */
    fsquare(tmp1, z1);   /* tmp1 = z1^2 */
    times_2(tmp1, tmp1); /* tmp1 = C = 2*z1^2 */
    uint64_t *tmp10 = tmp;
    uint64_t *tmp2 = tmp + (uint32_t)5U;
    uint64_t *tmp3 = tmp + (uint32_t)10U;
    uint64_t *tmp4 = tmp + (uint32_t)15U;
    uint64_t *x1 = p;
    uint64_t *y1 = p + (uint32_t)5U;
    fsum(tmp2, x1, y1);    /* tmp2 = x1 + y1 */
    fsquare(tmp2, tmp2);   /* tmp2 = (x1 + y1)^2 */
    Hacl_Bignum25519_reduce_513(tmp3);
    fdifference(tmp2, tmp3, tmp2); /* tmp2 = E = H - (x1 + y1)^2 */
    /* Re-normalize before the sum so limbs stay within the fmul0 bound. */
    Hacl_Bignum25519_reduce_513(tmp10);
    Hacl_Bignum25519_reduce_513(tmp4);
    fsum(tmp10, tmp10, tmp4); /* tmp10 = F = C + G */
    uint64_t *tmp_f = tmp;
    uint64_t *tmp_e = tmp + (uint32_t)5U;
    uint64_t *tmp_h = tmp + (uint32_t)10U;
    uint64_t *tmp_g = tmp + (uint32_t)15U;
    uint64_t *x3 = out;
    uint64_t *y3 = out + (uint32_t)5U;
    uint64_t *z3 = out + (uint32_t)10U;
    uint64_t *t3 = out + (uint32_t)15U;
    fmul0(x3, tmp_e, tmp_f); /* X3 = E*F */
    fmul0(y3, tmp_g, tmp_h); /* Y3 = G*H */
    fmul0(t3, tmp_e, tmp_h); /* T3 = E*H */
    fmul0(z3, tmp_f, tmp_g); /* Z3 = F*G */
}
301 | | |
/* Point addition on the twisted Edwards curve in extended coordinates
 * (X : Y : Z : T). Computes out = p + q with the standard a = -1 formulas:
 * A = (y1 - x1)*(y2 - x2), B = (y1 + x1)*(y2 + x2), C = 2d*t1*t2,
 * D = 2*z1*z2, E = B - A, F = D - C, G = D + C, H = B + A, and
 * X3 = E*F, Y3 = G*H, T3 = E*H, Z3 = F*G.
 * out may alias p or q (inputs are fully consumed into tmp first). */
void
Hacl_Impl_Ed25519_PointAdd_point_add(uint64_t *out, uint64_t *p, uint64_t *q)
{
    uint64_t tmp[30U] = { 0U };
    uint64_t *tmp1 = tmp;
    uint64_t *tmp20 = tmp + (uint32_t)5U;
    uint64_t *tmp30 = tmp + (uint32_t)10U;
    uint64_t *tmp40 = tmp + (uint32_t)15U;
    uint64_t *x1 = p;
    uint64_t *y1 = p + (uint32_t)5U;
    uint64_t *x2 = q;
    uint64_t *y2 = q + (uint32_t)5U;
    fdifference(tmp1, y1, x1);  /* tmp1 = y1 - x1 */
    fdifference(tmp20, y2, x2); /* tmp20 = y2 - x2 */
    fmul0(tmp30, tmp1, tmp20);  /* tmp30 = A */
    fsum(tmp1, y1, x1);         /* tmp1 = y1 + x1 */
    fsum(tmp20, y2, x2);        /* tmp20 = y2 + x2 */
    fmul0(tmp40, tmp1, tmp20);  /* tmp40 = B */
    uint64_t *tmp10 = tmp;
    uint64_t *tmp2 = tmp + (uint32_t)5U;
    uint64_t *tmp3 = tmp + (uint32_t)10U;
    uint64_t *tmp4 = tmp + (uint32_t)15U;
    uint64_t *tmp5 = tmp + (uint32_t)20U;
    uint64_t *tmp6 = tmp + (uint32_t)25U;
    uint64_t *z1 = p + (uint32_t)10U;
    uint64_t *t1 = p + (uint32_t)15U;
    uint64_t *z2 = q + (uint32_t)10U;
    uint64_t *t2 = q + (uint32_t)15U;
    times_2d(tmp10, t1);        /* tmp10 = 2d*t1 */
    fmul0(tmp10, tmp10, t2);    /* tmp10 = C = 2d*t1*t2 */
    times_2(tmp2, z1);          /* tmp2 = 2*z1 */
    fmul0(tmp2, tmp2, z2);      /* tmp2 = D = 2*z1*z2 */
    fdifference(tmp5, tmp4, tmp3); /* tmp5 = E = B - A */
    fdifference(tmp6, tmp2, tmp10); /* tmp6 = F = D - C */
    fsum(tmp10, tmp2, tmp10);   /* tmp10 = G = D + C */
    fsum(tmp2, tmp4, tmp3);     /* tmp2 = H = B + A */
    uint64_t *tmp_g = tmp;
    uint64_t *tmp_h = tmp + (uint32_t)5U;
    uint64_t *tmp_e = tmp + (uint32_t)20U;
    uint64_t *tmp_f = tmp + (uint32_t)25U;
    uint64_t *x3 = out;
    uint64_t *y3 = out + (uint32_t)5U;
    uint64_t *z3 = out + (uint32_t)10U;
    uint64_t *t3 = out + (uint32_t)15U;
    fmul0(x3, tmp_e, tmp_f); /* X3 = E*F */
    fmul0(y3, tmp_g, tmp_h); /* Y3 = G*H */
    fmul0(t3, tmp_e, tmp_h); /* T3 = E*H */
    fmul0(z3, tmp_f, tmp_g); /* Z3 = F*G */
}
351 | | |
/* Set b (20 limbs, extended coordinates X:Y:Z:T) to the neutral element
 * of the Edwards curve group: (0, 1, 1, 0). */
void
Hacl_Impl_Ed25519_PointConstants_make_point_inf(uint64_t *b)
{
    for (uint32_t i = (uint32_t)0U; i < (uint32_t)20U; i++) {
        b[i] = (uint64_t)0U;
    }
    b[5U] = (uint64_t)1U;  /* Y = 1 */
    b[10U] = (uint64_t)1U; /* Z = 1 */
}
380 | | |
/* Compute out = z^(2^252 - 2) via a fixed square-and-multiply addition
 * chain. For p = 2^255 - 19 this exponent equals (p + 3) / 8, so the
 * result is the candidate square root used by recover_x (RFC 8032
 * decompression). The chain builds z^(2^k - 1) for k = 1,5,10,20,50,100,250
 * and finishes with two squarings and a multiply by z^2. */
static inline void
pow2_252m2(uint64_t *out, uint64_t *z)
{
    uint64_t buf[20U] = { 0U };
    uint64_t *a = buf;
    uint64_t *t00 = buf + (uint32_t)5U;
    uint64_t *b0 = buf + (uint32_t)10U;
    uint64_t *c0 = buf + (uint32_t)15U;
    fsquare_times(a, z, (uint32_t)1U);    /* a = z^2 */
    fsquare_times(t00, a, (uint32_t)2U);  /* t00 = z^8 */
    fmul0(b0, t00, z);                    /* b0 = z^9 */
    fmul0(a, b0, a);                      /* a = z^11 */
    fsquare_times(t00, a, (uint32_t)1U);  /* t00 = z^22 */
    fmul0(b0, t00, b0);                   /* b0 = z^31 = z^(2^5 - 1) */
    fsquare_times(t00, b0, (uint32_t)5U);
    fmul0(b0, t00, b0);                   /* b0 = z^(2^10 - 1) */
    fsquare_times(t00, b0, (uint32_t)10U);
    fmul0(c0, t00, b0);                   /* c0 = z^(2^20 - 1) */
    fsquare_times(t00, c0, (uint32_t)20U);
    fmul0(t00, t00, c0);                  /* t00 = z^(2^40 - 1) */
    fsquare_times_inplace(t00, (uint32_t)10U);
    fmul0(b0, t00, b0);                   /* b0 = z^(2^50 - 1) */
    fsquare_times(t00, b0, (uint32_t)50U);
    /* Same buffer, re-sliced under fresh names (generated-code idiom). */
    uint64_t *a0 = buf;
    uint64_t *t0 = buf + (uint32_t)5U;
    uint64_t *b = buf + (uint32_t)10U;
    uint64_t *c = buf + (uint32_t)15U;
    fsquare_times(a0, z, (uint32_t)1U);   /* a0 = z^2 (recomputed; a was clobbered) */
    fmul0(c, t0, b);                      /* c = z^(2^100 - 1) */
    fsquare_times(t0, c, (uint32_t)100U);
    fmul0(t0, t0, c);                     /* t0 = z^(2^200 - 1) */
    fsquare_times_inplace(t0, (uint32_t)50U);
    fmul0(t0, t0, b);                     /* t0 = z^(2^250 - 1) */
    fsquare_times_inplace(t0, (uint32_t)2U);
    fmul0(out, t0, a0);                   /* out = z^(2^252 - 2) */
}
417 | | |
/* Returns true iff all five limbs of x are zero. Assumes x is already in
 * canonical reduced form (callers run reduce() first), so limb-zero is
 * equivalent to field-zero. */
static inline bool
is_0(uint64_t *x)
{
    uint64_t acc = x[0U] | x[1U] | x[2U] | x[3U] | x[4U];
    return acc == (uint64_t)0U;
}
428 | | |
/* x *= sqrt(-1) mod p, using the precomputed square root of -1 in
 * GF(2^255 - 19) (51-bit limb form). Used when the first square-root
 * candidate in recover_x has the wrong sign of its square. */
static inline void
mul_modp_sqrt_m1(uint64_t *x)
{
    uint64_t sqrt_m1[5U] = {
        (uint64_t)0x00061b274a0ea0b0U,
        (uint64_t)0x0000d5a5fc8f189dU,
        (uint64_t)0x0007ef5e9cbd0c60U,
        (uint64_t)0x00078595a6804c9eU,
        (uint64_t)0x0002b8324804fc1dU
    };
    fmul0(x, x, sqrt_m1);
}
440 | | |
/* Recover the x-coordinate of an Ed25519 point from its y-coordinate and
 * sign bit, per RFC 8032 section 5.1.3: solve x^2 = (y^2 - 1) / (d*y^2 + 1),
 * take the candidate root x2^((p+3)/8) via pow2_252m2, correct it by
 * sqrt(-1) if needed, verify it squares back to x2, and negate it to match
 * `sign` (the parity bit taken from the compressed encoding).
 * Returns false if y >= p, or if x2 has no square root, or if x2 == 0 with
 * sign == 1 (invalid encoding); on success writes x (5 limbs) and returns
 * true. Not constant-time; used only on public data (signature verification). */
static inline bool
recover_x(uint64_t *x, uint64_t *y, uint64_t sign)
{
    uint64_t tmp[15U] = { 0U };
    uint64_t *x2 = tmp;
    uint64_t x00 = y[0U];
    uint64_t x1 = y[1U];
    uint64_t x21 = y[2U];
    uint64_t x30 = y[3U];
    uint64_t x4 = y[4U];
    /* Reject non-canonical y: true iff y >= p = 2^255 - 19 in limb form. */
    bool
        b =
            x00 >= (uint64_t)0x7ffffffffffedU && x1 == (uint64_t)0x7ffffffffffffU && x21 == (uint64_t)0x7ffffffffffffU && x30 == (uint64_t)0x7ffffffffffffU && x4 == (uint64_t)0x7ffffffffffffU;
    bool res;
    if (b) {
        res = false;
    } else {
        uint64_t tmp1[20U] = { 0U };
        uint64_t *one = tmp1;
        uint64_t *y2 = tmp1 + (uint32_t)5U;
        uint64_t *dyyi = tmp1 + (uint32_t)10U;
        uint64_t *dyy = tmp1 + (uint32_t)15U;
        one[0U] = (uint64_t)1U;
        one[1U] = (uint64_t)0U;
        one[2U] = (uint64_t)0U;
        one[3U] = (uint64_t)0U;
        one[4U] = (uint64_t)0U;
        /* x2 = (y^2 - 1) / (d*y^2 + 1) */
        fsquare(y2, y);
        times_d(dyy, y2);
        fsum(dyy, dyy, one);
        Hacl_Bignum25519_reduce_513(dyy);
        Hacl_Bignum25519_inverse(dyyi, dyy);
        fdifference(x2, y2, one);
        fmul0(x2, x2, dyyi);
        reduce(x2);
        bool x2_is_0 = is_0(x2);
        /* z encodes a three-way outcome: 0 = invalid (x2 == 0 but sign set),
         * 1 = done (x == 0), 2 = continue with the square-root computation. */
        uint8_t z;
        if (x2_is_0) {
            if (sign == (uint64_t)0U) {
                x[0U] = (uint64_t)0U;
                x[1U] = (uint64_t)0U;
                x[2U] = (uint64_t)0U;
                x[3U] = (uint64_t)0U;
                x[4U] = (uint64_t)0U;
                z = (uint8_t)1U;
            } else {
                z = (uint8_t)0U;
            }
        } else {
            z = (uint8_t)2U;
        }
        if (z == (uint8_t)0U) {
            res = false;
        } else if (z == (uint8_t)1U) {
            res = true;
        } else {
            uint64_t *x210 = tmp;
            uint64_t *x31 = tmp + (uint32_t)5U;
            uint64_t *t00 = tmp + (uint32_t)10U;
            /* Candidate root: x31 = x2^((p+3)/8). */
            pow2_252m2(x31, x210);
            /* If x31^2 != x2, multiply by sqrt(-1) to get the other candidate. */
            fsquare(t00, x31);
            fdifference(t00, t00, x210);
            Hacl_Bignum25519_reduce_513(t00);
            reduce(t00);
            bool t0_is_0 = is_0(t00);
            if (!t0_is_0) {
                mul_modp_sqrt_m1(x31);
            }
            uint64_t *x211 = tmp;
            uint64_t *x3 = tmp + (uint32_t)5U;
            uint64_t *t01 = tmp + (uint32_t)10U;
            /* Verify the (possibly corrected) candidate: x3^2 must equal x2,
             * otherwise x2 is a non-residue and no point exists for this y. */
            fsquare(t01, x3);
            fdifference(t01, t01, x211);
            Hacl_Bignum25519_reduce_513(t01);
            reduce(t01);
            bool z1 = is_0(t01);
            if (z1 == false) {
                res = false;
            } else {
                uint64_t *x32 = tmp + (uint32_t)5U;
                uint64_t *t0 = tmp + (uint32_t)10U;
                reduce(x32);
                uint64_t x0 = x32[0U];
                uint64_t x01 = x0 & (uint64_t)1U;
                /* If the parity of x doesn't match the requested sign bit,
                 * take the other root: x = p - x (computed as 0 - x). */
                if (!(x01 == sign)) {
                    t0[0U] = (uint64_t)0U;
                    t0[1U] = (uint64_t)0U;
                    t0[2U] = (uint64_t)0U;
                    t0[3U] = (uint64_t)0U;
                    t0[4U] = (uint64_t)0U;
                    fdifference(x32, t0, x32);
                    Hacl_Bignum25519_reduce_513(x32);
                    reduce(x32);
                }
                memcpy(x, x32, (uint32_t)5U * sizeof(uint64_t));
                res = true;
            }
        }
    }
    bool res0 = res;
    return res0;
}
543 | | |
/* Decompress a 32-byte Ed25519 point encoding `s` into extended coordinates
 * out = (X : Y : Z : T) with Z = 1 and T = X*Y. The top bit of s[31] is the
 * sign of x (RFC 8032); the remaining 255 bits are y (load_51 masks bit 255).
 * Returns false if the encoding is invalid (y >= p, or no matching x). */
bool
Hacl_Impl_Ed25519_PointDecompress_point_decompress(uint64_t *out, uint8_t *s)
{
    uint64_t tmp[10U] = { 0U };
    uint64_t *y = tmp;
    uint64_t *x = tmp + (uint32_t)5U;
    uint8_t s31 = s[31U];
    uint8_t z = s31 >> (uint32_t)7U; /* sign bit of x */
    uint64_t sign = (uint64_t)z;
    Hacl_Bignum25519_load_51(y, s);
    bool z0 = recover_x(x, y, sign);
    bool res;
    if (z0 == false) {
        res = false;
    } else {
        uint64_t *outx = out;
        uint64_t *outy = out + (uint32_t)5U;
        uint64_t *outz = out + (uint32_t)10U;
        uint64_t *outt = out + (uint32_t)15U;
        memcpy(outx, x, (uint32_t)5U * sizeof(uint64_t));
        memcpy(outy, y, (uint32_t)5U * sizeof(uint64_t));
        /* Affine point: Z = 1, T = X*Y. */
        outz[0U] = (uint64_t)1U;
        outz[1U] = (uint64_t)0U;
        outz[2U] = (uint64_t)0U;
        outz[3U] = (uint64_t)0U;
        outz[4U] = (uint64_t)0U;
        fmul0(outt, x, y);
        res = true;
    }
    bool res0 = res;
    return res0;
}
576 | | |
/* Compress a point p = (X : Y : Z : T) in extended coordinates to the
 * 32-byte RFC 8032 encoding `z`: the little-endian affine y-coordinate
 * (y = Y/Z) with the parity bit of the affine x-coordinate (x = X/Z)
 * stored in the top bit of byte 31. */
void
Hacl_Impl_Ed25519_PointCompress_point_compress(uint8_t *z, uint64_t *p)
{
    uint64_t tmp[15U] = { 0U };
    /* x and out alias x1 and out1 below — same tmp slices, fresh names
     * (generated-code idiom). */
    uint64_t *x = tmp + (uint32_t)5U;
    uint64_t *out = tmp + (uint32_t)10U;
    uint64_t *zinv1 = tmp;
    uint64_t *x1 = tmp + (uint32_t)5U;
    uint64_t *out1 = tmp + (uint32_t)10U;
    uint64_t *px = p;
    uint64_t *py = p + (uint32_t)5U;
    uint64_t *pz = p + (uint32_t)10U;
    Hacl_Bignum25519_inverse(zinv1, pz);
    fmul0(x1, px, zinv1);  /* x1 = affine x = X/Z */
    reduce(x1);            /* canonical form so bit 0 is the true parity */
    fmul0(out1, py, zinv1); /* out1 = affine y = Y/Z */
    /* Only a 513-reduction here; store_51 performs the final canonical
     * reduction when packing into bytes. */
    Hacl_Bignum25519_reduce_513(out1);
    uint64_t x0 = x[0U];
    uint64_t b = x0 & (uint64_t)1U; /* parity (sign) bit of x */
    Hacl_Bignum25519_store_51(z, out);
    uint8_t xbyte = (uint8_t)b;
    uint8_t o31 = z[31U];
    /* y < 2^255, so bit 7 of byte 31 is free for the sign of x. */
    z[31U] = o31 + (xbyte << (uint32_t)7U);
}
601 | | |
602 | | static inline void |
603 | | barrett_reduction(uint64_t *z, uint64_t *t) |
604 | 0 | { |
605 | 0 | uint64_t t0 = t[0U]; |
606 | 0 | uint64_t t1 = t[1U]; |
607 | 0 | uint64_t t2 = t[2U]; |
608 | 0 | uint64_t t3 = t[3U]; |
609 | 0 | uint64_t t4 = t[4U]; |
610 | 0 | uint64_t t5 = t[5U]; |
611 | 0 | uint64_t t6 = t[6U]; |
612 | 0 | uint64_t t7 = t[7U]; |
613 | 0 | uint64_t t8 = t[8U]; |
614 | 0 | uint64_t t9 = t[9U]; |
615 | 0 | uint64_t m00 = (uint64_t)0x12631a5cf5d3edU; |
616 | 0 | uint64_t m10 = (uint64_t)0xf9dea2f79cd658U; |
617 | 0 | uint64_t m20 = (uint64_t)0x000000000014deU; |
618 | 0 | uint64_t m30 = (uint64_t)0x00000000000000U; |
619 | 0 | uint64_t m40 = (uint64_t)0x00000010000000U; |
620 | 0 | uint64_t m0 = m00; |
621 | 0 | uint64_t m1 = m10; |
622 | 0 | uint64_t m2 = m20; |
623 | 0 | uint64_t m3 = m30; |
624 | 0 | uint64_t m4 = m40; |
625 | 0 | uint64_t m010 = (uint64_t)0x9ce5a30a2c131bU; |
626 | 0 | uint64_t m110 = (uint64_t)0x215d086329a7edU; |
627 | 0 | uint64_t m210 = (uint64_t)0xffffffffeb2106U; |
628 | 0 | uint64_t m310 = (uint64_t)0xffffffffffffffU; |
629 | 0 | uint64_t m410 = (uint64_t)0x00000fffffffffU; |
630 | 0 | uint64_t mu0 = m010; |
631 | 0 | uint64_t mu1 = m110; |
632 | 0 | uint64_t mu2 = m210; |
633 | 0 | uint64_t mu3 = m310; |
634 | 0 | uint64_t mu4 = m410; |
635 | 0 | uint64_t y_ = (t5 & (uint64_t)0xffffffU) << (uint32_t)32U; |
636 | 0 | uint64_t x_ = t4 >> (uint32_t)24U; |
637 | 0 | uint64_t z00 = x_ | y_; |
638 | 0 | uint64_t y_0 = (t6 & (uint64_t)0xffffffU) << (uint32_t)32U; |
639 | 0 | uint64_t x_0 = t5 >> (uint32_t)24U; |
640 | 0 | uint64_t z10 = x_0 | y_0; |
641 | 0 | uint64_t y_1 = (t7 & (uint64_t)0xffffffU) << (uint32_t)32U; |
642 | 0 | uint64_t x_1 = t6 >> (uint32_t)24U; |
643 | 0 | uint64_t z20 = x_1 | y_1; |
644 | 0 | uint64_t y_2 = (t8 & (uint64_t)0xffffffU) << (uint32_t)32U; |
645 | 0 | uint64_t x_2 = t7 >> (uint32_t)24U; |
646 | 0 | uint64_t z30 = x_2 | y_2; |
647 | 0 | uint64_t y_3 = (t9 & (uint64_t)0xffffffU) << (uint32_t)32U; |
648 | 0 | uint64_t x_3 = t8 >> (uint32_t)24U; |
649 | 0 | uint64_t z40 = x_3 | y_3; |
650 | 0 | uint64_t q0 = z00; |
651 | 0 | uint64_t q1 = z10; |
652 | 0 | uint64_t q2 = z20; |
653 | 0 | uint64_t q3 = z30; |
654 | 0 | uint64_t q4 = z40; |
655 | 0 | FStar_UInt128_uint128 xy000 = FStar_UInt128_mul_wide(q0, mu0); |
656 | 0 | FStar_UInt128_uint128 xy010 = FStar_UInt128_mul_wide(q0, mu1); |
657 | 0 | FStar_UInt128_uint128 xy020 = FStar_UInt128_mul_wide(q0, mu2); |
658 | 0 | FStar_UInt128_uint128 xy030 = FStar_UInt128_mul_wide(q0, mu3); |
659 | 0 | FStar_UInt128_uint128 xy040 = FStar_UInt128_mul_wide(q0, mu4); |
660 | 0 | FStar_UInt128_uint128 xy100 = FStar_UInt128_mul_wide(q1, mu0); |
661 | 0 | FStar_UInt128_uint128 xy110 = FStar_UInt128_mul_wide(q1, mu1); |
662 | 0 | FStar_UInt128_uint128 xy120 = FStar_UInt128_mul_wide(q1, mu2); |
663 | 0 | FStar_UInt128_uint128 xy130 = FStar_UInt128_mul_wide(q1, mu3); |
664 | 0 | FStar_UInt128_uint128 xy14 = FStar_UInt128_mul_wide(q1, mu4); |
665 | 0 | FStar_UInt128_uint128 xy200 = FStar_UInt128_mul_wide(q2, mu0); |
666 | 0 | FStar_UInt128_uint128 xy210 = FStar_UInt128_mul_wide(q2, mu1); |
667 | 0 | FStar_UInt128_uint128 xy220 = FStar_UInt128_mul_wide(q2, mu2); |
668 | 0 | FStar_UInt128_uint128 xy23 = FStar_UInt128_mul_wide(q2, mu3); |
669 | 0 | FStar_UInt128_uint128 xy24 = FStar_UInt128_mul_wide(q2, mu4); |
670 | 0 | FStar_UInt128_uint128 xy300 = FStar_UInt128_mul_wide(q3, mu0); |
671 | 0 | FStar_UInt128_uint128 xy310 = FStar_UInt128_mul_wide(q3, mu1); |
672 | 0 | FStar_UInt128_uint128 xy32 = FStar_UInt128_mul_wide(q3, mu2); |
673 | 0 | FStar_UInt128_uint128 xy33 = FStar_UInt128_mul_wide(q3, mu3); |
674 | 0 | FStar_UInt128_uint128 xy34 = FStar_UInt128_mul_wide(q3, mu4); |
675 | 0 | FStar_UInt128_uint128 xy400 = FStar_UInt128_mul_wide(q4, mu0); |
676 | 0 | FStar_UInt128_uint128 xy41 = FStar_UInt128_mul_wide(q4, mu1); |
677 | 0 | FStar_UInt128_uint128 xy42 = FStar_UInt128_mul_wide(q4, mu2); |
678 | 0 | FStar_UInt128_uint128 xy43 = FStar_UInt128_mul_wide(q4, mu3); |
679 | 0 | FStar_UInt128_uint128 xy44 = FStar_UInt128_mul_wide(q4, mu4); |
680 | 0 | FStar_UInt128_uint128 z01 = xy000; |
681 | 0 | FStar_UInt128_uint128 z11 = FStar_UInt128_add_mod(xy010, xy100); |
682 | 0 | FStar_UInt128_uint128 z21 = FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy020, xy110), xy200); |
683 | 0 | FStar_UInt128_uint128 |
684 | 0 | z31 = |
685 | 0 | FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy030, xy120), xy210), |
686 | 0 | xy300); |
687 | 0 | FStar_UInt128_uint128 |
688 | 0 | z41 = |
689 | 0 | FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy040, |
690 | 0 | xy130), |
691 | 0 | xy220), |
692 | 0 | xy310), |
693 | 0 | xy400); |
694 | 0 | FStar_UInt128_uint128 |
695 | 0 | z5 = |
696 | 0 | FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy14, xy23), xy32), |
697 | 0 | xy41); |
698 | 0 | FStar_UInt128_uint128 z6 = FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy24, xy33), xy42); |
699 | 0 | FStar_UInt128_uint128 z7 = FStar_UInt128_add_mod(xy34, xy43); |
700 | 0 | FStar_UInt128_uint128 z8 = xy44; |
701 | 0 | FStar_UInt128_uint128 carry0 = FStar_UInt128_shift_right(z01, (uint32_t)56U); |
702 | 0 | FStar_UInt128_uint128 c00 = carry0; |
703 | 0 | FStar_UInt128_uint128 |
704 | 0 | carry1 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z11, c00), (uint32_t)56U); |
705 | 0 | FStar_UInt128_uint128 c10 = carry1; |
706 | 0 | FStar_UInt128_uint128 |
707 | 0 | carry2 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z21, c10), (uint32_t)56U); |
708 | 0 | FStar_UInt128_uint128 c20 = carry2; |
709 | 0 | FStar_UInt128_uint128 |
710 | 0 | carry3 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z31, c20), (uint32_t)56U); |
711 | 0 | FStar_UInt128_uint128 c30 = carry3; |
712 | 0 | FStar_UInt128_uint128 |
713 | 0 | carry4 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z41, c30), (uint32_t)56U); |
714 | 0 | uint64_t |
715 | 0 | t100 = |
716 | 0 | FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z41, c30)) & (uint64_t)0xffffffffffffffU; |
717 | 0 | FStar_UInt128_uint128 c40 = carry4; |
718 | 0 | uint64_t t410 = t100; |
719 | 0 | FStar_UInt128_uint128 |
720 | 0 | carry5 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z5, c40), (uint32_t)56U); |
721 | 0 | uint64_t |
722 | 0 | t101 = |
723 | 0 | FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z5, c40)) & (uint64_t)0xffffffffffffffU; |
724 | 0 | FStar_UInt128_uint128 c5 = carry5; |
725 | 0 | uint64_t t51 = t101; |
726 | 0 | FStar_UInt128_uint128 |
727 | 0 | carry6 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z6, c5), (uint32_t)56U); |
728 | 0 | uint64_t |
729 | 0 | t102 = |
730 | 0 | FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z6, c5)) & (uint64_t)0xffffffffffffffU; |
731 | 0 | FStar_UInt128_uint128 c6 = carry6; |
732 | 0 | uint64_t t61 = t102; |
733 | 0 | FStar_UInt128_uint128 |
734 | 0 | carry7 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z7, c6), (uint32_t)56U); |
735 | 0 | uint64_t |
736 | 0 | t103 = |
737 | 0 | FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z7, c6)) & (uint64_t)0xffffffffffffffU; |
738 | 0 | FStar_UInt128_uint128 c7 = carry7; |
739 | 0 | uint64_t t71 = t103; |
740 | 0 | FStar_UInt128_uint128 |
741 | 0 | carry8 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z8, c7), (uint32_t)56U); |
742 | 0 | uint64_t |
743 | 0 | t104 = |
744 | 0 | FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z8, c7)) & (uint64_t)0xffffffffffffffU; |
745 | 0 | FStar_UInt128_uint128 c8 = carry8; |
746 | 0 | uint64_t t81 = t104; |
747 | 0 | uint64_t t91 = FStar_UInt128_uint128_to_uint64(c8); |
748 | 0 | uint64_t qmu4_ = t410; |
749 | 0 | uint64_t qmu5_ = t51; |
750 | 0 | uint64_t qmu6_ = t61; |
751 | 0 | uint64_t qmu7_ = t71; |
752 | 0 | uint64_t qmu8_ = t81; |
753 | 0 | uint64_t qmu9_ = t91; |
754 | 0 | uint64_t y_4 = (qmu5_ & (uint64_t)0xffffffffffU) << (uint32_t)16U; |
755 | 0 | uint64_t x_4 = qmu4_ >> (uint32_t)40U; |
756 | 0 | uint64_t z02 = x_4 | y_4; |
757 | 0 | uint64_t y_5 = (qmu6_ & (uint64_t)0xffffffffffU) << (uint32_t)16U; |
758 | 0 | uint64_t x_5 = qmu5_ >> (uint32_t)40U; |
759 | 0 | uint64_t z12 = x_5 | y_5; |
760 | 0 | uint64_t y_6 = (qmu7_ & (uint64_t)0xffffffffffU) << (uint32_t)16U; |
761 | 0 | uint64_t x_6 = qmu6_ >> (uint32_t)40U; |
762 | 0 | uint64_t z22 = x_6 | y_6; |
763 | 0 | uint64_t y_7 = (qmu8_ & (uint64_t)0xffffffffffU) << (uint32_t)16U; |
764 | 0 | uint64_t x_7 = qmu7_ >> (uint32_t)40U; |
765 | 0 | uint64_t z32 = x_7 | y_7; |
766 | 0 | uint64_t y_8 = (qmu9_ & (uint64_t)0xffffffffffU) << (uint32_t)16U; |
767 | 0 | uint64_t x_8 = qmu8_ >> (uint32_t)40U; |
768 | 0 | uint64_t z42 = x_8 | y_8; |
769 | 0 | uint64_t qdiv0 = z02; |
770 | 0 | uint64_t qdiv1 = z12; |
771 | 0 | uint64_t qdiv2 = z22; |
772 | 0 | uint64_t qdiv3 = z32; |
773 | 0 | uint64_t qdiv4 = z42; |
774 | 0 | uint64_t r0 = t0; |
775 | 0 | uint64_t r1 = t1; |
776 | 0 | uint64_t r2 = t2; |
777 | 0 | uint64_t r3 = t3; |
778 | 0 | uint64_t r4 = t4 & (uint64_t)0xffffffffffU; |
779 | 0 | FStar_UInt128_uint128 xy00 = FStar_UInt128_mul_wide(qdiv0, m0); |
780 | 0 | FStar_UInt128_uint128 xy01 = FStar_UInt128_mul_wide(qdiv0, m1); |
781 | 0 | FStar_UInt128_uint128 xy02 = FStar_UInt128_mul_wide(qdiv0, m2); |
782 | 0 | FStar_UInt128_uint128 xy03 = FStar_UInt128_mul_wide(qdiv0, m3); |
783 | 0 | FStar_UInt128_uint128 xy04 = FStar_UInt128_mul_wide(qdiv0, m4); |
784 | 0 | FStar_UInt128_uint128 xy10 = FStar_UInt128_mul_wide(qdiv1, m0); |
785 | 0 | FStar_UInt128_uint128 xy11 = FStar_UInt128_mul_wide(qdiv1, m1); |
786 | 0 | FStar_UInt128_uint128 xy12 = FStar_UInt128_mul_wide(qdiv1, m2); |
787 | 0 | FStar_UInt128_uint128 xy13 = FStar_UInt128_mul_wide(qdiv1, m3); |
788 | 0 | FStar_UInt128_uint128 xy20 = FStar_UInt128_mul_wide(qdiv2, m0); |
789 | 0 | FStar_UInt128_uint128 xy21 = FStar_UInt128_mul_wide(qdiv2, m1); |
790 | 0 | FStar_UInt128_uint128 xy22 = FStar_UInt128_mul_wide(qdiv2, m2); |
791 | 0 | FStar_UInt128_uint128 xy30 = FStar_UInt128_mul_wide(qdiv3, m0); |
792 | 0 | FStar_UInt128_uint128 xy31 = FStar_UInt128_mul_wide(qdiv3, m1); |
793 | 0 | FStar_UInt128_uint128 xy40 = FStar_UInt128_mul_wide(qdiv4, m0); |
794 | 0 | FStar_UInt128_uint128 carry9 = FStar_UInt128_shift_right(xy00, (uint32_t)56U); |
795 | 0 | uint64_t t105 = FStar_UInt128_uint128_to_uint64(xy00) & (uint64_t)0xffffffffffffffU; |
796 | 0 | FStar_UInt128_uint128 c0 = carry9; |
797 | 0 | uint64_t t010 = t105; |
798 | 0 | FStar_UInt128_uint128 |
799 | 0 | carry10 = |
800 | 0 | FStar_UInt128_shift_right(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy01, xy10), c0), |
801 | 0 | (uint32_t)56U); |
802 | 0 | uint64_t |
803 | 0 | t106 = |
804 | 0 | FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy01, xy10), c0)) & (uint64_t)0xffffffffffffffU; |
805 | 0 | FStar_UInt128_uint128 c11 = carry10; |
806 | 0 | uint64_t t110 = t106; |
807 | 0 | FStar_UInt128_uint128 |
808 | 0 | carry11 = |
809 | 0 | FStar_UInt128_shift_right(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy02, |
810 | 0 | xy11), |
811 | 0 | xy20), |
812 | 0 | c11), |
813 | 0 | (uint32_t)56U); |
814 | 0 | uint64_t |
815 | 0 | t107 = |
816 | 0 | FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy02, |
817 | 0 | xy11), |
818 | 0 | xy20), |
819 | 0 | c11)) & |
820 | 0 | (uint64_t)0xffffffffffffffU; |
821 | 0 | FStar_UInt128_uint128 c21 = carry11; |
822 | 0 | uint64_t t210 = t107; |
823 | 0 | FStar_UInt128_uint128 |
824 | 0 | carry = |
825 | 0 | FStar_UInt128_shift_right(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy03, |
826 | 0 | xy12), |
827 | 0 | xy21), |
828 | 0 | xy30), |
829 | 0 | c21), |
830 | 0 | (uint32_t)56U); |
831 | 0 | uint64_t |
832 | 0 | t108 = |
833 | 0 | FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy03, |
834 | 0 | xy12), |
835 | 0 | xy21), |
836 | 0 | xy30), |
837 | 0 | c21)) & |
838 | 0 | (uint64_t)0xffffffffffffffU; |
839 | 0 | FStar_UInt128_uint128 c31 = carry; |
840 | 0 | uint64_t t310 = t108; |
841 | 0 | uint64_t |
842 | 0 | t411 = |
843 | 0 | FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy04, |
844 | 0 | xy13), |
845 | 0 | xy22), |
846 | 0 | xy31), |
847 | 0 | xy40), |
848 | 0 | c31)) & |
849 | 0 | (uint64_t)0xffffffffffU; |
850 | 0 | uint64_t qmul0 = t010; |
851 | 0 | uint64_t qmul1 = t110; |
852 | 0 | uint64_t qmul2 = t210; |
853 | 0 | uint64_t qmul3 = t310; |
854 | 0 | uint64_t qmul4 = t411; |
855 | 0 | uint64_t b5 = (r0 - qmul0) >> (uint32_t)63U; |
856 | 0 | uint64_t t109 = (b5 << (uint32_t)56U) + r0 - qmul0; |
857 | 0 | uint64_t c1 = b5; |
858 | 0 | uint64_t t011 = t109; |
859 | 0 | uint64_t b6 = (r1 - (qmul1 + c1)) >> (uint32_t)63U; |
860 | 0 | uint64_t t1010 = (b6 << (uint32_t)56U) + r1 - (qmul1 + c1); |
861 | 0 | uint64_t c2 = b6; |
862 | 0 | uint64_t t111 = t1010; |
863 | 0 | uint64_t b7 = (r2 - (qmul2 + c2)) >> (uint32_t)63U; |
864 | 0 | uint64_t t1011 = (b7 << (uint32_t)56U) + r2 - (qmul2 + c2); |
865 | 0 | uint64_t c3 = b7; |
866 | 0 | uint64_t t211 = t1011; |
867 | 0 | uint64_t b8 = (r3 - (qmul3 + c3)) >> (uint32_t)63U; |
868 | 0 | uint64_t t1012 = (b8 << (uint32_t)56U) + r3 - (qmul3 + c3); |
869 | 0 | uint64_t c4 = b8; |
870 | 0 | uint64_t t311 = t1012; |
871 | 0 | uint64_t b9 = (r4 - (qmul4 + c4)) >> (uint32_t)63U; |
872 | 0 | uint64_t t1013 = (b9 << (uint32_t)40U) + r4 - (qmul4 + c4); |
873 | 0 | uint64_t t412 = t1013; |
874 | 0 | uint64_t s0 = t011; |
875 | 0 | uint64_t s1 = t111; |
876 | 0 | uint64_t s2 = t211; |
877 | 0 | uint64_t s3 = t311; |
878 | 0 | uint64_t s4 = t412; |
879 | 0 | uint64_t m01 = (uint64_t)0x12631a5cf5d3edU; |
880 | 0 | uint64_t m11 = (uint64_t)0xf9dea2f79cd658U; |
881 | 0 | uint64_t m21 = (uint64_t)0x000000000014deU; |
882 | 0 | uint64_t m31 = (uint64_t)0x00000000000000U; |
883 | 0 | uint64_t m41 = (uint64_t)0x00000010000000U; |
884 | 0 | uint64_t y0 = m01; |
885 | 0 | uint64_t y1 = m11; |
886 | 0 | uint64_t y2 = m21; |
887 | 0 | uint64_t y3 = m31; |
888 | 0 | uint64_t y4 = m41; |
889 | 0 | uint64_t b10 = (s0 - y0) >> (uint32_t)63U; |
890 | 0 | uint64_t t1014 = (b10 << (uint32_t)56U) + s0 - y0; |
891 | 0 | uint64_t b0 = b10; |
892 | 0 | uint64_t t01 = t1014; |
893 | 0 | uint64_t b11 = (s1 - (y1 + b0)) >> (uint32_t)63U; |
894 | 0 | uint64_t t1015 = (b11 << (uint32_t)56U) + s1 - (y1 + b0); |
895 | 0 | uint64_t b1 = b11; |
896 | 0 | uint64_t t11 = t1015; |
897 | 0 | uint64_t b12 = (s2 - (y2 + b1)) >> (uint32_t)63U; |
898 | 0 | uint64_t t1016 = (b12 << (uint32_t)56U) + s2 - (y2 + b1); |
899 | 0 | uint64_t b2 = b12; |
900 | 0 | uint64_t t21 = t1016; |
901 | 0 | uint64_t b13 = (s3 - (y3 + b2)) >> (uint32_t)63U; |
902 | 0 | uint64_t t1017 = (b13 << (uint32_t)56U) + s3 - (y3 + b2); |
903 | 0 | uint64_t b3 = b13; |
904 | 0 | uint64_t t31 = t1017; |
905 | 0 | uint64_t b = (s4 - (y4 + b3)) >> (uint32_t)63U; |
906 | 0 | uint64_t t10 = (b << (uint32_t)56U) + s4 - (y4 + b3); |
907 | 0 | uint64_t b4 = b; |
908 | 0 | uint64_t t41 = t10; |
909 | 0 | uint64_t mask = b4 - (uint64_t)1U; |
910 | 0 | uint64_t z03 = s0 ^ (mask & (s0 ^ t01)); |
911 | 0 | uint64_t z13 = s1 ^ (mask & (s1 ^ t11)); |
912 | 0 | uint64_t z23 = s2 ^ (mask & (s2 ^ t21)); |
913 | 0 | uint64_t z33 = s3 ^ (mask & (s3 ^ t31)); |
914 | 0 | uint64_t z43 = s4 ^ (mask & (s4 ^ t41)); |
915 | 0 | uint64_t z04 = z03; |
916 | 0 | uint64_t z14 = z13; |
917 | 0 | uint64_t z24 = z23; |
918 | 0 | uint64_t z34 = z33; |
919 | 0 | uint64_t z44 = z43; |
920 | 0 | uint64_t o0 = z04; |
921 | 0 | uint64_t o1 = z14; |
922 | 0 | uint64_t o2 = z24; |
923 | 0 | uint64_t o3 = z34; |
924 | 0 | uint64_t o4 = z44; |
925 | 0 | uint64_t z0 = o0; |
926 | 0 | uint64_t z1 = o1; |
927 | 0 | uint64_t z2 = o2; |
928 | 0 | uint64_t z3 = o3; |
929 | 0 | uint64_t z4 = o4; |
930 | 0 | z[0U] = z0; |
931 | 0 | z[1U] = z1; |
932 | 0 | z[2U] = z2; |
933 | 0 | z[3U] = z3; |
934 | 0 | z[4U] = z4; |
935 | 0 | } |
936 | | |
/*
 * Scalar multiplication modulo the group order
 * q = 2^252 + 27742317777372353535851937790883648493.
 *
 * x and y are scalars in 5 limbs of 56 bits (radix 2^56); out receives
 * (x * y) mod q in the same representation.  The 5x5 schoolbook product
 * is accumulated into a 10-limb (radix 2^56) intermediate `tmp`, which
 * is then reduced with barrett_reduction.
 */
static inline void
mul_modq(uint64_t *out, uint64_t *x, uint64_t *y)
{
    uint64_t tmp[10U] = { 0U };
    uint64_t x0 = x[0U];
    uint64_t x1 = x[1U];
    uint64_t x2 = x[2U];
    uint64_t x3 = x[3U];
    uint64_t x4 = x[4U];
    uint64_t y0 = y[0U];
    uint64_t y1 = y[1U];
    uint64_t y2 = y[2U];
    uint64_t y3 = y[3U];
    uint64_t y4 = y[4U];
    /* All 25 partial products x_i * y_j as 128-bit values. */
    FStar_UInt128_uint128 xy00 = FStar_UInt128_mul_wide(x0, y0);
    FStar_UInt128_uint128 xy01 = FStar_UInt128_mul_wide(x0, y1);
    FStar_UInt128_uint128 xy02 = FStar_UInt128_mul_wide(x0, y2);
    FStar_UInt128_uint128 xy03 = FStar_UInt128_mul_wide(x0, y3);
    FStar_UInt128_uint128 xy04 = FStar_UInt128_mul_wide(x0, y4);
    FStar_UInt128_uint128 xy10 = FStar_UInt128_mul_wide(x1, y0);
    FStar_UInt128_uint128 xy11 = FStar_UInt128_mul_wide(x1, y1);
    FStar_UInt128_uint128 xy12 = FStar_UInt128_mul_wide(x1, y2);
    FStar_UInt128_uint128 xy13 = FStar_UInt128_mul_wide(x1, y3);
    FStar_UInt128_uint128 xy14 = FStar_UInt128_mul_wide(x1, y4);
    FStar_UInt128_uint128 xy20 = FStar_UInt128_mul_wide(x2, y0);
    FStar_UInt128_uint128 xy21 = FStar_UInt128_mul_wide(x2, y1);
    FStar_UInt128_uint128 xy22 = FStar_UInt128_mul_wide(x2, y2);
    FStar_UInt128_uint128 xy23 = FStar_UInt128_mul_wide(x2, y3);
    FStar_UInt128_uint128 xy24 = FStar_UInt128_mul_wide(x2, y4);
    FStar_UInt128_uint128 xy30 = FStar_UInt128_mul_wide(x3, y0);
    FStar_UInt128_uint128 xy31 = FStar_UInt128_mul_wide(x3, y1);
    FStar_UInt128_uint128 xy32 = FStar_UInt128_mul_wide(x3, y2);
    FStar_UInt128_uint128 xy33 = FStar_UInt128_mul_wide(x3, y3);
    FStar_UInt128_uint128 xy34 = FStar_UInt128_mul_wide(x3, y4);
    FStar_UInt128_uint128 xy40 = FStar_UInt128_mul_wide(x4, y0);
    FStar_UInt128_uint128 xy41 = FStar_UInt128_mul_wide(x4, y1);
    FStar_UInt128_uint128 xy42 = FStar_UInt128_mul_wide(x4, y2);
    FStar_UInt128_uint128 xy43 = FStar_UInt128_mul_wide(x4, y3);
    FStar_UInt128_uint128 xy44 = FStar_UInt128_mul_wide(x4, y4);
    /* Column sums z_k = sum over i + j == k of xy_ij; limbs are < 2^56,
       so each column sum fits in a uint128 without overflow. */
    FStar_UInt128_uint128 z00 = xy00;
    FStar_UInt128_uint128 z10 = FStar_UInt128_add_mod(xy01, xy10);
    FStar_UInt128_uint128 z20 = FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy02, xy11), xy20);
    FStar_UInt128_uint128
        z30 =
            FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy03, xy12), xy21),
                                  xy30);
    FStar_UInt128_uint128
        z40 =
            FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy04,
                                                                                                    xy13),
                                                                              xy22),
                                                        xy31),
                                  xy40);
    FStar_UInt128_uint128
        z50 =
            FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy14, xy23), xy32),
                                  xy41);
    FStar_UInt128_uint128 z60 = FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy24, xy33), xy42);
    FStar_UInt128_uint128 z70 = FStar_UInt128_add_mod(xy34, xy43);
    FStar_UInt128_uint128 z80 = xy44;
    /* Carry propagation: normalize each column to a 56-bit limb and push
       the excess into the next column. */
    FStar_UInt128_uint128 carry0 = FStar_UInt128_shift_right(z00, (uint32_t)56U);
    uint64_t t10 = FStar_UInt128_uint128_to_uint64(z00) & (uint64_t)0xffffffffffffffU;
    FStar_UInt128_uint128 c0 = carry0;
    uint64_t t0 = t10;
    FStar_UInt128_uint128
        carry1 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z10, c0), (uint32_t)56U);
    uint64_t
        t11 =
            FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z10, c0)) & (uint64_t)0xffffffffffffffU;
    FStar_UInt128_uint128 c1 = carry1;
    uint64_t t1 = t11;
    FStar_UInt128_uint128
        carry2 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z20, c1), (uint32_t)56U);
    uint64_t
        t12 =
            FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z20, c1)) & (uint64_t)0xffffffffffffffU;
    FStar_UInt128_uint128 c2 = carry2;
    uint64_t t2 = t12;
    FStar_UInt128_uint128
        carry3 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z30, c2), (uint32_t)56U);
    uint64_t
        t13 =
            FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z30, c2)) & (uint64_t)0xffffffffffffffU;
    FStar_UInt128_uint128 c3 = carry3;
    uint64_t t3 = t13;
    FStar_UInt128_uint128
        carry4 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z40, c3), (uint32_t)56U);
    uint64_t
        t14 =
            FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z40, c3)) & (uint64_t)0xffffffffffffffU;
    FStar_UInt128_uint128 c4 = carry4;
    uint64_t t4 = t14;
    FStar_UInt128_uint128
        carry5 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z50, c4), (uint32_t)56U);
    uint64_t
        t15 =
            FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z50, c4)) & (uint64_t)0xffffffffffffffU;
    FStar_UInt128_uint128 c5 = carry5;
    uint64_t t5 = t15;
    FStar_UInt128_uint128
        carry6 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z60, c5), (uint32_t)56U);
    uint64_t
        t16 =
            FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z60, c5)) & (uint64_t)0xffffffffffffffU;
    FStar_UInt128_uint128 c6 = carry6;
    uint64_t t6 = t16;
    FStar_UInt128_uint128
        carry7 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z70, c6), (uint32_t)56U);
    uint64_t
        t17 =
            FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z70, c6)) & (uint64_t)0xffffffffffffffU;
    FStar_UInt128_uint128 c7 = carry7;
    uint64_t t7 = t17;
    FStar_UInt128_uint128
        carry = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z80, c7), (uint32_t)56U);
    uint64_t
        t =
            FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z80, c7)) & (uint64_t)0xffffffffffffffU;
    FStar_UInt128_uint128 c8 = carry;
    uint64_t t8 = t;
    /* Final carry becomes the 10th (most significant) limb. */
    uint64_t t9 = FStar_UInt128_uint128_to_uint64(c8);
    uint64_t z0 = t0;
    uint64_t z1 = t1;
    uint64_t z2 = t2;
    uint64_t z3 = t3;
    uint64_t z4 = t4;
    uint64_t z5 = t5;
    uint64_t z6 = t6;
    uint64_t z7 = t7;
    uint64_t z8 = t8;
    uint64_t z9 = t9;
    /* Store the wide product and reduce it modulo q. */
    tmp[0U] = z0;
    tmp[1U] = z1;
    tmp[2U] = z2;
    tmp[3U] = z3;
    tmp[4U] = z4;
    tmp[5U] = z5;
    tmp[6U] = z6;
    tmp[7U] = z7;
    tmp[8U] = z8;
    tmp[9U] = z9;
    barrett_reduction(out, tmp);
}
1080 | | |
/*
 * Scalar addition modulo the group order
 * q = 2^252 + 27742317777372353535851937790883648493.
 *
 * x and y are scalars in 5 limbs of 56 bits (radix 2^56); out receives
 * (x + y) mod q.  The sum is computed with explicit carry propagation,
 * then q is subtracted and the reduced or unreduced value is chosen with
 * a constant-time masked select — no secret-dependent branches.
 */
static inline void
add_modq(uint64_t *out, uint64_t *x, uint64_t *y)
{
    uint64_t x0 = x[0U];
    uint64_t x1 = x[1U];
    uint64_t x2 = x[2U];
    uint64_t x3 = x[3U];
    uint64_t x4 = x[4U];
    uint64_t y0 = y[0U];
    uint64_t y1 = y[1U];
    uint64_t y2 = y[2U];
    uint64_t y3 = y[3U];
    uint64_t y4 = y[4U];
    /* Limb-wise addition; each carry is the overflow past 56 bits. */
    uint64_t carry0 = (x0 + y0) >> (uint32_t)56U;
    uint64_t t0 = (x0 + y0) & (uint64_t)0xffffffffffffffU;
    uint64_t t00 = t0;
    uint64_t c0 = carry0;
    uint64_t carry1 = (x1 + y1 + c0) >> (uint32_t)56U;
    uint64_t t1 = (x1 + y1 + c0) & (uint64_t)0xffffffffffffffU;
    uint64_t t10 = t1;
    uint64_t c1 = carry1;
    uint64_t carry2 = (x2 + y2 + c1) >> (uint32_t)56U;
    uint64_t t2 = (x2 + y2 + c1) & (uint64_t)0xffffffffffffffU;
    uint64_t t20 = t2;
    uint64_t c2 = carry2;
    uint64_t carry = (x3 + y3 + c2) >> (uint32_t)56U;
    uint64_t t3 = (x3 + y3 + c2) & (uint64_t)0xffffffffffffffU;
    uint64_t t30 = t3;
    uint64_t c3 = carry;
    uint64_t t4 = x4 + y4 + c3;
    /* Limbs of the group order q, least-significant first. */
    uint64_t m0 = (uint64_t)0x12631a5cf5d3edU;
    uint64_t m1 = (uint64_t)0xf9dea2f79cd658U;
    uint64_t m2 = (uint64_t)0x000000000014deU;
    uint64_t m3 = (uint64_t)0x00000000000000U;
    uint64_t m4 = (uint64_t)0x00000010000000U;
    uint64_t y01 = m0;
    uint64_t y11 = m1;
    uint64_t y21 = m2;
    uint64_t y31 = m3;
    uint64_t y41 = m4;
    /* Subtract q with borrow propagation; each b* is the borrow (0 or 1)
       extracted from the sign bit of the 64-bit difference. */
    uint64_t b5 = (t00 - y01) >> (uint32_t)63U;
    uint64_t t5 = (b5 << (uint32_t)56U) + t00 - y01;
    uint64_t b0 = b5;
    uint64_t t01 = t5;
    uint64_t b6 = (t10 - (y11 + b0)) >> (uint32_t)63U;
    uint64_t t6 = (b6 << (uint32_t)56U) + t10 - (y11 + b0);
    uint64_t b1 = b6;
    uint64_t t11 = t6;
    uint64_t b7 = (t20 - (y21 + b1)) >> (uint32_t)63U;
    uint64_t t7 = (b7 << (uint32_t)56U) + t20 - (y21 + b1);
    uint64_t b2 = b7;
    uint64_t t21 = t7;
    uint64_t b8 = (t30 - (y31 + b2)) >> (uint32_t)63U;
    uint64_t t8 = (b8 << (uint32_t)56U) + t30 - (y31 + b2);
    uint64_t b3 = b8;
    uint64_t t31 = t8;
    uint64_t b = (t4 - (y41 + b3)) >> (uint32_t)63U;
    uint64_t t = (b << (uint32_t)56U) + t4 - (y41 + b3);
    uint64_t b4 = b;
    uint64_t t41 = t;
    /* Final borrow b4 == 1 means sum < q: mask = 0 keeps the unreduced
       sum; b4 == 0: mask = all-ones selects the subtracted value. */
    uint64_t mask = b4 - (uint64_t)1U;
    uint64_t z00 = t00 ^ (mask & (t00 ^ t01));
    uint64_t z10 = t10 ^ (mask & (t10 ^ t11));
    uint64_t z20 = t20 ^ (mask & (t20 ^ t21));
    uint64_t z30 = t30 ^ (mask & (t30 ^ t31));
    uint64_t z40 = t4 ^ (mask & (t4 ^ t41));
    uint64_t z01 = z00;
    uint64_t z11 = z10;
    uint64_t z21 = z20;
    uint64_t z31 = z30;
    uint64_t z41 = z40;
    uint64_t o0 = z01;
    uint64_t o1 = z11;
    uint64_t o2 = z21;
    uint64_t o3 = z31;
    uint64_t o4 = z41;
    uint64_t z0 = o0;
    uint64_t z1 = o1;
    uint64_t z2 = o2;
    uint64_t z3 = o3;
    uint64_t z4 = o4;
    out[0U] = z0;
    out[1U] = z1;
    out[2U] = z2;
    out[3U] = z3;
    out[4U] = z4;
}
1168 | | |
/*
 * Returns true iff the 5-limb scalar s (radix 2^56) is greater than or
 * equal to the group order q = 2^252 + 27742317777372353535851937790883648493.
 * Variable-time by construction (branches on the limb values), matching
 * the original extracted code.
 */
static inline bool
gte_q(uint64_t *s)
{
    /* Limbs of q, least-significant first. */
    static const uint64_t q_limbs[5U] = {
        (uint64_t)0x12631a5cf5d3edU,
        (uint64_t)0xf9dea2f79cd658U,
        (uint64_t)0x000000000014deU,
        (uint64_t)0x00000000000000U,
        (uint64_t)0x00000010000000U
    };
    /* Lexicographic comparison from the most-significant limb down. */
    for (uint32_t i = (uint32_t)5U; i-- > (uint32_t)0U;) {
        if (s[i] > q_limbs[i]) {
            return true;
        }
        if (s[i] < q_limbs[i]) {
            return false;
        }
    }
    /* All limbs equal: s == q, hence s >= q. */
    return true;
}
1203 | | |
/*
 * Limb-wise equality test of two 5-limb values.  Variable-time by
 * construction (early exit on the first differing limb), matching the
 * original extracted code.
 */
static inline bool
eq(uint64_t *a, uint64_t *b)
{
    for (uint32_t i = (uint32_t)0U; i < (uint32_t)5U; i++) {
        if (a[i] != b[i]) {
            return false;
        }
    }
    return true;
}
1219 | | |
/*
 * Projective equality of two points p and q in extended coordinates
 * (X:Y:Z:T), 20 words each.  Two points are equal iff
 * X1*Z2 == X2*Z1 and Y1*Z2 == Y2*Z1; both products are fully reduced
 * before the limb-wise comparison.  Returns early (variable-time) as
 * soon as the x-coordinate check fails.
 */
bool
Hacl_Impl_Ed25519_PointEqual_point_equal(uint64_t *p, uint64_t *q)
{
    uint64_t scratch[20U] = { 0U };
    uint64_t *px_qz = scratch;
    uint64_t *qx_pz = scratch + (uint32_t)5U;
    uint64_t *py_qz = scratch + (uint32_t)10U;
    uint64_t *qy_pz = scratch + (uint32_t)15U;
    /* Compare X1*Z2 against X2*Z1. */
    fmul0(px_qz, p, q + (uint32_t)10U);
    reduce(px_qz);
    fmul0(qx_pz, q, p + (uint32_t)10U);
    reduce(qx_pz);
    if (!eq(px_qz, qx_pz)) {
        return false;
    }
    /* x coordinates agree: compare Y1*Z2 against Y2*Z1 the same way. */
    fmul0(py_qz, p + (uint32_t)5U, q + (uint32_t)10U);
    reduce(py_qz);
    fmul0(qy_pz, q + (uint32_t)5U, p + (uint32_t)10U);
    reduce(qy_pz);
    return eq(py_qz, qy_pz);
}
1242 | | |
/*
 * Point negation in extended coordinates: (X:Y:Z:T) -> (-X:Y:Z:-T).
 * X and T are negated as 0 - v in the field (then re-normalized with
 * reduce_513); Y and Z are copied through unchanged.  out may alias p.
 */
void
Hacl_Impl_Ed25519_PointNegate_point_negate(uint64_t *p, uint64_t *out)
{
    uint64_t zero[5U] = { 0U };
    uint64_t *outx = out;
    uint64_t *outy = out + (uint32_t)5U;
    uint64_t *outz = out + (uint32_t)10U;
    uint64_t *outt = out + (uint32_t)15U;
    /* -X = 0 - X, then bring the limbs back into normal form. */
    fdifference(outx, zero, p);
    Hacl_Bignum25519_reduce_513(outx);
    memcpy(outy, p + (uint32_t)5U, (uint32_t)5U * sizeof(uint64_t));
    memcpy(outz, p + (uint32_t)10U, (uint32_t)5U * sizeof(uint64_t));
    /* -T = 0 - T, same treatment as X. */
    fdifference(outt, zero, p + (uint32_t)15U);
    Hacl_Bignum25519_reduce_513(outt);
}
1267 | | |
/*
 * Constant-time scalar multiplication on Ed25519: out <- [scalar]q.
 * scalar is a 32-byte little-endian scalar; q and out are points in
 * extended coordinates (X:Y:Z:T), 4 x 5 limbs = 20 words each.
 *
 * Fixed-window method with window width 4: a table of the 16 multiples
 * [0]q..[15]q is precomputed, then for each 4-bit window of the scalar
 * (most significant first) the accumulator is doubled 4 times and the
 * matching table entry — fetched with a constant-time masked scan of the
 * whole table — is added.
 */
void
Hacl_Impl_Ed25519_Ladder_point_mul(uint64_t *out, uint8_t *scalar, uint64_t *q)
{
    /* Load the 32-byte scalar into 4 little-endian 64-bit limbs. */
    uint64_t bscalar[4U] = { 0U };
    KRML_MAYBE_FOR4(i,
                    (uint32_t)0U,
                    (uint32_t)4U,
                    (uint32_t)1U,
                    uint64_t *os = bscalar;
                    uint8_t *bj = scalar + i * (uint32_t)8U;
                    uint64_t u = load64_le(bj);
                    uint64_t r = u;
                    uint64_t x = r;
                    os[i] = x;);
    /* table holds 16 points of 20 words: table[i] = [i]q. */
    uint64_t table[320U] = { 0U };
    uint64_t tmp[20U] = { 0U };
    uint64_t *t0 = table;
    uint64_t *t1 = table + (uint32_t)20U;
    Hacl_Impl_Ed25519_PointConstants_make_point_inf(t0);
    memcpy(t1, q, (uint32_t)20U * sizeof(uint64_t));
    /* Fill the table pairwise: [2k+2] = 2*[k+1], [2k+3] = q + [2k+2]. */
    KRML_MAYBE_FOR7(i,
                    (uint32_t)0U,
                    (uint32_t)7U,
                    (uint32_t)1U,
                    uint64_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)20U;
                    Hacl_Impl_Ed25519_PointDouble_point_double(tmp, t11);
                    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)20U,
                           tmp,
                           (uint32_t)20U * sizeof(uint64_t));
                    uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)20U;
                    Hacl_Impl_Ed25519_PointAdd_point_add(tmp, q, t2);
                    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)20U,
                           tmp,
                           (uint32_t)20U * sizeof(uint64_t)););
    Hacl_Impl_Ed25519_PointConstants_make_point_inf(out);
    uint64_t tmp0[20U] = { 0U };
    /* 64 windows of 4 bits each cover the 256-bit scalar. */
    for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)64U; i0++) {
        KRML_MAYBE_FOR4(i,
                        (uint32_t)0U,
                        (uint32_t)4U,
                        (uint32_t)1U,
                        Hacl_Impl_Ed25519_PointDouble_point_double(out, out););
        uint32_t k = (uint32_t)256U - (uint32_t)4U * i0 - (uint32_t)4U;
        uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, bscalar, k, (uint32_t)4U);
        /* Constant-time lookup of table[bits_l]: start from entry 0 and
           mask-select across all 15 remaining entries, so memory access
           pattern does not depend on the secret window value. */
        memcpy(tmp0, (uint64_t *)table, (uint32_t)20U * sizeof(uint64_t));
        KRML_MAYBE_FOR15(
            i1,
            (uint32_t)0U,
            (uint32_t)15U,
            (uint32_t)1U,
            uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + (uint32_t)1U));
            const uint64_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)20U;
            for (uint32_t i = (uint32_t)0U; i < (uint32_t)20U; i++) {
                uint64_t *os = tmp0;
                uint64_t x = (c & res_j[i]) | (~c & tmp0[i]);
                os[i] = x;
            });
        Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp0);
    }
}
1328 | | |
/*
 * Constant-time fetch of table[bits_l] where table holds 16 points of
 * 20 words each.  tmp is pre-loaded with entry 0; the remaining 15
 * entries are all read and the one whose index equals bits_l is merged
 * in with a bit-mask, so neither the memory access pattern nor the
 * timing depends on the (secret) index.
 */
static inline void
precomp_get_consttime(const uint64_t *table, uint64_t bits_l, uint64_t *tmp)
{
    memcpy(tmp, (uint64_t *)table, (uint32_t)20U * sizeof(uint64_t));
    KRML_MAYBE_FOR15(
        i0,
        (uint32_t)0U,
        (uint32_t)15U,
        (uint32_t)1U,
        /* c is all-ones iff bits_l == i0 + 1 (branch-free comparison). */
        uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i0 + (uint32_t)1U));
        const uint64_t *res_j = table + (i0 + (uint32_t)1U) * (uint32_t)20U;
        for (uint32_t i = (uint32_t)0U; i < (uint32_t)20U; i++) {
            uint64_t *os = tmp;
            uint64_t x = (c & res_j[i]) | (~c & tmp[i]);
            os[i] = x;
        });
}
1346 | | |
/*
 * Constant-time fixed-base scalar multiplication: out <- [scalar]G where
 * G is the Ed25519 base point.  scalar is 32 bytes little-endian; out is
 * a point in extended coordinates (20 words).
 *
 * The 256-bit scalar is split into four 64-bit parts r1..r4, and the
 * result is assembled as [r1]G + [r2]G^(2^64) + [r3]G^(2^128) +
 * [r4]G^(2^192) using four precomputed window-4 tables, with the same
 * constant-time masked table lookup as the generic ladder.
 */
static inline void
point_mul_g(uint64_t *out, uint8_t *scalar)
{
    /* Load the 32-byte scalar into 4 little-endian 64-bit limbs. */
    uint64_t bscalar[4U] = { 0U };
    KRML_MAYBE_FOR4(i,
                    (uint32_t)0U,
                    (uint32_t)4U,
                    (uint32_t)1U,
                    uint64_t *os = bscalar;
                    uint8_t *bj = scalar + i * (uint32_t)8U;
                    uint64_t u = load64_le(bj);
                    uint64_t r = u;
                    uint64_t x = r;
                    os[i] = x;);
    /* q1 = the base point G in extended coordinates (X, Y, Z=1, T=X*Y). */
    uint64_t q1[20U] = { 0U };
    uint64_t *gx = q1;
    uint64_t *gy = q1 + (uint32_t)5U;
    uint64_t *gz = q1 + (uint32_t)10U;
    uint64_t *gt = q1 + (uint32_t)15U;
    gx[0U] = (uint64_t)0x00062d608f25d51aU;
    gx[1U] = (uint64_t)0x000412a4b4f6592aU;
    gx[2U] = (uint64_t)0x00075b7171a4b31dU;
    gx[3U] = (uint64_t)0x0001ff60527118feU;
    gx[4U] = (uint64_t)0x000216936d3cd6e5U;
    gy[0U] = (uint64_t)0x0006666666666658U;
    gy[1U] = (uint64_t)0x0004ccccccccccccU;
    gy[2U] = (uint64_t)0x0001999999999999U;
    gy[3U] = (uint64_t)0x0003333333333333U;
    gy[4U] = (uint64_t)0x0006666666666666U;
    gz[0U] = (uint64_t)1U;
    gz[1U] = (uint64_t)0U;
    gz[2U] = (uint64_t)0U;
    gz[3U] = (uint64_t)0U;
    gz[4U] = (uint64_t)0U;
    gt[0U] = (uint64_t)0x00068ab3a5b7dda3U;
    gt[1U] = (uint64_t)0x00000eea2a5eadbbU;
    gt[2U] = (uint64_t)0x0002af8df483c27eU;
    gt[3U] = (uint64_t)0x000332b375274732U;
    gt[4U] = (uint64_t)0x00067875f0fd78b7U;
    /* q2..q4 mirror the base points of the 2^64/2^128/2^192 tables; they
       are kept only for the extracted proof and ignored at runtime (see
       KRML_HOST_IGNORE below). */
    uint64_t
        q2[20U] = {
            (uint64_t)13559344787725U, (uint64_t)2051621493703448U, (uint64_t)1947659315640708U,
            (uint64_t)626856790370168U, (uint64_t)1592804284034836U, (uint64_t)1781728767459187U,
            (uint64_t)278818420518009U, (uint64_t)2038030359908351U, (uint64_t)910625973862690U,
            (uint64_t)471887343142239U, (uint64_t)1298543306606048U, (uint64_t)794147365642417U,
            (uint64_t)129968992326749U, (uint64_t)523140861678572U, (uint64_t)1166419653909231U,
            (uint64_t)2009637196928390U, (uint64_t)1288020222395193U, (uint64_t)1007046974985829U,
            (uint64_t)208981102651386U, (uint64_t)2074009315253380U
        };
    uint64_t
        q3[20U] = {
            (uint64_t)557549315715710U, (uint64_t)196756086293855U, (uint64_t)846062225082495U,
            (uint64_t)1865068224838092U, (uint64_t)991112090754908U, (uint64_t)522916421512828U,
            (uint64_t)2098523346722375U, (uint64_t)1135633221747012U, (uint64_t)858420432114866U,
            (uint64_t)186358544306082U, (uint64_t)1044420411868480U, (uint64_t)2080052304349321U,
            (uint64_t)557301814716724U, (uint64_t)1305130257814057U, (uint64_t)2126012765451197U,
            (uint64_t)1441004402875101U, (uint64_t)353948968859203U, (uint64_t)470765987164835U,
            (uint64_t)1507675957683570U, (uint64_t)1086650358745097U
        };
    uint64_t
        q4[20U] = {
            (uint64_t)1129953239743101U, (uint64_t)1240339163956160U, (uint64_t)61002583352401U,
            (uint64_t)2017604552196030U, (uint64_t)1576867829229863U, (uint64_t)1508654942849389U,
            (uint64_t)270111619664077U, (uint64_t)1253097517254054U, (uint64_t)721798270973250U,
            (uint64_t)161923365415298U, (uint64_t)828530877526011U, (uint64_t)1494851059386763U,
            (uint64_t)662034171193976U, (uint64_t)1315349646974670U, (uint64_t)2199229517308806U,
            (uint64_t)497078277852673U, (uint64_t)1310507715989956U, (uint64_t)1881315714002105U,
            (uint64_t)2214039404983803U, (uint64_t)1331036420272667U
        };
    /* r1..r4: the four 64-bit chunks of the scalar, least significant first. */
    uint64_t *r1 = bscalar;
    uint64_t *r2 = bscalar + (uint32_t)1U;
    uint64_t *r3 = bscalar + (uint32_t)2U;
    uint64_t *r4 = bscalar + (uint32_t)3U;
    Hacl_Impl_Ed25519_PointConstants_make_point_inf(out);
    uint64_t tmp[20U] = { 0U };
    /* 16 iterations of 4 doublings each process all four 64-bit chunks
       in lock-step, one 4-bit window per chunk per iteration. */
    KRML_MAYBE_FOR16(i,
                     (uint32_t)0U,
                     (uint32_t)16U,
                     (uint32_t)1U,
                     KRML_MAYBE_FOR4(i0,
                                     (uint32_t)0U,
                                     (uint32_t)4U,
                                     (uint32_t)1U,
                                     Hacl_Impl_Ed25519_PointDouble_point_double(out, out););
                     uint32_t k = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
                     uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r4, k, (uint32_t)4U);
                     precomp_get_consttime(Hacl_Ed25519_PrecompTable_precomp_g_pow2_192_table_w4, bits_l, tmp);
                     Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp);
                     uint32_t k0 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
                     uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r3, k0, (uint32_t)4U);
                     precomp_get_consttime(Hacl_Ed25519_PrecompTable_precomp_g_pow2_128_table_w4, bits_l0, tmp);
                     Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp);
                     uint32_t k1 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
                     uint64_t bits_l1 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r2, k1, (uint32_t)4U);
                     precomp_get_consttime(Hacl_Ed25519_PrecompTable_precomp_g_pow2_64_table_w4, bits_l1, tmp);
                     Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp);
                     uint32_t k2 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
                     uint64_t bits_l2 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r1, k2, (uint32_t)4U);
                     precomp_get_consttime(Hacl_Ed25519_PrecompTable_precomp_basepoint_table_w4, bits_l2, tmp);
                     Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp););
    /* Ghost values from the F* proof — silence unused-variable warnings. */
    KRML_HOST_IGNORE(q2);
    KRML_HOST_IGNORE(q3);
    KRML_HOST_IGNORE(q4);
}
1451 | | |
/* Double scalar multiplication: out = [scalar1]B + [scalar2]q2, where B is the
 * Ed25519 base point. Variable-time: table indices are derived directly from
 * the scalar bits, so this must only be used with public data (verification).
 * `out` and `q2` are points in extended coordinates (x|y|z|t, 5 limbs each,
 * radix-2^51, 20 words total); scalars are 32-byte little-endian integers. */
static inline void
point_mul_g_double_vartime(uint64_t *out, uint8_t *scalar1, uint8_t *scalar2, uint64_t *q2)
{
    /* tmp layout: [0..19] base point g, [20..23] scalar1 as 4 x 64-bit words,
     * [24..27] scalar2 as 4 x 64-bit words. */
    uint64_t tmp[28U] = { 0U };
    uint64_t *g = tmp;
    uint64_t *bscalar1 = tmp + (uint32_t)20U;
    uint64_t *bscalar2 = tmp + (uint32_t)24U;
    uint64_t *gx = g;
    uint64_t *gy = g + (uint32_t)5U;
    uint64_t *gz = g + (uint32_t)10U;
    uint64_t *gt = g + (uint32_t)15U;
    /* Hard-coded extended coordinates of the Ed25519 base point B,
     * in 51-bit limbs (z = 1, t = x*y). */
    gx[0U] = (uint64_t)0x00062d608f25d51aU;
    gx[1U] = (uint64_t)0x000412a4b4f6592aU;
    gx[2U] = (uint64_t)0x00075b7171a4b31dU;
    gx[3U] = (uint64_t)0x0001ff60527118feU;
    gx[4U] = (uint64_t)0x000216936d3cd6e5U;
    gy[0U] = (uint64_t)0x0006666666666658U;
    gy[1U] = (uint64_t)0x0004ccccccccccccU;
    gy[2U] = (uint64_t)0x0001999999999999U;
    gy[3U] = (uint64_t)0x0003333333333333U;
    gy[4U] = (uint64_t)0x0006666666666666U;
    gz[0U] = (uint64_t)1U;
    gz[1U] = (uint64_t)0U;
    gz[2U] = (uint64_t)0U;
    gz[3U] = (uint64_t)0U;
    gz[4U] = (uint64_t)0U;
    gt[0U] = (uint64_t)0x00068ab3a5b7dda3U;
    gt[1U] = (uint64_t)0x00000eea2a5eadbbU;
    gt[2U] = (uint64_t)0x0002af8df483c27eU;
    gt[3U] = (uint64_t)0x000332b375274732U;
    gt[4U] = (uint64_t)0x00067875f0fd78b7U;
    /* Load scalar1 (32 LE bytes) into 4 little-endian 64-bit words. */
    KRML_MAYBE_FOR4(i,
                    (uint32_t)0U,
                    (uint32_t)4U,
                    (uint32_t)1U,
                    uint64_t *os = bscalar1;
                    uint8_t *bj = scalar1 + i * (uint32_t)8U;
                    uint64_t u = load64_le(bj);
                    uint64_t r = u;
                    uint64_t x = r;
                    os[i] = x;);
    /* Load scalar2 the same way. */
    KRML_MAYBE_FOR4(i,
                    (uint32_t)0U,
                    (uint32_t)4U,
                    (uint32_t)1U,
                    uint64_t *os = bscalar2;
                    uint8_t *bj = scalar2 + i * (uint32_t)8U;
                    uint64_t u = load64_le(bj);
                    uint64_t r = u;
                    uint64_t x = r;
                    os[i] = x;);
    /* Build a runtime window table for q2: table2[i] = [i]q2 for i in 0..31
     * (32 entries of 20 words = 640 words). Entry 0 is the point at infinity,
     * entry 1 is q2; even entries by doubling, odd entries by adding q2. */
    uint64_t table2[640U] = { 0U };
    uint64_t tmp1[20U] = { 0U };
    uint64_t *t0 = table2;
    uint64_t *t1 = table2 + (uint32_t)20U;
    Hacl_Impl_Ed25519_PointConstants_make_point_inf(t0);
    memcpy(t1, q2, (uint32_t)20U * sizeof(uint64_t));
    KRML_MAYBE_FOR15(i,
                     (uint32_t)0U,
                     (uint32_t)15U,
                     (uint32_t)1U,
                     uint64_t *t11 = table2 + (i + (uint32_t)1U) * (uint32_t)20U;
                     Hacl_Impl_Ed25519_PointDouble_point_double(tmp1, t11);
                     memcpy(table2 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)20U,
                            tmp1,
                            (uint32_t)20U * sizeof(uint64_t));
                     uint64_t *t2 = table2 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)20U;
                     Hacl_Impl_Ed25519_PointAdd_point_add(tmp1, q2, t2);
                     memcpy(table2 + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)20U,
                            tmp1,
                            (uint32_t)20U * sizeof(uint64_t)););
    /* Initialize the accumulator with the topmost 5-bit window of each scalar:
     * out = [bits 255..?]B + [bits 255..?]q2 (direct, variable-time lookups
     * into the precomputed base-point table and table2). */
    uint64_t tmp10[20U] = { 0U };
    uint32_t i0 = (uint32_t)255U;
    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, bscalar1, i0, (uint32_t)5U);
    uint32_t bits_l32 = (uint32_t)bits_c;
    const uint64_t
        *a_bits_l = Hacl_Ed25519_PrecompTable_precomp_basepoint_table_w5 + bits_l32 * (uint32_t)20U;
    memcpy(out, (uint64_t *)a_bits_l, (uint32_t)20U * sizeof(uint64_t));
    uint32_t i1 = (uint32_t)255U;
    uint64_t bits_c0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, bscalar2, i1, (uint32_t)5U);
    uint32_t bits_l320 = (uint32_t)bits_c0;
    const uint64_t *a_bits_l0 = table2 + bits_l320 * (uint32_t)20U;
    memcpy(tmp10, (uint64_t *)a_bits_l0, (uint32_t)20U * sizeof(uint64_t));
    Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp10);
    /* Main left-to-right loop: 51 iterations x 5-bit windows cover the
     * remaining 255 bits. Each iteration: 5 doublings, then add the q2-table
     * entry for scalar2's window and the base-table entry for scalar1's. */
    uint64_t tmp11[20U] = { 0U };
    for (uint32_t i = (uint32_t)0U; i < (uint32_t)51U; i++) {
        KRML_MAYBE_FOR5(i2,
                        (uint32_t)0U,
                        (uint32_t)5U,
                        (uint32_t)1U,
                        Hacl_Impl_Ed25519_PointDouble_point_double(out, out););
        uint32_t k = (uint32_t)255U - (uint32_t)5U * i - (uint32_t)5U;
        uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, bscalar2, k, (uint32_t)5U);
        uint32_t bits_l321 = (uint32_t)bits_l;
        const uint64_t *a_bits_l1 = table2 + bits_l321 * (uint32_t)20U;
        memcpy(tmp11, (uint64_t *)a_bits_l1, (uint32_t)20U * sizeof(uint64_t));
        Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp11);
        uint32_t k0 = (uint32_t)255U - (uint32_t)5U * i - (uint32_t)5U;
        uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, bscalar1, k0, (uint32_t)5U);
        uint32_t bits_l322 = (uint32_t)bits_l0;
        const uint64_t
            *a_bits_l2 = Hacl_Ed25519_PrecompTable_precomp_basepoint_table_w5 + bits_l322 * (uint32_t)20U;
        memcpy(tmp11, (uint64_t *)a_bits_l2, (uint32_t)20U * sizeof(uint64_t));
        Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp11);
    }
}
1558 | | |
/* Compute out = [scalar1]B + [scalar2](-q2), i.e. the double scalar
 * multiplication against the NEGATED second point. Used by verification to
 * evaluate [S]B - [h]A. Variable-time (see point_mul_g_double_vartime);
 * all inputs are public during verification. */
static inline void
point_negate_mul_double_g_vartime(
    uint64_t *out,
    uint8_t *scalar1,
    uint8_t *scalar2,
    uint64_t *q2)
{
    uint64_t q2_neg[20U] = { 0U };
    /* Negate q2 into a scratch point, then run the double-scalar multiply. */
    Hacl_Impl_Ed25519_PointNegate_point_negate(q2, q2_neg);
    point_mul_g_double_vartime(out, scalar1, scalar2, q2_neg);
}
1570 | | |
/* Serialize a scalar held in 5 limbs of 56 bits (radix-2^56, as produced by
 * barrett_reduction / mul_modq) into 32 little-endian bytes.
 * The four 64-bit stores are at a 7-byte stride and deliberately OVERLAP:
 * each store writes 8 bytes, but only the low 7 carry the limb's 56 bits;
 * the 8th byte (which is 0, since each limb < 2^56) is overwritten by the
 * next store. The final limb occupies at most 32 bits (the scalar is < 2^253),
 * so a single 32-bit store finishes the 32-byte output. Store order matters. */
static inline void
store_56(uint8_t *out, uint64_t *b)
{
    uint64_t b0 = b[0U];
    uint64_t b1 = b[1U];
    uint64_t b2 = b[2U];
    uint64_t b3 = b[3U];
    uint64_t b4 = b[4U];
    uint32_t b4_ = (uint32_t)b4;
    uint8_t *b8 = out;
    store64_le(b8, b0);
    uint8_t *b80 = out + (uint32_t)7U;
    store64_le(b80, b1);
    uint8_t *b81 = out + (uint32_t)14U;
    store64_le(b81, b2);
    uint8_t *b82 = out + (uint32_t)21U;
    store64_le(b82, b3);
    store32_le(out + (uint32_t)28U, b4_);
}
1590 | | |
/* Deserialize a 512-bit little-endian integer (e.g. a SHA-512 digest) from
 * 64 bytes into 10 limbs of radix 2^56: limbs 0..8 each take 56 bits via
 * overlapping 64-bit loads at a 7-byte stride (mask keeps the low 56 bits),
 * and limb 9 is the final byte (8 bits). 9*56 + 8 = 512 bits total.
 * Reads may overlap but each masked value covers a disjoint 7-byte span. */
static inline void
load_64_bytes(uint64_t *out, uint8_t *b)
{
    uint8_t *b80 = b;
    uint64_t u = load64_le(b80);
    uint64_t z = u;
    uint64_t b0 = z & (uint64_t)0xffffffffffffffU;
    uint8_t *b81 = b + (uint32_t)7U;
    uint64_t u0 = load64_le(b81);
    uint64_t z0 = u0;
    uint64_t b1 = z0 & (uint64_t)0xffffffffffffffU;
    uint8_t *b82 = b + (uint32_t)14U;
    uint64_t u1 = load64_le(b82);
    uint64_t z1 = u1;
    uint64_t b2 = z1 & (uint64_t)0xffffffffffffffU;
    uint8_t *b83 = b + (uint32_t)21U;
    uint64_t u2 = load64_le(b83);
    uint64_t z2 = u2;
    uint64_t b3 = z2 & (uint64_t)0xffffffffffffffU;
    uint8_t *b84 = b + (uint32_t)28U;
    uint64_t u3 = load64_le(b84);
    uint64_t z3 = u3;
    uint64_t b4 = z3 & (uint64_t)0xffffffffffffffU;
    uint8_t *b85 = b + (uint32_t)35U;
    uint64_t u4 = load64_le(b85);
    uint64_t z4 = u4;
    uint64_t b5 = z4 & (uint64_t)0xffffffffffffffU;
    uint8_t *b86 = b + (uint32_t)42U;
    uint64_t u5 = load64_le(b86);
    uint64_t z5 = u5;
    uint64_t b6 = z5 & (uint64_t)0xffffffffffffffU;
    uint8_t *b87 = b + (uint32_t)49U;
    uint64_t u6 = load64_le(b87);
    uint64_t z6 = u6;
    uint64_t b7 = z6 & (uint64_t)0xffffffffffffffU;
    uint8_t *b8 = b + (uint32_t)56U;
    uint64_t u7 = load64_le(b8);
    uint64_t z7 = u7;
    uint64_t b88 = z7 & (uint64_t)0xffffffffffffffU;
    /* Top limb: the single remaining byte (bits 504..511). */
    uint8_t b63 = b[63U];
    uint64_t b9 = (uint64_t)b63;
    out[0U] = b0;
    out[1U] = b1;
    out[2U] = b2;
    out[3U] = b3;
    out[4U] = b4;
    out[5U] = b5;
    out[6U] = b6;
    out[7U] = b7;
    out[8U] = b88;
    out[9U] = b9;
}
1643 | | |
/* Deserialize a 256-bit little-endian integer from 32 bytes into 5 limbs of
 * radix 2^56: limbs 0..3 each take 56 bits via overlapping 64-bit loads at a
 * 7-byte stride, and limb 4 is the final 32 bits. Inverse of store_56. */
static inline void
load_32_bytes(uint64_t *out, uint8_t *b)
{
    uint8_t *b80 = b;
    uint64_t u0 = load64_le(b80);
    uint64_t z = u0;
    uint64_t b0 = z & (uint64_t)0xffffffffffffffU;
    uint8_t *b81 = b + (uint32_t)7U;
    uint64_t u1 = load64_le(b81);
    uint64_t z0 = u1;
    uint64_t b1 = z0 & (uint64_t)0xffffffffffffffU;
    uint8_t *b82 = b + (uint32_t)14U;
    uint64_t u2 = load64_le(b82);
    uint64_t z1 = u2;
    uint64_t b2 = z1 & (uint64_t)0xffffffffffffffU;
    uint8_t *b8 = b + (uint32_t)21U;
    uint64_t u3 = load64_le(b8);
    uint64_t z2 = u3;
    uint64_t b3 = z2 & (uint64_t)0xffffffffffffffU;
    /* Remaining 4 bytes (bits 224..255) form the top limb. */
    uint32_t u = load32_le(b + (uint32_t)28U);
    uint32_t b4 = u;
    uint64_t b41 = (uint64_t)b4;
    out[0U] = b0;
    out[1U] = b1;
    out[2U] = b2;
    out[3U] = b3;
    out[4U] = b41;
}
1672 | | |
/* out = SHA-512(prefix || input) mod q, where q is the Ed25519 group order
 * and `prefix` is 32 bytes. Result is 5 limbs of radix 2^56 (via Barrett
 * reduction of the 512-bit digest). Used to derive the nonce r in signing. */
static inline void
sha512_modq_pre(uint64_t *out, uint8_t *prefix, uint32_t len, uint8_t *input)
{
    uint64_t tmp[10U] = { 0U };
    uint8_t hash[64U] = { 0U };
    sha512_pre_msg(hash, prefix, len, input);
    load_64_bytes(tmp, hash);
    barrett_reduction(out, tmp);
}
1682 | | |
/* out = SHA-512(prefix || prefix2 || input) mod q, with two 32-byte prefixes
 * (typically R and the public key A). Result is 5 limbs of radix 2^56.
 * Used to derive the challenge h = H(R || A || msg) mod q. */
static inline void
sha512_modq_pre_pre2(
    uint64_t *out,
    uint8_t *prefix,
    uint8_t *prefix2,
    uint32_t len,
    uint8_t *input)
{
    uint64_t tmp[10U] = { 0U };
    uint8_t hash[64U] = { 0U };
    sha512_pre_pre2_msg(hash, prefix, prefix2, len, input);
    load_64_bytes(tmp, hash);
    barrett_reduction(out, tmp);
}
1697 | | |
/* out = compress([s]B): multiply the base point by the 32-byte scalar `s`
 * and encode the resulting point into its 32-byte compressed form. */
static inline void
point_mul_g_compress(uint8_t *out, uint8_t *s)
{
    uint64_t tmp[20U] = { 0U };
    point_mul_g(tmp, s);
    Hacl_Impl_Ed25519_PointCompress_point_compress(out, tmp);
}
1705 | | |
/* Expand a 32-byte Ed25519 seed into 64 bytes, per RFC 8032 section 5.1.5:
 * expanded = SHA-512(secret); then clamp the lower 32 bytes (the scalar):
 * clear the 3 low bits (cofactor), clear bit 255, set bit 254.
 * The upper 32 bytes become the nonce-derivation prefix. */
static inline void
secret_expand(uint8_t *expanded, uint8_t *secret)
{
    Hacl_Streaming_SHA2_hash_512(secret, (uint32_t)32U, expanded);
    uint8_t *h_low = expanded;
    uint8_t h_low0 = h_low[0U];
    uint8_t h_low31 = h_low[31U];
    h_low[0U] = h_low0 & (uint8_t)0xf8U;
    h_low[31U] = (h_low31 & (uint8_t)127U) | (uint8_t)64U;
}
1716 | | |
1717 | | /******************************************************************************** |
1718 | | Verified C library for EdDSA signing and verification on the edwards25519 curve. |
1719 | | ********************************************************************************/ |
1720 | | |
1721 | | /** |
1722 | | Compute the public key from the private key. |
1723 | | |
1724 | | The outparam `public_key` points to 32 bytes of valid memory, i.e., uint8_t[32]. |
1725 | | The argument `private_key` points to 32 bytes of valid memory, i.e., uint8_t[32]. |
1726 | | */ |
void
Hacl_Ed25519_secret_to_public(uint8_t *public_key, uint8_t *private_key)
{
    uint8_t expanded_secret[64U] = { 0U };
    /* Derive the clamped scalar a (first 32 bytes of the expansion),
     * then public_key = compress([a]B). */
    secret_expand(expanded_secret, private_key);
    uint8_t *a = expanded_secret;
    point_mul_g_compress(public_key, a);
}
1735 | | |
1736 | | /** |
1737 | | Compute the expanded keys for an Ed25519 signature. |
1738 | | |
1739 | | The outparam `expanded_keys` points to 96 bytes of valid memory, i.e., uint8_t[96]. |
1740 | | The argument `private_key` points to 32 bytes of valid memory, i.e., uint8_t[32]. |
1741 | | |
1742 | | If one needs to sign several messages under the same private key, it is more efficient |
1743 | | to call `expand_keys` only once and `sign_expanded` multiple times, for each message. |
1744 | | */ |
void
Hacl_Ed25519_expand_keys(uint8_t *expanded_keys, uint8_t *private_key)
{
    /* Layout of expanded_keys (96 bytes):
     *   [ 0..31] public key A = compress([s]B)
     *   [32..63] clamped secret scalar s
     *   [64..95] nonce-derivation prefix
     * s_prefix and s alias the same offset on purpose: secret_expand writes
     * 64 bytes (scalar || prefix) starting at offset 32, and s is just the
     * first half of that region. */
    uint8_t *public_key = expanded_keys;
    uint8_t *s_prefix = expanded_keys + (uint32_t)32U;
    uint8_t *s = expanded_keys + (uint32_t)32U;
    secret_expand(s_prefix, private_key);
    point_mul_g_compress(public_key, s);
}
1754 | | |
1755 | | /** |
1756 | | Create an Ed25519 signature with the (precomputed) expanded keys. |
1757 | | |
1758 | | The outparam `signature` points to 64 bytes of valid memory, i.e., uint8_t[64]. |
1759 | | The argument `expanded_keys` points to 96 bytes of valid memory, i.e., uint8_t[96]. |
1760 | | The argument `msg` points to `msg_len` bytes of valid memory, i.e., uint8_t[msg_len]. |
1761 | | |
1762 | | The argument `expanded_keys` is obtained through `expand_keys`. |
1763 | | |
1764 | | If one needs to sign several messages under the same private key, it is more efficient |
1765 | | to call `expand_keys` only once and `sign_expanded` multiple times, for each message. |
1766 | | */ |
void
Hacl_Ed25519_sign_expanded(
    uint8_t *signature,
    uint8_t *expanded_keys,
    uint32_t msg_len,
    uint8_t *msg)
{
    /* signature = R (32 bytes) || S (32 bytes), per RFC 8032 section 5.1.6. */
    uint8_t *rs = signature;
    uint8_t *ss = signature + (uint32_t)32U;
    uint64_t rq[5U] = { 0U };
    uint64_t hq[5U] = { 0U };
    uint8_t rb[32U] = { 0U };
    /* expanded_keys layout: A || s || prefix (see Hacl_Ed25519_expand_keys). */
    uint8_t *public_key = expanded_keys;
    uint8_t *s = expanded_keys + (uint32_t)32U;
    uint8_t *prefix = expanded_keys + (uint32_t)64U;
    /* Deterministic nonce r = SHA-512(prefix || msg) mod q. */
    sha512_modq_pre(rq, prefix, msg_len, msg);
    store_56(rb, rq);
    /* R = compress([r]B). */
    point_mul_g_compress(rs, rb);
    /* Challenge h = SHA-512(R || A || msg) mod q. */
    sha512_modq_pre_pre2(hq, rs, public_key, msg_len, msg);
    /* S = (r + h * s) mod q, computed in 56-bit limbs. */
    uint64_t aq[5U] = { 0U };
    load_32_bytes(aq, s);
    mul_modq(aq, hq, aq);
    add_modq(aq, rq, aq);
    store_56(ss, aq);
}
1792 | | |
1793 | | /** |
1794 | | Create an Ed25519 signature. |
1795 | | |
1796 | | The outparam `signature` points to 64 bytes of valid memory, i.e., uint8_t[64]. |
1797 | | The argument `private_key` points to 32 bytes of valid memory, i.e., uint8_t[32]. |
1798 | | The argument `msg` points to `msg_len` bytes of valid memory, i.e., uint8_t[msg_len]. |
1799 | | |
1800 | | The function first calls `expand_keys` and then invokes `sign_expanded`. |
1801 | | |
1802 | | If one needs to sign several messages under the same private key, it is more efficient |
1803 | | to call `expand_keys` only once and `sign_expanded` multiple times, for each message. |
1804 | | */ |
void
Hacl_Ed25519_sign(uint8_t *signature, uint8_t *private_key, uint32_t msg_len, uint8_t *msg)
{
    /* One-shot signing: expand the key material, then sign.
     * The 96-byte expansion (public key || scalar || prefix) lives on the
     * stack for the duration of the call. */
    uint8_t expanded_keys[96U] = { 0U };
    Hacl_Ed25519_expand_keys(expanded_keys, private_key);
    Hacl_Ed25519_sign_expanded(signature, expanded_keys, msg_len, msg);
}
1812 | | |
1813 | | /** |
1814 | | Verify an Ed25519 signature. |
1815 | | |
1816 | | The function returns `true` if the signature is valid and `false` otherwise. |
1817 | | |
1818 | | The argument `public_key` points to 32 bytes of valid memory, i.e., uint8_t[32]. |
1819 | | The argument `msg` points to `msg_len` bytes of valid memory, i.e., uint8_t[msg_len]. |
1820 | | The argument `signature` points to 64 bytes of valid memory, i.e., uint8_t[64]. |
1821 | | */ |
bool
Hacl_Ed25519_verify(uint8_t *public_key, uint32_t msg_len, uint8_t *msg, uint8_t *signature)
{
    /* Verification per RFC 8032 section 5.1.7:
     * decompress A and R, reject S >= q, recompute the challenge h, and
     * check that [S]B - [h]A == R. */
    uint64_t a_[20U] = { 0U };
    bool b = Hacl_Impl_Ed25519_PointDecompress_point_decompress(a_, public_key);
    if (b) {
        uint64_t r_[20U] = { 0U };
        uint8_t *rs = signature;
        bool b_ = Hacl_Impl_Ed25519_PointDecompress_point_decompress(r_, rs);
        if (b_) {
            uint8_t hb[32U] = { 0U };
            uint8_t *rs1 = signature;
            uint8_t *sb = signature + (uint32_t)32U;
            /* Malleability check: reject signatures with S >= q. */
            uint64_t tmp[5U] = { 0U };
            load_32_bytes(tmp, sb);
            bool b1 = gte_q(tmp);
            bool b10 = b1;
            if (b10) {
                return false;
            }
            /* h = SHA-512(R || A || msg) mod q, serialized into hb. */
            uint64_t tmp0[5U] = { 0U };
            sha512_modq_pre_pre2(tmp0, rs1, public_key, msg_len, msg);
            store_56(hb, tmp0);
            /* exp_d = [S]B + [h](-A); valid iff exp_d == R. */
            uint64_t exp_d[20U] = { 0U };
            point_negate_mul_double_g_vartime(exp_d, sb, hb, a_);
            bool b2 = Hacl_Impl_Ed25519_PointEqual_point_equal(exp_d, r_);
            return b2;
        }
        return false;
    }
    return false;
}