/src/mbedtls/3rdparty/everest/library/Hacl_Curve25519.c
Line | Count | Source |
1 | | /* Copyright (c) INRIA and Microsoft Corporation. All rights reserved. |
2 | | Licensed under the Apache 2.0 License. */ |
3 | | |
4 | | /* This file was generated by KreMLin <https://github.com/FStarLang/kremlin> |
5 | | * KreMLin invocation: /mnt/e/everest/verify/kremlin/krml -fc89 -fparentheses -fno-shadow -header /mnt/e/everest/verify/hdrcLh -minimal -fbuiltin-uint128 -fc89 -fparentheses -fno-shadow -header /mnt/e/everest/verify/hdrcLh -minimal -I /mnt/e/everest/verify/hacl-star/code/lib/kremlin -I /mnt/e/everest/verify/kremlin/kremlib/compat -I /mnt/e/everest/verify/hacl-star/specs -I /mnt/e/everest/verify/hacl-star/specs/old -I . -ccopt -march=native -verbose -ldopt -flto -tmpdir x25519-c -I ../bignum -bundle Hacl.Curve25519=* -minimal -add-include "kremlib.h" -skip-compilation x25519-c/out.krml -o x25519-c/Hacl_Curve25519.c |
6 | | * F* version: 059db0c8 |
7 | | * KreMLin version: 916c37ac |
8 | | */ |
9 | | |
10 | | |
11 | | #include "Hacl_Curve25519.h" |
12 | | |
13 | | extern uint64_t FStar_UInt64_eq_mask(uint64_t x0, uint64_t x1); |
14 | | |
15 | | extern uint64_t FStar_UInt64_gte_mask(uint64_t x0, uint64_t x1); |
16 | | |
17 | | extern uint128_t FStar_UInt128_add(uint128_t x0, uint128_t x1); |
18 | | |
19 | | extern uint128_t FStar_UInt128_add_mod(uint128_t x0, uint128_t x1); |
20 | | |
21 | | extern uint128_t FStar_UInt128_logand(uint128_t x0, uint128_t x1); |
22 | | |
23 | | extern uint128_t FStar_UInt128_shift_right(uint128_t x0, uint32_t x1); |
24 | | |
25 | | extern uint128_t FStar_UInt128_uint64_to_uint128(uint64_t x0); |
26 | | |
27 | | extern uint64_t FStar_UInt128_uint128_to_uint64(uint128_t x0); |
28 | | |
29 | | extern uint128_t FStar_UInt128_mul_wide(uint64_t x0, uint64_t x1); |
30 | | |
/* Fold the overflow above bit 51 of the top limb back into limb 0,
 * using 2^255 = 19 (mod p) for p = 2^255 - 19.  Branch-free. */
static void Hacl_Bignum_Modulo_carry_top(uint64_t *b)
{
  uint64_t top = b[4U];
  uint64_t low = b[0U];
  uint64_t overflow = top >> (uint32_t)51U;
  b[4U] = top & (uint64_t)0x7ffffffffffffU;
  b[0U] = low + (uint64_t)19U * overflow;
}
40 | | |
/* Truncate each of the 5 wide (128-bit) limbs of `input` to its low 64 bits
 * and store them into `output`.  Callers ensure each wide limb already fits
 * in 64 bits at this point. */
inline static void Hacl_Bignum_Fproduct_copy_from_wide_(uint64_t *output, uint128_t *input)
{
  uint32_t i;
  for (i = (uint32_t)0U; i < (uint32_t)5U; i = i + (uint32_t)1U)
  {
    uint128_t xi = input[i];
    output[i] = (uint64_t)xi;
  }
}
50 | | |
/* Multiply-accumulate: output[i] += input[i] * s for all 5 limbs, with the
 * products widened to 128 bits so no precision is lost. */
inline static void
Hacl_Bignum_Fproduct_sum_scalar_multiplication_(uint128_t *output, uint64_t *input, uint64_t s)
{
  uint32_t i;
  for (i = (uint32_t)0U; i < (uint32_t)5U; i = i + (uint32_t)1U)
  {
    uint128_t xi = output[i];
    uint64_t yi = input[i];
    output[i] = xi + (uint128_t)yi * s;
  }
}
62 | | |
/* One carry pass over the wide accumulator: for limbs 0..3, keep the low
 * 51 bits and push the overflow into the next limb.  The overflow of limb 4
 * is left in place; callers fold it back into limb 0 (times 19) afterwards. */
inline static void Hacl_Bignum_Fproduct_carry_wide_(uint128_t *tmp)
{
  uint32_t i;
  for (i = (uint32_t)0U; i < (uint32_t)4U; i = i + (uint32_t)1U)
  {
    uint32_t ctr = i;
    uint128_t tctr = tmp[ctr];
    uint128_t tctrp1 = tmp[ctr + (uint32_t)1U];
    uint64_t r0 = (uint64_t)tctr & (uint64_t)0x7ffffffffffffU;
    uint128_t c = tctr >> (uint32_t)51U;
    tmp[ctr] = (uint128_t)r0;
    tmp[ctr + (uint32_t)1U] = tctrp1 + c;
  }
}
77 | | |
/* Multiply the field element by 2^51 in place: rotate the limbs up by one
 * position and wrap the old top limb around to position 0 scaled by 19
 * (since 2^255 = 19 mod p).  No branches, fixed access pattern. */
inline static void Hacl_Bignum_Fmul_shift_reduce(uint64_t *output)
{
  uint64_t saved_top = output[4U];
  uint32_t j;
  for (j = (uint32_t)4U; j > (uint32_t)0U; j = j - (uint32_t)1U)
  {
    output[j] = output[j - (uint32_t)1U];
  }
  output[0U] = (uint64_t)19U * saved_top;
}
95 | | |
/* Wide schoolbook multiplication: accumulate input * input2 into `output`
 * (5 wide limbs).  After each of the first four digit products, `input` is
 * rotated/reduced in place by one limb (x *= 2^51 mod p), so the caller's
 * `input` buffer is destroyed; the final digit needs no trailing shift. */
static void
Hacl_Bignum_Fmul_mul_shift_reduce_(uint128_t *output, uint64_t *input, uint64_t *input2)
{
  uint32_t i;
  uint64_t input2i;
  {
    uint32_t i0;
    for (i0 = (uint32_t)0U; i0 < (uint32_t)4U; i0 = i0 + (uint32_t)1U)
    {
      uint64_t input2i0 = input2[i0];
      Hacl_Bignum_Fproduct_sum_scalar_multiplication_(output, input, input2i0);
      Hacl_Bignum_Fmul_shift_reduce(input);
    }
  }
  /* Last digit: accumulate without shifting `input` again. */
  i = (uint32_t)4U;
  input2i = input2[i];
  Hacl_Bignum_Fproduct_sum_scalar_multiplication_(output, input, input2i);
}
114 | | |
/* Field multiplication: output <- input * input2 (mod p), p = 2^255 - 19,
 * on 5 x 51-bit limbs.  `input` is first copied into a temporary, so
 * `output` may alias either operand. */
inline static void Hacl_Bignum_Fmul_fmul(uint64_t *output, uint64_t *input, uint64_t *input2)
{
  uint64_t tmp[5U] = { 0U };
  memcpy(tmp, input, (uint32_t)5U * sizeof input[0U]);
  KRML_CHECK_SIZE(sizeof (uint128_t), (uint32_t)5U);
  {
    uint128_t t[5U];
    {
      uint32_t _i;
      for (_i = 0U; _i < (uint32_t)5U; ++_i)
        t[_i] = (uint128_t)(uint64_t)0U;
    }
    {
      uint128_t b4;
      uint128_t b0;
      uint128_t b4_;
      uint128_t b0_;
      uint64_t i0;
      uint64_t i1;
      uint64_t i0_;
      uint64_t i1_;
      /* Wide multiply with interleaved shift-reduce, then one carry pass. */
      Hacl_Bignum_Fmul_mul_shift_reduce_(t, tmp, input2);
      Hacl_Bignum_Fproduct_carry_wide_(t);
      /* Fold the overflow of the top wide limb back into limb 0 (times 19). */
      b4 = t[4U];
      b0 = t[0U];
      b4_ = b4 & (uint128_t)(uint64_t)0x7ffffffffffffU;
      b0_ = b0 + (uint128_t)(uint64_t)19U * (uint64_t)(b4 >> (uint32_t)51U);
      t[4U] = b4_;
      t[0U] = b0_;
      Hacl_Bignum_Fproduct_copy_from_wide_(output, t);
      /* One final carry from limb 0 into limb 1 keeps all limbs in bounds. */
      i0 = output[0U];
      i1 = output[1U];
      i0_ = i0 & (uint64_t)0x7ffffffffffffU;
      i1_ = i1 + (i0 >> (uint32_t)51U);
      output[0U] = i0_;
      output[1U] = i1_;
    }
  }
}
154 | | |
/* Wide squaring of `output` into `tmp`: s_i collects every product
 * r_j * r_k with j + k = i (mod 5); cross terms are doubled (d0, d1, d2, d4)
 * and terms that wrap past limb 4 carry a factor of 19 (2^255 = 19 mod p). */
inline static void Hacl_Bignum_Fsquare_fsquare__(uint128_t *tmp, uint64_t *output)
{
  uint64_t r0 = output[0U];
  uint64_t r1 = output[1U];
  uint64_t r2 = output[2U];
  uint64_t r3 = output[3U];
  uint64_t r4 = output[4U];
  uint64_t d0 = r0 * (uint64_t)2U;
  uint64_t d1 = r1 * (uint64_t)2U;
  uint64_t d2 = r2 * (uint64_t)2U * (uint64_t)19U;
  uint64_t d419 = r4 * (uint64_t)19U;
  uint64_t d4 = d419 * (uint64_t)2U;
  uint128_t s0 = (uint128_t)r0 * r0 + (uint128_t)d4 * r1 + (uint128_t)d2 * r3;
  uint128_t s1 = (uint128_t)d0 * r1 + (uint128_t)d4 * r2 + (uint128_t)(r3 * (uint64_t)19U) * r3;
  uint128_t s2 = (uint128_t)d0 * r2 + (uint128_t)r1 * r1 + (uint128_t)d4 * r3;
  uint128_t s3 = (uint128_t)d0 * r3 + (uint128_t)d1 * r2 + (uint128_t)r4 * d419;
  uint128_t s4 = (uint128_t)d0 * r4 + (uint128_t)d1 * r3 + (uint128_t)r2 * r2;
  tmp[0U] = s0;
  tmp[1U] = s1;
  tmp[2U] = s2;
  tmp[3U] = s3;
  tmp[4U] = s4;
}
178 | | |
/* One full squaring: output <- output^2 (mod p), using `tmp` as the wide
 * scratch accumulator.  Same carry/fold/final-carry sequence as fmul. */
inline static void Hacl_Bignum_Fsquare_fsquare_(uint128_t *tmp, uint64_t *output)
{
  uint128_t b4;
  uint128_t b0;
  uint128_t b4_;
  uint128_t b0_;
  uint64_t i0;
  uint64_t i1;
  uint64_t i0_;
  uint64_t i1_;
  Hacl_Bignum_Fsquare_fsquare__(tmp, output);
  Hacl_Bignum_Fproduct_carry_wide_(tmp);
  /* Fold the overflow of the top wide limb back into limb 0 (times 19). */
  b4 = tmp[4U];
  b0 = tmp[0U];
  b4_ = b4 & (uint128_t)(uint64_t)0x7ffffffffffffU;
  b0_ = b0 + (uint128_t)(uint64_t)19U * (uint64_t)(b4 >> (uint32_t)51U);
  tmp[4U] = b4_;
  tmp[0U] = b0_;
  Hacl_Bignum_Fproduct_copy_from_wide_(output, tmp);
  /* Final carry from limb 0 into limb 1. */
  i0 = output[0U];
  i1 = output[1U];
  i0_ = i0 & (uint64_t)0x7ffffffffffffU;
  i1_ = i1 + (i0 >> (uint32_t)51U);
  output[0U] = i0_;
  output[1U] = i1_;
}
205 | | |
/* Square `input` in place `count1` times (input <- input^(2^count1)).
 * NOTE: the first squaring is unconditional, so count1 == 0 would still
 * square once; all callers in this file pass count1 >= 1. */
static void
Hacl_Bignum_Fsquare_fsquare_times_(uint64_t *input, uint128_t *tmp, uint32_t count1)
{
  uint32_t i;
  Hacl_Bignum_Fsquare_fsquare_(tmp, input);
  for (i = (uint32_t)1U; i < count1; i = i + (uint32_t)1U)
    Hacl_Bignum_Fsquare_fsquare_(tmp, input);
}
214 | | |
/* output <- input^(2^count1) (mod p).  `input` is copied first, so it is
 * left untouched; a stack array of wide limbs serves as scratch space. */
inline static void
Hacl_Bignum_Fsquare_fsquare_times(uint64_t *output, uint64_t *input, uint32_t count1)
{
  KRML_CHECK_SIZE(sizeof (uint128_t), (uint32_t)5U);
  {
    uint128_t t[5U];
    {
      uint32_t _i;
      for (_i = 0U; _i < (uint32_t)5U; ++_i)
        t[_i] = (uint128_t)(uint64_t)0U;
    }
    memcpy(output, input, (uint32_t)5U * sizeof input[0U]);
    Hacl_Bignum_Fsquare_fsquare_times_(output, t, count1);
  }
}
230 | | |
/* In-place variant: output <- output^(2^count1) (mod p). */
inline static void Hacl_Bignum_Fsquare_fsquare_times_inplace(uint64_t *output, uint32_t count1)
{
  KRML_CHECK_SIZE(sizeof (uint128_t), (uint32_t)5U);
  {
    uint128_t t[5U];
    {
      uint32_t _i;
      for (_i = 0U; _i < (uint32_t)5U; ++_i)
        t[_i] = (uint128_t)(uint64_t)0U;
    }
    Hacl_Bignum_Fsquare_fsquare_times_(output, t, count1);
  }
}
244 | | |
/* Field inversion via Fermat's little theorem: out <- z^(p - 2) (mod p),
 * p = 2^255 - 19, using the standard Curve25519 addition chain
 * (254 squarings and 11 multiplications, fixed sequence => constant time).
 * buf holds four 5-limb temporaries: a (=buf), t0 (+5), b (+10), c (+15). */
inline static void Hacl_Bignum_Crecip_crecip(uint64_t *out, uint64_t *z)
{
  uint64_t buf[20U] = { 0U };
  uint64_t *a0 = buf;
  uint64_t *t00 = buf + (uint32_t)5U;
  uint64_t *b0 = buf + (uint32_t)10U;
  uint64_t *t01;
  uint64_t *b1;
  uint64_t *c0;
  uint64_t *a;
  uint64_t *t0;
  uint64_t *b;
  uint64_t *c;
  Hacl_Bignum_Fsquare_fsquare_times(a0, z, (uint32_t)1U);       /* a  = z^2 */
  Hacl_Bignum_Fsquare_fsquare_times(t00, a0, (uint32_t)2U);     /* t0 = z^8 */
  Hacl_Bignum_Fmul_fmul(b0, t00, z);                            /* b  = z^9 */
  Hacl_Bignum_Fmul_fmul(a0, b0, a0);                            /* a  = z^11 */
  Hacl_Bignum_Fsquare_fsquare_times(t00, a0, (uint32_t)1U);     /* t0 = z^22 */
  Hacl_Bignum_Fmul_fmul(b0, t00, b0);                           /* b  = z^(2^5 - 1) */
  Hacl_Bignum_Fsquare_fsquare_times(t00, b0, (uint32_t)5U);
  t01 = buf + (uint32_t)5U;
  b1 = buf + (uint32_t)10U;
  c0 = buf + (uint32_t)15U;
  Hacl_Bignum_Fmul_fmul(b1, t01, b1);                           /* b  = z^(2^10 - 1) */
  Hacl_Bignum_Fsquare_fsquare_times(t01, b1, (uint32_t)10U);
  Hacl_Bignum_Fmul_fmul(c0, t01, b1);                           /* c  = z^(2^20 - 1) */
  Hacl_Bignum_Fsquare_fsquare_times(t01, c0, (uint32_t)20U);
  Hacl_Bignum_Fmul_fmul(t01, t01, c0);                          /* t0 = z^(2^40 - 1) */
  Hacl_Bignum_Fsquare_fsquare_times_inplace(t01, (uint32_t)10U);
  Hacl_Bignum_Fmul_fmul(b1, t01, b1);                           /* b  = z^(2^50 - 1) */
  Hacl_Bignum_Fsquare_fsquare_times(t01, b1, (uint32_t)50U);
  a = buf;
  t0 = buf + (uint32_t)5U;
  b = buf + (uint32_t)10U;
  c = buf + (uint32_t)15U;
  Hacl_Bignum_Fmul_fmul(c, t0, b);                              /* c  = z^(2^100 - 1) */
  Hacl_Bignum_Fsquare_fsquare_times(t0, c, (uint32_t)100U);
  Hacl_Bignum_Fmul_fmul(t0, t0, c);                             /* t0 = z^(2^200 - 1) */
  Hacl_Bignum_Fsquare_fsquare_times_inplace(t0, (uint32_t)50U);
  Hacl_Bignum_Fmul_fmul(t0, t0, b);                             /* t0 = z^(2^250 - 1) */
  Hacl_Bignum_Fsquare_fsquare_times_inplace(t0, (uint32_t)5U);
  Hacl_Bignum_Fmul_fmul(out, t0, a);                            /* out = z^(2^255 - 21) = z^(p-2) */
}
288 | | |
/* Limb-wise field addition: a <- a + b (no reduction; callers keep limbs
 * small enough that the 64-bit additions cannot overflow). */
inline static void Hacl_Bignum_fsum(uint64_t *a, uint64_t *b)
{
  uint32_t k;
  for (k = (uint32_t)0U; k < (uint32_t)5U; k++)
  {
    a[k] += b[k];
  }
}
299 | | |
/* Field subtraction: a <- b - a (mod p).  A multiple of p
 * (limbs 0x3fffffffffff68, 0x3ffffffffffff8 x 4, i.e. 8*p in this limb
 * encoding) is added to a copy of b first so every limb-wise subtraction
 * stays non-negative; no branches on data. */
inline static void Hacl_Bignum_fdifference(uint64_t *a, uint64_t *b)
{
  uint64_t shifted[5U] = { 0U };
  uint32_t k;
  memcpy(shifted, b, (uint32_t)5U * sizeof b[0U]);
  shifted[0U] += (uint64_t)0x3fffffffffff68U;
  shifted[1U] += (uint64_t)0x3ffffffffffff8U;
  shifted[2U] += (uint64_t)0x3ffffffffffff8U;
  shifted[3U] += (uint64_t)0x3ffffffffffff8U;
  shifted[4U] += (uint64_t)0x3ffffffffffff8U;
  for (k = (uint32_t)0U; k < (uint32_t)5U; k++)
  {
    a[k] = shifted[k] - a[k];
  }
}
329 | | |
/* Scalar multiplication of a field element: output <- b * s (mod p),
 * where s is a small 64-bit scalar.  Products are accumulated in wide
 * limbs, carried once, and the top-limb overflow folded back times 19. */
inline static void Hacl_Bignum_fscalar(uint64_t *output, uint64_t *b, uint64_t s)
{
  KRML_CHECK_SIZE(sizeof (uint128_t), (uint32_t)5U);
  {
    uint128_t tmp[5U];
    {
      uint32_t _i;
      for (_i = 0U; _i < (uint32_t)5U; ++_i)
        tmp[_i] = (uint128_t)(uint64_t)0U;
    }
    {
      uint128_t b4;
      uint128_t b0;
      uint128_t b4_;
      uint128_t b0_;
      {
        uint32_t i;
        for (i = (uint32_t)0U; i < (uint32_t)5U; i = i + (uint32_t)1U)
        {
          uint64_t xi = b[i];
          tmp[i] = (uint128_t)xi * s;
        }
      }
      Hacl_Bignum_Fproduct_carry_wide_(tmp);
      b4 = tmp[4U];
      b0 = tmp[0U];
      b4_ = b4 & (uint128_t)(uint64_t)0x7ffffffffffffU;
      b0_ = b0 + (uint128_t)(uint64_t)19U * (uint64_t)(b4 >> (uint32_t)51U);
      tmp[4U] = b4_;
      tmp[0U] = b0_;
      Hacl_Bignum_Fproduct_copy_from_wide_(output, tmp);
    }
  }
}
364 | | |
/* Public-facing alias for field multiplication: output <- a * b (mod p). */
inline static void Hacl_Bignum_fmul(uint64_t *output, uint64_t *a, uint64_t *b)
{
  Hacl_Bignum_Fmul_fmul(output, a, b);
}
369 | | |
/* Public-facing alias for field inversion: output <- input^(p - 2) (mod p). */
inline static void Hacl_Bignum_crecip(uint64_t *output, uint64_t *input)
{
  Hacl_Bignum_Crecip_crecip(output, input);
}
374 | | |
/* Constant-time conditional swap of limb ctr-1 of `a` and `b`.
 * `swap1` must be all-zeros (no swap) or all-ones (swap); the classic
 * XOR-mask trick touches both arrays identically either way. */
static void
Hacl_EC_Point_swap_conditional_step(uint64_t *a, uint64_t *b, uint64_t swap1, uint32_t ctr)
{
  uint32_t idx = ctr - (uint32_t)1U;
  uint64_t av = a[idx];
  uint64_t bv = b[idx];
  uint64_t toggle = (av ^ bv) & swap1;
  a[idx] = av ^ toggle;
  b[idx] = bv ^ toggle;
}
387 | | |
/* Apply the conditional-swap step to limbs ctr-1 down to 0 (tail
 * recursion instead of a loop).  The recursion depth depends only on
 * `ctr`, never on the secret mask `swap1`. */
static void
Hacl_EC_Point_swap_conditional_(uint64_t *a, uint64_t *b, uint64_t swap1, uint32_t ctr)
{
  if (!(ctr == (uint32_t)0U))
  {
    uint32_t i;
    Hacl_EC_Point_swap_conditional_step(a, b, swap1, ctr);
    i = ctr - (uint32_t)1U;
    Hacl_EC_Point_swap_conditional_(a, b, swap1, i);
  }
}
399 | | |
/* Conditionally swap two points (each two consecutive 5-limb field
 * elements, X then Z).  `iswap` is 0 or 1; 0 - iswap expands it to an
 * all-zeros or all-ones mask without branching on the secret bit. */
static void Hacl_EC_Point_swap_conditional(uint64_t *a, uint64_t *b, uint64_t iswap)
{
  uint64_t swap1 = (uint64_t)0U - iswap;
  Hacl_EC_Point_swap_conditional_(a, b, swap1, (uint32_t)5U);
  Hacl_EC_Point_swap_conditional_(a + (uint32_t)5U, b + (uint32_t)5U, swap1, (uint32_t)5U);
}
406 | | |
/* Copy a point: the X and Z coordinates are two consecutive 5-limb field
 * elements, so a single 10-limb copy covers both. */
static void Hacl_EC_Point_copy(uint64_t *output, uint64_t *input)
{
  memcpy(output, input, (uint32_t)10U * sizeof input[0U]);
}
414 | | |
/* Parse a 32-byte little-endian integer into 5 limbs of 51 bits.
 * Overlapping unaligned 8-byte loads at byte offsets 0, 6, 12, 19 and 24,
 * shifted right by 0, 3, 6, 1 and 12 bits respectively, align each load's
 * low 51 bits with bit positions 0, 51, 102, 153 and 204 of the value. */
static void Hacl_EC_Format_fexpand(uint64_t *output, uint8_t *input)
{
  uint64_t i0 = load64_le(input);
  uint8_t *x00 = input + (uint32_t)6U;
  uint64_t i1 = load64_le(x00);
  uint8_t *x01 = input + (uint32_t)12U;
  uint64_t i2 = load64_le(x01);
  uint8_t *x02 = input + (uint32_t)19U;
  uint64_t i3 = load64_le(x02);
  uint8_t *x0 = input + (uint32_t)24U;
  uint64_t i4 = load64_le(x0);
  uint64_t output0 = i0 & (uint64_t)0x7ffffffffffffU;
  uint64_t output1 = i1 >> (uint32_t)3U & (uint64_t)0x7ffffffffffffU;
  uint64_t output2 = i2 >> (uint32_t)6U & (uint64_t)0x7ffffffffffffU;
  uint64_t output3 = i3 >> (uint32_t)1U & (uint64_t)0x7ffffffffffffU;
  /* Top load keeps bits 204..254; the >> 12 also discards input bit 255. */
  uint64_t output4 = i4 >> (uint32_t)12U & (uint64_t)0x7ffffffffffffU;
  output[0U] = output0;
  output[1U] = output1;
  output[2U] = output2;
  output[3U] = output3;
  output[4U] = output4;
}
437 | | |
/* One sequential carry chain: for limbs 0..3, push the bits above 51 into
 * the next limb and mask to 51 bits.  Limb 4 keeps its carry, which the
 * caller folds back via Hacl_Bignum_Modulo_carry_top.  Branch-free, fixed
 * iteration count. */
static void Hacl_EC_Format_fcontract_first_carry_pass(uint64_t *input)
{
  uint32_t k;
  for (k = (uint32_t)0U; k < (uint32_t)4U; k++)
  {
    input[k + (uint32_t)1U] += input[k] >> (uint32_t)51U;
    input[k] &= (uint64_t)0x7ffffffffffffU;
  }
}
459 | | |
/* First normalization round: carry all limbs once, then fold the top
 * limb's overflow back into limb 0. */
static void Hacl_EC_Format_fcontract_first_carry_full(uint64_t *input)
{
  Hacl_EC_Format_fcontract_first_carry_pass(input);
  Hacl_Bignum_Modulo_carry_top(input);
}
465 | | |
/* Second carry chain, identical in effect to the first pass: propagate the
 * bits above 51 of limbs 0..3 into the next limb and mask; limb 4 keeps
 * its (now at most 1-bit) carry for the caller to fold. */
static void Hacl_EC_Format_fcontract_second_carry_pass(uint64_t *input)
{
  uint32_t k;
  for (k = (uint32_t)0U; k < (uint32_t)4U; k++)
  {
    input[k + (uint32_t)1U] += input[k] >> (uint32_t)51U;
    input[k] &= (uint64_t)0x7ffffffffffffU;
  }
}
487 | | |
/* Second normalization round: carry pass, fold the top limb, then one more
 * carry from limb 0 into limb 1 so all limbs are fully below 2^51 except
 * for a possible excess of at most p-1..2p-1 handled by fcontract_trim. */
static void Hacl_EC_Format_fcontract_second_carry_full(uint64_t *input)
{
  uint64_t i0;
  uint64_t i1;
  uint64_t i0_;
  uint64_t i1_;
  Hacl_EC_Format_fcontract_second_carry_pass(input);
  Hacl_Bignum_Modulo_carry_top(input);
  i0 = input[0U];
  i1 = input[1U];
  i0_ = i0 & (uint64_t)0x7ffffffffffffU;
  i1_ = i1 + (i0 >> (uint32_t)51U);
  input[0U] = i0_;
  input[1U] = i1_;
}
503 | | |
/* Constant-time final reduction: subtract p = 2^255 - 19 iff the fully
 * carried value is >= p.  With 51-bit limbs, value >= p exactly when
 * limb 0 >= 2^51 - 19 and limbs 1..4 all equal 2^51 - 1; the comparison
 * and conditional subtraction use branch-free masks. */
static void Hacl_EC_Format_fcontract_trim(uint64_t *input)
{
  uint64_t a0 = input[0U];
  uint64_t a1 = input[1U];
  uint64_t a2 = input[2U];
  uint64_t a3 = input[3U];
  uint64_t a4 = input[4U];
  uint64_t mask0 = FStar_UInt64_gte_mask(a0, (uint64_t)0x7ffffffffffedU);
  uint64_t mask1 = FStar_UInt64_eq_mask(a1, (uint64_t)0x7ffffffffffffU);
  uint64_t mask2 = FStar_UInt64_eq_mask(a2, (uint64_t)0x7ffffffffffffU);
  uint64_t mask3 = FStar_UInt64_eq_mask(a3, (uint64_t)0x7ffffffffffffU);
  uint64_t mask4 = FStar_UInt64_eq_mask(a4, (uint64_t)0x7ffffffffffffU);
  /* All-ones iff value >= p; all-zeros otherwise. */
  uint64_t mask = (((mask0 & mask1) & mask2) & mask3) & mask4;
  uint64_t a0_ = a0 - ((uint64_t)0x7ffffffffffedU & mask);
  uint64_t a1_ = a1 - ((uint64_t)0x7ffffffffffffU & mask);
  uint64_t a2_ = a2 - ((uint64_t)0x7ffffffffffffU & mask);
  uint64_t a3_ = a3 - ((uint64_t)0x7ffffffffffffU & mask);
  uint64_t a4_ = a4 - ((uint64_t)0x7ffffffffffffU & mask);
  input[0U] = a0_;
  input[1U] = a1_;
  input[2U] = a2_;
  input[3U] = a3_;
  input[4U] = a4_;
}
528 | | |
/* Pack 5 x 51-bit limbs (assumed fully reduced, < 2^51 each) into the four
 * little-endian 64-bit words of the 32-byte output. */
static void Hacl_EC_Format_fcontract_store(uint8_t *output, uint64_t *input)
{
  uint64_t t0 = input[0U];
  uint64_t t1 = input[1U];
  uint64_t t2 = input[2U];
  uint64_t t3 = input[3U];
  uint64_t t4 = input[4U];
  uint64_t o0 = t1 << (uint32_t)51U | t0;
  uint64_t o1 = t2 << (uint32_t)38U | t1 >> (uint32_t)13U;
  uint64_t o2 = t3 << (uint32_t)25U | t2 >> (uint32_t)26U;
  uint64_t o3 = t4 << (uint32_t)12U | t3 >> (uint32_t)39U;
  uint8_t *b0 = output;
  uint8_t *b1 = output + (uint32_t)8U;
  uint8_t *b2 = output + (uint32_t)16U;
  uint8_t *b3 = output + (uint32_t)24U;
  store64_le(b0, o0);
  store64_le(b1, o1);
  store64_le(b2, o2);
  store64_le(b3, o3);
}
549 | | |
/* Serialize a field element to 32 bytes: two carry/normalize rounds, a
 * constant-time subtraction of p if needed, then the byte packing.
 * `input` is modified in place. */
static void Hacl_EC_Format_fcontract(uint8_t *output, uint64_t *input)
{
  Hacl_EC_Format_fcontract_first_carry_full(input);
  Hacl_EC_Format_fcontract_second_carry_full(input);
  Hacl_EC_Format_fcontract_trim(input);
  Hacl_EC_Format_fcontract_store(output, input);
}
557 | | |
/* Convert a projective point (X : Z) to its affine x-coordinate and
 * serialize it: scalar <- encode(X * Z^(p-2) mod p). */
static void Hacl_EC_Format_scalar_of_point(uint8_t *scalar, uint64_t *point)
{
  uint64_t *x = point;
  uint64_t *z = point + (uint32_t)5U;
  uint64_t buf[10U] = { 0U };
  uint64_t *zmone = buf;
  uint64_t *sc = buf + (uint32_t)5U;
  Hacl_Bignum_crecip(zmone, z);
  Hacl_Bignum_fmul(sc, x, zmone);
  Hacl_EC_Format_fcontract(scalar, sc);
}
569 | | |
/* One Montgomery-ladder step in projective (X : Z) coordinates: given
 * P (`p`), P+Q (`pq`) and the x-coordinate of the fixed difference Q-P
 * (`qmqp`), compute 2P into `pp` and (P)+(P+Q) into `ppq` with the
 * standard differential add-and-double formulas.  `p` and `pq` are used
 * as scratch and destroyed.  The scalar 121665 = (A - 2) / 4 for the
 * curve coefficient A = 486662.
 * Scratch layout in buf (5 limbs each): origx +0, origxprime +5, zzz +10,
 * xx +15, zz +20, xxprime +25, zzprime +30, zzzprime +35. */
static void
Hacl_EC_AddAndDouble_fmonty(
  uint64_t *pp,
  uint64_t *ppq,
  uint64_t *p,
  uint64_t *pq,
  uint64_t *qmqp
)
{
  uint64_t *qx = qmqp;
  uint64_t *x2 = pp;
  uint64_t *z2 = pp + (uint32_t)5U;
  uint64_t *x3 = ppq;
  uint64_t *z3 = ppq + (uint32_t)5U;
  uint64_t *x = p;
  uint64_t *z = p + (uint32_t)5U;
  uint64_t *xprime = pq;
  uint64_t *zprime = pq + (uint32_t)5U;
  uint64_t buf[40U] = { 0U };
  uint64_t *origx = buf;
  uint64_t *origxprime0 = buf + (uint32_t)5U;
  uint64_t *xxprime0 = buf + (uint32_t)25U;
  uint64_t *zzprime0 = buf + (uint32_t)30U;
  uint64_t *origxprime;
  uint64_t *xx0;
  uint64_t *zz0;
  uint64_t *xxprime;
  uint64_t *zzprime;
  uint64_t *zzzprime;
  uint64_t *zzz;
  uint64_t *xx;
  uint64_t *zz;
  uint64_t scalar;
  /* x, z <- x + z, x - z;  xprime, zprime <- xprime + zprime, xprime - zprime. */
  memcpy(origx, x, (uint32_t)5U * sizeof x[0U]);
  Hacl_Bignum_fsum(x, z);
  Hacl_Bignum_fdifference(z, origx);
  memcpy(origxprime0, xprime, (uint32_t)5U * sizeof xprime[0U]);
  Hacl_Bignum_fsum(xprime, zprime);
  Hacl_Bignum_fdifference(zprime, origxprime0);
  /* Cross products for the differential addition. */
  Hacl_Bignum_fmul(xxprime0, xprime, z);
  Hacl_Bignum_fmul(zzprime0, x, zprime);
  origxprime = buf + (uint32_t)5U;
  xx0 = buf + (uint32_t)15U;
  zz0 = buf + (uint32_t)20U;
  xxprime = buf + (uint32_t)25U;
  zzprime = buf + (uint32_t)30U;
  zzzprime = buf + (uint32_t)35U;
  memcpy(origxprime, xxprime, (uint32_t)5U * sizeof xxprime[0U]);
  Hacl_Bignum_fsum(xxprime, zzprime);
  Hacl_Bignum_fdifference(zzprime, origxprime);
  /* x3 = (sum)^2, z3 = (diff)^2 * x(Q-P). */
  Hacl_Bignum_Fsquare_fsquare_times(x3, xxprime, (uint32_t)1U);
  Hacl_Bignum_Fsquare_fsquare_times(zzzprime, zzprime, (uint32_t)1U);
  Hacl_Bignum_fmul(z3, zzzprime, qx);
  /* Doubling: xx = (x+z)^2, zz = (x-z)^2. */
  Hacl_Bignum_Fsquare_fsquare_times(xx0, x, (uint32_t)1U);
  Hacl_Bignum_Fsquare_fsquare_times(zz0, z, (uint32_t)1U);
  zzz = buf + (uint32_t)10U;
  xx = buf + (uint32_t)15U;
  zz = buf + (uint32_t)20U;
  /* x2 = xx * zz;  z2 = (xx - zz) * (xx + 121665 * (xx - zz)). */
  Hacl_Bignum_fmul(x2, xx, zz);
  Hacl_Bignum_fdifference(zz, xx);
  scalar = (uint64_t)121665U;
  Hacl_Bignum_fscalar(zzz, zz, scalar);
  Hacl_Bignum_fsum(zzz, xx);
  Hacl_Bignum_fmul(z2, zzz, zz);
}
635 | | |
/* Process one scalar bit (the MSB of `byt`): conditionally swap the ladder
 * state so the operand order matches the bit, do one add-and-double step,
 * then swap back.  The bit is re-extracted after fmonty rather than kept
 * in a live variable. */
static void
Hacl_EC_Ladder_SmallLoop_cmult_small_loop_step(
  uint64_t *nq,
  uint64_t *nqpq,
  uint64_t *nq2,
  uint64_t *nqpq2,
  uint64_t *q,
  uint8_t byt
)
{
  uint64_t bit0 = (uint64_t)(byt >> (uint32_t)7U);
  uint64_t bit;
  Hacl_EC_Point_swap_conditional(nq, nqpq, bit0);
  Hacl_EC_AddAndDouble_fmonty(nq2, nqpq2, nq, nqpq, q);
  bit = (uint64_t)(byt >> (uint32_t)7U);
  Hacl_EC_Point_swap_conditional(nq2, nqpq2, bit);
}
653 | | |
/* Process two scalar bits: one step on (nq, nqpq) writing into
 * (nq2, nqpq2), then - with the next bit shifted into the MSB - a second
 * step writing back into (nq, nqpq), so the roles of the two point pairs
 * ping-pong and the result lands back in (nq, nqpq). */
static void
Hacl_EC_Ladder_SmallLoop_cmult_small_loop_double_step(
  uint64_t *nq,
  uint64_t *nqpq,
  uint64_t *nq2,
  uint64_t *nqpq2,
  uint64_t *q,
  uint8_t byt
)
{
  uint8_t byt1;
  Hacl_EC_Ladder_SmallLoop_cmult_small_loop_step(nq, nqpq, nq2, nqpq2, q, byt);
  byt1 = byt << (uint32_t)1U;
  Hacl_EC_Ladder_SmallLoop_cmult_small_loop_step(nq2, nqpq2, nq, nqpq, q, byt1);
}
669 | | |
/* Process 2*i bits of `byt`, most significant first, two bits per
 * double-step (recursion instead of a loop; depth is the public value i,
 * called with i = 4 to consume one whole byte). */
static void
Hacl_EC_Ladder_SmallLoop_cmult_small_loop(
  uint64_t *nq,
  uint64_t *nqpq,
  uint64_t *nq2,
  uint64_t *nqpq2,
  uint64_t *q,
  uint8_t byt,
  uint32_t i
)
{
  if (!(i == (uint32_t)0U))
  {
    uint32_t i_ = i - (uint32_t)1U;
    uint8_t byt_;
    Hacl_EC_Ladder_SmallLoop_cmult_small_loop_double_step(nq, nqpq, nq2, nqpq2, q, byt);
    byt_ = byt << (uint32_t)2U;
    Hacl_EC_Ladder_SmallLoop_cmult_small_loop(nq, nqpq, nq2, nqpq2, q, byt_, i_);
  }
}
690 | | |
/* Walk the 32 scalar bytes from n1[i-1] down to n1[0] (most significant
 * byte of the little-endian scalar first), feeding each byte's 8 bits to
 * the small loop.  Recursion depth is the public byte count. */
static void
Hacl_EC_Ladder_BigLoop_cmult_big_loop(
  uint8_t *n1,
  uint64_t *nq,
  uint64_t *nqpq,
  uint64_t *nq2,
  uint64_t *nqpq2,
  uint64_t *q,
  uint32_t i
)
{
  if (!(i == (uint32_t)0U))
  {
    uint32_t i1 = i - (uint32_t)1U;
    uint8_t byte = n1[i1];
    Hacl_EC_Ladder_SmallLoop_cmult_small_loop(nq, nqpq, nq2, nqpq2, q, byte, (uint32_t)4U);
    Hacl_EC_Ladder_BigLoop_cmult_big_loop(n1, nq, nqpq, nq2, nqpq2, q, i1);
  }
}
710 | | |
/* Montgomery-ladder scalar multiplication: result <- n1 * q, where n1 is
 * the 32-byte (clamped) scalar.  The ladder state starts at
 * nq = (1 : 0), the point at infinity in (X : Z) coordinates, and
 * nqpq = q; nq2/nqpq2 are the ping-pong scratch pair. */
static void Hacl_EC_Ladder_cmult(uint64_t *result, uint8_t *n1, uint64_t *q)
{
  uint64_t point_buf[40U] = { 0U };
  uint64_t *nq = point_buf;
  uint64_t *nqpq = point_buf + (uint32_t)10U;
  uint64_t *nq2 = point_buf + (uint32_t)20U;
  uint64_t *nqpq2 = point_buf + (uint32_t)30U;
  Hacl_EC_Point_copy(nqpq, q);
  nq[0U] = (uint64_t)1U;
  Hacl_EC_Ladder_BigLoop_cmult_big_loop(n1, nq, nqpq, nq2, nqpq2, q, (uint32_t)32U);
  Hacl_EC_Point_copy(result, nq);
}
723 | | |
/* X25519 scalar multiplication (RFC 7748): mypublic <- secret * basepoint.
 * All three buffers are 32 bytes.  The secret key is copied and clamped
 * before use; the caller's `secret` is not modified. */
void Hacl_Curve25519_crypto_scalarmult(uint8_t *mypublic, uint8_t *secret, uint8_t *basepoint)
{
  uint64_t buf0[10U] = { 0U };
  uint64_t *x0 = buf0;
  uint64_t *z = buf0 + (uint32_t)5U;
  uint64_t *q;
  /* Base point as a projective point q = (x : 1). */
  Hacl_EC_Format_fexpand(x0, basepoint);
  z[0U] = (uint64_t)1U;
  q = buf0;
  {
    uint8_t e[32U] = { 0U };
    uint8_t e0;
    uint8_t e31;
    uint8_t e01;
    uint8_t e311;
    uint8_t e312;
    uint8_t *scalar;
    memcpy(e, secret, (uint32_t)32U * sizeof secret[0U]);
    /* RFC 7748 clamping: clear the 3 low bits (cofactor), clear bit 255,
     * set bit 254. */
    e0 = e[0U];
    e31 = e[31U];
    e01 = e0 & (uint8_t)248U;
    e311 = e31 & (uint8_t)127U;
    e312 = e311 | (uint8_t)64U;
    e[0U] = e01;
    e[31U] = e312;
    scalar = e;
    {
      uint64_t buf[15U] = { 0U };
      uint64_t *nq = buf;
      uint64_t *x = nq;
      x[0U] = (uint64_t)1U;
      Hacl_EC_Ladder_cmult(nq, scalar, q);
      Hacl_EC_Format_scalar_of_point(mypublic, nq);
    }
  }
}
760 | | |