/src/nss/lib/freebl/verified/Hacl_P256.c
Line  | Count  | Source  | 
1  |  | /* MIT License  | 
2  |  |  *  | 
3  |  |  * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation  | 
4  |  |  * Copyright (c) 2022-2023 HACL* Contributors  | 
5  |  |  *  | 
6  |  |  * Permission is hereby granted, free of charge, to any person obtaining a copy  | 
7  |  |  * of this software and associated documentation files (the "Software"), to deal  | 
8  |  |  * in the Software without restriction, including without limitation the rights  | 
9  |  |  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell  | 
10  |  |  * copies of the Software, and to permit persons to whom the Software is  | 
11  |  |  * furnished to do so, subject to the following conditions:  | 
12  |  |  *  | 
13  |  |  * The above copyright notice and this permission notice shall be included in all  | 
14  |  |  * copies or substantial portions of the Software.  | 
15  |  |  *  | 
16  |  |  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR  | 
17  |  |  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,  | 
18  |  |  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE  | 
19  |  |  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER  | 
20  |  |  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,  | 
21  |  |  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE  | 
22  |  |  * SOFTWARE.  | 
23  |  |  */  | 
24  |  |  | 
25  |  | #include "internal/Hacl_P256.h"  | 
26  |  |  | 
27  |  | #include "internal/Hacl_P256_PrecompTable.h"  | 
28  |  | #include "internal/Hacl_Krmllib.h"  | 
29  |  | #include "internal/Hacl_Bignum_Base.h"  | 
30  |  | #include "lib_intrinsics.h"  | 
31  |  |  | 
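/*
 * The helpers below operate on 256-bit bignums stored as four 64-bit limbs in
 * little-endian limb order (f[0] is least significant). bn_is_zero_mask4 and
 * bn_is_eq_mask4 return an all-ones mask (0xFFFFFFFFFFFFFFFF) when the
 * predicate holds and 0 otherwise, without branching on the data; the
 * *_vartime4 variants turn the mask into a bool and are meant for public
 * values only.
 */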
32  |  | static inline uint64_t  | 
33  |  | bn_is_zero_mask4(uint64_t *f)  | 
34  | 0  | { | 
35  | 0  |     uint64_t bn_zero[4U] = { 0U }; | 
36  | 0  |     uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;  | 
37  | 0  |     KRML_MAYBE_FOR4(i,  | 
38  | 0  |                     (uint32_t)0U,  | 
39  | 0  |                     (uint32_t)4U,  | 
40  | 0  |                     (uint32_t)1U,  | 
41  | 0  |                     uint64_t uu____0 = FStar_UInt64_eq_mask(f[i], bn_zero[i]);  | 
42  | 0  |                     mask = uu____0 & mask;);  | 
43  | 0  |     uint64_t mask1 = mask;  | 
44  | 0  |     uint64_t res = mask1;  | 
45  | 0  |     return res;  | 
46  | 0  | }  | 
47  |  |  | 
48  |  | static inline bool  | 
49  |  | bn_is_zero_vartime4(uint64_t *f)  | 
50  | 0  | { | 
51  | 0  |     uint64_t m = bn_is_zero_mask4(f);  | 
52  | 0  |     return m == (uint64_t)0xFFFFFFFFFFFFFFFFU;  | 
53  | 0  | }  | 
54  |  |  | 
55  |  | static inline uint64_t  | 
56  |  | bn_is_eq_mask4(uint64_t *a, uint64_t *b)  | 
57  | 0  | { | 
58  | 0  |     uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;  | 
59  | 0  |     KRML_MAYBE_FOR4(i,  | 
60  | 0  |                     (uint32_t)0U,  | 
61  | 0  |                     (uint32_t)4U,  | 
62  | 0  |                     (uint32_t)1U,  | 
63  | 0  |                     uint64_t uu____0 = FStar_UInt64_eq_mask(a[i], b[i]);  | 
64  | 0  |                     mask = uu____0 & mask;);  | 
65  | 0  |     uint64_t mask1 = mask;  | 
66  | 0  |     return mask1;  | 
67  | 0  | }  | 
68  |  |  | 
69  |  | static inline bool  | 
70  |  | bn_is_eq_vartime4(uint64_t *a, uint64_t *b)  | 
71  | 0  | { | 
72  | 0  |     uint64_t m = bn_is_eq_mask4(a, b);  | 
73  | 0  |     return m == (uint64_t)0xFFFFFFFFFFFFFFFFU;  | 
74  | 0  | }  | 
75  |  |  | 
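/*
 * Constant-time conditional move: res = (cin == 0) ? x : y, selected with a
 * mask rather than a branch. A branchy model of the same operation, for
 * illustration only:
 *     for (i = 0; i < 4; i++) res[i] = (cin == 0) ? x[i] : y[i];
 */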
76  |  | static inline void  | 
77  |  | bn_cmovznz4(uint64_t *res, uint64_t cin, uint64_t *x, uint64_t *y)  | 
78  | 0  | { | 
79  | 0  |     uint64_t mask = ~FStar_UInt64_eq_mask(cin, (uint64_t)0U);  | 
80  | 0  |     KRML_MAYBE_FOR4(i,  | 
81  | 0  |                     (uint32_t)0U,  | 
82  | 0  |                     (uint32_t)4U,  | 
83  | 0  |                     (uint32_t)1U,  | 
84  | 0  |                     uint64_t *os = res;  | 
85  | 0  |                     uint64_t uu____0 = x[i];  | 
86  | 0  |                     uint64_t x1 = uu____0 ^ (mask & (y[i] ^ uu____0));  | 
87  | 0  |                     os[i] = x1;);  | 
88  | 0  | }  | 
89  |  |  | 
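/*
 * Modular addition res = (x + y) mod n for 4-limb operands with x, y < n:
 * add with carry, subtract n into a temporary, then mask-select either the
 * raw or the reduced sum from the carry/borrow flags, so no branch depends
 * on the values.
 */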
90  |  | static inline void  | 
91  |  | bn_add_mod4(uint64_t *res, uint64_t *n, uint64_t *x, uint64_t *y)  | 
92  | 0  | { | 
93  | 0  |     uint64_t c0 = (uint64_t)0U;  | 
94  | 0  |     { | 
95  | 0  |         uint64_t t1 = x[(uint32_t)4U * (uint32_t)0U];  | 
96  | 0  |         uint64_t t20 = y[(uint32_t)4U * (uint32_t)0U];  | 
97  | 0  |         uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;  | 
98  | 0  |         c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t1, t20, res_i0);  | 
99  | 0  |         uint64_t t10 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];  | 
100  | 0  |         uint64_t t21 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];  | 
101  | 0  |         uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;  | 
102  | 0  |         c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t10, t21, res_i1);  | 
103  | 0  |         uint64_t t11 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];  | 
104  | 0  |         uint64_t t22 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];  | 
105  | 0  |         uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;  | 
106  | 0  |         c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t11, t22, res_i2);  | 
107  | 0  |         uint64_t t12 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];  | 
108  | 0  |         uint64_t t2 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];  | 
109  | 0  |         uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;  | 
110  | 0  |         c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t12, t2, res_i);  | 
111  | 0  |     }  | 
112  | 0  |     uint64_t c00 = c0;  | 
113  | 0  |     uint64_t tmp[4U] = { 0U }; | 
114  | 0  |     uint64_t c = (uint64_t)0U;  | 
115  | 0  |     { | 
116  | 0  |         uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U];  | 
117  | 0  |         uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U];  | 
118  | 0  |         uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U;  | 
119  | 0  |         c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);  | 
120  | 0  |         uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];  | 
121  | 0  |         uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];  | 
122  | 0  |         uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;  | 
123  | 0  |         c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);  | 
124  | 0  |         uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];  | 
125  | 0  |         uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];  | 
126  | 0  |         uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;  | 
127  | 0  |         c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);  | 
128  | 0  |         uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];  | 
129  | 0  |         uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];  | 
130  | 0  |         uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;  | 
131  | 0  |         c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);  | 
132  | 0  |     }  | 
133  | 0  |     uint64_t c1 = c;  | 
134  | 0  |     uint64_t c2 = c00 - c1;  | 
135  | 0  |     KRML_MAYBE_FOR4(i,  | 
136  | 0  |                     (uint32_t)0U,  | 
137  | 0  |                     (uint32_t)4U,  | 
138  | 0  |                     (uint32_t)1U,  | 
139  | 0  |                     uint64_t *os = res;  | 
140  | 0  |                     uint64_t x1 = (c2 & res[i]) | (~c2 & tmp[i]);  | 
141  | 0  |                     os[i] = x1;);  | 
142  | 0  | }  | 
143  |  |  | 
144  |  | static inline uint64_t  | 
145  |  | bn_sub4(uint64_t *res, uint64_t *x, uint64_t *y)  | 
146  | 0  | { | 
147  | 0  |     uint64_t c = (uint64_t)0U;  | 
148  | 0  |     { | 
149  | 0  |         uint64_t t1 = x[(uint32_t)4U * (uint32_t)0U];  | 
150  | 0  |         uint64_t t20 = y[(uint32_t)4U * (uint32_t)0U];  | 
151  | 0  |         uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;  | 
152  | 0  |         c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);  | 
153  | 0  |         uint64_t t10 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];  | 
154  | 0  |         uint64_t t21 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];  | 
155  | 0  |         uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;  | 
156  | 0  |         c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);  | 
157  | 0  |         uint64_t t11 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];  | 
158  | 0  |         uint64_t t22 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];  | 
159  | 0  |         uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;  | 
160  | 0  |         c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);  | 
161  | 0  |         uint64_t t12 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];  | 
162  | 0  |         uint64_t t2 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];  | 
163  | 0  |         uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;  | 
164  | 0  |         c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);  | 
165  | 0  |     }  | 
166  | 0  |     uint64_t c0 = c;  | 
167  | 0  |     return c0;  | 
168  | 0  | }  | 
169  |  |  | 
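/*
 * Modular subtraction res = (x - y) mod n: subtract with borrow, add n back
 * into a temporary, and mask-select the corrected value when the first
 * subtraction underflowed (c00 != 0).
 */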
170  |  | static inline void  | 
171  |  | bn_sub_mod4(uint64_t *res, uint64_t *n, uint64_t *x, uint64_t *y)  | 
172  | 0  | { | 
173  | 0  |     uint64_t c0 = (uint64_t)0U;  | 
174  | 0  |     { | 
175  | 0  |         uint64_t t1 = x[(uint32_t)4U * (uint32_t)0U];  | 
176  | 0  |         uint64_t t20 = y[(uint32_t)4U * (uint32_t)0U];  | 
177  | 0  |         uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;  | 
178  | 0  |         c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t1, t20, res_i0);  | 
179  | 0  |         uint64_t t10 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];  | 
180  | 0  |         uint64_t t21 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];  | 
181  | 0  |         uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;  | 
182  | 0  |         c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t10, t21, res_i1);  | 
183  | 0  |         uint64_t t11 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];  | 
184  | 0  |         uint64_t t22 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];  | 
185  | 0  |         uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;  | 
186  | 0  |         c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t11, t22, res_i2);  | 
187  | 0  |         uint64_t t12 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];  | 
188  | 0  |         uint64_t t2 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];  | 
189  | 0  |         uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;  | 
190  | 0  |         c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t12, t2, res_i);  | 
191  | 0  |     }  | 
192  | 0  |     uint64_t c00 = c0;  | 
193  | 0  |     uint64_t tmp[4U] = { 0U }; | 
194  | 0  |     uint64_t c = (uint64_t)0U;  | 
195  | 0  |     { | 
196  | 0  |         uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U];  | 
197  | 0  |         uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U];  | 
198  | 0  |         uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U;  | 
199  | 0  |         c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0);  | 
200  | 0  |         uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];  | 
201  | 0  |         uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];  | 
202  | 0  |         uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;  | 
203  | 0  |         c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1);  | 
204  | 0  |         uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];  | 
205  | 0  |         uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];  | 
206  | 0  |         uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;  | 
207  | 0  |         c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2);  | 
208  | 0  |         uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];  | 
209  | 0  |         uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];  | 
210  | 0  |         uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;  | 
211  | 0  |         c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i);  | 
212  | 0  |     }  | 
213  | 0  |     uint64_t c1 = c;  | 
214  | 0  |     KRML_HOST_IGNORE(c1);  | 
215  | 0  |     uint64_t c2 = (uint64_t)0U - c00;  | 
216  | 0  |     KRML_MAYBE_FOR4(i,  | 
217  | 0  |                     (uint32_t)0U,  | 
218  | 0  |                     (uint32_t)4U,  | 
219  | 0  |                     (uint32_t)1U,  | 
220  | 0  |                     uint64_t *os = res;  | 
221  | 0  |                     uint64_t x1 = (c2 & tmp[i]) | (~c2 & res[i]);  | 
222  | 0  |                     os[i] = x1;);  | 
223  | 0  | }  | 
224  |  |  | 
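/*
 * Schoolbook 4x4-limb multiplication: res (8 limbs) = x * y, accumulating one
 * 64-bit digit of y per iteration with mul_wide_add2.
 */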
225  |  | static inline void  | 
226  |  | bn_mul4(uint64_t *res, uint64_t *x, uint64_t *y)  | 
227  | 0  | { | 
228  | 0  |     memset(res, 0U, (uint32_t)8U * sizeof(uint64_t));  | 
229  | 0  |     KRML_MAYBE_FOR4(  | 
230  | 0  |         i0,  | 
231  | 0  |         (uint32_t)0U,  | 
232  | 0  |         (uint32_t)4U,  | 
233  | 0  |         (uint32_t)1U,  | 
234  | 0  |         uint64_t bj = y[i0];  | 
235  | 0  |         uint64_t *res_j = res + i0;  | 
236  | 0  |         uint64_t c = (uint64_t)0U;  | 
237  | 0  |         { | 
238  | 0  |             uint64_t a_i = x[(uint32_t)4U * (uint32_t)0U];  | 
239  | 0  |             uint64_t *res_i0 = res_j + (uint32_t)4U * (uint32_t)0U;  | 
240  | 0  |             c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, bj, c, res_i0);  | 
241  | 0  |             uint64_t a_i0 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];  | 
242  | 0  |             uint64_t *res_i1 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;  | 
243  | 0  |             c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, bj, c, res_i1);  | 
244  | 0  |             uint64_t a_i1 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];  | 
245  | 0  |             uint64_t *res_i2 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;  | 
246  | 0  |             c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, bj, c, res_i2);  | 
247  | 0  |             uint64_t a_i2 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];  | 
248  | 0  |             uint64_t *res_i = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;  | 
249  | 0  |             c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, bj, c, res_i);  | 
250  | 0  |         } uint64_t r = c;  | 
251  | 0  |         res[(uint32_t)4U + i0] = r;);  | 
252  | 0  | }  | 
253  |  |  | 
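/*
 * Squaring res (8 limbs) = x^2: compute the off-diagonal products once,
 * double them with bn_add_eq_len_u64(res, res, res), then add the diagonal
 * terms x[i]^2 obtained from 128-bit multiplications.
 */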
254  |  | static inline void  | 
255  |  | bn_sqr4(uint64_t *res, uint64_t *x)  | 
256  | 0  | { | 
257  | 0  |     memset(res, 0U, (uint32_t)8U * sizeof(uint64_t));  | 
258  | 0  |     KRML_MAYBE_FOR4(  | 
259  | 0  |         i0,  | 
260  | 0  |         (uint32_t)0U,  | 
261  | 0  |         (uint32_t)4U,  | 
262  | 0  |         (uint32_t)1U,  | 
263  | 0  |         uint64_t *ab = x;  | 
264  | 0  |         uint64_t a_j = x[i0];  | 
265  | 0  |         uint64_t *res_j = res + i0;  | 
266  | 0  |         uint64_t c = (uint64_t)0U;  | 
267  | 0  |         for (uint32_t i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++) { | 
268  | 0  |             uint64_t a_i = ab[(uint32_t)4U * i];  | 
269  | 0  |             uint64_t *res_i0 = res_j + (uint32_t)4U * i;  | 
270  | 0  |             c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, a_j, c, res_i0);  | 
271  | 0  |             uint64_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U];  | 
272  | 0  |             uint64_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;  | 
273  | 0  |             c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, a_j, c, res_i1);  | 
274  | 0  |             uint64_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U];  | 
275  | 0  |             uint64_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;  | 
276  | 0  |             c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, a_j, c, res_i2);  | 
277  | 0  |             uint64_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U];  | 
278  | 0  |             uint64_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;  | 
279  | 0  |             c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, a_j, c, res_i);  | 
280  | 0  |         } for (uint32_t i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++) { | 
281  | 0  |             uint64_t a_i = ab[i];  | 
282  | 0  |             uint64_t *res_i = res_j + i;  | 
283  | 0  |             c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, a_j, c, res_i);  | 
284  | 0  |         } uint64_t r = c;  | 
285  | 0  |         res[i0 + i0] = r;);  | 
286  | 0  |     uint64_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, res, res, res);  | 
287  | 0  |     KRML_HOST_IGNORE(c0);  | 
288  | 0  |     uint64_t tmp[8U] = { 0U }; | 
289  | 0  |     KRML_MAYBE_FOR4(i,  | 
290  | 0  |                     (uint32_t)0U,  | 
291  | 0  |                     (uint32_t)4U,  | 
292  | 0  |                     (uint32_t)1U,  | 
293  | 0  |                     FStar_UInt128_uint128 res1 = FStar_UInt128_mul_wide(x[i], x[i]);  | 
294  | 0  |                     uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res1, (uint32_t)64U));  | 
295  | 0  |                     uint64_t lo = FStar_UInt128_uint128_to_uint64(res1);  | 
296  | 0  |                     tmp[(uint32_t)2U * i] = lo;  | 
297  | 0  |                     tmp[(uint32_t)2U * i + (uint32_t)1U] = hi;);  | 
298  | 0  |     uint64_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, res, tmp, res);  | 
299  | 0  |     KRML_HOST_IGNORE(c1);  | 
300  | 0  | }  | 
301  |  |  | 
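/*
 * Serialization: a 4-limb bignum is written as a 32-byte big-endian string
 * (most significant limb first), and bn2_to_bytes_be4 concatenates two such
 * strings into a 64-byte x || y encoding. The unused tmp buffer in
 * bn_to_bytes_be4 appears to be a leftover of extraction and is explicitly
 * ignored.
 */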
302  |  | static inline void  | 
303  |  | bn_to_bytes_be4(uint8_t *res, uint64_t *f)  | 
304  | 0  | { | 
305  | 0  |     uint8_t tmp[32U] = { 0U }; | 
306  | 0  |     KRML_HOST_IGNORE(tmp);  | 
307  | 0  |     KRML_MAYBE_FOR4(i,  | 
308  | 0  |                     (uint32_t)0U,  | 
309  | 0  |                     (uint32_t)4U,  | 
310  | 0  |                     (uint32_t)1U,  | 
311  | 0  |                     store64_be(res + i * (uint32_t)8U, f[(uint32_t)4U - i - (uint32_t)1U]););  | 
312  | 0  | }  | 
313  |  |  | 
314  |  | static inline void  | 
315  |  | bn_from_bytes_be4(uint64_t *res, uint8_t *b)  | 
316  | 0  | { | 
317  | 0  |     KRML_MAYBE_FOR4(i,  | 
318  | 0  |                     (uint32_t)0U,  | 
319  | 0  |                     (uint32_t)4U,  | 
320  | 0  |                     (uint32_t)1U,  | 
321  | 0  |                     uint64_t *os = res;  | 
322  | 0  |                     uint64_t u = load64_be(b + ((uint32_t)4U - i - (uint32_t)1U) * (uint32_t)8U);  | 
323  | 0  |                     uint64_t x = u;  | 
324  | 0  |                     os[i] = x;);  | 
325  | 0  | }  | 
326  |  |  | 
327  |  | static inline void  | 
328  |  | bn2_to_bytes_be4(uint8_t *res, uint64_t *x, uint64_t *y)  | 
329  | 0  | { | 
330  | 0  |     bn_to_bytes_be4(res, x);  | 
331  | 0  |     bn_to_bytes_be4(res + (uint32_t)32U, y);  | 
332  | 0  | }  | 
333  |  |  | 
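/*
 * P-256 domain parameters as 4-limb constants (little-endian limb order):
 *   make_prime    - base field prime p = 2^256 - 2^224 + 2^192 + 2^96 - 1
 *   make_order    - group order n
 *   make_a_coeff  - a = -3 in Montgomery form (a * R mod p, with R = 2^256)
 *   make_b_coeff  - curve constant b in Montgomery form
 *   make_g_x/g_y  - base point coordinates in Montgomery form
 *   make_fmont_R2 - R^2 mod p, used by to_mont
 *   make_fone     - R mod p, i.e. 1 in Montgomery form
 */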
334  |  | static inline void  | 
335  |  | make_prime(uint64_t *n)  | 
336  | 0  | { | 
337  | 0  |     n[0U] = (uint64_t)0xffffffffffffffffU;  | 
338  | 0  |     n[1U] = (uint64_t)0xffffffffU;  | 
339  | 0  |     n[2U] = (uint64_t)0x0U;  | 
340  | 0  |     n[3U] = (uint64_t)0xffffffff00000001U;  | 
341  | 0  | }  | 
342  |  |  | 
343  |  | static inline void  | 
344  |  | make_order(uint64_t *n)  | 
345  | 0  | { | 
346  | 0  |     n[0U] = (uint64_t)0xf3b9cac2fc632551U;  | 
347  | 0  |     n[1U] = (uint64_t)0xbce6faada7179e84U;  | 
348  | 0  |     n[2U] = (uint64_t)0xffffffffffffffffU;  | 
349  | 0  |     n[3U] = (uint64_t)0xffffffff00000000U;  | 
350  | 0  | }  | 
351  |  |  | 
352  |  | static inline void  | 
353  |  | make_a_coeff(uint64_t *a)  | 
354  | 0  | { | 
355  | 0  |     a[0U] = (uint64_t)0xfffffffffffffffcU;  | 
356  | 0  |     a[1U] = (uint64_t)0x3ffffffffU;  | 
357  | 0  |     a[2U] = (uint64_t)0x0U;  | 
358  | 0  |     a[3U] = (uint64_t)0xfffffffc00000004U;  | 
359  | 0  | }  | 
360  |  |  | 
361  |  | static inline void  | 
362  |  | make_b_coeff(uint64_t *b)  | 
363  | 0  | { | 
364  | 0  |     b[0U] = (uint64_t)0xd89cdf6229c4bddfU;  | 
365  | 0  |     b[1U] = (uint64_t)0xacf005cd78843090U;  | 
366  | 0  |     b[2U] = (uint64_t)0xe5a220abf7212ed6U;  | 
367  | 0  |     b[3U] = (uint64_t)0xdc30061d04874834U;  | 
368  | 0  | }  | 
369  |  |  | 
370  |  | static inline void  | 
371  |  | make_g_x(uint64_t *n)  | 
372  | 0  | { | 
373  | 0  |     n[0U] = (uint64_t)0x79e730d418a9143cU;  | 
374  | 0  |     n[1U] = (uint64_t)0x75ba95fc5fedb601U;  | 
375  | 0  |     n[2U] = (uint64_t)0x79fb732b77622510U;  | 
376  | 0  |     n[3U] = (uint64_t)0x18905f76a53755c6U;  | 
377  | 0  | }  | 
378  |  |  | 
379  |  | static inline void  | 
380  |  | make_g_y(uint64_t *n)  | 
381  | 0  | { | 
382  | 0  |     n[0U] = (uint64_t)0xddf25357ce95560aU;  | 
383  | 0  |     n[1U] = (uint64_t)0x8b4ab8e4ba19e45cU;  | 
384  | 0  |     n[2U] = (uint64_t)0xd2e88688dd21f325U;  | 
385  | 0  |     n[3U] = (uint64_t)0x8571ff1825885d85U;  | 
386  | 0  | }  | 
387  |  |  | 
388  |  | static inline void  | 
389  |  | make_fmont_R2(uint64_t *n)  | 
390  | 0  | { | 
391  | 0  |     n[0U] = (uint64_t)0x3U;  | 
392  | 0  |     n[1U] = (uint64_t)0xfffffffbffffffffU;  | 
393  | 0  |     n[2U] = (uint64_t)0xfffffffffffffffeU;  | 
394  | 0  |     n[3U] = (uint64_t)0x4fffffffdU;  | 
395  | 0  | }  | 
396  |  |  | 
397  |  | static inline void  | 
398  |  | make_fzero(uint64_t *n)  | 
399  | 0  | { | 
400  | 0  |     n[0U] = (uint64_t)0U;  | 
401  | 0  |     n[1U] = (uint64_t)0U;  | 
402  | 0  |     n[2U] = (uint64_t)0U;  | 
403  | 0  |     n[3U] = (uint64_t)0U;  | 
404  | 0  | }  | 
405  |  |  | 
406  |  | static inline void  | 
407  |  | make_fone(uint64_t *n)  | 
408  | 0  | { | 
409  | 0  |     n[0U] = (uint64_t)0x1U;  | 
410  | 0  |     n[1U] = (uint64_t)0xffffffff00000000U;  | 
411  | 0  |     n[2U] = (uint64_t)0xffffffffffffffffU;  | 
412  | 0  |     n[3U] = (uint64_t)0xfffffffeU;  | 
413  | 0  | }  | 
414  |  |  | 
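/*
 * All-ones mask when f < p: subtracting p from f borrows exactly when f is
 * below the prime, and 0 - borrow stretches that borrow bit into a mask.
 */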
415  |  | static inline uint64_t  | 
416  |  | bn_is_lt_prime_mask4(uint64_t *f)  | 
417  | 0  | { | 
418  | 0  |     uint64_t tmp[4U] = { 0U }; | 
419  | 0  |     make_prime(tmp);  | 
420  | 0  |     uint64_t c = bn_sub4(tmp, f, tmp);  | 
421  | 0  |     return (uint64_t)0U - c;  | 
422  | 0  | }  | 
423  |  |  | 
424  |  | static inline uint64_t  | 
425  |  | feq_mask(uint64_t *a, uint64_t *b)  | 
426  | 0  | { | 
427  | 0  |     uint64_t r = bn_is_eq_mask4(a, b);  | 
428  | 0  |     return r;  | 
429  | 0  | }  | 
430  |  |  | 
431  |  | static inline void  | 
432  |  | fadd0(uint64_t *res, uint64_t *x, uint64_t *y)  | 
433  | 0  | { | 
434  | 0  |     uint64_t n[4U] = { 0U }; | 
435  | 0  |     make_prime(n);  | 
436  | 0  |     bn_add_mod4(res, n, x, y);  | 
437  | 0  | }  | 
438  |  |  | 
439  |  | static inline void  | 
440  |  | fsub0(uint64_t *res, uint64_t *x, uint64_t *y)  | 
441  | 0  | { | 
442  | 0  |     uint64_t n[4U] = { 0U }; | 
443  | 0  |     make_prime(n);  | 
444  | 0  |     bn_sub_mod4(res, n, x, y);  | 
445  | 0  | }  | 
446  |  |  | 
447  |  | static inline void  | 
448  |  | fnegate_conditional_vartime(uint64_t *f, bool is_negate)  | 
449  | 0  | { | 
450  | 0  |     uint64_t zero[4U] = { 0U }; | 
451  | 0  |     if (is_negate) { | 
452  | 0  |         fsub0(f, zero, f);  | 
453  | 0  |     }  | 
454  | 0  | }  | 
455  |  |  | 
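/*
 * Word-by-word Montgomery reduction of an 8-limb value: res = x * 2^-256 mod p.
 * Since p = -1 (mod 2^64), the Montgomery constant (-p)^-1 mod 2^64 is 1, so
 * each quotient digit qj is just the current low limb of x. A final masked
 * subtraction of p brings the result below the prime. The 8-limb input x is
 * used as scratch and clobbered.
 */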
456  |  | static inline void  | 
457  |  | mont_reduction(uint64_t *res, uint64_t *x)  | 
458  | 0  | { | 
459  | 0  |     uint64_t n[4U] = { 0U }; | 
460  | 0  |     make_prime(n);  | 
461  | 0  |     uint64_t c0 = (uint64_t)0U;  | 
462  | 0  |     KRML_MAYBE_FOR4(  | 
463  | 0  |         i0,  | 
464  | 0  |         (uint32_t)0U,  | 
465  | 0  |         (uint32_t)4U,  | 
466  | 0  |         (uint32_t)1U,  | 
467  | 0  |         uint64_t qj = (uint64_t)1U * x[i0];  | 
468  | 0  |         uint64_t *res_j0 = x + i0;  | 
469  | 0  |         uint64_t c = (uint64_t)0U;  | 
470  | 0  |         { | 
471  | 0  |             uint64_t a_i = n[(uint32_t)4U * (uint32_t)0U];  | 
472  | 0  |             uint64_t *res_i0 = res_j0 + (uint32_t)4U * (uint32_t)0U;  | 
473  | 0  |             c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c, res_i0);  | 
474  | 0  |             uint64_t a_i0 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];  | 
475  | 0  |             uint64_t *res_i1 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;  | 
476  | 0  |             c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c, res_i1);  | 
477  | 0  |             uint64_t a_i1 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];  | 
478  | 0  |             uint64_t *res_i2 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;  | 
479  | 0  |             c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c, res_i2);  | 
480  | 0  |             uint64_t a_i2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];  | 
481  | 0  |             uint64_t *res_i = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;  | 
482  | 0  |             c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c, res_i);  | 
483  | 0  |         } uint64_t r = c;  | 
484  | 0  |         uint64_t c1 = r;  | 
485  | 0  |         uint64_t *resb = x + (uint32_t)4U + i0;  | 
486  | 0  |         uint64_t res_j = x[(uint32_t)4U + i0];  | 
487  | 0  |         c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c1, res_j, resb););  | 
488  | 0  |     memcpy(res, x + (uint32_t)4U, (uint32_t)4U * sizeof(uint64_t));  | 
489  | 0  |     uint64_t c00 = c0;  | 
490  | 0  |     uint64_t tmp[4U] = { 0U }; | 
491  | 0  |     uint64_t c = (uint64_t)0U;  | 
492  | 0  |     { | 
493  | 0  |         uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U];  | 
494  | 0  |         uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U];  | 
495  | 0  |         uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U;  | 
496  | 0  |         c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);  | 
497  | 0  |         uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];  | 
498  | 0  |         uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];  | 
499  | 0  |         uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;  | 
500  | 0  |         c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);  | 
501  | 0  |         uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];  | 
502  | 0  |         uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];  | 
503  | 0  |         uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;  | 
504  | 0  |         c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);  | 
505  | 0  |         uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];  | 
506  | 0  |         uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];  | 
507  | 0  |         uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;  | 
508  | 0  |         c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);  | 
509  | 0  |     }  | 
510  | 0  |     uint64_t c1 = c;  | 
511  | 0  |     uint64_t c2 = c00 - c1;  | 
512  | 0  |     KRML_MAYBE_FOR4(i,  | 
513  | 0  |                     (uint32_t)0U,  | 
514  | 0  |                     (uint32_t)4U,  | 
515  | 0  |                     (uint32_t)1U,  | 
516  | 0  |                     uint64_t *os = res;  | 
517  | 0  |                     uint64_t x1 = (c2 & res[i]) | (~c2 & tmp[i]);  | 
518  | 0  |                     os[i] = x1;);  | 
519  | 0  | }  | 
520  |  |  | 
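/*
 * Field arithmetic in the Montgomery domain, with R = 2^256:
 *   fmul0(res, x, y)  : res = x * y * R^-1 mod p   (Montgomery product)
 *   fsqr0(res, x)     : res = x^2 * R^-1 mod p
 *   from_mont(res, a) : res = a * R^-1 mod p       (leave the domain)
 *   to_mont(res, a)   : res = a * R mod p          (enter it, by multiplying by R^2)
 * so from_mont(to_mont(a)) == a.
 */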
521  |  | static inline void  | 
522  |  | fmul0(uint64_t *res, uint64_t *x, uint64_t *y)  | 
523  | 0  | { | 
524  | 0  |     uint64_t tmp[8U] = { 0U }; | 
525  | 0  |     bn_mul4(tmp, x, y);  | 
526  | 0  |     mont_reduction(res, tmp);  | 
527  | 0  | }  | 
528  |  |  | 
529  |  | static inline void  | 
530  |  | fsqr0(uint64_t *res, uint64_t *x)  | 
531  | 0  | { | 
532  | 0  |     uint64_t tmp[8U] = { 0U }; | 
533  | 0  |     bn_sqr4(tmp, x);  | 
534  | 0  |     mont_reduction(res, tmp);  | 
535  | 0  | }  | 
536  |  |  | 
537  |  | static inline void  | 
538  |  | from_mont(uint64_t *res, uint64_t *a)  | 
539  | 0  | { | 
540  | 0  |     uint64_t tmp[8U] = { 0U }; | 
541  | 0  |     memcpy(tmp, a, (uint32_t)4U * sizeof(uint64_t));  | 
542  | 0  |     mont_reduction(res, tmp);  | 
543  | 0  | }  | 
544  |  |  | 
545  |  | static inline void  | 
546  |  | to_mont(uint64_t *res, uint64_t *a)  | 
547  | 0  | { | 
548  | 0  |     uint64_t r2modn[4U] = { 0U }; | 
549  | 0  |     make_fmont_R2(r2modn);  | 
550  | 0  |     fmul0(res, a, r2modn);  | 
551  | 0  | }  | 
552  |  |  | 
553  |  | static inline void  | 
554  |  | fmul_by_b_coeff(uint64_t *res, uint64_t *x)  | 
555  | 0  | { | 
556  | 0  |     uint64_t b_coeff[4U] = { 0U }; | 
557  | 0  |     make_b_coeff(b_coeff);  | 
558  | 0  |     fmul0(res, b_coeff, x);  | 
559  | 0  | }  | 
560  |  |  | 
561  |  | static inline void  | 
562  |  | fcube(uint64_t *res, uint64_t *x)  | 
563  | 0  | { | 
564  | 0  |     fsqr0(res, x);  | 
565  | 0  |     fmul0(res, res, x);  | 
566  | 0  | }  | 
567  |  |  | 
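/*
 * Field inversion via Fermat's little theorem: res = a^(p-2) mod p, computed
 * with a fixed square-and-multiply addition chain so the operation sequence
 * does not depend on the secret input.
 */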
568  |  | static inline void  | 
569  |  | finv(uint64_t *res, uint64_t *a)  | 
570  | 0  | { | 
571  | 0  |     uint64_t tmp[16U] = { 0U }; | 
572  | 0  |     uint64_t *x30 = tmp;  | 
573  | 0  |     uint64_t *x2 = tmp + (uint32_t)4U;  | 
574  | 0  |     uint64_t *tmp1 = tmp + (uint32_t)8U;  | 
575  | 0  |     uint64_t *tmp2 = tmp + (uint32_t)12U;  | 
576  | 0  |     memcpy(x2, a, (uint32_t)4U * sizeof(uint64_t));  | 
577  | 0  |     { | 
578  | 0  |         fsqr0(x2, x2);  | 
579  | 0  |     }  | 
580  | 0  |     fmul0(x2, x2, a);  | 
581  | 0  |     memcpy(x30, x2, (uint32_t)4U * sizeof(uint64_t));  | 
582  | 0  |     { | 
583  | 0  |         fsqr0(x30, x30);  | 
584  | 0  |     }  | 
585  | 0  |     fmul0(x30, x30, a);  | 
586  | 0  |     memcpy(tmp1, x30, (uint32_t)4U * sizeof(uint64_t));  | 
587  | 0  |     KRML_MAYBE_FOR3(i, (uint32_t)0U, (uint32_t)3U, (uint32_t)1U, fsqr0(tmp1, tmp1););  | 
588  | 0  |     fmul0(tmp1, tmp1, x30);  | 
589  | 0  |     memcpy(tmp2, tmp1, (uint32_t)4U * sizeof(uint64_t));  | 
590  | 0  |     KRML_MAYBE_FOR6(i, (uint32_t)0U, (uint32_t)6U, (uint32_t)1U, fsqr0(tmp2, tmp2););  | 
591  | 0  |     fmul0(tmp2, tmp2, tmp1);  | 
592  | 0  |     memcpy(tmp1, tmp2, (uint32_t)4U * sizeof(uint64_t));  | 
593  | 0  |     KRML_MAYBE_FOR3(i, (uint32_t)0U, (uint32_t)3U, (uint32_t)1U, fsqr0(tmp1, tmp1););  | 
594  | 0  |     fmul0(tmp1, tmp1, x30);  | 
595  | 0  |     memcpy(x30, tmp1, (uint32_t)4U * sizeof(uint64_t));  | 
596  | 0  |     KRML_MAYBE_FOR15(i, (uint32_t)0U, (uint32_t)15U, (uint32_t)1U, fsqr0(x30, x30););  | 
597  | 0  |     fmul0(x30, x30, tmp1);  | 
598  | 0  |     memcpy(tmp1, x30, (uint32_t)4U * sizeof(uint64_t));  | 
599  | 0  |     KRML_MAYBE_FOR2(i, (uint32_t)0U, (uint32_t)2U, (uint32_t)1U, fsqr0(tmp1, tmp1););  | 
600  | 0  |     fmul0(tmp1, tmp1, x2);  | 
601  | 0  |     memcpy(x2, tmp1, (uint32_t)4U * sizeof(uint64_t));  | 
602  | 0  |     for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++) { | 
603  | 0  |         fsqr0(x2, x2);  | 
604  | 0  |     }  | 
605  | 0  |     fmul0(x2, x2, a);  | 
606  | 0  |     for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++) { | 
607  | 0  |         fsqr0(x2, x2);  | 
608  | 0  |     }  | 
609  | 0  |     fmul0(x2, x2, tmp1);  | 
610  | 0  |     for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++) { | 
611  | 0  |         fsqr0(x2, x2);  | 
612  | 0  |     }  | 
613  | 0  |     fmul0(x2, x2, tmp1);  | 
614  | 0  |     for (uint32_t i = (uint32_t)0U; i < (uint32_t)30U; i++) { | 
615  | 0  |         fsqr0(x2, x2);  | 
616  | 0  |     }  | 
617  | 0  |     fmul0(x2, x2, x30);  | 
618  | 0  |     KRML_MAYBE_FOR2(i, (uint32_t)0U, (uint32_t)2U, (uint32_t)1U, fsqr0(x2, x2););  | 
619  | 0  |     fmul0(tmp1, x2, a);  | 
620  | 0  |     memcpy(res, tmp1, (uint32_t)4U * sizeof(uint64_t));  | 
621  | 0  | }  | 
622  |  |  | 
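/*
 * Modular square root: since p = 3 (mod 4), a^((p+1)/4) mod p is a square
 * root of a whenever one exists; callers verify the result by squaring, as
 * aff_point_decompress_vartime does below.
 */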
623  |  | static inline void  | 
624  |  | fsqrt(uint64_t *res, uint64_t *a)  | 
625  | 0  | { | 
626  | 0  |     uint64_t tmp[8U] = { 0U }; | 
627  | 0  |     uint64_t *tmp1 = tmp;  | 
628  | 0  |     uint64_t *tmp2 = tmp + (uint32_t)4U;  | 
629  | 0  |     memcpy(tmp1, a, (uint32_t)4U * sizeof(uint64_t));  | 
630  | 0  |     { | 
631  | 0  |         fsqr0(tmp1, tmp1);  | 
632  | 0  |     }  | 
633  | 0  |     fmul0(tmp1, tmp1, a);  | 
634  | 0  |     memcpy(tmp2, tmp1, (uint32_t)4U * sizeof(uint64_t));  | 
635  | 0  |     KRML_MAYBE_FOR2(i, (uint32_t)0U, (uint32_t)2U, (uint32_t)1U, fsqr0(tmp2, tmp2););  | 
636  | 0  |     fmul0(tmp2, tmp2, tmp1);  | 
637  | 0  |     memcpy(tmp1, tmp2, (uint32_t)4U * sizeof(uint64_t));  | 
638  | 0  |     KRML_MAYBE_FOR4(i, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, fsqr0(tmp1, tmp1););  | 
639  | 0  |     fmul0(tmp1, tmp1, tmp2);  | 
640  | 0  |     memcpy(tmp2, tmp1, (uint32_t)4U * sizeof(uint64_t));  | 
641  | 0  |     KRML_MAYBE_FOR8(i, (uint32_t)0U, (uint32_t)8U, (uint32_t)1U, fsqr0(tmp2, tmp2););  | 
642  | 0  |     fmul0(tmp2, tmp2, tmp1);  | 
643  | 0  |     memcpy(tmp1, tmp2, (uint32_t)4U * sizeof(uint64_t));  | 
644  | 0  |     KRML_MAYBE_FOR16(i, (uint32_t)0U, (uint32_t)16U, (uint32_t)1U, fsqr0(tmp1, tmp1););  | 
645  | 0  |     fmul0(tmp1, tmp1, tmp2);  | 
646  | 0  |     memcpy(tmp2, tmp1, (uint32_t)4U * sizeof(uint64_t));  | 
647  | 0  |     for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++) { | 
648  | 0  |         fsqr0(tmp2, tmp2);  | 
649  | 0  |     }  | 
650  | 0  |     fmul0(tmp2, tmp2, a);  | 
651  | 0  |     for (uint32_t i = (uint32_t)0U; i < (uint32_t)96U; i++) { | 
652  | 0  |         fsqr0(tmp2, tmp2);  | 
653  | 0  |     }  | 
654  | 0  |     fmul0(tmp2, tmp2, a);  | 
655  | 0  |     for (uint32_t i = (uint32_t)0U; i < (uint32_t)94U; i++) { | 
656  | 0  |         fsqr0(tmp2, tmp2);  | 
657  | 0  |     }  | 
658  | 0  |     memcpy(res, tmp2, (uint32_t)4U * sizeof(uint64_t));  | 
659  | 0  | }  | 
660  |  |  | 
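/*
 * Points are 12 limbs: projective coordinates (X, Y, Z), each in Montgomery
 * form. The point at infinity is (0, 1, 0); the base point G is stored with
 * Z = 1 (make_fone).
 */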
661  |  | static inline void  | 
662  |  | make_base_point(uint64_t *p)  | 
663  | 0  | { | 
664  | 0  |     uint64_t *x = p;  | 
665  | 0  |     uint64_t *y = p + (uint32_t)4U;  | 
666  | 0  |     uint64_t *z = p + (uint32_t)8U;  | 
667  | 0  |     make_g_x(x);  | 
668  | 0  |     make_g_y(y);  | 
669  | 0  |     make_fone(z);  | 
670  | 0  | }  | 
671  |  |  | 
672  |  | static inline void  | 
673  |  | make_point_at_inf(uint64_t *p)  | 
674  | 0  | { | 
675  | 0  |     uint64_t *x = p;  | 
676  | 0  |     uint64_t *y = p + (uint32_t)4U;  | 
677  | 0  |     uint64_t *z = p + (uint32_t)8U;  | 
678  | 0  |     make_fzero(x);  | 
679  | 0  |     make_fone(y);  | 
680  | 0  |     make_fzero(z);  | 
681  | 0  | }  | 
682  |  |  | 
683  |  | static inline bool  | 
684  |  | is_point_at_inf_vartime(uint64_t *p)  | 
685  | 0  | { | 
686  | 0  |     uint64_t *pz = p + (uint32_t)8U;  | 
687  | 0  |     return bn_is_zero_vartime4(pz);  | 
688  | 0  | }  | 
689  |  |  | 
690  |  | static inline void  | 
691  |  | to_aff_point(uint64_t *res, uint64_t *p)  | 
692  | 0  | { | 
693  | 0  |     uint64_t zinv[4U] = { 0U }; | 
694  | 0  |     uint64_t *px = p;  | 
695  | 0  |     uint64_t *py = p + (uint32_t)4U;  | 
696  | 0  |     uint64_t *pz = p + (uint32_t)8U;  | 
697  | 0  |     uint64_t *x = res;  | 
698  | 0  |     uint64_t *y = res + (uint32_t)4U;  | 
699  | 0  |     finv(zinv, pz);  | 
700  | 0  |     fmul0(x, px, zinv);  | 
701  | 0  |     fmul0(y, py, zinv);  | 
702  | 0  |     from_mont(x, x);  | 
703  | 0  |     from_mont(y, y);  | 
704  | 0  | }  | 
705  |  |  | 
706  |  | static inline void  | 
707  |  | to_aff_point_x(uint64_t *res, uint64_t *p)  | 
708  | 0  | { | 
709  | 0  |     uint64_t zinv[4U] = { 0U }; | 
710  | 0  |     uint64_t *px = p;  | 
711  | 0  |     uint64_t *pz = p + (uint32_t)8U;  | 
712  | 0  |     finv(zinv, pz);  | 
713  | 0  |     fmul0(res, px, zinv);  | 
714  | 0  |     from_mont(res, res);  | 
715  | 0  | }  | 
716  |  |  | 
717  |  | static inline void  | 
718  |  | to_proj_point(uint64_t *res, uint64_t *p)  | 
719  | 0  | { | 
720  | 0  |     uint64_t *px = p;  | 
721  | 0  |     uint64_t *py = p + (uint32_t)4U;  | 
722  | 0  |     uint64_t *rx = res;  | 
723  | 0  |     uint64_t *ry = res + (uint32_t)4U;  | 
724  | 0  |     uint64_t *rz = res + (uint32_t)8U;  | 
725  | 0  |     to_mont(rx, px);  | 
726  | 0  |     to_mont(ry, py);  | 
727  | 0  |     make_fone(rz);  | 
728  | 0  | }  | 
729  |  |  | 
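/*
 * Checks the curve equation y^2 == x^3 + a*x + b after mapping the affine
 * coordinates into the Montgomery domain. Variable time is acceptable here
 * because the point being validated is public.
 */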
730  |  | static inline bool  | 
731  |  | is_on_curve_vartime(uint64_t *p)  | 
732  | 0  | { | 
733  | 0  |     uint64_t rp[4U] = { 0U }; | 
734  | 0  |     uint64_t tx[4U] = { 0U }; | 
735  | 0  |     uint64_t ty[4U] = { 0U }; | 
736  | 0  |     uint64_t *px = p;  | 
737  | 0  |     uint64_t *py = p + (uint32_t)4U;  | 
738  | 0  |     to_mont(tx, px);  | 
739  | 0  |     to_mont(ty, py);  | 
740  | 0  |     uint64_t tmp[4U] = { 0U }; | 
741  | 0  |     fcube(rp, tx);  | 
742  | 0  |     make_a_coeff(tmp);  | 
743  | 0  |     fmul0(tmp, tmp, tx);  | 
744  | 0  |     fadd0(rp, tmp, rp);  | 
745  | 0  |     make_b_coeff(tmp);  | 
746  | 0  |     fadd0(rp, tmp, rp);  | 
747  | 0  |     fsqr0(ty, ty);  | 
748  | 0  |     uint64_t r = feq_mask(ty, rp);  | 
749  | 0  |     bool r0 = r == (uint64_t)0xFFFFFFFFFFFFFFFFU;  | 
750  | 0  |     return r0;  | 
751  | 0  | }  | 
752  |  |  | 
753  |  | static inline void  | 
754  |  | aff_point_store(uint8_t *res, uint64_t *p)  | 
755  | 0  | { | 
756  | 0  |     uint64_t *px = p;  | 
757  | 0  |     uint64_t *py = p + (uint32_t)4U;  | 
758  | 0  |     bn2_to_bytes_be4(res, px, py);  | 
759  | 0  | }  | 
760  |  |  | 
761  |  | static inline void  | 
762  |  | point_store(uint8_t *res, uint64_t *p)  | 
763  | 0  | { | 
764  | 0  |     uint64_t aff_p[8U] = { 0U }; | 
765  | 0  |     to_aff_point(aff_p, p);  | 
766  | 0  |     aff_point_store(res, aff_p);  | 
767  | 0  | }  | 
768  |  |  | 
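/*
 * Parses a 64-byte affine point (x || y, big-endian), rejecting coordinates
 * that are not below p and points that fail the curve equation;
 * load_point_vartime additionally lifts a valid point to projective
 * Montgomery coordinates.
 */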
769  |  | static inline bool  | 
770  |  | aff_point_load_vartime(uint64_t *p, uint8_t *b)  | 
771  | 0  | { | 
772  | 0  |     uint8_t *p_x = b;  | 
773  | 0  |     uint8_t *p_y = b + (uint32_t)32U;  | 
774  | 0  |     uint64_t *bn_p_x = p;  | 
775  | 0  |     uint64_t *bn_p_y = p + (uint32_t)4U;  | 
776  | 0  |     bn_from_bytes_be4(bn_p_x, p_x);  | 
777  | 0  |     bn_from_bytes_be4(bn_p_y, p_y);  | 
778  | 0  |     uint64_t *px = p;  | 
779  | 0  |     uint64_t *py = p + (uint32_t)4U;  | 
780  | 0  |     uint64_t lessX = bn_is_lt_prime_mask4(px);  | 
781  | 0  |     uint64_t lessY = bn_is_lt_prime_mask4(py);  | 
782  | 0  |     uint64_t res = lessX & lessY;  | 
783  | 0  |     bool is_xy_valid = res == (uint64_t)0xFFFFFFFFFFFFFFFFU;  | 
784  | 0  |     if (!is_xy_valid) { | 
785  | 0  |         return false;  | 
786  | 0  |     }  | 
787  | 0  |     return is_on_curve_vartime(p);  | 
788  | 0  | }  | 
789  |  |  | 
790  |  | static inline bool  | 
791  |  | load_point_vartime(uint64_t *p, uint8_t *b)  | 
792  | 0  | { | 
793  | 0  |     uint64_t p_aff[8U] = { 0U }; | 
794  | 0  |     bool res = aff_point_load_vartime(p_aff, b);  | 
795  | 0  |     if (res) { | 
796  | 0  |         to_proj_point(p, p_aff);  | 
797  | 0  |     }  | 
798  | 0  |     return res;  | 
799  | 0  | }  | 
800  |  |  | 
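/*
 * SEC1 point decompression: s is 0x02/0x03 || x (33 bytes). The candidate y
 * is recovered as sqrt(x^3 + a*x + b), checked by squaring, and conditionally
 * negated so its parity matches the prefix byte.
 */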
801  |  | static inline bool  | 
802  |  | aff_point_decompress_vartime(uint64_t *x, uint64_t *y, uint8_t *s)  | 
803  | 0  | { | 
804  | 0  |     uint8_t s0 = s[0U];  | 
805  | 0  |     uint8_t s01 = s0;  | 
806  | 0  |     if (!(s01 == (uint8_t)0x02U || s01 == (uint8_t)0x03U)) { | 
807  | 0  |         return false;  | 
808  | 0  |     }  | 
809  | 0  |     uint8_t *xb = s + (uint32_t)1U;  | 
810  | 0  |     bn_from_bytes_be4(x, xb);  | 
811  | 0  |     uint64_t is_x_valid = bn_is_lt_prime_mask4(x);  | 
812  | 0  |     bool is_x_valid1 = is_x_valid == (uint64_t)0xFFFFFFFFFFFFFFFFU;  | 
813  | 0  |     bool is_y_odd = s01 == (uint8_t)0x03U;  | 
814  | 0  |     if (!is_x_valid1) { | 
815  | 0  |         return false;  | 
816  | 0  |     }  | 
817  | 0  |     uint64_t y2M[4U] = { 0U }; | 
818  | 0  |     uint64_t xM[4U] = { 0U }; | 
819  | 0  |     uint64_t yM[4U] = { 0U }; | 
820  | 0  |     to_mont(xM, x);  | 
821  | 0  |     uint64_t tmp[4U] = { 0U }; | 
822  | 0  |     fcube(y2M, xM);  | 
823  | 0  |     make_a_coeff(tmp);  | 
824  | 0  |     fmul0(tmp, tmp, xM);  | 
825  | 0  |     fadd0(y2M, tmp, y2M);  | 
826  | 0  |     make_b_coeff(tmp);  | 
827  | 0  |     fadd0(y2M, tmp, y2M);  | 
828  | 0  |     fsqrt(yM, y2M);  | 
829  | 0  |     from_mont(y, yM);  | 
830  | 0  |     fsqr0(yM, yM);  | 
831  | 0  |     uint64_t r = feq_mask(yM, y2M);  | 
832  | 0  |     bool is_y_valid = r == (uint64_t)0xFFFFFFFFFFFFFFFFU;  | 
833  | 0  |     bool is_y_valid0 = is_y_valid;  | 
834  | 0  |     if (!is_y_valid0) { | 
835  | 0  |         return false;  | 
836  | 0  |     }  | 
837  | 0  |     uint64_t is_y_odd1 = y[0U] & (uint64_t)1U;  | 
838  | 0  |     bool is_y_odd2 = is_y_odd1 == (uint64_t)1U;  | 
839  | 0  |     fnegate_conditional_vartime(y, is_y_odd2 != is_y_odd);  | 
840  | 0  |     return true;  | 
841  | 0  | }  | 
842  |  |  | 
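/*
 * Projective point doubling with complete formulas for short-Weierstrass
 * curves with a = -3 (in the style of Renes-Costello-Batina); no special case
 * is needed for the point at infinity.
 */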
843  |  | static inline void  | 
844  |  | point_double(uint64_t *res, uint64_t *p)  | 
845  | 0  | { | 
846  | 0  |     uint64_t tmp[20U] = { 0U }; | 
847  | 0  |     uint64_t *x = p;  | 
848  | 0  |     uint64_t *z = p + (uint32_t)8U;  | 
849  | 0  |     uint64_t *x3 = res;  | 
850  | 0  |     uint64_t *y3 = res + (uint32_t)4U;  | 
851  | 0  |     uint64_t *z3 = res + (uint32_t)8U;  | 
852  | 0  |     uint64_t *t0 = tmp;  | 
853  | 0  |     uint64_t *t1 = tmp + (uint32_t)4U;  | 
854  | 0  |     uint64_t *t2 = tmp + (uint32_t)8U;  | 
855  | 0  |     uint64_t *t3 = tmp + (uint32_t)12U;  | 
856  | 0  |     uint64_t *t4 = tmp + (uint32_t)16U;  | 
857  | 0  |     uint64_t *x1 = p;  | 
858  | 0  |     uint64_t *y = p + (uint32_t)4U;  | 
859  | 0  |     uint64_t *z1 = p + (uint32_t)8U;  | 
860  | 0  |     fsqr0(t0, x1);  | 
861  | 0  |     fsqr0(t1, y);  | 
862  | 0  |     fsqr0(t2, z1);  | 
863  | 0  |     fmul0(t3, x1, y);  | 
864  | 0  |     fadd0(t3, t3, t3);  | 
865  | 0  |     fmul0(t4, y, z1);  | 
866  | 0  |     fmul0(z3, x, z);  | 
867  | 0  |     fadd0(z3, z3, z3);  | 
868  | 0  |     fmul_by_b_coeff(y3, t2);  | 
869  | 0  |     fsub0(y3, y3, z3);  | 
870  | 0  |     fadd0(x3, y3, y3);  | 
871  | 0  |     fadd0(y3, x3, y3);  | 
872  | 0  |     fsub0(x3, t1, y3);  | 
873  | 0  |     fadd0(y3, t1, y3);  | 
874  | 0  |     fmul0(y3, x3, y3);  | 
875  | 0  |     fmul0(x3, x3, t3);  | 
876  | 0  |     fadd0(t3, t2, t2);  | 
877  | 0  |     fadd0(t2, t2, t3);  | 
878  | 0  |     fmul_by_b_coeff(z3, z3);  | 
879  | 0  |     fsub0(z3, z3, t2);  | 
880  | 0  |     fsub0(z3, z3, t0);  | 
881  | 0  |     fadd0(t3, z3, z3);  | 
882  | 0  |     fadd0(z3, z3, t3);  | 
883  | 0  |     fadd0(t3, t0, t0);  | 
884  | 0  |     fadd0(t0, t3, t0);  | 
885  | 0  |     fsub0(t0, t0, t2);  | 
886  | 0  |     fmul0(t0, t0, z3);  | 
887  | 0  |     fadd0(y3, y3, t0);  | 
888  | 0  |     fadd0(t0, t4, t4);  | 
889  | 0  |     fmul0(z3, t0, z3);  | 
890  | 0  |     fsub0(x3, x3, z3);  | 
891  | 0  |     fmul0(z3, t0, t1);  | 
892  | 0  |     fadd0(z3, z3, z3);  | 
893  | 0  |     fadd0(z3, z3, z3);  | 
894  | 0  | }  | 
895  |  |  | 
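/*
 * Complete projective point addition: valid for any pair of inputs, including
 * P == Q and the point at infinity, so the scalar-multiplication loops below
 * never branch on intermediate points.
 */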
896  |  | static inline void  | 
897  |  | point_add(uint64_t *res, uint64_t *p, uint64_t *q)  | 
898  | 0  | { | 
899  | 0  |     uint64_t tmp[36U] = { 0U }; | 
900  | 0  |     uint64_t *t0 = tmp;  | 
901  | 0  |     uint64_t *t1 = tmp + (uint32_t)24U;  | 
902  | 0  |     uint64_t *x3 = t1;  | 
903  | 0  |     uint64_t *y3 = t1 + (uint32_t)4U;  | 
904  | 0  |     uint64_t *z3 = t1 + (uint32_t)8U;  | 
905  | 0  |     uint64_t *t01 = t0;  | 
906  | 0  |     uint64_t *t11 = t0 + (uint32_t)4U;  | 
907  | 0  |     uint64_t *t2 = t0 + (uint32_t)8U;  | 
908  | 0  |     uint64_t *t3 = t0 + (uint32_t)12U;  | 
909  | 0  |     uint64_t *t4 = t0 + (uint32_t)16U;  | 
910  | 0  |     uint64_t *t5 = t0 + (uint32_t)20U;  | 
911  | 0  |     uint64_t *x1 = p;  | 
912  | 0  |     uint64_t *y1 = p + (uint32_t)4U;  | 
913  | 0  |     uint64_t *z10 = p + (uint32_t)8U;  | 
914  | 0  |     uint64_t *x20 = q;  | 
915  | 0  |     uint64_t *y20 = q + (uint32_t)4U;  | 
916  | 0  |     uint64_t *z20 = q + (uint32_t)8U;  | 
917  | 0  |     fmul0(t01, x1, x20);  | 
918  | 0  |     fmul0(t11, y1, y20);  | 
919  | 0  |     fmul0(t2, z10, z20);  | 
920  | 0  |     fadd0(t3, x1, y1);  | 
921  | 0  |     fadd0(t4, x20, y20);  | 
922  | 0  |     fmul0(t3, t3, t4);  | 
923  | 0  |     fadd0(t4, t01, t11);  | 
924  | 0  |     uint64_t *y10 = p + (uint32_t)4U;  | 
925  | 0  |     uint64_t *z11 = p + (uint32_t)8U;  | 
926  | 0  |     uint64_t *y2 = q + (uint32_t)4U;  | 
927  | 0  |     uint64_t *z21 = q + (uint32_t)8U;  | 
928  | 0  |     fsub0(t3, t3, t4);  | 
929  | 0  |     fadd0(t4, y10, z11);  | 
930  | 0  |     fadd0(t5, y2, z21);  | 
931  | 0  |     fmul0(t4, t4, t5);  | 
932  | 0  |     fadd0(t5, t11, t2);  | 
933  | 0  |     fsub0(t4, t4, t5);  | 
934  | 0  |     uint64_t *x10 = p;  | 
935  | 0  |     uint64_t *z1 = p + (uint32_t)8U;  | 
936  | 0  |     uint64_t *x2 = q;  | 
937  | 0  |     uint64_t *z2 = q + (uint32_t)8U;  | 
938  | 0  |     fadd0(x3, x10, z1);  | 
939  | 0  |     fadd0(y3, x2, z2);  | 
940  | 0  |     fmul0(x3, x3, y3);  | 
941  | 0  |     fadd0(y3, t01, t2);  | 
942  | 0  |     fsub0(y3, x3, y3);  | 
943  | 0  |     fmul_by_b_coeff(z3, t2);  | 
944  | 0  |     fsub0(x3, y3, z3);  | 
945  | 0  |     fadd0(z3, x3, x3);  | 
946  | 0  |     fadd0(x3, x3, z3);  | 
947  | 0  |     fsub0(z3, t11, x3);  | 
948  | 0  |     fadd0(x3, t11, x3);  | 
949  | 0  |     fmul_by_b_coeff(y3, y3);  | 
950  | 0  |     fadd0(t11, t2, t2);  | 
951  | 0  |     fadd0(t2, t11, t2);  | 
952  | 0  |     fsub0(y3, y3, t2);  | 
953  | 0  |     fsub0(y3, y3, t01);  | 
954  | 0  |     fadd0(t11, y3, y3);  | 
955  | 0  |     fadd0(y3, t11, y3);  | 
956  | 0  |     fadd0(t11, t01, t01);  | 
957  | 0  |     fadd0(t01, t11, t01);  | 
958  | 0  |     fsub0(t01, t01, t2);  | 
959  | 0  |     fmul0(t11, t4, y3);  | 
960  | 0  |     fmul0(t2, t01, y3);  | 
961  | 0  |     fmul0(y3, x3, z3);  | 
962  | 0  |     fadd0(y3, y3, t2);  | 
963  | 0  |     fmul0(x3, t3, x3);  | 
964  | 0  |     fsub0(x3, x3, t11);  | 
965  | 0  |     fmul0(z3, t4, z3);  | 
966  | 0  |     fmul0(t11, t3, t01);  | 
967  | 0  |     fadd0(z3, z3, t11);  | 
968  | 0  |     memcpy(res, t1, (uint32_t)12U * sizeof(uint64_t));  | 
969  | 0  | }  | 
970  |  |  | 
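/*
 * Constant-time scalar multiplication res = [scalar]p with a fixed 4-bit
 * window: a 16-entry table of small multiples of p is built by double/add,
 * and each window value is fetched with a masked scan over the whole table
 * (the same pattern is factored out as precomp_get_consttime below), so the
 * accessed index is not exposed through memory access patterns.
 */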
971  |  | static inline void  | 
972  |  | point_mul(uint64_t *res, uint64_t *scalar, uint64_t *p)  | 
973  | 0  | { | 
974  | 0  |     uint64_t table[192U] = { 0U }; | 
975  | 0  |     uint64_t tmp[12U] = { 0U }; | 
976  | 0  |     uint64_t *t0 = table;  | 
977  | 0  |     uint64_t *t1 = table + (uint32_t)12U;  | 
978  | 0  |     make_point_at_inf(t0);  | 
979  | 0  |     memcpy(t1, p, (uint32_t)12U * sizeof(uint64_t));  | 
980  | 0  |     KRML_MAYBE_FOR7(i,  | 
981  | 0  |                     (uint32_t)0U,  | 
982  | 0  |                     (uint32_t)7U,  | 
983  | 0  |                     (uint32_t)1U,  | 
984  | 0  |                     uint64_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)12U;  | 
985  | 0  |                     point_double(tmp, t11);  | 
986  | 0  |                     memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)12U,  | 
987  | 0  |                            tmp,  | 
988  | 0  |                            (uint32_t)12U * sizeof(uint64_t));  | 
989  | 0  |                     uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)12U;  | 
990  | 0  |                     point_add(tmp, p, t2);  | 
991  | 0  |                     memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)12U,  | 
992  | 0  |                            tmp,  | 
993  | 0  |                            (uint32_t)12U * sizeof(uint64_t)););  | 
994  | 0  |     make_point_at_inf(res);  | 
995  | 0  |     uint64_t tmp0[12U] = { 0U }; | 
996  | 0  |     for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)64U; i0++) { | 
997  | 0  |         KRML_MAYBE_FOR4(i, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, point_double(res, res););  | 
998  | 0  |         uint32_t k = (uint32_t)256U - (uint32_t)4U * i0 - (uint32_t)4U;  | 
999  | 0  |         uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar, k, (uint32_t)4U);  | 
1000  | 0  |         memcpy(tmp0, (uint64_t *)table, (uint32_t)12U * sizeof(uint64_t));  | 
1001  | 0  |         KRML_MAYBE_FOR15(i1,  | 
1002  | 0  |                          (uint32_t)0U,  | 
1003  | 0  |                          (uint32_t)15U,  | 
1004  | 0  |                          (uint32_t)1U,  | 
1005  | 0  |                          uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + (uint32_t)1U));  | 
1006  | 0  |                          const uint64_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)12U;  | 
1007  | 0  |                          KRML_MAYBE_FOR12(i,  | 
1008  | 0  |                                           (uint32_t)0U,  | 
1009  | 0  |                                           (uint32_t)12U,  | 
1010  | 0  |                                           (uint32_t)1U,  | 
1011  | 0  |                                           uint64_t *os = tmp0;  | 
1012  | 0  |                                           uint64_t x = (c & res_j[i]) | (~c & tmp0[i]);  | 
1013  | 0  |                                           os[i] = x;););  | 
1014  | 0  |         point_add(res, res, tmp0);  | 
1015  | 0  |     }  | 
1016  | 0  | }  | 
1017  |  |  | 
1018  |  | static inline void  | 
1019  |  | precomp_get_consttime(const uint64_t *table, uint64_t bits_l, uint64_t *tmp)  | 
1020  | 0  | { | 
1021  | 0  |     memcpy(tmp, (uint64_t *)table, (uint32_t)12U * sizeof(uint64_t));  | 
1022  | 0  |     KRML_MAYBE_FOR15(i0,  | 
1023  | 0  |                      (uint32_t)0U,  | 
1024  | 0  |                      (uint32_t)15U,  | 
1025  | 0  |                      (uint32_t)1U,  | 
1026  | 0  |                      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i0 + (uint32_t)1U));  | 
1027  | 0  |                      const uint64_t *res_j = table + (i0 + (uint32_t)1U) * (uint32_t)12U;  | 
1028  | 0  |                      KRML_MAYBE_FOR12(i,  | 
1029  | 0  |                                       (uint32_t)0U,  | 
1030  | 0  |                                       (uint32_t)12U,  | 
1031  | 0  |                                       (uint32_t)1U,  | 
1032  | 0  |                                       uint64_t *os = tmp;  | 
1033  | 0  |                                       uint64_t x = (c & res_j[i]) | (~c & tmp[i]);  | 
1034  | 0  |                                       os[i] = x;););  | 
1035  | 0  | }  | 
1036  |  |  | 
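/*
 * Fixed-base scalar multiplication res = [scalar]G. The 256-bit scalar is
 * split into four 64-bit chunks r1..r4, each consumed with 4-bit windows
 * against precomputed tables for G, 2^64*G, 2^128*G and 2^192*G, so only 16
 * doubling rounds are needed. The locals q1..q4 appear to mirror those table
 * generators; they are unused at runtime and explicitly ignored.
 */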
1037  |  | static inline void  | 
1038  |  | point_mul_g(uint64_t *res, uint64_t *scalar)  | 
1039  | 0  | { | 
1040  | 0  |     uint64_t q1[12U] = { 0U }; | 
1041  | 0  |     make_base_point(q1);  | 
1042  | 0  |     uint64_t  | 
1043  | 0  |         q2[12U] = { | 
1044  | 0  |             (uint64_t)1499621593102562565U, (uint64_t)16692369783039433128U,  | 
1045  | 0  |             (uint64_t)15337520135922861848U, (uint64_t)5455737214495366228U,  | 
1046  | 0  |             (uint64_t)17827017231032529600U, (uint64_t)12413621606240782649U,  | 
1047  | 0  |             (uint64_t)2290483008028286132U, (uint64_t)15752017553340844820U,  | 
1048  | 0  |             (uint64_t)4846430910634234874U, (uint64_t)10861682798464583253U,  | 
1049  | 0  |             (uint64_t)15404737222404363049U, (uint64_t)363586619281562022U  | 
1050  | 0  |         };  | 
1051  | 0  |     uint64_t  | 
1052  | 0  |         q3[12U] = { | 
1053  | 0  |             (uint64_t)14619254753077084366U, (uint64_t)13913835116514008593U,  | 
1054  | 0  |             (uint64_t)15060744674088488145U, (uint64_t)17668414598203068685U,  | 
1055  | 0  |             (uint64_t)10761169236902342334U, (uint64_t)15467027479157446221U,  | 
1056  | 0  |             (uint64_t)14989185522423469618U, (uint64_t)14354539272510107003U,  | 
1057  | 0  |             (uint64_t)14298211796392133693U, (uint64_t)13270323784253711450U,  | 
1058  | 0  |             (uint64_t)13380964971965046957U, (uint64_t)8686204248456909699U  | 
1059  | 0  |         };  | 
1060  | 0  |     uint64_t  | 
1061  | 0  |         q4[12U] = { | 
1062  | 0  |             (uint64_t)7870395003430845958U, (uint64_t)18001862936410067720U,  | 
1063  | 0  |             (uint64_t)8006461232116967215U, (uint64_t)5921313779532424762U,  | 
1064  | 0  |             (uint64_t)10702113371959864307U, (uint64_t)8070517410642379879U,  | 
1065  | 0  |             (uint64_t)7139806720777708306U, (uint64_t)8253938546650739833U,  | 
1066  | 0  |             (uint64_t)17490482834545705718U, (uint64_t)1065249776797037500U,  | 
1067  | 0  |             (uint64_t)5018258455937968775U, (uint64_t)14100621120178668337U  | 
1068  | 0  |         };  | 
1069  | 0  |     uint64_t *r1 = scalar;  | 
1070  | 0  |     uint64_t *r2 = scalar + (uint32_t)1U;  | 
1071  | 0  |     uint64_t *r3 = scalar + (uint32_t)2U;  | 
1072  | 0  |     uint64_t *r4 = scalar + (uint32_t)3U;  | 
1073  | 0  |     make_point_at_inf(res);  | 
1074  | 0  |     uint64_t tmp[12U] = { 0U }; | 
1075  | 0  |     KRML_MAYBE_FOR16(i,  | 
1076  | 0  |                      (uint32_t)0U,  | 
1077  | 0  |                      (uint32_t)16U,  | 
1078  | 0  |                      (uint32_t)1U,  | 
1079  | 0  |                      KRML_MAYBE_FOR4(i0, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, point_double(res, res););  | 
1080  | 0  |                      uint32_t k = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;  | 
1081  | 0  |                      uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r4, k, (uint32_t)4U);  | 
1082  | 0  |                      precomp_get_consttime(Hacl_P256_PrecompTable_precomp_g_pow2_192_table_w4, bits_l, tmp);  | 
1083  | 0  |                      point_add(res, res, tmp);  | 
1084  | 0  |                      uint32_t k0 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;  | 
1085  | 0  |                      uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r3, k0, (uint32_t)4U);  | 
1086  | 0  |                      precomp_get_consttime(Hacl_P256_PrecompTable_precomp_g_pow2_128_table_w4, bits_l0, tmp);  | 
1087  | 0  |                      point_add(res, res, tmp);  | 
1088  | 0  |                      uint32_t k1 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;  | 
1089  | 0  |                      uint64_t bits_l1 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r2, k1, (uint32_t)4U);  | 
1090  | 0  |                      precomp_get_consttime(Hacl_P256_PrecompTable_precomp_g_pow2_64_table_w4, bits_l1, tmp);  | 
1091  | 0  |                      point_add(res, res, tmp);  | 
1092  | 0  |                      uint32_t k2 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;  | 
1093  | 0  |                      uint64_t bits_l2 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r1, k2, (uint32_t)4U);  | 
1094  | 0  |                      precomp_get_consttime(Hacl_P256_PrecompTable_precomp_basepoint_table_w4, bits_l2, tmp);  | 
1095  | 0  |                      point_add(res, res, tmp););  | 
1096  | 0  |     KRML_HOST_IGNORE(q1);  | 
1097  | 0  |     KRML_HOST_IGNORE(q2);  | 
1098  | 0  |     KRML_HOST_IGNORE(q3);  | 
1099  | 0  |     KRML_HOST_IGNORE(q4);  | 
1100  | 0  | }  | 
1101  |  |  | 
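/*
 * Double-base multiplication res = [scalar1]G + [scalar2]q2 with 5-bit
 * windows. Table lookups index directly into the tables (variable time),
 * which is appropriate only where both scalars are public, e.g. signature
 * verification.
 */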
1102  |  | static inline void  | 
1103  |  | point_mul_double_g(uint64_t *res, uint64_t *scalar1, uint64_t *scalar2, uint64_t *q2)  | 
1104  | 0  | { | 
1105  | 0  |     uint64_t q1[12U] = { 0U }; | 
1106  | 0  |     make_base_point(q1);  | 
1107  | 0  |     uint64_t table2[384U] = { 0U }; | 
1108  | 0  |     uint64_t tmp[12U] = { 0U }; | 
1109  | 0  |     uint64_t *t0 = table2;  | 
1110  | 0  |     uint64_t *t1 = table2 + (uint32_t)12U;  | 
1111  | 0  |     make_point_at_inf(t0);  | 
1112  | 0  |     memcpy(t1, q2, (uint32_t)12U * sizeof(uint64_t));  | 
1113  | 0  |     KRML_MAYBE_FOR15(i,  | 
1114  | 0  |                      (uint32_t)0U,  | 
1115  | 0  |                      (uint32_t)15U,  | 
1116  | 0  |                      (uint32_t)1U,  | 
1117  | 0  |                      uint64_t *t11 = table2 + (i + (uint32_t)1U) * (uint32_t)12U;  | 
1118  | 0  |                      point_double(tmp, t11);  | 
1119  | 0  |                      memcpy(table2 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)12U,  | 
1120  | 0  |                             tmp,  | 
1121  | 0  |                             (uint32_t)12U * sizeof(uint64_t));  | 
1122  | 0  |                      uint64_t *t2 = table2 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)12U;  | 
1123  | 0  |                      point_add(tmp, q2, t2);  | 
1124  | 0  |                      memcpy(table2 + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)12U,  | 
1125  | 0  |                             tmp,  | 
1126  | 0  |                             (uint32_t)12U * sizeof(uint64_t)););  | 
1127  | 0  |     uint64_t tmp0[12U] = { 0U }; | 
1128  | 0  |     uint32_t i0 = (uint32_t)255U;  | 
1129  | 0  |     uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar1, i0, (uint32_t)5U);  | 
1130  | 0  |     uint32_t bits_l32 = (uint32_t)bits_c;  | 
1131  | 0  |     const uint64_t  | 
1132  | 0  |         *a_bits_l = Hacl_P256_PrecompTable_precomp_basepoint_table_w5 + bits_l32 * (uint32_t)12U;  | 
1133  | 0  |     memcpy(res, (uint64_t *)a_bits_l, (uint32_t)12U * sizeof(uint64_t));  | 
1134  | 0  |     uint32_t i1 = (uint32_t)255U;  | 
1135  | 0  |     uint64_t bits_c0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar2, i1, (uint32_t)5U);  | 
1136  | 0  |     uint32_t bits_l320 = (uint32_t)bits_c0;  | 
1137  | 0  |     const uint64_t *a_bits_l0 = table2 + bits_l320 * (uint32_t)12U;  | 
1138  | 0  |     memcpy(tmp0, (uint64_t *)a_bits_l0, (uint32_t)12U * sizeof(uint64_t));  | 
1139  | 0  |     point_add(res, res, tmp0);  | 
1140  | 0  |     uint64_t tmp1[12U] = { 0U }; | 
1141  | 0  |     for (uint32_t i = (uint32_t)0U; i < (uint32_t)51U; i++) { | 
1142  | 0  |         KRML_MAYBE_FOR5(i2, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, point_double(res, res););  | 
1143  | 0  |         uint32_t k = (uint32_t)255U - (uint32_t)5U * i - (uint32_t)5U;  | 
1144  | 0  |         uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar2, k, (uint32_t)5U);  | 
1145  | 0  |         uint32_t bits_l321 = (uint32_t)bits_l;  | 
1146  | 0  |         const uint64_t *a_bits_l1 = table2 + bits_l321 * (uint32_t)12U;  | 
1147  | 0  |         memcpy(tmp1, (uint64_t *)a_bits_l1, (uint32_t)12U * sizeof(uint64_t));  | 
1148  | 0  |         point_add(res, res, tmp1);  | 
1149  | 0  |         uint32_t k0 = (uint32_t)255U - (uint32_t)5U * i - (uint32_t)5U;  | 
1150  | 0  |         uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar1, k0, (uint32_t)5U);  | 
1151  | 0  |         uint32_t bits_l322 = (uint32_t)bits_l0;  | 
1152  | 0  |         const uint64_t  | 
1153  | 0  |             *a_bits_l2 = Hacl_P256_PrecompTable_precomp_basepoint_table_w5 + bits_l322 * (uint32_t)12U;  | 
1154  | 0  |         memcpy(tmp1, (uint64_t *)a_bits_l2, (uint32_t)12U * sizeof(uint64_t));  | 
1155  | 0  |         point_add(res, res, tmp1);  | 
1156  | 0  |     }  | 
1157  | 0  | }  | 
1158  |  |  | 
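/* Returns 2^64 - 1 if f is strictly less than the group order and 0
 * otherwise, in constant time, via the borrow of f - order. */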
1159  |  | static inline uint64_t  | 
1160  |  | bn_is_lt_order_mask4(uint64_t *f)  | 
1161  | 0  | { | 
1162  | 0  |     uint64_t tmp[4U] = { 0U }; | 
1163  | 0  |     make_order(tmp);  | 
1164  | 0  |     uint64_t c = bn_sub4(tmp, f, tmp);  | 
1165  | 0  |     return (uint64_t)0U - c;  | 
1166  | 0  | }  | 
1167  |  |  | 
1168  |  | static inline uint64_t  | 
1169  |  | bn_is_lt_order_and_gt_zero_mask4(uint64_t *f)  | 
1170  | 0  | { | 
1171  | 0  |     uint64_t is_lt_order = bn_is_lt_order_mask4(f);  | 
1172  | 0  |     uint64_t is_eq_zero = bn_is_zero_mask4(f);  | 
1173  | 0  |     return is_lt_order & ~is_eq_zero;  | 
1174  | 0  | }  | 
1175  |  |  | 
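/* Reduce a 4-limb value modulo the group order with a single conditional
 * subtraction (sufficient because any 256-bit value is below 2 * order). */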
1176  |  | static inline void  | 
1177  |  | qmod_short(uint64_t *res, uint64_t *x)  | 
1178  | 0  | { | 
1179  | 0  |     uint64_t tmp[4U] = { 0U }; | 
1180  | 0  |     make_order(tmp);  | 
1181  | 0  |     uint64_t c = bn_sub4(tmp, x, tmp);  | 
1182  | 0  |     bn_cmovznz4(res, c, tmp, x);  | 
1183  | 0  | }  | 
1184  |  |  | 
1185  |  | static inline void  | 
1186  |  | qadd(uint64_t *res, uint64_t *x, uint64_t *y)  | 
1187  | 0  | { | 
1188  | 0  |     uint64_t n[4U] = { 0U }; | 
1189  | 0  |     make_order(n);  | 
1190  | 0  |     bn_add_mod4(res, n, x, y);  | 
1191  | 0  | }  | 
1192  |  |  | 
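/* Montgomery reduction of an 8-limb input modulo the group order:
 * res = x * 2^(-256) mod order. The per-limb factor 0xccd1c8aaee00bc4f
 * is -order^(-1) mod 2^64. */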
1193  |  | static inline void  | 
1194  |  | qmont_reduction(uint64_t *res, uint64_t *x)  | 
1195  | 0  | { | 
1196  | 0  |     uint64_t n[4U] = { 0U }; | 
1197  | 0  |     make_order(n);  | 
1198  | 0  |     uint64_t c0 = (uint64_t)0U;  | 
1199  | 0  |     KRML_MAYBE_FOR4(  | 
1200  | 0  |         i0,  | 
1201  | 0  |         (uint32_t)0U,  | 
1202  | 0  |         (uint32_t)4U,  | 
1203  | 0  |         (uint32_t)1U,  | 
1204  | 0  |         uint64_t qj = (uint64_t)0xccd1c8aaee00bc4fU * x[i0];  | 
1205  | 0  |         uint64_t *res_j0 = x + i0;  | 
1206  | 0  |         uint64_t c = (uint64_t)0U;  | 
1207  | 0  |         { | 
1208  | 0  |             uint64_t a_i = n[(uint32_t)4U * (uint32_t)0U];  | 
1209  | 0  |             uint64_t *res_i0 = res_j0 + (uint32_t)4U * (uint32_t)0U;  | 
1210  | 0  |             c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c, res_i0);  | 
1211  | 0  |             uint64_t a_i0 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];  | 
1212  | 0  |             uint64_t *res_i1 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;  | 
1213  | 0  |             c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c, res_i1);  | 
1214  | 0  |             uint64_t a_i1 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];  | 
1215  | 0  |             uint64_t *res_i2 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;  | 
1216  | 0  |             c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c, res_i2);  | 
1217  | 0  |             uint64_t a_i2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];  | 
1218  | 0  |             uint64_t *res_i = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;  | 
1219  | 0  |             c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c, res_i);  | 
1220  | 0  |         } uint64_t r = c;  | 
1221  | 0  |         uint64_t c1 = r;  | 
1222  | 0  |         uint64_t *resb = x + (uint32_t)4U + i0;  | 
1223  | 0  |         uint64_t res_j = x[(uint32_t)4U + i0];  | 
1224  | 0  |         c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c1, res_j, resb););  | 
1225  | 0  |     memcpy(res, x + (uint32_t)4U, (uint32_t)4U * sizeof(uint64_t));  | 
1226  | 0  |     uint64_t c00 = c0;  | 
1227  | 0  |     uint64_t tmp[4U] = { 0U }; | 
1228  | 0  |     uint64_t c = (uint64_t)0U;  | 
1229  | 0  |     { | 
1230  | 0  |         uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U];  | 
1231  | 0  |         uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U];  | 
1232  | 0  |         uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U;  | 
1233  | 0  |         c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);  | 
1234  | 0  |         uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];  | 
1235  | 0  |         uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];  | 
1236  | 0  |         uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;  | 
1237  | 0  |         c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);  | 
1238  | 0  |         uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];  | 
1239  | 0  |         uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];  | 
1240  | 0  |         uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;  | 
1241  | 0  |         c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);  | 
1242  | 0  |         uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];  | 
1243  | 0  |         uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];  | 
1244  | 0  |         uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;  | 
1245  | 0  |         c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);  | 
1246  | 0  |     }  | 
1247  | 0  |     uint64_t c1 = c;  | 
1248  | 0  |     uint64_t c2 = c00 - c1;  | 
1249  | 0  |     KRML_MAYBE_FOR4(i,  | 
1250  | 0  |                     (uint32_t)0U,  | 
1251  | 0  |                     (uint32_t)4U,  | 
1252  | 0  |                     (uint32_t)1U,  | 
1253  | 0  |                     uint64_t *os = res;  | 
1254  | 0  |                     uint64_t x1 = (c2 & res[i]) | (~c2 & tmp[i]);  | 
1255  | 0  |                     os[i] = x1;);  | 
1256  | 0  | }  | 
1257  |  |  | 
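/* Convert out of the Montgomery domain used by qmul/qsqr:
 * res = x * 2^(-256) mod order. */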
1258  |  | static inline void  | 
1259  |  | from_qmont(uint64_t *res, uint64_t *x)  | 
1260  | 0  | { | 
1261  | 0  |     uint64_t tmp[8U] = { 0U }; | 
1262  | 0  |     memcpy(tmp, x, (uint32_t)4U * sizeof(uint64_t));  | 
1263  | 0  |     qmont_reduction(res, tmp);  | 
1264  | 0  | }  | 
1265  |  |  | 
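/* Montgomery multiplication modulo the group order:
 * res = x * y * 2^(-256) mod order (qsqr below is the squaring variant). */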
1266  |  | static inline void  | 
1267  |  | qmul(uint64_t *res, uint64_t *x, uint64_t *y)  | 
1268  | 0  | { | 
1269  | 0  |     uint64_t tmp[8U] = { 0U }; | 
1270  | 0  |     bn_mul4(tmp, x, y);  | 
1271  | 0  |     qmont_reduction(res, tmp);  | 
1272  | 0  | }  | 
1273  |  |  | 
1274  |  | static inline void  | 
1275  |  | qsqr(uint64_t *res, uint64_t *x)  | 
1276  | 0  | { | 
1277  | 0  |     uint64_t tmp[8U] = { 0U }; | 
1278  | 0  |     bn_sqr4(tmp, x);  | 
1279  | 0  |     qmont_reduction(res, tmp);  | 
1280  | 0  | }  | 
1281  |  |  | 
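/* ECDH initiator / public-key derivation: validates the private key in
 * constant time (an out-of-range key is replaced by 1 so the scalar
 * multiplication still runs), computes [sk]G and stores it as affine
 * x || y. Returns true iff 0 < private_key < order. */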
1282  |  | bool  | 
1283  |  | Hacl_Impl_P256_DH_ecp256dh_i(uint8_t *public_key, uint8_t *private_key)  | 
1284  | 0  | { | 
1285  | 0  |     uint64_t tmp[16U] = { 0U }; | 
1286  | 0  |     uint64_t *sk = tmp;  | 
1287  | 0  |     uint64_t *pk = tmp + (uint32_t)4U;  | 
1288  | 0  |     bn_from_bytes_be4(sk, private_key);  | 
1289  | 0  |     uint64_t is_b_valid = bn_is_lt_order_and_gt_zero_mask4(sk);  | 
1290  | 0  |     uint64_t oneq[4U] = { 0U }; | 
1291  | 0  |     oneq[0U] = (uint64_t)1U;  | 
1292  | 0  |     oneq[1U] = (uint64_t)0U;  | 
1293  | 0  |     oneq[2U] = (uint64_t)0U;  | 
1294  | 0  |     oneq[3U] = (uint64_t)0U;  | 
1295  | 0  |     KRML_MAYBE_FOR4(i,  | 
1296  | 0  |                     (uint32_t)0U,  | 
1297  | 0  |                     (uint32_t)4U,  | 
1298  | 0  |                     (uint32_t)1U,  | 
1299  | 0  |                     uint64_t *os = sk;  | 
1300  | 0  |                     uint64_t uu____0 = oneq[i];  | 
1301  | 0  |                     uint64_t x = uu____0 ^ (is_b_valid & (sk[i] ^ uu____0));  | 
1302  | 0  |                     os[i] = x;);  | 
1303  | 0  |     uint64_t is_sk_valid = is_b_valid;  | 
1304  | 0  |     point_mul_g(pk, sk);  | 
1305  | 0  |     point_store(public_key, pk);  | 
1306  | 0  |     return is_sk_valid == (uint64_t)0xFFFFFFFFFFFFFFFFU;  | 
1307  | 0  | }  | 
1308  |  |  | 
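/* ECDH responder: loads and validates the peer's public key (variable
 * time), validates the private key as in ecp256dh_i, and stores the shared
 * point [sk]pk as affine x || y. Returns true iff both keys are valid. */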
1309  |  | bool  | 
1310  |  | Hacl_Impl_P256_DH_ecp256dh_r(  | 
1311  |  |     uint8_t *shared_secret,  | 
1312  |  |     uint8_t *their_pubkey,  | 
1313  |  |     uint8_t *private_key)  | 
1314  | 0  | { | 
1315  | 0  |     uint64_t tmp[16U] = { 0U }; | 
1316  | 0  |     uint64_t *sk = tmp;  | 
1317  | 0  |     uint64_t *pk = tmp + (uint32_t)4U;  | 
1318  | 0  |     bool is_pk_valid = load_point_vartime(pk, their_pubkey);  | 
1319  | 0  |     bn_from_bytes_be4(sk, private_key);  | 
1320  | 0  |     uint64_t is_b_valid = bn_is_lt_order_and_gt_zero_mask4(sk);  | 
1321  | 0  |     uint64_t oneq[4U] = { 0U }; | 
1322  | 0  |     oneq[0U] = (uint64_t)1U;  | 
1323  | 0  |     oneq[1U] = (uint64_t)0U;  | 
1324  | 0  |     oneq[2U] = (uint64_t)0U;  | 
1325  | 0  |     oneq[3U] = (uint64_t)0U;  | 
1326  | 0  |     KRML_MAYBE_FOR4(i,  | 
1327  | 0  |                     (uint32_t)0U,  | 
1328  | 0  |                     (uint32_t)4U,  | 
1329  | 0  |                     (uint32_t)1U,  | 
1330  | 0  |                     uint64_t *os = sk;  | 
1331  | 0  |                     uint64_t uu____0 = oneq[i];  | 
1332  | 0  |                     uint64_t x = uu____0 ^ (is_b_valid & (sk[i] ^ uu____0));  | 
1333  | 0  |                     os[i] = x;);  | 
1334  | 0  |     uint64_t is_sk_valid = is_b_valid;  | 
1335  | 0  |     uint64_t ss_proj[12U] = { 0U }; | 
1336  | 0  |     if (is_pk_valid) { | 
1337  | 0  |         point_mul(ss_proj, sk, pk);  | 
1338  | 0  |         point_store(shared_secret, ss_proj);  | 
1339  | 0  |     }  | 
1340  | 0  |     return is_sk_valid == (uint64_t)0xFFFFFFFFFFFFFFFFU && is_pk_valid;  | 
1341  | 0  | }  | 
1342  |  |  | 
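/* Modular inverse with respect to the group order, computed as r^(order-2)
 * via a fixed addition chain (Fermat's little theorem), so the sequence of
 * squarings and multiplications does not depend on the input. */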
1343  |  | static inline void  | 
1344  |  | qinv(uint64_t *res, uint64_t *r)  | 
1345  | 0  | { | 
1346  | 0  |     uint64_t tmp[28U] = { 0U }; | 
1347  | 0  |     uint64_t *x6 = tmp;  | 
1348  | 0  |     uint64_t *x_11 = tmp + (uint32_t)4U;  | 
1349  | 0  |     uint64_t *x_101 = tmp + (uint32_t)8U;  | 
1350  | 0  |     uint64_t *x_111 = tmp + (uint32_t)12U;  | 
1351  | 0  |     uint64_t *x_1111 = tmp + (uint32_t)16U;  | 
1352  | 0  |     uint64_t *x_10101 = tmp + (uint32_t)20U;  | 
1353  | 0  |     uint64_t *x_101111 = tmp + (uint32_t)24U;  | 
1354  | 0  |     memcpy(x6, r, (uint32_t)4U * sizeof(uint64_t));  | 
1355  | 0  |     { | 
1356  | 0  |         qsqr(x6, x6);  | 
1357  | 0  |     }  | 
1358  | 0  |     qmul(x_11, x6, r);  | 
1359  | 0  |     qmul(x_101, x6, x_11);  | 
1360  | 0  |     qmul(x_111, x6, x_101);  | 
1361  | 0  |     memcpy(x6, x_101, (uint32_t)4U * sizeof(uint64_t));  | 
1362  | 0  |     { | 
1363  | 0  |         qsqr(x6, x6);  | 
1364  | 0  |     }  | 
1365  | 0  |     qmul(x_1111, x_101, x6);  | 
1366  | 0  |     { | 
1367  | 0  |         qsqr(x6, x6);  | 
1368  | 0  |     }  | 
1369  | 0  |     qmul(x_10101, x6, r);  | 
1370  | 0  |     memcpy(x6, x_10101, (uint32_t)4U * sizeof(uint64_t));  | 
1371  | 0  |     { | 
1372  | 0  |         qsqr(x6, x6);  | 
1373  | 0  |     }  | 
1374  | 0  |     qmul(x_101111, x_101, x6);  | 
1375  | 0  |     qmul(x6, x_10101, x6);  | 
1376  | 0  |     uint64_t tmp1[4U] = { 0U }; | 
1377  | 0  |     KRML_MAYBE_FOR2(i, (uint32_t)0U, (uint32_t)2U, (uint32_t)1U, qsqr(x6, x6););  | 
1378  | 0  |     qmul(x6, x6, x_11);  | 
1379  | 0  |     memcpy(tmp1, x6, (uint32_t)4U * sizeof(uint64_t));  | 
1380  | 0  |     KRML_MAYBE_FOR8(i, (uint32_t)0U, (uint32_t)8U, (uint32_t)1U, qsqr(tmp1, tmp1););  | 
1381  | 0  |     qmul(tmp1, tmp1, x6);  | 
1382  | 0  |     memcpy(x6, tmp1, (uint32_t)4U * sizeof(uint64_t));  | 
1383  | 0  |     KRML_MAYBE_FOR16(i, (uint32_t)0U, (uint32_t)16U, (uint32_t)1U, qsqr(x6, x6););  | 
1384  | 0  |     qmul(x6, x6, tmp1);  | 
1385  | 0  |     memcpy(tmp1, x6, (uint32_t)4U * sizeof(uint64_t));  | 
1386  | 0  |     for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++) { | 
1387  | 0  |         qsqr(tmp1, tmp1);  | 
1388  | 0  |     }  | 
1389  | 0  |     qmul(tmp1, tmp1, x6);  | 
1390  | 0  |     for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++) { | 
1391  | 0  |         qsqr(tmp1, tmp1);  | 
1392  | 0  |     }  | 
1393  | 0  |     qmul(tmp1, tmp1, x6);  | 
1394  | 0  |     KRML_MAYBE_FOR6(i, (uint32_t)0U, (uint32_t)6U, (uint32_t)1U, qsqr(tmp1, tmp1););  | 
1395  | 0  |     qmul(tmp1, tmp1, x_101111);  | 
1396  | 0  |     KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););  | 
1397  | 0  |     qmul(tmp1, tmp1, x_111);  | 
1398  | 0  |     KRML_MAYBE_FOR4(i, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, qsqr(tmp1, tmp1););  | 
1399  | 0  |     qmul(tmp1, tmp1, x_11);  | 
1400  | 0  |     KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););  | 
1401  | 0  |     qmul(tmp1, tmp1, x_1111);  | 
1402  | 0  |     KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););  | 
1403  | 0  |     qmul(tmp1, tmp1, x_10101);  | 
1404  | 0  |     KRML_MAYBE_FOR4(i, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, qsqr(tmp1, tmp1););  | 
1405  | 0  |     qmul(tmp1, tmp1, x_101);  | 
1406  | 0  |     KRML_MAYBE_FOR3(i, (uint32_t)0U, (uint32_t)3U, (uint32_t)1U, qsqr(tmp1, tmp1););  | 
1407  | 0  |     qmul(tmp1, tmp1, x_101);  | 
1408  | 0  |     KRML_MAYBE_FOR3(i, (uint32_t)0U, (uint32_t)3U, (uint32_t)1U, qsqr(tmp1, tmp1););  | 
1409  | 0  |     qmul(tmp1, tmp1, x_101);  | 
1410  | 0  |     KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););  | 
1411  | 0  |     qmul(tmp1, tmp1, x_111);  | 
1412  | 0  |     KRML_MAYBE_FOR9(i, (uint32_t)0U, (uint32_t)9U, (uint32_t)1U, qsqr(tmp1, tmp1););  | 
1413  | 0  |     qmul(tmp1, tmp1, x_101111);  | 
1414  | 0  |     KRML_MAYBE_FOR6(i, (uint32_t)0U, (uint32_t)6U, (uint32_t)1U, qsqr(tmp1, tmp1););  | 
1415  | 0  |     qmul(tmp1, tmp1, x_1111);  | 
1416  | 0  |     KRML_MAYBE_FOR2(i, (uint32_t)0U, (uint32_t)2U, (uint32_t)1U, qsqr(tmp1, tmp1););  | 
1417  | 0  |     qmul(tmp1, tmp1, r);  | 
1418  | 0  |     KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););  | 
1419  | 0  |     qmul(tmp1, tmp1, r);  | 
1420  | 0  |     KRML_MAYBE_FOR6(i, (uint32_t)0U, (uint32_t)6U, (uint32_t)1U, qsqr(tmp1, tmp1););  | 
1421  | 0  |     qmul(tmp1, tmp1, x_1111);  | 
1422  | 0  |     KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););  | 
1423  | 0  |     qmul(tmp1, tmp1, x_111);  | 
1424  | 0  |     KRML_MAYBE_FOR4(i, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, qsqr(tmp1, tmp1););  | 
1425  | 0  |     qmul(tmp1, tmp1, x_111);  | 
1426  | 0  |     KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););  | 
1427  | 0  |     qmul(tmp1, tmp1, x_111);  | 
1428  | 0  |     KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););  | 
1429  | 0  |     qmul(tmp1, tmp1, x_101);  | 
1430  | 0  |     KRML_MAYBE_FOR3(i, (uint32_t)0U, (uint32_t)3U, (uint32_t)1U, qsqr(tmp1, tmp1););  | 
1431  | 0  |     qmul(tmp1, tmp1, x_11);  | 
1432  | 0  |     KRML_MAYBE_FOR10(i, (uint32_t)0U, (uint32_t)10U, (uint32_t)1U, qsqr(tmp1, tmp1););  | 
1433  | 0  |     qmul(tmp1, tmp1, x_101111);  | 
1434  | 0  |     KRML_MAYBE_FOR2(i, (uint32_t)0U, (uint32_t)2U, (uint32_t)1U, qsqr(tmp1, tmp1););  | 
1435  | 0  |     qmul(tmp1, tmp1, x_11);  | 
1436  | 0  |     KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););  | 
1437  | 0  |     qmul(tmp1, tmp1, x_11);  | 
1438  | 0  |     KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););  | 
1439  | 0  |     qmul(tmp1, tmp1, x_11);  | 
1440  | 0  |     KRML_MAYBE_FOR3(i, (uint32_t)0U, (uint32_t)3U, (uint32_t)1U, qsqr(tmp1, tmp1););  | 
1441  | 0  |     qmul(tmp1, tmp1, r);  | 
1442  | 0  |     KRML_MAYBE_FOR7(i, (uint32_t)0U, (uint32_t)7U, (uint32_t)1U, qsqr(tmp1, tmp1););  | 
1443  | 0  |     qmul(tmp1, tmp1, x_10101);  | 
1444  | 0  |     KRML_MAYBE_FOR6(i, (uint32_t)0U, (uint32_t)6U, (uint32_t)1U, qsqr(tmp1, tmp1););  | 
1445  | 0  |     qmul(tmp1, tmp1, x_1111);  | 
1446  | 0  |     memcpy(x6, tmp1, (uint32_t)4U * sizeof(uint64_t));  | 
1447  | 0  |     memcpy(res, x6, (uint32_t)4U * sizeof(uint64_t));  | 
1448  | 0  | }  | 
1449  |  |  | 
1450  |  | static inline void  | 
1451  |  | qmul_mont(uint64_t *sinv, uint64_t *b, uint64_t *res)  | 
1452  | 0  | { | 
1453  | 0  |     uint64_t tmp[4U] = { 0U }; | 
1454  | 0  |     from_qmont(tmp, b);  | 
1455  | 0  |     qmul(res, sinv, tmp);  | 
1456  | 0  | }  | 
1457  |  |  | 
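/* Core ECDSA verification on a message already reduced mod the order:
 * checks 0 < r, s < order, computes u1 = m * s^(-1) and u2 = r * s^(-1),
 * and accepts iff x([u1]G + [u2]pk) mod order equals r. */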
1458  |  | static inline bool  | 
1459  |  | ecdsa_verify_msg_as_qelem(  | 
1460  |  |     uint64_t *m_q,  | 
1461  |  |     uint8_t *public_key,  | 
1462  |  |     uint8_t *signature_r,  | 
1463  |  |     uint8_t *signature_s)  | 
1464  | 0  | { | 
1465  | 0  |     uint64_t tmp[28U] = { 0U }; | 
1466  | 0  |     uint64_t *pk = tmp;  | 
1467  | 0  |     uint64_t *r_q = tmp + (uint32_t)12U;  | 
1468  | 0  |     uint64_t *s_q = tmp + (uint32_t)16U;  | 
1469  | 0  |     uint64_t *u1 = tmp + (uint32_t)20U;  | 
1470  | 0  |     uint64_t *u2 = tmp + (uint32_t)24U;  | 
1471  | 0  |     bool is_pk_valid = load_point_vartime(pk, public_key);  | 
1472  | 0  |     bn_from_bytes_be4(r_q, signature_r);  | 
1473  | 0  |     bn_from_bytes_be4(s_q, signature_s);  | 
1474  | 0  |     uint64_t is_r_valid = bn_is_lt_order_and_gt_zero_mask4(r_q);  | 
1475  | 0  |     uint64_t is_s_valid = bn_is_lt_order_and_gt_zero_mask4(s_q);  | 
1476  | 0  |     bool  | 
1477  | 0  |         is_rs_valid =  | 
1478  | 0  |             is_r_valid == (uint64_t)0xFFFFFFFFFFFFFFFFU && is_s_valid == (uint64_t)0xFFFFFFFFFFFFFFFFU;  | 
1479  | 0  |     if (!(is_pk_valid && is_rs_valid)) { | 
1480  | 0  |         return false;  | 
1481  | 0  |     }  | 
1482  | 0  |     uint64_t sinv[4U] = { 0U }; | 
1483  | 0  |     qinv(sinv, s_q);  | 
1484  | 0  |     qmul_mont(sinv, m_q, u1);  | 
1485  | 0  |     qmul_mont(sinv, r_q, u2);  | 
1486  | 0  |     uint64_t res[12U] = { 0U }; | 
1487  | 0  |     point_mul_double_g(res, u1, u2, pk);  | 
1488  | 0  |     if (is_point_at_inf_vartime(res)) { | 
1489  | 0  |         return false;  | 
1490  | 0  |     }  | 
1491  | 0  |     uint64_t x[4U] = { 0U }; | 
1492  | 0  |     to_aff_point_x(x, res);  | 
1493  | 0  |     qmod_short(x, x);  | 
1494  | 0  |     bool res1 = bn_is_eq_vartime4(x, r_q);  | 
1495  | 0  |     return res1;  | 
1496  | 0  | }  | 
1497  |  |  | 
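/* Core ECDSA signing: validates the private key and nonce in constant time
 * (invalid values are replaced by 1), computes r = x([k]G) mod order and
 * s = k^(-1) * (m + r * d) mod order, writes r || s, and reports failure if
 * an input was out of range or r or s is zero. */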
1498  |  | static inline bool  | 
1499  |  | ecdsa_sign_msg_as_qelem(  | 
1500  |  |     uint8_t *signature,  | 
1501  |  |     uint64_t *m_q,  | 
1502  |  |     uint8_t *private_key,  | 
1503  |  |     uint8_t *nonce)  | 
1504  | 0  | { | 
1505  | 0  |     uint64_t rsdk_q[16U] = { 0U }; | 
1506  | 0  |     uint64_t *r_q = rsdk_q;  | 
1507  | 0  |     uint64_t *s_q = rsdk_q + (uint32_t)4U;  | 
1508  | 0  |     uint64_t *d_a = rsdk_q + (uint32_t)8U;  | 
1509  | 0  |     uint64_t *k_q = rsdk_q + (uint32_t)12U;  | 
1510  | 0  |     bn_from_bytes_be4(d_a, private_key);  | 
1511  | 0  |     uint64_t is_b_valid0 = bn_is_lt_order_and_gt_zero_mask4(d_a);  | 
1512  | 0  |     uint64_t oneq0[4U] = { 0U }; | 
1513  | 0  |     oneq0[0U] = (uint64_t)1U;  | 
1514  | 0  |     oneq0[1U] = (uint64_t)0U;  | 
1515  | 0  |     oneq0[2U] = (uint64_t)0U;  | 
1516  | 0  |     oneq0[3U] = (uint64_t)0U;  | 
1517  | 0  |     KRML_MAYBE_FOR4(i,  | 
1518  | 0  |                     (uint32_t)0U,  | 
1519  | 0  |                     (uint32_t)4U,  | 
1520  | 0  |                     (uint32_t)1U,  | 
1521  | 0  |                     uint64_t *os = d_a;  | 
1522  | 0  |                     uint64_t uu____0 = oneq0[i];  | 
1523  | 0  |                     uint64_t x = uu____0 ^ (is_b_valid0 & (d_a[i] ^ uu____0));  | 
1524  | 0  |                     os[i] = x;);  | 
1525  | 0  |     uint64_t is_sk_valid = is_b_valid0;  | 
1526  | 0  |     bn_from_bytes_be4(k_q, nonce);  | 
1527  | 0  |     uint64_t is_b_valid = bn_is_lt_order_and_gt_zero_mask4(k_q);  | 
1528  | 0  |     uint64_t oneq[4U] = { 0U }; | 
1529  | 0  |     oneq[0U] = (uint64_t)1U;  | 
1530  | 0  |     oneq[1U] = (uint64_t)0U;  | 
1531  | 0  |     oneq[2U] = (uint64_t)0U;  | 
1532  | 0  |     oneq[3U] = (uint64_t)0U;  | 
1533  | 0  |     KRML_MAYBE_FOR4(i,  | 
1534  | 0  |                     (uint32_t)0U,  | 
1535  | 0  |                     (uint32_t)4U,  | 
1536  | 0  |                     (uint32_t)1U,  | 
1537  | 0  |                     uint64_t *os = k_q;  | 
1538  | 0  |                     uint64_t uu____1 = oneq[i];  | 
1539  | 0  |                     uint64_t x = uu____1 ^ (is_b_valid & (k_q[i] ^ uu____1));  | 
1540  | 0  |                     os[i] = x;);  | 
1541  | 0  |     uint64_t is_nonce_valid = is_b_valid;  | 
1542  | 0  |     uint64_t are_sk_nonce_valid = is_sk_valid & is_nonce_valid;  | 
1543  | 0  |     uint64_t p[12U] = { 0U }; | 
1544  | 0  |     point_mul_g(p, k_q);  | 
1545  | 0  |     to_aff_point_x(r_q, p);  | 
1546  | 0  |     qmod_short(r_q, r_q);  | 
1547  | 0  |     uint64_t kinv[4U] = { 0U }; | 
1548  | 0  |     qinv(kinv, k_q);  | 
1549  | 0  |     qmul(s_q, r_q, d_a);  | 
1550  | 0  |     from_qmont(m_q, m_q);  | 
1551  | 0  |     qadd(s_q, m_q, s_q);  | 
1552  | 0  |     qmul(s_q, kinv, s_q);  | 
1553  | 0  |     bn2_to_bytes_be4(signature, r_q, s_q);  | 
1554  | 0  |     uint64_t is_r_zero = bn_is_zero_mask4(r_q);  | 
1555  | 0  |     uint64_t is_s_zero = bn_is_zero_mask4(s_q);  | 
1556  | 0  |     uint64_t m = are_sk_nonce_valid & (~is_r_zero & ~is_s_zero);  | 
1557  | 0  |     bool res = m == (uint64_t)0xFFFFFFFFFFFFFFFFU;  | 
1558  | 0  |     return res;  | 
1559  | 0  | }  | 
1560  |  |  | 
1561  |  | /*******************************************************************************  | 
1562  |  |  | 
1563  |  |  Verified C library for ECDSA and ECDH functions over the P-256 NIST curve.  | 
1564  |  |  | 
1565  |  |  This module implements signing and verification, key validation, conversions  | 
1566  |  |  between various point representations, and ECDH key agreement.  | 
1567  |  |  | 
1568  |  | *******************************************************************************/  | 
1569  |  |  | 
1570  |  | /*****************/  | 
1571  |  | /* ECDSA signing */  | 
1572  |  | /*****************/  | 
1573  |  |  | 
1574  |  | /**  | 
1575  |  | Create an ECDSA signature WITHOUT hashing first.  | 
1576  |  |  | 
1577  |  |   This function is intended to receive a hash of the input.  | 
1578  |  |   For convenience, we recommend using one of the hash-and-sign combined functions above.  | 
1579  |  |  | 
1580  |  |   The argument `msg` MUST be at least 32 bytes (i.e. `msg_len >= 32`).  | 
1581  |  |  | 
1582  |  |   NOTE: The equivalent functions in OpenSSL and Fiat-Crypto both accept inputs  | 
1583  |  |   smaller than 32 bytes. These libraries left-pad the input with enough zeroes to  | 
1584  |  |   reach the minimum 32-byte size. Clients who need behavior identical to OpenSSL  | 
1585  |  |   need to perform the left-padding themselves.  | 
1586  |  |  | 
1587  |  |   The function returns `true` for successful creation of an ECDSA signature and `false` otherwise.  | 
1588  |  |  | 
1589  |  |   The outparam `signature` (R || S) points to 64 bytes of valid memory, i.e., uint8_t[64].  | 
1590  |  |   The argument `msg` points to `msg_len` bytes of valid memory, i.e., uint8_t[msg_len].  | 
1591  |  |   The arguments `private_key` and `nonce` point to 32 bytes of valid memory, i.e., uint8_t[32].  | 
1592  |  |  | 
1593  |  |   The function also checks whether `private_key` and `nonce` are valid values:  | 
1594  |  |     • 0 < `private_key` < the order of the curve  | 
1595  |  |     • 0 < `nonce` < the order of the curve  | 
1596  |  | */  | 
1597  |  | bool  | 
1598  |  | Hacl_P256_ecdsa_sign_p256_without_hash(  | 
1599  |  |     uint8_t *signature,  | 
1600  |  |     uint32_t msg_len,  | 
1601  |  |     uint8_t *msg,  | 
1602  |  |     uint8_t *private_key,  | 
1603  |  |     uint8_t *nonce)  | 
1604  | 0  | { | 
1605  | 0  |     uint64_t m_q[4U] = { 0U }; | 
1606  | 0  |     uint8_t mHash[32U] = { 0U }; | 
1607  | 0  |     memcpy(mHash, msg, (uint32_t)32U * sizeof(uint8_t));  | 
1608  | 0  |     KRML_HOST_IGNORE(msg_len);  | 
1609  | 0  |     uint8_t *mHash32 = mHash;  | 
1610  | 0  |     bn_from_bytes_be4(m_q, mHash32);  | 
1611  | 0  |     qmod_short(m_q, m_q);  | 
1612  | 0  |     bool res = ecdsa_sign_msg_as_qelem(signature, m_q, private_key, nonce);  | 
1613  | 0  |     return res;  | 
1614  | 0  | }  | 
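
/* Illustrative usage sketch (the helper name and the fixed msg_len of 32 are
 * assumptions for the example, not part of the library): signing a 32-byte
 * digest produced by an external hash step, with buffer sizes following the
 * contract documented above. */
static bool
example_sign_prehashed(uint8_t signature[64U],
                       uint8_t digest32[32U],
                       uint8_t private_key[32U],
                       uint8_t nonce[32U])
{
    /* Only the first 32 bytes of the message are consumed; msg_len >= 32. */
    return Hacl_P256_ecdsa_sign_p256_without_hash(signature, (uint32_t)32U,
                                                  digest32, private_key, nonce);
}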
1615  |  |  | 
1616  |  | /**********************/  | 
1617  |  | /* ECDSA verification */  | 
1618  |  | /**********************/  | 
1619  |  |  | 
1620  |  | /**  | 
1621  |  | Verify an ECDSA signature WITHOUT hashing first.  | 
1622  |  |  | 
1623  |  |   This function is intended to receive a hash of the input.  | 
1624  |  |   For convenience, we recommend using one of the hash-and-verify combined functions above.  | 
1625  |  |  | 
1626  |  |   The argument `msg` MUST be at least 32 bytes (i.e. `msg_len >= 32`).  | 
1627  |  |  | 
1628  |  |   The function returns `true` if the signature is valid and `false` otherwise.  | 
1629  |  |  | 
1630  |  |   The argument `msg` points to `msg_len` bytes of valid memory, i.e., uint8_t[msg_len].  | 
1631  |  |   The argument `public_key` (x || y) points to 64 bytes of valid memory, i.e., uint8_t[64].  | 
1632  |  |   The arguments `signature_r` and `signature_s` point to 32 bytes of valid memory, i.e., uint8_t[32].  | 
1633  |  |  | 
1634  |  |   The function also checks whether `public_key` is valid.  | 
1635  |  | */  | 
1636  |  | bool  | 
1637  |  | Hacl_P256_ecdsa_verif_without_hash(  | 
1638  |  |     uint32_t msg_len,  | 
1639  |  |     uint8_t *msg,  | 
1640  |  |     uint8_t *public_key,  | 
1641  |  |     uint8_t *signature_r,  | 
1642  |  |     uint8_t *signature_s)  | 
1643  | 0  | { | 
1644  | 0  |     uint64_t m_q[4U] = { 0U }; | 
1645  | 0  |     uint8_t mHash[32U] = { 0U }; | 
1646  | 0  |     memcpy(mHash, msg, (uint32_t)32U * sizeof(uint8_t));  | 
1647  | 0  |     KRML_HOST_IGNORE(msg_len);  | 
1648  | 0  |     uint8_t *mHash32 = mHash;  | 
1649  | 0  |     bn_from_bytes_be4(m_q, mHash32);  | 
1650  | 0  |     qmod_short(m_q, m_q);  | 
1651  | 0  |     bool res = ecdsa_verify_msg_as_qelem(m_q, public_key, signature_r, signature_s);  | 
1652  | 0  |     return res;  | 
1653  | 0  | }  | 
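
/* Illustrative usage sketch (hypothetical helper, not part of the library):
 * verifying a signature r || s over a caller-supplied 32-byte digest and a
 * raw (x || y) public key. */
static bool
example_verify_prehashed(uint8_t digest32[32U],
                         uint8_t public_key[64U],
                         uint8_t signature[64U])
{
    uint8_t *sig_r = signature;
    uint8_t *sig_s = signature + (uint32_t)32U;
    return Hacl_P256_ecdsa_verif_without_hash((uint32_t)32U, digest32,
                                              public_key, sig_r, sig_s);
}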
1654  |  |  | 
1655  |  | /******************/  | 
1656  |  | /* Key validation */  | 
1657  |  | /******************/  | 
1658  |  |  | 
1659  |  | /**  | 
1660  |  | Public key validation.  | 
1661  |  |  | 
1662  |  |   The function returns `true` if a public key is valid and `false` otherwise.  | 
1663  |  |  | 
1664  |  |   The argument `public_key` points to 64 bytes of valid memory, i.e., uint8_t[64].  | 
1665  |  |  | 
1666  |  |   The public key (x || y) is valid (with respect to SP 800-56A):  | 
1667  |  |     • the public key is not the “point at infinity”, represented as O.  | 
1668  |  |     • the affine x and y coordinates of the point represented by the public key are  | 
1669  |  |       in the range [0, p – 1] where p is the prime defining the finite field.  | 
1670  |  |     • y^2 = x^3 + ax + b where a and b are the coefficients of the curve equation.  | 
1671  |  |   The last extract is taken from: https://neilmadden.blog/2017/05/17/so-how-do-you-validate-nist-ecdh-public-keys/  | 
1672  |  | */  | 
1673  |  | bool  | 
1674  |  | Hacl_P256_validate_public_key(uint8_t *public_key)  | 
1675  | 0  | { | 
1676  | 0  |     uint64_t point_jac[12U] = { 0U }; | 
1677  | 0  |     bool res = load_point_vartime(point_jac, public_key);  | 
1678  | 0  |     return res;  | 
1679  | 0  | }  | 
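
/* Illustrative sketch (hypothetical helper): full check of an SEC1
 * uncompressed public key (0x04 || x || y) before use, combining the format
 * conversion defined further below with the curve-membership check above. */
static bool
example_check_uncompressed_key(uint8_t pk65[65U], uint8_t pk_raw[64U])
{
    if (!Hacl_P256_uncompressed_to_raw(pk65, pk_raw)) {
        return false;
    }
    return Hacl_P256_validate_public_key(pk_raw);
}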
1680  |  |  | 
1681  |  | /**  | 
1682  |  | Private key validation.  | 
1683  |  |  | 
1684  |  |   The function returns `true` if a private key is valid and `false` otherwise.  | 
1685  |  |  | 
1686  |  |   The argument `private_key` points to 32 bytes of valid memory, i.e., uint8_t[32].  | 
1687  |  |  | 
1688  |  |   The private key is valid:  | 
1689  |  |     • 0 < `private_key` < the order of the curve  | 
1690  |  | */  | 
1691  |  | bool  | 
1692  |  | Hacl_P256_validate_private_key(uint8_t *private_key)  | 
1693  | 0  | { | 
1694  | 0  |     uint64_t bn_sk[4U] = { 0U }; | 
1695  | 0  |     bn_from_bytes_be4(bn_sk, private_key);  | 
1696  | 0  |     uint64_t res = bn_is_lt_order_and_gt_zero_mask4(bn_sk);  | 
1697  | 0  |     return res == (uint64_t)0xFFFFFFFFFFFFFFFFU;  | 
1698  | 0  | }  | 
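
/* Illustrative sketch (hypothetical helper): check a candidate 32-byte
 * scalar before using it as an ECDSA or ECDH private key; exactly the
 * scalars in (0, order) are accepted. */
static bool
example_check_private_key(uint8_t candidate[32U])
{
    return Hacl_P256_validate_private_key(candidate);
}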
1699  |  |  | 
1700  |  | /*******************************************************************************  | 
1701  |  |   Parsing and Serializing public keys.  | 
1702  |  |  | 
1703  |  |   A public key is a point (x, y) on the P-256 NIST curve.  | 
1704  |  |  | 
1705  |  |   The point can be represented in the following three ways.  | 
1706  |  |     • raw          = [ x || y ], 64 bytes  | 
1707  |  |     • uncompressed = [ 0x04 || x || y ], 65 bytes  | 
1708  |  |     • compressed   = [ (0x02 for even `y` and 0x03 for odd `y`) || x ], 33 bytes  | 
1709  |  |  | 
1710  |  | *******************************************************************************/  | 
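
/* Worked example of the three encodings as buffer sizes (the constant names
 * are illustrative, not part of the library): */
#define EXAMPLE_P256_RAW_LEN 64U          /* x || y                         */
#define EXAMPLE_P256_UNCOMPRESSED_LEN 65U /* 0x04 || x || y                 */
#define EXAMPLE_P256_COMPRESSED_LEN 33U   /* (0x02 even y, 0x03 odd y) || x */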
1711  |  |  | 
1712  |  | /**  | 
1713  |  | Convert a public key from uncompressed to its raw form.  | 
1714  |  |  | 
1715  |  |   The function returns `true` for successful conversion of a public key and `false` otherwise.  | 
1716  |  |  | 
1717  |  |   The outparam `pk_raw` points to 64 bytes of valid memory, i.e., uint8_t[64].  | 
1718  |  |   The argument `pk` points to 65 bytes of valid memory, i.e., uint8_t[65].  | 
1719  |  |  | 
1720  |  |   The function DOESN'T check whether (x, y) is a valid point.  | 
1721  |  | */  | 
1722  |  | bool  | 
1723  |  | Hacl_P256_uncompressed_to_raw(uint8_t *pk, uint8_t *pk_raw)  | 
1724  | 0  | { | 
1725  | 0  |     uint8_t pk0 = pk[0U];  | 
1726  | 0  |     if (pk0 != (uint8_t)0x04U) { | 
1727  | 0  |         return false;  | 
1728  | 0  |     }  | 
1729  | 0  |     memcpy(pk_raw, pk + (uint32_t)1U, (uint32_t)64U * sizeof(uint8_t));  | 
1730  | 0  |     return true;  | 
1731  | 0  | }  | 
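
/* Illustrative sketch (hypothetical helper): accept either a raw 64-byte key
 * or an SEC1 uncompressed key, normalizing to the raw form used elsewhere in
 * this API. */
static bool
example_normalize_pubkey(uint8_t *pk, uint32_t pk_len, uint8_t pk_raw[64U])
{
    if (pk_len == (uint32_t)64U) {
        memcpy(pk_raw, pk, (uint32_t)64U * sizeof(uint8_t));
        return true;
    }
    if (pk_len == (uint32_t)65U) {
        return Hacl_P256_uncompressed_to_raw(pk, pk_raw);
    }
    return false;
}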
1732  |  |  | 
1733  |  | /**  | 
1734  |  | Convert a public key from compressed to its raw form.  | 
1735  |  |  | 
1736  |  |   The function returns `true` for successful conversion of a public key and `false` otherwise.  | 
1737  |  |  | 
1738  |  |   The outparam `pk_raw` points to 64 bytes of valid memory, i.e., uint8_t[64].  | 
1739  |  |   The argument `pk` points to 33 bytes of valid memory, i.e., uint8_t[33].  | 
1740  |  |  | 
1741  |  |   The function also checks whether (x, y) is a valid point.  | 
1742  |  | */  | 
1743  |  | bool  | 
1744  |  | Hacl_P256_compressed_to_raw(uint8_t *pk, uint8_t *pk_raw)  | 
1745  | 0  | { | 
1746  | 0  |     uint64_t xa[4U] = { 0U }; | 
1747  | 0  |     uint64_t ya[4U] = { 0U }; | 
1748  | 0  |     uint8_t *pk_xb = pk + (uint32_t)1U;  | 
1749  | 0  |     bool b = aff_point_decompress_vartime(xa, ya, pk);  | 
1750  | 0  |     if (b) { | 
1751  | 0  |         memcpy(pk_raw, pk_xb, (uint32_t)32U * sizeof(uint8_t));  | 
1752  | 0  |         bn_to_bytes_be4(pk_raw + (uint32_t)32U, ya);  | 
1753  | 0  |     }  | 
1754  | 0  |     return b;  | 
1755  | 0  | }  | 
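
/* Illustrative sketch (hypothetical helper): decompress a 33-byte key. Unlike
 * the uncompressed conversion, this path already checks that (x, y) lies on
 * the curve, since y is recomputed from x. */
static bool
example_load_compressed(uint8_t pk33[33U], uint8_t pk_raw[64U])
{
    return Hacl_P256_compressed_to_raw(pk33, pk_raw);
}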
1756  |  |  | 
1757  |  | /**  | 
1758  |  | Convert a public key from raw to its uncompressed form.  | 
1759  |  |  | 
1760  |  |   The outparam `pk` points to 65 bytes of valid memory, i.e., uint8_t[65].  | 
1761  |  |   The argument `pk_raw` points to 64 bytes of valid memory, i.e., uint8_t[64].  | 
1762  |  |  | 
1763  |  |   The function DOESN'T check whether (x, y) is a valid point.  | 
1764  |  | */  | 
1765  |  | void  | 
1766  |  | Hacl_P256_raw_to_uncompressed(uint8_t *pk_raw, uint8_t *pk)  | 
1767  | 0  | { | 
1768  | 0  |     pk[0U] = (uint8_t)0x04U;  | 
1769  | 0  |     memcpy(pk + (uint32_t)1U, pk_raw, (uint32_t)64U * sizeof(uint8_t));  | 
1770  | 0  | }  | 
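
/* Illustrative sketch (hypothetical helper): round-trip a raw key through the
 * uncompressed form; the conversion back only strips the 0x04 prefix byte. */
static bool
example_uncompressed_round_trip(uint8_t pk_raw[64U], uint8_t pk_raw2[64U])
{
    uint8_t pk65[65U] = { 0U };
    Hacl_P256_raw_to_uncompressed(pk_raw, pk65);
    return Hacl_P256_uncompressed_to_raw(pk65, pk_raw2);
}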
1771  |  |  | 
1772  |  | /**  | 
1773  |  | Convert a public key from raw to its compressed form.  | 
1774  |  |  | 
1775  |  |   The outparam `pk` points to 33 bytes of valid memory, i.e., uint8_t[33].  | 
1776  |  |   The argument `pk_raw` points to 64 bytes of valid memory, i.e., uint8_t[64].  | 
1777  |  |  | 
1778  |  |   The function DOESN'T check whether (x, y) is a valid point.  | 
1779  |  | */  | 
1780  |  | void  | 
1781  |  | Hacl_P256_raw_to_compressed(uint8_t *pk_raw, uint8_t *pk)  | 
1782  | 0  | { | 
1783  | 0  |     uint8_t *pk_x = pk_raw;  | 
1784  | 0  |     uint8_t *pk_y = pk_raw + (uint32_t)32U;  | 
1785  | 0  |     uint64_t bn_f[4U] = { 0U }; | 
1786  | 0  |     bn_from_bytes_be4(bn_f, pk_y);  | 
1787  | 0  |     uint64_t is_odd_f = bn_f[0U] & (uint64_t)1U;  | 
1788  | 0  |     pk[0U] = (uint8_t)is_odd_f + (uint8_t)0x02U;  | 
1789  | 0  |     memcpy(pk + (uint32_t)1U, pk_x, (uint32_t)32U * sizeof(uint8_t));  | 
1790  | 0  | }  | 
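
/* Illustrative sketch (hypothetical helper): compress a raw key and
 * immediately decompress it again; the second step only succeeds when
 * (x, y) is actually a point on the curve. */
static bool
example_compressed_round_trip(uint8_t pk_raw[64U], uint8_t pk_raw2[64U])
{
    uint8_t pk33[33U] = { 0U };
    Hacl_P256_raw_to_compressed(pk_raw, pk33);
    return Hacl_P256_compressed_to_raw(pk33, pk_raw2);
}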
1791  |  |  | 
1792  |  | /******************/  | 
1793  |  | /* ECDH agreement */  | 
1794  |  | /******************/  | 
1795  |  |  | 
1796  |  | /**  | 
1797  |  | Compute the public key from the private key.  | 
1798  |  |  | 
1799  |  |   The function returns `true` if a private key is valid and `false` otherwise.  | 
1800  |  |  | 
1801  |  |   The outparam `public_key`  points to 64 bytes of valid memory, i.e., uint8_t[64].  | 
1802  |  |   The argument `private_key` points to 32 bytes of valid memory, i.e., uint8_t[32].  | 
1803  |  |  | 
1804  |  |   The private key is valid:  | 
1805  |  |     • 0 < `private_key` < the order of the curve.  | 
1806  |  | */  | 
1807  |  | bool  | 
1808  |  | Hacl_P256_dh_initiator(uint8_t *public_key, uint8_t *private_key)  | 
1809  | 0  | { | 
1810  | 0  |     return Hacl_Impl_P256_DH_ecp256dh_i(public_key, private_key);  | 
1811  | 0  | }  | 
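
/* Illustrative sketch (hypothetical helper): derive the raw public key for a
 * freshly generated 32-byte secret; a false return means the secret was not
 * in (0, order) and must be regenerated. */
static bool
example_derive_public_key(uint8_t private_key[32U], uint8_t public_key[64U])
{
    return Hacl_P256_dh_initiator(public_key, private_key);
}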
1812  |  |  | 
1813  |  | /**  | 
1814  |  | Execute the Diffie-Hellman key exchange.  | 
1815  |  |  | 
1816  |  |   The function returns `true` for successful creation of an ECDH shared secret and  | 
1817  |  |   `false` otherwise.  | 
1818  |  |  | 
1819  |  |   The outparam `shared_secret` points to 64 bytes of valid memory, i.e., uint8_t[64].  | 
1820  |  |   The argument `their_pubkey` points to 64 bytes of valid memory, i.e., uint8_t[64].  | 
1821  |  |   The argument `private_key` points to 32 bytes of valid memory, i.e., uint8_t[32].  | 
1822  |  |  | 
1823  |  |   The function also checks whether `private_key` and `their_pubkey` are valid.  | 
1824  |  | */  | 
1825  |  | bool  | 
1826  |  | Hacl_P256_dh_responder(uint8_t *shared_secret, uint8_t *their_pubkey, uint8_t *private_key)  | 
1827  | 0  | { | 
1828  | 0  |     return Hacl_Impl_P256_DH_ecp256dh_r(shared_secret, their_pubkey, private_key);  | 
1829  | 0  | }  |
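
/* Illustrative sketch (hypothetical helper): each side of an ECDH exchange
 * calls this with its own private key and the peer's raw public key; on
 * success the 64-byte outputs (the affine x || y of the shared point) match. */
static bool
example_agree(uint8_t shared_secret[64U],
              uint8_t their_pubkey[64U],
              uint8_t my_private_key[32U])
{
    return Hacl_P256_dh_responder(shared_secret, their_pubkey, my_private_key);
}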