Coverage Report

Created: 2022-11-30 06:20

/src/openssl/crypto/ec/ecp_nistz256.c
Line
Count
Source
1
/******************************************************************************
2
 *                                                                            *
3
 * Copyright 2014 Intel Corporation                                           *
4
 *                                                                            *
5
 * Licensed under the Apache License, Version 2.0 (the "License");            *
6
 * you may not use this file except in compliance with the License.           *
7
 * You may obtain a copy of the License at                                    *
8
 *                                                                            *
9
 *    http://www.apache.org/licenses/LICENSE-2.0                              *
10
 *                                                                            *
11
 * Unless required by applicable law or agreed to in writing, software        *
12
 * distributed under the License is distributed on an "AS IS" BASIS,          *
13
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   *
14
 * See the License for the specific language governing permissions and        *
15
 * limitations under the License.                                             *
16
 *                                                                            *
17
 ******************************************************************************
18
 *                                                                            *
19
 * Developers and authors:                                                    *
20
 * Shay Gueron (1, 2), and Vlad Krasnov (1)                                   *
21
 * (1) Intel Corporation, Israel Development Center                           *
22
 * (2) University of Haifa                                                    *
23
 * Reference:                                                                 *
24
 * S.Gueron and V.Krasnov, "Fast Prime Field Elliptic Curve Cryptography with *
25
 *                          256 Bit Primes"                                   *
26
 *                                                                            *
27
 ******************************************************************************/
28
29
#include <string.h>
30
31
#include <openssl/bn.h>
32
#include <openssl/err.h>
33
#include <openssl/ec.h>
34
#include "cryptlib.h"
35
36
#include "ec_lcl.h"
37
38
#if BN_BITS2 != 64
39
# define TOBN(hi,lo)    lo,hi
40
#else
41
# define TOBN(hi,lo)    ((BN_ULONG)hi<<32|lo)
42
#endif
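/*
 * Editorial note: TOBN(hi,lo) initializes one 64-bit limb on 64-bit builds
 * and two 32-bit limbs (low word first) on 32-bit builds, so e.g.
 * TOBN(0xffffffff, 0x00000000) is the single limb 0xffffffff00000000 or the
 * pair { 0x00000000, 0xffffffff }, respectively.
 */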
43
44
#if defined(__GNUC__)
45
0
# define ALIGN32        __attribute((aligned(32)))
46
#elif defined(_MSC_VER)
47
# define ALIGN32        __declspec(align(32))
48
#else
49
# define ALIGN32
50
#endif
51
52
0
#define ALIGNPTR(p,N)   ((unsigned char *)p+N-(size_t)p%N)
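/*
 * Editorial note: ALIGNPTR(p,N) advances p by 1..N bytes to the next N-byte
 * boundary (it always moves forward, even if p is already aligned), which is
 * why the table allocations below over-allocate by 64 bytes.
 */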
53
0
#define P256_LIMBS      (256/BN_BITS2)
54
55
typedef unsigned short u16;
56
57
typedef struct {
58
    BN_ULONG X[P256_LIMBS];
59
    BN_ULONG Y[P256_LIMBS];
60
    BN_ULONG Z[P256_LIMBS];
61
} P256_POINT;
62
63
typedef struct {
64
    BN_ULONG X[P256_LIMBS];
65
    BN_ULONG Y[P256_LIMBS];
66
} P256_POINT_AFFINE;
67
68
typedef P256_POINT_AFFINE PRECOMP256_ROW[64];
69
70
/* structure for precomputed multiples of the generator */
71
typedef struct ec_pre_comp_st {
72
    const EC_GROUP *group;      /* Parent EC_GROUP object */
73
    size_t w;                   /* Window size */
74
    /*
75
     * Constant time access to the X and Y coordinates of the pre-computed,
76
     * generator multiplies, in the Montgomery domain. Pre-calculated
77
     * multiplies are stored in affine form.
78
     */
79
    PRECOMP256_ROW *precomp;
80
    void *precomp_storage;
81
    int references;
82
} EC_PRE_COMP;
83
84
/* Functions implemented in assembly */
85
/*
86
 * Most of the functions below *preserve* the property of inputs being
87
 * fully reduced, i.e. being in the [0, modulus) range: if the inputs are
88
 * fully reduced, then so is the output. Note that the reverse is not
89
 * true: given partially reduced inputs, the output may or may not be
90
 * fully reduced. The "most" in the first sentence refers to the fact
91
 * that, given the calculation flow, one can tolerate the addition (the
92
 * first function below) producing a partially reduced result *if* the
93
 * multiplications by 2 and 3, which customarily use addition, fully
94
 * reduce it. This effectively gives two options: a) addition produces a
95
 * fully reduced result [as long as the inputs are, just like the
96
 * remaining functions]; b) addition is allowed to produce a partially
97
 * reduced result, but the multiplications by 2 and 3 perform an extra
98
 * reduction step. The choice between the two can be platform-specific,
99
 * but it has been a) in all cases so far...
100
 */
101
/* Modular add: res = a+b mod P   */
102
void ecp_nistz256_add(BN_ULONG res[P256_LIMBS],
103
                      const BN_ULONG a[P256_LIMBS],
104
                      const BN_ULONG b[P256_LIMBS]);
105
/* Modular mul by 2: res = 2*a mod P */
106
void ecp_nistz256_mul_by_2(BN_ULONG res[P256_LIMBS],
107
                           const BN_ULONG a[P256_LIMBS]);
108
/* Modular mul by 3: res = 3*a mod P */
109
void ecp_nistz256_mul_by_3(BN_ULONG res[P256_LIMBS],
110
                           const BN_ULONG a[P256_LIMBS]);
111
112
/* Modular div by 2: res = a/2 mod P */
113
void ecp_nistz256_div_by_2(BN_ULONG res[P256_LIMBS],
114
                           const BN_ULONG a[P256_LIMBS]);
115
/* Modular sub: res = a-b mod P   */
116
void ecp_nistz256_sub(BN_ULONG res[P256_LIMBS],
117
                      const BN_ULONG a[P256_LIMBS],
118
                      const BN_ULONG b[P256_LIMBS]);
119
/* Modular neg: res = -a mod P    */
120
void ecp_nistz256_neg(BN_ULONG res[P256_LIMBS], const BN_ULONG a[P256_LIMBS]);
121
/* Montgomery mul: res = a*b*2^-256 mod P */
122
void ecp_nistz256_mul_mont(BN_ULONG res[P256_LIMBS],
123
                           const BN_ULONG a[P256_LIMBS],
124
                           const BN_ULONG b[P256_LIMBS]);
125
/* Montgomery sqr: res = a*a*2^-256 mod P */
126
void ecp_nistz256_sqr_mont(BN_ULONG res[P256_LIMBS],
127
                           const BN_ULONG a[P256_LIMBS]);
128
/* Convert a number from Montgomery domain, by multiplying with 1 */
129
void ecp_nistz256_from_mont(BN_ULONG res[P256_LIMBS],
130
                            const BN_ULONG in[P256_LIMBS]);
131
/* Convert a number to Montgomery domain, by multiplying with 2^512 mod P*/
132
void ecp_nistz256_to_mont(BN_ULONG res[P256_LIMBS],
133
                          const BN_ULONG in[P256_LIMBS]);
134
/* Functions that perform constant time access to the precomputed tables */
135
void ecp_nistz256_select_w5(P256_POINT * val,
136
                            const P256_POINT * in_t, int index);
137
void ecp_nistz256_select_w7(P256_POINT_AFFINE * val,
138
                            const P256_POINT_AFFINE * in_t, int index);
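/*
 * Illustrative sketch (editorial, not part of the assembly interface):
 * option a) from the reduction note above in practice, i.e. multiplication
 * by 3 built from the modular addition and relying on it returning a fully
 * reduced result. The helper name ref_mul_by_3 is hypothetical.
 */
static void ref_mul_by_3(BN_ULONG res[P256_LIMBS], const BN_ULONG a[P256_LIMBS])
{
    ecp_nistz256_add(res, a, a);        /* res = 2*a mod P, fully reduced */
    ecp_nistz256_add(res, res, a);      /* res = 3*a mod P, fully reduced */
}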
139
140
/* One converted into the Montgomery domain */
141
static const BN_ULONG ONE[P256_LIMBS] = {
142
    TOBN(0x00000000, 0x00000001), TOBN(0xffffffff, 0x00000000),
143
    TOBN(0xffffffff, 0xffffffff), TOBN(0x00000000, 0xfffffffe)
144
};
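/*
 * Editorial note: ONE is R mod p with R = 2^256 and
 * p = 2^256 - 2^224 + 2^192 + 2^96 - 1, i.e.
 * 2^256 mod p = 2^256 - p = 2^224 - 2^192 - 2^96 + 1, which in 64-bit limbs
 * (least significant first) is exactly the four values above.
 */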
145
146
static void *ecp_nistz256_pre_comp_dup(void *);
147
static void ecp_nistz256_pre_comp_free(void *);
148
static void ecp_nistz256_pre_comp_clear_free(void *);
149
static EC_PRE_COMP *ecp_nistz256_pre_comp_new(const EC_GROUP *group);
150
151
/* Precomputed tables for the default generator */
152
#include "ecp_nistz256_table.c"
153
154
/* Recode window to a signed digit, see ecp_nistputil.c for details */
155
static unsigned int _booth_recode_w5(unsigned int in)
156
0
{
157
0
    unsigned int s, d;
158
159
0
    s = ~((in >> 5) - 1);
160
0
    d = (1 << 6) - in - 1;
161
0
    d = (d & s) | (in & ~s);
162
0
    d = (d >> 1) + (d & 1);
163
164
0
    return (d << 1) + (s & 1);
165
0
}
166
167
static unsigned int _booth_recode_w7(unsigned int in)
168
0
{
169
0
    unsigned int s, d;
170
171
0
    s = ~((in >> 7) - 1);
172
0
    d = (1 << 8) - in - 1;
173
0
    d = (d & s) | (in & ~s);
174
0
    d = (d >> 1) + (d & 1);
175
176
0
    return (d << 1) + (s & 1);
177
0
}
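/*
 * Worked example (editorial): the recoders map a raw (w+1)-bit window value
 * "in" (w new bits plus the top bit borrowed from the previous window) to a
 * signed digit packed as (magnitude << 1) | sign. For w = 5:
 *
 *     _booth_recode_w5(3)  == (2 << 1) | 0    ->  digit +2
 *     _booth_recode_w5(35) == (14 << 1) | 1   ->  digit -14
 *
 * so digits stay in [-16, 16] and a negative digit is handled by selecting
 * the point and conditionally negating its Y coordinate, as done below.
 */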
178
179
static void copy_conditional(BN_ULONG dst[P256_LIMBS],
180
                             const BN_ULONG src[P256_LIMBS], BN_ULONG move)
181
0
{
182
0
    BN_ULONG mask1 = -move;
183
0
    BN_ULONG mask2 = ~mask1;
184
185
0
    dst[0] = (src[0] & mask1) ^ (dst[0] & mask2);
186
0
    dst[1] = (src[1] & mask1) ^ (dst[1] & mask2);
187
0
    dst[2] = (src[2] & mask1) ^ (dst[2] & mask2);
188
0
    dst[3] = (src[3] & mask1) ^ (dst[3] & mask2);
189
0
    if (P256_LIMBS == 8) {
190
0
        dst[4] = (src[4] & mask1) ^ (dst[4] & mask2);
191
0
        dst[5] = (src[5] & mask1) ^ (dst[5] & mask2);
192
0
        dst[6] = (src[6] & mask1) ^ (dst[6] & mask2);
193
0
        dst[7] = (src[7] & mask1) ^ (dst[7] & mask2);
194
0
    }
195
0
}
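/*
 * Editorial note: "move" must be 0 or 1; mask1 = -move is then all-zeroes or
 * all-ones, so dst is either left alone or overwritten with src without a
 * data-dependent branch.
 */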
196
197
static BN_ULONG is_zero(BN_ULONG in)
198
0
{
199
0
    in |= (0 - in);
200
0
    in = ~in;
201
0
    in &= BN_MASK2;
202
0
    in >>= BN_BITS2 - 1;
203
0
    return in;
204
0
}
205
206
static BN_ULONG is_equal(const BN_ULONG a[P256_LIMBS],
207
                         const BN_ULONG b[P256_LIMBS])
208
0
{
209
0
    BN_ULONG res;
210
211
0
    res = a[0] ^ b[0];
212
0
    res |= a[1] ^ b[1];
213
0
    res |= a[2] ^ b[2];
214
0
    res |= a[3] ^ b[3];
215
0
    if (P256_LIMBS == 8) {
216
0
        res |= a[4] ^ b[4];
217
0
        res |= a[5] ^ b[5];
218
0
        res |= a[6] ^ b[6];
219
0
        res |= a[7] ^ b[7];
220
0
    }
221
222
0
    return is_zero(res);
223
0
}
224
225
static BN_ULONG is_one(const BIGNUM *z)
226
0
{
227
0
    BN_ULONG res = 0;
228
0
    BN_ULONG *a = z->d;
229
230
0
    if (z->top == (P256_LIMBS - P256_LIMBS / 8)) {
231
0
        res = a[0] ^ ONE[0];
232
0
        res |= a[1] ^ ONE[1];
233
0
        res |= a[2] ^ ONE[2];
234
0
        res |= a[3] ^ ONE[3];
235
0
        if (P256_LIMBS == 8) {
236
0
            res |= a[4] ^ ONE[4];
237
0
            res |= a[5] ^ ONE[5];
238
0
            res |= a[6] ^ ONE[6];
239
            /*
240
             * no check for a[7] (being zero) on 32-bit platforms,
241
             * because value of "one" takes only 7 limbs.
242
             */
243
0
        }
244
0
        res = is_zero(res);
245
0
    }
246
247
0
    return res;
248
0
}
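/*
 * Editorial note on the z->top check above: P256_LIMBS - P256_LIMBS / 8 is
 * 4 on 64-bit builds and 7 on 32-bit builds; the Montgomery form of one has
 * a zero top word on 32-bit builds, so bn_correct_top() leaves z->top at 7.
 */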
249
250
static int ecp_nistz256_set_words(BIGNUM *a, BN_ULONG words[P256_LIMBS])
251
0
{
252
0
    if (bn_wexpand(a, P256_LIMBS) == NULL) {
253
0
        ECerr(EC_F_ECP_NISTZ256_SET_WORDS, ERR_R_MALLOC_FAILURE);
254
0
        return 0;
255
0
    }
256
0
    memcpy(a->d, words, sizeof(BN_ULONG) * P256_LIMBS);
257
0
    a->top = P256_LIMBS;
258
0
    bn_correct_top(a);
259
0
    return 1;
260
0
}
261
262
#ifndef ECP_NISTZ256_REFERENCE_IMPLEMENTATION
263
void ecp_nistz256_point_double(P256_POINT *r, const P256_POINT *a);
264
void ecp_nistz256_point_add(P256_POINT *r,
265
                            const P256_POINT *a, const P256_POINT *b);
266
void ecp_nistz256_point_add_affine(P256_POINT *r,
267
                                   const P256_POINT *a,
268
                                   const P256_POINT_AFFINE *b);
269
#else
270
/* Point double: r = 2*a */
271
static void ecp_nistz256_point_double(P256_POINT *r, const P256_POINT *a)
272
{
273
    BN_ULONG S[P256_LIMBS];
274
    BN_ULONG M[P256_LIMBS];
275
    BN_ULONG Zsqr[P256_LIMBS];
276
    BN_ULONG tmp0[P256_LIMBS];
277
278
    const BN_ULONG *in_x = a->X;
279
    const BN_ULONG *in_y = a->Y;
280
    const BN_ULONG *in_z = a->Z;
281
282
    BN_ULONG *res_x = r->X;
283
    BN_ULONG *res_y = r->Y;
284
    BN_ULONG *res_z = r->Z;
285
286
    ecp_nistz256_mul_by_2(S, in_y);
287
288
    ecp_nistz256_sqr_mont(Zsqr, in_z);
289
290
    ecp_nistz256_sqr_mont(S, S);
291
292
    ecp_nistz256_mul_mont(res_z, in_z, in_y);
293
    ecp_nistz256_mul_by_2(res_z, res_z);
294
295
    ecp_nistz256_add(M, in_x, Zsqr);
296
    ecp_nistz256_sub(Zsqr, in_x, Zsqr);
297
298
    ecp_nistz256_sqr_mont(res_y, S);
299
    ecp_nistz256_div_by_2(res_y, res_y);
300
301
    ecp_nistz256_mul_mont(M, M, Zsqr);
302
    ecp_nistz256_mul_by_3(M, M);
303
304
    ecp_nistz256_mul_mont(S, S, in_x);
305
    ecp_nistz256_mul_by_2(tmp0, S);
306
307
    ecp_nistz256_sqr_mont(res_x, M);
308
309
    ecp_nistz256_sub(res_x, res_x, tmp0);
310
    ecp_nistz256_sub(S, S, res_x);
311
312
    ecp_nistz256_mul_mont(S, S, M);
313
    ecp_nistz256_sub(res_y, S, res_y);
314
}
315
316
/* Point addition: r = a+b */
317
static void ecp_nistz256_point_add(P256_POINT *r,
318
                                   const P256_POINT *a, const P256_POINT *b)
319
{
320
    BN_ULONG U2[P256_LIMBS], S2[P256_LIMBS];
321
    BN_ULONG U1[P256_LIMBS], S1[P256_LIMBS];
322
    BN_ULONG Z1sqr[P256_LIMBS];
323
    BN_ULONG Z2sqr[P256_LIMBS];
324
    BN_ULONG H[P256_LIMBS], R[P256_LIMBS];
325
    BN_ULONG Hsqr[P256_LIMBS];
326
    BN_ULONG Rsqr[P256_LIMBS];
327
    BN_ULONG Hcub[P256_LIMBS];
328
329
    BN_ULONG res_x[P256_LIMBS];
330
    BN_ULONG res_y[P256_LIMBS];
331
    BN_ULONG res_z[P256_LIMBS];
332
333
    BN_ULONG in1infty, in2infty;
334
335
    const BN_ULONG *in1_x = a->X;
336
    const BN_ULONG *in1_y = a->Y;
337
    const BN_ULONG *in1_z = a->Z;
338
339
    const BN_ULONG *in2_x = b->X;
340
    const BN_ULONG *in2_y = b->Y;
341
    const BN_ULONG *in2_z = b->Z;
342
343
    /*
344
     * Infinity is encoded as (,,0)
345
     */
346
    in1infty = (in1_z[0] | in1_z[1] | in1_z[2] | in1_z[3]);
347
    if (P256_LIMBS == 8)
348
        in1infty |= (in1_z[4] | in1_z[5] | in1_z[6] | in1_z[7]);
349
350
    in2infty = (in2_z[0] | in2_z[1] | in2_z[2] | in2_z[3]);
351
    if (P256_LIMBS == 8)
352
        in2infty |= (in2_z[4] | in2_z[5] | in2_z[6] | in2_z[7]);
353
354
    in1infty = is_zero(in1infty);
355
    in2infty = is_zero(in2infty);
356
357
    ecp_nistz256_sqr_mont(Z2sqr, in2_z);        /* Z2^2 */
358
    ecp_nistz256_sqr_mont(Z1sqr, in1_z);        /* Z1^2 */
359
360
    ecp_nistz256_mul_mont(S1, Z2sqr, in2_z);    /* S1 = Z2^3 */
361
    ecp_nistz256_mul_mont(S2, Z1sqr, in1_z);    /* S2 = Z1^3 */
362
363
    ecp_nistz256_mul_mont(S1, S1, in1_y);       /* S1 = Y1*Z2^3 */
364
    ecp_nistz256_mul_mont(S2, S2, in2_y);       /* S2 = Y2*Z1^3 */
365
    ecp_nistz256_sub(R, S2, S1);                /* R = S2 - S1 */
366
367
    ecp_nistz256_mul_mont(U1, in1_x, Z2sqr);    /* U1 = X1*Z2^2 */
368
    ecp_nistz256_mul_mont(U2, in2_x, Z1sqr);    /* U2 = X2*Z1^2 */
369
    ecp_nistz256_sub(H, U2, U1);                /* H = U2 - U1 */
370
371
    /*
372
     * This should not happen during sign/ecdh, so no constant time violation
373
     */
374
    if (is_equal(U1, U2) && !in1infty && !in2infty) {
375
        if (is_equal(S1, S2)) {
376
            ecp_nistz256_point_double(r, a);
377
            return;
378
        } else {
379
            memset(r, 0, sizeof(*r));
380
            return;
381
        }
382
    }
383
384
    ecp_nistz256_sqr_mont(Rsqr, R);             /* R^2 */
385
    ecp_nistz256_mul_mont(res_z, H, in1_z);     /* Z3 = H*Z1*Z2 */
386
    ecp_nistz256_sqr_mont(Hsqr, H);             /* H^2 */
387
    ecp_nistz256_mul_mont(res_z, res_z, in2_z); /* Z3 = H*Z1*Z2 */
388
    ecp_nistz256_mul_mont(Hcub, Hsqr, H);       /* H^3 */
389
390
    ecp_nistz256_mul_mont(U2, U1, Hsqr);        /* U1*H^2 */
391
    ecp_nistz256_mul_by_2(Hsqr, U2);            /* 2*U1*H^2 */
392
393
    ecp_nistz256_sub(res_x, Rsqr, Hsqr);
394
    ecp_nistz256_sub(res_x, res_x, Hcub);
395
396
    ecp_nistz256_sub(res_y, U2, res_x);
397
398
    ecp_nistz256_mul_mont(S2, S1, Hcub);
399
    ecp_nistz256_mul_mont(res_y, R, res_y);
400
    ecp_nistz256_sub(res_y, res_y, S2);
401
402
    copy_conditional(res_x, in2_x, in1infty);
403
    copy_conditional(res_y, in2_y, in1infty);
404
    copy_conditional(res_z, in2_z, in1infty);
405
406
    copy_conditional(res_x, in1_x, in2infty);
407
    copy_conditional(res_y, in1_y, in2infty);
408
    copy_conditional(res_z, in1_z, in2infty);
409
410
    memcpy(r->X, res_x, sizeof(res_x));
411
    memcpy(r->Y, res_y, sizeof(res_y));
412
    memcpy(r->Z, res_z, sizeof(res_z));
413
}
414
415
/* Point addition when b is known to be affine: r = a+b */
416
static void ecp_nistz256_point_add_affine(P256_POINT *r,
417
                                          const P256_POINT *a,
418
                                          const P256_POINT_AFFINE *b)
419
{
420
    BN_ULONG U2[P256_LIMBS], S2[P256_LIMBS];
421
    BN_ULONG Z1sqr[P256_LIMBS];
422
    BN_ULONG H[P256_LIMBS], R[P256_LIMBS];
423
    BN_ULONG Hsqr[P256_LIMBS];
424
    BN_ULONG Rsqr[P256_LIMBS];
425
    BN_ULONG Hcub[P256_LIMBS];
426
427
    BN_ULONG res_x[P256_LIMBS];
428
    BN_ULONG res_y[P256_LIMBS];
429
    BN_ULONG res_z[P256_LIMBS];
430
431
    BN_ULONG in1infty, in2infty;
432
433
    const BN_ULONG *in1_x = a->X;
434
    const BN_ULONG *in1_y = a->Y;
435
    const BN_ULONG *in1_z = a->Z;
436
437
    const BN_ULONG *in2_x = b->X;
438
    const BN_ULONG *in2_y = b->Y;
439
440
    /*
441
     * Infinity is encoded as (,,0)
442
     */
443
    in1infty = (in1_z[0] | in1_z[1] | in1_z[2] | in1_z[3]);
444
    if (P256_LIMBS == 8)
445
        in1infty |= (in1_z[4] | in1_z[5] | in1_z[6] | in1_z[7]);
446
447
    /*
448
     * In affine representation we encode infinity as (0,0), which is
449
     * not on the curve, so it is OK
450
     */
451
    in2infty = (in2_x[0] | in2_x[1] | in2_x[2] | in2_x[3] |
452
                in2_y[0] | in2_y[1] | in2_y[2] | in2_y[3]);
453
    if (P256_LIMBS == 8)
454
        in2infty |= (in2_x[4] | in2_x[5] | in2_x[6] | in2_x[7] |
455
                     in2_y[4] | in2_y[5] | in2_y[6] | in2_y[7]);
456
457
    in1infty = is_zero(in1infty);
458
    in2infty = is_zero(in2infty);
459
460
    ecp_nistz256_sqr_mont(Z1sqr, in1_z);        /* Z1^2 */
461
462
    ecp_nistz256_mul_mont(U2, in2_x, Z1sqr);    /* U2 = X2*Z1^2 */
463
    ecp_nistz256_sub(H, U2, in1_x);             /* H = U2 - U1 */
464
465
    ecp_nistz256_mul_mont(S2, Z1sqr, in1_z);    /* S2 = Z1^3 */
466
467
    ecp_nistz256_mul_mont(res_z, H, in1_z);     /* Z3 = H*Z1*Z2 */
468
469
    ecp_nistz256_mul_mont(S2, S2, in2_y);       /* S2 = Y2*Z1^3 */
470
    ecp_nistz256_sub(R, S2, in1_y);             /* R = S2 - S1 */
471
472
    ecp_nistz256_sqr_mont(Hsqr, H);             /* H^2 */
473
    ecp_nistz256_sqr_mont(Rsqr, R);             /* R^2 */
474
    ecp_nistz256_mul_mont(Hcub, Hsqr, H);       /* H^3 */
475
476
    ecp_nistz256_mul_mont(U2, in1_x, Hsqr);     /* U1*H^2 */
477
    ecp_nistz256_mul_by_2(Hsqr, U2);            /* 2*U1*H^2 */
478
479
    ecp_nistz256_sub(res_x, Rsqr, Hsqr);
480
    ecp_nistz256_sub(res_x, res_x, Hcub);
481
    ecp_nistz256_sub(H, U2, res_x);
482
483
    ecp_nistz256_mul_mont(S2, in1_y, Hcub);
484
    ecp_nistz256_mul_mont(H, H, R);
485
    ecp_nistz256_sub(res_y, H, S2);
486
487
    copy_conditional(res_x, in2_x, in1infty);
488
    copy_conditional(res_x, in1_x, in2infty);
489
490
    copy_conditional(res_y, in2_y, in1infty);
491
    copy_conditional(res_y, in1_y, in2infty);
492
493
    copy_conditional(res_z, ONE, in1infty);
494
    copy_conditional(res_z, in1_z, in2infty);
495
496
    memcpy(r->X, res_x, sizeof(res_x));
497
    memcpy(r->Y, res_y, sizeof(res_y));
498
    memcpy(r->Z, res_z, sizeof(res_z));
499
}
500
#endif
501
502
/* r = in^-1 mod p */
503
static void ecp_nistz256_mod_inverse(BN_ULONG r[P256_LIMBS],
504
                                     const BN_ULONG in[P256_LIMBS])
505
0
{
506
    /*
507
     * The poly is ffffffff 00000001 00000000 00000000 00000000 ffffffff
508
     * ffffffff ffffffff. We use Fermat's Little Theorem (FLT), with poly - 2 as the exponent.
509
     */
510
0
    BN_ULONG p2[P256_LIMBS];
511
0
    BN_ULONG p4[P256_LIMBS];
512
0
    BN_ULONG p8[P256_LIMBS];
513
0
    BN_ULONG p16[P256_LIMBS];
514
0
    BN_ULONG p32[P256_LIMBS];
515
0
    BN_ULONG res[P256_LIMBS];
516
0
    int i;
517
518
0
    ecp_nistz256_sqr_mont(res, in);
519
0
    ecp_nistz256_mul_mont(p2, res, in);         /* 3*p */
520
521
0
    ecp_nistz256_sqr_mont(res, p2);
522
0
    ecp_nistz256_sqr_mont(res, res);
523
0
    ecp_nistz256_mul_mont(p4, res, p2);         /* f*p */
524
525
0
    ecp_nistz256_sqr_mont(res, p4);
526
0
    ecp_nistz256_sqr_mont(res, res);
527
0
    ecp_nistz256_sqr_mont(res, res);
528
0
    ecp_nistz256_sqr_mont(res, res);
529
0
    ecp_nistz256_mul_mont(p8, res, p4);         /* ff*p */
530
531
0
    ecp_nistz256_sqr_mont(res, p8);
532
0
    for (i = 0; i < 7; i++)
533
0
        ecp_nistz256_sqr_mont(res, res);
534
0
    ecp_nistz256_mul_mont(p16, res, p8);        /* ffff*p */
535
536
0
    ecp_nistz256_sqr_mont(res, p16);
537
0
    for (i = 0; i < 15; i++)
538
0
        ecp_nistz256_sqr_mont(res, res);
539
0
    ecp_nistz256_mul_mont(p32, res, p16);       /* ffffffff*p */
540
541
0
    ecp_nistz256_sqr_mont(res, p32);
542
0
    for (i = 0; i < 31; i++)
543
0
        ecp_nistz256_sqr_mont(res, res);
544
0
    ecp_nistz256_mul_mont(res, res, in);
545
546
0
    for (i = 0; i < 32 * 4; i++)
547
0
        ecp_nistz256_sqr_mont(res, res);
548
0
    ecp_nistz256_mul_mont(res, res, p32);
549
550
0
    for (i = 0; i < 32; i++)
551
0
        ecp_nistz256_sqr_mont(res, res);
552
0
    ecp_nistz256_mul_mont(res, res, p32);
553
554
0
    for (i = 0; i < 16; i++)
555
0
        ecp_nistz256_sqr_mont(res, res);
556
0
    ecp_nistz256_mul_mont(res, res, p16);
557
558
0
    for (i = 0; i < 8; i++)
559
0
        ecp_nistz256_sqr_mont(res, res);
560
0
    ecp_nistz256_mul_mont(res, res, p8);
561
562
0
    ecp_nistz256_sqr_mont(res, res);
563
0
    ecp_nistz256_sqr_mont(res, res);
564
0
    ecp_nistz256_sqr_mont(res, res);
565
0
    ecp_nistz256_sqr_mont(res, res);
566
0
    ecp_nistz256_mul_mont(res, res, p4);
567
568
0
    ecp_nistz256_sqr_mont(res, res);
569
0
    ecp_nistz256_sqr_mont(res, res);
570
0
    ecp_nistz256_mul_mont(res, res, p2);
571
572
0
    ecp_nistz256_sqr_mont(res, res);
573
0
    ecp_nistz256_sqr_mont(res, res);
574
0
    ecp_nistz256_mul_mont(res, res, in);
575
576
0
    memcpy(r, res, sizeof(res));
577
0
}
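/*
 * Cross-check sketch (editorial, hypothetical helper, not constant time):
 * the same inverse can be computed with plain BIGNUM arithmetic as
 * in^(p-2) mod p, since p is prime (Fermat's Little Theorem). Note that
 * ecp_nistz256_mod_inverse above works on Montgomery representatives, so a
 * comparison would have to convert with ecp_nistz256_to_mont/from_mont.
 */
static int ref_mod_inverse(BIGNUM *r, const BIGNUM *in,
                           const BIGNUM *p, BN_CTX *ctx)
{
    int ok = 0;
    BIGNUM *e = BN_new();

    if (e == NULL)
        return 0;
    if (BN_copy(e, p) != NULL
        && BN_sub_word(e, 2)                /* e = p - 2 */
        && BN_mod_exp(r, in, e, p, ctx))    /* r = in^(p-2) mod p */
        ok = 1;
    BN_free(e);
    return ok;
}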
578
579
/*
580
 * ecp_nistz256_bignum_to_field_elem copies the contents of |in| to |out| and
581
 * returns one if it fits. Otherwise it returns zero.
582
 */
583
static int ecp_nistz256_bignum_to_field_elem(BN_ULONG out[P256_LIMBS],
584
                                             const BIGNUM *in)
585
0
{
586
0
    if (in->top > P256_LIMBS)
587
0
        return 0;
588
589
0
    memset(out, 0, sizeof(BN_ULONG) * P256_LIMBS);
590
0
    memcpy(out, in->d, sizeof(BN_ULONG) * in->top);
591
0
    return 1;
592
0
}
593
594
/* r = sum(scalar[i]*point[i]) */
595
static int ecp_nistz256_windowed_mul(const EC_GROUP *group,
596
                                      P256_POINT *r,
597
                                      const BIGNUM **scalar,
598
                                      const EC_POINT **point,
599
                                      int num, BN_CTX *ctx)
600
0
{
601
602
0
    int i, j, ret = 0;
603
0
    unsigned int index;
604
0
    unsigned char (*p_str)[33] = NULL;
605
0
    const unsigned int window_size = 5;
606
0
    const unsigned int mask = (1 << (window_size + 1)) - 1;
607
0
    unsigned int wvalue;
608
0
    BN_ULONG tmp[P256_LIMBS];
609
0
    ALIGN32 P256_POINT h;
610
0
    const BIGNUM **scalars = NULL;
611
0
    P256_POINT (*table)[16] = NULL;
612
0
    void *table_storage = NULL;
613
614
0
    if ((table_storage =
615
0
         OPENSSL_malloc(num * 16 * sizeof(P256_POINT) + 64)) == NULL
616
0
        || (p_str =
617
0
            OPENSSL_malloc(num * 33 * sizeof(unsigned char))) == NULL
618
0
        || (scalars = OPENSSL_malloc(num * sizeof(BIGNUM *))) == NULL) {
619
0
        ECerr(EC_F_ECP_NISTZ256_WINDOWED_MUL, ERR_R_MALLOC_FAILURE);
620
0
        goto err;
621
0
    } else {
622
0
        table = (void *)ALIGNPTR(table_storage, 64);
623
0
    }
624
625
0
    for (i = 0; i < num; i++) {
626
0
        P256_POINT *row = table[i];
627
628
        /* This is an unusual input; we don't guarantee constant-timeness. */
629
0
        if ((BN_num_bits(scalar[i]) > 256) || BN_is_negative(scalar[i])) {
630
0
            BIGNUM *mod;
631
632
0
            if ((mod = BN_CTX_get(ctx)) == NULL)
633
0
                goto err;
634
0
            if (!BN_nnmod(mod, scalar[i], &group->order, ctx)) {
635
0
                ECerr(EC_F_ECP_NISTZ256_WINDOWED_MUL, ERR_R_BN_LIB);
636
0
                goto err;
637
0
            }
638
0
            scalars[i] = mod;
639
0
        } else
640
0
            scalars[i] = scalar[i];
641
642
0
        for (j = 0; j < scalars[i]->top * BN_BYTES; j += BN_BYTES) {
643
0
            BN_ULONG d = scalars[i]->d[j / BN_BYTES];
644
645
0
            p_str[i][j + 0] = d & 0xff;
646
0
            p_str[i][j + 1] = (d >> 8) & 0xff;
647
0
            p_str[i][j + 2] = (d >> 16) & 0xff;
648
0
            p_str[i][j + 3] = (d >>= 24) & 0xff;
649
0
            if (BN_BYTES == 8) {
650
0
                d >>= 8;
651
0
                p_str[i][j + 4] = d & 0xff;
652
0
                p_str[i][j + 5] = (d >> 8) & 0xff;
653
0
                p_str[i][j + 6] = (d >> 16) & 0xff;
654
0
                p_str[i][j + 7] = (d >> 24) & 0xff;
655
0
            }
656
0
        }
657
0
        for (; j < 33; j++)
658
0
            p_str[i][j] = 0;
659
660
        /* table[0] is implicitly (0,0,0) (the point at infinity),
661
         * therefore it is not stored. All other values are actually
662
         * stored with an offset of -1 in table.
663
         */
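        /*
         * Editorial note: after the chain of doublings and additions below,
         * row[k - 1] holds k*point[i] for k = 1..16, so a Booth digit d
         * selects row[|d| - 1] and conditionally negates Y when d < 0.
         */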
664
665
0
        if (!ecp_nistz256_bignum_to_field_elem(row[1 - 1].X, &point[i]->X)
666
0
            || !ecp_nistz256_bignum_to_field_elem(row[1 - 1].Y, &point[i]->Y)
667
0
            || !ecp_nistz256_bignum_to_field_elem(row[1 - 1].Z, &point[i]->Z)) {
668
0
            ECerr(EC_F_ECP_NISTZ256_WINDOWED_MUL, EC_R_COORDINATES_OUT_OF_RANGE);
669
0
            goto err;
670
0
        }
671
672
0
        ecp_nistz256_point_double(&row[ 2 - 1], &row[ 1 - 1]);
673
0
        ecp_nistz256_point_add   (&row[ 3 - 1], &row[ 2 - 1], &row[1 - 1]);
674
0
        ecp_nistz256_point_double(&row[ 4 - 1], &row[ 2 - 1]);
675
0
        ecp_nistz256_point_double(&row[ 6 - 1], &row[ 3 - 1]);
676
0
        ecp_nistz256_point_double(&row[ 8 - 1], &row[ 4 - 1]);
677
0
        ecp_nistz256_point_double(&row[12 - 1], &row[ 6 - 1]);
678
0
        ecp_nistz256_point_add   (&row[ 5 - 1], &row[ 4 - 1], &row[1 - 1]);
679
0
        ecp_nistz256_point_add   (&row[ 7 - 1], &row[ 6 - 1], &row[1 - 1]);
680
0
        ecp_nistz256_point_add   (&row[ 9 - 1], &row[ 8 - 1], &row[1 - 1]);
681
0
        ecp_nistz256_point_add   (&row[13 - 1], &row[12 - 1], &row[1 - 1]);
682
0
        ecp_nistz256_point_double(&row[14 - 1], &row[ 7 - 1]);
683
0
        ecp_nistz256_point_double(&row[10 - 1], &row[ 5 - 1]);
684
0
        ecp_nistz256_point_add   (&row[15 - 1], &row[14 - 1], &row[1 - 1]);
685
0
        ecp_nistz256_point_add   (&row[11 - 1], &row[10 - 1], &row[1 - 1]);
686
0
        ecp_nistz256_point_add   (&row[16 - 1], &row[15 - 1], &row[1 - 1]);
687
0
    }
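    /*
     * Editorial note: the scalar is consumed in 5-bit Booth windows at bit
     * indices 255, 250, ..., 5, with five point doublings between windows,
     * and a final window at the bottom; p_str has 33 bytes so the two-byte
     * reads below never run past the end of the buffer.
     */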
688
689
0
    index = 255;
690
691
0
    wvalue = p_str[0][(index - 1) / 8];
692
0
    wvalue = (wvalue >> ((index - 1) % 8)) & mask;
693
694
0
    ecp_nistz256_select_w5(r, table[0], _booth_recode_w5(wvalue) >> 1);
695
696
0
    while (index >= 5) {
697
0
        for (i = (index == 255 ? 1 : 0); i < num; i++) {
698
0
            unsigned int off = (index - 1) / 8;
699
700
0
            wvalue = p_str[i][off] | p_str[i][off + 1] << 8;
701
0
            wvalue = (wvalue >> ((index - 1) % 8)) & mask;
702
703
0
            wvalue = _booth_recode_w5(wvalue);
704
705
0
            ecp_nistz256_select_w5(&h, table[i], wvalue >> 1);
706
707
0
            ecp_nistz256_neg(tmp, h.Y);
708
0
            copy_conditional(h.Y, tmp, (wvalue & 1));
709
710
0
            ecp_nistz256_point_add(r, r, &h);
711
0
        }
712
713
0
        index -= window_size;
714
715
0
        ecp_nistz256_point_double(r, r);
716
0
        ecp_nistz256_point_double(r, r);
717
0
        ecp_nistz256_point_double(r, r);
718
0
        ecp_nistz256_point_double(r, r);
719
0
        ecp_nistz256_point_double(r, r);
720
0
    }
721
722
    /* Final window */
723
0
    for (i = 0; i < num; i++) {
724
0
        wvalue = p_str[i][0];
725
0
        wvalue = (wvalue << 1) & mask;
726
727
0
        wvalue = _booth_recode_w5(wvalue);
728
729
0
        ecp_nistz256_select_w5(&h, table[i], wvalue >> 1);
730
731
0
        ecp_nistz256_neg(tmp, h.Y);
732
0
        copy_conditional(h.Y, tmp, wvalue & 1);
733
734
0
        ecp_nistz256_point_add(r, r, &h);
735
0
    }
736
737
0
    ret = 1;
738
0
 err:
739
0
    if (table_storage)
740
0
        OPENSSL_free(table_storage);
741
0
    if (p_str)
742
0
        OPENSSL_free(p_str);
743
0
    if (scalars)
744
0
        OPENSSL_free(scalars);
745
0
    return ret;
746
0
}
747
748
/* Coordinates of G, for which we have precomputed tables */
749
static const BN_ULONG def_xG[P256_LIMBS] = {
750
    TOBN(0x79e730d4, 0x18a9143c), TOBN(0x75ba95fc, 0x5fedb601),
751
    TOBN(0x79fb732b, 0x77622510), TOBN(0x18905f76, 0xa53755c6)
752
};
753
754
static const BN_ULONG def_yG[P256_LIMBS] = {
755
    TOBN(0xddf25357, 0xce95560a), TOBN(0x8b4ab8e4, 0xba19e45c),
756
    TOBN(0xd2e88688, 0xdd21f325), TOBN(0x8571ff18, 0x25885d85)
757
};
758
759
/*
760
 * ecp_nistz256_is_affine_G returns one if |generator| is the standard P-256
761
 * generator.
762
 */
763
static int ecp_nistz256_is_affine_G(const EC_POINT *generator)
764
0
{
765
0
    return (generator->X.top == P256_LIMBS) &&
766
0
        (generator->Y.top == P256_LIMBS) &&
767
0
        is_equal(generator->X.d, def_xG) &&
768
0
        is_equal(generator->Y.d, def_yG) && is_one(&generator->Z);
769
0
}
770
771
static int ecp_nistz256_mult_precompute(EC_GROUP *group, BN_CTX *ctx)
772
0
{
773
    /*
774
     * We precompute a table for a Booth encoded exponent (wNAF) based
775
     * computation. Each table holds 64 values for safe access, with an
776
     * implicit value of infinity at index zero. We use a window of size 7, and
777
     * therefore require ceil(256/7) = 37 tables.
778
     */
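    /*
     * Editorial note: each table row stores 64 affine points of 64 bytes
     * (2 * P256_LIMBS limbs), so the allocation below is
     * 37 * 64 * 64 = 151552 bytes (~148 KiB) plus 64 bytes of alignment
     * slack for ALIGNPTR.
     */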
779
0
    BIGNUM *order;
780
0
    EC_POINT *P = NULL, *T = NULL;
781
0
    const EC_POINT *generator;
782
0
    EC_PRE_COMP *pre_comp;
783
0
    BN_CTX *new_ctx = NULL;
784
0
    int i, j, k, ret = 0;
785
0
    size_t w;
786
787
0
    PRECOMP256_ROW *preComputedTable = NULL;
788
0
    unsigned char *precomp_storage = NULL;
789
790
    /* if there is an old EC_PRE_COMP object, throw it away */
791
0
    EC_EX_DATA_free_data(&group->extra_data, ecp_nistz256_pre_comp_dup,
792
0
                         ecp_nistz256_pre_comp_free,
793
0
                         ecp_nistz256_pre_comp_clear_free);
794
795
0
    generator = EC_GROUP_get0_generator(group);
796
0
    if (generator == NULL) {
797
0
        ECerr(EC_F_ECP_NISTZ256_MULT_PRECOMPUTE, EC_R_UNDEFINED_GENERATOR);
798
0
        return 0;
799
0
    }
800
801
0
    if (ecp_nistz256_is_affine_G(generator)) {
802
        /*
803
         * No need to calculate tables for the standard generator because we
804
         * have them statically.
805
         */
806
0
        return 1;
807
0
    }
808
809
0
    if ((pre_comp = ecp_nistz256_pre_comp_new(group)) == NULL)
810
0
        return 0;
811
812
0
    if (ctx == NULL) {
813
0
        ctx = new_ctx = BN_CTX_new();
814
0
        if (ctx == NULL)
815
0
            goto err;
816
0
    }
817
818
0
    BN_CTX_start(ctx);
819
0
    order = BN_CTX_get(ctx);
820
821
0
    if (order == NULL)
822
0
        goto err;
823
824
0
    if (!EC_GROUP_get_order(group, order, ctx))
825
0
        goto err;
826
827
0
    if (BN_is_zero(order)) {
828
0
        ECerr(EC_F_ECP_NISTZ256_MULT_PRECOMPUTE, EC_R_UNKNOWN_ORDER);
829
0
        goto err;
830
0
    }
831
832
0
    w = 7;
833
834
0
    if ((precomp_storage =
835
0
         OPENSSL_malloc(37 * 64 * sizeof(P256_POINT_AFFINE) + 64)) == NULL) {
836
0
        ECerr(EC_F_ECP_NISTZ256_MULT_PRECOMPUTE, ERR_R_MALLOC_FAILURE);
837
0
        goto err;
838
0
    } else {
839
0
        preComputedTable = (void *)ALIGNPTR(precomp_storage, 64);
840
0
    }
841
842
0
    P = EC_POINT_new(group);
843
0
    T = EC_POINT_new(group);
844
0
    if (P == NULL || T == NULL)
845
0
        goto err;
846
847
    /*
848
     * The zero entry is implicitly infinity, and we skip it, storing other
849
     * values with -1 offset.
850
     */
851
0
    if (!EC_POINT_copy(T, generator))
852
0
        goto err;
853
854
0
    for (k = 0; k < 64; k++) {
855
0
        if (!EC_POINT_copy(P, T))
856
0
            goto err;
857
0
        for (j = 0; j < 37; j++) {
858
            /*
859
             * It would be faster to use EC_POINTs_make_affine and
860
             * make multiple points affine at the same time.
861
             */
862
0
            if (!EC_POINT_make_affine(group, P, ctx))
863
0
                goto err;
864
0
            if (!ecp_nistz256_bignum_to_field_elem(preComputedTable[j][k].X,
865
0
                                                   &P->X) ||
866
0
                !ecp_nistz256_bignum_to_field_elem(preComputedTable[j][k].Y,
867
0
                                                   &P->Y)) {
868
0
                ECerr(EC_F_ECP_NISTZ256_MULT_PRECOMPUTE,
869
0
                      EC_R_COORDINATES_OUT_OF_RANGE);
870
0
                goto err;
871
0
            }
872
0
            for (i = 0; i < 7; i++) {
873
0
                if (!EC_POINT_dbl(group, P, P, ctx))
874
0
                    goto err;
875
0
            }
876
0
        }
877
0
        if (!EC_POINT_add(group, T, T, generator, ctx))
878
0
            goto err;
879
0
    }
880
881
0
    pre_comp->group = group;
882
0
    pre_comp->w = w;
883
0
    pre_comp->precomp = preComputedTable;
884
0
    pre_comp->precomp_storage = precomp_storage;
885
886
0
    precomp_storage = NULL;
887
888
0
    if (!EC_EX_DATA_set_data(&group->extra_data, pre_comp,
889
0
                             ecp_nistz256_pre_comp_dup,
890
0
                             ecp_nistz256_pre_comp_free,
891
0
                             ecp_nistz256_pre_comp_clear_free)) {
892
0
        goto err;
893
0
    }
894
895
0
    pre_comp = NULL;
896
897
0
    ret = 1;
898
899
0
 err:
900
0
    if (ctx != NULL)
901
0
        BN_CTX_end(ctx);
902
0
    BN_CTX_free(new_ctx);
903
904
0
    if (pre_comp)
905
0
        ecp_nistz256_pre_comp_free(pre_comp);
906
0
    if (precomp_storage)
907
0
        OPENSSL_free(precomp_storage);
908
0
    if (P)
909
0
        EC_POINT_free(P);
910
0
    if (T)
911
0
        EC_POINT_free(T);
912
0
    return ret;
913
0
}
914
915
/*
916
 * Note that by default ECP_NISTZ256_AVX2 is undefined. While the AVX2
917
 * code processes 4 points in parallel, the corresponding serial operation
918
 * is several times slower, because it uses 29x29=58-bit multiplication
919
 * as opposed to 64x64=128-bit in the integer-only scalar case. As a result
920
 * it doesn't provide a *significant* performance improvement. Note that
921
 * just defining ECP_NISTZ256_AVX2 is not sufficient to make it work;
922
 * you'd also need to compile the asm/ecp_nistz256-avx.pl module.
923
 */
924
#if defined(ECP_NISTZ256_AVX2)
925
# if !(defined(__x86_64) || defined(__x86_64__) || \
926
       defined(_M_AMD64) || defined(_M_X64)) || \
927
     !(defined(__GNUC__) || defined(_MSC_VER)) /* this is for ALIGN32 */
928
#  undef ECP_NISTZ256_AVX2
929
# else
930
/* Constant time access, loading four values, from four consecutive tables */
931
void ecp_nistz256_avx2_select_w7(P256_POINT_AFFINE * val,
932
                                 const P256_POINT_AFFINE * in_t, int index);
933
void ecp_nistz256_avx2_multi_select_w7(void *result, const void *in, int index0,
934
                                       int index1, int index2, int index3);
935
void ecp_nistz256_avx2_transpose_convert(void *RESULTx4, const void *in);
936
void ecp_nistz256_avx2_convert_transpose_back(void *result, const void *Ax4);
937
void ecp_nistz256_avx2_point_add_affine_x4(void *RESULTx4, const void *Ax4,
938
                                           const void *Bx4);
939
void ecp_nistz256_avx2_point_add_affines_x4(void *RESULTx4, const void *Ax4,
940
                                            const void *Bx4);
941
void ecp_nistz256_avx2_to_mont(void *RESULTx4, const void *Ax4);
942
void ecp_nistz256_avx2_from_mont(void *RESULTx4, const void *Ax4);
943
void ecp_nistz256_avx2_set1(void *RESULTx4);
944
int ecp_nistz_avx2_eligible(void);
945
946
static void booth_recode_w7(unsigned char *sign,
947
                            unsigned char *digit, unsigned char in)
948
{
949
    unsigned char s, d;
950
951
    s = ~((in >> 7) - 1);
952
    d = (1 << 8) - in - 1;
953
    d = (d & s) | (in & ~s);
954
    d = (d >> 1) + (d & 1);
955
956
    *sign = s & 1;
957
    *digit = d;
958
}
959
960
/*
961
 * ecp_nistz256_avx2_mul_g performs multiplication by G, using only the
962
 * precomputed table. It does 4 affine point additions in parallel,
963
 * significantly speeding up point multiplication for a fixed value.
964
 */
965
static void ecp_nistz256_avx2_mul_g(P256_POINT *r,
966
                                    unsigned char p_str[33],
967
                                    const P256_POINT_AFFINE(*preComputedTable)[64])
968
{
969
    const unsigned int window_size = 7;
970
    const unsigned int mask = (1 << (window_size + 1)) - 1;
971
    unsigned int wvalue;
972
    /* Using 4 windows at a time */
973
    unsigned char sign0, digit0;
974
    unsigned char sign1, digit1;
975
    unsigned char sign2, digit2;
976
    unsigned char sign3, digit3;
977
    unsigned int index = 0;
978
    BN_ULONG tmp[P256_LIMBS];
979
    int i;
980
981
    ALIGN32 BN_ULONG aX4[4 * 9 * 3] = { 0 };
982
    ALIGN32 BN_ULONG bX4[4 * 9 * 2] = { 0 };
983
    ALIGN32 P256_POINT_AFFINE point_arr[P256_LIMBS];
984
    ALIGN32 P256_POINT res_point_arr[P256_LIMBS];
985
986
    /* Initial four windows */
987
    wvalue = *((u16 *) & p_str[0]);
988
    wvalue = (wvalue << 1) & mask;
989
    index += window_size;
990
    booth_recode_w7(&sign0, &digit0, wvalue);
991
    wvalue = *((u16 *) & p_str[(index - 1) / 8]);
992
    wvalue = (wvalue >> ((index - 1) % 8)) & mask;
993
    index += window_size;
994
    booth_recode_w7(&sign1, &digit1, wvalue);
995
    wvalue = *((u16 *) & p_str[(index - 1) / 8]);
996
    wvalue = (wvalue >> ((index - 1) % 8)) & mask;
997
    index += window_size;
998
    booth_recode_w7(&sign2, &digit2, wvalue);
999
    wvalue = *((u16 *) & p_str[(index - 1) / 8]);
1000
    wvalue = (wvalue >> ((index - 1) % 8)) & mask;
1001
    index += window_size;
1002
    booth_recode_w7(&sign3, &digit3, wvalue);
1003
1004
    ecp_nistz256_avx2_multi_select_w7(point_arr, preComputedTable[0],
1005
                                      digit0, digit1, digit2, digit3);
1006
1007
    ecp_nistz256_neg(tmp, point_arr[0].Y);
1008
    copy_conditional(point_arr[0].Y, tmp, sign0);
1009
    ecp_nistz256_neg(tmp, point_arr[1].Y);
1010
    copy_conditional(point_arr[1].Y, tmp, sign1);
1011
    ecp_nistz256_neg(tmp, point_arr[2].Y);
1012
    copy_conditional(point_arr[2].Y, tmp, sign2);
1013
    ecp_nistz256_neg(tmp, point_arr[3].Y);
1014
    copy_conditional(point_arr[3].Y, tmp, sign3);
1015
1016
    ecp_nistz256_avx2_transpose_convert(aX4, point_arr);
1017
    ecp_nistz256_avx2_to_mont(aX4, aX4);
1018
    ecp_nistz256_avx2_to_mont(&aX4[4 * 9], &aX4[4 * 9]);
1019
    ecp_nistz256_avx2_set1(&aX4[4 * 9 * 2]);
1020
1021
    wvalue = *((u16 *) & p_str[(index - 1) / 8]);
1022
    wvalue = (wvalue >> ((index - 1) % 8)) & mask;
1023
    index += window_size;
1024
    booth_recode_w7(&sign0, &digit0, wvalue);
1025
    wvalue = *((u16 *) & p_str[(index - 1) / 8]);
1026
    wvalue = (wvalue >> ((index - 1) % 8)) & mask;
1027
    index += window_size;
1028
    booth_recode_w7(&sign1, &digit1, wvalue);
1029
    wvalue = *((u16 *) & p_str[(index - 1) / 8]);
1030
    wvalue = (wvalue >> ((index - 1) % 8)) & mask;
1031
    index += window_size;
1032
    booth_recode_w7(&sign2, &digit2, wvalue);
1033
    wvalue = *((u16 *) & p_str[(index - 1) / 8]);
1034
    wvalue = (wvalue >> ((index - 1) % 8)) & mask;
1035
    index += window_size;
1036
    booth_recode_w7(&sign3, &digit3, wvalue);
1037
1038
    ecp_nistz256_avx2_multi_select_w7(point_arr, preComputedTable[4 * 1],
1039
                                      digit0, digit1, digit2, digit3);
1040
1041
    ecp_nistz256_neg(tmp, point_arr[0].Y);
1042
    copy_conditional(point_arr[0].Y, tmp, sign0);
1043
    ecp_nistz256_neg(tmp, point_arr[1].Y);
1044
    copy_conditional(point_arr[1].Y, tmp, sign1);
1045
    ecp_nistz256_neg(tmp, point_arr[2].Y);
1046
    copy_conditional(point_arr[2].Y, tmp, sign2);
1047
    ecp_nistz256_neg(tmp, point_arr[3].Y);
1048
    copy_conditional(point_arr[3].Y, tmp, sign3);
1049
1050
    ecp_nistz256_avx2_transpose_convert(bX4, point_arr);
1051
    ecp_nistz256_avx2_to_mont(bX4, bX4);
1052
    ecp_nistz256_avx2_to_mont(&bX4[4 * 9], &bX4[4 * 9]);
1053
    /* Optimized when both inputs are affine */
1054
    ecp_nistz256_avx2_point_add_affines_x4(aX4, aX4, bX4);
1055
1056
    for (i = 2; i < 9; i++) {
1057
        wvalue = *((u16 *) & p_str[(index - 1) / 8]);
1058
        wvalue = (wvalue >> ((index - 1) % 8)) & mask;
1059
        index += window_size;
1060
        booth_recode_w7(&sign0, &digit0, wvalue);
1061
        wvalue = *((u16 *) & p_str[(index - 1) / 8]);
1062
        wvalue = (wvalue >> ((index - 1) % 8)) & mask;
1063
        index += window_size;
1064
        booth_recode_w7(&sign1, &digit1, wvalue);
1065
        wvalue = *((u16 *) & p_str[(index - 1) / 8]);
1066
        wvalue = (wvalue >> ((index - 1) % 8)) & mask;
1067
        index += window_size;
1068
        booth_recode_w7(&sign2, &digit2, wvalue);
1069
        wvalue = *((u16 *) & p_str[(index - 1) / 8]);
1070
        wvalue = (wvalue >> ((index - 1) % 8)) & mask;
1071
        index += window_size;
1072
        booth_recode_w7(&sign3, &digit3, wvalue);
1073
1074
        ecp_nistz256_avx2_multi_select_w7(point_arr,
1075
                                          preComputedTable[4 * i],
1076
                                          digit0, digit1, digit2, digit3);
1077
1078
        ecp_nistz256_neg(tmp, point_arr[0].Y);
1079
        copy_conditional(point_arr[0].Y, tmp, sign0);
1080
        ecp_nistz256_neg(tmp, point_arr[1].Y);
1081
        copy_conditional(point_arr[1].Y, tmp, sign1);
1082
        ecp_nistz256_neg(tmp, point_arr[2].Y);
1083
        copy_conditional(point_arr[2].Y, tmp, sign2);
1084
        ecp_nistz256_neg(tmp, point_arr[3].Y);
1085
        copy_conditional(point_arr[3].Y, tmp, sign3);
1086
1087
        ecp_nistz256_avx2_transpose_convert(bX4, point_arr);
1088
        ecp_nistz256_avx2_to_mont(bX4, bX4);
1089
        ecp_nistz256_avx2_to_mont(&bX4[4 * 9], &bX4[4 * 9]);
1090
1091
        ecp_nistz256_avx2_point_add_affine_x4(aX4, aX4, bX4);
1092
    }
1093
1094
    ecp_nistz256_avx2_from_mont(&aX4[4 * 9 * 0], &aX4[4 * 9 * 0]);
1095
    ecp_nistz256_avx2_from_mont(&aX4[4 * 9 * 1], &aX4[4 * 9 * 1]);
1096
    ecp_nistz256_avx2_from_mont(&aX4[4 * 9 * 2], &aX4[4 * 9 * 2]);
1097
1098
    ecp_nistz256_avx2_convert_transpose_back(res_point_arr, aX4);
1099
    /* Last window is performed serially */
1100
    wvalue = *((u16 *) & p_str[(index - 1) / 8]);
1101
    wvalue = (wvalue >> ((index - 1) % 8)) & mask;
1102
    booth_recode_w7(&sign0, &digit0, wvalue);
1103
    ecp_nistz256_avx2_select_w7((P256_POINT_AFFINE *) r,
1104
                                preComputedTable[36], digit0);
1105
    ecp_nistz256_neg(tmp, r->Y);
1106
    copy_conditional(r->Y, tmp, sign0);
1107
    memcpy(r->Z, ONE, sizeof(ONE));
1108
    /* Sum the four windows */
1109
    ecp_nistz256_point_add(r, r, &res_point_arr[0]);
1110
    ecp_nistz256_point_add(r, r, &res_point_arr[1]);
1111
    ecp_nistz256_point_add(r, r, &res_point_arr[2]);
1112
    ecp_nistz256_point_add(r, r, &res_point_arr[3]);
1113
}
1114
# endif
1115
#endif
1116
1117
static int ecp_nistz256_set_from_affine(EC_POINT *out, const EC_GROUP *group,
1118
                                        const P256_POINT_AFFINE *in,
1119
                                        BN_CTX *ctx)
1120
0
{
1121
0
    BIGNUM x, y;
1122
0
    BN_ULONG d_x[P256_LIMBS], d_y[P256_LIMBS];
1123
0
    int ret = 0;
1124
1125
0
    memcpy(d_x, in->X, sizeof(d_x));
1126
0
    x.d = d_x;
1127
0
    x.dmax = x.top = P256_LIMBS;
1128
0
    x.neg = 0;
1129
0
    x.flags = BN_FLG_STATIC_DATA;
1130
1131
0
    memcpy(d_y, in->Y, sizeof(d_y));
1132
0
    y.d = d_y;
1133
0
    y.dmax = y.top = P256_LIMBS;
1134
0
    y.neg = 0;
1135
0
    y.flags = BN_FLG_STATIC_DATA;
1136
1137
0
    ret = EC_POINT_set_affine_coordinates_GFp(group, out, &x, &y, ctx);
1138
1139
0
    return ret;
1140
0
}
1141
1142
/* r = scalar*G + sum(scalars[i]*points[i]) */
1143
static int ecp_nistz256_points_mul(const EC_GROUP *group,
1144
                                   EC_POINT *r,
1145
                                   const BIGNUM *scalar,
1146
                                   size_t num,
1147
                                   const EC_POINT *points[],
1148
                                   const BIGNUM *scalars[], BN_CTX *ctx)
1149
0
{
1150
0
    int i = 0, ret = 0, no_precomp_for_generator = 0, p_is_infinity = 0;
1151
0
    size_t j;
1152
0
    unsigned char p_str[33] = { 0 };
1153
0
    const PRECOMP256_ROW *preComputedTable = NULL;
1154
0
    const EC_PRE_COMP *pre_comp = NULL;
1155
0
    const EC_POINT *generator = NULL;
1156
0
    unsigned int index = 0;
1157
0
    BN_CTX *new_ctx = NULL;
1158
0
    const BIGNUM **new_scalars = NULL;
1159
0
    const EC_POINT **new_points = NULL;
1160
0
    const unsigned int window_size = 7;
1161
0
    const unsigned int mask = (1 << (window_size + 1)) - 1;
1162
0
    unsigned int wvalue;
1163
0
    ALIGN32 union {
1164
0
        P256_POINT p;
1165
0
        P256_POINT_AFFINE a;
1166
0
    } t, p;
1167
0
    BIGNUM *tmp_scalar;
1168
1169
0
    if (group->meth != r->meth) {
1170
0
        ECerr(EC_F_ECP_NISTZ256_POINTS_MUL, EC_R_INCOMPATIBLE_OBJECTS);
1171
0
        return 0;
1172
0
    }
1173
1174
0
    if ((scalar == NULL) && (num == 0))
1175
0
        return EC_POINT_set_to_infinity(group, r);
1176
1177
0
    for (j = 0; j < num; j++) {
1178
0
        if (group->meth != points[j]->meth) {
1179
0
            ECerr(EC_F_ECP_NISTZ256_POINTS_MUL, EC_R_INCOMPATIBLE_OBJECTS);
1180
0
            return 0;
1181
0
        }
1182
0
    }
1183
1184
0
    if (ctx == NULL) {
1185
0
        ctx = new_ctx = BN_CTX_new();
1186
0
        if (ctx == NULL)
1187
0
            goto err;
1188
0
    }
1189
1190
0
    BN_CTX_start(ctx);
1191
1192
0
    if (scalar) {
1193
0
        generator = EC_GROUP_get0_generator(group);
1194
0
        if (generator == NULL) {
1195
0
            ECerr(EC_F_ECP_NISTZ256_POINTS_MUL, EC_R_UNDEFINED_GENERATOR);
1196
0
            goto err;
1197
0
        }
1198
1199
        /* see if we can use precomputed multiples of the generator */
1200
0
        pre_comp =
1201
0
            EC_EX_DATA_get_data(group->extra_data, ecp_nistz256_pre_comp_dup,
1202
0
                                ecp_nistz256_pre_comp_free,
1203
0
                                ecp_nistz256_pre_comp_clear_free);
1204
1205
0
        if (pre_comp) {
1206
            /*
1207
             * If there is a precomputed table for the generator, check that
1208
             * it was generated with the same generator.
1209
             */
1210
0
            EC_POINT *pre_comp_generator = EC_POINT_new(group);
1211
0
            if (pre_comp_generator == NULL)
1212
0
                goto err;
1213
1214
0
            if (!ecp_nistz256_set_from_affine
1215
0
                (pre_comp_generator, group, pre_comp->precomp[0], ctx)) {
1216
0
                EC_POINT_free(pre_comp_generator);
1217
0
                goto err;
1218
0
            }
1219
1220
0
            if (0 == EC_POINT_cmp(group, generator, pre_comp_generator, ctx))
1221
0
                preComputedTable = (const PRECOMP256_ROW *)pre_comp->precomp;
1222
1223
0
            EC_POINT_free(pre_comp_generator);
1224
0
        }
1225
1226
0
        if (preComputedTable == NULL && ecp_nistz256_is_affine_G(generator)) {
1227
            /*
1228
             * If there is no precomputed data, but the generator
1229
             * is the default, a hardcoded table of precomputed
1230
             * data is used. This is because applications, such as
1231
             * Apache, do not use EC_KEY_precompute_mult.
1232
             */
1233
0
            preComputedTable = (const PRECOMP256_ROW *)ecp_nistz256_precomputed;
1234
0
        }
1235
1236
0
        if (preComputedTable) {
1237
0
            if ((BN_num_bits(scalar) > 256)
1238
0
                || BN_is_negative(scalar)) {
1239
0
                if ((tmp_scalar = BN_CTX_get(ctx)) == NULL)
1240
0
                    goto err;
1241
1242
0
                if (!BN_nnmod(tmp_scalar, scalar, &group->order, ctx)) {
1243
0
                    ECerr(EC_F_ECP_NISTZ256_POINTS_MUL, ERR_R_BN_LIB);
1244
0
                    goto err;
1245
0
                }
1246
0
                scalar = tmp_scalar;
1247
0
            }
1248
1249
0
            for (i = 0; i < scalar->top * BN_BYTES; i += BN_BYTES) {
1250
0
                BN_ULONG d = scalar->d[i / BN_BYTES];
1251
1252
0
                p_str[i + 0] = d & 0xff;
1253
0
                p_str[i + 1] = (d >> 8) & 0xff;
1254
0
                p_str[i + 2] = (d >> 16) & 0xff;
1255
0
                p_str[i + 3] = (d >>= 24) & 0xff;
1256
0
                if (BN_BYTES == 8) {
1257
0
                    d >>= 8;
1258
0
                    p_str[i + 4] = d & 0xff;
1259
0
                    p_str[i + 5] = (d >> 8) & 0xff;
1260
0
                    p_str[i + 6] = (d >> 16) & 0xff;
1261
0
                    p_str[i + 7] = (d >> 24) & 0xff;
1262
0
                }
1263
0
            }
1264
1265
0
            for (; i < 33; i++)
1266
0
                p_str[i] = 0;
1267
1268
#if defined(ECP_NISTZ256_AVX2)
1269
            if (ecp_nistz_avx2_eligible()) {
1270
                ecp_nistz256_avx2_mul_g(&p.p, p_str, preComputedTable);
1271
            } else
1272
#endif
1273
0
            {
1274
0
                BN_ULONG infty;
1275
1276
                /* First window */
1277
0
                wvalue = (p_str[0] << 1) & mask;
1278
0
                index += window_size;
1279
1280
0
                wvalue = _booth_recode_w7(wvalue);
1281
1282
0
                ecp_nistz256_select_w7(&p.a, preComputedTable[0], wvalue >> 1);
1283
1284
0
                ecp_nistz256_neg(p.p.Z, p.p.Y);
1285
0
                copy_conditional(p.p.Y, p.p.Z, wvalue & 1);
1286
1287
                /*
1288
                 * Since affine infinity is encoded as (0,0) and
1289
                 * Jacobian is (,,0), we need to harmonize them
1290
                 * by assigning "one" or zero to Z.
1291
                 */
1292
0
                infty = (p.p.X[0] | p.p.X[1] | p.p.X[2] | p.p.X[3] |
1293
0
                         p.p.Y[0] | p.p.Y[1] | p.p.Y[2] | p.p.Y[3]);
1294
0
                if (P256_LIMBS == 8)
1295
0
                    infty |= (p.p.X[4] | p.p.X[5] | p.p.X[6] | p.p.X[7] |
1296
0
                              p.p.Y[4] | p.p.Y[5] | p.p.Y[6] | p.p.Y[7]);
1297
1298
0
                infty = 0 - is_zero(infty);
1299
0
                infty = ~infty;
1300
1301
0
                p.p.Z[0] = ONE[0] & infty;
1302
0
                p.p.Z[1] = ONE[1] & infty;
1303
0
                p.p.Z[2] = ONE[2] & infty;
1304
0
                p.p.Z[3] = ONE[3] & infty;
1305
0
                if (P256_LIMBS == 8) {
1306
0
                    p.p.Z[4] = ONE[4] & infty;
1307
0
                    p.p.Z[5] = ONE[5] & infty;
1308
0
                    p.p.Z[6] = ONE[6] & infty;
1309
0
                    p.p.Z[7] = ONE[7] & infty;
1310
0
                }
1311
1312
0
                for (i = 1; i < 37; i++) {
1313
0
                    unsigned int off = (index - 1) / 8;
1314
0
                    wvalue = p_str[off] | p_str[off + 1] << 8;
1315
0
                    wvalue = (wvalue >> ((index - 1) % 8)) & mask;
1316
0
                    index += window_size;
1317
1318
0
                    wvalue = _booth_recode_w7(wvalue);
1319
1320
0
                    ecp_nistz256_select_w7(&t.a,
1321
0
                                           preComputedTable[i], wvalue >> 1);
1322
1323
0
                    ecp_nistz256_neg(t.p.Z, t.a.Y);
1324
0
                    copy_conditional(t.a.Y, t.p.Z, wvalue & 1);
1325
1326
0
                    ecp_nistz256_point_add_affine(&p.p, &p.p, &t.a);
1327
0
                }
1328
0
            }
1329
0
        } else {
1330
0
            p_is_infinity = 1;
1331
0
            no_precomp_for_generator = 1;
1332
0
        }
1333
0
    } else
1334
0
        p_is_infinity = 1;
1335
1336
0
    if (no_precomp_for_generator) {
1337
        /*
1338
         * Without a precomputed table for the generator, it has to be
1339
         * handled like a normal point.
1340
         */
1341
0
        new_scalars = OPENSSL_malloc((num + 1) * sizeof(BIGNUM *));
1342
0
        if (!new_scalars) {
1343
0
            ECerr(EC_F_ECP_NISTZ256_POINTS_MUL, ERR_R_MALLOC_FAILURE);
1344
0
            goto err;
1345
0
        }
1346
1347
0
        new_points = OPENSSL_malloc((num + 1) * sizeof(EC_POINT *));
1348
0
        if (!new_points) {
1349
0
            ECerr(EC_F_ECP_NISTZ256_POINTS_MUL, ERR_R_MALLOC_FAILURE);
1350
0
            goto err;
1351
0
        }
1352
1353
0
        memcpy(new_scalars, scalars, num * sizeof(BIGNUM *));
1354
0
        new_scalars[num] = scalar;
1355
0
        memcpy(new_points, points, num * sizeof(EC_POINT *));
1356
0
        new_points[num] = generator;
1357
1358
0
        scalars = new_scalars;
1359
0
        points = new_points;
1360
0
        num++;
1361
0
    }
1362
1363
0
    if (num) {
1364
0
        P256_POINT *out = &t.p;
1365
0
        if (p_is_infinity)
1366
0
            out = &p.p;
1367
1368
0
        if (!ecp_nistz256_windowed_mul(group, out, scalars, points, num, ctx))
1369
0
            goto err;
1370
1371
0
        if (!p_is_infinity)
1372
0
            ecp_nistz256_point_add(&p.p, &p.p, out);
1373
0
    }
1374
1375
    /* Not constant-time, but we're only operating on the public output. */
1376
0
    if (!ecp_nistz256_set_words(&r->X, p.p.X) ||
1377
0
        !ecp_nistz256_set_words(&r->Y, p.p.Y) ||
1378
0
        !ecp_nistz256_set_words(&r->Z, p.p.Z)) {
1379
0
        goto err;
1380
0
    }
1381
0
    r->Z_is_one = is_one(&r->Z) & 1;
1382
1383
0
    ret = 1;
1384
1385
0
err:
1386
0
    if (ctx)
1387
0
        BN_CTX_end(ctx);
1388
0
    BN_CTX_free(new_ctx);
1389
0
    if (new_points)
1390
0
        OPENSSL_free(new_points);
1391
0
    if (new_scalars)
1392
0
        OPENSSL_free(new_scalars);
1393
0
    return ret;
1394
0
}
1395
1396
static int ecp_nistz256_get_affine(const EC_GROUP *group,
1397
                                   const EC_POINT *point,
1398
                                   BIGNUM *x, BIGNUM *y, BN_CTX *ctx)
1399
0
{
1400
0
    BN_ULONG z_inv2[P256_LIMBS];
1401
0
    BN_ULONG z_inv3[P256_LIMBS];
1402
0
    BN_ULONG x_aff[P256_LIMBS];
1403
0
    BN_ULONG y_aff[P256_LIMBS];
1404
0
    BN_ULONG point_x[P256_LIMBS], point_y[P256_LIMBS], point_z[P256_LIMBS];
1405
0
    BN_ULONG x_ret[P256_LIMBS], y_ret[P256_LIMBS];
1406
1407
0
    if (EC_POINT_is_at_infinity(group, point)) {
1408
0
        ECerr(EC_F_ECP_NISTZ256_GET_AFFINE, EC_R_POINT_AT_INFINITY);
1409
0
        return 0;
1410
0
    }
1411
1412
0
    if (!ecp_nistz256_bignum_to_field_elem(point_x, &point->X) ||
1413
0
        !ecp_nistz256_bignum_to_field_elem(point_y, &point->Y) ||
1414
0
        !ecp_nistz256_bignum_to_field_elem(point_z, &point->Z)) {
1415
0
        ECerr(EC_F_ECP_NISTZ256_GET_AFFINE, EC_R_COORDINATES_OUT_OF_RANGE);
1416
0
        return 0;
1417
0
    }
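    /*
     * Editorial note: Jacobian (X, Y, Z) corresponds to affine
     * (X/Z^2, Y/Z^3); the inversion and Montgomery multiplications below
     * compute exactly that, and ecp_nistz256_from_mont then strips the
     * Montgomery factor before the coordinates are exported.
     */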
1418
1419
0
    ecp_nistz256_mod_inverse(z_inv3, point_z);
1420
0
    ecp_nistz256_sqr_mont(z_inv2, z_inv3);
1421
0
    ecp_nistz256_mul_mont(x_aff, z_inv2, point_x);
1422
1423
0
    if (x != NULL) {
1424
0
        ecp_nistz256_from_mont(x_ret, x_aff);
1425
0
        if (!ecp_nistz256_set_words(x, x_ret))
1426
0
            return 0;
1427
0
    }
1428
1429
0
    if (y != NULL) {
1430
0
        ecp_nistz256_mul_mont(z_inv3, z_inv3, z_inv2);
1431
0
        ecp_nistz256_mul_mont(y_aff, z_inv3, point_y);
1432
0
        ecp_nistz256_from_mont(y_ret, y_aff);
1433
0
        if (!ecp_nistz256_set_words(y, y_ret))
1434
0
            return 0;
1435
0
    }
1436
1437
0
    return 1;
1438
0
}
1439
1440
static EC_PRE_COMP *ecp_nistz256_pre_comp_new(const EC_GROUP *group)
1441
0
{
1442
0
    EC_PRE_COMP *ret = NULL;
1443
1444
0
    if (!group)
1445
0
        return NULL;
1446
1447
0
    ret = (EC_PRE_COMP *)OPENSSL_malloc(sizeof(EC_PRE_COMP));
1448
1449
0
    if (!ret) {
1450
0
        ECerr(EC_F_ECP_NISTZ256_PRE_COMP_NEW, ERR_R_MALLOC_FAILURE);
1451
0
        return ret;
1452
0
    }
1453
1454
0
    ret->group = group;
1455
0
    ret->w = 6;                 /* default */
1456
0
    ret->precomp = NULL;
1457
0
    ret->precomp_storage = NULL;
1458
0
    ret->references = 1;
1459
0
    return ret;
1460
0
}
1461
1462
static void *ecp_nistz256_pre_comp_dup(void *src_)
1463
0
{
1464
0
    EC_PRE_COMP *src = src_;
1465
1466
    /* no need to actually copy, these objects never change! */
1467
0
    CRYPTO_add(&src->references, 1, CRYPTO_LOCK_EC_PRE_COMP);
1468
1469
0
    return src_;
1470
0
}
1471
1472
static void ecp_nistz256_pre_comp_free(void *pre_)
1473
0
{
1474
0
    int i;
1475
0
    EC_PRE_COMP *pre = pre_;
1476
1477
0
    if (!pre)
1478
0
        return;
1479
1480
0
    i = CRYPTO_add(&pre->references, -1, CRYPTO_LOCK_EC_PRE_COMP);
1481
0
    if (i > 0)
1482
0
        return;
1483
1484
0
    if (pre->precomp_storage)
1485
0
        OPENSSL_free(pre->precomp_storage);
1486
1487
0
    OPENSSL_free(pre);
1488
0
}
1489
1490
static void ecp_nistz256_pre_comp_clear_free(void *pre_)
1491
0
{
1492
0
    int i;
1493
0
    EC_PRE_COMP *pre = pre_;
1494
1495
0
    if (!pre)
1496
0
        return;
1497
1498
0
    i = CRYPTO_add(&pre->references, -1, CRYPTO_LOCK_EC_PRE_COMP);
1499
0
    if (i > 0)
1500
0
        return;
1501
1502
0
    if (pre->precomp_storage) {
1503
0
        OPENSSL_cleanse(pre->precomp,
1504
0
                        32 * sizeof(unsigned char) * (1 << pre->w) * 2 * 37);
1505
0
        OPENSSL_free(pre->precomp_storage);
1506
0
    }
1507
0
    OPENSSL_cleanse(pre, sizeof *pre);
1508
0
    OPENSSL_free(pre);
1509
0
}
1510
1511
static int ecp_nistz256_window_have_precompute_mult(const EC_GROUP *group)
1512
0
{
1513
    /* There is a hard-coded table for the default generator. */
1514
0
    const EC_POINT *generator = EC_GROUP_get0_generator(group);
1515
0
    if (generator != NULL && ecp_nistz256_is_affine_G(generator)) {
1516
        /* There is a hard-coded table for the default generator. */
1517
0
        return 1;
1518
0
    }
1519
1520
0
    return EC_EX_DATA_get_data(group->extra_data, ecp_nistz256_pre_comp_dup,
1521
0
                               ecp_nistz256_pre_comp_free,
1522
0
                               ecp_nistz256_pre_comp_clear_free) != NULL;
1523
0
}
1524
1525
const EC_METHOD *EC_GFp_nistz256_method(void)
1526
0
{
1527
0
    static const EC_METHOD ret = {
1528
0
        EC_FLAGS_DEFAULT_OCT,
1529
0
        NID_X9_62_prime_field,
1530
0
        ec_GFp_mont_group_init,
1531
0
        ec_GFp_mont_group_finish,
1532
0
        ec_GFp_mont_group_clear_finish,
1533
0
        ec_GFp_mont_group_copy,
1534
0
        ec_GFp_mont_group_set_curve,
1535
0
        ec_GFp_simple_group_get_curve,
1536
0
        ec_GFp_simple_group_get_degree,
1537
0
        ec_GFp_simple_group_check_discriminant,
1538
0
        ec_GFp_simple_point_init,
1539
0
        ec_GFp_simple_point_finish,
1540
0
        ec_GFp_simple_point_clear_finish,
1541
0
        ec_GFp_simple_point_copy,
1542
0
        ec_GFp_simple_point_set_to_infinity,
1543
0
        ec_GFp_simple_set_Jprojective_coordinates_GFp,
1544
0
        ec_GFp_simple_get_Jprojective_coordinates_GFp,
1545
0
        ec_GFp_simple_point_set_affine_coordinates,
1546
0
        ecp_nistz256_get_affine,
1547
0
        0, 0, 0,
1548
0
        ec_GFp_simple_add,
1549
0
        ec_GFp_simple_dbl,
1550
0
        ec_GFp_simple_invert,
1551
0
        ec_GFp_simple_is_at_infinity,
1552
0
        ec_GFp_simple_is_on_curve,
1553
0
        ec_GFp_simple_cmp,
1554
0
        ec_GFp_simple_make_affine,
1555
0
        ec_GFp_simple_points_make_affine,
1556
0
        ecp_nistz256_points_mul,                    /* mul */
1557
0
        ecp_nistz256_mult_precompute,               /* precompute_mult */
1558
0
        ecp_nistz256_window_have_precompute_mult,   /* have_precompute_mult */
1559
0
        ec_GFp_mont_field_mul,
1560
0
        ec_GFp_mont_field_sqr,
1561
0
        0,                                          /* field_div */
1562
0
        ec_GFp_mont_field_encode,
1563
0
        ec_GFp_mont_field_decode,
1564
0
        ec_GFp_mont_field_set_to_one
1565
0
    };
1566
1567
0
    return &ret;
1568
0
}