Coverage Report

Created: 2025-12-31 06:58

/src/openssl30/crypto/ec/ecp_nistputil.c
 Line| Count|Source
    1|      |/*
    2|      | * Copyright 2011-2021 The OpenSSL Project Authors. All Rights Reserved.
    3|      | *
    4|      | * Licensed under the Apache License 2.0 (the "License").  You may not use
    5|      | * this file except in compliance with the License.  You can obtain a copy
    6|      | * in the file LICENSE in the source distribution or at
    7|      | * https://www.openssl.org/source/license.html
    8|      | */
    9|      |
   10|      |/* Copyright 2011 Google Inc.
   11|      | *
   12|      | * Licensed under the Apache License, Version 2.0 (the "License");
   13|      | *
   14|      | * you may not use this file except in compliance with the License.
   15|      | * You may obtain a copy of the License at
   16|      | *
   17|      | *     http://www.apache.org/licenses/LICENSE-2.0
   18|      | *
   19|      | *  Unless required by applicable law or agreed to in writing, software
   20|      | *  distributed under the License is distributed on an "AS IS" BASIS,
   21|      | *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   22|      | *  See the License for the specific language governing permissions and
   23|      | *  limitations under the License.
   24|      | */
   25|      |
   26|      |/*
   27|      | * ECDSA low level APIs are deprecated for public use, but still ok for
   28|      | * internal use.
   29|      | */
   30|      |#include "internal/deprecated.h"
   31|      |
   32|      |#include <openssl/opensslconf.h>
   33|      |
   34|      |/*
   35|      | * Common utility functions for ecp_nistp224.c, ecp_nistp256.c, ecp_nistp521.c.
   36|      | */
   37|      |
   38|      |#include <stddef.h>
   39|      |#include "ec_local.h"
   40|      |
   41|      |/*
   42|      | * Convert an array of points into affine coordinates. (If the point at
   43|      | * infinity is found (Z = 0), it remains unchanged.) This function is
   44|      | * essentially an equivalent to EC_POINTs_make_affine(), but works with the
   45|      | * internal representation of points as used by ecp_nistp###.c rather than
   46|      | * with (BIGNUM-based) EC_POINT data structures. point_array is the
   47|      | * input/output buffer ('num' points in projective form, i.e. three
   48|      | * coordinates each), based on an internal representation of field elements
   49|      | * of size 'felem_size'. tmp_felems needs to point to a temporary array of
   50|      | * 'num'+1 field elements for storage of intermediate values.
   51|      | */
   52|      |void ossl_ec_GFp_nistp_points_make_affine_internal(size_t num, void *point_array,
   53|      |    size_t felem_size,
   54|      |    void *tmp_felems,
   55|      |    void (*felem_one)(void *out),
   56|      |    int (*felem_is_zero)(const void
   57|      |            *in),
   58|      |    void (*felem_assign)(void *out,
   59|      |        const void
   60|      |            *in),
   61|      |    void (*felem_square)(void *out,
   62|      |        const void
   63|      |            *in),
   64|      |    void (*felem_mul)(void *out,
   65|      |        const void
   66|      |            *in1,
   67|      |        const void
   68|      |            *in2),
   69|      |    void (*felem_inv)(void *out,
   70|      |        const void
   71|      |            *in),
   72|      |    void (*felem_contract)(void
   73|      |                               *out,
   74|      |        const void
   75|      |            *in))
   76|     0|{
   77|     0|    int i = 0;
   78|      |
   79|     0|#define tmp_felem(I) (&((char *)tmp_felems)[(I) * felem_size])
   80|     0|#define X(I) (&((char *)point_array)[3 * (I) * felem_size])
   81|     0|#define Y(I) (&((char *)point_array)[(3 * (I) + 1) * felem_size])
   82|     0|#define Z(I) (&((char *)point_array)[(3 * (I) + 2) * felem_size])
   83|      |
   84|     0|    if (!felem_is_zero(Z(0)))
   85|     0|        felem_assign(tmp_felem(0), Z(0));
   86|     0|    else
   87|     0|        felem_one(tmp_felem(0));
   88|     0|    for (i = 1; i < (int)num; i++) {
   89|     0|        if (!felem_is_zero(Z(i)))
   90|     0|            felem_mul(tmp_felem(i), tmp_felem(i - 1), Z(i));
   91|     0|        else
   92|     0|            felem_assign(tmp_felem(i), tmp_felem(i - 1));
   93|     0|    }
   94|      |    /*
   95|      |     * Now each tmp_felem(i) is the product of Z(0) .. Z(i), skipping any
   96|      |     * zero-valued factors: if Z(i) = 0, we essentially pretend that Z(i) = 1
   97|      |     */
   98|      |
   99|     0|    felem_inv(tmp_felem(num - 1), tmp_felem(num - 1));
  100|     0|    for (i = num - 1; i >= 0; i--) {
  101|     0|        if (i > 0)
  102|      |            /*
  103|      |             * tmp_felem(i-1) is the product of Z(0) .. Z(i-1), tmp_felem(i)
  104|      |             * is the inverse of the product of Z(0) .. Z(i)
  105|      |             */
  106|      |            /* 1/Z(i) */
  107|     0|            felem_mul(tmp_felem(num), tmp_felem(i - 1), tmp_felem(i));
  108|     0|        else
  109|     0|            felem_assign(tmp_felem(num), tmp_felem(0)); /* 1/Z(0) */
  110|      |
  111|     0|        if (!felem_is_zero(Z(i))) {
  112|     0|            if (i > 0)
  113|      |                /*
  114|      |                 * For next iteration, replace tmp_felem(i-1) by its inverse
  115|      |                 */
  116|     0|                felem_mul(tmp_felem(i - 1), tmp_felem(i), Z(i));
  117|      |
  118|      |            /*
  119|      |             * Convert point (X, Y, Z) into affine form (X/(Z^2), Y/(Z^3), 1)
  120|      |             */
  121|     0|            felem_square(Z(i), tmp_felem(num)); /* 1/(Z^2) */
  122|     0|            felem_mul(X(i), X(i), Z(i)); /* X/(Z^2) */
  123|     0|            felem_mul(Z(i), Z(i), tmp_felem(num)); /* 1/(Z^3) */
  124|     0|            felem_mul(Y(i), Y(i), Z(i)); /* Y/(Z^3) */
  125|     0|            felem_contract(X(i), X(i));
  126|     0|            felem_contract(Y(i), Y(i));
  127|     0|            felem_one(Z(i));
  128|     0|        } else {
  129|     0|            if (i > 0)
  130|      |                /*
  131|      |                 * For next iteration, replace tmp_felem(i-1) by its inverse
  132|      |                 */
  133|     0|                felem_assign(tmp_felem(i - 1), tmp_felem(i));
  134|     0|        }
  135|     0|    }
  136|     0|}
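
The function above amortizes the cost of field inversion across all 'num' points: a forward pass builds running products of the Z coordinates, a single felem_inv() inverts the final product, and a backward pass recovers each individual 1/Z(i) with two multiplications before converting the point to affine form. The sketch below is a minimal, standalone illustration of the same batched-inversion idea (often attributed to Montgomery), not OpenSSL code: the modulus MOD_P and the helpers mul_mod, inv_mod and batch_invert are invented for this example, the special-casing of Z == 0 is omitted, and the local variable inv_i plays the role of the extra tmp_felem(num) slot.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MOD_P 65537u   /* a small prime standing in for the real field modulus */

static uint32_t mul_mod(uint32_t a, uint32_t b)
{
    return (uint32_t)(((uint64_t)a * b) % MOD_P);
}

/* a^(MOD_P-2) mod MOD_P == 1/a mod MOD_P for a != 0 (Fermat's little theorem). */
static uint32_t inv_mod(uint32_t a)
{
    uint32_t result = 1, base = a % MOD_P, e = MOD_P - 2;

    while (e > 0) {
        if (e & 1)
            result = mul_mod(result, base);
        base = mul_mod(base, base);
        e >>= 1;
    }
    return result;
}

/* Invert vals[0..num-1] in place with a single inv_mod() call, mirroring the
 * forward/backward passes that the function above performs on the Z
 * coordinates (minus the Z == 0 handling). */
static void batch_invert(uint32_t *vals, size_t num, uint32_t *tmp)
{
    size_t i;

    tmp[0] = vals[0];                      /* tmp[i] = vals[0] * ... * vals[i] */
    for (i = 1; i < num; i++)
        tmp[i] = mul_mod(tmp[i - 1], vals[i]);

    tmp[num - 1] = inv_mod(tmp[num - 1]);  /* the only real inversion */

    for (i = num; i-- > 1; ) {
        uint32_t inv_i = mul_mod(tmp[i], tmp[i - 1]);   /* 1 / vals[i] */

        tmp[i - 1] = mul_mod(tmp[i], vals[i]);  /* inverse of vals[0] * ... * vals[i-1] */
        vals[i] = inv_i;
    }
    vals[0] = tmp[0];
}

int main(void)
{
    uint32_t vals[5] = { 2, 3, 5, 7, 12345 }, orig[5], tmp[5];
    size_t i;

    for (i = 0; i < 5; i++)
        orig[i] = vals[i];

    batch_invert(vals, 5, tmp);

    for (i = 0; i < 5; i++)
        assert(mul_mod(orig[i], vals[i]) == 1);
    printf("batched inversion: 5 inverses from one inv_mod() call\n");
    return 0;
}

The assertions confirm that every element is replaced by its modular inverse even though inv_mod() runs only once, which is exactly the trade the OpenSSL routine makes: one expensive felem_inv() plus a linear number of cheap felem_mul() calls.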
  137|      |
  138|      |/*-
  139|      | * This function looks at 5+1 scalar bits (5 current, 1 adjacent less
  140|      | * significant bit), and recodes them into a signed digit for use in fast point
  141|      | * multiplication: the use of signed rather than unsigned digits means that
  142|      | * fewer points need to be precomputed, given that point inversion is easy
  143|      | * (a precomputed point dP makes -dP available as well).
  144|      | *
  145|      | * BACKGROUND:
  146|      | *
  147|      | * Signed digits for multiplication were introduced by Booth ("A signed binary
  148|      | * multiplication technique", Quart. Journ. Mech. and Applied Math., vol. IV,
  149|      | * pt. 2 (1951), pp. 236-240), in that case for multiplication of integers.
  150|      | * Booth's original encoding did not generally improve the density of nonzero
  151|      | * digits over the binary representation, and was merely meant to simplify the
  152|      | * handling of signed factors given in two's complement; but it has since been
  153|      | * shown to be the basis of various signed-digit representations that do have
  154|      | * further advantages, including the wNAF, using the following general approach:
  155|      | *
  156|      | * (1) Given a binary representation
  157|      | *
  158|      | *       b_k  ...  b_2  b_1  b_0,
  159|      | *
  160|      | *     of a nonnegative integer (b_k in {0, 1}), rewrite it in digits 0, 1, -1
  161|      | *     by using bit-wise subtraction as follows:
  162|      | *
  163|      | *        b_k     b_(k-1)  ...  b_2  b_1  b_0
  164|      | *      -         b_k      ...  b_3  b_2  b_1  b_0
  165|      | *       -----------------------------------------
  166|      | *        s_(k+1) s_k      ...  s_3  s_2  s_1  s_0
  167|      | *
  168|      | *     A left-shift followed by subtraction of the original value yields a new
  169|      | *     representation of the same value, using signed bits s_i = b_(i-1) - b_i.
  170|      | *     This representation from Booth's paper has since appeared in the
  171|      | *     literature under a variety of different names including "reversed binary
  172|      | *     form", "alternating greedy expansion", "mutual opposite form", and
  173|      | *     "sign-alternating {+-1}-representation".
  174|      | *
  175|      | *     An interesting property is that among the nonzero bits, values 1 and -1
  176|      | *     strictly alternate.
  177|      | *
  178|      | * (2) Various window schemes can be applied to the Booth representation of
  179|      | *     integers: for example, right-to-left sliding windows yield the wNAF
  180|      | *     (a signed-digit encoding independently discovered by various researchers
  181|      | *     in the 1990s), and left-to-right sliding windows yield a left-to-right
  182|      | *     equivalent of the wNAF (independently discovered by various researchers
  183|      | *     around 2004).
  184|      | *
  185|      | * To prevent leaking information through side channels in point multiplication,
  186|      | * we need to recode the given integer into a regular pattern: sliding windows
  187|      | * as in wNAFs won't do, we need their fixed-window equivalent -- which is a few
  188|      | * decades older: we'll be using the so-called "modified Booth encoding" due to
  189|      | * MacSorley ("High-speed arithmetic in binary computers", Proc. IRE, vol. 49
  190|      | * (1961), pp. 67-91), in a radix-2^5 setting.  That is, we always combine five
  191|      | * signed bits into a signed digit:
  192|      | *
  193|      | *       s_(5j + 4) s_(5j + 3) s_(5j + 2) s_(5j + 1) s_(5j)
  194|      | *
  195|      | * The sign-alternating property implies that the resulting digit values are
  196|      | * integers from -16 to 16.
  197|      | *
  198|      | * Of course, we don't actually need to compute the signed digits s_i as an
  199|      | * intermediate step (that's just a nice way to see how this scheme relates
  200|      | * to the wNAF): a direct computation obtains the recoded digit from the
  201|      | * six bits b_(5j + 4) ... b_(5j - 1).
  202|      | *
  203|      | * This function takes those six bits as an integer (0 .. 63), writing the
  204|      | * recoded digit to *sign (0 for positive, 1 for negative) and *digit (absolute
  205|      | * value, in the range 0 .. 16).  Note that this integer essentially provides
  206|      | * the input bits "shifted to the left" by one position: for example, the input
  207|      | * to compute the least significant recoded digit, given that there's no bit
  208|      | * b_-1, has to be b_4 b_3 b_2 b_1 b_0 0.
  209|      | *
  210|      | */
  211|      |void ossl_ec_GFp_nistp_recode_scalar_bits(unsigned char *sign,
  212|      |    unsigned char *digit, unsigned char in)
  213|  190k|{
  214|  190k|    unsigned char s, d;
  215|      |
  216|  190k|    s = ~((in >> 5) - 1); /* sets all bits to MSB(in), 'in' seen as
  217|      |                           * 6-bit value */
  218|  190k|    d = (1 << 6) - in - 1;
  219|  190k|    d = (d & s) | (in & ~s);
  220|  190k|    d = (d >> 1) + (d & 1);
  221|      |
  222|  190k|    *sign = s & 1;
  223|  190k|    *digit = d;
  224|  190k|}
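
As a cross-check of the recoding scheme described in the comment above, the standalone sketch below (not OpenSSL code; recode_window simply repeats the bit trick from ossl_ec_GFp_nistp_recode_scalar_bits so the program is self-contained, and the 20-bit/five-window loop bounds are chosen only to keep the test small) splits every 20-bit scalar into 5-bit windows, feeds each window the six bits b_(5j+4) .. b_(5j-1) "shifted to the left" by one position as required, and verifies that every digit lies in -16 .. 16, that the signed digit equals (in & 31) - (in >> 1), and that sum_j d_j * 32^j reproduces the scalar.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same bit manipulation as ossl_ec_GFp_nistp_recode_scalar_bits, copied here
 * so the test program stands alone. */
static void recode_window(unsigned char *sign, unsigned char *digit,
                          unsigned char in)
{
    unsigned char s, d;

    s = ~((in >> 5) - 1);     /* 0xff if bit 5 of 'in' is set, 0 otherwise */
    d = (1 << 6) - in - 1;    /* 63 - in */
    d = (d & s) | (in & ~s);  /* pick 63 - in for "negative" windows */
    d = (d >> 1) + (d & 1);   /* ceil(d / 2) */

    *sign = s & 1;
    *digit = d;
}

int main(void)
{
    uint32_t scalar;

    for (scalar = 0; scalar < (1u << 20); scalar++) {
        int64_t acc = 0;
        int j;

        /* A 20-bit scalar needs five 5-bit windows: the top window absorbs
         * the +1 that the Booth rewriting can carry out of bit 19. */
        for (j = 0; j < 5; j++) {
            /* Six input bits b_(5j+4) .. b_(5j-1), with b_(-1) taken as 0,
             * i.e. the scalar bits "shifted to the left" by one position. */
            unsigned char in =
                (unsigned char)((((uint64_t)scalar << 1) >> (5 * j)) & 0x3f);
            unsigned char sign, digit;
            int64_t value;

            recode_window(&sign, &digit, in);
            assert(digit <= 16);       /* digits range over -16 .. 16 */
            value = sign ? -(int64_t)digit : (int64_t)digit;

            /* Closed form of the signed digit for a 6-bit input. */
            assert(value == (int64_t)(in & 0x1f) - (int64_t)(in >> 1));

            acc += value * ((int64_t)1 << (5 * j));   /* d_j * 32^j */
        }
        assert(acc == (int64_t)scalar);
    }
    printf("recoding reconstructs all 20-bit scalars\n");
    return 0;
}

One corner case worth noting: in == 63 yields sign = 1 with digit = 0, i.e. a "negative zero", which is why the check above compares signed digit values rather than the raw (sign, digit) pairs.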