/src/freeradius-server/src/protocols/radius/encode.c
Line | Count | Source |
1 | | /* |
2 | | * This library is free software; you can redistribute it and/or |
3 | | * modify it under the terms of the GNU Lesser General Public |
4 | | * License as published by the Free Software Foundation; either |
5 | | * version 2.1 of the License, or (at your option) any later version. |
6 | | * |
7 | | * This library is distributed in the hope that it will be useful, |
8 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
9 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
10 | | * Lesser General Public License for more details. |
11 | | * |
12 | | * You should have received a copy of the GNU Lesser General Public |
13 | | * License along with this library; if not, write to the Free Software |
14 | | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA |
15 | | */ |
16 | | |
17 | | /** |
18 | | * $Id: 58855b647352386e991eb723df67a0351ab1e44f $ |
19 | | * |
20 | | * @file protocols/radius/encode.c |
21 | | * @brief Functions to encode RADIUS attributes |
22 | | * |
23 | | * @copyright 2000-2003,2006-2015 The FreeRADIUS server project |
24 | | */ |
25 | | RCSID("$Id: 58855b647352386e991eb723df67a0351ab1e44f $") |
26 | | |
27 | | #include <freeradius-devel/util/dbuff.h> |
28 | | #include <freeradius-devel/util/md5.h> |
29 | | #include <freeradius-devel/util/struct.h> |
30 | | #include <freeradius-devel/io/test_point.h> |
31 | | #include <freeradius-devel/protocol/radius/freeradius.internal.h> |
32 | | #include "attrs.h" |
33 | | |
34 | 0 | #define TAG_VALID(x) ((x) > 0 && (x) < 0x20) |
35 | | |
36 | | static ssize_t encode_value(fr_dbuff_t *dbuff, |
37 | | fr_da_stack_t *da_stack, unsigned int depth, |
38 | | fr_dcursor_t *cursor, void *encode_ctx); |
39 | | |
40 | | static ssize_t encode_child(fr_dbuff_t *dbuff, |
41 | | fr_da_stack_t *da_stack, unsigned int depth, |
42 | | fr_dcursor_t *cursor, void *encode_ctx); |
43 | | |
44 | | /** "encrypt" a password RADIUS style |
45 | | * |
46 | | * Input and output buffers can be identical if in-place encryption is needed. |
47 | | */ |
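| | /* |
| | * A rough sketch of the construction used below (RFC 2865 section 5.2), |
| | * where S is the shared secret, RA the Request Authenticator, and p(i) |
| | * the 16-octet blocks of the zero-padded password: |
| | * |
| | * b(1) = MD5(S + RA) c(1) = p(1) XOR b(1) |
| | * b(i) = MD5(S + c(i-1)) c(i) = p(i) XOR b(i) |
| | */ |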
48 | | static ssize_t encode_password(fr_dbuff_t *dbuff, fr_dbuff_marker_t *input, size_t inlen, fr_radius_encode_ctx_t *packet_ctx) |
49 | 0 | { |
50 | 0 | fr_md5_ctx_t *md5_ctx, *md5_ctx_old; |
51 | 0 | uint8_t digest[RADIUS_AUTH_VECTOR_LENGTH]; |
52 | 0 | uint8_t passwd[RADIUS_MAX_PASS_LENGTH] = {0}; |
53 | 0 | size_t i, n; |
54 | 0 | size_t len; |
55 | | |
56 | | /* |
57 | | * If the length is zero, round it up. |
58 | | */ |
59 | 0 | len = inlen; |
60 | |
|
61 | 0 | if (len > RADIUS_MAX_PASS_LENGTH) len = RADIUS_MAX_PASS_LENGTH; |
62 | |
|
63 | 0 | (void) fr_dbuff_out_memcpy(passwd, input, len); |
64 | 0 | if (len < sizeof(passwd)) memset(passwd + len, 0, sizeof(passwd) - len); |
65 | |
|
66 | 0 | if (len == 0) len = AUTH_PASS_LEN; |
67 | 0 | else if ((len & 0x0f) != 0) { |
68 | 0 | len += 0x0f; |
69 | 0 | len &= ~0x0f; |
70 | 0 | } |
71 | |
|
72 | 0 | md5_ctx = fr_md5_ctx_alloc_from_list(); |
73 | 0 | md5_ctx_old = fr_md5_ctx_alloc_from_list(); |
74 | |
|
75 | 0 | fr_md5_update(md5_ctx, (uint8_t const *) packet_ctx->common->secret, packet_ctx->common->secret_length); |
76 | 0 | fr_md5_ctx_copy(md5_ctx_old, md5_ctx); |
77 | | |
78 | | /* |
79 | | * Do first pass. |
80 | | */ |
81 | 0 | fr_md5_update(md5_ctx, packet_ctx->request_authenticator, AUTH_PASS_LEN); |
82 | |
|
83 | 0 | for (n = 0; n < len; n += AUTH_PASS_LEN) { |
84 | 0 | if (n > 0) { |
85 | 0 | fr_md5_ctx_copy(md5_ctx, md5_ctx_old); |
86 | 0 | fr_md5_update(md5_ctx, passwd + n - AUTH_PASS_LEN, AUTH_PASS_LEN); |
87 | 0 | } |
88 | |
|
89 | 0 | fr_md5_final(digest, md5_ctx); |
90 | 0 | for (i = 0; i < AUTH_PASS_LEN; i++) passwd[i + n] ^= digest[i]; |
91 | 0 | } |
92 | |
|
93 | 0 | fr_md5_ctx_free_from_list(&md5_ctx); |
94 | 0 | fr_md5_ctx_free_from_list(&md5_ctx_old); |
95 | |
|
96 | 0 | return fr_dbuff_in_memcpy(dbuff, passwd, len); |
97 | 0 | } |
98 | | |
99 | | |
100 | | static ssize_t encode_tunnel_password(fr_dbuff_t *dbuff, fr_dbuff_marker_t *in, size_t inlen, fr_radius_encode_ctx_t *packet_ctx) |
101 | 0 | { |
102 | 0 | fr_md5_ctx_t *md5_ctx, *md5_ctx_old; |
103 | 0 | uint8_t digest[RADIUS_AUTH_VECTOR_LENGTH]; |
104 | 0 | uint8_t tpasswd[RADIUS_MAX_STRING_LENGTH]; |
105 | 0 | size_t i, n; |
106 | 0 | uint32_t r; |
107 | 0 | size_t output_len, encrypted_len, padding; |
108 | 0 | ssize_t slen; |
109 | 0 | fr_dbuff_t work_dbuff = FR_DBUFF_MAX(dbuff, RADIUS_MAX_STRING_LENGTH); |
110 | | |
111 | | /* |
112 | | * Limit the maximum size of the input password. 2 bytes |
113 | | * are taken up by the salt, and one by the encoded |
114 | | * "length" field. |
115 | | */ |
116 | 0 | if (inlen > (RADIUS_MAX_STRING_LENGTH - 3)) { |
117 | 0 | fail: |
118 | 0 | fr_strerror_const("Input password is too large for tunnel password encoding"); |
119 | 0 | return -(inlen + 3); |
120 | 0 | } |
121 | | |
122 | | /* |
123 | | * Length of the encrypted data is the clear-text |
124 | | * password length plus one byte which encodes the length |
125 | | * of the password. We round up to the nearest encoding |
126 | | * block, and bound it by the size of the output buffer, |
127 | | * while accounting for 2 bytes of salt. |
128 | | * |
129 | | * And also ensuring that we don't truncate the input |
130 | | * password. |
131 | | */ |
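| | /* |
| | * Illustrative only: a 20 octet password gives |
| | * encrypted_len = ROUND_UP(20 + 1, 16) = 32, padding = 11, and |
| | * output_len = 34 once the two salt octets are added. |
| | */ |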
132 | 0 | encrypted_len = ROUND_UP(inlen + 1, 16); |
133 | 0 | if (encrypted_len > (RADIUS_MAX_STRING_LENGTH - 2)) encrypted_len = (RADIUS_MAX_STRING_LENGTH - 2); |
134 | | |
135 | | /* |
136 | | * Get the number of padding bytes in the last block. |
137 | | */ |
138 | 0 | padding = encrypted_len - (inlen + 1); |
139 | |
|
140 | 0 | output_len = encrypted_len + 2; /* account for the salt */ |
141 | | |
142 | | /* |
143 | | * We will have up to 253 octets of data in the output |
144 | | * buffer, some of which are padding. |
145 | | * |
146 | | * If we over-run the output buffer, see if we can drop |
147 | | * some of the padding bytes. If not, we return an error |
148 | | * instead of truncating the password. |
149 | | * |
150 | | * Otherwise we lower the amount of data we copy into the |
151 | | * output buffer, because the last bit is just padding, |
152 | | * and can be safely discarded. |
153 | | */ |
154 | 0 | slen = fr_dbuff_set(&work_dbuff, output_len); |
155 | 0 | if (slen < 0) { |
156 | 0 | if (((size_t) -slen) > padding) goto fail; |
157 | | |
158 | 0 | output_len += slen; |
159 | 0 | } |
160 | 0 | fr_dbuff_set_to_start(&work_dbuff); |
161 | | |
162 | | /* |
163 | | * Copy the password over, and fill the remainder with random data. |
164 | | */ |
165 | 0 | (void) fr_dbuff_out_memcpy(tpasswd + 3, in, inlen); |
166 | |
|
167 | 0 | for (i = 3 + inlen; i < sizeof(tpasswd); i++) { |
168 | 0 | tpasswd[i] = fr_fast_rand(&packet_ctx->rand_ctx); |
169 | 0 | } |
170 | | |
171 | | /* |
172 | | * Generate salt. The RFCs say: |
173 | | * |
174 | | * The high bit of salt[0] must be set, each salt in a |
175 | | * packet should be unique, and they should be random |
176 | | * |
177 | | * So, we set the high bit, add in a counter, and then |
178 | | * add in some PRNG data, which should be OK. |
179 | | */ |
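| | /* |
| | * i.e. salt[0] = 1 c c c r r r r (c = low three bits of a per-packet |
| | * counter, r = PRNG bits), salt[1] = eight more PRNG bits. The next |
| | * octet then carries the clear-text password length. |
| | */ |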
180 | 0 | r = fr_fast_rand(&packet_ctx->rand_ctx); |
181 | 0 | tpasswd[0] = (0x80 | (((packet_ctx->salt_offset++) & 0x07) << 4) | ((r >> 8) & 0x0f)); |
182 | 0 | tpasswd[1] = r & 0xff; |
183 | 0 | tpasswd[2] = inlen; /* length of the password string */ |
184 | |
|
185 | 0 | md5_ctx = fr_md5_ctx_alloc_from_list(); |
186 | 0 | md5_ctx_old = fr_md5_ctx_alloc_from_list(); |
187 | |
|
188 | 0 | fr_md5_update(md5_ctx, (uint8_t const *) packet_ctx->common->secret, packet_ctx->common->secret_length); |
189 | 0 | fr_md5_ctx_copy(md5_ctx_old, md5_ctx); |
190 | |
|
191 | 0 | fr_md5_update(md5_ctx, packet_ctx->request_authenticator, RADIUS_AUTH_VECTOR_LENGTH); |
192 | 0 | fr_md5_update(md5_ctx, &tpasswd[0], 2); |
193 | | |
194 | | /* |
195 | | * Do various hashing, and XOR the length+password with |
196 | | * the output of the hash blocks. |
197 | | */ |
198 | 0 | for (n = 0; n < encrypted_len; n += AUTH_PASS_LEN) { |
199 | 0 | size_t block_len; |
200 | |
|
201 | 0 | if (n > 0) { |
202 | 0 | fr_md5_ctx_copy(md5_ctx, md5_ctx_old); |
203 | 0 | fr_md5_update(md5_ctx, tpasswd + 2 + n - AUTH_PASS_LEN, AUTH_PASS_LEN); |
204 | 0 | } |
205 | 0 | fr_md5_final(digest, md5_ctx); |
206 | |
|
207 | 0 | block_len = encrypted_len - n; |
208 | 0 | if (block_len > AUTH_PASS_LEN) block_len = AUTH_PASS_LEN; |
209 | |
|
210 | 0 | for (i = 0; i < block_len; i++) tpasswd[i + 2 + n] ^= digest[i]; |
211 | 0 | } |
212 | |
|
213 | 0 | fr_md5_ctx_free_from_list(&md5_ctx); |
214 | 0 | fr_md5_ctx_free_from_list(&md5_ctx_old); |
215 | |
|
216 | 0 | FR_DBUFF_IN_MEMCPY_RETURN(&work_dbuff, tpasswd, output_len); |
217 | | |
218 | 0 | return fr_dbuff_set(dbuff, &work_dbuff); |
219 | 0 | } |
220 | | |
221 | | /* |
222 | | * Encode the contents of an attribute of type TLV. |
223 | | */ |
224 | | static ssize_t encode_tlv(fr_dbuff_t *dbuff, |
225 | | fr_da_stack_t *da_stack, unsigned int depth, |
226 | | fr_dcursor_t *cursor, void *encode_ctx) |
227 | 0 | { |
228 | 0 | ssize_t slen; |
229 | 0 | fr_pair_t const *vp = fr_dcursor_current(cursor); |
230 | 0 | fr_dict_attr_t const *da = da_stack->da[depth]; |
231 | 0 | fr_dbuff_t work_dbuff = FR_DBUFF_MAX(dbuff, RADIUS_MAX_STRING_LENGTH); |
232 | |
|
233 | 0 | for (;;) { |
234 | 0 | FR_PROTO_STACK_PRINT(da_stack, depth); |
235 | | |
236 | | /* |
237 | | * This attribute carries sub-TLVs. The sub-TLVs |
238 | | * can only carry a total of 253 bytes of data. |
239 | | */ |
240 | | |
241 | | /* |
242 | | * Determine the nested type and call the appropriate encoder |
243 | | */ |
244 | 0 | if (!da_stack->da[depth + 1]) { |
245 | 0 | fr_dcursor_t child_cursor; |
246 | |
|
247 | 0 | if (vp->da != da_stack->da[depth]) { |
248 | 0 | fr_strerror_printf("%s: Can't encode empty TLV", __FUNCTION__); |
249 | 0 | return 0; |
250 | 0 | } |
251 | | |
252 | 0 | fr_pair_dcursor_child_iter_init(&child_cursor, &vp->vp_group, cursor); |
253 | 0 | vp = fr_dcursor_current(&child_cursor); |
254 | 0 | fr_proto_da_stack_build(da_stack, vp->da); |
255 | | |
256 | | /* |
257 | | * Call ourselves recursively to encode children. |
258 | | */ |
259 | 0 | slen = encode_tlv(&work_dbuff, da_stack, depth, &child_cursor, encode_ctx); |
260 | 0 | if (slen < 0) return slen; |
261 | | |
262 | 0 | vp = fr_dcursor_next(cursor); |
263 | 0 | fr_proto_da_stack_build(da_stack, vp ? vp->da : NULL); |
264 | |
|
265 | 0 | } else { |
266 | 0 | slen = encode_child(&work_dbuff, da_stack, depth + 1, cursor, encode_ctx); |
267 | 0 | } |
268 | 0 | if (slen < 0) return slen; |
269 | | |
270 | | /* |
271 | | * If nothing updated the attribute, stop |
272 | | */ |
273 | 0 | if (!fr_dcursor_current(cursor) || (vp == fr_dcursor_current(cursor))) break; |
274 | | |
275 | | /* |
276 | | * We can encode multiple sub-TLVs if, after |
277 | | * rebuilding the TLV stack, the attribute |
278 | | * at this depth is the same. |
279 | | */ |
280 | 0 | if ((da != da_stack->da[depth]) || (da_stack->depth < da->depth)) break; |
281 | 0 | vp = fr_dcursor_current(cursor); |
282 | 0 | } |
283 | | |
284 | 0 | return fr_dbuff_set(dbuff, &work_dbuff); |
285 | 0 | } |
286 | | |
287 | | static ssize_t encode_pairs(fr_dbuff_t *dbuff, fr_pair_list_t const *vps, void *encode_ctx) |
288 | 0 | { |
289 | 0 | ssize_t slen; |
290 | 0 | fr_pair_t const *vp; |
291 | 0 | fr_dcursor_t cursor; |
292 | | |
293 | | /* |
294 | | * Note that we skip tags inside of tags! |
295 | | */ |
296 | 0 | fr_pair_dcursor_iter_init(&cursor, vps, fr_proto_next_encodable, dict_radius); |
297 | 0 | while ((vp = fr_dcursor_current(&cursor))) { |
298 | 0 | PAIR_VERIFY(vp); |
299 | | |
300 | | /* |
301 | | * Encode an individual VP |
302 | | */ |
303 | 0 | slen = fr_radius_encode_pair(dbuff, &cursor, encode_ctx); |
304 | 0 | if (slen < 0) return slen; |
305 | 0 | } |
306 | | |
307 | 0 | return fr_dbuff_used(dbuff); |
308 | 0 | } |
309 | | |
310 | | |
311 | | /** Encodes the data portion of an attribute |
312 | | * |
313 | | * @return |
314 | | * > 0, Length of the data portion. |
315 | | * = 0, we could not encode anything, skip this attribute (and don't encode the header) |
316 | | * unless it's one of a list of exceptions. |
317 | | * < 0, How many additional bytes we'd need as a negative integer. |
318 | | * PAIR_ENCODE_FATAL_ERROR - Abort encoding the packet. |
319 | | */ |
320 | | static ssize_t encode_value(fr_dbuff_t *dbuff, |
321 | | fr_da_stack_t *da_stack, unsigned int depth, |
322 | | fr_dcursor_t *cursor, void *encode_ctx) |
323 | 0 | { |
324 | 0 | ssize_t slen; |
325 | 0 | size_t len; |
326 | 0 | fr_pair_t const *vp = fr_dcursor_current(cursor); |
327 | 0 | fr_dict_attr_t const *da = da_stack->da[depth]; |
328 | 0 | fr_radius_encode_ctx_t *packet_ctx = encode_ctx; |
329 | 0 | fr_dbuff_t work_dbuff = FR_DBUFF(dbuff); |
330 | 0 | fr_dbuff_t value_dbuff; |
331 | 0 | fr_dbuff_marker_t value_start, src, dest; |
332 | 0 | bool encrypted = false; |
333 | |
|
334 | 0 | PAIR_VERIFY(vp); |
335 | 0 | FR_PROTO_STACK_PRINT(da_stack, depth); |
336 | | |
337 | | /* |
338 | | * TLVs are just another type of value. |
339 | | */ |
340 | 0 | if (da->type == FR_TYPE_TLV) return encode_tlv(dbuff, da_stack, depth, cursor, encode_ctx); |
341 | | |
342 | 0 | if (da->type == FR_TYPE_GROUP) return fr_pair_ref_to_network(dbuff, da_stack, depth, cursor); |
343 | | |
344 | | /* |
345 | | * Catch errors early on. |
346 | | */ |
347 | 0 | if (flag_encrypted(&vp->da->flags) && !packet_ctx) { |
348 | 0 | fr_strerror_const("Asked to encrypt attribute, but no packet context provided"); |
349 | 0 | return PAIR_ENCODE_FATAL_ERROR; |
350 | 0 | } |
351 | | |
352 | | /* |
353 | | * This has special requirements. |
354 | | */ |
355 | 0 | if ((vp->vp_type == FR_TYPE_STRUCT) || (da->type == FR_TYPE_STRUCT)) { |
356 | 0 | slen = fr_struct_to_network(&work_dbuff, da_stack, depth, cursor, encode_ctx, encode_value, encode_child); |
357 | 0 | if (slen <= 0) return slen; |
358 | | |
359 | 0 | vp = fr_dcursor_current(cursor); |
360 | 0 | fr_proto_da_stack_build(da_stack, vp ? vp->da : NULL); |
361 | 0 | return fr_dbuff_set(dbuff, &work_dbuff); |
362 | 0 | } |
363 | | |
364 | | /* |
365 | | * If it's not a TLV, it should be a value-type RFC |
366 | | * attribute; make sure that it is. |
367 | | */ |
368 | 0 | if (da_stack->da[depth + 1] != NULL) { |
369 | 0 | fr_strerror_printf("%s: Encoding value but not at top of stack", __FUNCTION__); |
370 | 0 | return PAIR_ENCODE_FATAL_ERROR; |
371 | 0 | } |
372 | | |
373 | 0 | if (vp->da != da) { |
374 | 0 | fr_strerror_printf("%s: Top of stack does not match vp->da", __FUNCTION__); |
375 | 0 | return PAIR_ENCODE_FATAL_ERROR; |
376 | 0 | } |
377 | | |
378 | 0 | if (fr_type_is_structural(da->type)) { |
379 | 0 | fr_strerror_printf("%s: Called with structural type %s", __FUNCTION__, |
380 | 0 | fr_type_to_str(da_stack->da[depth]->type)); |
381 | 0 | return PAIR_ENCODE_FATAL_ERROR; |
382 | 0 | } |
383 | | |
384 | | /* |
385 | | * Write tag byte |
386 | | * |
387 | | * The Tag field is one octet in length and is intended to provide a |
388 | | * means of grouping attributes in the same packet which refer to the |
389 | | * same tunnel. If the value of the Tag field is greater than 0x00 |
390 | | * and less than or equal to 0x1F, it SHOULD be interpreted as |
391 | | * indicating which tunnel (of several alternatives) this attribute |
392 | | * pertains. If the Tag field is greater than 0x1F, it SHOULD be |
393 | | * interpreted as the first byte of the following String field. |
394 | | * |
395 | | * If the first byte of the string value looks like a |
396 | | * tag, then we always encode a tag byte, even one that |
397 | | * is zero. |
398 | | */ |
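| | /* |
| | * e.g. with packet_ctx->tag = 3, a tagged string "foo" goes out as |
| | * 0x03 'f' 'o' 'o'. With no tag set, a value whose first octet falls |
| | * in 0x01-0x1f gets an explicit 0x00 prefix so that a receiver cannot |
| | * misread it as a tag. |
| | */ |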
399 | 0 | if ((vp->vp_type == FR_TYPE_STRING) && flag_has_tag(&vp->da->flags)) { |
400 | 0 | if (packet_ctx->tag) { |
401 | 0 | FR_DBUFF_IN_RETURN(&work_dbuff, (uint8_t)packet_ctx->tag); |
402 | 0 | } else if (TAG_VALID(vp->vp_strvalue[0])) { |
403 | 0 | FR_DBUFF_IN_RETURN(&work_dbuff, (uint8_t)0x00); |
404 | 0 | } |
405 | 0 | } |
406 | | |
407 | | /* |
408 | | * Starting here is a value that may require encryption. |
409 | | */ |
410 | 0 | value_dbuff = FR_DBUFF(&work_dbuff); |
411 | 0 | fr_dbuff_marker(&value_start, &value_dbuff); |
412 | 0 | fr_dbuff_marker(&src, &value_dbuff); |
413 | 0 | fr_dbuff_marker(&dest, &value_dbuff); |
414 | |
|
415 | 0 | switch (vp->vp_type) { |
416 | | /* |
417 | | * IPv4 addresses are normal, but IPv6 addresses are special to RADIUS. |
418 | | */ |
419 | 0 | case FR_TYPE_COMBO_IP_ADDR: |
420 | 0 | if (vp->vp_ip.af == AF_INET) goto encode; |
421 | 0 | FALL_THROUGH; |
422 | | |
423 | | /* |
424 | | * Common encoder might add scope byte, which we don't want. |
425 | | */ |
426 | 0 | case FR_TYPE_IPV6_ADDR: |
427 | 0 | FR_DBUFF_IN_MEMCPY_RETURN(&value_dbuff, vp->vp_ipv6addr, sizeof(vp->vp_ipv6addr)); |
428 | 0 | break; |
429 | | |
430 | 0 | case FR_TYPE_COMBO_IP_PREFIX: |
431 | 0 | if (vp->vp_ip.af == AF_INET) goto ipv4_prefix; |
432 | 0 | FALL_THROUGH; |
433 | | |
434 | | /* |
435 | | * Common encoder doesn't add reserved byte |
436 | | */ |
437 | 0 | case FR_TYPE_IPV6_PREFIX: |
438 | 0 | len = fr_bytes_from_bits(vp->vp_ip.prefix); |
439 | 0 | FR_DBUFF_IN_BYTES_RETURN(&value_dbuff, 0x00, vp->vp_ip.prefix); |
440 | | /* Only copy the minimum number of address bytes required */ |
441 | 0 | FR_DBUFF_IN_MEMCPY_RETURN(&value_dbuff, (uint8_t const *)vp->vp_ipv6addr, len); |
442 | 0 | break; |
443 | | |
444 | | /* |
445 | | * Common encoder doesn't add reserved byte |
446 | | */ |
447 | 0 | case FR_TYPE_IPV4_PREFIX: |
448 | 0 | ipv4_prefix: |
449 | 0 | FR_DBUFF_IN_BYTES_RETURN(&value_dbuff, 0x00, vp->vp_ip.prefix); |
450 | 0 | FR_DBUFF_IN_MEMCPY_RETURN(&value_dbuff, (uint8_t const *)&vp->vp_ipv4addr, sizeof(vp->vp_ipv4addr)); |
451 | 0 | break; |
452 | | |
453 | | /* |
454 | | * Special handling for "abinary". Otherwise, fall |
455 | | * through to using the common encoder. |
456 | | */ |
457 | 0 | case FR_TYPE_STRING: |
458 | 0 | if (flag_abinary(&da->flags)) { |
459 | 0 | slen = fr_radius_encode_abinary(vp, &value_dbuff); |
460 | 0 | if (slen <= 0) return slen; |
461 | 0 | break; |
462 | 0 | } |
463 | 0 | FALL_THROUGH; |
464 | | |
465 | 0 | case FR_TYPE_OCTETS: |
466 | | |
467 | | /* |
468 | | * Simple data types use the common encoder. |
469 | | */ |
470 | 0 | default: |
471 | 0 | encode: |
472 | 0 | slen = fr_value_box_to_network(&value_dbuff, &vp->data); |
473 | 0 | if (slen < 0) return slen; |
474 | 0 | break; |
475 | 0 | } |
476 | | |
477 | | /* |
478 | | * No data: don't encode the value. The type and length should still |
479 | | * be written. |
480 | | */ |
481 | 0 | if (fr_dbuff_used(&value_dbuff) == 0) { |
482 | 0 | return_0: |
483 | 0 | vp = fr_dcursor_next(cursor); |
484 | 0 | fr_proto_da_stack_build(da_stack, vp ? vp->da : NULL); |
485 | 0 | return 0; |
486 | 0 | } |
487 | | |
488 | | /* |
489 | | * Encrypt the various password styles |
490 | | * |
491 | | * Attributes with encrypted values MUST be less than |
492 | | * 128 bytes long. |
493 | | */ |
494 | 0 | if (flag_encrypted(&da->flags)) switch (vp->da->flags.subtype) { |
495 | 0 | case FLAG_ENCRYPT_USER_PASSWORD: |
496 | | /* |
497 | | * Encode the password in place |
498 | | */ |
499 | 0 | slen = encode_password(&work_dbuff, &value_start, fr_dbuff_used(&value_dbuff), packet_ctx); |
500 | 0 | if (slen < 0) return slen; |
501 | 0 | encrypted = true; |
502 | 0 | break; |
503 | | |
504 | 0 | case FLAG_TAGGED_TUNNEL_PASSWORD: |
505 | 0 | case FLAG_ENCRYPT_TUNNEL_PASSWORD: |
506 | 0 | if (packet_ctx->disallow_tunnel_passwords) { |
507 | 0 | fr_strerror_const("Attributes with 'encrypt=2' set cannot go into this packet."); |
508 | 0 | goto return_0; |
509 | 0 | } |
510 | | |
511 | | /* |
512 | | * Always encode the tag even if it's zero. |
513 | | * |
514 | | * The Tunnel-Password uses 2 salt fields which |
515 | | * MAY have any value. As a result, we always |
516 | | * encode a tag. If we would omit the tag, then |
517 | | * perhaps one of the salt fields could be |
518 | | * mistaken for the tag. |
519 | | */ |
520 | 0 | if (flag_has_tag(&vp->da->flags)) fr_dbuff_advance(&work_dbuff, 1); |
521 | |
|
522 | 0 | slen = encode_tunnel_password(&work_dbuff, &value_start, fr_dbuff_used(&value_dbuff), packet_ctx); |
523 | 0 | if (slen < 0) { |
524 | 0 | fr_strerror_printf("%s too long", vp->da->name); |
525 | 0 | return slen - flag_has_tag(&vp->da->flags); |
526 | 0 | } |
527 | | |
528 | | /* |
529 | | * Do this after so we don't mess up the input |
530 | | * value. |
531 | | */ |
532 | 0 | if (flag_has_tag(&vp->da->flags)) { |
533 | 0 | fr_dbuff_set_to_start(&value_start); |
534 | 0 | fr_dbuff_in(&value_start, (uint8_t) 0x00); |
535 | 0 | } |
536 | 0 | encrypted = true; |
537 | 0 | break; |
538 | | |
539 | | /* |
540 | | * The code above ensures that this attribute |
541 | | * always fits. |
542 | | */ |
543 | 0 | case FLAG_ENCRYPT_ASCEND_SECRET: |
544 | | /* |
545 | | * @todo radius decoding also uses fr_radius_ascend_secret() (Vernam cipher |
546 | | * is its own inverse). As part of converting decode, make sure the caller |
547 | | * there can pass a marker so we can use it here, too. |
548 | | */ |
549 | 0 | slen = fr_radius_ascend_secret(&work_dbuff, fr_dbuff_current(&value_start), fr_dbuff_used(&value_dbuff), |
550 | 0 | packet_ctx->common->secret, packet_ctx->request_authenticator); |
551 | 0 | if (slen < 0) return slen; |
552 | 0 | encrypted = true; |
553 | 0 | break; |
554 | 0 | } |
555 | | |
556 | 0 | if (!encrypted) { |
557 | 0 | fr_dbuff_set(&work_dbuff, &value_dbuff); |
558 | 0 | fr_dbuff_set(&value_start, fr_dbuff_start(&value_dbuff)); |
559 | 0 | } |
560 | | |
561 | | /* |
562 | | * High byte of 32bit integers gets set to the tag |
563 | | * value. |
564 | | * |
565 | | * The Tag field is one octet in length and is intended to provide a |
566 | | * means of grouping attributes in the same packet which refer to the |
567 | | * same tunnel. Valid values for this field are 0x01 through 0x1F, |
568 | | * inclusive. If the Tag field is unused, it MUST be zero (0x00). |
569 | | */ |
570 | 0 | if ((vp->vp_type == FR_TYPE_UINT32) && flag_has_tag(&vp->da->flags)) { |
571 | 0 | uint8_t msb = 0; |
572 | | /* |
573 | | * Only 24bit integers are allowed here |
574 | | */ |
575 | 0 | fr_dbuff_set(&src, &value_start); |
576 | 0 | (void) fr_dbuff_out(&msb, &src); |
577 | 0 | if (msb != 0) { |
578 | 0 | fr_strerror_const("Integer overflow for tagged uint32 attribute"); |
579 | 0 | goto return_0; |
580 | 0 | } |
581 | 0 | fr_dbuff_set(&dest, &value_start); |
582 | 0 | fr_dbuff_in(&dest, packet_ctx->tag); |
583 | 0 | } |
584 | | |
585 | 0 | FR_PROTO_HEX_DUMP(fr_dbuff_start(&work_dbuff), fr_dbuff_used(&work_dbuff), "value %s", |
586 | 0 | fr_type_to_str(vp->vp_type)); |
587 | | |
588 | | /* |
589 | | * Rebuilds the TLV stack for encoding the next attribute |
590 | | */ |
591 | 0 | vp = fr_dcursor_next(cursor); |
592 | 0 | fr_proto_da_stack_build(da_stack, vp ? vp->da : NULL); |
593 | |
|
594 | 0 | return fr_dbuff_set(dbuff, &work_dbuff); |
595 | 0 | } |
596 | | |
597 | | /** Breaks down large data into pieces, each with a header |
598 | | * |
599 | | * @param[out] data dbuff holding the data we're fragmenting. |
600 | | * @param[in] data_len the amount of data in the dbuff that makes up the value we're |
601 | | * splitting. |
602 | | * @param[in,out] hdr marker that points at the attribute header |
603 | | * @param[in] hdr_len length of the headers that will be added |
604 | | * @param[in] flag_offset offset within header of a flag byte whose MSB is set for all |
605 | | * but the last piece. |
606 | | * @param[in] vsa_offset if non-zero, the offset of a length field in a (sub?)-header |
607 | | * of size 3 that also needs to be adjusted to include the number |
608 | | * of bytes of data in the piece |
609 | | * @return |
610 | | * - <0 the number of bytes we would have needed to create |
611 | | * space for another attribute header in the buffer. |
612 | | * - 0 data was not modified. |
613 | | * - >0 the number of additional bytes we used inserting extra |
614 | | * headers. |
615 | | */ |
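| | /* |
| | * Illustration (assuming hdr_len = 4, as for long extended attributes): |
| | * max_frag_data = 255 - 4 = 251, so 600 octets of data become three |
| | * fragments of 251, 251 and 98 octets, each behind its own copy of the |
| | * 4 octet header, with the flag byte's MSB set on all but the last. |
| | */ |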
616 | | static ssize_t attr_fragment(fr_dbuff_t *data, size_t data_len, fr_dbuff_marker_t *hdr, size_t hdr_len, |
617 | | int flag_offset, int vsa_offset) |
618 | 0 | { |
619 | 0 | unsigned int num_fragments, i = 0; |
620 | 0 | size_t max_frag_data = UINT8_MAX - hdr_len; |
621 | 0 | fr_dbuff_t frag_data = FR_DBUFF_ABS(hdr); |
622 | 0 | fr_dbuff_marker_t frag_hdr, frag_hdr_p; |
623 | |
|
624 | 0 | if (unlikely(!data_len)) return 0; /* Shouldn't have been called */ |
625 | | |
626 | 0 | num_fragments = ROUND_UP_DIV(data_len, max_frag_data); |
627 | 0 | if (num_fragments == 1) return 0; /* Nothing to do */ |
628 | | |
629 | 0 | fr_dbuff_marker(&frag_hdr, &frag_data); |
630 | 0 | fr_dbuff_marker(&frag_hdr_p, &frag_data); |
631 | |
|
632 | 0 | fr_dbuff_advance(&frag_data, hdr_len); |
633 | |
|
634 | 0 | FR_PROTO_HEX_DUMP(fr_dbuff_current(hdr), hdr_len + data_len, "attr_fragment in"); |
635 | 0 | for (;;) { |
636 | 0 | bool last = (i + 1) == num_fragments; |
637 | 0 | uint8_t frag_len; |
638 | | |
639 | | /* |
640 | | * How long is this fragment? |
641 | | */ |
642 | 0 | if (last) { |
643 | 0 | frag_len = (data_len - (max_frag_data * (num_fragments - 1))); |
644 | 0 | } else { |
645 | 0 | frag_len = max_frag_data; |
646 | 0 | } |
647 | | |
648 | | /* |
649 | | * Update the "outer" header to reflect the actual |
650 | | * length of the fragment |
651 | | */ |
652 | 0 | fr_dbuff_set(&frag_hdr_p, &frag_hdr); |
653 | 0 | fr_dbuff_advance(&frag_hdr_p, 1); |
654 | 0 | fr_dbuff_in(&frag_hdr_p, (uint8_t)(hdr_len + frag_len)); |
655 | | |
656 | | /* |
657 | | * Update the "inner" header. The length here is |
658 | | * the inner VSA header length (3) + the fragment |
659 | | * length. |
660 | | */ |
661 | 0 | if (vsa_offset) { |
662 | 0 | fr_dbuff_set(&frag_hdr_p, fr_dbuff_current(&frag_hdr) + vsa_offset); |
663 | 0 | fr_dbuff_in(&frag_hdr_p, (uint8_t)(3 + frag_len)); |
664 | 0 | } |
665 | | |
666 | | /* |
667 | | * Just over-ride the flag field. Nothing else |
668 | | * uses it. |
669 | | */ |
670 | 0 | if (flag_offset) { |
671 | 0 | fr_dbuff_set(&frag_hdr_p, fr_dbuff_current(&frag_hdr) + flag_offset); |
672 | 0 | fr_dbuff_in(&frag_hdr_p, (uint8_t)(!last << 7)); |
673 | 0 | } |
674 | |
|
675 | 0 | FR_PROTO_HEX_DUMP(fr_dbuff_current(hdr), frag_len + hdr_len, |
676 | 0 | "attr_fragment fragment %u/%u", i + 1, num_fragments); |
677 | |
|
678 | 0 | fr_dbuff_advance(&frag_data, frag_len); /* Go to the start of the next fragment */ |
679 | 0 | if (last) break; |
680 | | |
681 | | /* |
682 | | * There's still trailing data after this |
683 | | * fragment. Move the trailing data to *past* |
684 | | * the next header. Once there's room, copy |
685 | | * the header over. |
686 | | * |
687 | | * This process leaves the next header in place, |
688 | | * ready for the next iteration of the loop. |
689 | | * |
690 | | * Yes, moving things multiple times is less than |
691 | | * efficient. Oh well. it's ~1K memmoved() |
692 | | * maybe 4 times. We are nowhere near the CPU / |
693 | | * electrical requirements of Bitcoin. |
694 | | */ |
695 | 0 | i++; |
696 | |
|
697 | 0 | fr_dbuff_set(&frag_hdr, &frag_data); /* Remember where the header should be */ |
698 | 0 | fr_dbuff_advance(&frag_data, hdr_len); /* Advance past the header */ |
699 | | |
700 | | /* |
701 | | * Shift remaining data by hdr_len. |
702 | | */ |
703 | 0 | FR_DBUFF_IN_MEMCPY_RETURN(&FR_DBUFF(&frag_data), &frag_hdr, data_len - (i * max_frag_data)); |
704 | 0 | fr_dbuff_in_memcpy(&FR_DBUFF(&frag_hdr), hdr, hdr_len); /* Copy the old header over */ |
705 | 0 | } |
706 | | |
707 | 0 | return fr_dbuff_set(data, &frag_data); |
708 | 0 | } |
709 | | |
710 | | /** Encode an "extended" attribute |
711 | | * |
712 | | */ |
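| | /* |
| | * Wire layout, for reference (RFC 6929): "short" extended attributes are |
| | * Type, Length, Extended-Type, Value..., while "long" extended |
| | * attributes insert one extra flags octet (More bit in the high bit) |
| | * before the value, which is why hlen below is 3 + extra. |
| | */ |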
713 | | static ssize_t encode_extended(fr_dbuff_t *dbuff, |
714 | | fr_da_stack_t *da_stack, NDEBUG_UNUSED unsigned int depth, |
715 | | fr_dcursor_t *cursor, void *encode_ctx) |
716 | 0 | { |
717 | 0 | ssize_t slen; |
718 | 0 | uint8_t hlen; |
719 | 0 | size_t vendor_hdr; |
720 | 0 | bool extra; |
721 | 0 | int my_depth; |
722 | 0 | fr_dict_attr_t const *da; |
723 | 0 | fr_dbuff_marker_t hdr, length_field; |
724 | 0 | fr_pair_t const *vp = fr_dcursor_current(cursor); |
725 | 0 | fr_dbuff_t work_dbuff; |
726 | |
|
727 | 0 | PAIR_VERIFY(vp); |
728 | 0 | FR_PROTO_STACK_PRINT(da_stack, depth); |
729 | |
|
730 | 0 | extra = flag_long_extended(&da_stack->da[0]->flags); |
731 | | |
732 | | /* |
733 | | * The data used here can be more than 255 bytes, but only for the |
734 | | * "long" extended type. |
735 | | */ |
736 | 0 | if (extra) { |
737 | 0 | work_dbuff = FR_DBUFF_BIND_CURRENT(dbuff); |
738 | 0 | } else { |
739 | 0 | work_dbuff = FR_DBUFF_MAX_BIND_CURRENT(dbuff, UINT8_MAX); |
740 | 0 | } |
741 | 0 | fr_dbuff_marker(&hdr, &work_dbuff); |
742 | | |
743 | | /* |
744 | | * Encode the header for "short" or "long" attributes |
745 | | */ |
746 | 0 | hlen = 3 + extra; |
747 | 0 | FR_DBUFF_IN_BYTES_RETURN(&work_dbuff, (uint8_t)da_stack->da[0]->attr); |
748 | 0 | fr_dbuff_marker(&length_field, &work_dbuff); |
749 | 0 | FR_DBUFF_IN_BYTES_RETURN(&work_dbuff, hlen); /* this gets overwritten later*/ |
750 | | |
751 | | /* |
752 | | * Encode which extended attribute it is. |
753 | | */ |
754 | 0 | FR_DBUFF_IN_BYTES_RETURN(&work_dbuff, (uint8_t)da_stack->da[1]->attr); |
755 | | |
756 | 0 | if (extra) FR_DBUFF_IN_BYTES_RETURN(&work_dbuff, 0x00); /* flags start off at zero */ |
757 | | |
758 | 0 | FR_PROTO_STACK_PRINT(da_stack, depth); |
759 | | |
760 | | /* |
761 | | * Handle VSA as "VENDOR + attr" |
762 | | */ |
763 | 0 | if (da_stack->da[1]->type == FR_TYPE_VSA) { |
764 | 0 | fr_assert(da_stack->da[2]); |
765 | 0 | fr_assert(da_stack->da[2]->type == FR_TYPE_VENDOR); |
766 | |
|
767 | 0 | FR_DBUFF_IN_RETURN(&work_dbuff, (uint32_t) da_stack->da[2]->attr); |
768 | | |
769 | 0 | fr_assert(da_stack->da[3]); |
770 | |
|
771 | 0 | FR_DBUFF_IN_BYTES_RETURN(&work_dbuff, (uint8_t)da_stack->da[3]->attr); |
772 | | |
773 | 0 | hlen += 5; |
774 | 0 | vendor_hdr = 5; |
775 | |
|
776 | 0 | FR_PROTO_STACK_PRINT(da_stack, depth); |
777 | 0 | FR_PROTO_HEX_DUMP(fr_dbuff_current(&hdr), hlen, "header extended vendor specific"); |
778 | |
|
779 | 0 | my_depth = 3; |
780 | 0 | } else { |
781 | 0 | vendor_hdr = 0; |
782 | 0 | FR_PROTO_HEX_DUMP(fr_dbuff_current(&hdr), hlen, "header extended"); |
783 | |
|
784 | 0 | my_depth = 1; |
785 | 0 | } |
786 | | |
787 | | /* |
788 | | * We're at the point where we need to encode something. |
789 | | */ |
790 | 0 | da = da_stack->da[my_depth]; |
791 | 0 | fr_assert(vp->da == da); |
792 | |
|
793 | 0 | if (da->type != FR_TYPE_STRUCT) { |
794 | 0 | slen = encode_value(&work_dbuff, da_stack, my_depth, cursor, encode_ctx); |
795 | |
|
796 | 0 | } else { |
797 | 0 | slen = fr_struct_to_network(&work_dbuff, da_stack, my_depth, cursor, encode_ctx, encode_value, encode_child); |
798 | 0 | } |
799 | 0 | if (slen <= 0) return slen; |
800 | | |
801 | | /* |
802 | | * There may be more than 255 octets of data encoded in |
803 | | * the attribute. If so, move the data up in the packet, |
804 | | * and copy the existing header over. Set the "M" flag ONLY |
805 | | * after copying the rest of the data. |
806 | | * |
807 | | * Note that we add "vendor_hdr" to the length of the |
808 | | * encoded data. That 5 octet field is logically part of |
809 | | * the data, and not part of the header. |
810 | | */ |
811 | 0 | if (slen > (UINT8_MAX - hlen)) { |
812 | 0 | slen = attr_fragment(&work_dbuff, (size_t)vendor_hdr + slen, &hdr, 4, 3, 0); |
813 | 0 | if (slen <= 0) return slen; |
814 | | |
815 | 0 | return fr_dbuff_set(dbuff, &work_dbuff); |
816 | 0 | } |
817 | | |
818 | 0 | fr_dbuff_in_bytes(&length_field, (uint8_t) fr_dbuff_used(&work_dbuff)); |
819 | 0 | FR_PROTO_HEX_DUMP(fr_dbuff_current(&hdr), hlen, "header extended"); |
820 | |
|
821 | 0 | return fr_dbuff_set(dbuff, &work_dbuff); |
822 | 0 | } |
823 | | |
824 | | /* |
825 | | * The encode_extended() function expects to see the TLV or |
826 | | * STRUCT inside of the extended attribute, in which case it |
827 | | * creates the attribute header and calls encode_value() for the |
828 | | * leaf type, or child TLV / struct. |
829 | | * |
830 | | * If we see VSA or VENDOR, then we recurse past that to a child |
831 | | * which is either a leaf, or a TLV, or a STRUCT. |
832 | | */ |
833 | | static ssize_t encode_extended_nested(fr_dbuff_t *dbuff, |
834 | | fr_da_stack_t *da_stack, unsigned int depth, |
835 | | fr_dcursor_t *cursor, void *encode_ctx) |
836 | 0 | { |
837 | 0 | ssize_t slen; |
838 | 0 | fr_pair_t *parent, *vp; |
839 | 0 | fr_dcursor_t child_cursor; |
840 | 0 | fr_dbuff_t work_dbuff = FR_DBUFF(dbuff); |
841 | |
|
842 | 0 | parent = fr_dcursor_current(cursor); |
843 | 0 | fr_assert(fr_type_is_structural(parent->vp_type)); |
844 | |
|
845 | 0 | (void) fr_pair_dcursor_child_iter_init(&child_cursor, &parent->vp_group, cursor); |
846 | |
|
847 | 0 | FR_PROTO_STACK_PRINT(da_stack, depth); |
848 | |
|
849 | 0 | while ((vp = fr_dcursor_current(&child_cursor)) != NULL) { |
850 | 0 | if ((vp->vp_type == FR_TYPE_VSA) || (vp->vp_type == FR_TYPE_VENDOR)) { |
851 | 0 | slen = encode_extended_nested(&work_dbuff, da_stack, depth + 1, &child_cursor, encode_ctx); |
852 | |
|
853 | 0 | } else { |
854 | 0 | fr_proto_da_stack_build(da_stack, vp->da); |
855 | 0 | slen = encode_extended(&work_dbuff, da_stack, depth, &child_cursor, encode_ctx); |
856 | 0 | if (slen < 0) return slen; |
857 | 0 | } |
858 | | |
859 | 0 | if (slen < 0) return slen; |
860 | 0 | } |
861 | | |
862 | 0 | vp = fr_dcursor_next(cursor); |
863 | |
|
864 | 0 | fr_proto_da_stack_build(da_stack, vp ? vp->da : NULL); |
865 | |
|
866 | 0 | return fr_dbuff_set(dbuff, &work_dbuff); |
867 | 0 | } |
868 | | |
869 | | |
870 | | /** Encode an RFC format attribute, with the "concat" flag set |
871 | | * |
872 | | * If there isn't enough free space in the packet, the data is |
873 | | * truncated to fit. |
874 | | * |
875 | | * The attribute is split on 253 byte boundaries, with a header |
876 | | * prepended to each chunk. |
877 | | */ |
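| | /* |
| | * e.g. a 600 octet value is emitted as chunks of 253, 253 and 94 |
| | * octets, each preceded by its own 2 octet (type, length) header. |
| | */ |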
878 | | static ssize_t encode_concat(fr_dbuff_t *dbuff, |
879 | | fr_da_stack_t *da_stack, unsigned int depth, |
880 | | fr_dcursor_t *cursor, UNUSED void *encode_ctx) |
881 | 0 | { |
882 | 0 | uint8_t const *p; |
883 | 0 | size_t data_len; |
884 | 0 | fr_pair_t const *vp = fr_dcursor_current(cursor); |
885 | 0 | fr_dbuff_t work_dbuff = FR_DBUFF(dbuff); |
886 | 0 | fr_dbuff_marker_t hdr; |
887 | |
|
888 | 0 | FR_PROTO_STACK_PRINT(da_stack, depth); |
889 | |
|
890 | 0 | p = vp->vp_octets; |
891 | 0 | data_len = vp->vp_length; |
892 | 0 | fr_dbuff_marker(&hdr, &work_dbuff); |
893 | |
|
894 | 0 | while (data_len > 0) { |
895 | 0 | size_t frag_len = (data_len > RADIUS_MAX_STRING_LENGTH) ? RADIUS_MAX_STRING_LENGTH : data_len; |
896 | |
|
897 | 0 | fr_dbuff_set(&hdr, &work_dbuff); |
898 | 0 | FR_DBUFF_IN_BYTES_RETURN(&work_dbuff, (uint8_t) da_stack->da[depth]->attr, 0x00); |
899 | | |
900 | 0 | FR_DBUFF_IN_MEMCPY_RETURN(&work_dbuff, p, frag_len); |
901 | | |
902 | 0 | fr_dbuff_advance(&hdr, 1); |
903 | 0 | fr_dbuff_in(&hdr, (uint8_t) (2 + frag_len)); |
904 | |
|
905 | 0 | FR_PROTO_HEX_DUMP(fr_dbuff_current(&hdr) - 1, 2 + frag_len, "encode_concat fragment"); |
906 | |
|
907 | 0 | p += frag_len; |
908 | 0 | data_len -= frag_len; |
909 | 0 | } |
910 | | |
911 | 0 | vp = fr_dcursor_next(cursor); |
912 | | |
913 | | /* |
914 | | * @fixme: attributes with 'concat' MUST be of type |
915 | | * 'octets', and therefore CANNOT have any TLV data in them. |
916 | | */ |
917 | 0 | fr_proto_da_stack_build(da_stack, vp ? vp->da : NULL); |
918 | |
|
919 | 0 | return fr_dbuff_set(dbuff, &work_dbuff); |
920 | 0 | } |
921 | | |
922 | | /** Encode an RFC format attribute. |
923 | | * |
924 | | * This could be a standard attribute, or a TLV data type. |
925 | | * If it's a standard attribute, then vp->da->attr == attribute. |
926 | | * Otherwise, attribute may be something else. |
927 | | */ |
928 | | static ssize_t encode_child(fr_dbuff_t *dbuff, |
929 | | fr_da_stack_t *da_stack, unsigned int depth, |
930 | | fr_dcursor_t *cursor, void *encode_ctx) |
931 | 0 | { |
932 | 0 | ssize_t slen; |
933 | 0 | uint8_t hlen; |
934 | 0 | fr_dbuff_marker_t hdr; |
935 | 0 | fr_dbuff_t work_dbuff = FR_DBUFF_MAX(dbuff, UINT8_MAX); |
936 | |
|
937 | 0 | FR_PROTO_STACK_PRINT(da_stack, depth); |
938 | |
|
939 | 0 | fr_assert(da_stack->da[depth] != NULL); |
940 | |
|
941 | 0 | fr_dbuff_marker(&hdr, &work_dbuff); |
942 | |
|
943 | 0 | hlen = 2; |
944 | 0 | FR_DBUFF_IN_BYTES_RETURN(&work_dbuff, (uint8_t)da_stack->da[depth]->attr, hlen); |
945 | | |
946 | 0 | slen = encode_value(&work_dbuff, da_stack, depth, cursor, encode_ctx); |
947 | 0 | if (slen <= 0) return slen; |
948 | | |
949 | 0 | fr_dbuff_advance(&hdr, 1); |
950 | 0 | fr_dbuff_in_bytes(&hdr, (uint8_t)(hlen + slen)); |
951 | |
|
952 | 0 | FR_PROTO_HEX_DUMP(fr_dbuff_start(&work_dbuff), 2, "header rfc"); |
953 | |
|
954 | 0 | return fr_dbuff_set(dbuff, &work_dbuff); |
955 | 0 | } |
956 | | |
957 | | |
958 | | /** Encode one full Vendor-Specific + Vendor-ID + Vendor-Attr + Vendor-Length + ... |
959 | | */ |
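| | /* |
| | * With the common "1,1" vendor format the wire layout is: |
| | * Type 26, Length, Vendor-Id (4 octets), Vendor-Type, Vendor-Length, data. |
| | * Some vendors use wider type/length fields; dv->flags.type_size and |
| | * dv->flags.length select those widths below. |
| | */ |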
960 | | static ssize_t encode_vendor_attr(fr_dbuff_t *dbuff, |
961 | | fr_da_stack_t *da_stack, unsigned int depth, |
962 | | fr_dcursor_t *cursor, void *encode_ctx) |
963 | 0 | { |
964 | 0 | ssize_t slen; |
965 | 0 | size_t hdr_len; |
966 | 0 | fr_dbuff_marker_t hdr, length_field, vsa_length_field; |
967 | 0 | fr_dict_attr_t const *da, *dv; |
968 | 0 | fr_dbuff_t work_dbuff; |
969 | |
|
970 | 0 | FR_PROTO_STACK_PRINT(da_stack, depth); |
971 | |
|
972 | 0 | dv = da_stack->da[depth++]; |
973 | |
|
974 | 0 | if (dv->type != FR_TYPE_VENDOR) { |
975 | 0 | fr_strerror_const("Expected Vendor"); |
976 | 0 | return PAIR_ENCODE_FATAL_ERROR; |
977 | 0 | } |
978 | | |
979 | | /* |
980 | | * Now we encode one vendor attribute. |
981 | | */ |
982 | 0 | da = da_stack->da[depth]; |
983 | 0 | fr_assert(da != NULL); |
984 | | |
985 | | /* |
986 | | * Most VSAs are limited to one attribute. Only refs |
987 | | * (e.g. DHCPv4, DHCPv6) can get fragmented. |
988 | | */ |
989 | 0 | if (da->type != FR_TYPE_GROUP) { |
990 | 0 | work_dbuff = FR_DBUFF_MAX(dbuff, UINT8_MAX); |
991 | 0 | } else { |
992 | 0 | work_dbuff = FR_DBUFF(dbuff); |
993 | 0 | } |
994 | |
|
995 | 0 | fr_dbuff_marker(&hdr, &work_dbuff); |
996 | | |
997 | | /* |
998 | | * Build the Vendor-Specific header |
999 | | */ |
1000 | 0 | FR_DBUFF_IN_BYTES_RETURN(&work_dbuff, FR_VENDOR_SPECIFIC); |
1001 | | |
1002 | 0 | fr_dbuff_marker(&length_field, &work_dbuff); |
1003 | 0 | FR_DBUFF_IN_BYTES_RETURN(&work_dbuff, 0); |
1004 | | |
1005 | 0 | FR_DBUFF_IN_RETURN(&work_dbuff, (uint32_t)dv->attr); /* Copy in the 32bit vendor ID */ |
1006 | | |
1007 | | |
1008 | 0 | hdr_len = dv->flags.type_size + dv->flags.length; |
1009 | | |
1010 | | /* |
1011 | | * Vendors use different widths for their |
1012 | | * attribute number fields. |
1013 | | */ |
1014 | 0 | switch (dv->flags.type_size) { |
1015 | 0 | default: |
1016 | 0 | fr_strerror_printf("%s: Internal sanity check failed, type %u", __FUNCTION__, (unsigned) dv->flags.type_size); |
1017 | 0 | return PAIR_ENCODE_FATAL_ERROR; |
1018 | | |
1019 | 0 | case 4: |
1020 | 0 | fr_dbuff_in(&work_dbuff, (uint32_t)da->attr); |
1021 | 0 | break; |
1022 | | |
1023 | 0 | case 2: |
1024 | 0 | fr_dbuff_in(&work_dbuff, (uint16_t)da->attr); |
1025 | 0 | break; |
1026 | | |
1027 | 0 | case 1: |
1028 | 0 | fr_dbuff_in(&work_dbuff, (uint8_t)da->attr); |
1029 | 0 | break; |
1030 | 0 | } |
1031 | | |
1032 | | /* |
1033 | | * The length fields will get over-written later. |
1034 | | */ |
1035 | 0 | switch (dv->flags.length) { |
1036 | 0 | default: |
1037 | 0 | fr_strerror_printf("%s: Internal sanity check failed, length %u", __FUNCTION__, (unsigned) dv->flags.length); |
1038 | 0 | return PAIR_ENCODE_FATAL_ERROR; |
1039 | | |
1040 | 0 | case 0: |
1041 | 0 | break; |
1042 | | |
1043 | 0 | case 2: |
1044 | 0 | fr_dbuff_in_bytes(&work_dbuff, 0); |
1045 | 0 | FALL_THROUGH; |
1046 | |
|
1047 | 0 | case 1: |
1048 | | /* |
1049 | | * Length fields are set to zero, because they |
1050 | | * will get over-ridden later. |
1051 | | */ |
1052 | 0 | fr_dbuff_marker(&vsa_length_field, &work_dbuff); |
1053 | 0 | fr_dbuff_in_bytes(&work_dbuff, 0); |
1054 | 0 | break; |
1055 | 0 | } |
1056 | | |
1057 | 0 | slen = encode_value(&work_dbuff, da_stack, depth, cursor, encode_ctx); |
1058 | 0 | if (slen <= 0) return slen; |
1059 | | |
1060 | | /* |
1061 | | * There may be more than 253 octets of data encoded in |
1062 | | * the attribute. If so, move the data up in the packet, |
1063 | | * and copy the existing header over. Set the "C" flag |
1064 | | * ONLY after copying the rest of the data. |
1065 | | * |
1066 | | * Note that we do NOT check 'slen' here, as it's only |
1067 | | * the size of the sub-sub attribute, and doesn't include |
1068 | | * the RADIUS attribute header, or Vendor-ID. |
1069 | | */ |
1070 | 0 | if (fr_dbuff_used(&work_dbuff) > UINT8_MAX) { |
1071 | 0 | size_t length_offset = 0; |
1072 | |
|
1073 | 0 | if (dv->flags.length) length_offset = 6 + hdr_len - 1; |
1074 | |
|
1075 | 0 | slen = attr_fragment(&work_dbuff, (size_t)slen, &hdr, 6 + hdr_len, 0, length_offset); |
1076 | 0 | if (slen <= 0) return slen; |
1077 | 0 | } else { |
1078 | 0 | if (dv->flags.length) { |
1079 | 0 | fr_dbuff_in(&vsa_length_field, (uint8_t)(hdr_len + slen)); |
1080 | 0 | } |
1081 | |
|
1082 | 0 | fr_dbuff_in(&length_field, (uint8_t) fr_dbuff_used(&work_dbuff)); |
1083 | 0 | } |
1084 | | |
1085 | 0 | FR_PROTO_HEX_DUMP(fr_dbuff_current(&hdr), 6 + hdr_len, "header vsa"); |
1086 | |
|
1087 | 0 | return fr_dbuff_set(dbuff, &work_dbuff); |
1088 | 0 | } |
1089 | | |
1090 | | /** Encode a WiMAX attribute |
1091 | | * |
1092 | | */ |
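| | /* |
| | * WiMAX VSAs use a 9 octet header: Type 26, Length, Vendor-Id (4 octets), |
| | * Vendor-Type, Vendor-Length, Continuation. The high bit of the |
| | * continuation octet (offset 8) is set when the data carries on in a |
| | * following Vendor-Specific attribute, which is what attr_fragment() |
| | * does below. |
| | */ |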
1093 | | static ssize_t encode_wimax(fr_dbuff_t *dbuff, |
1094 | | fr_da_stack_t *da_stack, unsigned int depth, |
1095 | | fr_dcursor_t *cursor, void *encode_ctx) |
1096 | 0 | { |
1097 | 0 | ssize_t slen; |
1098 | 0 | fr_dbuff_t work_dbuff = FR_DBUFF(dbuff); |
1099 | 0 | fr_dbuff_marker_t hdr, length_field, vsa_length_field; |
1100 | 0 | fr_dict_attr_t const *dv; |
1101 | 0 | fr_pair_t const *vp = fr_dcursor_current(cursor); |
1102 | |
|
1103 | 0 | fr_dbuff_marker(&hdr, &work_dbuff); |
1104 | |
|
1105 | 0 | PAIR_VERIFY(vp); |
1106 | 0 | FR_PROTO_STACK_PRINT(da_stack, depth); |
1107 | |
|
1108 | 0 | dv = da_stack->da[depth++]; |
1109 | |
|
1110 | 0 | if (dv->type != FR_TYPE_VENDOR) { |
1111 | 0 | fr_strerror_const("Expected Vendor"); |
1112 | 0 | return PAIR_ENCODE_FATAL_ERROR; |
1113 | 0 | } |
1114 | | |
1115 | 0 | FR_PROTO_STACK_PRINT(da_stack, depth); |
1116 | | |
1117 | | /* |
1118 | | * Build the Vendor-Specific header |
1119 | | */ |
1120 | 0 | FR_DBUFF_IN_BYTES_RETURN(&work_dbuff, FR_VENDOR_SPECIFIC); |
1121 | 0 | fr_dbuff_marker(&length_field, &work_dbuff); |
1122 | 0 | FR_DBUFF_IN_BYTES_RETURN(&work_dbuff, 0x09); |
1123 | | |
1124 | 0 | FR_DBUFF_IN_RETURN(&work_dbuff, (uint32_t) dv->attr); |
1125 | | |
1126 | | /* |
1127 | | * Encode the first attribute |
1128 | | */ |
1129 | 0 | FR_DBUFF_IN_BYTES_RETURN(&work_dbuff, (uint8_t)da_stack->da[depth]->attr); |
1130 | | |
1131 | 0 | fr_dbuff_marker(&vsa_length_field, &work_dbuff); |
1132 | 0 | FR_DBUFF_IN_BYTES_RETURN(&work_dbuff, 0x03, 0x00); /* length + continuation, both may be overwritten later */ |
1133 | | |
1134 | | /* |
1135 | | * We don't bound the size of work_dbuff; it can use more than UINT8_MAX bytes |
1136 | | * because of the "continuation" byte. |
1137 | | */ |
1138 | 0 | slen = encode_value(&work_dbuff, da_stack, depth, cursor, encode_ctx); |
1139 | 0 | if (slen <= 0) return slen; |
1140 | | |
1141 | | /* |
1142 | | * There may be more than 253 octets of data encoded in |
1143 | | * the attribute. If so, move the data up in the packet, |
1144 | | * and copy the existing header over. Set the "C" flag |
1145 | | * ONLY after copying the rest of the data. |
1146 | | * |
1147 | | * Note that we do NOT check 'slen' here, as it's only |
1148 | | * the size of the sub-sub attribute, and doesn't include |
1149 | | * the RADIUS attribute header, or Vendor-ID. |
1150 | | */ |
1151 | 0 | if (fr_dbuff_used(&work_dbuff) > UINT8_MAX) { |
1152 | 0 | slen = attr_fragment(&work_dbuff, (size_t)slen, &hdr, 9, 8, 7); |
1153 | 0 | if (slen <= 0) return slen; |
1154 | | |
1155 | 0 | return fr_dbuff_set(dbuff, &work_dbuff); |
1156 | 0 | } |
1157 | | |
1158 | 0 | fr_dbuff_in_bytes(&vsa_length_field, (uint8_t) (fr_dbuff_used(&work_dbuff) - 6)); |
1159 | 0 | fr_dbuff_in_bytes(&length_field, (uint8_t) fr_dbuff_used(&work_dbuff)); |
1160 | |
|
1161 | 0 | FR_PROTO_HEX_DUMP(fr_dbuff_current(&hdr), 9, "header wimax"); |
1162 | |
|
1163 | 0 | return fr_dbuff_set(dbuff, &work_dbuff); |
1164 | 0 | } |
1165 | | |
1166 | | static ssize_t encode_vendor(fr_dbuff_t *dbuff, |
1167 | | fr_da_stack_t *da_stack, unsigned int depth, |
1168 | | fr_dcursor_t *cursor, void *encode_ctx) |
1169 | 0 | { |
1170 | 0 | fr_dict_attr_t const *da = da_stack->da[depth]; |
1171 | 0 | ssize_t slen; |
1172 | 0 | fr_pair_t *vp; |
1173 | 0 | fr_dict_vendor_t const *dv; |
1174 | 0 | fr_dcursor_t child_cursor; |
1175 | 0 | fr_dbuff_t work_dbuff; |
1176 | |
|
1177 | 0 | FR_PROTO_STACK_PRINT(da_stack, depth); |
1178 | |
|
1179 | 0 | if (da->type != FR_TYPE_VENDOR) { |
1180 | 0 | fr_strerror_printf("%s: Expected type \"vendor\" got \"%s\"", __FUNCTION__, |
1181 | 0 | fr_type_to_str(da->type)); |
1182 | 0 | return PAIR_ENCODE_FATAL_ERROR; |
1183 | 0 | } |
1184 | | |
1185 | 0 | dv = fr_dict_vendor_by_da(da_stack->da[depth]); |
1186 | | |
1187 | | /* |
1188 | | * Flat hierarchy, encode one attribute at a time. |
1189 | | * |
1190 | | * Note that there's no attempt to encode multiple VSAs |
1191 | | * into one attribute. We can add that back as a flag, |
1192 | | * once all of the nested attribute conversion has been |
1193 | | * done. |
1194 | | */ |
1195 | 0 | if (da_stack->da[depth + 1]) { |
1196 | 0 | if (dv && dv->continuation) { |
1197 | 0 | return encode_wimax(dbuff, da_stack, depth, cursor, encode_ctx); |
1198 | 0 | } |
1199 | | |
1200 | 0 | return encode_vendor_attr(dbuff, da_stack, depth, cursor, encode_ctx); |
1201 | 0 | } |
1202 | | |
1203 | | /* |
1204 | | * Loop over the children of this attribute of type Vendor. |
1205 | | */ |
1206 | 0 | vp = fr_dcursor_current(cursor); |
1207 | 0 | fr_assert(vp->da == da); |
1208 | 0 | work_dbuff = FR_DBUFF(dbuff); |
1209 | |
|
1210 | 0 | fr_pair_dcursor_child_iter_init(&child_cursor, &vp->vp_group, cursor); |
1211 | 0 | while ((vp = fr_dcursor_current(&child_cursor)) != NULL) { |
1212 | 0 | fr_proto_da_stack_build(da_stack, vp->da); |
1213 | |
|
1214 | 0 | if (dv && dv->continuation) { |
1215 | 0 | slen = encode_wimax(&work_dbuff, da_stack, depth, &child_cursor, encode_ctx); |
1216 | 0 | } else { |
1217 | 0 | slen = encode_vendor_attr(&work_dbuff, da_stack, depth, &child_cursor, encode_ctx); |
1218 | 0 | } |
1219 | 0 | if (slen < 0) return slen; |
1220 | 0 | } |
1221 | | |
1222 | 0 | vp = fr_dcursor_next(cursor); |
1223 | 0 | fr_proto_da_stack_build(da_stack, vp ? vp->da : NULL); |
1224 | |
|
1225 | 0 | return fr_dbuff_set(dbuff, &work_dbuff); |
1226 | 0 | } |
1227 | | |
1228 | | /** Encode a Vendor-Specific attribute |
1229 | | * |
1230 | | */ |
1231 | | static ssize_t encode_vsa(fr_dbuff_t *dbuff, |
1232 | | fr_da_stack_t *da_stack, unsigned int depth, |
1233 | | fr_dcursor_t *cursor, void *encode_ctx) |
1234 | 0 | { |
1235 | 0 | ssize_t slen; |
1236 | 0 | fr_pair_t *vp; |
1237 | 0 | fr_dcursor_t child_cursor; |
1238 | 0 | fr_dict_attr_t const *da = da_stack->da[depth]; |
1239 | 0 | fr_dbuff_t work_dbuff; |
1240 | |
|
1241 | 0 | FR_PROTO_STACK_PRINT(da_stack, depth); |
1242 | |
|
1243 | 0 | if (da->type != FR_TYPE_VSA) { |
1244 | 0 | fr_strerror_printf("%s: Expected type \"vsa\" got \"%s\"", __FUNCTION__, |
1245 | 0 | fr_type_to_str(da->type)); |
1246 | 0 | return PAIR_ENCODE_FATAL_ERROR; |
1247 | 0 | } |
1248 | | |
1249 | | /* |
1250 | | * Loop over the contents of Vendor-Specific, each of |
1251 | | * which MUST be of type FR_TYPE_VENDOR. |
1252 | | */ |
1253 | 0 | if (da_stack->da[depth + 1]) { |
1254 | 0 | return encode_vendor(dbuff, da_stack, depth + 1, cursor, encode_ctx); |
1255 | 0 | } |
1256 | | |
1257 | 0 | work_dbuff = FR_DBUFF(dbuff); |
1258 | |
|
1259 | 0 | vp = fr_dcursor_current(cursor); |
1260 | 0 | if (vp->da != da_stack->da[depth]) { |
1261 | 0 | fr_strerror_printf("%s: Can't encode empty Vendor-Specific", __FUNCTION__); |
1262 | 0 | return 0; |
1263 | 0 | } |
1264 | | |
1265 | | /* |
1266 | | * Loop over the children of this Vendor-Specific |
1267 | | * attribute. |
1268 | | */ |
1269 | 0 | fr_pair_dcursor_child_iter_init(&child_cursor, &vp->vp_group, cursor); |
1270 | 0 | while ((vp = fr_dcursor_current(&child_cursor)) != NULL) { |
1271 | 0 | fr_proto_da_stack_build(da_stack, vp->da); |
1272 | |
|
1273 | 0 | fr_assert(da_stack->da[depth + 1]->type == FR_TYPE_VENDOR); |
1274 | |
|
1275 | 0 | slen = encode_vendor(&work_dbuff, da_stack, depth + 1, &child_cursor, encode_ctx); |
1276 | 0 | if (slen < 0) return slen; |
1277 | 0 | } |
1278 | | |
1279 | | /* |
1280 | | * Fix up the da stack, and return the data we've encoded. |
1281 | | */ |
1282 | 0 | vp = fr_dcursor_next(cursor); |
1283 | 0 | fr_proto_da_stack_build(da_stack, vp ? vp->da : NULL); |
1284 | |
|
1285 | 0 | FR_PROTO_HEX_DUMP(fr_dbuff_start(&work_dbuff), 6, "header vsa"); |
1286 | |
|
1287 | 0 | return fr_dbuff_set(dbuff, &work_dbuff); |
1288 | 0 | } |
1289 | | |
1290 | | /** Encode NAS-Filter-Rule |
1291 | | * |
1292 | | * Concatenates the string attributes together, separated by a 0x00 byte. |
1293 | | */ |
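| | /* |
| | * e.g. three NAS-Filter-Rule values "a", "b" and "c" are packed as |
| | * attribute data "a\0b\0c", with a new attribute header started |
| | * whenever the current one reaches 253 octets of data. |
| | */ |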
1294 | | static ssize_t encode_nas_filter_rule(fr_dbuff_t *dbuff, |
1295 | | fr_da_stack_t *da_stack, NDEBUG_UNUSED unsigned int depth, |
1296 | | fr_dcursor_t *cursor, UNUSED void *encode_ctx) |
1297 | 0 | { |
1298 | 0 | fr_dbuff_t work_dbuff = FR_DBUFF(dbuff); |
1299 | 0 | fr_dbuff_marker_t hdr, frag_hdr; |
1300 | 0 | fr_pair_t *vp = fr_dcursor_current(cursor); |
1301 | 0 | size_t attr_len = 2; |
1302 | |
|
1303 | 0 | FR_PROTO_STACK_PRINT(da_stack, depth); |
1304 | |
|
1305 | 0 | fr_assert(vp); |
1306 | 0 | fr_assert(vp->da); |
1307 | |
|
1308 | 0 | fr_dbuff_marker(&hdr, &work_dbuff); |
1309 | 0 | fr_dbuff_marker(&frag_hdr, &work_dbuff); |
1310 | 0 | fr_dbuff_advance(&hdr, 1); |
1311 | 0 | FR_DBUFF_IN_BYTES_RETURN(&work_dbuff, (uint8_t)vp->da->attr, 0x00); |
1312 | | |
1313 | 0 | fr_assert(vp->da == attr_nas_filter_rule); |
1314 | |
|
1315 | 0 | while (true) { |
1316 | 0 | size_t data_len = vp->vp_length; |
1317 | 0 | size_t frag_len; |
1318 | 0 | char const *p = vp->vp_strvalue; |
1319 | | |
1320 | | /* |
1321 | | * Keep encoding this attribute until it's done. |
1322 | | */ |
1323 | 0 | while (data_len > 0) { |
1324 | 0 | frag_len = data_len; |
1325 | | |
1326 | | /* |
1327 | | * This fragment doesn't overflow the |
1328 | | * attribute. Copy it over, update the |
1329 | | * length, but leave the marker at the |
1330 | | * current header. |
1331 | | */ |
1332 | 0 | if ((attr_len + frag_len) <= UINT8_MAX) { |
1333 | 0 | FR_DBUFF_IN_MEMCPY_RETURN(&work_dbuff, p, frag_len); |
1334 | 0 | attr_len += frag_len; |
1335 | |
|
1336 | 0 | fr_dbuff_set(&frag_hdr, &hdr); |
1337 | 0 | fr_dbuff_in(&frag_hdr, (uint8_t) attr_len); /* there's no fr_dbuff_in_no_advance() */ |
1338 | 0 | break; |
1339 | 0 | } |
1340 | | |
1341 | | /* |
1342 | | * This fragment overflows the attribute. |
1343 | | * Copy the fragment in, and create a new |
1344 | | * attribute header. |
1345 | | */ |
1346 | 0 | frag_len = UINT8_MAX - attr_len; |
1347 | 0 | FR_DBUFF_IN_MEMCPY_RETURN(&work_dbuff, p, frag_len); |
1348 | 0 | fr_dbuff_in(&hdr, (uint8_t) UINT8_MAX); |
1349 | |
|
1350 | 0 | fr_dbuff_marker(&hdr, &work_dbuff); |
1351 | 0 | fr_dbuff_advance(&hdr, 1); |
1352 | 0 | FR_DBUFF_IN_BYTES_RETURN(&work_dbuff, (uint8_t)vp->da->attr, 0x02); |
1353 | 0 | attr_len = 2; |
1354 | |
|
1355 | 0 | p += frag_len; |
1356 | 0 | data_len -= frag_len; |
1357 | 0 | } |
1358 | | |
1359 | | /* |
1360 | | * If we have nothing more to do here, then stop. |
1361 | | */ |
1362 | 0 | vp = fr_dcursor_next(cursor); |
1363 | 0 | if (!vp || (vp->da != attr_nas_filter_rule)) { |
1364 | 0 | break; |
1365 | 0 | } |
1366 | | |
1367 | | /* |
1368 | | * We have to add a zero byte. If it doesn't |
1369 | | * overflow the current attribute, then just add |
1370 | | * it in. |
1371 | | */ |
1372 | 0 | if (attr_len < UINT8_MAX) { |
1373 | 0 | attr_len++; |
1374 | 0 | FR_DBUFF_IN_BYTES_RETURN(&work_dbuff, 0x00); |
1375 | | |
1376 | 0 | fr_dbuff_set(&frag_hdr, &hdr); |
1377 | 0 | fr_dbuff_in(&frag_hdr, (uint8_t) attr_len); /* there's no fr_dbuff_in_no_advance() */ |
1378 | 0 | continue; |
1379 | 0 | } |
1380 | | |
1381 | | /* |
1382 | | * The zero byte causes the current attribute to |
1383 | | * overflow. Create a new header with the zero |
1384 | | * byte already populated, and keep going. |
1385 | | */ |
1386 | 0 | fr_dbuff_marker(&hdr, &work_dbuff); |
1387 | 0 | fr_dbuff_advance(&hdr, 1); |
1388 | 0 | FR_DBUFF_IN_BYTES_RETURN(&work_dbuff, (uint8_t)vp->da->attr, 0x00, 0x00); |
1389 | 0 | attr_len = 3; |
1390 | 0 | } |
1391 | | |
1392 | 0 | vp = fr_dcursor_current(cursor); |
1393 | 0 | fr_proto_da_stack_build(da_stack, vp ? vp->da : NULL); |
1394 | |
|
1395 | 0 | return fr_dbuff_set(dbuff, &work_dbuff); |
1396 | 0 | } |
1397 | | |
1398 | | /** Encode an RFC standard attribute 1..255 |
1399 | | * |
1400 | | * This function is not the same as encode_child(), because this |
1401 | | * one treats some "top level" attributes as special. e.g. |
1402 | | * one treats some "top level" attributes as special, e.g. |
1403 | | */ |
1404 | | static ssize_t encode_rfc(fr_dbuff_t *dbuff, fr_da_stack_t *da_stack, unsigned int depth, |
1405 | | fr_dcursor_t *cursor, void *encode_ctx) |
1406 | 0 | { |
1407 | 0 | fr_pair_t const *vp = fr_dcursor_current(cursor); |
1408 | 0 | fr_dbuff_t work_dbuff = FR_DBUFF(dbuff); |
1409 | 0 | fr_dbuff_marker_t start; |
1410 | 0 | fr_radius_encode_ctx_t *packet_ctx = encode_ctx; |
1411 | |
|
1412 | 0 | fr_dbuff_marker(&start, &work_dbuff); |
1413 | | |
1414 | | /* |
1415 | | * Sanity checks |
1416 | | */ |
1417 | 0 | PAIR_VERIFY(vp); |
1418 | 0 | FR_PROTO_STACK_PRINT(da_stack, depth); |
1419 | |
|
1420 | 0 | switch (da_stack->da[depth]->type) { |
1421 | 0 | case FR_TYPE_TLV: |
1422 | 0 | case FR_TYPE_VSA: |
1423 | 0 | case FR_TYPE_VENDOR: |
1424 | | /* FR_TYPE_STRUCT is actually allowed... */ |
1425 | 0 | fr_strerror_printf("%s: Expected leaf type got \"%s\"", __FUNCTION__, |
1426 | 0 | fr_type_to_str(da_stack->da[depth]->type)); |
1427 | 0 | return PAIR_ENCODE_FATAL_ERROR; |
1428 | | |
1429 | 0 | default: |
1430 | | /* |
1431 | | * Attribute 0 is fine as a TLV leaf, or VSA, but not |
1432 | | * in the original standards space. |
1433 | | */ |
1434 | 0 | if (((fr_dict_vendor_num_by_da(da_stack->da[depth]) == 0) && (da_stack->da[depth]->attr == 0)) || |
1435 | 0 | (da_stack->da[depth]->attr > UINT8_MAX)) { |
1436 | 0 | fr_strerror_printf("%s: Called with non-standard attribute %u", __FUNCTION__, vp->da->attr); |
1437 | 0 | return 0; |
1438 | 0 | } |
1439 | 0 | break; |
1440 | 0 | } |
1441 | | |
1442 | | /* |
1443 | | * Only CUI is allowed to have zero length. |
1444 | | * Thank you, WiMAX! |
1445 | | */ |
1446 | 0 | if ((vp->da == attr_chargeable_user_identity) && (vp->vp_length == 0)) { |
1447 | 0 | fr_dbuff_in_bytes(&work_dbuff, (uint8_t)vp->da->attr, 0x02); |
1448 | |
|
1449 | 0 | FR_PROTO_HEX_DUMP(fr_dbuff_current(&start), 2, "header rfc"); |
1450 | |
|
1451 | 0 | vp = fr_dcursor_next(cursor); |
1452 | 0 | fr_proto_da_stack_build(da_stack, vp ? vp->da : NULL); |
1453 | 0 | return fr_dbuff_set(dbuff, &work_dbuff); |
1454 | 0 | } |
1455 | | |
1456 | | /* |
1457 | | * Message-Authenticator is hard-coded. |
1458 | | */ |
1459 | 0 | if (vp->da == attr_message_authenticator) { |
1460 | 0 | if (!packet_ctx->seen_message_authenticator) { |
1461 | 0 | FR_DBUFF_IN_BYTES_RETURN(&work_dbuff, (uint8_t)vp->da->attr, 18); |
1462 | 0 | FR_DBUFF_MEMSET_RETURN(&work_dbuff, 0, RADIUS_MESSAGE_AUTHENTICATOR_LENGTH); |
1463 | | |
1464 | 0 | FR_PROTO_HEX_DUMP(fr_dbuff_current(&start) + 2, RADIUS_MESSAGE_AUTHENTICATOR_LENGTH, |
1465 | 0 | "message-authenticator"); |
1466 | 0 | FR_PROTO_HEX_DUMP(fr_dbuff_current(&start), 2, "header rfc"); |
1467 | |
|
1468 | 0 | packet_ctx->seen_message_authenticator = true; |
1469 | 0 | } |
1470 | | |
1471 | 0 | vp = fr_dcursor_next(cursor); |
1472 | 0 | fr_proto_da_stack_build(da_stack, vp ? vp->da : NULL); |
1473 | 0 | return fr_dbuff_set(dbuff, &work_dbuff); |
1474 | 0 | } |
1475 | | |
1476 | | /* |
1477 | | * NAS-Filter-Rule has a stupid format in order to save |
1478 | | * one byte per attribute. |
1479 | | */ |
1480 | 0 | if (vp->da == attr_nas_filter_rule) { |
1481 | 0 | return encode_nas_filter_rule(dbuff, da_stack, depth, cursor, encode_ctx); |
1482 | 0 | } |
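	/*
	 *	Illustration (editor's addition, not part of the original source): RFC 4849 packs
	 *	several filter rules into one attribute, delimiting them with NUL (0x00) octets,
	 *	and lets the data spill over into further NAS-Filter-Rule attributes when it
	 *	exceeds 253 octets:
	 *
	 *		Type=92  Length  "rule-1" 00 "rule-2" 00 "rule-3" ...
	 *
	 *	One NUL per rule is cheaper than a fresh 2-octet Type/Length header per rule,
	 *	which is the byte saving the comment above refers to.
	 */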
1483 | | |
1484 | | /* |
1485 | | * Once we've checked for various top-level magic, RFC attributes are just TLVs. |
1486 | | */ |
1487 | 0 | return encode_child(dbuff, da_stack, depth, cursor, encode_ctx); |
1488 | 0 | } |
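Once the special cases above are handled, a standard RFC attribute is just a Type/Length/Value triple. The following stand-alone sketch (an editor's addition, not part of encode.c; rfc_attr_sketch is a hypothetical helper) shows the RFC 2865 layout that encode_child() ultimately produces for a simple leaf attribute:

#include <stdint.h>
#include <string.h>

/* RFC 2865: Type (1 octet), Length (1 octet, includes the 2-octet header), Value. */
static size_t rfc_attr_sketch(uint8_t *out, size_t outlen, uint8_t type,
			      uint8_t const *value, size_t vlen)
{
	if (vlen > UINT8_MAX - 2) return 0;	/* value is capped at 253 octets */
	if (outlen < vlen + 2) return 0;	/* not enough room in the output buffer */

	out[0] = type;				/* e.g. 1 for User-Name */
	out[1] = (uint8_t)(vlen + 2);		/* length counts the header octets too */
	memcpy(out + 2, value, vlen);

	return vlen + 2;
}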
1489 | | |
1490 | | /** Encode a data structure into a RADIUS attribute |
1491 | | * |
1492 | |  * This is the main entry point into the encoder.  It builds the da_stack we |
1493 | |  * use for tracking our TLV/VSA nesting and then calls the appropriate |
1494 | |  * dispatch function. |
1495 | | * |
1496 | | * @param[out] dbuff Where to write encoded data. |
1497 | |  * @param[in] cursor		Cursor specifying the attribute to encode. |
1498 | | * @param[in] encode_ctx Additional data such as the shared secret to use. |
1499 | | * @return |
1500 | |  *	- >0 The number of bytes written to dbuff. |
1501 | | * - 0 Nothing to encode (or attribute skipped). |
1502 | | * - <0 an error occurred. |
1503 | | */ |
1504 | | ssize_t fr_radius_encode_pair(fr_dbuff_t *dbuff, fr_dcursor_t *cursor, void *encode_ctx) |
1505 | 0 | { |
1506 | 0 | fr_pair_t const *vp; |
1507 | 0 | ssize_t slen; |
1508 | 0 | fr_dbuff_t work_dbuff = FR_DBUFF(dbuff); |
1509 | |
1510 | 0 | fr_da_stack_t da_stack; |
1511 | 0 | fr_dict_attr_t const *da = NULL; |
1512 | |
1513 | 0 | if (!cursor) return PAIR_ENCODE_FATAL_ERROR; |
1514 | | |
1515 | 0 | vp = fr_dcursor_current(cursor); |
1516 | 0 | if (!vp) return 0; |
1517 | | |
1518 | 0 | PAIR_VERIFY(vp); |
1519 | |
1520 | 0 | if (vp->da->depth > FR_DICT_MAX_TLV_STACK) { |
1521 | 0 | fr_strerror_printf("%s: Attribute depth %i exceeds maximum nesting depth %i", |
1522 | 0 | __FUNCTION__, vp->da->depth, FR_DICT_MAX_TLV_STACK); |
1523 | 0 | return PAIR_ENCODE_FATAL_ERROR; |
1524 | 0 | } |
1525 | | |
1526 | | /* |
1527 | | * Tags are *top-level*, and are never nested. |
1528 | | */ |
1529 | 0 | if ((vp->vp_type == FR_TYPE_GROUP) && vp->da->flags.internal && |
1530 | 0 | (vp->da->attr > FR_TAG_BASE) && (vp->da->attr < (FR_TAG_BASE + 0x20))) { |
1531 | 0 | fr_radius_encode_ctx_t *packet_ctx = encode_ctx; |
1532 | |
1533 | 0 | packet_ctx->tag = vp->da->attr - FR_TAG_BASE; |
1534 | 0 | fr_assert(packet_ctx->tag > 0); |
1535 | 0 | fr_assert(packet_ctx->tag < 0x20); |
1536 | | |
1537 | | 		/* recurse to encode the children of this attribute */ |
1538 | 0 | slen = encode_pairs(&work_dbuff, &vp->vp_group, encode_ctx); |
1539 | 0 | packet_ctx->tag = 0; |
1540 | 0 | if (slen < 0) return slen; |
1541 | | |
1542 | 0 | fr_dcursor_next(cursor); /* skip the tag attribute */ |
1543 | 0 | return fr_dbuff_set(dbuff, &work_dbuff); |
1544 | 0 | } |
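	/*
	 *	Illustration (editor's addition, not part of the original source): an RFC 2868
	 *	tag is a single octet in the range 0x01-0x1f, carried as the first octet of each
	 *	tagged attribute's value so that attributes describing the same tunnel can be
	 *	grouped.  e.g. Tunnel-Type (64) with tag 1 and value L2TP (3) is encoded as:
	 *
	 *		40 06 01 00 00 03
	 */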
1545 | | |
1546 | | /* |
1547 | | * Check for zero-length attributes. |
1548 | | */ |
1549 | 0 | switch (vp->vp_type) { |
1550 | 0 | default: |
1551 | 0 | break; |
1552 | | |
1553 | | /* |
1554 | | * Only variable length data types can be |
1555 | | * variable sized. All others have fixed size. |
1556 | | */ |
1557 | 0 | case FR_TYPE_STRING: |
1558 | 0 | case FR_TYPE_OCTETS: |
1559 | | /* |
1560 | | * Zero-length strings are allowed for CUI |
1561 | | * (thanks WiMAX!), and for |
1562 | | * Message-Authenticator, because we will |
1563 | | * automagically generate that one ourselves. |
1564 | | */ |
1565 | 0 | if ((vp->vp_length == 0) && |
1566 | 0 | (vp->da != attr_chargeable_user_identity) && |
1567 | 0 | (vp->da != attr_message_authenticator)) { |
1568 | 0 | fr_dcursor_next(cursor); |
1569 | 0 | fr_strerror_const("Zero length string attributes not allowed"); |
1570 | 0 | return 0; |
1571 | 0 | } |
1572 | 0 | break; |
1573 | 0 | } |
1574 | | |
1575 | | /* |
1576 | | * Nested structures of attributes can't be longer than |
1577 | | * 255 bytes, so each call to an encode function can |
1578 | | * only use 255 bytes of buffer space at a time. |
1579 | | */ |
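	/*
	 *	Editor's note (addition, not part of the original source): the limit comes from
	 *	the one-octet Length field -- 255 octets total, of which 2 are the Type/Length
	 *	header, leaving at most 253 octets of value per attribute.  The child encoders
	 *	therefore work through length-capped dbuff windows so that a single attribute
	 *	can never consume more than 255 octets of output.
	 */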
1580 | | |
1581 | | /* |
1582 | | * Fast path for the common case. |
1583 | | */ |
1584 | 0 | if (vp->da->parent->flags.is_root && !vp->da->flags.subtype) { |
1585 | 0 | switch (vp->vp_type) { |
1586 | 0 | case FR_TYPE_LEAF: |
1587 | 0 | da_stack.da[0] = vp->da; |
1588 | 0 | da_stack.da[1] = NULL; |
1589 | 0 | da_stack.depth = 1; |
1590 | 0 | FR_PROTO_STACK_PRINT(&da_stack, 0); |
1591 | 0 | slen = encode_rfc(&work_dbuff, &da_stack, 0, cursor, encode_ctx); |
1592 | 0 | if (slen < 0) return slen; |
1593 | 0 | return fr_dbuff_set(dbuff, &work_dbuff); |
1594 | | |
1595 | 0 | default: |
1596 | 0 | break; |
1597 | 0 | } |
1598 | 0 | } |
1599 | | |
1600 | | /* |
1601 | | * Do more work to set up the stack for the complex case. |
1602 | | */ |
1603 | 0 | fr_proto_da_stack_build(&da_stack, vp->da); |
1604 | 0 | FR_PROTO_STACK_PRINT(&da_stack, 0); |
1605 | | |
1606 | | /* |
1607 | | * Top-level attributes get treated specially. Things |
1608 | | * like VSAs inside of extended attributes are handled |
1609 | | * inside of type-specific encoders. |
1610 | | */ |
1611 | 0 | da = da_stack.da[0]; |
1612 | 0 | switch (da->type) { |
1613 | 0 | case FR_TYPE_OCTETS: |
1614 | 0 | if (flag_concat(&da->flags)) { |
1615 | | /* |
1616 | | * Attributes like EAP-Message are marked as |
1617 | | * "concat", which means that they are fragmented |
1618 | | * using a different scheme than the "long |
1619 | | * extended" one. |
1620 | | */ |
1621 | 0 | slen = encode_concat(&work_dbuff, &da_stack, 0, cursor, encode_ctx); |
1622 | 0 | if (slen < 0) return slen; |
1623 | 0 | break; |
1624 | 0 | } |
1625 | 0 | FALL_THROUGH; |
1626 | | |
1627 | 0 | default: |
1628 | 0 | slen = encode_rfc(&work_dbuff, &da_stack, 0, cursor, encode_ctx); |
1629 | 0 | if (slen < 0) return slen; |
1630 | 0 | break; |
1631 | | |
1632 | 0 | case FR_TYPE_VSA: |
1633 | 0 | slen = encode_vsa(&work_dbuff, &da_stack, 0, cursor, encode_ctx); |
1634 | 0 | if (slen < 0) return slen; |
1635 | 0 | break; |
1636 | | |
1637 | 0 | case FR_TYPE_TLV: |
1638 | 0 | if (!flag_extended(&da->flags)) { |
1639 | 0 | slen = encode_child(&work_dbuff, &da_stack, 0, cursor, encode_ctx); |
1640 | |
1641 | 0 | } else if (vp->da != da) { |
1642 | 0 | fr_strerror_printf("extended attributes must be nested"); |
1643 | 0 | return PAIR_ENCODE_FATAL_ERROR; |
1644 | |
1645 | 0 | } else { |
1646 | 0 | slen = encode_extended_nested(&work_dbuff, &da_stack, 0, cursor, encode_ctx); |
1647 | 0 | } |
1648 | 0 | if (slen < 0) return slen; |
1649 | 0 | break; |
1650 | | |
1651 | 0 | case FR_TYPE_NULL: |
1652 | 0 | case FR_TYPE_VENDOR: |
1653 | 0 | case FR_TYPE_MAX: |
1654 | 0 | fr_strerror_printf("%s: Cannot encode attribute %s", __FUNCTION__, vp->da->name); |
1655 | 0 | return PAIR_ENCODE_FATAL_ERROR; |
1656 | 0 | } |
1657 | | |
1658 | | 	/* |
1659 | | 	 *	The encoder made no progress: the cursor still points at the same attribute. |
1660 | | 	 */ |
1661 | 0 | if (fr_dcursor_current(cursor) == vp) { |
1662 | 0 | fr_strerror_printf("%s: Nested attribute structure too large to encode", __FUNCTION__); |
1663 | 0 | return PAIR_ENCODE_FATAL_ERROR; |
1664 | 0 | } |
1665 | | |
1666 | 0 | return fr_dbuff_set(dbuff, &work_dbuff); |
1667 | 0 | } |
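A hedged usage sketch for the entry point above (an editor's addition, not part of encode.c; encode_all_sketch and its parameters are hypothetical, while the dbuff/dcursor calls are the standard FreeRADIUS utility APIs). It calls fr_radius_encode_pair() once per top-level attribute until the list is exhausted:

static ssize_t encode_all_sketch(fr_pair_list_t *list, fr_radius_encode_ctx_t *encode_ctx,
				 uint8_t *out, size_t outlen)
{
	fr_dbuff_t	dbuff;
	fr_dcursor_t	cursor;
	fr_pair_t	*vp, *next;
	ssize_t		slen;

	fr_dbuff_init(&dbuff, out, outlen);

	for (vp = fr_pair_dcursor_init(&cursor, list); vp; vp = next) {
		slen = fr_radius_encode_pair(&dbuff, &cursor, encode_ctx);
		if (slen < 0) return slen;	/* encoding error */

		/*
		 *	The encoder advances the cursor itself.  If it didn't move,
		 *	nothing more can be encoded, so stop rather than spin.
		 */
		next = fr_dcursor_current(&cursor);
		if (next == vp) break;
	}

	return fr_dbuff_used(&dbuff);
}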
1668 | | |
1669 | | ssize_t fr_radius_encode_foreign(fr_dbuff_t *dbuff, fr_pair_list_t const *list) |
1670 | 0 | { |
1671 | 0 | fr_radius_ctx_t common_ctx = {}; |
1672 | 0 | fr_radius_encode_ctx_t encode_ctx = { |
1673 | 0 | .common = &common_ctx, |
1674 | 0 | }; |
1675 | | |
1676 | | /* |
1677 | | * Just in case we need random numbers. |
1678 | | */ |
1679 | 0 | encode_ctx.rand_ctx.a = fr_rand(); |
1680 | 0 | encode_ctx.rand_ctx.b = fr_rand(); |
1681 | | |
1682 | | /* |
1683 | | * Encode the pairs. |
1684 | | */ |
1685 | 0 | return encode_pairs(dbuff, list, &encode_ctx); |
1686 | 0 | } |
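fr_radius_encode_foreign() wraps encode_pairs() with an empty common context, so a caller embedding RADIUS attributes inside another protocol needs no shared secret or packet header. A minimal hedged call sketch (an editor's addition; encode_foreign_sketch is a hypothetical name):

static ssize_t encode_foreign_sketch(fr_pair_list_t const *list, uint8_t *out, size_t outlen)
{
	fr_dbuff_t	dbuff;

	fr_dbuff_init(&dbuff, out, outlen);

	/* Returns the number of octets written, or a negative value on error. */
	return fr_radius_encode_foreign(&dbuff, list);
}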
1687 | | |
1688 | | |
1689 | | static int encode_test_ctx(void **out, TALLOC_CTX *ctx) |
1690 | 0 | { |
1691 | 0 | static uint8_t vector[RADIUS_AUTH_VECTOR_LENGTH] = { |
1692 | 0 | 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, |
1693 | 0 | 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f }; |
1694 | |
|
1695 | 0 | fr_radius_encode_ctx_t *test_ctx; |
1696 | 0 | fr_radius_ctx_t *common; |
1697 | |
1698 | 0 | test_ctx = talloc_zero(ctx, fr_radius_encode_ctx_t); |
1699 | 0 | if (!test_ctx) return -1; |
1700 | | |
1701 | 0 | test_ctx->common = common = talloc_zero(test_ctx, fr_radius_ctx_t); |
1702 | |
1703 | 0 | common->secret = talloc_strdup(test_ctx->common, "testing123"); |
1704 | 0 | common->secret_length = talloc_array_length(test_ctx->common->secret) - 1; |
1705 | | |
1706 | | 	/* |
1707 | | 	 *	Pretend the transport is secure, so the encoder doesn't automatically add Message-Authenticator. |
1708 | | 	 */ |
1709 | 0 | common->secure_transport = true; |
1710 | |
1711 | 0 | test_ctx->request_authenticator = vector; |
1712 | 0 | test_ctx->rand_ctx.a = 6809; |
1713 | 0 | test_ctx->rand_ctx.b = 2112; |
1714 | |
1715 | 0 | *out = test_ctx; |
1716 | |
1717 | 0 | return 0; |
1718 | 0 | } |
1719 | | |
1720 | | static ssize_t fr_radius_encode_proto(TALLOC_CTX *ctx, fr_pair_list_t *vps, uint8_t *data, size_t data_len, void *proto_ctx) |
1721 | 0 | { |
1722 | 0 | fr_radius_encode_ctx_t *packet_ctx = talloc_get_type_abort(proto_ctx, fr_radius_encode_ctx_t); |
1723 | 0 | int packet_type = FR_RADIUS_CODE_ACCESS_REQUEST; |
1724 | 0 | fr_pair_t *vp; |
1725 | 0 | ssize_t slen; |
1726 | |
|
1727 | 0 | vp = fr_pair_find_by_da(vps, NULL, attr_packet_type); |
1728 | 0 | if (vp) packet_type = vp->vp_uint32; |
1729 | | |
1730 | | /* |
1731 | | * Force specific values for testing. |
1732 | | */ |
1733 | 0 | if ((packet_type == FR_RADIUS_CODE_ACCESS_REQUEST) || (packet_type == FR_RADIUS_CODE_STATUS_SERVER)) { |
1734 | 0 | vp = fr_pair_find_by_da(vps, NULL, attr_packet_authentication_vector); |
1735 | 0 | if (!vp) { |
1736 | 0 | int i; |
1737 | 0 | uint8_t vector[RADIUS_AUTH_VECTOR_LENGTH]; |
1738 | |
1739 | 0 | for (i = 0; i < RADIUS_AUTH_VECTOR_LENGTH; i++) { |
1740 | 0 | data[4 + i] = fr_fast_rand(&packet_ctx->rand_ctx); |
1741 | 0 | } |
1742 | |
1743 | 0 | fr_pair_list_append_by_da_len(ctx, vp, vps, attr_packet_authentication_vector, vector, sizeof(vector), false); |
1744 | 0 | } |
1745 | 0 | } |
1746 | | |
1747 | 0 | packet_ctx->code = packet_type; |
1748 | | |
1749 | | /* |
1750 | | * @todo - pass in packet_ctx to this function, so that we |
1751 | | * can leverage a consistent random number generator. |
1752 | | */ |
1753 | 0 | slen = fr_radius_encode(&FR_DBUFF_TMP(data, data_len), vps, packet_ctx); |
1754 | 0 | if (slen <= 0) return slen; |
1755 | | |
1756 | 0 | if (fr_radius_sign(data, NULL, (uint8_t const *) packet_ctx->common->secret, talloc_array_length(packet_ctx->common->secret) - 1) < 0) { |
1757 | 0 | return -1; |
1758 | 0 | } |
1759 | | |
1760 | 0 | return slen; |
1761 | 0 | } |
1762 | | |
1763 | | /* |
1764 | | * No one else should be using this. |
1765 | | */ |
1766 | | extern void *fr_radius_next_encodable(fr_dlist_head_t *list, void *to_eval, void *uctx); |
1767 | | |
1768 | | /* |
1769 | | * Test points |
1770 | | */ |
1771 | | extern fr_test_point_pair_encode_t radius_tp_encode_pair; |
1772 | | fr_test_point_pair_encode_t radius_tp_encode_pair = { |
1773 | | .test_ctx = encode_test_ctx, |
1774 | | .func = fr_radius_encode_pair, |
1775 | | .next_encodable = fr_radius_next_encodable, |
1776 | | }; |
1777 | | |
1778 | | |
1779 | | extern fr_test_point_proto_encode_t radius_tp_encode_proto; |
1780 | | fr_test_point_proto_encode_t radius_tp_encode_proto = { |
1781 | | .test_ctx = encode_test_ctx, |
1782 | | .func = fr_radius_encode_proto |
1783 | | }; |
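The exported test point structures above carry the encoder entry points by well-known symbol name, presumably so a test or fuzzing harness can resolve them from the protocol shared object at runtime. A hedged sketch of that lookup (an editor's addition; the harness details are assumptions, only dlsym() and the symbol name come from the source):

#include <dlfcn.h>

/* Hypothetical: locate the pair-encoder test point in an already-dlopen()ed protocol library. */
static fr_test_point_pair_encode_t *find_encode_tp(void *handle)
{
	return (fr_test_point_pair_encode_t *)dlsym(handle, "radius_tp_encode_pair");
}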