/src/freeradius-server/src/protocols/radius/encode.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * This library is free software; you can redistribute it and/or |
3 | | * modify it under the terms of the GNU Lesser General Public |
4 | | * License as published by the Free Software Foundation; either |
5 | | * version 2.1 of the License, or (at your option) any later version. |
6 | | * |
7 | | * This library is distributed in the hope that it will be useful, |
8 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
9 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
10 | | * Lesser General Public License for more details. |
11 | | * |
12 | | * You should have received a copy of the GNU Lesser General Public |
13 | | * License along with this library; if not, write to the Free Software |
14 | | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA |
15 | | */ |
16 | | |
17 | | /** |
18 | | * $Id: 0300ec8b2194816d1e6dc2e044b799598b772be9 $ |
19 | | * |
20 | | * @file protocols/radius/encode.c |
21 | | * @brief Functions to encode RADIUS attributes |
22 | | * |
23 | | * @copyright 2000-2003,2006-2015 The FreeRADIUS server project |
24 | | */ |
25 | | RCSID("$Id: 0300ec8b2194816d1e6dc2e044b799598b772be9 $") |
26 | | |
27 | | #include <freeradius-devel/util/dbuff.h> |
28 | | #include <freeradius-devel/util/md5.h> |
29 | | #include <freeradius-devel/util/struct.h> |
30 | | #include <freeradius-devel/io/test_point.h> |
31 | | #include <freeradius-devel/protocol/radius/freeradius.internal.h> |
32 | | #include "attrs.h" |
33 | | |
34 | 0 | #define TAG_VALID(x) ((x) > 0 && (x) < 0x20) |
35 | | |
36 | | static ssize_t encode_value(fr_dbuff_t *dbuff, |
37 | | fr_da_stack_t *da_stack, unsigned int depth, |
38 | | fr_dcursor_t *cursor, void *encode_ctx); |
39 | | |
40 | | static ssize_t encode_child(fr_dbuff_t *dbuff, |
41 | | fr_da_stack_t *da_stack, unsigned int depth, |
42 | | fr_dcursor_t *cursor, void *encode_ctx); |
43 | | |
44 | | /** "encrypt" a password RADIUS style |
45 | | * |
46 | | * Input and output buffers can be identical if in-place encryption is needed. |
47 | | */ |
48 | | static ssize_t encode_password(fr_dbuff_t *dbuff, fr_dbuff_marker_t *input, size_t inlen, |
49 | | char const *secret, uint8_t const *vector) |
50 | 0 | { |
51 | 0 | fr_md5_ctx_t *md5_ctx, *md5_ctx_old; |
52 | 0 | uint8_t digest[RADIUS_AUTH_VECTOR_LENGTH]; |
53 | 0 | uint8_t passwd[RADIUS_MAX_PASS_LENGTH] = {0}; |
54 | 0 | size_t i, n; |
55 | 0 | size_t len; |
56 | | |
57 | | /* |
58 | | * If the length is zero, round it up. |
59 | | */ |
60 | 0 | len = inlen; |
61 | |
|
62 | 0 | if (len > RADIUS_MAX_PASS_LENGTH) len = RADIUS_MAX_PASS_LENGTH; |
63 | |
|
64 | 0 | (void) fr_dbuff_out_memcpy(passwd, input, len); |
65 | 0 | if (len < sizeof(passwd)) memset(passwd + len, 0, sizeof(passwd) - len); |
66 | |
|
67 | 0 | if (len == 0) len = AUTH_PASS_LEN; |
68 | 0 | else if ((len & 0x0f) != 0) { |
69 | 0 | len += 0x0f; |
70 | 0 | len &= ~0x0f; |
71 | 0 | } |
72 | |
|
73 | 0 | md5_ctx = fr_md5_ctx_alloc_from_list(); |
74 | 0 | md5_ctx_old = fr_md5_ctx_alloc_from_list(); |
75 | |
|
76 | 0 | fr_md5_update(md5_ctx, (uint8_t const *) secret, talloc_array_length(secret) - 1); |
77 | 0 | fr_md5_ctx_copy(md5_ctx_old, md5_ctx); |
78 | | |
79 | | /* |
80 | | * Do first pass. |
81 | | */ |
82 | 0 | fr_md5_update(md5_ctx, vector, AUTH_PASS_LEN); |
83 | |
|
84 | 0 | for (n = 0; n < len; n += AUTH_PASS_LEN) { |
85 | 0 | if (n > 0) { |
86 | 0 | fr_md5_ctx_copy(md5_ctx, md5_ctx_old); |
87 | 0 | fr_md5_update(md5_ctx, passwd + n - AUTH_PASS_LEN, AUTH_PASS_LEN); |
88 | 0 | } |
89 | |
|
90 | 0 | fr_md5_final(digest, md5_ctx); |
91 | 0 | for (i = 0; i < AUTH_PASS_LEN; i++) passwd[i + n] ^= digest[i]; |
92 | 0 | } |
93 | |
|
94 | 0 | fr_md5_ctx_free_from_list(&md5_ctx); |
95 | 0 | fr_md5_ctx_free_from_list(&md5_ctx_old); |
96 | |
|
97 | 0 | return fr_dbuff_in_memcpy(dbuff, passwd, len); |
98 | 0 | } |
99 | | |
100 | | |
/** Encode a Tunnel-Password style attribute value
 *
 *	Output layout is: 2 octets of salt, then an encrypted region whose
 *	first octet is the cleartext length, followed by the cleartext and
 *	random padding, XORed with a chained MD5 keystream.
 *
 * @param[out] dbuff		where the salted/encrypted value is written.
 * @param[in] in		marker pointing at the cleartext password.
 * @param[in] inlen		length of the cleartext password.
 * @param[in] encode_ctx	a #fr_radius_ctx_t holding secret, vector and PRNG state.
 * @return the number of bytes written, or < 0 on error.
 */
static ssize_t encode_tunnel_password(fr_dbuff_t *dbuff, fr_dbuff_marker_t *in, size_t inlen, void *encode_ctx)
{
	fr_md5_ctx_t	*md5_ctx, *md5_ctx_old;
	uint8_t		digest[RADIUS_AUTH_VECTOR_LENGTH];
	uint8_t		tpasswd[RADIUS_MAX_STRING_LENGTH];
	size_t		i, n;
	fr_radius_ctx_t	*packet_ctx = encode_ctx;
	uint32_t	r;
	size_t		output_len, encrypted_len, padding;
	ssize_t		slen;
	fr_dbuff_t	work_dbuff = FR_DBUFF_MAX(dbuff, RADIUS_MAX_STRING_LENGTH);

	/*
	 *	Limit the maximum size of the input password.  2 bytes
	 *	are taken up by the salt, and one by the encoded
	 *	"length" field.
	 */
	if (inlen > (RADIUS_MAX_STRING_LENGTH - 3)) {
	fail:
		fr_strerror_const("Input password is too large for tunnel password encoding");
		return -(inlen + 3);
	}

	/*
	 *	Length of the encrypted data is the clear-text
	 *	password length plus one byte which encodes the length
	 *	of the password.  We round up to the nearest encoding
	 *	block, and bound it by the size of the output buffer,
	 *	while accounting for 2 bytes of salt.
	 *
	 *	And also ensuring that we don't truncate the input
	 *	password.
	 */
	encrypted_len = ROUND_UP(inlen + 1, 16);
	if (encrypted_len > (RADIUS_MAX_STRING_LENGTH - 2)) encrypted_len = (RADIUS_MAX_STRING_LENGTH - 2);

	/*
	 *	Get the number of padding bytes in the last block.
	 */
	padding = encrypted_len - (inlen + 1);

	output_len = encrypted_len + 2;	/* account for the salt */

	/*
	 *	We will have up to 253 octets of data in the output
	 *	buffer, some of which are padding.
	 *
	 *	If we over-run the output buffer, see if we can drop
	 *	some of the padding bytes.  If not, we return an error
	 *	instead of truncating the password.
	 *
	 *	Otherwise we lower the amount of data we copy into the
	 *	output buffer, because the last bit is just padding,
	 *	and can be safely discarded.
	 */
	slen = fr_dbuff_set(&work_dbuff, output_len);
	if (slen < 0) {
		/* -slen is the shortfall; only padding may be dropped. */
		if (((size_t) -slen) > padding) goto fail;

		output_len += slen;
	}
	fr_dbuff_set_to_start(&work_dbuff);

	/*
	 *	Copy the password over, and fill the remainder with random data.
	 *	Layout of tpasswd: [0..1] salt, [2] length octet, [3..] cleartext.
	 */
	(void) fr_dbuff_out_memcpy(tpasswd + 3, in, inlen);

	for (i = 3 + inlen; i < sizeof(tpasswd); i++) {
		tpasswd[i] = fr_fast_rand(&packet_ctx->rand_ctx);
	}

	/*
	 *	Generate salt.  The RFCs say:
	 *
	 *	The high bit of salt[0] must be set, each salt in a
	 *	packet should be unique, and they should be random
	 *
	 *	So, we set the high bit, add in a counter, and then
	 *	add in some PRNG data.  should be OK..
	 */
	r = fr_fast_rand(&packet_ctx->rand_ctx);
	tpasswd[0] = (0x80 | (((packet_ctx->salt_offset++) & 0x07) << 4) | ((r >> 8) & 0x0f));
	tpasswd[1] = r & 0xff;
	tpasswd[2] = inlen;	/* length of the password string */

	md5_ctx = fr_md5_ctx_alloc_from_list();
	md5_ctx_old = fr_md5_ctx_alloc_from_list();

	/* md5_ctx_old caches MD5(secret) for restarting each block. */
	fr_md5_update(md5_ctx, (uint8_t const *) packet_ctx->secret, talloc_array_length(packet_ctx->secret) - 1);
	fr_md5_ctx_copy(md5_ctx_old, md5_ctx);

	/* First block keystream: MD5(secret + vector + salt). */
	fr_md5_update(md5_ctx, packet_ctx->vector, RADIUS_AUTH_VECTOR_LENGTH);
	fr_md5_update(md5_ctx, &tpasswd[0], 2);

	/*
	 *	Do various hashing, and XOR the length+password with
	 *	the output of the hash blocks.
	 */
	for (n = 0; n < encrypted_len; n += AUTH_PASS_LEN) {
		size_t block_len;

		/* Later blocks chain off the previous ciphertext block. */
		if (n > 0) {
			fr_md5_ctx_copy(md5_ctx, md5_ctx_old);
			fr_md5_update(md5_ctx, tpasswd + 2 + n - AUTH_PASS_LEN, AUTH_PASS_LEN);
		}
		fr_md5_final(digest, md5_ctx);

		block_len = encrypted_len - n;
		if (block_len > AUTH_PASS_LEN) block_len = AUTH_PASS_LEN;

		/* XOR in place; "+ 2" skips the unencrypted salt octets. */
		for (i = 0; i < block_len; i++) tpasswd[i + 2 + n] ^= digest[i];
	}

	fr_md5_ctx_free_from_list(&md5_ctx);
	fr_md5_ctx_free_from_list(&md5_ctx_old);

	FR_DBUFF_IN_MEMCPY_RETURN(&work_dbuff, tpasswd, output_len);

	return fr_dbuff_set(dbuff, &work_dbuff);
}
222 | | |
223 | | /* |
224 | | * Encode the contents of an attribute of type TLV. |
225 | | */ |
/*
 *	Encode the contents of an attribute of type TLV.
 *
 *	Loops over the pairs at this level of the da_stack, encoding
 *	each child via encode_child(), or recursing into grouped
 *	children when the stack ends at this depth.  Stops when the
 *	cursor is exhausted or moves to an attribute outside this TLV.
 */
static ssize_t encode_tlv(fr_dbuff_t *dbuff,
			  fr_da_stack_t *da_stack, unsigned int depth,
			  fr_dcursor_t *cursor, void *encode_ctx)
{
	ssize_t			slen;
	fr_pair_t const		*vp = fr_dcursor_current(cursor);
	fr_dict_attr_t const	*da = da_stack->da[depth];
	fr_dbuff_t		work_dbuff = FR_DBUFF_MAX(dbuff, RADIUS_MAX_STRING_LENGTH);

	for (;;) {
		FR_PROTO_STACK_PRINT(da_stack, depth);

		/*
		 *	This attribute carries sub-TLVs.  The sub-TLVs
		 *	can only carry a total of 253 bytes of data.
		 */

		/*
		 *	Determine the nested type and call the appropriate encoder
		 */
		if (!da_stack->da[depth + 1]) {
			fr_dcursor_t child_cursor;

			/* A TLV with no children at this depth cannot be encoded. */
			if (vp->da != da_stack->da[depth]) {
				fr_strerror_printf("%s: Can't encode empty TLV", __FUNCTION__);
				return PAIR_ENCODE_SKIPPED;
			}

			fr_pair_dcursor_child_iter_init(&child_cursor, &vp->vp_group, cursor);
			vp = fr_dcursor_current(&child_cursor);
			fr_proto_da_stack_build(da_stack, vp->da);

			/*
			 *	Call ourselves recursively to encode children.
			 */
			slen = encode_tlv(&work_dbuff, da_stack, depth, &child_cursor, encode_ctx);
			if (slen < 0) {
				if (slen == PAIR_ENCODE_SKIPPED) continue;
				return slen;
			}

			/* Advance past the group pair we just consumed. */
			vp = fr_dcursor_next(cursor);
			fr_proto_da_stack_build(da_stack, vp ? vp->da : NULL);

		} else {
			slen = encode_child(&work_dbuff, da_stack, depth + 1, cursor, encode_ctx);
		}
		if (slen < 0) {
			if (slen == PAIR_ENCODE_SKIPPED) continue;
			return slen;
		}

		/*
		 *	If nothing updated the attribute, stop
		 */
		if (!fr_dcursor_current(cursor) || (vp == fr_dcursor_current(cursor))) break;

		/*
		 *	We can encode multiple sub TLVs, if after
		 *	rebuilding the TLV Stack, the attribute
		 *	at this depth is the same.
		 */
		if ((da != da_stack->da[depth]) || (da_stack->depth < da->depth)) break;
		vp = fr_dcursor_current(cursor);
	}

	return fr_dbuff_set(dbuff, &work_dbuff);
}
294 | | |
295 | | static ssize_t encode_tags(fr_dbuff_t *dbuff, fr_pair_list_t const *vps, void *encode_ctx) |
296 | 0 | { |
297 | 0 | ssize_t slen; |
298 | 0 | fr_pair_t const *vp; |
299 | 0 | fr_dcursor_t cursor; |
300 | | |
301 | | /* |
302 | | * Note that we skip tags inside of tags! |
303 | | */ |
304 | 0 | fr_pair_dcursor_iter_init(&cursor, vps, fr_proto_next_encodable, dict_radius); |
305 | 0 | while ((vp = fr_dcursor_current(&cursor))) { |
306 | 0 | PAIR_VERIFY(vp); |
307 | | |
308 | | /* |
309 | | * Encode an individual VP |
310 | | */ |
311 | 0 | slen = fr_radius_encode_pair(dbuff, &cursor, encode_ctx); |
312 | 0 | if (slen < 0) { |
313 | 0 | if (slen == PAIR_ENCODE_SKIPPED) continue; |
314 | 0 | return slen; |
315 | 0 | } |
316 | 0 | } |
317 | | |
318 | 0 | return fr_dbuff_used(dbuff); |
319 | 0 | } |
320 | | |
321 | | |
322 | | /** Encodes the data portion of an attribute |
323 | | * |
324 | | * @return |
325 | | * > 0, Length of the data portion. |
326 | | * = 0, we could not encode anything, skip this attribute (and don't encode the header) |
327 | | * unless it's one of a list of exceptions. |
328 | | * < 0, How many additional bytes we'd need as a negative integer. |
329 | | * PAIR_ENCODE_FATAL_ERROR - Abort encoding the packet. |
330 | | * PAIR_ENCODE_SKIPPED - Unencodable value |
331 | | */ |
static ssize_t encode_value(fr_dbuff_t *dbuff,
			    fr_da_stack_t *da_stack, unsigned int depth,
			    fr_dcursor_t *cursor, void *encode_ctx)
{
	ssize_t			slen;
	size_t			len;
	fr_pair_t const		*vp = fr_dcursor_current(cursor);
	fr_dict_attr_t const	*da = da_stack->da[depth];
	fr_radius_ctx_t		*packet_ctx = encode_ctx;
	fr_dbuff_t		work_dbuff = FR_DBUFF(dbuff);
	fr_dbuff_t		value_dbuff;
	fr_dbuff_marker_t	value_start, src, dest;
	bool			encrypted = false;

	PAIR_VERIFY(vp);
	FR_PROTO_STACK_PRINT(da_stack, depth);

	/*
	 *	TLVs are just another type of value.
	 */
	if (da->type == FR_TYPE_TLV) return encode_tlv(dbuff, da_stack, depth, cursor, encode_ctx);

	/*
	 *	Catch errors early on.  All the "encrypt" flavours
	 *	need secret/vector from the packet context.
	 */
	if (flag_encrypted(&vp->da->flags) && !packet_ctx) {
		fr_strerror_const("Asked to encrypt attribute, but no packet context provided");
		return PAIR_ENCODE_FATAL_ERROR;
	}

	/*
	 *	This has special requirements.
	 */
	if ((vp->vp_type == FR_TYPE_STRUCT) || (da->type == FR_TYPE_STRUCT)) {
		slen = fr_struct_to_network(&work_dbuff, da_stack, depth, cursor, encode_ctx, encode_value, encode_child);
		if (slen <= 0) return slen;

		/* Struct encoder consumed pairs; rebuild the stack for the next one. */
		vp = fr_dcursor_current(cursor);
		fr_proto_da_stack_build(da_stack, vp ? vp->da : NULL);
		return fr_dbuff_set(dbuff, &work_dbuff);
	}

	/*
	 *	If it's not a TLV, it should be a value type RFC
	 *	attribute make sure that it is.
	 */
	if (da_stack->da[depth + 1] != NULL) {
		fr_strerror_printf("%s: Encoding value but not at top of stack", __FUNCTION__);
		return PAIR_ENCODE_FATAL_ERROR;
	}

	if (vp->da != da) {
		fr_strerror_printf("%s: Top of stack does not match vp->da", __FUNCTION__);
		return PAIR_ENCODE_FATAL_ERROR;
	}

	if (fr_type_is_structural(da->type)) {
		fr_strerror_printf("%s: Called with structural type %s", __FUNCTION__,
				   fr_type_to_str(da_stack->da[depth]->type));
		return PAIR_ENCODE_FATAL_ERROR;
	}

	/*
	 *	Write tag byte
	 *
	 *	The Tag field is one octet in length and is intended to provide a
	 *	means of grouping attributes in the same packet which refer to the
	 *	same tunnel.  If the value of the Tag field is greater than 0x00
	 *	and less than or equal to 0x1F, it SHOULD be interpreted as
	 *	indicating which tunnel (of several alternatives) this attribute
	 *	pertains.  If the Tag field is greater than 0x1F, it SHOULD be
	 *	interpreted as the first byte of the following String field.
	 *
	 *	If the first byte of the string value looks like a
	 *	tag, then we always encode a tag byte, even one that
	 *	is zero.
	 */
	if ((vp->vp_type == FR_TYPE_STRING) && flag_has_tag(&vp->da->flags)) {
		if (packet_ctx->tag) {
			FR_DBUFF_IN_RETURN(&work_dbuff, (uint8_t)packet_ctx->tag);
		} else if (TAG_VALID(vp->vp_strvalue[0])) {
			FR_DBUFF_IN_RETURN(&work_dbuff, (uint8_t)0x00);
		}
	}

	/*
	 *	Starting here is a value that may require encryption.
	 *	value_start marks where the cleartext value begins, so
	 *	the encrypt step below can read/rewrite it in place.
	 */
	value_dbuff = FR_DBUFF(&work_dbuff);
	fr_dbuff_marker(&value_start, &value_dbuff);
	fr_dbuff_marker(&src, &value_dbuff);
	fr_dbuff_marker(&dest, &value_dbuff);

	switch (vp->vp_type) {
	/*
	 *	IPv4 addresses are normal, but IPv6 addresses are special to RADIUS.
	 */
	case FR_TYPE_COMBO_IP_ADDR:
		if (vp->vp_ip.af == AF_INET) goto encode;
		FALL_THROUGH;

	/*
	 *	Common encoder might add scope byte, which we don't want.
	 */
	case FR_TYPE_IPV6_ADDR:
		FR_DBUFF_IN_MEMCPY_RETURN(&value_dbuff, vp->vp_ipv6addr, sizeof(vp->vp_ipv6addr));
		break;

	case FR_TYPE_COMBO_IP_PREFIX:
		if (vp->vp_ip.af == AF_INET) goto ipv4_prefix;
		FALL_THROUGH;

	/*
	 *	Common encoder doesn't add reserved byte
	 */
	case FR_TYPE_IPV6_PREFIX:
		len = fr_bytes_from_bits(vp->vp_ip.prefix);
		FR_DBUFF_IN_BYTES_RETURN(&value_dbuff, 0x00, vp->vp_ip.prefix);
		/* Only copy the minimum number of address bytes required */
		FR_DBUFF_IN_MEMCPY_RETURN(&value_dbuff, (uint8_t const *)vp->vp_ipv6addr, len);
		break;

	/*
	 *	Common encoder doesn't add reserved byte
	 */
	case FR_TYPE_IPV4_PREFIX:
	ipv4_prefix:
		FR_DBUFF_IN_BYTES_RETURN(&value_dbuff, 0x00, vp->vp_ip.prefix);
		FR_DBUFF_IN_MEMCPY_RETURN(&value_dbuff, (uint8_t const *)&vp->vp_ipv4addr, sizeof(vp->vp_ipv4addr));
		break;

	/*
	 *	Special handling for "abinary".  Otherwise, fall
	 *	through to using the common encoder.
	 */
	case FR_TYPE_STRING:
		if (flag_abinary(&da->flags)) {
			slen = fr_radius_encode_abinary(vp, &value_dbuff);
			if (slen <= 0) return slen;
			break;
		}
		FALL_THROUGH;

	case FR_TYPE_OCTETS:

	/*
	 *	Simple data types use the common encoder.
	 */
	default:
	encode:
		slen = fr_value_box_to_network(&value_dbuff, &vp->data);
		if (slen < 0) return slen;
		break;
	}

	/*
	 *	No data: don't encode the value.  The type and length should still
	 *	be written.
	 */
	if (fr_dbuff_used(&value_dbuff) == 0) {
		vp = fr_dcursor_next(cursor);
		fr_proto_da_stack_build(da_stack, vp ? vp->da : NULL);
		return 0;
	}

	/*
	 *	Encrypt the various password styles
	 *
	 *	Attributes with encrypted values MUST be less than
	 *	128 bytes long.
	 */
	if (flag_encrypted(&da->flags)) switch (vp->da->flags.subtype) {
	case FLAG_ENCRYPT_USER_PASSWORD:
		/*
		 *	Encode the password in place
		 */
		slen = encode_password(&work_dbuff, &value_start, fr_dbuff_used(&value_dbuff),
				       packet_ctx->secret, packet_ctx->vector);
		if (slen < 0) return slen;
		encrypted = true;
		break;

	case FLAG_TAGGED_TUNNEL_PASSWORD:
	case FLAG_ENCRYPT_TUNNEL_PASSWORD:
		if (packet_ctx->disallow_tunnel_passwords) {
			fr_strerror_const("Attributes with 'encrypt=2' set cannot go into this packet.");
			return PAIR_ENCODE_SKIPPED;
		}

		/*
		 *	Always encode the tag even if it's zero.
		 *
		 *	The Tunnel-Password uses 2 salt fields which
		 *	MAY have any value.  As a result, we always
		 *	encode a tag.  If we would omit the tag, then
		 *	perhaps one of the salt fields could be
		 *	mistaken for the tag.
		 */
		if (flag_has_tag(&vp->da->flags)) fr_dbuff_advance(&work_dbuff, 1);

		slen = encode_tunnel_password(&work_dbuff, &value_start, fr_dbuff_used(&value_dbuff), packet_ctx);
		if (slen < 0) {
			fr_strerror_printf("%s too long", vp->da->name);
			/* Account for the reserved tag byte in the shortfall. */
			return slen - flag_has_tag(&vp->da->flags);
		}

		/*
		 *	Do this after so we don't mess up the input
		 *	value.
		 */
		if (flag_has_tag(&vp->da->flags)) {
			fr_dbuff_set_to_start(&value_start);
			fr_dbuff_in(&value_start, (uint8_t) 0x00);
		}
		encrypted = true;
		break;

	/*
	 *	The code above ensures that this attribute
	 *	always fits.
	 */
	case FLAG_ENCRYPT_ASCEND_SECRET:
		/*
		 *	@todo radius decoding also uses fr_radius_ascend_secret() (Vernam cipher
		 *	is its own inverse).  As part of converting decode, make sure the caller
		 *	there can pass a marker so we can use it here, too.
		 */
		slen = fr_radius_ascend_secret(&work_dbuff, fr_dbuff_current(&value_start), fr_dbuff_used(&value_dbuff),
					       packet_ctx->secret, packet_ctx->vector);
		if (slen < 0) return slen;
		encrypted = true;
		break;
	}

	/* No encryption happened: commit the cleartext value as-is. */
	if (!encrypted) {
		fr_dbuff_set(&work_dbuff, &value_dbuff);
		fr_dbuff_set(&value_start, fr_dbuff_start(&value_dbuff));
	}

	/*
	 *	High byte of 32bit integers gets set to the tag
	 *	value.
	 *
	 *	The Tag field is one octet in length and is intended to provide a
	 *	means of grouping attributes in the same packet which refer to the
	 *	same tunnel.  Valid values for this field are 0x01 through 0x1F,
	 *	inclusive.  If the Tag field is unused, it MUST be zero (0x00).
	 */
	if ((vp->vp_type == FR_TYPE_UINT32) && flag_has_tag(&vp->da->flags)) {
		uint8_t msb = 0;
		/*
		 *	Only 24bit integers are allowed here
		 */
		fr_dbuff_set(&src, &value_start);
		(void) fr_dbuff_out(&msb, &src);
		if (msb != 0) {
			fr_strerror_const("Integer overflow for tagged uint32 attribute");
			return PAIR_ENCODE_SKIPPED;
		}
		fr_dbuff_set(&dest, &value_start);
		fr_dbuff_in(&dest, packet_ctx->tag);
	}

	FR_PROTO_HEX_DUMP(fr_dbuff_start(&work_dbuff), fr_dbuff_used(&work_dbuff), "value %s",
			  fr_type_to_str(vp->vp_type));

	/*
	 *	Rebuilds the TLV stack for encoding the next attribute
	 */
	vp = fr_dcursor_next(cursor);
	fr_proto_da_stack_build(da_stack, vp ? vp->da : NULL);

	return fr_dbuff_set(dbuff, &work_dbuff);
}
606 | | |
607 | | /** Breaks down large data into pieces, each with a header |
608 | | * |
609 | | * @param[out] data we're fragmenting. |
610 | | * @param[in] data_len the amount of data in the dbuff that makes up the value we're |
611 | | * splitting. |
612 | | * @param[in,out] hdr marker that points at said header |
613 | | * @param[in] hdr_len length of the headers that will be added |
614 | | * @param[in] flag_offset offset within header of a flag byte whose MSB is set for all |
615 | | * but the last piece. |
616 | | * @param[in] vsa_offset if non-zero, the offset of a length field in a (sub?)-header |
617 | | * of size 3 that also needs to be adjusted to include the number |
618 | | * of bytes of data in the piece |
619 | | * @return |
620 | | * - <0 the number of bytes we would have needed to create |
621 | | * space for another attribute header in the buffer. |
622 | | * - 0 data was not modified. |
623 | | * - >0 the number additional bytes we used inserting extra |
624 | | * headers. |
625 | | */ |
static ssize_t attr_fragment(fr_dbuff_t *data, size_t data_len, fr_dbuff_marker_t *hdr, size_t hdr_len,
			     int flag_offset, int vsa_offset)
{
	unsigned int		num_fragments, i = 0;
	size_t			max_frag_data = UINT8_MAX - hdr_len;	/* max payload per fragment */
	fr_dbuff_t		frag_data = FR_DBUFF_ABS(hdr);
	fr_dbuff_marker_t	frag_hdr, frag_hdr_p;

	if (unlikely(!data_len)) return 0;	/* Shouldn't have been called */

	num_fragments = ROUND_UP_DIV(data_len, max_frag_data);
	if (num_fragments == 1) return 0;	/* Nothing to do */

	/* frag_hdr tracks the current fragment's header; frag_hdr_p is a scratch cursor. */
	fr_dbuff_marker(&frag_hdr, &frag_data);
	fr_dbuff_marker(&frag_hdr_p, &frag_data);

	fr_dbuff_advance(&frag_data, hdr_len);

	FR_PROTO_HEX_DUMP(fr_dbuff_current(hdr), hdr_len + data_len, "attr_fragment in");
	for (;;) {
		bool last = (i + 1) == num_fragments;
		uint8_t frag_len;

		/*
		 *	How long is this fragment?
		 */
		if (last) {
			/* Whatever data remains after the full-size fragments. */
			frag_len = (data_len - (max_frag_data * (num_fragments - 1)));
		} else {
			frag_len = max_frag_data;
		}

		/*
		 *	Update the "outer" header to reflect the actual
		 *	length of the fragment.  Octet 1 of the header is
		 *	the length field.
		 */
		fr_dbuff_set(&frag_hdr_p, &frag_hdr);
		fr_dbuff_advance(&frag_hdr_p, 1);
		fr_dbuff_in(&frag_hdr_p, (uint8_t)(hdr_len + frag_len));

		/*
		 *	Update the "inner" header.  The length here is
		 *	the inner VSA header length (3) + the fragment
		 *	length.
		 */
		if (vsa_offset) {
			fr_dbuff_set(&frag_hdr_p, fr_dbuff_current(&frag_hdr) + vsa_offset);
			fr_dbuff_in(&frag_hdr_p, (uint8_t)(3 + frag_len));
		}

		/*
		 *	Just over-ride the flag field.  Nothing else
		 *	uses it.  MSB set == "more fragments follow".
		 */
		fr_dbuff_set(&frag_hdr_p, fr_dbuff_current(&frag_hdr) + flag_offset);
		fr_dbuff_in(&frag_hdr_p, (uint8_t)(!last << 7));

		FR_PROTO_HEX_DUMP(fr_dbuff_current(hdr), frag_len + hdr_len,
				  "attr_fragment fragment %u/%u", i + 1, num_fragments);

		fr_dbuff_advance(&frag_data, frag_len);	/* Go to the start of the next fragment */
		if (last) break;

		/*
		 *	There's still trailing data after this
		 *	fragment.  Move the trailing data to *past*
		 *	the next header.  And after there's room, copy
		 *	the header over.
		 *
		 *	This process leaves the next header in place,
		 *	ready for the next iteration of the loop.
		 *
		 *	Yes, moving things multiple times is less than
		 *	efficient.  Oh well.  it's ~1K memmoved()
		 *	maybe 4 times.  We are nowhere near the CPU /
		 *	electrical requirements of Bitcoin.
		 */
		i++;

		fr_dbuff_set(&frag_hdr, &frag_data);	/* Remember where the header should be */
		fr_dbuff_advance(&frag_data, hdr_len);	/* Advance past the header */

		/*
		 *	Shift remaining data by hdr_len.
		 */
		FR_DBUFF_IN_MEMCPY_RETURN(&FR_DBUFF(&frag_data), &frag_hdr, data_len - (i * max_frag_data));
		fr_dbuff_in_memcpy(&FR_DBUFF(&frag_hdr), hdr, hdr_len);	/* Copy the old header over */
	}

	return fr_dbuff_set(data, &frag_data);
}
717 | | |
718 | | /** Encode an "extended" attribute |
719 | | * |
720 | | */ |
static ssize_t encode_extended(fr_dbuff_t *dbuff,
			       fr_da_stack_t *da_stack, NDEBUG_UNUSED unsigned int depth,
			       fr_dcursor_t *cursor, void *encode_ctx)
{
	ssize_t			slen;
	uint8_t			hlen;		/* running size of the header we've written */
	size_t			vendor_hdr;	/* octets of vendor-id + vendor-type, if any */
	bool			extra;		/* true for the "long" extended format */
	int			my_depth;
	fr_dict_attr_t const	*da;
	fr_dbuff_marker_t	hdr, length_field;
	fr_pair_t const		*vp = fr_dcursor_current(cursor);
	fr_dbuff_t		work_dbuff;

	PAIR_VERIFY(vp);
	FR_PROTO_STACK_PRINT(da_stack, depth);

	extra = flag_long_extended(&da_stack->da[0]->flags);

	/*
	 *	The data used here can be more than 255 bytes, but only for the
	 *	"long" extended type.
	 */
	if (extra) {
		work_dbuff = FR_DBUFF_BIND_CURRENT(dbuff);
	} else {
		work_dbuff = FR_DBUFF_MAX_BIND_CURRENT(dbuff, UINT8_MAX);
	}
	fr_dbuff_marker(&hdr, &work_dbuff);

	/*
	 *	Encode the header for "short" or "long" attributes
	 */
	hlen = 3 + extra;
	FR_DBUFF_IN_BYTES_RETURN(&work_dbuff, (uint8_t)da_stack->da[0]->attr);
	fr_dbuff_marker(&length_field, &work_dbuff);
	FR_DBUFF_IN_BYTES_RETURN(&work_dbuff, hlen);	/* this gets overwritten later*/

	/*
	 *	Encode which extended attribute it is.
	 */
	FR_DBUFF_IN_BYTES_RETURN(&work_dbuff, (uint8_t)da_stack->da[1]->attr);

	if (extra) FR_DBUFF_IN_BYTES_RETURN(&work_dbuff, 0x00);	/* flags start off at zero */

	FR_PROTO_STACK_PRINT(da_stack, depth);

	/*
	 *	Handle VSA as "VENDOR + attr"
	 */
	if (da_stack->da[1]->type == FR_TYPE_VSA) {
		fr_assert(da_stack->da[2]);
		fr_assert(da_stack->da[2]->type == FR_TYPE_VENDOR);

		/* 4 octet vendor id ... */
		FR_DBUFF_IN_RETURN(&work_dbuff, (uint32_t) da_stack->da[2]->attr);

		fr_assert(da_stack->da[3]);

		/* ... followed by 1 octet vendor type. */
		FR_DBUFF_IN_BYTES_RETURN(&work_dbuff, (uint8_t)da_stack->da[3]->attr);

		hlen += 5;
		vendor_hdr = 5;

		FR_PROTO_STACK_PRINT(da_stack, depth);
		FR_PROTO_HEX_DUMP(fr_dbuff_current(&hdr), hlen, "header extended vendor specific");

		my_depth = 3;
	} else {
		vendor_hdr = 0;
		FR_PROTO_HEX_DUMP(fr_dbuff_current(&hdr), hlen, "header extended");

		my_depth = 1;
	}

	/*
	 *	We're at the point where we need to encode something.
	 */
	da = da_stack->da[my_depth];
	fr_assert(vp->da == da);

	if (fr_type_is_leaf(da->type)) {
		slen = encode_value(&work_dbuff, da_stack, my_depth, cursor, encode_ctx);

	} else if (da->type == FR_TYPE_STRUCT) {
		slen = fr_struct_to_network(&work_dbuff, da_stack, my_depth, cursor, encode_ctx, encode_value, encode_child);

	} else {
		fr_assert(da->type == FR_TYPE_TLV);

		slen = encode_tlv(&work_dbuff, da_stack, my_depth, cursor, encode_ctx);
	}
	if (slen <= 0) return slen;

	/*
	 *	There may be more than 255 octets of data encoded in
	 *	the attribute.  If so, move the data up in the packet,
	 *	and copy the existing header over.  Set the "M" flag ONLY
	 *	after copying the rest of the data.
	 *
	 *	Note that we add "vendor_hdr" to the length of the
	 *	encoded data.  That 5 octet field is logically part of
	 *	the data, and not part of the header.
	 */
	if (slen > (UINT8_MAX - hlen)) {
		slen = attr_fragment(&work_dbuff, (size_t)vendor_hdr + slen, &hdr, 4, 3, 0);
		if (slen <= 0) return slen;

		return fr_dbuff_set(dbuff, &work_dbuff);
	}

	/* Single fragment: back-patch the length field we reserved earlier. */
	fr_dbuff_in_bytes(&length_field, (uint8_t) fr_dbuff_used(&work_dbuff));
	FR_PROTO_HEX_DUMP(fr_dbuff_current(&hdr), hlen, "header extended");

	return fr_dbuff_set(dbuff, &work_dbuff);
}
836 | | |
837 | | /* |
838 | | * The encode_extended() function expects to see the TLV or |
839 | | * STRUCT inside of the extended attribute, in which case it |
840 | | * creates the attribute header and calls encode_value() for the |
841 | | * leaf type, or child TLV / struct. |
842 | | * |
843 | | * If we see VSA or VENDOR, then we recurse past that to a child |
844 | | * which is either a leaf, or a TLV, or a STRUCT. |
845 | | */ |
846 | | static ssize_t encode_extended_nested(fr_dbuff_t *dbuff, |
847 | | fr_da_stack_t *da_stack, unsigned int depth, |
848 | | fr_dcursor_t *cursor, void *encode_ctx) |
849 | 0 | { |
850 | 0 | ssize_t slen; |
851 | 0 | fr_pair_t *parent, *vp; |
852 | 0 | fr_dcursor_t child_cursor; |
853 | 0 | fr_dbuff_t work_dbuff = FR_DBUFF(dbuff); |
854 | |
|
855 | 0 | parent = fr_dcursor_current(cursor); |
856 | 0 | fr_assert(fr_type_is_structural(parent->vp_type)); |
857 | |
|
858 | 0 | (void) fr_pair_dcursor_child_iter_init(&child_cursor, &parent->vp_group, cursor); |
859 | |
|
860 | 0 | FR_PROTO_STACK_PRINT(da_stack, depth); |
861 | |
|
862 | 0 | while ((vp = fr_dcursor_current(&child_cursor)) != NULL) { |
863 | 0 | if ((vp->vp_type == FR_TYPE_VSA) || (vp->vp_type == FR_TYPE_VENDOR)) { |
864 | 0 | slen = encode_extended_nested(&work_dbuff, da_stack, depth + 1, &child_cursor, encode_ctx); |
865 | |
|
866 | 0 | } else { |
867 | 0 | fr_proto_da_stack_build(da_stack, vp->da); |
868 | 0 | slen = encode_extended(&work_dbuff, da_stack, depth, &child_cursor, encode_ctx); |
869 | 0 | if (slen < 0) return slen; |
870 | 0 | } |
871 | | |
872 | 0 | if (slen < 0) return slen; |
873 | 0 | } |
874 | | |
875 | 0 | vp = fr_dcursor_next(cursor); |
876 | |
|
877 | 0 | fr_proto_da_stack_build(da_stack, vp ? vp->da : NULL); |
878 | |
|
879 | 0 | return fr_dbuff_set(dbuff, &work_dbuff); |
880 | 0 | } |
881 | | |
882 | | |
883 | | /** Encode an RFC format attribute, with the "concat" flag set |
884 | | * |
885 | | * If there isn't enough freespace in the packet, the data is |
886 | | * truncated to fit. |
887 | | * |
888 | | * The attribute is split on 253 byte boundaries, with a header |
889 | | * prepended to each chunk. |
890 | | */ |
891 | | static ssize_t encode_concat(fr_dbuff_t *dbuff, |
892 | | fr_da_stack_t *da_stack, unsigned int depth, |
893 | | fr_dcursor_t *cursor, UNUSED void *encode_ctx) |
894 | 0 | { |
895 | 0 | uint8_t const *p; |
896 | 0 | size_t data_len; |
897 | 0 | fr_pair_t const *vp = fr_dcursor_current(cursor); |
898 | 0 | fr_dbuff_t work_dbuff = FR_DBUFF(dbuff); |
899 | 0 | fr_dbuff_marker_t hdr; |
900 | |
|
901 | 0 | FR_PROTO_STACK_PRINT(da_stack, depth); |
902 | |
|
903 | 0 | p = vp->vp_octets; |
904 | 0 | data_len = vp->vp_length; |
905 | 0 | fr_dbuff_marker(&hdr, &work_dbuff); |
906 | |
|
907 | 0 | while (data_len > 0) { |
908 | 0 | size_t frag_len = (data_len > RADIUS_MAX_STRING_LENGTH) ? RADIUS_MAX_STRING_LENGTH : data_len; |
909 | |
|
910 | 0 | fr_dbuff_set(&hdr, &work_dbuff); |
911 | 0 | FR_DBUFF_IN_BYTES_RETURN(&work_dbuff, (uint8_t) da_stack->da[depth]->attr, 0x00); |
912 | | |
913 | 0 | FR_DBUFF_IN_MEMCPY_RETURN(&work_dbuff, p, frag_len); |
914 | | |
915 | 0 | fr_dbuff_advance(&hdr, 1); |
916 | 0 | fr_dbuff_in(&hdr, (uint8_t) (2 + frag_len)); |
917 | |
|
918 | 0 | FR_PROTO_HEX_DUMP(fr_dbuff_current(&hdr) - 1, 2 + frag_len, "encode_concat fragment"); |
919 | |
|
920 | 0 | p += frag_len; |
921 | 0 | data_len -= frag_len; |
922 | 0 | } |
923 | | |
924 | 0 | vp = fr_dcursor_next(cursor); |
925 | | |
926 | | /* |
927 | | * @fixme: attributes with 'concat' MUST of type |
928 | | * 'octets', and therefore CANNOT have any TLV data in them. |
929 | | */ |
930 | 0 | fr_proto_da_stack_build(da_stack, vp ? vp->da : NULL); |
931 | |
|
932 | 0 | return fr_dbuff_set(dbuff, &work_dbuff); |
933 | 0 | } |
934 | | |
935 | | /** Encode an RFC format attribute. |
936 | | * |
937 | | * This could be a standard attribute, or a TLV data type. |
938 | | * If it's a standard attribute, then vp->da->attr == attribute. |
939 | | * Otherwise, attribute may be something else. |
940 | | */ |
941 | | static ssize_t encode_child(fr_dbuff_t *dbuff, |
942 | | fr_da_stack_t *da_stack, unsigned int depth, |
943 | | fr_dcursor_t *cursor, void *encode_ctx) |
944 | 0 | { |
945 | 0 | ssize_t slen; |
946 | 0 | uint8_t hlen; |
947 | 0 | fr_dbuff_marker_t hdr; |
948 | 0 | fr_dbuff_t work_dbuff = FR_DBUFF_MAX(dbuff, UINT8_MAX); |
949 | |
|
950 | 0 | FR_PROTO_STACK_PRINT(da_stack, depth); |
951 | |
|
952 | 0 | fr_assert(da_stack->da[depth] != NULL); |
953 | |
|
954 | 0 | fr_dbuff_marker(&hdr, &work_dbuff); |
955 | |
|
956 | 0 | hlen = 2; |
957 | 0 | FR_DBUFF_IN_BYTES_RETURN(&work_dbuff, (uint8_t)da_stack->da[depth]->attr, hlen); |
958 | | |
959 | 0 | slen = encode_value(&work_dbuff, da_stack, depth, cursor, encode_ctx); |
960 | 0 | if (slen <= 0) return slen; |
961 | | |
962 | 0 | fr_dbuff_advance(&hdr, 1); |
963 | 0 | fr_dbuff_in_bytes(&hdr, (uint8_t)(hlen + slen)); |
964 | |
|
965 | 0 | FR_PROTO_HEX_DUMP(fr_dbuff_start(&work_dbuff), 2, "header rfc"); |
966 | |
|
967 | 0 | return fr_dbuff_set(dbuff, &work_dbuff); |
968 | 0 | } |
969 | | |
970 | | |
/** Encode one full Vendor-Specific + Vendor-ID + Vendor-Attr + Vendor-Length + ...
 *
 * Writes the standard Vendor-Specific (26) wrapper, the 32bit Vendor-ID,
 * then one vendor attribute whose type/length field widths are taken from
 * the vendor's dictionary definition (dv->flags.type_size / dv->flags.length).
 *
 * @param[out] dbuff		Where to write the encoded attribute.
 * @param[in] da_stack		Attribute nesting stack; da_stack->da[depth] must be FR_TYPE_VENDOR.
 * @param[in] depth		Current depth in the stack.
 * @param[in] cursor		Cursor positioned on the pair to encode.
 * @param[in] encode_ctx	Packet context (secret, vector, etc.).
 * @return
 *	- >0 number of bytes written.
 *	- <=0 on error (PAIR_ENCODE_FATAL_ERROR for structural problems).
 */
static ssize_t encode_vendor_attr(fr_dbuff_t *dbuff,
				  fr_da_stack_t *da_stack, unsigned int depth,
				  fr_dcursor_t *cursor, void *encode_ctx)
{
	ssize_t			slen;
	size_t			hdr_len;
	fr_dbuff_marker_t	hdr, length_field, vsa_length_field;
	fr_dict_attr_t const	*da, *dv;
	fr_dbuff_t		work_dbuff = FR_DBUFF_MAX(dbuff, UINT8_MAX);	/* whole VSA is limited to 255 octets */

	FR_PROTO_STACK_PRINT(da_stack, depth);

	dv = da_stack->da[depth++];

	if (dv->type != FR_TYPE_VENDOR) {
		fr_strerror_const("Expected Vendor");
		return PAIR_ENCODE_FATAL_ERROR;
	}

	fr_dbuff_marker(&hdr, &work_dbuff);

	/*
	 *	Build the Vendor-Specific header
	 */
	FR_DBUFF_IN_BYTES_RETURN(&work_dbuff, FR_VENDOR_SPECIFIC);

	/*
	 *	Placeholder for the RADIUS attribute length; filled in
	 *	once the value has been encoded.
	 */
	fr_dbuff_marker(&length_field, &work_dbuff);
	FR_DBUFF_IN_BYTES_RETURN(&work_dbuff, 0);

	FR_DBUFF_IN_RETURN(&work_dbuff, (uint32_t)dv->attr);	/* Copy in the 32bit vendor ID */

	/*
	 *	Now we encode one vendor attribute.
	 */
	da = da_stack->da[depth];
	fr_assert(da != NULL);

	/* Size of the vendor's own type + length fields */
	hdr_len = dv->flags.type_size + dv->flags.length;

	/*
	 *	Vendors use different widths for their
	 *	attribute number fields.
	 */
	switch (dv->flags.type_size) {
	default:
		fr_strerror_printf("%s: Internal sanity check failed, type %u", __FUNCTION__, (unsigned) dv->flags.type_size);
		return PAIR_ENCODE_FATAL_ERROR;

	case 4:
		fr_dbuff_in(&work_dbuff, (uint32_t)da->attr);
		break;

	case 2:
		fr_dbuff_in(&work_dbuff, (uint16_t)da->attr);
		break;

	case 1:
		fr_dbuff_in(&work_dbuff, (uint8_t)da->attr);
		break;
	}

	/*
	 *	The length fields will get over-written later.
	 *
	 *	NOTE: vsa_length_field is only initialized when
	 *	dv->flags.length != 0; its later use is guarded by the
	 *	same condition.
	 */
	switch (dv->flags.length) {
	default:
		fr_strerror_printf("%s: Internal sanity check failed, length %u", __FUNCTION__, (unsigned) dv->flags.length);
		return PAIR_ENCODE_FATAL_ERROR;

	case 0:
		break;

	case 2:
		fr_dbuff_in_bytes(&work_dbuff, 0);
		FALL_THROUGH;

	case 1:
		/*
		 *	Length fields are set to zero, because they
		 *	will get over-ridden later.
		 */
		fr_dbuff_marker(&vsa_length_field, &work_dbuff);
		fr_dbuff_in_bytes(&work_dbuff, 0);
		break;
	}

	slen = encode_value(&work_dbuff, da_stack, depth, cursor, encode_ctx);
	if (slen <= 0) return slen;

	/* Vendor-format length = vendor type/length fields + value */
	if (dv->flags.length) {
		fr_dbuff_in(&vsa_length_field, (uint8_t)(hdr_len + slen));
	}

	/* RADIUS attribute length = everything written so far */
	fr_dbuff_in(&length_field, (uint8_t) fr_dbuff_used(&work_dbuff));

	FR_PROTO_HEX_DUMP(fr_dbuff_current(&hdr), 6 + hdr_len, "header vsa");

	return fr_dbuff_set(dbuff, &work_dbuff);
}
1072 | | |
/** Encode a WiMAX attribute
 *
 * WiMAX VSAs carry a one-octet "continuation" flag after the vendor
 * length, so a single logical attribute can span multiple RADIUS
 * Vendor-Specific attributes.  Fragmentation (when the encoded data
 * exceeds 255 octets) is delegated to attr_fragment().
 */
static ssize_t encode_wimax(fr_dbuff_t *dbuff,
			    fr_da_stack_t *da_stack, unsigned int depth,
			    fr_dcursor_t *cursor, void *encode_ctx)
{
	ssize_t			slen;
	fr_dbuff_t		work_dbuff = FR_DBUFF(dbuff);	/* NOT bounded to 255 - see below */
	fr_dbuff_marker_t	hdr, length_field, vsa_length_field;
	fr_dict_attr_t const	*dv;
	fr_pair_t const		*vp = fr_dcursor_current(cursor);

	fr_dbuff_marker(&hdr, &work_dbuff);

	PAIR_VERIFY(vp);
	FR_PROTO_STACK_PRINT(da_stack, depth);

	dv = da_stack->da[depth++];

	if (dv->type != FR_TYPE_VENDOR) {
		fr_strerror_const("Expected Vendor");
		return PAIR_ENCODE_FATAL_ERROR;
	}

	FR_PROTO_STACK_PRINT(da_stack, depth);

	/*
	 *	Build the Vendor-Specific header
	 *
	 *	0x09 is the provisional RADIUS attribute length
	 *	(type + length + 4 octet Vendor-ID + vendor type +
	 *	vendor length + continuation).
	 */
	FR_DBUFF_IN_BYTES_RETURN(&work_dbuff, FR_VENDOR_SPECIFIC);
	fr_dbuff_marker(&length_field, &work_dbuff);
	FR_DBUFF_IN_BYTES_RETURN(&work_dbuff, 0x09);

	FR_DBUFF_IN_RETURN(&work_dbuff, (uint32_t) dv->attr);

	/*
	 *	Encode the first attribute
	 */
	FR_DBUFF_IN_BYTES_RETURN(&work_dbuff, (uint8_t)da_stack->da[depth]->attr);

	fr_dbuff_marker(&vsa_length_field, &work_dbuff);
	FR_DBUFF_IN_BYTES_RETURN(&work_dbuff, 0x03, 0x00);	/* length + continuation, both may be overwritten later */

	/*
	 *	We don't bound the size of work_dbuff; it can use more than UINT8_MAX bytes
	 *	because of the "continuation" byte.
	 */
	slen = encode_value(&work_dbuff, da_stack, depth, cursor, encode_ctx);
	if (slen <= 0) return slen;

	/*
	 *	There may be more than 253 octets of data encoded in
	 *	the attribute.  If so, move the data up in the packet,
	 *	and copy the existing header over.  Set the "C" flag
	 *	ONLY after copying the rest of the data.
	 *
	 *	Note that we do NOT check 'slen' here, as it's only
	 *	the size of the sub-sub attribute, and doesn't include
	 *	the RADIUS attribute header, or Vendor-ID.
	 */
	if (fr_dbuff_used(&work_dbuff) > UINT8_MAX) {
		/* offsets: 9 = full header size, 8 = vendor length octet, 7 = continuation flag */
		slen = attr_fragment(&work_dbuff, (size_t)slen, &hdr, 9, 8, 7);
		if (slen <= 0) return slen;

		return fr_dbuff_set(dbuff, &work_dbuff);
	}

	/* Vendor length excludes type, length and Vendor-ID (6 octets) */
	fr_dbuff_in_bytes(&vsa_length_field, (uint8_t) (fr_dbuff_used(&work_dbuff) - 6));
	fr_dbuff_in_bytes(&length_field, (uint8_t) fr_dbuff_used(&work_dbuff));

	FR_PROTO_HEX_DUMP(fr_dbuff_current(&hdr), 9, "header wimax");

	return fr_dbuff_set(dbuff, &work_dbuff);
}
1148 | | |
1149 | | static ssize_t encode_vendor(fr_dbuff_t *dbuff, |
1150 | | fr_da_stack_t *da_stack, unsigned int depth, |
1151 | | fr_dcursor_t *cursor, void *encode_ctx) |
1152 | 0 | { |
1153 | 0 | fr_dict_attr_t const *da = da_stack->da[depth]; |
1154 | 0 | ssize_t slen; |
1155 | 0 | fr_pair_t *vp; |
1156 | 0 | fr_dict_vendor_t const *dv; |
1157 | 0 | fr_dcursor_t child_cursor; |
1158 | 0 | fr_dbuff_t work_dbuff; |
1159 | |
|
1160 | 0 | FR_PROTO_STACK_PRINT(da_stack, depth); |
1161 | |
|
1162 | 0 | if (da->type != FR_TYPE_VENDOR) { |
1163 | 0 | fr_strerror_printf("%s: Expected type \"vendor\" got \"%s\"", __FUNCTION__, |
1164 | 0 | fr_type_to_str(da->type)); |
1165 | 0 | return PAIR_ENCODE_FATAL_ERROR; |
1166 | 0 | } |
1167 | | |
1168 | 0 | dv = fr_dict_vendor_by_da(da_stack->da[depth]); |
1169 | | |
1170 | | /* |
1171 | | * Flat hierarchy, encode one attribute at a time. |
1172 | | * |
1173 | | * Note that there's no attempt to encode multiple VSAs |
1174 | | * into one attribute. We can add that back as a flag, |
1175 | | * once all of the nested attribute conversion has been |
1176 | | * done. |
1177 | | */ |
1178 | 0 | if (da_stack->da[depth + 1]) { |
1179 | 0 | if (dv && dv->continuation) { |
1180 | 0 | return encode_wimax(dbuff, da_stack, depth, cursor, encode_ctx); |
1181 | 0 | } |
1182 | | |
1183 | 0 | return encode_vendor_attr(dbuff, da_stack, depth, cursor, encode_ctx); |
1184 | 0 | } |
1185 | | |
1186 | | /* |
1187 | | * Loop over the children of this attribute of type Vendor. |
1188 | | */ |
1189 | 0 | vp = fr_dcursor_current(cursor); |
1190 | 0 | fr_assert(vp->da == da); |
1191 | 0 | work_dbuff = FR_DBUFF(dbuff); |
1192 | |
|
1193 | 0 | fr_pair_dcursor_child_iter_init(&child_cursor, &vp->vp_group, cursor); |
1194 | 0 | while ((vp = fr_dcursor_current(&child_cursor)) != NULL) { |
1195 | 0 | fr_proto_da_stack_build(da_stack, vp->da); |
1196 | |
|
1197 | 0 | if (dv && dv->continuation) { |
1198 | 0 | slen = encode_wimax(&work_dbuff, da_stack, depth, &child_cursor, encode_ctx); |
1199 | 0 | } else { |
1200 | 0 | slen = encode_vendor_attr(&work_dbuff, da_stack, depth, &child_cursor, encode_ctx); |
1201 | 0 | } |
1202 | 0 | if (slen < 0) { |
1203 | 0 | if (slen == PAIR_ENCODE_SKIPPED) continue; |
1204 | 0 | return slen; |
1205 | 0 | } |
1206 | 0 | } |
1207 | | |
1208 | 0 | vp = fr_dcursor_next(cursor); |
1209 | 0 | fr_proto_da_stack_build(da_stack, vp ? vp->da : NULL); |
1210 | |
|
1211 | 0 | return fr_dbuff_set(dbuff, &work_dbuff); |
1212 | 0 | } |
1213 | | |
1214 | | /** Encode a Vendor-Specific attribute |
1215 | | * |
1216 | | */ |
1217 | | static ssize_t encode_vsa(fr_dbuff_t *dbuff, |
1218 | | fr_da_stack_t *da_stack, unsigned int depth, |
1219 | | fr_dcursor_t *cursor, void *encode_ctx) |
1220 | 0 | { |
1221 | 0 | ssize_t slen; |
1222 | 0 | fr_pair_t *vp; |
1223 | 0 | fr_dcursor_t child_cursor; |
1224 | 0 | fr_dict_attr_t const *da = da_stack->da[depth]; |
1225 | 0 | fr_dbuff_t work_dbuff; |
1226 | |
|
1227 | 0 | FR_PROTO_STACK_PRINT(da_stack, depth); |
1228 | |
|
1229 | 0 | if (da->type != FR_TYPE_VSA) { |
1230 | 0 | fr_strerror_printf("%s: Expected type \"vsa\" got \"%s\"", __FUNCTION__, |
1231 | 0 | fr_type_to_str(da->type)); |
1232 | 0 | return PAIR_ENCODE_FATAL_ERROR; |
1233 | 0 | } |
1234 | | |
1235 | | /* |
1236 | | * Loop over the contents of Vendor-Specific, each of |
1237 | | * which MUST be of type FR_TYPE_VENDOR. |
1238 | | */ |
1239 | 0 | if (da_stack->da[depth + 1]) { |
1240 | 0 | return encode_vendor(dbuff, da_stack, depth + 1, cursor, encode_ctx); |
1241 | 0 | } |
1242 | | |
1243 | 0 | work_dbuff = FR_DBUFF(dbuff); |
1244 | |
|
1245 | 0 | vp = fr_dcursor_current(cursor); |
1246 | 0 | if (vp->da != da_stack->da[depth]) { |
1247 | 0 | fr_strerror_printf("%s: Can't encode empty Vendor-Specific", __FUNCTION__); |
1248 | 0 | return PAIR_ENCODE_SKIPPED; |
1249 | 0 | } |
1250 | | |
1251 | | /* |
1252 | | * Loop over the children of this Vendor-Specific |
1253 | | * attribute. |
1254 | | */ |
1255 | 0 | fr_pair_dcursor_child_iter_init(&child_cursor, &vp->vp_group, cursor); |
1256 | 0 | while ((vp = fr_dcursor_current(&child_cursor)) != NULL) { |
1257 | 0 | fr_proto_da_stack_build(da_stack, vp->da); |
1258 | |
|
1259 | 0 | fr_assert(da_stack->da[depth + 1]->type == FR_TYPE_VENDOR); |
1260 | |
|
1261 | 0 | slen = encode_vendor(&work_dbuff, da_stack, depth + 1, &child_cursor, encode_ctx); |
1262 | 0 | if (slen < 0) { |
1263 | 0 | if (slen == PAIR_ENCODE_SKIPPED) continue; |
1264 | 0 | return slen; |
1265 | 0 | } |
1266 | 0 | } |
1267 | | |
1268 | | /* |
1269 | | * Fix up the da stack, and return the data we've encoded. |
1270 | | */ |
1271 | 0 | vp = fr_dcursor_next(cursor); |
1272 | 0 | fr_proto_da_stack_build(da_stack, vp ? vp->da : NULL); |
1273 | |
|
1274 | 0 | FR_PROTO_HEX_DUMP(fr_dbuff_start(&work_dbuff), 6, "header vsa"); |
1275 | |
|
1276 | 0 | return fr_dbuff_set(dbuff, &work_dbuff); |
1277 | 0 | } |
1278 | | |
/** Encode NAS-Filter-Rule
 *
 * Concatenating the string attributes together, separated by a 0x00 byte,
 * per RFC 4849.  Consecutive NAS-Filter-Rule pairs are packed into as few
 * 255-octet RADIUS attributes as possible, splitting rule text across
 * attribute boundaries when necessary.
 *
 * 'hdr' tracks the start of the current attribute header; 'attr_len' is
 * the running length (header included) of the attribute being filled.
 */
static ssize_t encode_nas_filter_rule(fr_dbuff_t *dbuff,
				      fr_da_stack_t *da_stack, NDEBUG_UNUSED unsigned int depth,
				      fr_dcursor_t *cursor, UNUSED void *encode_ctx)
{
	fr_dbuff_t		work_dbuff = FR_DBUFF(dbuff);
	fr_dbuff_marker_t	hdr, frag_hdr;
	fr_pair_t		*vp = fr_dcursor_current(cursor);
	size_t			attr_len = 2;	/* type + length octets */

	FR_PROTO_STACK_PRINT(da_stack, depth);

	fr_assert(vp);
	fr_assert(vp->da);

	/*
	 *	Write the first attribute header with a zero length;
	 *	'hdr' is left pointing at the length octet.
	 */
	fr_dbuff_marker(&hdr, &work_dbuff);
	fr_dbuff_marker(&frag_hdr, &work_dbuff);
	fr_dbuff_advance(&hdr, 1);
	FR_DBUFF_IN_BYTES_RETURN(&work_dbuff, (uint8_t)vp->da->attr, 0x00);

	fr_assert(vp->da == attr_nas_filter_rule);

	while (true) {
		size_t data_len = vp->vp_length;
		size_t frag_len;
		char const *p = vp->vp_strvalue;

		/*
		 *	Keep encoding this attribute until it's done.
		 */
		while (data_len > 0) {
			frag_len = data_len;

			/*
			 *	This fragment doesn't overflow the
			 *	attribute.  Copy it over, update the
			 *	length, but leave the marker at the
			 *	current header.
			 */
			if ((attr_len + frag_len) <= UINT8_MAX) {
				FR_DBUFF_IN_MEMCPY_RETURN(&work_dbuff, p, frag_len);
				attr_len += frag_len;

				fr_dbuff_set(&frag_hdr, &hdr);
				fr_dbuff_in(&frag_hdr, (uint8_t) attr_len);	/* there's no fr_dbuff_in_no_advance() */
				break;
			}

			/*
			 *	This fragment overflows the attribute.
			 *	Copy the fragment in, and create a new
			 *	attribute header.
			 */
			frag_len = UINT8_MAX - attr_len;
			FR_DBUFF_IN_MEMCPY_RETURN(&work_dbuff, p, frag_len);
			fr_dbuff_in(&hdr, (uint8_t) UINT8_MAX);

			fr_dbuff_marker(&hdr, &work_dbuff);
			fr_dbuff_advance(&hdr, 1);
			FR_DBUFF_IN_BYTES_RETURN(&work_dbuff, (uint8_t)vp->da->attr, 0x02);
			attr_len = 2;

			p += frag_len;
			data_len -= frag_len;
		}

		/*
		 *	If we have nothing more to do here, then stop.
		 */
		vp = fr_dcursor_next(cursor);
		if (!vp || (vp->da != attr_nas_filter_rule)) {
			break;
		}

		/*
		 *	We have to add a zero byte.  If it doesn't
		 *	overflow the current attribute, then just add
		 *	it in.
		 */
		if (attr_len < UINT8_MAX) {
			attr_len++;
			FR_DBUFF_IN_BYTES_RETURN(&work_dbuff, 0x00);

			fr_dbuff_set(&frag_hdr, &hdr);
			fr_dbuff_in(&frag_hdr, (uint8_t) attr_len);	/* there's no fr_dbuff_in_no_advance() */
			continue;
		}

		/*
		 *	The zero byte causes the current attribute to
		 *	overflow.  Create a new header with the zero
		 *	byte already populated, and keep going.
		 */
		fr_dbuff_marker(&hdr, &work_dbuff);
		fr_dbuff_advance(&hdr, 1);
		FR_DBUFF_IN_BYTES_RETURN(&work_dbuff, (uint8_t)vp->da->attr, 0x00, 0x00);
		attr_len = 3;
	}

	/* Rebuild the da_stack for whatever attribute follows */
	vp = fr_dcursor_current(cursor);
	fr_proto_da_stack_build(da_stack, vp ? vp->da : NULL);

	return fr_dbuff_set(dbuff, &work_dbuff);
}
1386 | | |
1387 | | /** Encode an RFC standard attribute 1..255 |
1388 | | * |
1389 | | * This function is not the same as encode_child(), because this |
1390 | | * one treats some "top level" attributes as special. e.g. |
1391 | | * Message-Authenticator. |
1392 | | */ |
1393 | | static ssize_t encode_rfc(fr_dbuff_t *dbuff, fr_da_stack_t *da_stack, unsigned int depth, |
1394 | | fr_dcursor_t *cursor, void *encode_ctx) |
1395 | 0 | { |
1396 | 0 | fr_pair_t const *vp = fr_dcursor_current(cursor); |
1397 | 0 | fr_dbuff_t work_dbuff = FR_DBUFF(dbuff); |
1398 | 0 | fr_dbuff_marker_t start; |
1399 | |
|
1400 | 0 | fr_dbuff_marker(&start, &work_dbuff); |
1401 | | |
1402 | | /* |
1403 | | * Sanity checks |
1404 | | */ |
1405 | 0 | PAIR_VERIFY(vp); |
1406 | 0 | FR_PROTO_STACK_PRINT(da_stack, depth); |
1407 | |
|
1408 | 0 | switch (da_stack->da[depth]->type) { |
1409 | 0 | case FR_TYPE_TLV: |
1410 | 0 | case FR_TYPE_VSA: |
1411 | 0 | case FR_TYPE_VENDOR: |
1412 | | /* FR_TYPE_STRUCT is actually allowed... */ |
1413 | 0 | fr_strerror_printf("%s: Expected leaf type got \"%s\"", __FUNCTION__, |
1414 | 0 | fr_type_to_str(da_stack->da[depth]->type)); |
1415 | 0 | return PAIR_ENCODE_FATAL_ERROR; |
1416 | | |
1417 | 0 | default: |
1418 | | /* |
1419 | | * Attribute 0 is fine as a TLV leaf, or VSA, but not |
1420 | | * in the original standards space. |
1421 | | */ |
1422 | 0 | if (((fr_dict_vendor_num_by_da(da_stack->da[depth]) == 0) && (da_stack->da[depth]->attr == 0)) || |
1423 | 0 | (da_stack->da[depth]->attr > UINT8_MAX)) { |
1424 | 0 | fr_strerror_printf("%s: Called with non-standard attribute %u", __FUNCTION__, vp->da->attr); |
1425 | 0 | return PAIR_ENCODE_SKIPPED; |
1426 | 0 | } |
1427 | 0 | break; |
1428 | 0 | } |
1429 | | |
1430 | | /* |
1431 | | * Only CUI is allowed to have zero length. |
1432 | | * Thank you, WiMAX! |
1433 | | */ |
1434 | 0 | if ((vp->da == attr_chargeable_user_identity) && (vp->vp_length == 0)) { |
1435 | 0 | fr_dbuff_in_bytes(&work_dbuff, (uint8_t)vp->da->attr, 0x02); |
1436 | |
|
1437 | 0 | FR_PROTO_HEX_DUMP(fr_dbuff_current(&start), 2, "header rfc"); |
1438 | |
|
1439 | 0 | vp = fr_dcursor_next(cursor); |
1440 | 0 | fr_proto_da_stack_build(da_stack, vp ? vp->da : NULL); |
1441 | 0 | return fr_dbuff_set(dbuff, &work_dbuff); |
1442 | 0 | } |
1443 | | |
1444 | | /* |
1445 | | * Message-Authenticator is hard-coded. |
1446 | | */ |
1447 | 0 | if (vp->da == attr_message_authenticator) { |
1448 | 0 | FR_DBUFF_IN_BYTES_RETURN(&work_dbuff, (uint8_t)vp->da->attr, 18); |
1449 | 0 | FR_DBUFF_MEMSET_RETURN(&work_dbuff, 0, RADIUS_MESSAGE_AUTHENTICATOR_LENGTH); |
1450 | | |
1451 | 0 | FR_PROTO_HEX_DUMP(fr_dbuff_current(&start) + 2, RADIUS_MESSAGE_AUTHENTICATOR_LENGTH, |
1452 | 0 | "message-authenticator"); |
1453 | 0 | FR_PROTO_HEX_DUMP(fr_dbuff_current(&start), 2, "header rfc"); |
1454 | |
|
1455 | 0 | vp = fr_dcursor_next(cursor); |
1456 | 0 | fr_proto_da_stack_build(da_stack, vp ? vp->da : NULL); |
1457 | 0 | return fr_dbuff_set(dbuff, &work_dbuff); |
1458 | 0 | } |
1459 | | |
1460 | | /* |
1461 | | * NAS-Filter-Rule has a stupid format in order to save |
1462 | | * one byte per attribute. |
1463 | | */ |
1464 | 0 | if (vp->da == attr_nas_filter_rule) { |
1465 | 0 | return encode_nas_filter_rule(dbuff, da_stack, depth, cursor, encode_ctx); |
1466 | 0 | } |
1467 | | |
1468 | | /* |
1469 | | * Once we've checked for various top-level magic, RFC attributes are just TLVs. |
1470 | | */ |
1471 | 0 | return encode_child(dbuff, da_stack, depth, cursor, encode_ctx); |
1472 | 0 | } |
1473 | | |
/** Encode a data structure into a RADIUS attribute
 *
 * This is the main entry point into the encoder.  It sets up the encoder array
 * we use for tracking our TLV/VSA nesting and then calls the appropriate
 * dispatch function.
 *
 * @param[out] dbuff		Where to write encoded data.
 * @param[in] cursor		Specifying attribute to encode.
 * @param[in] encode_ctx	Additional data such as the shared secret to use.
 * @return
 *	- >0 The number of bytes written to out.
 *	- 0 Nothing to encode (or attribute skipped).
 *	- <0 an error occurred.
 */
ssize_t fr_radius_encode_pair(fr_dbuff_t *dbuff, fr_dcursor_t *cursor, void *encode_ctx)
{
	fr_pair_t const		*vp;
	ssize_t			slen;
	fr_dbuff_t		work_dbuff = FR_DBUFF(dbuff);

	fr_da_stack_t		da_stack;
	fr_dict_attr_t const	*da = NULL;

	if (!cursor) return PAIR_ENCODE_FATAL_ERROR;

	vp = fr_dcursor_current(cursor);
	if (!vp) return 0;

	PAIR_VERIFY(vp);

	if (vp->da->depth > FR_DICT_MAX_TLV_STACK) {
		fr_strerror_printf("%s: Attribute depth %i exceeds maximum nesting depth %i",
				   __FUNCTION__, vp->da->depth, FR_DICT_MAX_TLV_STACK);
		return PAIR_ENCODE_FATAL_ERROR;
	}

	/*
	 *	Tags are *top-level*, and are never nested.
	 *
	 *	A group pair whose internal attribute number lies in
	 *	(FR_TAG_BASE, FR_TAG_BASE + 0x20) represents a tag;
	 *	any other group is skipped.
	 */
	if (vp->vp_type == FR_TYPE_GROUP) {
		fr_radius_ctx_t *packet_ctx = encode_ctx;

		if (!vp->da->flags.internal ||
		    !((vp->da->attr > FR_TAG_BASE) && (vp->da->attr < (FR_TAG_BASE + 0x20)))) {
			fr_dcursor_next(cursor);
			return PAIR_ENCODE_SKIPPED;
		}

		packet_ctx->tag = vp->da->attr - FR_TAG_BASE;
		fr_assert(packet_ctx->tag > 0);
		fr_assert(packet_ctx->tag < 0x20);

		/* Recurse to encode the children of this attribute */
		slen = encode_tags(&work_dbuff, &vp->vp_group, encode_ctx);
		packet_ctx->tag = 0;	/* always reset, even though errors return immediately */
		if (slen < 0) return slen;

		fr_dcursor_next(cursor);	/* skip the tag attribute */
		return fr_dbuff_set(dbuff, &work_dbuff);
	}

	/*
	 *	Check for zero-length attributes.
	 */
	switch (vp->vp_type) {
	default:
		break;

	/*
	 *	Only variable length data types can be
	 *	variable sized.  All others have fixed size.
	 */
	case FR_TYPE_STRING:
	case FR_TYPE_OCTETS:
		/*
		 *	Zero-length strings are allowed for CUI
		 *	(thanks WiMAX!), and for
		 *	Message-Authenticator, because we will
		 *	automagically generate that one ourselves.
		 */
		if ((vp->vp_length == 0) &&
		    (vp->da != attr_chargeable_user_identity) &&
		    (vp->da != attr_message_authenticator)) {
			fr_dcursor_next(cursor);
			fr_strerror_const("Zero length string attributes not allowed");
			return PAIR_ENCODE_SKIPPED;
		}
		break;
	}

	/*
	 *	Nested structures of attributes can't be longer than
	 *	255 bytes, so each call to an encode function can
	 *	only use 255 bytes of buffer space at a time.
	 */

	/*
	 *	Fast path for the common case: a leaf attribute at the
	 *	dictionary root needs only a two-entry stack.
	 */
	if (vp->da->parent->flags.is_root && !vp->da->flags.subtype) {
		switch (vp->vp_type) {
		case FR_TYPE_LEAF:
			da_stack.da[0] = vp->da;
			da_stack.da[1] = NULL;
			da_stack.depth = 1;
			FR_PROTO_STACK_PRINT(&da_stack, 0);
			slen = encode_rfc(&work_dbuff, &da_stack, 0, cursor, encode_ctx);
			if (slen < 0) return slen;
			return fr_dbuff_set(dbuff, &work_dbuff);

		default:
			break;
		}
	}

	/*
	 *	Do more work to set up the stack for the complex case.
	 */
	fr_proto_da_stack_build(&da_stack, vp->da);
	FR_PROTO_STACK_PRINT(&da_stack, 0);

	/*
	 *	Top-level attributes get treated specially.  Things
	 *	like VSAs inside of extended attributes are handled
	 *	inside of type-specific encoders.
	 */
	da = da_stack.da[0];
	switch (da->type) {
	case FR_TYPE_OCTETS:
		if (flag_concat(&da->flags)) {
			/*
			 *	Attributes like EAP-Message are marked as
			 *	"concat", which means that they are fragmented
			 *	using a different scheme than the "long
			 *	extended" one.
			 */
			slen = encode_concat(&work_dbuff, &da_stack, 0, cursor, encode_ctx);
			if (slen < 0) return slen;
			break;
		}
		FALL_THROUGH;

	default:
		slen = encode_rfc(&work_dbuff, &da_stack, 0, cursor, encode_ctx);
		if (slen < 0) return slen;
		break;

	case FR_TYPE_VSA:
		slen = encode_vsa(&work_dbuff, &da_stack, 0, cursor, encode_ctx);
		if (slen < 0) return slen;
		break;

	case FR_TYPE_TLV:
		if (!flag_extended(&da->flags)) {
			slen = encode_child(&work_dbuff, &da_stack, 0, cursor, encode_ctx);

		} else if (vp->da != da) {
			fr_strerror_printf("extended attributes must be nested");
			return PAIR_ENCODE_FATAL_ERROR;

		} else {
			slen = encode_extended_nested(&work_dbuff, &da_stack, 0, cursor, encode_ctx);
		}
		if (slen < 0) return slen;
		break;

	case FR_TYPE_NULL:
	case FR_TYPE_VENDOR:
	case FR_TYPE_MAX:
		fr_strerror_printf("%s: Cannot encode attribute %s", __FUNCTION__, vp->da->name);
		return PAIR_ENCODE_FATAL_ERROR;
	}

	/*
	 *	We couldn't do it, so we didn't do anything.  The
	 *	cursor not advancing means no encoder consumed the
	 *	pair.
	 */
	if (fr_dcursor_current(cursor) == vp) {
		fr_strerror_printf("%s: Nested attribute structure too large to encode", __FUNCTION__);
		return PAIR_ENCODE_FATAL_ERROR;
	}

	return fr_dbuff_set(dbuff, &work_dbuff);
}
1657 | | |
/*
 *	Talloc destructor for the encoder test context: releases the
 *	global state set up by fr_radius_init().
 *
 *	Returns 0 so talloc proceeds with freeing the chunk.
 */
static int _test_ctx_free(UNUSED fr_radius_ctx_t *ctx)
{
	fr_radius_free();

	return 0;
}
1664 | | |
1665 | | static int encode_test_ctx(void **out, TALLOC_CTX *ctx) |
1666 | 0 | { |
1667 | 0 | static uint8_t vector[RADIUS_AUTH_VECTOR_LENGTH] = { |
1668 | 0 | 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, |
1669 | 0 | 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f }; |
1670 | |
|
1671 | 0 | fr_radius_ctx_t *test_ctx; |
1672 | |
|
1673 | 0 | if (fr_radius_init() < 0) return -1; |
1674 | | |
1675 | 0 | test_ctx = talloc_zero(ctx, fr_radius_ctx_t); |
1676 | 0 | if (!test_ctx) return -1; |
1677 | | |
1678 | 0 | test_ctx->secret = talloc_strdup(test_ctx, "testing123"); |
1679 | 0 | memcpy(test_ctx->vector, vector, sizeof(test_ctx->vector)); |
1680 | 0 | test_ctx->rand_ctx.a = 6809; |
1681 | 0 | test_ctx->rand_ctx.b = 2112; |
1682 | 0 | talloc_set_destructor(test_ctx, _test_ctx_free); |
1683 | |
|
1684 | 0 | *out = test_ctx; |
1685 | |
|
1686 | 0 | return 0; |
1687 | 0 | } |
1688 | | |
1689 | | static ssize_t fr_radius_encode_proto(UNUSED TALLOC_CTX *ctx, fr_pair_list_t *vps, uint8_t *data, size_t data_len, void *proto_ctx) |
1690 | 0 | { |
1691 | 0 | fr_radius_ctx_t *test_ctx = talloc_get_type_abort(proto_ctx, fr_radius_ctx_t); |
1692 | 0 | int packet_type = FR_RADIUS_CODE_ACCESS_REQUEST; |
1693 | 0 | fr_pair_t *vp; |
1694 | 0 | ssize_t slen; |
1695 | |
|
1696 | 0 | vp = fr_pair_find_by_da(vps, NULL, attr_packet_type); |
1697 | 0 | if (vp) packet_type = vp->vp_uint32; |
1698 | |
|
1699 | 0 | if ((packet_type == FR_RADIUS_CODE_ACCESS_REQUEST) || (packet_type == FR_RADIUS_CODE_STATUS_SERVER)) { |
1700 | 0 | vp = fr_pair_find_by_da(vps, NULL, attr_packet_authentication_vector); |
1701 | 0 | if (vp && (vp->vp_length == RADIUS_AUTH_VECTOR_LENGTH)) { |
1702 | 0 | memcpy(data + 4, vp->vp_octets, RADIUS_AUTH_VECTOR_LENGTH); |
1703 | 0 | } else { |
1704 | 0 | int i; |
1705 | |
|
1706 | 0 | for (i = 0; i < RADIUS_AUTH_VECTOR_LENGTH; i++) { |
1707 | 0 | data[4 + i] = fr_fast_rand(&test_ctx->rand_ctx); |
1708 | 0 | } |
1709 | 0 | } |
1710 | 0 | } |
1711 | | |
1712 | | /* |
1713 | | * @todo - pass in test_ctx to this function, so that we |
1714 | | * can leverage a consistent random number generator. |
1715 | | */ |
1716 | 0 | slen = fr_radius_encode(data, data_len, NULL, test_ctx->secret, talloc_array_length(test_ctx->secret) - 1, |
1717 | 0 | packet_type, 0, vps); |
1718 | 0 | if (slen <= 0) return slen; |
1719 | | |
1720 | 0 | if (fr_radius_sign(data, NULL, (uint8_t const *) test_ctx->secret, talloc_array_length(test_ctx->secret) - 1) < 0) { |
1721 | 0 | return -1; |
1722 | 0 | } |
1723 | | |
1724 | 0 | return slen; |
1725 | 0 | } |
1726 | | |
/*
 *	No one else should be using this.  It's exported only so the
 *	pair encode test point below can iterate over encodable pairs.
 */
extern void *fr_radius_next_encodable(fr_dlist_head_t *list, void *to_eval, void *uctx);

/*
 *	Test points - entry points used by the protocol test framework
 *	to exercise the encoder in isolation.
 */
extern fr_test_point_pair_encode_t radius_tp_encode_pair;
fr_test_point_pair_encode_t radius_tp_encode_pair = {
	.test_ctx	= encode_test_ctx,		/* per-test context factory */
	.func		= fr_radius_encode_pair,	/* encode a single attribute */
	.next_encodable	= fr_radius_next_encodable,	/* cursor iterator for encodable pairs */
};


extern fr_test_point_proto_encode_t radius_tp_encode_proto;
fr_test_point_proto_encode_t radius_tp_encode_proto = {
	.test_ctx	= encode_test_ctx,		/* per-test context factory */
	.func		= fr_radius_encode_proto	/* encode a complete packet */
};