/src/ntp-dev/ntpd/ntp_proto.c
Line | Count | Source |
1 | | /* |
2 | | * ntp_proto.c - NTP version 4 protocol machinery |
3 | | * |
4 | | * ATTENTION: Get approval from Harlan on all changes to this file! |
5 | | * (Harlan will be discussing these changes with Dave Mills.) |
6 | | * |
7 | | */ |
8 | | #ifdef HAVE_CONFIG_H |
9 | | #include <config.h> |
10 | | #endif |
11 | | |
12 | | #include "ntpd.h" |
13 | | #include "ntp_stdlib.h" |
14 | | #include "ntp_unixtime.h" |
15 | | #include "ntp_control.h" |
16 | | #include "ntp_string.h" |
17 | | #include "ntp_leapsec.h" |
18 | | #include "refidsmear.h" |
19 | | #include "lib_strbuf.h" |
20 | | |
21 | | #include <stdio.h> |
22 | | #ifdef HAVE_LIBSCF_H |
23 | | #include <libscf.h> |
24 | | #endif |
25 | | #ifdef HAVE_UNISTD_H |
26 | | #include <unistd.h> |
27 | | #endif |
28 | | |
29 | | /* [Bug 3031] define automatic broadcastdelay cutoff preset */ |
30 | | #ifndef BDELAY_DEFAULT |
31 | 1 | # define BDELAY_DEFAULT (-0.050) |
32 | | #endif |
33 | | |
34 | | /* |
35 | | * This macro filters the authentication outcome. If x is nonzero, |
36 | | * authentication is required; otherwise it is optional. |
37 | | */ |
38 | 195 | #define AUTH(x, y) ((x) ? (y) == AUTH_OK \ |
39 | 195 | : (y) == AUTH_OK || (y) == AUTH_NONE) |
40 | | |
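/*
 * [Editor's sketch -- not part of ntp_proto.c]  A standalone
 * illustration of how AUTH(x, y) filters outcomes: when x is nonzero
 * (authentication required) only AUTH_OK is acceptable; when x is
 * zero, AUTH_NONE (no MAC at all) is also acceptable.  The XAUTH_*
 * names are local to this example so they do not clash with the
 * auth_code enum below.
 */
#include <stdio.h>

enum { XAUTH_NONE, XAUTH_OK, XAUTH_ERROR, XAUTH_CRYPTO };
#define XAUTH(x, y) ((x) ? (y) == XAUTH_OK \
                         : (y) == XAUTH_OK || (y) == XAUTH_NONE)

int main(void)
{
        /* authentication required: only a validated MAC passes */
        printf("%d %d\n", XAUTH(1, XAUTH_OK), XAUTH(1, XAUTH_NONE));    /* 1 0 */
        /* authentication optional: a missing MAC also passes */
        printf("%d %d\n", XAUTH(0, XAUTH_NONE), XAUTH(0, XAUTH_ERROR)); /* 1 0 */
        return 0;
}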
41 | | typedef enum |
42 | | auth_state { |
43 | | AUTH_UNKNOWN = -1, /* Unknown */ |
44 | | AUTH_NONE, /* authentication not required */ |
45 | | AUTH_OK, /* authentication OK */ |
46 | | AUTH_ERROR, /* authentication error */ |
47 | | AUTH_CRYPTO /* crypto_NAK */ |
48 | | } auth_code; |
49 | | |
50 | | /* |
51 | | * Set up Kiss Code values |
52 | | */ |
53 | | |
54 | | typedef enum |
55 | | kiss_codes { |
56 | | NOKISS, /* No Kiss Code */ |
57 | | RATEKISS, /* Rate limit Kiss Code */ |
58 | | DENYKISS, /* Deny Kiss */ |
59 | | RSTRKISS, /* Restricted Kiss */ |
60 | | XKISS /* Experimental Kiss */ |
61 | | } kiss_code; |
62 | | |
63 | | typedef enum |
64 | | nak_error_codes { |
65 | | NONAK, /* No NAK seen */ |
66 | | INVALIDNAK, /* NAK cannot be used */ |
67 | | VALIDNAK /* NAK is valid */ |
68 | | } nak_code; |
69 | | |
70 | | /* |
71 | | * traffic shaping parameters |
72 | | */ |
73 | 0 | #define NTP_IBURST 6 /* packets in iburst */ |
74 | 0 | #define RESP_DELAY 1 /* refclock burst delay (s) */ |
75 | | |
76 | | /* |
77 | | * pool soliciting restriction duration (s) |
78 | | */ |
79 | 0 | #define POOL_SOLICIT_WINDOW 8 |
80 | | |
81 | | /* |
82 | | * peer_select groups statistics for a peer used by clock_select() and |
83 | | * clock_cluster(). |
84 | | */ |
85 | | typedef struct peer_select_tag { |
86 | | struct peer * peer; |
87 | | double synch; /* sync distance */ |
88 | | double error; /* jitter */ |
89 | | double seljit; /* selection jitter */ |
90 | | } peer_select; |
91 | | |
92 | | /* |
93 | | * System variables are declared here. Unless specified otherwise, all |
94 | | * times are in seconds. |
95 | | */ |
96 | | u_char sys_leap; /* system leap indicator, use set_sys_leap() to change this */ |
97 | | u_char xmt_leap; /* leap indicator sent in client requests, set up by set_sys_leap() */ |
98 | | u_char sys_stratum; /* system stratum */ |
99 | | s_char sys_precision; /* local clock precision (log2 s) */ |
100 | | double sys_rootdelay; /* roundtrip delay to primary source */ |
101 | | double sys_rootdisp; /* dispersion to primary source */ |
102 | | u_int32 sys_refid; /* reference id (network byte order) */ |
103 | | l_fp sys_reftime; /* last update time */ |
104 | | struct peer *sys_peer; /* current peer */ |
105 | | |
106 | | #ifdef LEAP_SMEAR |
107 | | struct leap_smear_info leap_smear; |
108 | | #endif |
109 | | int leap_sec_in_progress; |
110 | | |
111 | | /* |
112 | | * Rate controls. Leaky buckets are used to throttle the packet |
113 | | * transmission rates in order to protect busy servers such as at NIST |
114 | | * and USNO. There is a counter for each association and another for KoD |
115 | | * packets. The association counter decrements each second, but not |
116 | | * below zero. Each time a packet is sent the counter is incremented by |
117 | | * a configurable value representing the average interval between |
118 | | * packets. A packet is delayed as long as the counter is greater than |
119 | | * zero. Note this does not affect the time value computations. |
120 | | */ |
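/*
 * [Editor's sketch -- not part of ntp_proto.c]  A minimal model of the
 * leaky bucket described above: the per-association counter leaks one
 * unit per second (never below zero), each transmission charges the
 * average inter-packet interval, and sending is deferred while the
 * counter is still positive.  The names and the AVG_INTERVAL value are
 * illustrative assumptions, not the variables ntpd actually uses.
 */
#include <stdio.h>

#define AVG_INTERVAL    8       /* assumed average seconds between packets */

static int bucket;              /* per-association throttle counter */

static void one_second_tick(void)
{
        if (bucket > 0)
                bucket--;       /* leak, but never below zero */
}

static int may_transmit_now(void)
{
        if (bucket > 0)
                return 0;       /* still draining: delay the packet */
        bucket += AVG_INTERVAL; /* charge for the packet being sent */
        return 1;
}

int main(void)
{
        int t;

        for (t = 0; t < 20; t++) {
                if (may_transmit_now())
                        printf("t=%2d: transmit\n", t);
                one_second_tick();
        }
        return 0;
}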
121 | | /* |
122 | | * Nonspecified system state variables |
123 | | */ |
124 | | int sys_bclient; /* broadcast client enable */ |
125 | | double sys_bdelay; /* broadcast client default delay */ |
126 | | int sys_authenticate; /* require authentication for config */ |
127 | | l_fp sys_authdelay; /* authentication delay */ |
128 | | double sys_offset; /* current local clock offset */ |
129 | | double sys_mindisp = MINDISPERSE; /* minimum distance (s) */ |
130 | | double sys_maxdist = MAXDISTANCE; /* selection threshold */ |
131 | | double sys_jitter; /* system jitter */ |
132 | | u_long sys_epoch; /* last clock update time */ |
133 | | static double sys_clockhop; /* clockhop threshold */ |
134 | | static int leap_vote_ins; /* leap consensus for insert */ |
135 | | static int leap_vote_del; /* leap consensus for delete */ |
136 | | keyid_t sys_private; /* private value for session seed */ |
137 | | int sys_manycastserver; /* respond to manycast client pkts */ |
138 | | int ntp_mode7; /* respond to ntpdc (mode7) */ |
139 | | int peer_ntpdate; /* active peers in ntpdate mode */ |
140 | | int sys_survivors; /* truest of the truechimers */ |
141 | | char *sys_ident = NULL; /* identity scheme */ |
142 | | |
143 | | /* |
144 | | * TOS and multicast mapping stuff |
145 | | */ |
146 | | int sys_floor = 0; /* cluster stratum floor */ |
147 | | u_char sys_bcpollbstep = 0; /* Broadcast Poll backstep gate */ |
148 | | int sys_ceiling = STRATUM_UNSPEC - 1; /* cluster stratum ceiling */ |
149 | | int sys_minsane = 1; /* minimum candidates */ |
150 | | int sys_minclock = NTP_MINCLOCK; /* minimum candidates */ |
151 | | int sys_maxclock = NTP_MAXCLOCK; /* maximum candidates */ |
152 | | int sys_cohort = 0; /* cohort switch */ |
153 | | int sys_orphan = STRATUM_UNSPEC + 1; /* orphan stratum */ |
154 | | int sys_orphwait = NTP_ORPHWAIT; /* orphan wait */ |
155 | | int sys_beacon = BEACON; /* manycast beacon interval */ |
156 | | u_int sys_ttlmax; /* max ttl mapping vector index */ |
157 | | u_char sys_ttl[MAX_TTL]; /* ttl mapping vector */ |
158 | | |
159 | | /* |
160 | | * Statistics counters - first the good, then the bad |
161 | | */ |
162 | | u_long sys_stattime; /* elapsed time */ |
163 | | u_long sys_received; /* packets received */ |
164 | | u_long sys_processed; /* packets for this host */ |
165 | | u_long sys_newversion; /* current version */ |
166 | | u_long sys_oldversion; /* old version */ |
167 | | u_long sys_restricted; /* access denied */ |
168 | | u_long sys_badlength; /* bad length or format */ |
169 | | u_long sys_badauth; /* bad authentication */ |
170 | | u_long sys_declined; /* declined */ |
171 | | u_long sys_limitrejected; /* rate exceeded */ |
172 | | u_long sys_kodsent; /* KoD sent */ |
173 | | |
174 | | /* |
175 | | * Mechanism knobs: how soon do we peer_clear() or unpeer()? |
176 | | * |
177 | | * The default way is "on-receipt". If this was a packet from a |
178 | | * well-behaved source, on-receipt will offer the fastest recovery. |
179 | | * If this was from a DoS attack, the default way makes it easier |
180 | | * for a bad-guy to DoS us. So look and see what bites you harder |
181 | | * and choose according to your environment. |
182 | | */ |
183 | | int peer_clear_digest_early = 1; /* bad digest (TEST5) and Autokey */ |
184 | | int unpeer_crypto_early = 1; /* bad crypto (TEST9) */ |
185 | | int unpeer_crypto_nak_early = 1; /* crypto_NAK (TEST5) */ |
186 | | int unpeer_digest_early = 1; /* bad digest (TEST5) */ |
187 | | |
188 | | int dynamic_interleave = DYNAMIC_INTERLEAVE; /* Bug 2978 mitigation */ |
189 | | |
190 | | int kiss_code_check(u_char hisleap, u_char hisstratum, u_char hismode, u_int32 refid); |
191 | | nak_code valid_NAK (struct peer *peer, struct recvbuf *rbufp, u_char hismode); |
192 | | static double root_distance (struct peer *); |
193 | | static void clock_combine (peer_select *, int, int); |
194 | | static void peer_xmit (struct peer *); |
195 | | static void fast_xmit (struct recvbuf *, int, keyid_t, int); |
196 | | static void pool_xmit (struct peer *); |
197 | | static void clock_update (struct peer *); |
198 | | static void measure_precision(void); |
199 | | static double measure_tick_fuzz(void); |
200 | | static int local_refid (struct peer *); |
201 | | static int peer_unfit (struct peer *); |
202 | | #ifdef AUTOKEY |
203 | | static int group_test (char *, char *); |
204 | | #endif /* AUTOKEY */ |
205 | | #ifdef WORKER |
206 | | void pool_name_resolved (int, int, void *, const char *, |
207 | | const char *, const struct addrinfo *, |
208 | | const struct addrinfo *); |
209 | | #endif /* WORKER */ |
210 | | |
211 | | const char * amtoa (int am); |
212 | | |
213 | | |
214 | | void |
215 | | set_sys_leap( |
216 | | u_char new_sys_leap |
217 | | ) |
218 | 1 | { |
219 | 1 | sys_leap = new_sys_leap; |
220 | 1 | xmt_leap = sys_leap; |
221 | | |
222 | | /* |
223 | | * Under certain conditions we send faked leap bits to clients, so |
224 | | * xmt_leap may be changed below, but LEAP_NOTINSYNC is never overridden. |
225 | | */ |
226 | 1 | if (xmt_leap != LEAP_NOTINSYNC) { |
227 | 0 | if (leap_sec_in_progress) { |
228 | | /* always send "not sync" */ |
229 | 0 | xmt_leap = LEAP_NOTINSYNC; |
230 | 0 | } |
231 | | #ifdef LEAP_SMEAR |
232 | | else { |
233 | | /* |
234 | | * If leap smear is enabled in general we must |
235 | | * never send a leap second warning to clients, |
236 | | * so make sure we only send "in sync". |
237 | | */ |
238 | | if (leap_smear.enabled) |
239 | | xmt_leap = LEAP_NOWARNING; |
240 | | } |
241 | | #endif /* LEAP_SMEAR */ |
242 | 0 | } |
243 | 1 | } |
244 | | |
245 | | |
246 | | /* |
247 | | * Kiss Code check |
248 | | */ |
249 | | int |
250 | | kiss_code_check( |
251 | | u_char hisleap, |
252 | | u_char hisstratum, |
253 | | u_char hismode, |
254 | | u_int32 refid |
255 | | ) |
256 | 0 | { |
257 | |
258 | 0 | if ( hismode == MODE_SERVER |
259 | 0 | && hisleap == LEAP_NOTINSYNC |
260 | 0 | && hisstratum == STRATUM_UNSPEC) { |
261 | 0 | if(memcmp(&refid,"RATE", 4) == 0) { |
262 | 0 | return (RATEKISS); |
263 | 0 | } else if(memcmp(&refid,"DENY", 4) == 0) { |
264 | 0 | return (DENYKISS); |
265 | 0 | } else if(memcmp(&refid,"RSTR", 4) == 0) { |
266 | 0 | return (RSTRKISS); |
267 | 0 | } else if(memcmp(&refid,"X", 1) == 0) { |
268 | 0 | return (XKISS); |
269 | 0 | } |
270 | 0 | } |
271 | 0 | return (NOKISS); |
272 | 0 | } |
273 | | |
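/*
 * [Editor's sketch -- not part of ntp_proto.c]  kiss_code_check()
 * above recognizes a kiss-o'-death packet by MODE_SERVER,
 * LEAP_NOTINSYNC and STRATUM_UNSPEC, then compares the raw refid
 * bytes against the ASCII codes.  This fragment only shows that the
 * comparison is byte-wise on a 4-octet field with no NUL terminator,
 * and that the experimental class matches on the leading 'X' alone.
 * "XMAS" below is a hypothetical experimental code.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
        unsigned char refid[4];

        memcpy(refid, "RATE", 4);       /* refid bytes as carried in the packet */
        printf("RATE kiss? %d\n", memcmp(refid, "RATE", 4) == 0);       /* 1 */
        printf("X-class?   %d\n", memcmp(refid, "X", 1) == 0);          /* 0 */

        memcpy(refid, "XMAS", 4);       /* hypothetical experimental code */
        printf("X-class?   %d\n", memcmp(refid, "X", 1) == 0);          /* 1 */
        return 0;
}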
274 | | |
275 | | /* |
276 | | * Check that NAK is valid |
277 | | */ |
278 | | nak_code |
279 | | valid_NAK( |
280 | | struct peer *peer, |
281 | | struct recvbuf *rbufp, |
282 | | u_char hismode |
283 | | ) |
284 | 249 | { |
285 | 249 | int base_packet_length = MIN_V4_PKT_LEN; |
286 | 249 | int remainder_size; |
287 | 249 | struct pkt * rpkt; |
288 | 249 | int keyid; |
289 | 249 | l_fp p_org; /* origin timestamp */ |
290 | 249 | const l_fp * myorg; /* selected peer origin */ |
291 | | |
292 | | /* |
293 | | * Check to see if there is something beyond the basic packet |
294 | | */ |
295 | 249 | if (rbufp->recv_length == base_packet_length) { |
296 | 27 | return NONAK; |
297 | 27 | } |
298 | | |
299 | 222 | remainder_size = rbufp->recv_length - base_packet_length; |
300 | | /* |
301 | | * Is this a potential NAK? |
302 | | */ |
303 | 222 | if (remainder_size != 4) { |
304 | 148 | return NONAK; |
305 | 148 | } |
306 | | |
307 | | /* |
308 | | * Only server responses can contain NAK's |
309 | | */ |
310 | | |
311 | 74 | if (hismode != MODE_SERVER && |
312 | 74 | hismode != MODE_ACTIVE && |
313 | 74 | hismode != MODE_PASSIVE |
314 | 74 | ) { |
315 | 9 | return INVALIDNAK; |
316 | 9 | } |
317 | | |
318 | | /* |
319 | | * Make sure that the extra field in the packet is all zeros |
320 | | */ |
321 | 65 | rpkt = &rbufp->recv_pkt; |
322 | 65 | keyid = ntohl(((u_int32 *)rpkt)[base_packet_length / 4]); |
323 | 65 | if (keyid != 0) { |
324 | 61 | return INVALIDNAK; |
325 | 61 | } |
326 | | |
327 | | /* |
328 | | * During the first few packets of the autokey dance there will |
329 | | * not (yet) be a keyid, but in this case FLAG_SKEY is set. |
330 | | * So the NAK is invalid if either there's no peer, or |
331 | | * if the keyid is 0 and FLAG_SKEY is not set. |
332 | | */ |
333 | 4 | if (!peer || (!peer->keyid && !(peer->flags & FLAG_SKEY))) { |
334 | 4 | return INVALIDNAK; |
335 | 4 | } |
336 | | |
337 | | /* |
338 | | * The ORIGIN must match, or this cannot be a valid NAK, either. |
339 | | */ |
340 | 0 | NTOHL_FP(&rpkt->org, &p_org); |
341 | 0 | if (peer->flip > 0) |
342 | 0 | myorg = &peer->borg; |
343 | 0 | else |
344 | 0 | myorg = &peer->aorg; |
345 | |
346 | 0 | if (L_ISZERO(&p_org) || |
347 | 0 | L_ISZERO( myorg) || |
348 | 0 | !L_ISEQU(&p_org, myorg)) { |
349 | 0 | return INVALIDNAK; |
350 | 0 | } |
351 | | |
352 | | /* If we got past all those checks, we should be safe. Well, |
353 | | * as safe as we can ever be with an unauthenticated crypto-nak. |
354 | | */ |
355 | 0 | return VALIDNAK; |
356 | 0 | } |
357 | | |
358 | | |
359 | | /* |
360 | | * transmit - transmit procedure called by poll timeout |
361 | | */ |
362 | | void |
363 | | transmit( |
364 | | struct peer *peer /* peer structure pointer */ |
365 | | ) |
366 | 0 | { |
367 | 0 | u_char hpoll; |
368 | | |
369 | | /* |
370 | | * The polling state machine. There are two kinds of machines, |
371 | | * those that never expect a reply (broadcast and manycast |
372 | | * server modes) and those that do (all other modes). The dance |
373 | | * is intricate... |
374 | | */ |
375 | 0 | hpoll = peer->hpoll; |
376 | | |
377 | | /* |
378 | | * If we haven't received anything (even if unsync) since last |
379 | | * send, reset ppoll. |
380 | | */ |
381 | 0 | if (peer->outdate > peer->timelastrec && !peer->reach) |
382 | 0 | peer->ppoll = peer->maxpoll; |
383 | | |
384 | | /* |
385 | | * In broadcast mode the poll interval is never changed from |
386 | | * minpoll. |
387 | | */ |
388 | 0 | if (peer->cast_flags & (MDF_BCAST | MDF_MCAST)) { |
389 | 0 | peer->outdate = current_time; |
390 | 0 | poll_update(peer, hpoll); |
391 | 0 | if (sys_leap != LEAP_NOTINSYNC) |
392 | 0 | peer_xmit(peer); |
393 | 0 | return; |
394 | 0 | } |
395 | | |
396 | | /* |
397 | | * In manycast mode we start with unity ttl. The ttl is |
398 | | * increased by one for each poll until either sys_maxclock |
399 | | * servers have been found or the maximum ttl is reached. When |
400 | | * sys_maxclock servers are found we stop polling until one or |
401 | | * more servers have timed out or until less than sys_minclock |
402 | | * associations turn up. In this case additional better servers |
403 | | * are dragged in and preempt the existing ones. Once every |
404 | | * sys_beacon seconds we are to transmit unconditionally, but |
405 | | * this code is not quite right -- peer->unreach counts polls |
406 | | * and is being compared with sys_beacon, so the beacons happen |
407 | | * every sys_beacon polls. |
408 | | */ |
409 | 0 | if (peer->cast_flags & MDF_ACAST) { |
410 | 0 | peer->outdate = current_time; |
411 | 0 | poll_update(peer, hpoll); |
412 | 0 | if (peer->unreach > sys_beacon) { |
413 | 0 | peer->unreach = 0; |
414 | 0 | peer->ttl = 0; |
415 | 0 | peer_xmit(peer); |
416 | 0 | } else if ( sys_survivors < sys_minclock |
417 | 0 | || peer_associations < sys_maxclock) { |
418 | 0 | if (peer->ttl < sys_ttlmax) |
419 | 0 | peer->ttl++; |
420 | 0 | peer_xmit(peer); |
421 | 0 | } |
422 | 0 | peer->unreach++; |
423 | 0 | return; |
424 | 0 | } |
425 | | |
426 | | /* |
427 | | * Pool associations transmit unicast solicitations when there |
428 | | * are less than a hard limit of 2 * sys_maxclock associations, |
429 | | * and either less than sys_minclock survivors or less than |
430 | | * sys_maxclock associations. The hard limit prevents unbounded |
431 | | * growth in associations if the system clock or network quality |
432 | | * result in survivor count dipping below sys_minclock often. |
433 | | * This was observed testing with pool, where sys_maxclock == 12 |
434 | | * resulted in 60 associations without the hard limit. A |
435 | | * similar hard limit on manycastclient ephemeral associations |
436 | | * may be appropriate. |
437 | | */ |
438 | 0 | if (peer->cast_flags & MDF_POOL) { |
439 | 0 | peer->outdate = current_time; |
440 | 0 | poll_update(peer, hpoll); |
441 | 0 | if ( (peer_associations <= 2 * sys_maxclock) |
442 | 0 | && ( peer_associations < sys_maxclock |
443 | 0 | || sys_survivors < sys_minclock)) |
444 | 0 | pool_xmit(peer); |
445 | 0 | return; |
446 | 0 | } |
447 | | |
448 | | /* |
449 | | * In unicast modes the dance is much more intricate. It is |
450 | | * designed to back off whenever possible to minimize network |
451 | | * traffic. |
452 | | */ |
453 | 0 | if (peer->burst == 0) { |
454 | 0 | u_char oreach; |
455 | | |
456 | | /* |
457 | | * Update the reachability status. If not heard for |
458 | | * three consecutive polls, stuff infinity in the clock |
459 | | * filter. |
460 | | */ |
461 | 0 | oreach = peer->reach; |
462 | 0 | peer->outdate = current_time; |
463 | 0 | peer->unreach++; |
464 | 0 | peer->reach <<= 1; |
465 | 0 | if (!peer->reach) { |
466 | | |
467 | | /* |
468 | | * Here the peer is unreachable. If it was |
469 | | * previously reachable raise a trap. Send a |
470 | | * burst if enabled. |
471 | | */ |
472 | 0 | clock_filter(peer, 0., 0., MAXDISPERSE); |
473 | 0 | if (oreach) { |
474 | 0 | peer_unfit(peer); |
475 | 0 | report_event(PEVNT_UNREACH, peer, NULL); |
476 | 0 | } |
477 | 0 | if ( (peer->flags & FLAG_IBURST) |
478 | 0 | && peer->retry == 0) |
479 | 0 | peer->retry = NTP_RETRY; |
480 | 0 | } else { |
481 | | |
482 | | /* |
483 | | * Here the peer is reachable. Send a burst if |
484 | | * enabled and the peer is fit. Reset unreach |
485 | | * for persistent and ephemeral associations. |
486 | | * Unreach is also reset for survivors in |
487 | | * clock_select(). |
488 | | */ |
489 | 0 | hpoll = sys_poll; |
490 | 0 | if (!(peer->flags & FLAG_PREEMPT)) |
491 | 0 | peer->unreach = 0; |
492 | 0 | if ( (peer->flags & FLAG_BURST) |
493 | 0 | && peer->retry == 0 |
494 | 0 | && !peer_unfit(peer)) |
495 | 0 | peer->retry = NTP_RETRY; |
496 | 0 | } |
497 | | |
498 | | /* |
499 | | * Watch for timeout. If ephemeral, toss the rascal; |
500 | | * otherwise, bump the poll interval. Note the |
501 | | * poll_update() routine will clamp it to maxpoll. |
502 | | * If preemptible and we have more peers than maxclock, |
503 | | * and this peer has the minimum score of preemptibles, |
504 | | * demobilize. |
505 | | */ |
506 | 0 | if (peer->unreach >= NTP_UNREACH) { |
507 | 0 | hpoll++; |
508 | | /* ephemeral: no FLAG_CONFIG nor FLAG_PREEMPT */ |
509 | 0 | if (!(peer->flags & (FLAG_CONFIG | FLAG_PREEMPT))) { |
510 | 0 | report_event(PEVNT_RESTART, peer, "timeout"); |
511 | 0 | peer_clear(peer, "TIME"); |
512 | 0 | unpeer(peer); |
513 | 0 | return; |
514 | 0 | } |
515 | 0 | if ( (peer->flags & FLAG_PREEMPT) |
516 | 0 | && (peer_associations > sys_maxclock) |
517 | 0 | && score_all(peer)) { |
518 | 0 | report_event(PEVNT_RESTART, peer, "timeout"); |
519 | 0 | peer_clear(peer, "TIME"); |
520 | 0 | unpeer(peer); |
521 | 0 | return; |
522 | 0 | } |
523 | 0 | } |
524 | 0 | } else { |
525 | 0 | peer->burst--; |
526 | 0 | if (peer->burst == 0) { |
527 | | |
528 | | /* |
529 | | * If ntpdate mode and the clock has not been |
530 | | * set and all peers have completed the burst, |
531 | | * we declare a successful failure. |
532 | | */ |
533 | 0 | if (mode_ntpdate) { |
534 | 0 | peer_ntpdate--; |
535 | 0 | if (peer_ntpdate == 0) { |
536 | 0 | msyslog(LOG_NOTICE, |
537 | 0 | "ntpd: no servers found"); |
538 | 0 | if (!msyslog_term) |
539 | 0 | printf( |
540 | 0 | "ntpd: no servers found\n"); |
541 | 0 | exit (0); |
542 | 0 | } |
543 | 0 | } |
544 | 0 | } |
545 | 0 | } |
546 | 0 | if (peer->retry > 0) |
547 | 0 | peer->retry--; |
548 | | |
549 | | /* |
550 | | * Do not transmit if in broadcast client mode. |
551 | | */ |
552 | 0 | poll_update(peer, hpoll); |
553 | 0 | if (peer->hmode != MODE_BCLIENT) |
554 | 0 | peer_xmit(peer); |
555 | |
556 | 0 | return; |
557 | 0 | } |
558 | | |
559 | | |
560 | | const char * |
561 | | amtoa( |
562 | | int am |
563 | | ) |
564 | 249 | { |
565 | 249 | char *bp; |
566 | | |
567 | 249 | switch(am) { |
568 | 0 | case AM_ERR: return "AM_ERR"; |
569 | 16 | case AM_NOMATCH: return "AM_NOMATCH"; |
570 | 0 | case AM_PROCPKT: return "AM_PROCPKT"; |
571 | 0 | case AM_BCST: return "AM_BCST"; |
572 | 147 | case AM_FXMIT: return "AM_FXMIT"; |
573 | 25 | case AM_MANYCAST: return "AM_MANYCAST"; |
574 | 59 | case AM_NEWPASS: return "AM_NEWPASS"; |
575 | 2 | case AM_NEWBCL: return "AM_NEWBCL"; |
576 | 0 | case AM_POSSBCL: return "AM_POSSBCL"; |
577 | 0 | default: |
578 | 0 | LIB_GETBUF(bp); |
579 | 0 | snprintf(bp, LIB_BUFLENGTH, "AM_#%d", am); |
580 | 0 | return bp; |
581 | 249 | } |
582 | 249 | } |
583 | | |
584 | | |
585 | | /* |
586 | | * receive - receive procedure called for each packet received |
587 | | */ |
588 | | void |
589 | | receive( |
590 | | struct recvbuf *rbufp |
591 | | ) |
592 | 1.74k | { |
593 | 1.74k | register struct peer *peer; /* peer structure pointer */ |
594 | 1.74k | register struct pkt *pkt; /* receive packet pointer */ |
595 | 1.74k | u_char hisversion; /* packet version */ |
596 | 1.74k | u_char hisleap; /* packet leap indicator */ |
597 | 1.74k | u_char hismode; /* packet mode */ |
598 | 1.74k | u_char hisstratum; /* packet stratum */ |
599 | 1.74k | r4addr r4a; /* address restrictions */ |
600 | 1.74k | u_short restrict_mask; /* restrict bits */ |
601 | 1.74k | const char *hm_str; /* hismode string */ |
602 | 1.74k | const char *am_str; /* association match string */ |
603 | 1.74k | int kissCode = NOKISS; /* Kiss Code */ |
604 | 1.74k | int has_mac; /* length of MAC field */ |
605 | 1.74k | int authlen; /* offset of MAC field */ |
606 | 1.74k | auth_code is_authentic = AUTH_UNKNOWN; /* Was AUTH_NONE */ |
607 | 1.74k | nak_code crypto_nak_test; /* result of crypto-NAK check */ |
608 | 1.74k | int retcode = AM_NOMATCH; /* match code */ |
609 | 1.74k | keyid_t skeyid = 0; /* key IDs */ |
610 | 1.74k | u_int32 opcode = 0; /* extension field opcode */ |
611 | 1.74k | sockaddr_u *dstadr_sin; /* active runway */ |
612 | 1.74k | struct peer *peer2; /* aux peer structure pointer */ |
613 | 1.74k | endpt *match_ep; /* newpeer() local address */ |
614 | 1.74k | l_fp p_org; /* origin timestamp */ |
615 | 1.74k | l_fp p_rec; /* receive timestamp */ |
616 | 1.74k | l_fp p_xmt; /* transmit timestamp */ |
617 | | #ifdef AUTOKEY |
618 | | char hostname[NTP_MAXSTRLEN + 1]; |
619 | | char *groupname = NULL; |
620 | | struct autokey *ap; /* autokey structure pointer */ |
621 | | int rval; /* cookie snatcher */ |
622 | | keyid_t pkeyid = 0, tkeyid = 0; /* key IDs */ |
623 | | #endif /* AUTOKEY */ |
624 | | #ifdef HAVE_NTP_SIGND |
625 | | static unsigned char zero_key[16]; |
626 | | #endif /* HAVE_NTP_SIGND */ |
627 | | |
628 | | /* |
629 | | * Note that there are many places we do not call record_raw_stats(). |
630 | | * |
631 | | * We only want to call it *after* we've sent a response, or perhaps |
632 | | * when we've decided to drop a packet. |
633 | | */ |
634 | | |
635 | | /* |
636 | | * Monitor the packet and get restrictions. Note that the packet |
637 | | * length for control and private mode packets must be checked |
638 | | * by the service routines. Some restrictions have to be handled |
639 | | * later in order to generate a kiss-o'-death packet. |
640 | | */ |
641 | | /* |
642 | | * Bogus port check is before anything, since it probably |
643 | | * reveals a clogging attack. |
644 | | */ |
645 | 1.74k | sys_received++; |
646 | 1.74k | if (0 == SRCPORT(&rbufp->recv_srcadr)) { |
647 | 1 | sys_badlength++; |
648 | 1 | return; /* bogus port */ |
649 | 1 | } |
650 | 1.74k | restrictions(&rbufp->recv_srcadr, &r4a); |
651 | 1.74k | restrict_mask = r4a.rflags; |
652 | | |
653 | 1.74k | pkt = &rbufp->recv_pkt; |
654 | 1.74k | hisversion = PKT_VERSION(pkt->li_vn_mode); |
655 | 1.74k | hisleap = PKT_LEAP(pkt->li_vn_mode); |
656 | 1.74k | hismode = (int)PKT_MODE(pkt->li_vn_mode); |
657 | 1.74k | hisstratum = PKT_TO_STRATUM(pkt->stratum); |
658 | 1.74k | DPRINTF(1, ("receive: at %ld %s<-%s ippeerlimit %d mode %d iflags %s restrict %s org %#010x.%08x xmt %#010x.%08x\n", |
659 | 1.74k | current_time, stoa(&rbufp->dstadr->sin), |
660 | 1.74k | stoa(&rbufp->recv_srcadr), r4a.ippeerlimit, hismode, |
661 | 1.74k | build_iflags(rbufp->dstadr->flags), |
662 | 1.74k | build_rflags(restrict_mask), |
663 | 1.74k | ntohl(pkt->org.l_ui), ntohl(pkt->org.l_uf), |
664 | 1.74k | ntohl(pkt->xmt.l_ui), ntohl(pkt->xmt.l_uf))); |
665 | | |
666 | | /* See basic mode and broadcast checks, below */ |
667 | 1.74k | INSIST(0 != hisstratum); |
668 | | |
669 | 1.74k | if (restrict_mask & RES_IGNORE) { |
670 | 29 | DPRINTF(2, ("receive: drop: RES_IGNORE\n")); |
671 | 29 | sys_restricted++; |
672 | 29 | return; /* ignore everything */ |
673 | 29 | } |
674 | 1.71k | if (hismode == MODE_PRIVATE) { |
675 | 1 | if (!ntp_mode7 || (restrict_mask & RES_NOQUERY)) { |
676 | 1 | DPRINTF(2, ("receive: drop: RES_NOQUERY\n")); |
677 | 1 | sys_restricted++; |
678 | 1 | return; /* no query private */ |
679 | 1 | } |
680 | 0 | process_private(rbufp, ((restrict_mask & |
681 | 0 | RES_NOMODIFY) == 0)); |
682 | 0 | return; |
683 | 1 | } |
684 | 1.71k | if (hismode == MODE_CONTROL) { |
685 | 1.34k | if (restrict_mask & RES_NOQUERY) { |
686 | 0 | DPRINTF(2, ("receive: drop: RES_NOQUERY\n")); |
687 | 0 | sys_restricted++; |
688 | 0 | return; /* no query control */ |
689 | 0 | } |
690 | 1.34k | process_control(rbufp, restrict_mask); |
691 | 1.34k | return; |
692 | 1.34k | } |
693 | 371 | if (restrict_mask & RES_DONTSERVE) { |
694 | 0 | DPRINTF(2, ("receive: drop: RES_DONTSERVE\n")); |
695 | 0 | sys_restricted++; |
696 | 0 | return; /* no time serve */ |
697 | 0 | } |
698 | | |
699 | | /* |
700 | | * This is for testing. If restricted drop ten percent of |
701 | | * surviving packets. |
702 | | */ |
703 | 371 | if (restrict_mask & RES_FLAKE) { |
704 | 0 | if ((double)ntp_random() / 0x7fffffff < .1) { |
705 | 0 | DPRINTF(2, ("receive: drop: RES_FLAKE\n")); |
706 | 0 | sys_restricted++; |
707 | 0 | return; /* no flakeway */ |
708 | 0 | } |
709 | 0 | } |
710 | | |
711 | | /* |
712 | | ** Format Layer Checks |
713 | | ** |
714 | | ** Validate the packet format. The packet size, packet header, |
715 | | ** and any extension field lengths are checked. We identify |
716 | | ** the beginning of the MAC, which marks the upper limit |
717 | | ** of the hash computation. |
718 | | ** |
719 | | ** In case of a format layer check violation, the packet is |
720 | | ** discarded with no further processing. |
721 | | */ |
722 | | |
723 | | /* |
724 | | * Version check must be after the query packets, since they |
725 | | * intentionally use an early version. |
726 | | */ |
727 | 371 | if (hisversion == NTP_VERSION) { |
728 | 35 | sys_newversion++; /* new version */ |
729 | 336 | } else if ( !(restrict_mask & RES_VERSION) |
730 | 336 | && hisversion >= NTP_OLDVERSION) { |
731 | 282 | sys_oldversion++; /* previous version */ |
732 | 282 | } else { |
733 | 54 | DPRINTF(2, ("receive: drop: RES_VERSION\n")); |
734 | 54 | sys_badlength++; |
735 | 54 | return; /* old version */ |
736 | 54 | } |
737 | | |
738 | | /* |
739 | | * Figure out his mode and validate the packet. This has some |
740 | | * legacy raunch that probably should be removed. In very early |
741 | | * NTP versions mode 0 was equivalent to what later versions |
742 | | * would interpret as client mode. |
743 | | */ |
744 | 317 | if (hismode == MODE_UNSPEC) { |
745 | 126 | if (hisversion == NTP_OLDVERSION) { |
746 | 123 | hismode = MODE_CLIENT; |
747 | 123 | } else { |
748 | 3 | DPRINTF(2, ("receive: drop: MODE_UNSPEC\n")); |
749 | 3 | sys_badlength++; |
750 | 3 | return; /* invalid mode */ |
751 | 3 | } |
752 | 126 | } |
753 | | |
754 | | /* |
755 | | * Parse the extension field if present. We figure out whether |
756 | | * an extension field is present by measuring the MAC size. If |
757 | | * the number of words following the packet header is 0, no MAC |
758 | | * is present and the packet is not authenticated. If 1, the |
759 | | * packet is a crypto-NAK; if 3, the packet is authenticated |
760 | | * with DES; if 5, the packet is authenticated with MD5; if 6, |
761 | | * the packet is authenticated with SHA. If 2 or 4, the packet |
762 | | * is a runt and discarded forthwith. If greater than 6, an |
763 | | * extension field is present, so we subtract the length of the |
764 | | * field and go around again. |
765 | | * |
766 | | * Note the above description is lame. We should/could also check |
767 | | * the two bytes that make up the EF type and subtype, and then |
768 | | * check the two bytes that tell us the EF length. A legacy MAC |
769 | | * has a 4 byte keyID, and for conforming symmetric keys its value |
770 | | * must be <= 64k, meaning the top two bytes will always be zero. |
771 | | * Since the EF Type of 0 is reserved/unused, there's no way a |
772 | | * conforming legacy MAC could ever be misinterpreted as an EF. |
773 | | * |
774 | | * There is more, but this isn't the place to document it. |
775 | | */ |
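/*
 * [Editor's sketch -- not part of ntp_proto.c]  The word-count rule
 * described above, as a standalone classifier.  "words" is the number
 * of 32-bit words following the 48-octet header; the real loop below
 * first strips extension fields until what remains is short enough to
 * be a MAC.  Names and strings here are illustrative only.
 */
#include <stdio.h>

static const char *classify_trailer(int words)
{
        switch (words) {
        case 0:  return "no MAC, packet unauthenticated";
        case 1:  return "crypto-NAK (keyID only)";
        case 3:  return "DES MAC";
        case 5:  return "MD5 MAC (keyID + 16 octets)";
        case 6:  return "SHA MAC (keyID + 20 octets)";
        case 2:
        case 4:  return "runt: discard";
        default:
                return words > 6 ? "extension field present: skip it, re-check"
                                 : "runt: discard";
        }
}

int main(void)
{
        int w;

        for (w = 0; w <= 7; w++)
                printf("%d words: %s\n", w, classify_trailer(w));
        return 0;
}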
776 | | |
777 | 314 | authlen = LEN_PKT_NOMAC; |
778 | 314 | has_mac = rbufp->recv_length - authlen; |
779 | 727 | while (has_mac > 0) { |
780 | 676 | u_int32 len; |
781 | | #ifdef AUTOKEY |
782 | | u_int32 hostlen; |
783 | | struct exten *ep; |
784 | | #endif /*AUTOKEY */ |
785 | | |
786 | 676 | if (has_mac % 4 != 0 || has_mac < (int)MIN_MAC_LEN) { |
787 | 13 | DPRINTF(2, ("receive: drop: bad post-packet length\n")); |
788 | 13 | sys_badlength++; |
789 | 13 | return; /* bad length */ |
790 | 13 | } |
791 | | /* |
792 | | * This next test is clearly wrong - it needlessly |
793 | | * prohibits short EFs (which don't yet exist) |
794 | | */ |
795 | 663 | if (has_mac <= (int)MAX_MAC_LEN) { |
796 | 217 | skeyid = ntohl(((u_int32 *)pkt)[authlen / 4]); |
797 | 217 | break; |
798 | | |
799 | 446 | } else { |
800 | 446 | opcode = ntohl(((u_int32 *)pkt)[authlen / 4]); |
801 | 446 | len = opcode & 0xffff; |
802 | 446 | if ( len % 4 != 0 |
803 | 446 | || len < 4 |
804 | 446 | || (int)len + authlen > rbufp->recv_length) { |
805 | 33 | DPRINTF(2, ("receive: drop: bad EF length\n")); |
806 | 33 | sys_badlength++; |
807 | 33 | return; /* bad length */ |
808 | 33 | } |
809 | | #ifdef AUTOKEY |
810 | | /* |
811 | | * Extract calling group name for later. If |
812 | | * sys_groupname is non-NULL, there must be |
813 | | * a group name provided to elicit a response. |
814 | | */ |
815 | | if ( (opcode & 0x3fff0000) == CRYPTO_ASSOC |
816 | | && sys_groupname != NULL) { |
817 | | ep = (struct exten *)&((u_int32 *)pkt)[authlen / 4]; |
818 | | hostlen = ntohl(ep->vallen); |
819 | | if ( hostlen >= sizeof(hostname) |
820 | | || hostlen > len - |
821 | | offsetof(struct exten, pkt)) { |
822 | | DPRINTF(2, ("receive: drop: bad autokey hostname length\n")); |
823 | | sys_badlength++; |
824 | | return; /* bad length */ |
825 | | } |
826 | | memcpy(hostname, &ep->pkt, hostlen); |
827 | | hostname[hostlen] = '\0'; |
828 | | groupname = strchr(hostname, '@'); |
829 | | if (groupname == NULL) { |
830 | | DPRINTF(2, ("receive: drop: empty autokey groupname\n")); |
831 | | sys_declined++; |
832 | | return; |
833 | | } |
834 | | groupname++; |
835 | | } |
836 | | #endif /* AUTOKEY */ |
837 | 413 | authlen += len; |
838 | 413 | has_mac -= len; |
839 | 413 | } |
840 | 663 | } |
841 | | |
842 | | /* |
843 | | * If has_mac is < 0 we had a malformed packet. |
844 | | */ |
845 | 268 | if (has_mac < 0) { |
846 | 19 | DPRINTF(2, ("receive: drop: post-packet under-read\n")); |
847 | 19 | sys_badlength++; |
848 | 19 | return; /* bad length */ |
849 | 19 | } |
850 | | |
851 | | /* |
852 | | ** Packet Data Verification Layer |
853 | | ** |
854 | | ** This layer verifies the packet data content. If |
855 | | ** authentication is required, a MAC must be present. |
856 | | ** If a MAC is present, it must validate. |
857 | | ** Crypto-NAK? Look - a shiny thing! |
858 | | ** |
859 | | ** If authentication fails, we're done. |
860 | | */ |
861 | | |
862 | | /* |
863 | | * If authentication is explicitly required, a MAC must be present. |
864 | | */ |
865 | 249 | if (restrict_mask & RES_DONTTRUST && has_mac == 0) { |
866 | 0 | DPRINTF(2, ("receive: drop: RES_DONTTRUST\n")); |
867 | 0 | sys_restricted++; |
868 | 0 | return; /* access denied */ |
869 | 0 | } |
870 | | |
871 | | /* |
872 | | * Update the MRU list and finger the cloggers. It can be a |
873 | | * little expensive, so turn it off for production use. |
874 | | * RES_LIMITED and RES_KOD will be cleared in the returned |
875 | | * restrict_mask unless one or both actions are warranted. |
876 | | */ |
877 | 249 | restrict_mask = ntp_monitor(rbufp, restrict_mask); |
878 | 249 | if (restrict_mask & RES_LIMITED) { |
879 | 0 | sys_limitrejected++; |
880 | 0 | if ( !(restrict_mask & RES_KOD) |
881 | 0 | || MODE_BROADCAST == hismode |
882 | 0 | || MODE_SERVER == hismode) { |
883 | 0 | if (MODE_SERVER == hismode) { |
884 | 0 | DPRINTF(1, ("Possibly self-induced rate limiting of MODE_SERVER from %s\n", |
885 | 0 | stoa(&rbufp->recv_srcadr))); |
886 | 0 | } else { |
887 | 0 | DPRINTF(2, ("receive: drop: RES_KOD\n")); |
888 | 0 | } |
889 | 0 | return; /* rate exceeded */ |
890 | 0 | } |
891 | 0 | if (hismode == MODE_CLIENT) |
892 | 0 | fast_xmit(rbufp, MODE_SERVER, skeyid, |
893 | 0 | restrict_mask); |
894 | 0 | else |
895 | 0 | fast_xmit(rbufp, MODE_ACTIVE, skeyid, |
896 | 0 | restrict_mask); |
897 | 0 | return; /* rate exceeded */ |
898 | 0 | } |
899 | 249 | restrict_mask &= ~RES_KOD; |
900 | | |
901 | | /* |
902 | | * We have tossed out as many buggy packets as possible early in |
903 | | * the game to reduce the exposure to a clogging attack. Now we |
904 | | * have to burn some cycles to find the association and |
905 | | * authenticate the packet if required. Note that we burn only |
906 | | * digest cycles, again to reduce exposure. There may be no |
907 | | * matching association and that's okay. |
908 | | * |
909 | | * More on the autokey mambo. Normally the local interface is |
910 | | * found when the association was mobilized with respect to a |
911 | | * designated remote address. We assume packets arriving from |
912 | | * the remote address arrive via this interface and the local |
913 | | * address used to construct the autokey is the unicast address |
914 | | * of the interface. However, if the sender is a broadcaster, |
915 | | * the interface broadcast address is used instead. |
916 | | * Notwithstanding this technobabble, if the sender is a |
917 | | * multicaster, the broadcast address is null, so we use the |
918 | | * unicast address anyway. Don't ask. |
919 | | */ |
920 | | |
921 | 249 | peer = findpeer(rbufp, hismode, &retcode); |
922 | 249 | dstadr_sin = &rbufp->dstadr->sin; |
923 | 249 | NTOHL_FP(&pkt->org, &p_org); |
924 | 249 | NTOHL_FP(&pkt->rec, &p_rec); |
925 | 249 | NTOHL_FP(&pkt->xmt, &p_xmt); |
926 | 249 | hm_str = modetoa(hismode); |
927 | 249 | am_str = amtoa(retcode); |
928 | | |
929 | | /* |
930 | | * Authentication is conditioned by three switches: |
931 | | * |
932 | | * NOPEER (RES_NOPEER) do not mobilize an association unless |
933 | | * authenticated |
934 | | * NOTRUST (RES_DONTTRUST) do not allow access unless |
935 | | * authenticated (implies NOPEER) |
936 | | * enable (sys_authenticate) master NOPEER switch, by default |
937 | | * on |
938 | | * |
939 | | * The NOPEER and NOTRUST can be specified on a per-client basis |
940 | | * using the restrict command. The enable switch if on implies |
941 | | * NOPEER for all clients. There are four outcomes: |
942 | | * |
943 | | * NONE The packet has no MAC. |
944 | | * OK the packet has a MAC and authentication succeeds |
945 | | * ERROR the packet has a MAC and authentication fails |
946 | | * CRYPTO crypto-NAK. The MAC has four octets only. |
947 | | * |
948 | | * Note: The AUTH(x, y) macro is used to filter outcomes. If x |
949 | | * is zero, acceptable outcomes of y are NONE and OK. If x is |
950 | | * one, the only acceptable outcome of y is OK. |
951 | | */ |
952 | 249 | crypto_nak_test = valid_NAK(peer, rbufp, hismode); |
953 | | |
954 | | /* |
955 | | * Drop any invalid crypto-NAKs |
956 | | */ |
957 | 249 | if (crypto_nak_test == INVALIDNAK) { |
958 | 74 | report_event(PEVNT_AUTH, peer, "Invalid_NAK"); |
959 | 74 | if (0 != peer) { |
960 | 0 | peer->badNAK++; |
961 | 0 | } |
962 | 74 | msyslog(LOG_ERR, "Invalid-NAK error at %ld %s<-%s", |
963 | 74 | current_time, stoa(dstadr_sin), stoa(&rbufp->recv_srcadr)); |
964 | 74 | return; |
965 | 74 | } |
966 | | |
967 | 175 | if (has_mac == 0) { |
968 | 32 | restrict_mask &= ~RES_MSSNTP; |
969 | 32 | is_authentic = AUTH_NONE; /* not required */ |
970 | 32 | DPRINTF(1, ("receive: at %ld %s<-%s mode %d/%s:%s len %d org %#010x.%08x xmt %#010x.%08x NOMAC\n", |
971 | 32 | current_time, stoa(dstadr_sin), |
972 | 32 | stoa(&rbufp->recv_srcadr), hismode, hm_str, am_str, |
973 | 32 | authlen, |
974 | 32 | ntohl(pkt->org.l_ui), ntohl(pkt->org.l_uf), |
975 | 32 | ntohl(pkt->xmt.l_ui), ntohl(pkt->xmt.l_uf))); |
976 | 143 | } else if (crypto_nak_test == VALIDNAK) { |
977 | 0 | restrict_mask &= ~RES_MSSNTP; |
978 | 0 | is_authentic = AUTH_CRYPTO; /* crypto-NAK */ |
979 | 0 | DPRINTF(1, ("receive: at %ld %s<-%s mode %d/%s:%s keyid %08x len %d auth %d org %#010x.%08x xmt %#010x.%08x CRYPTONAK\n", |
980 | 0 | current_time, stoa(dstadr_sin), |
981 | 0 | stoa(&rbufp->recv_srcadr), hismode, hm_str, am_str, |
982 | 0 | skeyid, authlen + has_mac, is_authentic, |
983 | 0 | ntohl(pkt->org.l_ui), ntohl(pkt->org.l_uf), |
984 | 0 | ntohl(pkt->xmt.l_ui), ntohl(pkt->xmt.l_uf))); |
985 | |
986 | | #ifdef HAVE_NTP_SIGND |
987 | | /* |
988 | | * If the signature is 20 bytes long, the last 16 of |
989 | | * which are zero, then this is a Microsoft client |
990 | | * wanting AD-style authentication of the server's |
991 | | * reply. |
992 | | * |
993 | | * This is described in Microsoft's WSPP docs, in MS-SNTP: |
994 | | * http://msdn.microsoft.com/en-us/library/cc212930.aspx |
995 | | */ |
996 | | } else if ( has_mac == MAX_MD5_LEN |
997 | | && (restrict_mask & RES_MSSNTP) |
998 | | && (retcode == AM_FXMIT || retcode == AM_NEWPASS) |
999 | | && (memcmp(zero_key, (char *)pkt + authlen + 4, |
1000 | | MAX_MD5_LEN - 4) == 0)) { |
1001 | | is_authentic = AUTH_NONE; |
1002 | | DPRINTF(1, ("receive: at %ld %s<-%s mode %d/%s:%s len %d org %#010x.%08x xmt %#010x.%08x SIGND\n", |
1003 | | current_time, stoa(dstadr_sin), |
1004 | | stoa(&rbufp->recv_srcadr), hismode, hm_str, am_str, |
1005 | | authlen, |
1006 | | ntohl(pkt->org.l_ui), ntohl(pkt->org.l_uf), |
1007 | | ntohl(pkt->xmt.l_ui), ntohl(pkt->xmt.l_uf))); |
1008 | | #endif /* HAVE_NTP_SIGND */ |
1009 | |
1010 | 143 | } else { |
1011 | | /* |
1012 | | * has_mac is not 0 |
1013 | | * Not a VALID_NAK |
1014 | | * Not an MS-SNTP SIGND packet |
1015 | | * |
1016 | | * So there is a MAC here. |
1017 | | */ |
1018 | | |
1019 | 143 | restrict_mask &= ~RES_MSSNTP; |
1020 | | #ifdef AUTOKEY |
1021 | | /* |
1022 | | * For autokey modes, generate the session key |
1023 | | * and install in the key cache. Use the socket |
1024 | | * broadcast or unicast address as appropriate. |
1025 | | */ |
1026 | | if (crypto_flags && skeyid > NTP_MAXKEY) { |
1027 | | |
1028 | | /* |
1029 | | * More on the autokey dance (AKD). A cookie is |
1030 | | * constructed from public and private values. |
1031 | | * For broadcast packets, the cookie is public |
1032 | | * (zero). For packets that match no |
1033 | | * association, the cookie is hashed from the |
1034 | | * addresses and private value. For server |
1035 | | * packets, the cookie was previously obtained |
1036 | | * from the server. For symmetric modes, the |
1037 | | * cookie was previously constructed using an |
1038 | | * agreement protocol; however, should PKI be |
1039 | | * unavailable, we construct a fake agreement as |
1040 | | * the EXOR of the peer and host cookies. |
1041 | | * |
1042 | | * hismode ephemeral persistent |
1043 | | * ======================================= |
1044 | | * active 0 cookie# |
1045 | | * passive 0% cookie# |
1046 | | * client sys cookie 0% |
1047 | | * server 0% sys cookie |
1048 | | * broadcast 0 0 |
1049 | | * |
1050 | | * # if unsync, 0 |
1051 | | * % can't happen |
1052 | | */ |
1053 | | if (has_mac < (int)MAX_MD5_LEN) { |
1054 | | DPRINTF(2, ("receive: drop: MD5 digest too short\n")); |
1055 | | sys_badauth++; |
1056 | | return; |
1057 | | } |
1058 | | if (hismode == MODE_BROADCAST) { |
1059 | | |
1060 | | /* |
1061 | | * For broadcaster, use the interface |
1062 | | * broadcast address when available; |
1063 | | * otherwise, use the unicast address |
1064 | | * found when the association was |
1065 | | * mobilized. However, if this is from |
1066 | | * the wildcard interface, game over. |
1067 | | */ |
1068 | | if ( crypto_flags |
1069 | | && rbufp->dstadr == |
1070 | | ANY_INTERFACE_CHOOSE(&rbufp->recv_srcadr)) { |
1071 | | DPRINTF(2, ("receive: drop: BCAST from wildcard\n")); |
1072 | | sys_restricted++; |
1073 | | return; /* no wildcard */ |
1074 | | } |
1075 | | pkeyid = 0; |
1076 | | if (!SOCK_UNSPEC(&rbufp->dstadr->bcast)) |
1077 | | dstadr_sin = |
1078 | | &rbufp->dstadr->bcast; |
1079 | | } else if (peer == NULL) { |
1080 | | pkeyid = session_key( |
1081 | | &rbufp->recv_srcadr, dstadr_sin, 0, |
1082 | | sys_private, 0); |
1083 | | } else { |
1084 | | pkeyid = peer->pcookie; |
1085 | | } |
1086 | | |
1087 | | /* |
1088 | | * The session key includes both the public |
1089 | | * values and cookie. In case of an extension |
1090 | | * field, the cookie used for authentication |
1091 | | * purposes is zero. Note the hash is saved for |
1092 | | * use later in the autokey mambo. |
1093 | | */ |
1094 | | if (authlen > (int)LEN_PKT_NOMAC && pkeyid != 0) { |
1095 | | session_key(&rbufp->recv_srcadr, |
1096 | | dstadr_sin, skeyid, 0, 2); |
1097 | | tkeyid = session_key( |
1098 | | &rbufp->recv_srcadr, dstadr_sin, |
1099 | | skeyid, pkeyid, 0); |
1100 | | } else { |
1101 | | tkeyid = session_key( |
1102 | | &rbufp->recv_srcadr, dstadr_sin, |
1103 | | skeyid, pkeyid, 2); |
1104 | | } |
1105 | | |
1106 | | } |
1107 | | #endif /* AUTOKEY */ |
1108 | | |
1109 | | /* |
1110 | | * Compute the cryptosum. Note a clogging attack may |
1111 | | * succeed in bloating the key cache. If an autokey, |
1112 | | * purge it immediately, since we won't be needing it |
1113 | | * again. If the packet is authentic, it can mobilize an |
1114 | | * association. Note that there is no key zero. |
1115 | | */ |
1116 | 143 | if (!authdecrypt(skeyid, (u_int32 *)pkt, authlen, |
1117 | 143 | has_mac)) |
1118 | 143 | is_authentic = AUTH_ERROR; |
1119 | 0 | else |
1120 | 0 | is_authentic = AUTH_OK; |
1121 | | #ifdef AUTOKEY |
1122 | | if (crypto_flags && skeyid > NTP_MAXKEY) |
1123 | | authtrust(skeyid, 0); |
1124 | | #endif /* AUTOKEY */ |
1125 | 143 | DPRINTF(1, ("receive: at %ld %s<-%s mode %d/%s:%s keyid %08x len %d auth %d org %#010x.%08x xmt %#010x.%08x MAC\n", |
1126 | 143 | current_time, stoa(dstadr_sin), |
1127 | 143 | stoa(&rbufp->recv_srcadr), hismode, hm_str, am_str, |
1128 | 143 | skeyid, authlen + has_mac, is_authentic, |
1129 | 143 | ntohl(pkt->org.l_ui), ntohl(pkt->org.l_uf), |
1130 | 143 | ntohl(pkt->xmt.l_ui), ntohl(pkt->xmt.l_uf))); |
1131 | 143 | } |
1132 | | |
1133 | | |
1134 | | /* |
1135 | | * Bug 3454: |
1136 | | * |
1137 | | * Now come at this from a different perspective: |
1138 | | * - If we expect a MAC and it's not there, we drop it. |
1139 | | * - If we expect one keyID and get another, we drop it. |
1140 | | * - If we have a MAC and it hasn't been validated yet, try. |
1141 | | * - If the provided MAC doesn't validate, we drop it. |
1142 | | * |
1143 | | * There might be more to this. |
1144 | | */ |
1145 | 175 | if (0 != peer && 0 != peer->keyid) { |
1146 | | /* Should we msyslog() any of these? */ |
1147 | | |
1148 | | /* |
1149 | | * This should catch: |
1150 | | * - no keyID where one is expected, |
1151 | | * - different keyID than what we expect. |
1152 | | */ |
1153 | 0 | if (peer->keyid != skeyid) { |
1154 | 0 | DPRINTF(2, ("receive: drop: Wanted keyID %d, got %d from %s\n", |
1155 | 0 | peer->keyid, skeyid, |
1156 | 0 | stoa(&rbufp->recv_srcadr))); |
1157 | 0 | sys_restricted++; |
1158 | 0 | return; /* drop: access denied */ |
1159 | 0 | } |
1160 | | |
1161 | | /* |
1162 | | * if has_mac != 0 ... |
1163 | | * - If it has not yet been validated, do so. |
1164 | | * (under what circumstances might that happen?) |
1165 | | * - if missing or bad MAC, log and drop. |
1166 | | */ |
1167 | 0 | if (0 != has_mac) { |
1168 | 0 | if (is_authentic == AUTH_UNKNOWN) { |
1169 | | /* How can this happen? */ |
1170 | 0 | DPRINTF(2, ("receive: 3454 check: AUTH_UNKNOWN from %s\n", |
1171 | 0 | stoa(&rbufp->recv_srcadr))); |
1172 | 0 | if (!authdecrypt(skeyid, (u_int32 *)pkt, authlen, |
1173 | 0 | has_mac)) { |
1174 | | /* MAC invalid or not found */ |
1175 | 0 | is_authentic = AUTH_ERROR; |
1176 | 0 | } else { |
1177 | 0 | is_authentic = AUTH_OK; |
1178 | 0 | } |
1179 | 0 | } |
1180 | 0 | if (is_authentic != AUTH_OK) { |
1181 | 0 | DPRINTF(2, ("receive: drop: missing or bad MAC from %s\n", |
1182 | 0 | stoa(&rbufp->recv_srcadr))); |
1183 | 0 | sys_restricted++; |
1184 | 0 | return; /* drop: access denied */ |
1185 | 0 | } |
1186 | 0 | } |
1187 | 0 | } |
1188 | | /**/ |
1189 | | |
1190 | | /* |
1191 | | ** On-Wire Protocol Layer |
1192 | | ** |
1193 | | ** Verify protocol operations consistent with the on-wire protocol. |
1194 | | ** The protocol discards bogus and duplicate packets and |
1195 | | ** minimizes disruptions due to protocol restarts and dropped |
1196 | | ** packets. The operations are controlled by two timestamps: |
1197 | | ** the transmit timestamp saved in the client state variables, |
1198 | | ** and the origin timestamp in the server packet header. The |
1199 | | ** comparison of these two timestamps is called the loopback test. |
1200 | | ** The transmit timestamp functions as a nonce to verify that the |
1201 | | ** response corresponds to the original request. The transmit |
1202 | | ** timestamp also serves to discard replays of the most recent |
1203 | | ** packet. Upon failure of either test, the packet is discarded |
1204 | | ** with no further action. |
1205 | | */ |
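/*
 * [Editor's sketch -- not part of ntp_proto.c]  The loopback test in
 * miniature: the client keeps the transmit timestamp it last sent
 * (the nonce) and the server transmit timestamp it last accepted.  A
 * reply whose origin field does not echo the nonce is bogus; a reply
 * whose transmit field repeats the previous one is a duplicate.
 * Timestamps are shown as plain 64-bit integers rather than l_fp, and
 * the interleaved-mode bookkeeping (aorg/borg, peer->flip) is omitted.
 */
#include <stdio.h>
#include <stdint.h>

struct mini_assoc {
        uint64_t sent_xmt;      /* our last transmit timestamp (the nonce) */
        uint64_t last_srv_xmt;  /* server transmit stamp last accepted */
};

static const char *loopback_test(struct mini_assoc *p,
                                 uint64_t pkt_org, uint64_t pkt_xmt)
{
        if (pkt_xmt == p->last_srv_xmt)
                return "duplicate: discard";
        if (pkt_org == 0 || pkt_org != p->sent_xmt)
                return "bogus origin: discard";
        p->last_srv_xmt = pkt_xmt;      /* accept and remember */
        return "accepted";
}

int main(void)
{
        struct mini_assoc a = { 0x1111, 0 };

        printf("%s\n", loopback_test(&a, 0x1111, 0xaaaa));      /* accepted */
        printf("%s\n", loopback_test(&a, 0x1111, 0xaaaa));      /* duplicate */
        printf("%s\n", loopback_test(&a, 0x2222, 0xbbbb));      /* bogus origin */
        return 0;
}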
1206 | | |
1207 | | /* |
1208 | | * The association matching rules are implemented by a set of |
1209 | | * routines and an association table. A packet matching an |
1210 | | * association is processed by the peer process for that |
1211 | | * association. If there are no errors, an ephemeral association |
1212 | | * is mobilized: a broadcast packet mobilizes a broadcast client |
1213 | | * association; a manycast server packet mobilizes a manycast |
1214 | | * client association; a symmetric active packet mobilizes a |
1215 | | * symmetric passive association. |
1216 | | */ |
1217 | 175 | DPRINTF(1, ("receive: MATCH_ASSOC dispatch: mode %d/%s:%s \n", |
1218 | 175 | hismode, hm_str, am_str)); |
1219 | 175 | switch (retcode) { |
1220 | | |
1221 | | /* |
1222 | | * This is a client mode packet not matching any association. If |
1223 | | * an ordinary client, simply toss a server mode packet back |
1224 | | * over the fence. If a manycast client, we have to work a |
1225 | | * little harder. |
1226 | | * |
1227 | | * There are cases here where we do not call record_raw_stats(). |
1228 | | */ |
1229 | 139 | case AM_FXMIT: |
1230 | | |
1231 | | /* |
1232 | | * If authentication OK, send a server reply; otherwise, |
1233 | | * send a crypto-NAK. |
1234 | | */ |
1235 | 139 | if (!(rbufp->dstadr->flags & INT_MCASTOPEN)) { |
1236 | | /* HMS: would be nice to log FAST_XMIT|BADAUTH|RESTRICTED */ |
1237 | 139 | record_raw_stats(&rbufp->recv_srcadr, |
1238 | 139 | &rbufp->dstadr->sin, |
1239 | 139 | &p_org, &p_rec, &p_xmt, &rbufp->recv_time, |
1240 | 139 | PKT_LEAP(pkt->li_vn_mode), |
1241 | 139 | PKT_VERSION(pkt->li_vn_mode), |
1242 | 139 | PKT_MODE(pkt->li_vn_mode), |
1243 | 139 | PKT_TO_STRATUM(pkt->stratum), |
1244 | 139 | pkt->ppoll, |
1245 | 139 | pkt->precision, |
1246 | 139 | FPTOD(NTOHS_FP(pkt->rootdelay)), |
1247 | 139 | FPTOD(NTOHS_FP(pkt->rootdisp)), |
1248 | 139 | pkt->refid, |
1249 | 139 | rbufp->recv_length - MIN_V4_PKT_LEN, (u_char *)&pkt->exten); |
1250 | | |
1251 | 139 | if (AUTH(restrict_mask & RES_DONTTRUST, |
1252 | 139 | is_authentic)) { |
1253 | 26 | fast_xmit(rbufp, MODE_SERVER, skeyid, |
1254 | 26 | restrict_mask); |
1255 | 113 | } else if (is_authentic == AUTH_ERROR) { |
1256 | 113 | fast_xmit(rbufp, MODE_SERVER, 0, |
1257 | 113 | restrict_mask); |
1258 | 113 | sys_badauth++; |
1259 | 113 | } else { |
1260 | 0 | DPRINTF(2, ("receive: AM_FXMIT drop: !mcast restricted\n")); |
1261 | 0 | sys_restricted++; |
1262 | 0 | } |
1263 | | |
1264 | 139 | return; /* hooray */ |
1265 | 139 | } |
1266 | | |
1267 | | /* |
1268 | | * This must be manycast. Do not respond if not |
1269 | | * configured as a manycast server. |
1270 | | */ |
1271 | 0 | if (!sys_manycastserver) { |
1272 | 0 | DPRINTF(2, ("receive: AM_FXMIT drop: Not manycastserver\n")); |
1273 | 0 | sys_restricted++; |
1274 | 0 | return; /* not enabled */ |
1275 | 0 | } |
1276 | | |
1277 | | #ifdef AUTOKEY |
1278 | | /* |
1279 | | * Do not respond if not the same group. |
1280 | | */ |
1281 | | if (group_test(groupname, NULL)) { |
1282 | | DPRINTF(2, ("receive: AM_FXMIT drop: empty groupname\n")); |
1283 | | sys_declined++; |
1284 | | return; |
1285 | | } |
1286 | | #endif /* AUTOKEY */ |
1287 | | |
1288 | | /* |
1289 | | * Do not respond if we are not synchronized or our |
1290 | | * stratum is greater than the manycaster or the |
1291 | | * manycaster has already synchronized to us. |
1292 | | */ |
1293 | 0 | if ( sys_leap == LEAP_NOTINSYNC |
1294 | 0 | || sys_stratum >= hisstratum |
1295 | 0 | || (!sys_cohort && sys_stratum == hisstratum + 1) |
1296 | 0 | || rbufp->dstadr->addr_refid == pkt->refid) { |
1297 | 0 | DPRINTF(2, ("receive: AM_FXMIT drop: LEAP_NOTINSYNC || stratum || loop\n")); |
1298 | 0 | sys_declined++; |
1299 | 0 | return; /* no help */ |
1300 | 0 | } |
1301 | | |
1302 | | /* |
1303 | | * Respond only if authentication succeeds. Don't do a |
1304 | | * crypto-NAK, as that would not be useful. |
1305 | | */ |
1306 | 0 | if (AUTH(restrict_mask & RES_DONTTRUST, is_authentic)) { |
1307 | 0 | record_raw_stats(&rbufp->recv_srcadr, |
1308 | 0 | &rbufp->dstadr->sin, |
1309 | 0 | &p_org, &p_rec, &p_xmt, &rbufp->recv_time, |
1310 | 0 | PKT_LEAP(pkt->li_vn_mode), |
1311 | 0 | PKT_VERSION(pkt->li_vn_mode), |
1312 | 0 | PKT_MODE(pkt->li_vn_mode), |
1313 | 0 | PKT_TO_STRATUM(pkt->stratum), |
1314 | 0 | pkt->ppoll, |
1315 | 0 | pkt->precision, |
1316 | 0 | FPTOD(NTOHS_FP(pkt->rootdelay)), |
1317 | 0 | FPTOD(NTOHS_FP(pkt->rootdisp)), |
1318 | 0 | pkt->refid, |
1319 | 0 | rbufp->recv_length - MIN_V4_PKT_LEN, (u_char *)&pkt->exten); |
1320 | |
1321 | 0 | fast_xmit(rbufp, MODE_SERVER, skeyid, |
1322 | 0 | restrict_mask); |
1323 | 0 | } |
1324 | 0 | return; /* hooray */ |
1325 | | |
1326 | | /* |
1327 | | * This is a server mode packet returned in response to a client |
1328 | | * mode packet sent to a multicast group address (for |
1329 | | * manycastclient) or to a unicast address (for pool). The |
1330 | | * origin timestamp is a good nonce to reliably associate the |
1331 | | * reply with what was sent. If there is no match, that's |
1332 | | * curious and could be an intruder attempting to clog, so we |
1333 | | * just ignore it. |
1334 | | * |
1335 | | * If the packet is authentic and the manycastclient or pool |
1336 | | * association is found, we mobilize a client association and |
1337 | | * copy pertinent variables from the manycastclient or pool |
1338 | | * association to the new client association. If not, just |
1339 | | * ignore the packet. |
1340 | | * |
1341 | | * There is an implosion hazard at the manycast client, since |
1342 | | * the manycast servers send the server packet immediately. If |
1343 | | * the guy is already here, don't fire up a duplicate. |
1344 | | * |
1345 | | * There are cases here where we do not call record_raw_stats(). |
1346 | | */ |
1347 | 3 | case AM_MANYCAST: |
1348 | | |
1349 | | #ifdef AUTOKEY |
1350 | | /* |
1351 | | * Do not respond if not the same group. |
1352 | | */ |
1353 | | if (group_test(groupname, NULL)) { |
1354 | | DPRINTF(2, ("receive: AM_MANYCAST drop: empty groupname\n")); |
1355 | | sys_declined++; |
1356 | | return; |
1357 | | } |
1358 | | #endif /* AUTOKEY */ |
1359 | 3 | if ((peer2 = findmanycastpeer(rbufp)) == NULL) { |
1360 | 3 | DPRINTF(2, ("receive: AM_MANYCAST drop: No manycast peer\n")); |
1361 | 3 | sys_restricted++; |
1362 | 3 | return; /* not enabled */ |
1363 | 3 | } |
1364 | 0 | if (!AUTH( (!(peer2->cast_flags & MDF_POOL) |
1365 | 0 | && sys_authenticate) |
1366 | 0 | || (restrict_mask & (RES_NOPEER | |
1367 | 0 | RES_DONTTRUST)), is_authentic) |
1368 | | /* MC: RES_NOEPEER? */ |
1369 | 0 | ) { |
1370 | 0 | DPRINTF(2, ("receive: AM_MANYCAST drop: bad auth || (NOPEER|DONTTRUST)\n")); |
1371 | 0 | sys_restricted++; |
1372 | 0 | return; /* access denied */ |
1373 | 0 | } |
1374 | | |
1375 | | /* |
1376 | | * Do not respond if unsynchronized or stratum is below |
1377 | | * the floor or at or above the ceiling. |
1378 | | */ |
1379 | 0 | if ( hisleap == LEAP_NOTINSYNC |
1380 | 0 | || hisstratum < sys_floor |
1381 | 0 | || hisstratum >= sys_ceiling) { |
1382 | 0 | DPRINTF(2, ("receive: AM_MANYCAST drop: unsync/stratum\n")); |
1383 | 0 | sys_declined++; |
1384 | 0 | return; /* no help */ |
1385 | 0 | } |
1386 | 0 | peer = newpeer(&rbufp->recv_srcadr, NULL, rbufp->dstadr, |
1387 | 0 | r4a.ippeerlimit, MODE_CLIENT, hisversion, |
1388 | 0 | peer2->minpoll, peer2->maxpoll, |
1389 | 0 | FLAG_PREEMPT | (FLAG_IBURST & peer2->flags), |
1390 | 0 | MDF_UCAST | MDF_UCLNT, 0, skeyid, sys_ident); |
1391 | 0 | if (NULL == peer) { |
1392 | 0 | DPRINTF(2, ("receive: AM_MANYCAST drop: duplicate\n")); |
1393 | 0 | sys_declined++; |
1394 | 0 | return; /* ignore duplicate */ |
1395 | 0 | } |
1396 | | |
1397 | | /* |
1398 | | * After each ephemeral pool association is spun, |
1399 | | * accelerate the next poll for the pool solicitor so |
1400 | | * the pool will fill promptly. |
1401 | | */ |
1402 | 0 | if (peer2->cast_flags & MDF_POOL) |
1403 | 0 | peer2->nextdate = current_time + 1; |
1404 | | |
1405 | | /* |
1406 | | * Further processing of the solicitation response would |
1407 | | * simply detect its origin timestamp as bogus for the |
1408 | | * brand-new association (it matches the prototype |
1409 | | * association) and tinker with peer->nextdate delaying |
1410 | | * first sync. |
1411 | | */ |
1412 | 0 | return; /* solicitation response handled */ |
1413 | | |
1414 | | /* |
1415 | | * This is the first packet received from a broadcast server. If |
1416 | | * the packet is authentic and we are enabled as broadcast |
1417 | | * client, mobilize a broadcast client association. We don't |
1418 | | * kiss any frogs here. |
1419 | | * |
1420 | | * There are cases here where we do not call record_raw_stats(). |
1421 | | */ |
1422 | 1 | case AM_NEWBCL: |
1423 | | |
1424 | | #ifdef AUTOKEY |
1425 | | /* |
1426 | | * Do not respond if not the same group. |
1427 | | */ |
1428 | | if (group_test(groupname, sys_ident)) { |
1429 | | DPRINTF(2, ("receive: AM_NEWBCL drop: groupname mismatch\n")); |
1430 | | sys_declined++; |
1431 | | return; |
1432 | | } |
1433 | | #endif /* AUTOKEY */ |
1434 | 1 | if (sys_bclient == 0) { |
1435 | 1 | DPRINTF(2, ("receive: AM_NEWBCL drop: not a bclient\n")); |
1436 | 1 | sys_restricted++; |
1437 | 1 | return; /* not enabled */ |
1438 | 1 | } |
1439 | 0 | if (!AUTH(sys_authenticate | (restrict_mask & |
1440 | 0 | (RES_NOPEER | RES_DONTTRUST)), is_authentic) |
1441 | | /* NEWBCL: RES_NOEPEER? */ |
1442 | 0 | ) { |
1443 | 0 | DPRINTF(2, ("receive: AM_NEWBCL drop: AUTH failed\n")); |
1444 | 0 | sys_restricted++; |
1445 | 0 | return; /* access denied */ |
1446 | 0 | } |
1447 | | |
1448 | | /* |
1449 | | * Do not respond if unsynchronized or stratum is below |
1450 | | * the floor or at or above the ceiling. |
1451 | | */ |
1452 | 0 | if ( hisleap == LEAP_NOTINSYNC |
1453 | 0 | || hisstratum < sys_floor |
1454 | 0 | || hisstratum >= sys_ceiling) { |
1455 | 0 | DPRINTF(2, ("receive: AM_NEWBCL drop: Unsync or bad stratum\n")); |
1456 | 0 | sys_declined++; |
1457 | 0 | return; /* no help */ |
1458 | 0 | } |
1459 | | |
1460 | | #ifdef AUTOKEY |
1461 | | /* |
1462 | | * Do not respond if Autokey and the opcode is not a |
1463 | | * CRYPTO_ASSOC response with association ID. |
1464 | | */ |
1465 | | if ( crypto_flags && skeyid > NTP_MAXKEY |
1466 | | && (opcode & 0xffff0000) != (CRYPTO_ASSOC | CRYPTO_RESP)) { |
1467 | | DPRINTF(2, ("receive: AM_NEWBCL drop: Autokey but not CRYPTO_ASSOC\n")); |
1468 | | sys_declined++; |
1469 | | return; /* protocol error */ |
1470 | | } |
1471 | | #endif /* AUTOKEY */ |
1472 | | |
1473 | | /* |
1474 | | * Broadcasts received via a multicast address may |
1475 | | * arrive after a unicast volley has begun |
1476 | | * with the same remote address. newpeer() will not |
1477 | | * find duplicate associations on other local endpoints |
1478 | | * if a non-NULL endpoint is supplied. multicastclient |
1479 | | * ephemeral associations are unique across all local |
1480 | | * endpoints. |
1481 | | */ |
1482 | 0 | if (!(INT_MCASTOPEN & rbufp->dstadr->flags)) |
1483 | 0 | match_ep = rbufp->dstadr; |
1484 | 0 | else |
1485 | 0 | match_ep = NULL; |
1486 | | |
1487 | | /* |
1488 | | * Determine whether to execute the initial volley. |
1489 | | */ |
1490 | 0 | if (sys_bdelay > 0.0) { |
1491 | | #ifdef AUTOKEY |
1492 | | /* |
1493 | | * If a two-way exchange is not possible, |
1494 | | * neither is Autokey. |
1495 | | */ |
1496 | | if (crypto_flags && skeyid > NTP_MAXKEY) { |
1497 | | sys_restricted++; |
1498 | | DPRINTF(2, ("receive: AM_NEWBCL drop: Autokey but not 2-way\n")); |
1499 | | return; /* no autokey */ |
1500 | | } |
1501 | | #endif /* AUTOKEY */ |
1502 | | |
1503 | | /* |
1504 | | * Do not execute the volley. Start out in |
1505 | | * broadcast client mode. |
1506 | | */ |
1507 | 0 | peer = newpeer(&rbufp->recv_srcadr, NULL, match_ep, |
1508 | 0 | r4a.ippeerlimit, MODE_BCLIENT, hisversion, |
1509 | 0 | pkt->ppoll, pkt->ppoll, |
1510 | 0 | FLAG_PREEMPT, MDF_BCLNT, 0, skeyid, sys_ident); |
1511 | 0 | if (NULL == peer) { |
1512 | 0 | DPRINTF(2, ("receive: AM_NEWBCL drop: duplicate\n")); |
1513 | 0 | sys_restricted++; |
1514 | 0 | return; /* ignore duplicate */ |
1515 | |
1516 | 0 | } else { |
1517 | 0 | peer->delay = sys_bdelay; |
1518 | 0 | peer->bxmt = p_xmt; |
1519 | 0 | } |
1520 | 0 | break; |
1521 | 0 | } |
1522 | | |
1523 | | /* |
1524 | | * Execute the initial volley in order to calibrate the |
1525 | | * propagation delay and run the Autokey protocol. |
1526 | | * |
1527 | | * Note that the minpoll is taken from the broadcast |
1528 | | * packet, normally 6 (64 s) and that the poll interval |
1529 | | * is fixed at this value. |
1530 | | */ |
1531 | 0 | peer = newpeer(&rbufp->recv_srcadr, NULL, match_ep, |
1532 | 0 | r4a.ippeerlimit, MODE_CLIENT, hisversion, |
1533 | 0 | pkt->ppoll, pkt->ppoll, |
1534 | 0 | FLAG_BC_VOL | FLAG_IBURST | FLAG_PREEMPT, MDF_BCLNT, |
1535 | 0 | 0, skeyid, sys_ident); |
1536 | 0 | if (NULL == peer) { |
1537 | 0 | DPRINTF(2, ("receive: AM_NEWBCL drop: empty newpeer() failed\n")); |
1538 | 0 | sys_restricted++; |
1539 | 0 | return; /* ignore duplicate */ |
1540 | 0 | } |
1541 | 0 | peer->bxmt = p_xmt; |
1542 | | #ifdef AUTOKEY |
1543 | | if (skeyid > NTP_MAXKEY) |
1544 | | crypto_recv(peer, rbufp); |
1545 | | #endif /* AUTOKEY */ |
1546 | |
1547 | 0 | return; /* hooray */ |
1548 | | |
1549 | | /* |
1550 | | * This is the first packet received from a potential ephemeral |
1551 | | * symmetric active peer. First, deal with broken Windows clients. |
1552 | | * Then, if NOEPEER is enabled, drop it. If the packet meets our |
1553 | | * authenticity requirements and is the first he sent, mobilize
1554 | | * a passive association. |
1555 | | * Otherwise, kiss the frog. |
1556 | | * |
1557 | | * There are cases here where we do not call record_raw_stats(). |
1558 | | */ |
1559 | 28 | case AM_NEWPASS: |
1560 | | |
1561 | 28 | DEBUG_REQUIRE(MODE_ACTIVE == hismode); |
1562 | | |
1563 | | #ifdef AUTOKEY |
1564 | | /* |
1565 | | * Do not respond if not the same group. |
1566 | | */ |
1567 | | if (group_test(groupname, sys_ident)) { |
1568 | | DPRINTF(2, ("receive: AM_NEWPASS drop: Autokey group mismatch\n")); |
1569 | | sys_declined++; |
1570 | | return; |
1571 | | } |
1572 | | #endif /* AUTOKEY */ |
1573 | 28 | if (!AUTH(sys_authenticate | (restrict_mask & |
1574 | 28 | (RES_NOPEER | RES_DONTTRUST)), is_authentic) |
1575 | 28 | ) { |
1576 | | /* |
1577 | | * If authenticated but cannot mobilize an |
1578 | | * association, send a symmetric passive |
1579 | | * response without mobilizing an association. |
1580 | | * This is for drat broken Windows clients. See |
1581 | | * Microsoft KB 875424 for preferred workaround. |
1582 | | */ |
1583 | 28 | if (AUTH(restrict_mask & RES_DONTTRUST, |
1584 | 28 | is_authentic)) { |
1585 | 3 | fast_xmit(rbufp, MODE_PASSIVE, skeyid, |
1586 | 3 | restrict_mask); |
1587 | 3 | return; /* hooray */ |
1588 | 3 | } |
1589 | | /* HMS: Why is this next set of lines a feature? */ |
1590 | 25 | if (is_authentic == AUTH_ERROR) { |
1591 | 25 | fast_xmit(rbufp, MODE_PASSIVE, 0, |
1592 | 25 | restrict_mask); |
1593 | 25 | sys_restricted++; |
1594 | 25 | return; |
1595 | 25 | } |
1596 | | |
1597 | 0 | if (restrict_mask & RES_NOEPEER) { |
1598 | 0 | DPRINTF(2, ("receive: AM_NEWPASS drop: NOEPEER\n")); |
1599 | 0 | sys_declined++; |
1600 | 0 | return; |
1601 | 0 | } |
1602 | | |
1603 | | /* [Bug 2941] |
1604 | | * If we got here, the packet isn't part of an |
1605 | | * existing association, either isn't correctly |
1606 | | * authenticated or it is but we are refusing |
1607 | | * ephemeral peer requests, and it didn't meet |
1608 | | * either of the previous two special cases so we |
1609 | | * should just drop it on the floor. For example, |
1610 | | * crypto-NAKs (is_authentic == AUTH_CRYPTO) |
1611 | | * will make it this far. This is just |
1612 | | * debug-printed and not logged to avoid log |
1613 | | * flooding. |
1614 | | */ |
1615 | 0 | DPRINTF(2, ("receive: at %ld refusing to mobilize passive association" |
1616 | 0 | " with unknown peer %s mode %d/%s:%s keyid %08x len %d auth %d\n", |
1617 | 0 | current_time, stoa(&rbufp->recv_srcadr), |
1618 | 0 | hismode, hm_str, am_str, skeyid, |
1619 | 0 | (authlen + has_mac), is_authentic)); |
1620 | 0 | sys_declined++; |
1621 | 0 | return; |
1622 | 0 | } |
1623 | | |
1624 | 0 | if (restrict_mask & RES_NOEPEER) { |
1625 | 0 | DPRINTF(2, ("receive: AM_NEWPASS drop: NOEPEER\n")); |
1626 | 0 | sys_declined++; |
1627 | 0 | return; |
1628 | 0 | } |
1629 | | |
1630 | | /* |
1631 | | * Do not respond if synchronized and if stratum is |
1632 | | * below the floor or at or above the ceiling. Note, |
1633 | | * this allows an unsynchronized peer to synchronize to |
1634 | | * us. It would be very strange if he did and then was |
1635 | | * nipped, but that could only happen if we were |
1636 | | * operating at the top end of the range. It also means |
1637 | | * we will spin an ephemeral association in response to |
1638 | | * MODE_ACTIVE KoDs, which will time out eventually. |
1639 | | */ |
1640 | 0 | if ( hisleap != LEAP_NOTINSYNC |
1641 | 0 | && (hisstratum < sys_floor || hisstratum >= sys_ceiling)) { |
1642 | 0 | DPRINTF(2, ("receive: AM_NEWPASS drop: bad stratum\n"));
1643 | 0 | sys_declined++; |
1644 | 0 | return; /* no help */ |
1645 | 0 | } |
1646 | | |
1647 | | /* |
1648 | | * The message is correctly authenticated and allowed. |
1649 | | * Mobilize a symmetric passive association, if we won't |
1650 | | * exceed the ippeerlimit. |
1651 | | */ |
1652 | 0 | if ((peer = newpeer(&rbufp->recv_srcadr, NULL, rbufp->dstadr, |
1653 | 0 | r4a.ippeerlimit, MODE_PASSIVE, hisversion, |
1654 | 0 | pkt->ppoll, NTP_MAXDPOLL, 0, MDF_UCAST, 0, |
1655 | 0 | skeyid, sys_ident)) == NULL) { |
1656 | 0 | DPRINTF(2, ("receive: AM_NEWPASS drop: newpeer() failed\n")); |
1657 | 0 | sys_declined++; |
1658 | 0 | return; /* ignore duplicate */ |
1659 | 0 | } |
1660 | 0 | break; |
1661 | | |
1662 | | |
1663 | | /* |
1664 | | * Process regular packet. Nothing special. |
1665 | | * |
1666 | | * There are cases here where we do not call record_raw_stats(). |
1667 | | */ |
1668 | 0 | case AM_PROCPKT: |
1669 | |
1670 | | #ifdef AUTOKEY |
1671 | | /* |
1672 | | * Do not respond if not the same group. |
1673 | | */ |
1674 | | if (group_test(groupname, peer->ident)) { |
1675 | | DPRINTF(2, ("receive: AM_PROCPKT drop: Autokey group mismatch\n")); |
1676 | | sys_declined++; |
1677 | | return; |
1678 | | } |
1679 | | #endif /* AUTOKEY */ |
1680 | |
1681 | 0 | if (MODE_BROADCAST == hismode) { |
1682 | 0 | int bail = 0; |
1683 | 0 | l_fp tdiff; |
1684 | 0 | u_long deadband; |
1685 | |
1686 | 0 | DPRINTF(2, ("receive: PROCPKT/BROADCAST: prev pkt %ld seconds ago, ppoll: %d, %d secs\n", |
1687 | 0 | (current_time - peer->timelastrec), |
1688 | 0 | peer->ppoll, (1 << peer->ppoll) |
1689 | 0 | )); |
1690 | | /* Things we can check: |
1691 | | * |
1692 | | * Did the poll interval change? |
1693 | | * Is the poll interval in the packet in-range? |
1694 | | * Did this packet arrive too soon? |
1695 | | * Is the timestamp in this packet monotonic |
1696 | | * with respect to the previous packet? |
1697 | | */ |
1698 | | |
1699 | | /* This is noteworthy, not error-worthy */ |
1700 | 0 | if (pkt->ppoll != peer->ppoll) { |
1701 | 0 | msyslog(LOG_INFO, "receive: broadcast poll from %s changed from %u to %u", |
1702 | 0 | stoa(&rbufp->recv_srcadr), |
1703 | 0 | peer->ppoll, pkt->ppoll); |
1704 | 0 | } |
1705 | | |
1706 | | /* This is error-worthy */ |
1707 | 0 | if ( pkt->ppoll < peer->minpoll |
1708 | 0 | || pkt->ppoll > peer->maxpoll) { |
1709 | 0 | msyslog(LOG_INFO, "receive: broadcast poll of %u from %s is out-of-range (%d to %d)!", |
1710 | 0 | pkt->ppoll, stoa(&rbufp->recv_srcadr), |
1711 | 0 | peer->minpoll, peer->maxpoll); |
1712 | 0 | ++bail; |
1713 | 0 | } |
1714 | | |
1715 | | /* too early? worth an error, too! |
1716 | | * |
1717 | | * [Bug 3113] Ensure that at least one poll |
1718 | | * interval has elapsed since the last **clean** |
1719 | | * packet was received. We limit the check to |
1720 | | * **clean** packets to prevent replayed packets |
1721 | | * and incorrectly authenticated packets, which |
1722 | | * we'll discard, from being used to create a |
1723 | | * denial of service condition. |
1724 | | */ |
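		/*
		 * A minimal worked example of the check below, assuming a
		 * packet poll exponent of 6: deadband = 1 << 6 = 64 s,
		 * reduced to 61 s while the FLAG_BC_VOL calibration volley
		 * is still in progress. A broadcast packet arriving sooner
		 * than that after the last clean one counts against 'bail'
		 * and is dropped.
		 */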
1725 | 0 | deadband = (1u << pkt->ppoll); |
1726 | 0 | if (FLAG_BC_VOL & peer->flags) |
1727 | 0 | deadband -= 3; /* allow greater fuzz after volley */ |
1728 | 0 | if ((current_time - peer->timereceived) < deadband) { |
1729 | 0 | msyslog(LOG_INFO, "receive: broadcast packet from %s arrived after %lu, not %lu seconds!", |
1730 | 0 | stoa(&rbufp->recv_srcadr), |
1731 | 0 | (current_time - peer->timereceived), |
1732 | 0 | deadband); |
1733 | 0 | ++bail; |
1734 | 0 | } |
1735 | | |
1736 | | /* Alert if time from the server is non-monotonic. |
1737 | | * |
1738 | | * [Bug 3114] is about Broadcast mode replay DoS. |
1739 | | * |
1740 | | * Broadcast mode *assumes* a trusted network. |
1741 | | * Even so, it's nice to be robust in the face |
1742 | | * of attacks. |
1743 | | * |
1744 | | * If we get an authenticated broadcast packet |
1745 | | * with an "earlier" timestamp, it means one of |
1746 | | * two things: |
1747 | | * |
1748 | | * - the broadcast server had a backward step. |
1749 | | * |
1750 | | * - somebody is trying a replay attack. |
1751 | | * |
1752 | | * deadband: By default, we assume the broadcast |
1753 | | * network is trustable, so we take our accepted |
1754 | | * broadcast packets as we receive them. But |
1755 | | * some folks might want to take additional poll |
1756 | | * delays before believing a backward step. |
1757 | | */ |
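		/*
		 * Sketch of the backward-step deadband below, assuming
		 * sys_bcpollbstep == 1 and a packet poll exponent of 6:
		 * deadband = (1 << 6) * 1 + 2 = 66 s, so a non-monotonic
		 * transmit timestamp is only tolerated once at least 66 s
		 * have passed since the last clean packet; with
		 * sys_bcpollbstep == 0 the deadband is 0 and backward
		 * steps are accepted as received.
		 */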
1758 | 0 | if (sys_bcpollbstep) { |
1759 | | /* pkt->ppoll or peer->ppoll ? */ |
1760 | 0 | deadband = (1u << pkt->ppoll) |
1761 | 0 | * sys_bcpollbstep + 2; |
1762 | 0 | } else { |
1763 | 0 | deadband = 0; |
1764 | 0 | } |
1765 | |
1766 | 0 | if (L_ISZERO(&peer->bxmt)) { |
1767 | 0 | tdiff.l_ui = tdiff.l_uf = 0; |
1768 | 0 | } else { |
1769 | 0 | tdiff = p_xmt; |
1770 | 0 | L_SUB(&tdiff, &peer->bxmt); |
1771 | 0 | } |
1772 | 0 | if ( tdiff.l_i < 0 |
1773 | 0 | && (current_time - peer->timereceived) < deadband) |
1774 | 0 | { |
1775 | 0 | msyslog(LOG_INFO, "receive: broadcast packet from %s contains non-monotonic timestamp: %#010x.%08x -> %#010x.%08x", |
1776 | 0 | stoa(&rbufp->recv_srcadr), |
1777 | 0 | peer->bxmt.l_ui, peer->bxmt.l_uf, |
1778 | 0 | p_xmt.l_ui, p_xmt.l_uf |
1779 | 0 | ); |
1780 | 0 | ++bail; |
1781 | 0 | } |
1782 | |
1783 | 0 | if (bail) { |
1784 | 0 | DPRINTF(2, ("receive: AM_PROCPKT drop: bail\n")); |
1785 | 0 | peer->timelastrec = current_time; |
1786 | 0 | sys_declined++; |
1787 | 0 | return; |
1788 | 0 | } |
1789 | 0 | } |
1790 | | |
1791 | 0 | break; |
1792 | | |
1793 | | /* |
1794 | | * A passive packet matches a passive association. This is |
1795 | | * usually the result of reconfiguring a client on the fly. As |
1796 | | * this association might be legitimate and this packet an |
1797 | | * attempt to deny service, just ignore it. |
1798 | | */ |
1799 | 0 | case AM_ERR: |
1800 | 0 | DPRINTF(2, ("receive: AM_ERR drop.\n")); |
1801 | 0 | sys_declined++; |
1802 | 0 | return; |
1803 | | |
1804 | | /* |
1805 | | * For everything else there is the bit bucket. |
1806 | | */ |
1807 | 4 | default: |
1808 | 4 | DPRINTF(2, ("receive: default drop.\n")); |
1809 | 4 | sys_declined++; |
1810 | 4 | return; |
1811 | 175 | } |
1812 | | |
1813 | | #ifdef AUTOKEY |
1814 | | /* |
1815 | | * If the association is configured for Autokey, the packet must |
1816 | | * have a public key ID; if not, the packet must have a |
1817 | | * symmetric key ID. |
1818 | | */ |
1819 | | if ( is_authentic != AUTH_CRYPTO |
1820 | | && ( ((peer->flags & FLAG_SKEY) && skeyid <= NTP_MAXKEY) |
1821 | | || (!(peer->flags & FLAG_SKEY) && skeyid > NTP_MAXKEY))) { |
1822 | | DPRINTF(2, ("receive: drop: Autokey but wrong/bad auth\n")); |
1823 | | sys_badauth++; |
1824 | | return; |
1825 | | } |
1826 | | #endif /* AUTOKEY */ |
1827 | | |
1828 | 0 | peer->received++; |
1829 | 0 | peer->flash &= ~PKT_TEST_MASK; |
1830 | 0 | if (peer->flags & FLAG_XBOGUS) { |
1831 | 0 | peer->flags &= ~FLAG_XBOGUS; |
1832 | 0 | peer->flash |= TEST3; |
1833 | 0 | } |
1834 | | |
1835 | | /* |
1836 | | * Next comes a rigorous schedule of timestamp checking. If the |
1837 | | * transmit timestamp is zero, the server has not initialized in |
1838 | | * interleaved modes or is horribly broken. |
1839 | | * |
1840 | | * A KoD packet we pay attention to cannot have a 0 transmit |
1841 | | * timestamp. |
1842 | | */ |
1843 | |
1844 | 0 | kissCode = kiss_code_check(hisleap, hisstratum, hismode, pkt->refid); |
1845 | |
1846 | 0 | if (L_ISZERO(&p_xmt)) { |
1847 | 0 | peer->flash |= TEST3; /* unsynch */ |
1848 | 0 | if (kissCode != NOKISS) { /* KoD packet */ |
1849 | 0 | peer->bogusorg++; /* for TEST2 or TEST3 */ |
1850 | 0 | msyslog(LOG_INFO, |
1851 | 0 | "receive: Unexpected zero transmit timestamp in KoD from %s", |
1852 | 0 | ntoa(&peer->srcadr)); |
1853 | 0 | return; |
1854 | 0 | } |
1855 | | |
1856 | | /* |
1857 | | * If the transmit timestamp duplicates our previous one, the |
1858 | | * packet is a replay. This prevents the bad guys from replaying |
1859 | | * the most recent packet, authenticated or not. |
1860 | | */ |
1861 | 0 | } else if (L_ISEQU(&peer->xmt, &p_xmt)) { |
1862 | 0 | DPRINTF(2, ("receive: drop: Duplicate xmit\n")); |
1863 | 0 | peer->flash |= TEST1; /* duplicate */ |
1864 | 0 | peer->oldpkt++; |
1865 | 0 | return; |
1866 | | |
1867 | | /* |
1868 | | * If this is a broadcast mode packet, make sure hisstratum |
1869 | | * is appropriate. Don't do anything else here - we wait to |
1870 | | * see if this is an interleave broadcast packet until after |
1871 | | * we've validated the MAC that SHOULD be provided. |
1872 | | * |
1873 | | * hisstratum cannot be 0 - see assertion above. |
1874 | | * If hisstratum is 15, then we'll advertise as UNSPEC but |
1875 | | * at least we'll be able to sync with the broadcast server. |
1876 | | */ |
1877 | 0 | } else if (hismode == MODE_BROADCAST) { |
1878 | | /* 0 is unexpected too, and impossible */ |
1879 | 0 | if (STRATUM_UNSPEC <= hisstratum) { |
1880 | | /* Is this a ++sys_declined or ??? */ |
1881 | 0 | msyslog(LOG_INFO, |
1882 | 0 | "receive: Unexpected stratum (%d) in broadcast from %s", |
1883 | 0 | hisstratum, ntoa(&peer->srcadr)); |
1884 | 0 | return; |
1885 | 0 | } |
1886 | | |
1887 | | /* |
1888 | | * Basic KoD validation checking: |
1889 | | * |
1890 | | * KoD packets are a mixed-blessing. Forged KoD packets |
1891 | | * are DoS attacks. There are rare situations where we might |
1892 | | * get a valid KoD response, though. Since KoD packets are |
1893 | | * a special case that complicate the checks we do next, we |
1894 | | * handle the basic KoD checks here. |
1895 | | * |
1896 | | * Note that we expect the incoming KoD packet to have its |
1897 | | * (nonzero) org, rec, and xmt timestamps set to the xmt timestamp |
1898 | | * that we have previously sent out. Watch interleave mode. |
1899 | | */ |
1900 | 0 | } else if (kissCode != NOKISS) { |
1901 | 0 | DEBUG_INSIST(!L_ISZERO(&p_xmt)); |
1902 | | if ( L_ISZERO(&p_org) /* We checked p_xmt above */ |
1903 | 0 | || L_ISZERO(&p_rec)) { |
1904 | 0 | peer->bogusorg++; |
1905 | 0 | msyslog(LOG_INFO, |
1906 | 0 | "receive: KoD packet from %s has a zero org or rec timestamp. Ignoring.", |
1907 | 0 | ntoa(&peer->srcadr)); |
1908 | 0 | return; |
1909 | 0 | } |
1910 | | |
1911 | 0 | if ( !L_ISEQU(&p_xmt, &p_org) |
1912 | 0 | || !L_ISEQU(&p_xmt, &p_rec)) { |
1913 | 0 | peer->bogusorg++; |
1914 | 0 | msyslog(LOG_INFO, |
1915 | 0 | "receive: KoD packet from %s has inconsistent xmt/org/rec timestamps. Ignoring.", |
1916 | 0 | ntoa(&peer->srcadr)); |
1917 | 0 | return; |
1918 | 0 | } |
1919 | | |
1920 | | /* Be conservative */ |
1921 | 0 | if (peer->flip == 0 && !L_ISEQU(&p_org, &peer->aorg)) { |
1922 | 0 | peer->bogusorg++; |
1923 | 0 | msyslog(LOG_INFO, |
1924 | 0 | "receive: flip 0 KoD origin timestamp %#010x.%08x from %s does not match %#010x.%08x - ignoring.", |
1925 | 0 | p_org.l_ui, p_org.l_uf, |
1926 | 0 | ntoa(&peer->srcadr), |
1927 | 0 | peer->aorg.l_ui, peer->aorg.l_uf); |
1928 | 0 | return; |
1929 | 0 | } else if (peer->flip == 1 && !L_ISEQU(&p_org, &peer->borg)) { |
1930 | 0 | peer->bogusorg++; |
1931 | 0 | msyslog(LOG_INFO, |
1932 | 0 | "receive: flip 1 KoD origin timestamp %#010x.%08x from %s does not match interleave %#010x.%08x - ignoring.", |
1933 | 0 | p_org.l_ui, p_org.l_uf, |
1934 | 0 | ntoa(&peer->srcadr), |
1935 | 0 | peer->borg.l_ui, peer->borg.l_uf); |
1936 | 0 | return; |
1937 | 0 | } |
1938 | | |
1939 | | /* |
1940 | | * Basic mode checks: |
1941 | | * |
1942 | | * If there is no origin timestamp, it's either an initial packet |
1943 | | * or we've already received a response to our query. Of course, |
1944 | | * should 'aorg' be all-zero because this really was the original |
1945 | | * transmit timestamp, we'll ignore this reply. There is a window |
1946 | | * of one nanosecond once every 136 years' time where this is |
1947 | | * possible. We currently ignore this situation, as a completely |
1948 | | * zero timestamp is (quietly?) disallowed. |
1949 | | * |
1950 | | * Otherwise, check for bogus packet in basic mode. |
1951 | | * If it is bogus, switch to interleaved mode and resynchronize, |
1952 | | * but only after confirming the packet is not bogus in |
1953 | | * symmetric interleaved mode. |
1954 | | * |
1955 | | * This could also mean somebody is forging packets claiming to |
1956 | | * be from us, attempting to cause our server to KoD us. |
1957 | | * |
1958 | | * We have earlier asserted that hisstratum cannot be 0. |
1959 | | * If hisstratum is STRATUM_UNSPEC, it means he's not sync'd. |
1960 | | */ |
1961 | 0 | } else if (peer->flip == 0) { |
1962 | 0 | if (0) { |
1963 | 0 | } else if (L_ISZERO(&p_org)) { |
1964 | 0 | const char *action; |
1965 | |
1966 | | #ifdef BUG3361 |
1967 | | msyslog(LOG_INFO, |
1968 | | "receive: BUG 3361: Clearing peer->aorg "); |
1969 | | L_CLR(&peer->aorg); |
1970 | | #endif |
1971 | | /**/ |
1972 | 0 | switch (hismode) { |
1973 | | /* We allow 0org for: */ |
1974 | 0 | case UCHAR_MAX: |
1975 | 0 | action = "Allow"; |
1976 | 0 | break; |
1977 | | /* We disallow 0org for: */ |
1978 | 0 | case MODE_UNSPEC: |
1979 | 0 | case MODE_ACTIVE: |
1980 | 0 | case MODE_PASSIVE: |
1981 | 0 | case MODE_CLIENT: |
1982 | 0 | case MODE_SERVER: |
1983 | 0 | case MODE_BROADCAST: |
1984 | 0 | action = "Drop"; |
1985 | 0 | peer->bogusorg++; |
1986 | 0 | peer->flash |= TEST2; /* bogus */ |
1987 | 0 | break; |
1988 | 0 | default: |
1989 | 0 | action = ""; /* for cranky compilers / MSVC */ |
1990 | 0 | INSIST(!"receive(): impossible hismode"); |
1991 | 0 | break; |
1992 | 0 | } |
1993 | | /**/ |
1994 | 0 | msyslog(LOG_INFO, |
1995 | 0 | "receive: %s 0 origin timestamp from %s@%s xmt %#010x.%08x", |
1996 | 0 | action, hm_str, ntoa(&peer->srcadr), |
1997 | 0 | ntohl(pkt->xmt.l_ui), ntohl(pkt->xmt.l_uf)); |
1998 | 0 | } else if (!L_ISEQU(&p_org, &peer->aorg)) { |
1999 | | /* are there cases here where we should bail? */ |
2000 | | /* Should we set TEST2 if we decide to try xleave? */ |
2001 | 0 | peer->bogusorg++; |
2002 | 0 | peer->flash |= TEST2; /* bogus */ |
2003 | 0 | msyslog(LOG_INFO, |
2004 | 0 | "receive: Unexpected origin timestamp %#010x.%08x does not match aorg %#010x.%08x from %s@%s xmt %#010x.%08x", |
2005 | 0 | ntohl(pkt->org.l_ui), ntohl(pkt->org.l_uf), |
2006 | 0 | peer->aorg.l_ui, peer->aorg.l_uf, |
2007 | 0 | hm_str, ntoa(&peer->srcadr), |
2008 | 0 | ntohl(pkt->xmt.l_ui), ntohl(pkt->xmt.l_uf)); |
2009 | 0 | if ( !L_ISZERO(&peer->dst) |
2010 | 0 | && L_ISEQU(&p_org, &peer->dst)) { |
2011 | | /* Might be the start of an interleave */ |
2012 | 0 | if (dynamic_interleave) { |
2013 | 0 | peer->flip = 1; |
2014 | 0 | report_event(PEVNT_XLEAVE, peer, NULL); |
2015 | 0 | } else { |
2016 | 0 | msyslog(LOG_INFO, |
2017 | 0 | "receive: Dynamic interleave from %s@%s denied", |
2018 | 0 | hm_str, ntoa(&peer->srcadr)); |
2019 | 0 | } |
2020 | 0 | } |
2021 | 0 | } else { |
2022 | 0 | L_CLR(&peer->aorg); |
2023 | 0 | } |
2024 | | |
2025 | | /* |
2026 | | * Check for valid nonzero timestamp fields. |
2027 | | */ |
2028 | 0 | } else if ( L_ISZERO(&p_org) |
2029 | 0 | || L_ISZERO(&p_rec) |
2030 | 0 | || L_ISZERO(&peer->dst)) { |
2031 | 0 | peer->flash |= TEST3; /* unsynch */ |
2032 | | |
2033 | | /* |
2034 | | * Check for bogus packet in interleaved symmetric mode. This |
2035 | | * can happen if a packet is lost, duplicated or crossed. If |
2036 | | * found, flip and resynchronize. |
2037 | | */ |
2038 | 0 | } else if ( !L_ISZERO(&peer->dst) |
2039 | 0 | && !L_ISEQU(&p_org, &peer->dst)) { |
2040 | 0 | DPRINTF(2, ("receive: drop: Bogus packet in interleaved symmetric mode\n")); |
2041 | 0 | peer->bogusorg++; |
2042 | 0 | peer->flags |= FLAG_XBOGUS; |
2043 | 0 | peer->flash |= TEST2; /* bogus */ |
2044 | | #ifdef BUG3453 |
2045 | | return; /* Bogus packet, we are done */ |
2046 | | #endif |
2047 | 0 | } |
2048 | | |
2049 | | /**/ |
2050 | | |
2051 | | /* |
2052 | | * If this is a crypto_NAK, the server cannot authenticate a |
2053 | | * client packet. The server might have just changed keys. Clear |
2054 | | * the association and restart the protocol. |
2055 | | */ |
2056 | 0 | if (crypto_nak_test == VALIDNAK) { |
2057 | 0 | report_event(PEVNT_AUTH, peer, "crypto_NAK"); |
2058 | 0 | peer->flash |= TEST5; /* bad auth */ |
2059 | 0 | peer->badauth++; |
2060 | 0 | if (peer->flags & FLAG_PREEMPT) { |
2061 | 0 | if (unpeer_crypto_nak_early) { |
2062 | 0 | unpeer(peer); |
2063 | 0 | } |
2064 | 0 | DPRINTF(2, ("receive: drop: PREEMPT crypto_NAK\n")); |
2065 | 0 | return; |
2066 | 0 | } |
2067 | | #ifdef AUTOKEY |
2068 | | if (peer->crypto) { |
2069 | | peer_clear(peer, "AUTH"); |
2070 | | } |
2071 | | #endif /* AUTOKEY */ |
2072 | 0 | DPRINTF(2, ("receive: drop: crypto_NAK\n")); |
2073 | 0 | return; |
2074 | | |
2075 | | /* |
2076 | | * If the digest fails or it's missing for authenticated |
2077 | | * associations, the client cannot authenticate a server |
2078 | | * reply to a client packet previously sent. The loopback check |
2079 | | * is designed to avoid a bait-and-switch attack, which was |
2080 | | * possible in past versions. If symmetric modes, return a |
2081 | | * crypto-NAK. The peer should restart the protocol. |
2082 | | */ |
2083 | 0 | } else if (!AUTH(peer->keyid || has_mac || |
2084 | 0 | (restrict_mask & RES_DONTTRUST), is_authentic)) { |
2085 | |
2086 | 0 | if (peer->flash & PKT_TEST_MASK) { |
2087 | 0 | msyslog(LOG_INFO, |
2088 | 0 | "receive: Bad auth in packet with bad timestamps from %s denied - spoof?", |
2089 | 0 | ntoa(&peer->srcadr)); |
2090 | 0 | return; |
2091 | 0 | } |
2092 | | |
2093 | 0 | report_event(PEVNT_AUTH, peer, "digest"); |
2094 | 0 | peer->flash |= TEST5; /* bad auth */ |
2095 | 0 | peer->badauth++; |
2096 | 0 | if ( has_mac |
2097 | 0 | && ( hismode == MODE_ACTIVE |
2098 | 0 | || hismode == MODE_PASSIVE)) |
2099 | 0 | fast_xmit(rbufp, MODE_ACTIVE, 0, restrict_mask); |
2100 | 0 | if (peer->flags & FLAG_PREEMPT) { |
2101 | 0 | if (unpeer_digest_early) { |
2102 | 0 | unpeer(peer); |
2103 | 0 | } |
2104 | 0 | } |
2105 | | #ifdef AUTOKEY |
2106 | | else if (peer_clear_digest_early && peer->crypto) { |
2107 | | peer_clear(peer, "AUTH"); |
2108 | | } |
2109 | | #endif /* AUTOKEY */ |
2110 | 0 | DPRINTF(2, ("receive: drop: Bad or missing AUTH\n")); |
2111 | 0 | return; |
2112 | 0 | } |
2113 | | |
2114 | | /* |
2115 | | * For broadcast packets: |
2116 | | * |
2117 | | * HMS: This next line never made much sense to me, even |
2118 | | * when it was up higher: |
2119 | | * If an initial volley, bail out now and let the |
2120 | | * client do its stuff. |
2121 | | * |
2122 | | * If the packet has not failed authentication, then |
2123 | | * - if the origin timestamp is nonzero this is an |
2124 | | * interleaved broadcast, so restart the protocol. |
2125 | | * - else, this is not an interleaved broadcast packet. |
2126 | | */ |
2127 | 0 | if (hismode == MODE_BROADCAST) { |
2128 | 0 | if ( is_authentic == AUTH_OK |
2129 | 0 | || is_authentic == AUTH_NONE) { |
2130 | 0 | if (!L_ISZERO(&p_org)) { |
2131 | 0 | if (!(peer->flags & FLAG_XB)) { |
2132 | 0 | msyslog(LOG_INFO, |
2133 | 0 | "receive: Broadcast server at %s is in interleave mode", |
2134 | 0 | ntoa(&peer->srcadr)); |
2135 | 0 | peer->flags |= FLAG_XB; |
2136 | 0 | peer->aorg = p_xmt; |
2137 | 0 | peer->borg = rbufp->recv_time; |
2138 | 0 | report_event(PEVNT_XLEAVE, peer, NULL); |
2139 | 0 | return; |
2140 | 0 | } |
2141 | 0 | } else if (peer->flags & FLAG_XB) { |
2142 | 0 | msyslog(LOG_INFO, |
2143 | 0 | "receive: Broadcast server at %s is no longer in interleave mode", |
2144 | 0 | ntoa(&peer->srcadr)); |
2145 | 0 | peer->flags &= ~FLAG_XB; |
2146 | 0 | } |
2147 | 0 | } else { |
2148 | 0 | msyslog(LOG_INFO, |
2149 | 0 | "receive: Bad broadcast auth (%d) from %s", |
2150 | 0 | is_authentic, ntoa(&peer->srcadr)); |
2151 | 0 | } |
2152 | | |
2153 | | /* |
2154 | | * Now that we know the packet is correctly authenticated, |
2155 | | * update peer->bxmt. |
2156 | | */ |
2157 | 0 | peer->bxmt = p_xmt; |
2158 | 0 | } |
2159 | | |
2160 | | |
2161 | | /* |
2162 | | ** Update the state variables. |
2163 | | */ |
2164 | 0 | if (peer->flip == 0) { |
2165 | 0 | if (hismode != MODE_BROADCAST) |
2166 | 0 | peer->rec = p_xmt; |
2167 | 0 | peer->dst = rbufp->recv_time; |
2168 | 0 | } |
2169 | 0 | peer->xmt = p_xmt; |
2170 | | |
2171 | | /* |
2172 | | * Set the peer ppoll to the maximum of the packet ppoll and the |
2173 | | * peer minpoll. If a kiss-o'-death, set the peer minpoll to |
2174 | | * this maximum and advance the headway to give the sender some |
2175 | | * headroom. Very intricate. |
2176 | | */ |
2177 | | |
2178 | | /* |
2179 | | * Check for any kiss codes. Note this is only used when a server |
2180 | | * responds to a packet request. |
2181 | | */ |
2182 | | |
2183 | | /*
2184 | | * Check to see if this is a RATE Kiss Code.
2185 | | * Currently this kiss code accepts whatever poll
2186 | | * rate the server sends.
2187 | | */
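	/*
	 * Worked example of the RATE handling below, assuming minpoll 6
	 * and the usual NTP_SHIFT of 8: ppoll is clamped to at least 6,
	 * and on a RATE kiss the headway throttle becomes
	 * (8 + 1) * (1 << 6) = 576 s before poll_update() reschedules
	 * the next transmission.
	 */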
2188 | 0 | peer->ppoll = max(peer->minpoll, pkt->ppoll); |
2189 | 0 | if (kissCode == RATEKISS) { |
2190 | 0 | peer->selbroken++; /* Increment the KoD count */ |
2191 | 0 | report_event(PEVNT_RATE, peer, NULL); |
2192 | 0 | if (pkt->ppoll > peer->minpoll) |
2193 | 0 | peer->minpoll = peer->ppoll; |
2194 | 0 | peer->burst = peer->retry = 0; |
2195 | 0 | peer->throttle = (NTP_SHIFT + 1) * (1 << peer->minpoll); |
2196 | 0 | poll_update(peer, pkt->ppoll); |
2197 | 0 | return; /* kiss-o'-death */ |
2198 | 0 | } |
2199 | 0 | if (kissCode != NOKISS) { |
2200 | 0 | peer->selbroken++; /* Increment the KoD count */ |
2201 | 0 | return; /* Drop any other kiss code packets */ |
2202 | 0 | } |
2203 | | |
2204 | | |
2205 | | /* |
2206 | | * XXX |
2207 | | */ |
2208 | | |
2209 | | |
2210 | | /* |
2211 | | * If: |
2212 | | * - this is a *cast (uni-, broad-, or m-) server packet |
2213 | | * - and it's symmetric-key authenticated |
2214 | | * then see if the sender's IP is trusted for this keyid. |
2215 | | * If it is, great - nothing special to do here. |
2216 | | * Otherwise, we should report and bail. |
2217 | | * |
2218 | | * Autokey-authenticated packets are accepted. |
2219 | | */ |
2220 | | |
2221 | 0 | switch (hismode) { |
2222 | 0 | case MODE_SERVER: /* server mode */ |
2223 | 0 | case MODE_BROADCAST: /* broadcast mode */ |
2224 | 0 | case MODE_ACTIVE: /* symmetric active mode */ |
2225 | 0 | case MODE_PASSIVE: /* symmetric passive mode */ |
2226 | 0 | if ( is_authentic == AUTH_OK |
2227 | 0 | && skeyid |
2228 | 0 | && skeyid <= NTP_MAXKEY |
2229 | 0 | && !authistrustedip(skeyid, &peer->srcadr)) { |
2230 | 0 | report_event(PEVNT_AUTH, peer, "authIP"); |
2231 | 0 | peer->badauth++; |
2232 | 0 | return; |
2233 | 0 | } |
2234 | 0 | break; |
2235 | | |
2236 | 0 | case MODE_CLIENT: /* client mode */ |
2237 | | #if 0 /* At this point, MODE_CONTROL is overloaded by MODE_BCLIENT */ |
2238 | | case MODE_CONTROL: /* control mode */ |
2239 | | #endif |
2240 | 0 | case MODE_PRIVATE: /* private mode */ |
2241 | 0 | case MODE_BCLIENT: /* broadcast client mode */ |
2242 | 0 | break; |
2243 | | |
2244 | 0 | case MODE_UNSPEC: /* unspecified (old version) */ |
2245 | 0 | default: |
2246 | 0 | msyslog(LOG_INFO, |
2247 | 0 | "receive: Unexpected mode (%d) in packet from %s", |
2248 | 0 | hismode, ntoa(&peer->srcadr)); |
2249 | 0 | break; |
2250 | 0 | } |
2251 | | |
2252 | | |
2253 | | /* |
2254 | | * That was hard and I am sweaty, but the packet is squeaky |
2255 | | * clean. Get on with real work. |
2256 | | */ |
2257 | 0 | peer->timereceived = current_time; |
2258 | 0 | peer->timelastrec = current_time; |
2259 | 0 | if (is_authentic == AUTH_OK) |
2260 | 0 | peer->flags |= FLAG_AUTHENTIC; |
2261 | 0 | else |
2262 | 0 | peer->flags &= ~FLAG_AUTHENTIC; |
2263 | |
2264 | | #ifdef AUTOKEY |
2265 | | /* |
2266 | | * More autokey dance. The rules of the cha-cha are as follows: |
2267 | | * |
2268 | | * 1. If there is no key or the key is not auto, do nothing. |
2269 | | * |
2270 | | * 2. If this packet is in response to the one just previously |
2271 | | * sent or from a broadcast server, do the extension fields. |
2272 | | * Otherwise, assume bogosity and bail out. |
2273 | | * |
2274 | | * 3. If an extension field contains a verified signature, it is |
2275 | | * self-authenticated and we sit the dance. |
2276 | | * |
2277 | | * 4. If this is a server reply, check only to see that the |
2278 | | * transmitted key ID matches the received key ID. |
2279 | | * |
2280 | | * 5. Check to see that one or more hashes of the current key ID |
2281 | | * matches the previous key ID or ultimate original key ID |
2282 | | * obtained from the broadcaster or symmetric peer. If no |
2283 | | * match, sit the dance and call for new autokey values. |
2284 | | * |
2285 | | * In case of crypto error, fire the orchestra, stop dancing and |
2286 | | * restart the protocol. |
2287 | | */ |
2288 | | if (peer->flags & FLAG_SKEY) { |
2289 | | /* |
2290 | | * Decrement remaining autokey hashes. This isn't |
2291 | | * perfect if a packet is lost, but results in no harm. |
2292 | | */ |
2293 | | ap = (struct autokey *)peer->recval.ptr; |
2294 | | if (ap != NULL) { |
2295 | | if (ap->seq > 0) |
2296 | | ap->seq--; |
2297 | | } |
2298 | | peer->flash |= TEST8; |
2299 | | rval = crypto_recv(peer, rbufp); |
2300 | | if (rval == XEVNT_OK) { |
2301 | | peer->unreach = 0; |
2302 | | } else { |
2303 | | if (rval == XEVNT_ERR) { |
2304 | | report_event(PEVNT_RESTART, peer, |
2305 | | "crypto error"); |
2306 | | peer_clear(peer, "CRYP"); |
2307 | | peer->flash |= TEST9; /* bad crypt */ |
2308 | | if (peer->flags & FLAG_PREEMPT) { |
2309 | | if (unpeer_crypto_early) { |
2310 | | unpeer(peer); |
2311 | | } |
2312 | | } |
2313 | | } |
2314 | | return; |
2315 | | } |
2316 | | |
2317 | | /* |
2318 | | * If server mode, verify the receive key ID matches |
2319 | | * the transmit key ID. |
2320 | | */ |
2321 | | if (hismode == MODE_SERVER) { |
2322 | | if (skeyid == peer->keyid) |
2323 | | peer->flash &= ~TEST8; |
2324 | | |
2325 | | /* |
2326 | | * If an extension field is present, verify only that it |
2327 | | * has been correctly signed. We don't need a sequence |
2328 | | * check here, but the sequence continues. |
2329 | | */ |
2330 | | } else if (!(peer->flash & TEST8)) { |
2331 | | peer->pkeyid = skeyid; |
2332 | | |
2333 | | /* |
2334 | | * Now the fun part. Here, skeyid is the current ID in |
2335 | | * the packet, pkeyid is the ID in the last packet and |
2336 | | * tkeyid is the hash of skeyid. If the autokey values |
2337 | | * have not been received, this is an automatic error. |
2338 | | * If so, check that the tkeyid matches pkeyid. If not, |
2339 | | * hash tkeyid and try again. If the number of hashes |
2340 | | * exceeds the number remaining in the sequence, declare |
2341 | | * a successful failure and refresh the autokey values. |
2342 | | */ |
2343 | | } else if (ap != NULL) { |
2344 | | int i; |
2345 | | |
2346 | | for (i = 0; ; i++) { |
2347 | | if ( tkeyid == peer->pkeyid |
2348 | | || tkeyid == ap->key) { |
2349 | | peer->flash &= ~TEST8; |
2350 | | peer->pkeyid = skeyid; |
2351 | | ap->seq -= i; |
2352 | | break; |
2353 | | } |
2354 | | if (i > ap->seq) { |
2355 | | peer->crypto &= |
2356 | | ~CRYPTO_FLAG_AUTO; |
2357 | | break; |
2358 | | } |
2359 | | tkeyid = session_key( |
2360 | | &rbufp->recv_srcadr, dstadr_sin, |
2361 | | tkeyid, pkeyid, 0); |
2362 | | } |
2363 | | if (peer->flash & TEST8) |
2364 | | report_event(PEVNT_AUTH, peer, "keylist"); |
2365 | | } |
2366 | | if (!(peer->crypto & CRYPTO_FLAG_PROV)) /* test 9 */ |
2367 | | peer->flash |= TEST8; /* bad autokey */ |
2368 | | |
2369 | | /* |
2370 | | * The maximum lifetime of the protocol is about one |
2371 | | * week before restarting the Autokey protocol to |
2372 | | * refresh certificates and leapseconds values. |
2373 | | */ |
2374 | | if (current_time > peer->refresh) { |
2375 | | report_event(PEVNT_RESTART, peer, |
2376 | | "crypto refresh"); |
2377 | | peer_clear(peer, "TIME"); |
2378 | | return; |
2379 | | } |
2380 | | } |
2381 | | #endif /* AUTOKEY */ |
2382 | | |
2383 | | /* |
2384 | | * The dance is complete and the flash bits have been lit. Toss |
2385 | | * the packet over the fence for processing, which may light up |
2386 | | * more flashers. |
2387 | | */ |
2388 | 0 | process_packet(peer, pkt, rbufp->recv_length); |
2389 | | |
2390 | | /* |
2391 | | * In interleaved mode update the state variables. Also adjust the |
2392 | | * transmit phase to avoid crossover. |
2393 | | */ |
2394 | 0 | if (peer->flip != 0) { |
2395 | 0 | peer->rec = p_rec; |
2396 | 0 | peer->dst = rbufp->recv_time; |
2397 | 0 | if (peer->nextdate - current_time < (1U << min(peer->ppoll, |
2398 | 0 | peer->hpoll)) / 2) |
2399 | 0 | peer->nextdate++; |
2400 | 0 | else |
2401 | 0 | peer->nextdate--; |
2402 | 0 | } |
2403 | 0 | } |
2404 | | |
2405 | | |
2406 | | /* |
2407 | | * process_packet - Packet Procedure, a la Section 3.4.4 of RFC-1305 |
2408 | | * Or almost, at least. If we're in here we have a reasonable |
2409 | | * expectation that we will be having a long term |
2410 | | * relationship with this host. |
2411 | | */ |
2412 | | void |
2413 | | process_packet( |
2414 | | register struct peer *peer, |
2415 | | register struct pkt *pkt, |
2416 | | u_int len |
2417 | | ) |
2418 | 0 | { |
2419 | 0 | double t34, t21; |
2420 | 0 | double p_offset, p_del, p_disp; |
2421 | 0 | l_fp p_rec, p_xmt, p_org, p_reftime, ci; |
2422 | 0 | u_char pmode, pleap, pversion, pstratum; |
2423 | 0 | char statstr[NTP_MAXSTRLEN]; |
2424 | | #ifdef ASSYM |
2425 | | int itemp; |
2426 | | double etemp, ftemp, td; |
2427 | | #endif /* ASSYM */ |
2428 | |
2429 | | #if 0 |
2430 | | sys_processed++; |
2431 | | peer->processed++; |
2432 | | #endif |
2433 | 0 | p_del = FPTOD(NTOHS_FP(pkt->rootdelay)); |
2434 | 0 | p_offset = 0; |
2435 | 0 | p_disp = FPTOD(NTOHS_FP(pkt->rootdisp)); |
2436 | 0 | NTOHL_FP(&pkt->reftime, &p_reftime); |
2437 | 0 | NTOHL_FP(&pkt->org, &p_org); |
2438 | 0 | NTOHL_FP(&pkt->rec, &p_rec); |
2439 | 0 | NTOHL_FP(&pkt->xmt, &p_xmt); |
2440 | 0 | pmode = PKT_MODE(pkt->li_vn_mode); |
2441 | 0 | pleap = PKT_LEAP(pkt->li_vn_mode); |
2442 | 0 | pversion = PKT_VERSION(pkt->li_vn_mode); |
2443 | 0 | pstratum = PKT_TO_STRATUM(pkt->stratum); |
2444 | | |
2445 | | /**/ |
2446 | | |
2447 | | /**/ |
2448 | | |
2449 | | /* |
2450 | | * Verify the server is synchronized; that is, the leap bits, |
2451 | | * stratum and root distance are valid. |
2452 | | */ |
2453 | | if ( pleap == LEAP_NOTINSYNC /* test 6 */ |
2454 | 0 | || pstratum < sys_floor || pstratum >= sys_ceiling) |
2455 | 0 | peer->flash |= TEST6; /* bad synch or strat */ |
2456 | 0 | if (p_del / 2 + p_disp >= MAXDISPERSE) /* test 7 */ |
2457 | 0 | peer->flash |= TEST7; /* bad header */ |
2458 | | |
2459 | | /* |
2460 | | * If any tests fail at this point, the packet is discarded. |
2461 | | * Note that some flashers may have already been set in the |
2462 | | * receive() routine. |
2463 | | */ |
2464 | 0 | if (peer->flash & PKT_TEST_MASK) { |
2465 | 0 | peer->seldisptoolarge++; |
2466 | 0 | DPRINTF(1, ("packet: flash header %04x\n", |
2467 | 0 | peer->flash)); |
2468 | 0 | poll_update(peer, peer->hpoll); /* ppoll updated? */ |
2469 | 0 | return; |
2470 | 0 | } |
2471 | | |
2472 | | /**/ |
2473 | | |
2474 | 0 | #if 1 |
2475 | 0 | sys_processed++; |
2476 | 0 | peer->processed++; |
2477 | 0 | #endif |
2478 | | |
2479 | | /* |
2480 | | * Capture the header values in the client/peer association.. |
2481 | | * Capture the header values in the client/peer association.
2482 | 0 | record_raw_stats(&peer->srcadr, |
2483 | 0 | peer->dstadr ? &peer->dstadr->sin : NULL, |
2484 | 0 | &p_org, &p_rec, &p_xmt, &peer->dst, |
2485 | 0 | pleap, pversion, pmode, pstratum, pkt->ppoll, pkt->precision, |
2486 | 0 | p_del, p_disp, pkt->refid, |
2487 | 0 | len - MIN_V4_PKT_LEN, (u_char *)&pkt->exten); |
2488 | 0 | peer->leap = pleap; |
2489 | 0 | peer->stratum = min(pstratum, STRATUM_UNSPEC); |
2490 | 0 | peer->pmode = pmode; |
2491 | 0 | peer->precision = pkt->precision; |
2492 | 0 | peer->rootdelay = p_del; |
2493 | 0 | peer->rootdisp = p_disp; |
2494 | 0 | peer->refid = pkt->refid; /* network byte order */ |
2495 | 0 | peer->reftime = p_reftime; |
2496 | | |
2497 | | /* |
2498 | | * First, if either burst mode is armed, enable the burst. |
2499 | | * Compute the headway for the next packet and delay if |
2500 | | * necessary to avoid exceeding the threshold. |
2501 | | */ |
2502 | 0 | if (peer->retry > 0) { |
2503 | 0 | peer->retry = 0; |
2504 | 0 | if (peer->reach) |
2505 | 0 | peer->burst = min(1 << (peer->hpoll - |
2506 | 0 | peer->minpoll), NTP_SHIFT) - 1; |
2507 | 0 | else |
2508 | 0 | peer->burst = NTP_IBURST - 1; |
2509 | 0 | if (peer->burst > 0) |
2510 | 0 | peer->nextdate = current_time; |
2511 | 0 | } |
2512 | 0 | poll_update(peer, peer->hpoll); |
2513 | | |
2514 | | /**/ |
2515 | | |
2516 | | /* |
2517 | | * If the peer was previously unreachable, raise a trap. In any |
2518 | | * case, mark it reachable. |
2519 | | */ |
2520 | 0 | if (!peer->reach) { |
2521 | 0 | report_event(PEVNT_REACH, peer, NULL); |
2522 | 0 | peer->timereachable = current_time; |
2523 | 0 | } |
2524 | 0 | peer->reach |= 1; |
2525 | | |
2526 | | /* |
2527 | | * For a client/server association, calculate the clock offset, |
2528 | | * roundtrip delay and dispersion. The equations are reordered |
2529 | | * from the spec for more efficient use of temporaries. For a |
2530 | | * broadcast association, offset the last measurement by the |
2531 | | * computed delay during the client/server volley. Note the |
2532 | | * computation of dispersion includes the system precision plus |
2533 | | * that due to the frequency error since the origin time. |
2534 | | * |
2535 | | * It is very important to respect the hazards of overflow. The |
2536 | | * only permitted operation on raw timestamps is subtraction, |
2537 | | * where the result is a signed quantity spanning from 68 years |
2538 | | * in the past to 68 years in the future. To avoid loss of |
2539 | | * precision, these calculations are done using 64-bit integer |
2540 | | * arithmetic. However, the offset and delay calculations are |
2541 | | * sums and differences of these first-order differences, which |
2542 | | * if done using 64-bit integer arithmetic, would be valid over |
2543 | | * only half that span. Since the typical first-order |
2544 | | * differences are usually very small, they are converted to 64- |
2545 | | * bit doubles and all remaining calculations done in floating- |
2546 | | * double arithmetic. This preserves the accuracy while |
2547 | | * retaining the 68-year span. |
2548 | | * |
2549 | | * There are three interleaving schemes, basic, interleaved |
2550 | | * symmetric and interleaved broadcast. The timestamps are |
2551 | | * idiosyncratically different. See the onwire briefing/white
2552 | | * paper at www.eecis.udel.edu/~mills for details. |
2553 | | * |
2554 | | * Interleaved symmetric mode |
2555 | | * t1 = peer->aorg/borg, t2 = peer->rec, t3 = p_xmt, |
2556 | | * t4 = peer->dst |
2557 | | */ |
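	/*
	 * A small worked example of the offset/delay algebra used in the
	 * branches below (numbers are illustrative only): with
	 * t1 = 10.000, t2 = 10.050, t3 = 10.051 and t4 = 10.102,
	 * t21 = t2 - t1 = 0.050 and t34 = t3 - t4 = -0.051, giving
	 * offset = (t21 + t34) / 2 = -0.0005 s and
	 * delay = t21 - t34 = 0.101 s.
	 */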
2558 | 0 | if (peer->flip != 0) { |
2559 | 0 | ci = p_xmt; /* t3 - t4 */ |
2560 | 0 | L_SUB(&ci, &peer->dst); |
2561 | 0 | LFPTOD(&ci, t34); |
2562 | 0 | ci = p_rec; /* t2 - t1 */ |
2563 | 0 | if (peer->flip > 0) |
2564 | 0 | L_SUB(&ci, &peer->borg); |
2565 | 0 | else |
2566 | 0 | L_SUB(&ci, &peer->aorg); |
2567 | 0 | LFPTOD(&ci, t21); |
2568 | 0 | p_del = t21 - t34; |
2569 | 0 | p_offset = (t21 + t34) / 2.; |
2570 | 0 | if (p_del < 0 || p_del > 1.) { |
2571 | 0 | snprintf(statstr, sizeof(statstr), |
2572 | 0 | "t21 %.6f t34 %.6f", t21, t34); |
2573 | 0 | report_event(PEVNT_XERR, peer, statstr); |
2574 | 0 | return; |
2575 | 0 | } |
2576 | | |
2577 | | /* |
2578 | | * Broadcast modes |
2579 | | */ |
2580 | 0 | } else if (peer->pmode == MODE_BROADCAST) { |
2581 | | |
2582 | | /* |
2583 | | * Interleaved broadcast mode. Use interleaved timestamps. |
2584 | | * t1 = peer->borg, t2 = p_org, t3 = p_org, t4 = aorg |
2585 | | */ |
2586 | 0 | if (peer->flags & FLAG_XB) { |
2587 | 0 | ci = p_org; /* delay */ |
2588 | 0 | L_SUB(&ci, &peer->aorg); |
2589 | 0 | LFPTOD(&ci, t34); |
2590 | 0 | ci = p_org; /* t2 - t1 */ |
2591 | 0 | L_SUB(&ci, &peer->borg); |
2592 | 0 | LFPTOD(&ci, t21); |
2593 | 0 | peer->aorg = p_xmt; |
2594 | 0 | peer->borg = peer->dst; |
2595 | 0 | if (t34 < 0 || t34 > 1.) { |
2596 | | /* drop all if in the initial volley */ |
2597 | 0 | if (FLAG_BC_VOL & peer->flags) |
2598 | 0 | goto bcc_init_volley_fail; |
2599 | 0 | snprintf(statstr, sizeof(statstr), |
2600 | 0 | "offset %.6f delay %.6f", t21, t34); |
2601 | 0 | report_event(PEVNT_XERR, peer, statstr); |
2602 | 0 | return; |
2603 | 0 | } |
2604 | 0 | p_offset = t21; |
2605 | 0 | peer->xleave = t34; |
2606 | | |
2607 | | /* |
2608 | | * Basic broadcast - use direct timestamps. |
2609 | | * t3 = p_xmt, t4 = peer->dst |
2610 | | */ |
2611 | 0 | } else { |
2612 | 0 | ci = p_xmt; /* t3 - t4 */ |
2613 | 0 | L_SUB(&ci, &peer->dst); |
2614 | 0 | LFPTOD(&ci, t34); |
2615 | 0 | p_offset = t34; |
2616 | 0 | } |
2617 | | |
2618 | | /* |
2619 | | * When calibration is complete and the clock is |
2620 | | * synchronized, the bias is calculated as the difference |
2621 | | * between the unicast timestamp and the broadcast |
2622 | | * timestamp. This works for both basic and interleaved |
2623 | | * modes. |
2624 | | * [Bug 3031] Don't keep this peer when the delay |
2625 | | * calculation gives reason to suspect clock steps. |
2626 | | * This is assumed for delays > 50ms. |
2627 | | */ |
2628 | 0 | if (FLAG_BC_VOL & peer->flags) { |
2629 | 0 | peer->flags &= ~FLAG_BC_VOL; |
2630 | 0 | peer->delay = fabs(peer->offset - p_offset) * 2; |
2631 | 0 | DPRINTF(2, ("broadcast volley: initial delay=%.6f\n", |
2632 | 0 | peer->delay)); |
2633 | 0 | if (peer->delay > fabs(sys_bdelay)) { |
2634 | 0 | bcc_init_volley_fail: |
2635 | 0 | DPRINTF(2, ("%s", "broadcast volley: initial delay exceeds limit\n")); |
2636 | 0 | unpeer(peer); |
2637 | 0 | return; |
2638 | 0 | } |
2639 | 0 | } |
2640 | 0 | peer->nextdate = current_time + (1u << peer->ppoll) - 2u; |
2641 | 0 | p_del = peer->delay; |
2642 | 0 | p_offset += p_del / 2; |
2643 | | |
2644 | | |
2645 | | /* |
2646 | | * Basic mode, otherwise known as the old fashioned way. |
2647 | | * |
2648 | | * t1 = p_org, t2 = p_rec, t3 = p_xmt, t4 = peer->dst |
2649 | | */ |
2650 | 0 | } else { |
2651 | 0 | ci = p_xmt; /* t3 - t4 */ |
2652 | 0 | L_SUB(&ci, &peer->dst); |
2653 | 0 | LFPTOD(&ci, t34); |
2654 | 0 | ci = p_rec; /* t2 - t1 */ |
2655 | 0 | L_SUB(&ci, &p_org); |
2656 | 0 | LFPTOD(&ci, t21); |
2657 | 0 | p_del = fabs(t21 - t34); |
2658 | 0 | p_offset = (t21 + t34) / 2.; |
2659 | 0 | } |
2660 | 0 | p_del = max(p_del, LOGTOD(sys_precision)); |
2661 | 0 | p_disp = LOGTOD(sys_precision) + LOGTOD(peer->precision) + |
2662 | 0 | clock_phi * p_del; |
2663 | |
2664 | | #if ASSYM |
2665 | | /* |
2666 | | * This code calculates the outbound and inbound data rates by |
2667 | | * measuring the differences between timestamps at different |
2668 | | * packet lengths. This is helpful in cases of large asymmetric |
2669 | | * delays commonly experienced on deep space communication |
2670 | | * links. |
2671 | | */ |
2672 | | if (peer->t21_last > 0 && peer->t34_bytes > 0) { |
2673 | | itemp = peer->t21_bytes - peer->t21_last; |
2674 | | if (itemp > 25) { |
2675 | | etemp = t21 - peer->t21; |
2676 | | if (fabs(etemp) > 1e-6) { |
2677 | | ftemp = itemp / etemp; |
2678 | | if (ftemp > 1000.) |
2679 | | peer->r21 = ftemp; |
2680 | | } |
2681 | | } |
2682 | | itemp = len - peer->t34_bytes; |
2683 | | if (itemp > 25) { |
2684 | | etemp = -t34 - peer->t34; |
2685 | | if (fabs(etemp) > 1e-6) { |
2686 | | ftemp = itemp / etemp; |
2687 | | if (ftemp > 1000.) |
2688 | | peer->r34 = ftemp; |
2689 | | } |
2690 | | } |
2691 | | } |
2692 | | |
2693 | | /* |
2694 | | * The following section compensates for different data rates on |
2695 | | * the outbound (d21) and inbound (t34) directions. To do this, |
2696 | | * it finds t such that r21 * t - r34 * (d - t) = 0, where d is |
2697 | | * the roundtrip delay. Then it calculates the correction as a |
2698 | | * fraction of d. |
2699 | | */ |
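	/*
	 * Derivation sketch for the correction below: solving
	 * r21 * t = r34 * (d - t) gives t = d * r34 / (r21 + r34), so
	 * the adjustment relative to the symmetric assumption t = d / 2
	 * is td = (r34 / (r21 + r34) - 0.5) * d, which is the
	 * expression computed for the non-broadcast case.
	 */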
2700 | | peer->t21 = t21; |
2701 | | peer->t21_last = peer->t21_bytes; |
2702 | | peer->t34 = -t34; |
2703 | | peer->t34_bytes = len; |
2704 | | DPRINTF(2, ("packet: t21 %.9lf %d t34 %.9lf %d\n", peer->t21, |
2705 | | peer->t21_bytes, peer->t34, peer->t34_bytes)); |
2706 | | if (peer->r21 > 0 && peer->r34 > 0 && p_del > 0) { |
2707 | | if (peer->pmode != MODE_BROADCAST) |
2708 | | td = (peer->r34 / (peer->r21 + peer->r34) - |
2709 | | .5) * p_del; |
2710 | | else |
2711 | | td = 0; |
2712 | | |
2713 | | /* |
2714 | | * Unfortunately, in many cases the errors are |
2715 | | * unacceptable, so for the present the rates are not |
2716 | | * used. In future, we might find conditions where the |
2717 | | * calculations are useful, so this should be considered |
2718 | | * a work in progress. |
2719 | | */ |
2720 | | t21 -= td; |
2721 | | t34 -= td; |
2722 | | DPRINTF(2, ("packet: del %.6lf r21 %.1lf r34 %.1lf %.6lf\n", |
2723 | | p_del, peer->r21 / 1e3, peer->r34 / 1e3, |
2724 | | td)); |
2725 | | } |
2726 | | #endif /* ASSYM */ |
2727 | | |
2728 | | /* |
2729 | | * That was awesome. Now hand off to the clock filter. |
2730 | | */ |
2731 | 0 | clock_filter(peer, p_offset + peer->bias, p_del, p_disp); |
2732 | | |
2733 | | /* |
2734 | | * If we are in broadcast calibrate mode, return to broadcast |
2735 | | * client mode when the client is fit and the autokey dance is |
2736 | | * complete. |
2737 | | */ |
2738 | 0 | if ( (FLAG_BC_VOL & peer->flags) |
2739 | 0 | && MODE_CLIENT == peer->hmode |
2740 | 0 | && !(TEST11 & peer_unfit(peer))) { /* distance exceeded */ |
2741 | | #ifdef AUTOKEY |
2742 | | if (peer->flags & FLAG_SKEY) { |
2743 | | if (!(~peer->crypto & CRYPTO_FLAG_ALL)) |
2744 | | peer->hmode = MODE_BCLIENT; |
2745 | | } else { |
2746 | | peer->hmode = MODE_BCLIENT; |
2747 | | } |
2748 | | #else /* !AUTOKEY follows */ |
2749 | 0 | peer->hmode = MODE_BCLIENT; |
2750 | 0 | #endif /* !AUTOKEY */ |
2751 | 0 | } |
2752 | 0 | } |
2753 | | |
2754 | | |
2755 | | /* |
2756 | | * clock_update - Called at system process update intervals. |
2757 | | */ |
2758 | | static void |
2759 | | clock_update( |
2760 | | struct peer *peer /* peer structure pointer */ |
2761 | | ) |
2762 | 0 | { |
2763 | 0 | double dtemp; |
2764 | 0 | l_fp now; |
2765 | | #ifdef HAVE_LIBSCF_H |
2766 | | char *fmri; |
2767 | | #endif /* HAVE_LIBSCF_H */ |
2768 | | |
2769 | | /* |
2770 | | * Update the system state variables. We do this very carefully, |
2771 | | * as the poll interval might need to be clamped differently. |
2772 | | */ |
2773 | 0 | sys_peer = peer; |
2774 | 0 | sys_epoch = peer->epoch; |
2775 | 0 | if (sys_poll < peer->minpoll) |
2776 | 0 | sys_poll = peer->minpoll; |
2777 | 0 | if (sys_poll > peer->maxpoll) |
2778 | 0 | sys_poll = peer->maxpoll; |
2779 | 0 | poll_update(peer, sys_poll); |
2780 | 0 | sys_stratum = min(peer->stratum + 1, STRATUM_UNSPEC); |
2781 | 0 | if ( peer->stratum == STRATUM_REFCLOCK |
2782 | 0 | || peer->stratum == STRATUM_UNSPEC) |
2783 | 0 | sys_refid = peer->refid; |
2784 | 0 | else |
2785 | 0 | sys_refid = addr2refid(&peer->srcadr); |
2786 | | /* |
2787 | | * Root Dispersion (E) is defined (in RFC 5905) as: |
2788 | | * |
2789 | | * E = p.epsilon_r + p.epsilon + p.psi + PHI*(s.t - p.t) + |THETA| |
2790 | | * |
2791 | | * where: |
2792 | | * p.epsilon_r is the PollProc's root dispersion |
2793 | | * p.epsilon is the PollProc's dispersion |
2794 | | * p.psi is the PollProc's jitter |
2795 | | * THETA is the combined offset |
2796 | | * |
2797 | | * NB: Think Hard about where these numbers come from and |
2798 | | * what they mean. When did peer->update happen? Has anything |
2799 | | * interesting happened since then? What values are the most |
2800 | | * defensible? Why? |
2801 | | * |
2802 | | * DLM thinks this equation is probably the best of all worse choices. |
2803 | | */ |
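	/*
	 * Rough numeric sketch of the sum below (illustrative values,
	 * assuming the usual PHI of 15e-6 and a 1 ms sys_mindisp floor):
	 * rootdisp 10 ms + disp 5 ms + jitter 2 ms
	 * + 15e-6 * 64 s (~0.96 ms) + |offset| 1 ms ~= 19 ms,
	 * which is above the floor and so becomes the new sys_rootdisp.
	 */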
2804 | 0 | dtemp = peer->rootdisp |
2805 | 0 | + peer->disp |
2806 | 0 | + sys_jitter |
2807 | 0 | + clock_phi * (current_time - peer->update) |
2808 | 0 | + fabs(sys_offset); |
2809 | |
2810 | 0 | if (dtemp > sys_mindisp) |
2811 | 0 | sys_rootdisp = dtemp; |
2812 | 0 | else |
2813 | 0 | sys_rootdisp = sys_mindisp; |
2814 | 0 | sys_rootdelay = peer->delay + peer->rootdelay; |
2815 | 0 | sys_reftime = peer->dst; |
2816 | |
2817 | 0 | DPRINTF(1, ("clock_update: at %lu sample %lu associd %d\n", |
2818 | 0 | current_time, peer->epoch, peer->associd)); |
2819 | | |
2820 | | /* |
2821 | | * Comes now the moment of truth. Crank the clock discipline and |
2822 | | * see what comes out. |
2823 | | */ |
2824 | 0 | switch (local_clock(peer, sys_offset)) { |
2825 | | |
2826 | | /* |
2827 | | * Clock exceeds panic threshold. Life as we know it ends. |
2828 | | */ |
2829 | 0 | case -1: |
2830 | | #ifdef HAVE_LIBSCF_H |
2831 | | /* |
2832 | | * For Solaris enter the maintenance mode. |
2833 | | */ |
2834 | | if ((fmri = getenv("SMF_FMRI")) != NULL) { |
2835 | | if (smf_maintain_instance(fmri, 0) < 0) { |
2836 | | printf("smf_maintain_instance: %s\n", |
2837 | | scf_strerror(scf_error())); |
2838 | | exit(1); |
2839 | | } |
2840 | | /* |
2841 | | * Sleep until SMF kills us. |
2842 | | */ |
2843 | | for (;;) |
2844 | | pause(); |
2845 | | } |
2846 | | #endif /* HAVE_LIBSCF_H */ |
2847 | 0 | exit (-1); |
2848 | | /* not reached */ |
2849 | | |
2850 | | /* |
2851 | | * Clock was stepped. Flush all time values of all peers. |
2852 | | */ |
2853 | 0 | case 2: |
2854 | 0 | clear_all(); |
2855 | 0 | set_sys_leap(LEAP_NOTINSYNC); |
2856 | 0 | sys_stratum = STRATUM_UNSPEC; |
2857 | 0 | memcpy(&sys_refid, "STEP", 4); |
2858 | 0 | sys_rootdelay = 0; |
2859 | 0 | sys_rootdisp = 0; |
2860 | 0 | L_CLR(&sys_reftime); |
2861 | 0 | sys_jitter = LOGTOD(sys_precision); |
2862 | 0 | leapsec_reset_frame(); |
2863 | 0 | break; |
2864 | | |
2865 | | /* |
2866 | | * Clock was slewed. Handle the leapsecond stuff. |
2867 | | */ |
2868 | 0 | case 1: |
2869 | | |
2870 | | /* |
2871 | | * If this is the first time the clock is set, reset the |
2872 | | * leap bits. If crypto, the timer will goose the setup |
2873 | | * process. |
2874 | | */ |
2875 | 0 | if (sys_leap == LEAP_NOTINSYNC) { |
2876 | 0 | set_sys_leap(LEAP_NOWARNING); |
2877 | | #ifdef AUTOKEY |
2878 | | if (crypto_flags) |
2879 | | crypto_update(); |
2880 | | #endif /* AUTOKEY */ |
2881 | | /* |
2882 | | * If our parent process is waiting for the |
2883 | | * first clock sync, send them home satisfied. |
2884 | | */ |
2885 | 0 | #ifdef HAVE_WORKING_FORK |
2886 | 0 | if (waitsync_fd_to_close != -1) { |
2887 | 0 | close(waitsync_fd_to_close); |
2888 | 0 | waitsync_fd_to_close = -1; |
2889 | 0 | DPRINTF(1, ("notified parent --wait-sync is done\n")); |
2890 | 0 | } |
2891 | 0 | #endif /* HAVE_WORKING_FORK */ |
2892 | |
2893 | 0 | } |
2894 | | |
2895 | | /* |
2896 | | * If there is no leap second pending and the number of |
2897 | | * survivor leap bits is greater than half the number of |
2898 | | * survivors, try to schedule a leap for the end of the |
2899 | | * current month. (This only works if no leap second for |
2900 | | * that range is in the table, so doing this more than |
2901 | | * once is mostly harmless.) |
2902 | | */ |
2903 | 0 | if (leapsec == LSPROX_NOWARN) { |
2904 | 0 | if ( leap_vote_ins > leap_vote_del |
2905 | 0 | && leap_vote_ins > sys_survivors / 2) { |
2906 | 0 | get_systime(&now); |
2907 | 0 | leapsec_add_dyn(TRUE, now.l_ui, NULL); |
2908 | 0 | } |
2909 | 0 | if ( leap_vote_del > leap_vote_ins |
2910 | 0 | && leap_vote_del > sys_survivors / 2) { |
2911 | 0 | get_systime(&now); |
2912 | 0 | leapsec_add_dyn(FALSE, now.l_ui, NULL); |
2913 | 0 | } |
2914 | 0 | } |
2915 | 0 | break; |
2916 | | |
2917 | | /* |
2918 | | * Popcorn spike or step threshold exceeded. Pretend it never |
2919 | | * happened. |
2920 | | */ |
2921 | 0 | default: |
2922 | 0 | break; |
2923 | 0 | } |
2924 | 0 | } |
2925 | | |
2926 | | |
2927 | | /* |
2928 | | * poll_update - update peer poll interval |
2929 | | */ |
2930 | | void |
2931 | | poll_update( |
2932 | | struct peer *peer, /* peer structure pointer */ |
2933 | | u_char mpoll |
2934 | | ) |
2935 | 0 | { |
2936 | 0 | u_long next, utemp; |
2937 | 0 | u_char hpoll; |
2938 | | |
2939 | | /* |
2940 | | * This routine figures out when the next poll should be sent. |
2941 | | * That turns out to be wickedly complicated. One problem is |
2942 | | * that sometimes the time for the next poll is in the past when |
2943 | | * the poll interval is reduced. We watch out for races here |
2944 | | * between the receive process and the poll process. |
2945 | | * |
2946 | | * Clamp the poll interval between minpoll and maxpoll. |
2947 | | */ |
2948 | 0 | hpoll = max(min(peer->maxpoll, mpoll), peer->minpoll); |
2949 | |
2950 | | #ifdef AUTOKEY |
2951 | | /* |
2952 | | * If during the crypto protocol the poll interval has changed, |
2953 | | * the lifetimes in the key list are probably bogus. Purge the |
2954 | | * key list and regenerate it later.
2955 | | */ |
2956 | | if ((peer->flags & FLAG_SKEY) && hpoll != peer->hpoll) |
2957 | | key_expire(peer); |
2958 | | #endif /* AUTOKEY */ |
2959 | 0 | peer->hpoll = hpoll; |
2960 | | |
2961 | | /* |
2962 | | * There are three variables important for poll scheduling, the |
2963 | | * current time (current_time), next scheduled time (nextdate) |
2964 | | * and the earliest time (utemp). The earliest time is 2
2965 | | * seconds, but could be more due to rate management. When |
2966 | | * sending in a burst, use the earliest time. When not in a |
2967 | | * burst but with a reply pending, send at the earliest time |
2968 | | * unless the next scheduled time has not advanced. This can |
2969 | | * only happen if multiple replies are pending in the same |
2970 | | * response interval. Otherwise, send at the later of the next |
2971 | | * scheduled time and the earliest time. |
2972 | | * |
2973 | | * Now we figure out if there is an override. If a burst is in |
2974 | | * progress and we get called from the receive process, just |
2975 | | * slink away. If called from the poll process, delay 1 s for a |
2976 | | * reference clock, otherwise 2 s. |
2977 | | */ |
2978 | 0 | utemp = current_time + max(peer->throttle - (NTP_SHIFT - 1) * |
2979 | 0 | (1 << peer->minpoll), ntp_minpkt); |
2980 | 0 | if (peer->burst > 0) { |
2981 | 0 | if (peer->nextdate > current_time) |
2982 | 0 | return; |
2983 | 0 | #ifdef REFCLOCK |
2984 | 0 | else if (peer->flags & FLAG_REFCLOCK) |
2985 | 0 | peer->nextdate = current_time + RESP_DELAY; |
2986 | 0 | #endif /* REFCLOCK */ |
2987 | 0 | else |
2988 | 0 | peer->nextdate = utemp; |
2989 | |
2990 | | #ifdef AUTOKEY |
2991 | | /* |
2992 | | * If a burst is not in progress and a crypto response message |
2993 | | * is pending, delay 2 s, but only if this is a new interval. |
2994 | | */ |
2995 | | } else if (peer->cmmd != NULL) { |
2996 | | if (peer->nextdate > current_time) { |
2997 | | if (peer->nextdate + ntp_minpkt != utemp) |
2998 | | peer->nextdate = utemp; |
2999 | | } else { |
3000 | | peer->nextdate = utemp; |
3001 | | } |
3002 | | #endif /* AUTOKEY */ |
3003 | | |
3004 | | /* |
3005 | | * The ordinary case. If a retry, use minpoll; if unreachable, |
3006 | | * use host poll; otherwise, use the minimum of host and peer |
3007 | | * polls. In other words, oversampling is okay but |
3008 | | * undersampling is evil. Use the maximum of this value and the |
3009 | | * headway. If the average headway is greater than the headway |
3010 | | * threshold, increase the headway by the minimum interval. |
3011 | | */ |
3012 | 0 | } else { |
3013 | 0 | if (peer->retry > 0) |
3014 | 0 | hpoll = peer->minpoll; |
3015 | 0 | else |
3016 | 0 | hpoll = min(peer->ppoll, peer->hpoll); |
3017 | 0 | #ifdef REFCLOCK |
3018 | 0 | if (peer->flags & FLAG_REFCLOCK) |
3019 | 0 | next = 1 << hpoll; |
3020 | 0 | else |
3021 | 0 | #endif /* REFCLOCK */ |
3022 | 0 | next = ((0x1000UL | (ntp_random() & 0x0ff)) << |
3023 | 0 | hpoll) >> 12; |
3024 | 0 | next += peer->outdate; |
3025 | 0 | if (next > utemp) |
3026 | 0 | peer->nextdate = next; |
3027 | 0 | else |
3028 | 0 | peer->nextdate = utemp; |
3029 | 0 | if (peer->throttle > (1 << peer->minpoll)) |
3030 | 0 | peer->nextdate += ntp_minpkt; |
3031 | 0 | } |
3032 | 0 | DPRINTF(2, ("poll_update: at %lu %s poll %d burst %d retry %d head %d early %lu next %lu\n", |
3033 | 0 | current_time, ntoa(&peer->srcadr), peer->hpoll, |
3034 | 0 | peer->burst, peer->retry, peer->throttle, |
3035 | 0 | utemp - current_time, peer->nextdate - |
3036 | 0 | current_time)); |
3037 | 0 | } |
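/*
 * A small self-contained sketch of the poll scheduling arithmetic used
 * above: clamp the host poll between minpoll and maxpoll, then compute a
 * lightly randomized interval of about 2^hpoll seconds (the 8 random
 * bits add up to roughly 6% of headroom so clients do not all fire at
 * once). rand() stands in for ntpd's ntp_random(); the expression itself
 * mirrors the one in poll_update().
 */
#include <stdio.h>
#include <stdlib.h>

static unsigned long
next_interval_demo(unsigned char mpoll, unsigned char minpoll,
		   unsigned char maxpoll)
{
	unsigned char hpoll;

	/* clamp the poll exponent between minpoll and maxpoll */
	hpoll = mpoll;
	if (hpoll > maxpoll)
		hpoll = maxpoll;
	if (hpoll < minpoll)
		hpoll = minpoll;

	/* randomized 2^hpoll seconds, as in poll_update() */
	return ((0x1000UL | (rand() & 0x0ff)) << hpoll) >> 12;
}

int
main(void)
{
	/* a poll exponent of 6 yields roughly 64..68 s */
	printf("next poll in ~%lu s\n", next_interval_demo(6, 4, 10));
	return 0;
}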
3038 | | |
3039 | | |
3040 | | /* |
3041 | | * peer_clear - clear peer filter registers. See Section 3.4.8 of the |
3042 | | * spec. |
3043 | | */ |
3044 | | void |
3045 | | peer_clear( |
3046 | | struct peer *peer, /* peer structure */ |
3047 | | const char *ident /* tally lights */ |
3048 | | ) |
3049 | 0 | { |
3050 | 0 | u_char u; |
3051 | 0 | l_fp bxmt = peer->bxmt; /* bcast clients retain this! */ |
3052 | |
3053 | | #ifdef AUTOKEY |
3054 | | /* |
3055 | | * If cryptographic credentials have been acquired, toss them to |
3056 | | * Valhalla. Note that autokeys are ephemeral, in that they are |
3057 | | * tossed immediately upon use. Therefore, the keylist can be |
3058 | | * purged anytime without needing to preserve random keys. Note |
3059 | | * that, if the peer is purged, the cryptographic variables are |
3060 | | * purged, too. This makes it much harder to sneak in some |
3061 | | * unauthenticated data in the clock filter. |
3062 | | */ |
3063 | | key_expire(peer); |
3064 | | if (peer->iffval != NULL) |
3065 | | BN_free(peer->iffval); |
3066 | | value_free(&peer->cookval); |
3067 | | value_free(&peer->recval); |
3068 | | value_free(&peer->encrypt); |
3069 | | value_free(&peer->sndval); |
3070 | | if (peer->cmmd != NULL) |
3071 | | free(peer->cmmd); |
3072 | | if (peer->subject != NULL) |
3073 | | free(peer->subject); |
3074 | | if (peer->issuer != NULL) |
3075 | | free(peer->issuer); |
3076 | | #endif /* AUTOKEY */ |
3077 | | |
3078 | | /* |
3079 | | * Clear all values, including the optional crypto values above. |
3080 | | */ |
3081 | 0 | memset(CLEAR_TO_ZERO(peer), 0, LEN_CLEAR_TO_ZERO(peer)); |
3082 | 0 | peer->ppoll = peer->maxpoll; |
3083 | 0 | peer->hpoll = peer->minpoll; |
3084 | 0 | peer->disp = MAXDISPERSE; |
3085 | 0 | peer->flash = peer_unfit(peer); |
3086 | 0 | peer->jitter = LOGTOD(sys_precision); |
3087 | | |
3088 | | /* Don't throw away our broadcast replay protection */ |
3089 | 0 | if (peer->hmode == MODE_BCLIENT) |
3090 | 0 | peer->bxmt = bxmt; |
3091 | | |
3092 | | /* |
3093 | | * If interleave mode, initialize the alternate origin switch. |
3094 | | */ |
3095 | 0 | if (peer->flags & FLAG_XLEAVE) |
3096 | 0 | peer->flip = 1; |
3097 | 0 | for (u = 0; u < NTP_SHIFT; u++) { |
3098 | 0 | peer->filter_order[u] = u; |
3099 | 0 | peer->filter_disp[u] = MAXDISPERSE; |
3100 | 0 | } |
3101 | 0 | #ifdef REFCLOCK |
3102 | 0 | if (!(peer->flags & FLAG_REFCLOCK)) { |
3103 | 0 | #endif |
3104 | 0 | peer->leap = LEAP_NOTINSYNC; |
3105 | 0 | peer->stratum = STRATUM_UNSPEC; |
3106 | 0 | memcpy(&peer->refid, ident, 4); |
3107 | 0 | #ifdef REFCLOCK |
3108 | 0 | } else { |
3109 | | /* Clear refclock sample filter */ |
3110 | 0 | peer->procptr->codeproc = 0; |
3111 | 0 | peer->procptr->coderecv = 0; |
3112 | 0 | } |
3113 | 0 | #endif |
3114 | | |
3115 | | /* |
3116 | | * During initialization use the association count to spread out |
3117 | | * the polls at one-second intervals. Passive associations' |
3118 | | * first poll is delayed by the "discard minimum" to avoid rate |
3119 | | * limiting. Other post-startup new or cleared associations |
3120 | | * randomize the first poll over the minimum poll interval to |
3121 | | * avoid implosion. |
3122 | | */ |
3123 | 0 | peer->nextdate = peer->update = peer->outdate = current_time; |
3124 | 0 | if (initializing) { |
3125 | 0 | peer->nextdate += peer_associations; |
3126 | 0 | } else if (MODE_PASSIVE == peer->hmode) { |
3127 | 0 | peer->nextdate += ntp_minpkt; |
3128 | 0 | } else { |
3129 | 0 | peer->nextdate += ntp_random() % peer->minpoll; |
3130 | 0 | } |
3131 | | #ifdef AUTOKEY |
3132 | | peer->refresh = current_time + (1 << NTP_REFRESH); |
3133 | | #endif /* AUTOKEY */ |
3134 | 0 | DPRINTF(1, ("peer_clear: at %ld next %ld associd %d refid %s\n", |
3135 | 0 | current_time, peer->nextdate, peer->associd, |
3136 | 0 | ident)); |
3137 | 0 | } |
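/*
 * A compact sketch of the first-poll scheduling policy described in the
 * comment above; the parameters (now, assoc_count, minpkt) are
 * illustrative stand-ins for ntpd's globals, and rand() stands in for
 * ntp_random().
 */
#include <stdio.h>
#include <stdlib.h>

static unsigned long
first_poll_demo(unsigned long now, int initializing, int passive,
		int assoc_count, unsigned long minpkt,
		unsigned char minpoll)
{
	if (initializing)
		return now + assoc_count;	/* spread 1 s per association */
	if (passive)
		return now + minpkt;		/* respect the discard minimum */
	return now + rand() % minpoll;		/* randomize the first poll */
}

int
main(void)
{
	/* five associations at startup: polls land at now+1 .. now+5 */
	printf("%lu\n", first_poll_demo(1000, 1, 0, 5, 2, 6));	/* 1005 */
	return 0;
}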
3138 | | |
3139 | | |
3140 | | /* |
3141 | | * clock_filter - add incoming clock sample to filter register and run |
3142 | | * the filter procedure to find the best sample. |
3143 | | */ |
3144 | | void |
3145 | | clock_filter( |
3146 | | struct peer *peer, /* peer structure pointer */ |
3147 | | double sample_offset, /* clock offset */ |
3148 | | double sample_delay, /* roundtrip delay */ |
3149 | | double sample_disp /* dispersion */ |
3150 | | ) |
3151 | 0 | { |
3152 | 0 | double dst[NTP_SHIFT]; /* distance vector */ |
3153 | 0 | int ord[NTP_SHIFT]; /* index vector */ |
3154 | 0 | int i, j, k, m; |
3155 | 0 | double dtemp, etemp; |
3156 | 0 | char tbuf[80]; |
3157 | | |
3158 | | /* |
3159 | | * A sample consists of the offset, delay, dispersion and epoch |
3160 | | * of arrival. The offset and delay are determined by the on- |
3161 | | * wire protocol. The dispersion grows from the last outbound |
3162 | | * packet to the arrival of this one increased by the sum of the |
3163 | | * peer precision and the system precision as required by the |
3164 | | * error budget. First, shift the new arrival into the shift |
3165 | | * register discarding the oldest one. |
3166 | | */ |
3167 | 0 | j = peer->filter_nextpt; |
3168 | 0 | peer->filter_offset[j] = sample_offset; |
3169 | 0 | peer->filter_delay[j] = sample_delay; |
3170 | 0 | peer->filter_disp[j] = sample_disp; |
3171 | 0 | peer->filter_epoch[j] = current_time; |
3172 | 0 | j = (j + 1) % NTP_SHIFT; |
3173 | 0 | peer->filter_nextpt = j; |
3174 | | |
3175 | | /* |
3176 | | * Update dispersions since the last update and at the same |
3177 | | * time initialize the distance and index lists. Since samples |
3178 | | * become increasingly uncorrelated beyond the Allan intercept, |
3179 | | * only in exceptional cases will an older sample be used. |
3180 | | * Therefore, the distance list uses a compound metric. If the |
3181 | | * dispersion is greater than the maximum dispersion, clamp the |
3182 | | * distance at that value. If the time since the last update is |
3183 | | * less than the Allan intercept use the delay; otherwise, use |
3184 | | * the sum of the delay and dispersion. |
3185 | | */ |
3186 | 0 | dtemp = clock_phi * (current_time - peer->update); |
3187 | 0 | peer->update = current_time; |
3188 | 0 | for (i = NTP_SHIFT - 1; i >= 0; i--) { |
3189 | 0 | if (i != 0) |
3190 | 0 | peer->filter_disp[j] += dtemp; |
3191 | 0 | if (peer->filter_disp[j] >= MAXDISPERSE) { |
3192 | 0 | peer->filter_disp[j] = MAXDISPERSE; |
3193 | 0 | dst[i] = MAXDISPERSE; |
3194 | 0 | } else if (peer->update - peer->filter_epoch[j] > |
3195 | 0 | (u_long)ULOGTOD(allan_xpt)) { |
3196 | 0 | dst[i] = peer->filter_delay[j] + |
3197 | 0 | peer->filter_disp[j]; |
3198 | 0 | } else { |
3199 | 0 | dst[i] = peer->filter_delay[j]; |
3200 | 0 | } |
3201 | 0 | ord[i] = j; |
3202 | 0 | j = (j + 1) % NTP_SHIFT; |
3203 | 0 | } |
3204 | | |
3205 | | /* |
3206 | | * If the clock has stabilized, sort the samples by distance. |
3207 | | */ |
3208 | 0 | if (freq_cnt == 0) { |
3209 | 0 | for (i = 1; i < NTP_SHIFT; i++) { |
3210 | 0 | for (j = 0; j < i; j++) { |
3211 | 0 | if (dst[j] > dst[i]) { |
3212 | 0 | k = ord[j]; |
3213 | 0 | ord[j] = ord[i]; |
3214 | 0 | ord[i] = k; |
3215 | 0 | etemp = dst[j]; |
3216 | 0 | dst[j] = dst[i]; |
3217 | 0 | dst[i] = etemp; |
3218 | 0 | } |
3219 | 0 | } |
3220 | 0 | } |
3221 | 0 | } |
3222 | | |
3223 | | /* |
3224 | | * Copy the index list to the association structure so ntpq |
3225 | | * can see it later. Prune the distance list to leave only |
3226 | | * samples less than the maximum dispersion, which disfavors |
3227 | | * uncorrelated samples older than the Allan intercept. To |
3228 | | * further improve the jitter estimate, keep from the remainder |
3229 | | * only samples less than the maximum distance, but retain at |
3230 | | * least two samples for the jitter calculation. |
3231 | | */ |
3232 | 0 | m = 0; |
3233 | 0 | for (i = 0; i < NTP_SHIFT; i++) { |
3234 | 0 | peer->filter_order[i] = (u_char) ord[i]; |
3235 | 0 | if ( dst[i] >= MAXDISPERSE |
3236 | 0 | || (m >= 2 && dst[i] >= sys_maxdist)) |
3237 | 0 | continue; |
3238 | 0 | m++; |
3239 | 0 | } |
3240 | | |
3241 | | /* |
3242 | | * Compute the dispersion and jitter. The dispersion is weighted |
3243 | | * exponentially by NTP_FWEIGHT (0.5) so it is normalized close |
3244 | | * to 1.0. The jitter is the RMS difference relative to the |
3245 | | * lowest delay sample. |
3246 | | */ |
3247 | 0 | peer->disp = peer->jitter = 0; |
3248 | 0 | k = ord[0]; |
3249 | 0 | for (i = NTP_SHIFT - 1; i >= 0; i--) { |
3250 | 0 | j = ord[i]; |
3251 | 0 | peer->disp = NTP_FWEIGHT * (peer->disp + |
3252 | 0 | peer->filter_disp[j]); |
3253 | 0 | if (i < m) |
3254 | 0 | peer->jitter += DIFF(peer->filter_offset[j], |
3255 | 0 | peer->filter_offset[k]); |
3256 | 0 | } |
3257 | | |
3258 | | /* |
3259 | | * If no acceptable samples remain in the shift register, |
3260 | | * quietly tiptoe home leaving only the dispersion. Otherwise, |
3261 | | * save the offset, delay and jitter. Note the jitter must not |
3262 | | * be less than the precision. |
3263 | | */ |
3264 | 0 | if (m == 0) { |
3265 | 0 | clock_select(); |
3266 | 0 | return; |
3267 | 0 | } |
3268 | 0 | etemp = fabs(peer->offset - peer->filter_offset[k]); |
3269 | 0 | peer->offset = peer->filter_offset[k]; |
3270 | 0 | peer->delay = peer->filter_delay[k]; |
3271 | 0 | if (m > 1) |
3272 | 0 | peer->jitter /= m - 1; |
3273 | 0 | peer->jitter = max(SQRT(peer->jitter), LOGTOD(sys_precision)); |
3274 | | |
3275 | | /* |
3276 | | * If the new sample and the current sample are both valid |
3277 | | * and the difference between their offsets exceeds CLOCK_SGATE |
3278 | | * (3) times the jitter and the interval between them is less |
3279 | | * than twice the host poll interval, consider the new sample |
3280 | | * a popcorn spike and ignore it. |
3281 | | */ |
3282 | 0 | if ( peer->disp < sys_maxdist |
3283 | 0 | && peer->filter_disp[k] < sys_maxdist |
3284 | 0 | && etemp > CLOCK_SGATE * peer->jitter |
3285 | 0 | && peer->filter_epoch[k] - peer->epoch |
3286 | 0 | < 2. * ULOGTOD(peer->hpoll)) { |
3287 | 0 | snprintf(tbuf, sizeof(tbuf), "%.6f s", etemp); |
3288 | 0 | report_event(PEVNT_POPCORN, peer, tbuf); |
3289 | 0 | return; |
3290 | 0 | } |
3291 | | |
3292 | | /* |
3293 | | * A new minimum sample is useful only if it is later than the |
3294 | | * last one used. In this design the maximum lifetime of any |
3295 | | * sample is not greater than eight times the poll interval, so |
3296 | | * the maximum interval between minimum samples is eight |
3297 | | * packets. |
3298 | | */ |
3299 | 0 | if (peer->filter_epoch[k] <= peer->epoch) { |
3300 | 0 | DPRINTF(2, ("clock_filter: old sample %lu\n", current_time - |
3301 | 0 | peer->filter_epoch[k])); |
3302 | 0 | return; |
3303 | 0 | } |
3304 | 0 | peer->epoch = peer->filter_epoch[k]; |
3305 | | |
3306 | | /* |
3307 | | * The mitigated sample statistics are saved for later |
3308 | | * processing. If not synchronized or not in a burst, tickle the |
3309 | | * clock select algorithm. |
3310 | | */ |
3311 | 0 | record_peer_stats(&peer->srcadr, ctlpeerstatus(peer), |
3312 | 0 | peer->offset, peer->delay, peer->disp, peer->jitter); |
3313 | 0 | DPRINTF(1, ("clock_filter: n %d off %.6f del %.6f dsp %.6f jit %.6f\n", |
3314 | 0 | m, peer->offset, peer->delay, peer->disp, |
3315 | 0 | peer->jitter)); |
3316 | 0 | if (peer->burst == 0 || sys_leap == LEAP_NOTINSYNC) |
3317 | 0 | clock_select(); |
3318 | 0 | } |
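/*
 * A standalone sketch of the dispersion and jitter computations above:
 * the dispersion is an exponential average weighted by 0.5 per stage
 * (NTP_FWEIGHT), and the jitter is the RMS difference of the m usable
 * offsets against the lowest-distance sample. The plain arrays and
 * NSTAGE are illustrative; in ntpd these live in the peer structure
 * (NTP_SHIFT stages) and the jitter is additionally floored at the
 * system precision.
 */
#include <math.h>
#include <stdio.h>

#define NSTAGE		8
#define FWEIGHT_DEMO	0.5		/* NTP_FWEIGHT */

static void
filter_stats_demo(const double offset[NSTAGE], const double disp[NSTAGE],
		  const int ord[NSTAGE],	/* indices sorted by distance */
		  int m,			/* number of usable samples */
		  double *p_disp, double *p_jitter)
{
	int i, j, k;

	*p_disp = 0;
	*p_jitter = 0;
	k = ord[0];			/* best (lowest-distance) sample */
	for (i = NSTAGE - 1; i >= 0; i--) {
		j = ord[i];
		*p_disp = FWEIGHT_DEMO * (*p_disp + disp[j]);
		if (i < m)
			*p_jitter += (offset[j] - offset[k]) *
			    (offset[j] - offset[k]);
	}
	if (m > 1)
		*p_jitter /= m - 1;
	*p_jitter = sqrt(*p_jitter);
}

int
main(void)
{
	double off[NSTAGE] = { .001, .002, .0015, .003, .001, .002, .0025, .001 };
	double dsp[NSTAGE] = { .01, .02, .015, .03, .01, .02, .025, .01 };
	int ord[NSTAGE] = { 0, 2, 4, 7, 1, 5, 6, 3 };
	double d, jit;

	filter_stats_demo(off, dsp, ord, 4, &d, &jit);
	printf("disp %.6f jitter %.6f\n", d, jit);
	return 0;
}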
3319 | | |
3320 | | |
3321 | | /* |
3322 | | * clock_select - find the pick-of-the-litter clock |
3323 | | * |
3324 | | * LOCKCLOCK: (1) If the local clock is the prefer peer, it will always |
3325 | | * be enabled, even if declared falseticker, (2) only the prefer peer |
3326 | | * can be selected as the system peer, (3) if the external source is |
3327 | | * down, the system leap bits are set to 11 and the stratum set to |
3328 | | * infinity. |
3329 | | */ |
3330 | | void |
3331 | | clock_select(void) |
3332 | 0 | { |
3333 | 0 | struct peer *peer; |
3334 | 0 | int i, j, k, n; |
3335 | 0 | int nlist, nl2; |
3336 | 0 | int allow; |
3337 | 0 | int speer; |
3338 | 0 | double d, e, f, g; |
3339 | 0 | double high, low; |
3340 | 0 | double speermet; |
3341 | 0 | double orphmet = 2.0 * U_INT32_MAX; /* 2x is greater than any metric */ |
3342 | 0 | struct endpoint endp; |
3343 | 0 | struct peer *osys_peer; |
3344 | 0 | struct peer *sys_prefer = NULL; /* prefer peer */ |
3345 | 0 | struct peer *typesystem = NULL; |
3346 | 0 | struct peer *typeorphan = NULL; |
3347 | 0 | #ifdef REFCLOCK |
3348 | 0 | struct peer *typeacts = NULL; |
3349 | 0 | struct peer *typelocal = NULL; |
3350 | 0 | struct peer *typepps = NULL; |
3351 | 0 | #endif /* REFCLOCK */ |
3352 | 0 | static struct endpoint *endpoint = NULL; |
3353 | 0 | static int *indx = NULL; |
3354 | 0 | static peer_select *peers = NULL; |
3355 | 0 | static u_int endpoint_size = 0; |
3356 | 0 | static u_int peers_size = 0; |
3357 | 0 | static u_int indx_size = 0; |
3358 | 0 | size_t octets; |
3359 | | |
3360 | | /* |
3361 | | * Initialize and create endpoint, index and peer lists big |
3362 | | * enough to handle all associations. |
3363 | | */ |
3364 | 0 | osys_peer = sys_peer; |
3365 | 0 | sys_survivors = 0; |
3366 | | #ifdef LOCKCLOCK |
3367 | | set_sys_leap(LEAP_NOTINSYNC); |
3368 | | sys_stratum = STRATUM_UNSPEC; |
3369 | | memcpy(&sys_refid, "DOWN", 4); |
3370 | | #endif /* LOCKCLOCK */ |
3371 | | |
3372 | | /* |
3373 | | * Allocate dynamic space depending on the number of |
3374 | | * associations. |
3375 | | */ |
3376 | 0 | nlist = 1; |
3377 | 0 | for (peer = peer_list; peer != NULL; peer = peer->p_link) |
3378 | 0 | nlist++; |
3379 | 0 | endpoint_size = ALIGNED_SIZE(nlist * 2 * sizeof(*endpoint)); |
3380 | 0 | peers_size = ALIGNED_SIZE(nlist * sizeof(*peers)); |
3381 | 0 | indx_size = ALIGNED_SIZE(nlist * 2 * sizeof(*indx)); |
3382 | 0 | octets = endpoint_size + peers_size + indx_size; |
3383 | 0 | endpoint = erealloc(endpoint, octets); |
3384 | 0 | peers = INC_ALIGNED_PTR(endpoint, endpoint_size); |
3385 | 0 | indx = INC_ALIGNED_PTR(peers, peers_size); |
3386 | | |
3387 | | /* |
3388 | | * Initially, we populate the island with all the riffraff peers |
3389 | | * that happen to be lying around. Those with seriously |
3390 | | * defective clocks are immediately booted off the island. Then, |
3391 | | * the falsetickers are culled and put to sea. The truechimers |
3392 | | * remaining are subject to repeated rounds where the most |
3393 | | * unpopular at each round is kicked off. When the population |
3394 | | * has dwindled to sys_minclock, the survivors split a million |
3395 | | * bucks and collectively crank the chimes. |
3396 | | */ |
3397 | 0 | nlist = nl2 = 0; /* none yet */ |
3398 | 0 | for (peer = peer_list; peer != NULL; peer = peer->p_link) { |
3399 | 0 | peer->new_status = CTL_PST_SEL_REJECT; |
3400 | | |
3401 | | /* |
3402 | | * Leave the island immediately if the peer is |
3403 | | * unfit to synchronize. |
3404 | | */ |
3405 | 0 | if (peer_unfit(peer)) { |
3406 | 0 | continue; |
3407 | 0 | } |
3408 | | |
3409 | | /* |
3410 | | * If this peer is an orphan parent, elect the |
3411 | | * one with the lowest metric defined as the |
3412 | | * IPv4 address or the first 64 bits of the |
3413 | | * hashed IPv6 address. To ensure convergence |
3414 | | * on the same selected orphan, consider as |
3415 | | * well that this system may have the lowest |
3416 | | * metric and be the orphan parent. If this |
3417 | | * system wins, sys_peer will be NULL to trigger |
3418 | | * orphan mode in timer(). |
3419 | | */ |
3420 | 0 | if (peer->stratum == sys_orphan) { |
3421 | 0 | u_int32 localmet; |
3422 | 0 | u_int32 peermet; |
3423 | |
3424 | 0 | if (peer->dstadr != NULL) |
3425 | 0 | localmet = ntohl(peer->dstadr->addr_refid); |
3426 | 0 | else |
3427 | 0 | localmet = U_INT32_MAX; |
3428 | 0 | peermet = ntohl(addr2refid(&peer->srcadr)); |
3429 | 0 | if (peermet < localmet && peermet < orphmet) { |
3430 | 0 | typeorphan = peer; |
3431 | 0 | orphmet = peermet; |
3432 | 0 | } |
3433 | 0 | continue; |
3434 | 0 | } |
3435 | | |
3436 | | /* |
3437 | | * If this peer could have the orphan parent |
3438 | | * as a synchronization ancestor, exclude it |
3439 | | * from selection to avoid forming a |
3440 | | * synchronization loop within the orphan mesh, |
3441 | | * triggering stratum climb to infinity |
3442 | | * instability. Peers at stratum higher than |
3443 | | * the orphan stratum could have the orphan |
3444 | | * parent in ancestry so are excluded. |
3445 | | * See http://bugs.ntp.org/2050 |
3446 | | */ |
3447 | 0 | if (peer->stratum > sys_orphan) { |
3448 | 0 | continue; |
3449 | 0 | } |
3450 | 0 | #ifdef REFCLOCK |
3451 | | /* |
3452 | | * The following are special cases. We deal |
3453 | | * with them later. |
3454 | | */ |
3455 | 0 | if (!(peer->flags & FLAG_PREFER)) { |
3456 | 0 | switch (peer->refclktype) { |
3457 | 0 | case REFCLK_LOCALCLOCK: |
3458 | 0 | if ( current_time > orphwait |
3459 | 0 | && typelocal == NULL) |
3460 | 0 | typelocal = peer; |
3461 | 0 | continue; |
3462 | | |
3463 | 0 | case REFCLK_ACTS: |
3464 | 0 | if ( current_time > orphwait |
3465 | 0 | && typeacts == NULL) |
3466 | 0 | typeacts = peer; |
3467 | 0 | continue; |
3468 | 0 | } |
3469 | 0 | } |
3470 | 0 | #endif /* REFCLOCK */ |
3471 | | |
3472 | | /* |
3473 | | * If we get this far, the peer can stay on the |
3474 | | * island, but does not yet have the immunity |
3475 | | * idol. |
3476 | | */ |
3477 | 0 | peer->new_status = CTL_PST_SEL_SANE; |
3478 | 0 | f = root_distance(peer); |
3479 | 0 | peers[nlist].peer = peer; |
3480 | 0 | peers[nlist].error = peer->jitter; |
3481 | 0 | peers[nlist].synch = f; |
3482 | 0 | nlist++; |
3483 | | |
3484 | | /* |
3485 | | * Insert each interval endpoint on the unsorted |
3486 | | * endpoint[] list. |
3487 | | */ |
3488 | 0 | e = peer->offset; |
3489 | 0 | endpoint[nl2].type = -1; /* lower end */ |
3490 | 0 | endpoint[nl2].val = e - f; |
3491 | 0 | nl2++; |
3492 | 0 | endpoint[nl2].type = 1; /* upper end */ |
3493 | 0 | endpoint[nl2].val = e + f; |
3494 | 0 | nl2++; |
3495 | 0 | } |
3496 | | /* |
3497 | | * Construct sorted indx[] of endpoint[] indexes ordered by |
3498 | | * offset. |
3499 | | */ |
3500 | 0 | for (i = 0; i < nl2; i++) |
3501 | 0 | indx[i] = i; |
3502 | 0 | for (i = 0; i < nl2; i++) { |
3503 | 0 | endp = endpoint[indx[i]]; |
3504 | 0 | e = endp.val; |
3505 | 0 | k = i; |
3506 | 0 | for (j = i + 1; j < nl2; j++) { |
3507 | 0 | endp = endpoint[indx[j]]; |
3508 | 0 | if (endp.val < e) { |
3509 | 0 | e = endp.val; |
3510 | 0 | k = j; |
3511 | 0 | } |
3512 | 0 | } |
3513 | 0 | if (k != i) { |
3514 | 0 | j = indx[k]; |
3515 | 0 | indx[k] = indx[i]; |
3516 | 0 | indx[i] = j; |
3517 | 0 | } |
3518 | 0 | } |
3519 | 0 | for (i = 0; i < nl2; i++) |
3520 | 0 | DPRINTF(3, ("select: endpoint %2d %.6f\n", |
3521 | 0 | endpoint[indx[i]].type, endpoint[indx[i]].val)); |
3522 | | |
3523 | | /* |
3524 | | * This is the actual algorithm that cleaves the truechimers |
3525 | | * from the falsetickers. The original algorithm was described |
3526 | | * in Keith Marzullo's dissertation, but has been modified for |
3527 | | * better accuracy. |
3528 | | * |
3529 | | * Briefly put, we first assume there are no falsetickers, then |
3530 | | * scan the candidate list first from the low end upwards and |
3531 | | * then from the high end downwards. The scans stop when the |
3532 | | * number of intersections equals the number of candidates less |
3533 | | * the number of falsetickers. If this doesn't happen for a |
3534 | | * given number of falsetickers, we bump the number of |
3535 | | * falsetickers and try again. If the number of falsetickers |
3536 | | * becomes equal to or greater than half the number of |
3537 | | * candidates, the Albanians have won the Byzantine wars and |
3538 | | * correct synchronization is not possible. |
3539 | | * |
3540 | | * Here, nlist is the number of candidates and allow is the |
3541 | | * number of falsetickers. Upon exit, the truechimers are the |
3542 | | * survivors with offsets not less than low and not greater than |
3543 | | * high. There may be none of them. |
3544 | | */ |
3545 | 0 | low = 1e9; |
3546 | 0 | high = -1e9; |
3547 | 0 | for (allow = 0; 2 * allow < nlist; allow++) { |
3548 | | |
3549 | | /* |
3550 | | * Bound the interval (low, high) as the smallest |
3551 | | * interval containing points from the most sources. |
3552 | | */ |
3553 | 0 | n = 0; |
3554 | 0 | for (i = 0; i < nl2; i++) { |
3555 | 0 | low = endpoint[indx[i]].val; |
3556 | 0 | n -= endpoint[indx[i]].type; |
3557 | 0 | if (n >= nlist - allow) |
3558 | 0 | break; |
3559 | 0 | } |
3560 | 0 | n = 0; |
3561 | 0 | for (j = nl2 - 1; j >= 0; j--) { |
3562 | 0 | high = endpoint[indx[j]].val; |
3563 | 0 | n += endpoint[indx[j]].type; |
3564 | 0 | if (n >= nlist - allow) |
3565 | 0 | break; |
3566 | 0 | } |
3567 | | |
3568 | | /* |
3569 | | * If an interval containing truechimers is found, stop. |
3570 | | * If not, increase the number of falsetickers and go |
3571 | | * around again. |
3572 | | */ |
3573 | 0 | if (high > low) |
3574 | 0 | break; |
3575 | 0 | } |
3576 | | |
3577 | | /* |
3578 | | * Clustering algorithm. Whittle candidate list of falsetickers, |
3579 | | * who leave the island immediately. The TRUE peer is always a |
3580 | | * truechimer. We must leave at least one peer to collect the |
3581 | | * million bucks. |
3582 | | * |
3583 | | * We assert the correct time is contained in the interval, but |
3584 | | * the best offset estimate for the interval might not be |
3585 | | * contained in the interval. For this purpose, a truechimer is |
3586 | | * defined as the midpoint of an interval that overlaps the |
3587 | | * intersection interval. |
3588 | | */ |
3589 | 0 | j = 0; |
3590 | 0 | for (i = 0; i < nlist; i++) { |
3591 | 0 | double h; |
3592 | |
3593 | 0 | peer = peers[i].peer; |
3594 | 0 | h = peers[i].synch; |
3595 | 0 | if (( high <= low |
3596 | 0 | || peer->offset + h < low |
3597 | 0 | || peer->offset - h > high |
3598 | 0 | ) && !(peer->flags & FLAG_TRUE)) |
3599 | 0 | continue; |
3600 | | |
3601 | 0 | #ifdef REFCLOCK |
3602 | | /* |
3603 | | * Eligible PPS peers must survive the intersection |
3604 | | * algorithm. Use the first one found, but don't |
3605 | | * include any of them in the cluster population. |
3606 | | */ |
3607 | 0 | if (peer->flags & FLAG_PPS) { |
3608 | 0 | if (typepps == NULL) |
3609 | 0 | typepps = peer; |
3610 | 0 | if (!(peer->flags & FLAG_TSTAMP_PPS)) |
3611 | 0 | continue; |
3612 | 0 | } |
3613 | 0 | #endif /* REFCLOCK */ |
3614 | | |
3615 | 0 | if (j != i) |
3616 | 0 | peers[j] = peers[i]; |
3617 | 0 | j++; |
3618 | 0 | } |
3619 | 0 | nlist = j; |
3620 | | |
3621 | | /* |
3622 | | * If no survivors remain at this point, check for the modem |
3623 | | * driver, local driver or orphan parent, in that order. If |
3624 | | * one is found, nominate it as the only survivor. |
3625 | | * Otherwise, give up and leave the island to the rats. |
3626 | | */ |
3627 | 0 | if (nlist == 0) { |
3628 | 0 | peers[0].error = 0; |
3629 | 0 | peers[0].synch = sys_mindisp; |
3630 | 0 | #ifdef REFCLOCK |
3631 | 0 | if (typeacts != NULL) { |
3632 | 0 | peers[0].peer = typeacts; |
3633 | 0 | nlist = 1; |
3634 | 0 | } else if (typelocal != NULL) { |
3635 | 0 | peers[0].peer = typelocal; |
3636 | 0 | nlist = 1; |
3637 | 0 | } else |
3638 | 0 | #endif /* REFCLOCK */ |
3639 | 0 | if (typeorphan != NULL) { |
3640 | 0 | peers[0].peer = typeorphan; |
3641 | 0 | nlist = 1; |
3642 | 0 | } |
3643 | 0 | } |
3644 | | |
3645 | | /* |
3646 | | * Mark the candidates at this point as truechimers. |
3647 | | */ |
3648 | 0 | for (i = 0; i < nlist; i++) { |
3649 | 0 | peers[i].peer->new_status = CTL_PST_SEL_SELCAND; |
3650 | 0 | DPRINTF(2, ("select: survivor %s %f\n", |
3651 | 0 | stoa(&peers[i].peer->srcadr), peers[i].synch)); |
3652 | 0 | } |
3653 | | |
3654 | | /* |
3655 | | * Now, vote outliers off the island by select jitter weighted |
3656 | | * by root distance. Continue voting as long as there are more |
3657 | | * than sys_minclock survivors and the select jitter of the peer |
3658 | | * with the worst metric is greater than the minimum peer |
3659 | | * jitter. Stop if we are about to discard a TRUE or PREFER |
3660 | | * peer, who of course have the immunity idol. |
3661 | | */ |
3662 | 0 | while (1) { |
3663 | 0 | d = 1e9; |
3664 | 0 | e = -1e9; |
3665 | 0 | g = 0; |
3666 | 0 | k = 0; |
3667 | 0 | for (i = 0; i < nlist; i++) { |
3668 | 0 | if (peers[i].error < d) |
3669 | 0 | d = peers[i].error; |
3670 | 0 | peers[i].seljit = 0; |
3671 | 0 | if (nlist > 1) { |
3672 | 0 | f = 0; |
3673 | 0 | for (j = 0; j < nlist; j++) |
3674 | 0 | f += DIFF(peers[j].peer->offset, |
3675 | 0 | peers[i].peer->offset); |
3676 | 0 | peers[i].seljit = SQRT(f / (nlist - 1)); |
3677 | 0 | } |
3678 | 0 | if (peers[i].seljit * peers[i].synch > e) { |
3679 | 0 | g = peers[i].seljit; |
3680 | 0 | e = peers[i].seljit * peers[i].synch; |
3681 | 0 | k = i; |
3682 | 0 | } |
3683 | 0 | } |
3684 | 0 | g = max(g, LOGTOD(sys_precision)); |
3685 | 0 | if ( nlist <= max(1, sys_minclock) |
3686 | 0 | || g <= d |
3687 | 0 | || ((FLAG_TRUE | FLAG_PREFER) & peers[k].peer->flags)) |
3688 | 0 | break; |
3689 | | |
3690 | 0 | DPRINTF(3, ("select: drop %s seljit %.6f jit %.6f\n", |
3691 | 0 | ntoa(&peers[k].peer->srcadr), g, d)); |
3692 | 0 | if (nlist > sys_maxclock) |
3693 | 0 | peers[k].peer->new_status = CTL_PST_SEL_EXCESS; |
3694 | 0 | for (j = k + 1; j < nlist; j++) |
3695 | 0 | peers[j - 1] = peers[j]; |
3696 | 0 | nlist--; |
3697 | 0 | } |
3698 | | |
3699 | | /* |
3700 | | * What remains is a list usually not greater than sys_minclock |
3701 | | * peers. Note that unsynchronized peers cannot survive this |
3702 | | * far. Count and mark these survivors. |
3703 | | * |
3704 | | * While at it, count the number of leap warning bits found. |
3705 | | * This will be used later to vote the system leap warning bit. |
3706 | | * If a leap warning bit is found on a reference clock, the vote |
3707 | | * is always won. |
3708 | | * |
3709 | | * Choose the system peer using a hybrid metric composed of the |
3710 | | * selection jitter scaled by the root distance augmented by |
3711 | | * stratum scaled by sys_mindisp (.001 by default). The goal of |
3712 | | * the small stratum factor is to avoid clockhop between a |
3713 | | * reference clock and a network peer which has a refclock and |
3714 | | * is using an older ntpd, which does not floor sys_rootdisp at |
3715 | | * sys_mindisp. |
3716 | | * |
3717 | | * In contrast, ntpd 4.2.6 and earlier used stratum primarily |
3718 | | * in selecting the system peer, using a weight of 1 second of |
3719 | | * additional root distance per stratum. This heavy bias is no |
3720 | | * longer appropriate, as the scaled root distance provides a |
3721 | | * more rational metric carrying the cumulative error budget. |
3722 | | */ |
3723 | 0 | e = 1e9; |
3724 | 0 | speer = 0; |
3725 | 0 | leap_vote_ins = 0; |
3726 | 0 | leap_vote_del = 0; |
3727 | 0 | for (i = 0; i < nlist; i++) { |
3728 | 0 | peer = peers[i].peer; |
3729 | 0 | peer->unreach = 0; |
3730 | 0 | peer->new_status = CTL_PST_SEL_SYNCCAND; |
3731 | 0 | sys_survivors++; |
3732 | 0 | if (peer->leap == LEAP_ADDSECOND) { |
3733 | 0 | if (peer->flags & FLAG_REFCLOCK) |
3734 | 0 | leap_vote_ins = nlist; |
3735 | 0 | else if (leap_vote_ins < nlist) |
3736 | 0 | leap_vote_ins++; |
3737 | 0 | } |
3738 | 0 | if (peer->leap == LEAP_DELSECOND) { |
3739 | 0 | if (peer->flags & FLAG_REFCLOCK) |
3740 | 0 | leap_vote_del = nlist; |
3741 | 0 | else if (leap_vote_del < nlist) |
3742 | 0 | leap_vote_del++; |
3743 | 0 | } |
3744 | 0 | if (peer->flags & FLAG_PREFER) |
3745 | 0 | sys_prefer = peer; |
3746 | 0 | speermet = peers[i].seljit * peers[i].synch + |
3747 | 0 | peer->stratum * sys_mindisp; |
3748 | 0 | if (speermet < e) { |
3749 | 0 | e = speermet; |
3750 | 0 | speer = i; |
3751 | 0 | } |
3752 | 0 | } |
3753 | | |
3754 | | /* |
3755 | | * Unless there are at least sys_misane survivors, leave the |
3756 | | * building dark. Otherwise, do a clockhop dance. Ordinarily, |
3757 | | * use the selected survivor speer. However, if the current |
3758 | | * system peer is not speer, stay with the current system peer |
3759 | | * as long as it doesn't get too old or too ugly. |
3760 | | */ |
3761 | 0 | if (nlist > 0 && nlist >= sys_minsane) { |
3762 | 0 | double x; |
3763 | |
3764 | 0 | typesystem = peers[speer].peer; |
3765 | 0 | if (osys_peer == NULL || osys_peer == typesystem) { |
3766 | 0 | sys_clockhop = 0; |
3767 | 0 | } else if ((x = fabs(typesystem->offset - |
3768 | 0 | osys_peer->offset)) < sys_mindisp) { |
3769 | 0 | if (sys_clockhop == 0) |
3770 | 0 | sys_clockhop = sys_mindisp; |
3771 | 0 | else |
3772 | 0 | sys_clockhop *= .5; |
3773 | 0 | DPRINTF(1, ("select: clockhop %d %.6f %.6f\n", |
3774 | 0 | j, x, sys_clockhop)); |
3775 | 0 | if (fabs(x) < sys_clockhop) |
3776 | 0 | typesystem = osys_peer; |
3777 | 0 | else |
3778 | 0 | sys_clockhop = 0; |
3779 | 0 | } else { |
3780 | 0 | sys_clockhop = 0; |
3781 | 0 | } |
3782 | 0 | } |
3783 | | |
3784 | | /* |
3785 | | * Mitigation rules of the game. We have the pick of the |
3786 | | * litter in typesystem if any survivors are left. If |
3787 | | * there is a prefer peer, use its offset and jitter. |
3788 | | * Otherwise, use the combined offset and jitter of all survivors. |
3789 | | */ |
3790 | 0 | if (typesystem != NULL) { |
3791 | 0 | if (sys_prefer == NULL) { |
3792 | 0 | typesystem->new_status = CTL_PST_SEL_SYSPEER; |
3793 | 0 | clock_combine(peers, sys_survivors, speer); |
3794 | 0 | } else { |
3795 | 0 | typesystem = sys_prefer; |
3796 | 0 | sys_clockhop = 0; |
3797 | 0 | typesystem->new_status = CTL_PST_SEL_SYSPEER; |
3798 | 0 | sys_offset = typesystem->offset; |
3799 | 0 | sys_jitter = typesystem->jitter; |
3800 | 0 | } |
3801 | 0 | DPRINTF(1, ("select: combine offset %.9f jitter %.9f\n", |
3802 | 0 | sys_offset, sys_jitter)); |
3803 | 0 | } |
3804 | 0 | #ifdef REFCLOCK |
3805 | | /* |
3806 | | * If a PPS driver is lit and the combined offset is less than |
3807 | | * 0.4 s, select the driver as the PPS peer and use its offset |
3808 | | * and jitter. However, if this is the atom driver, use it only |
3809 | | * if there is a prefer peer or there are no survivors and none |
3810 | | * are required. |
3811 | | */ |
3812 | 0 | if ( typepps != NULL |
3813 | 0 | && fabs(sys_offset) < 0.4 |
3814 | 0 | && ( typepps->refclktype != REFCLK_ATOM_PPS |
3815 | 0 | || ( typepps->refclktype == REFCLK_ATOM_PPS |
3816 | 0 | && ( sys_prefer != NULL |
3817 | 0 | || (typesystem == NULL && sys_minsane == 0))))) { |
3818 | 0 | typesystem = typepps; |
3819 | 0 | sys_clockhop = 0; |
3820 | 0 | typesystem->new_status = CTL_PST_SEL_PPS; |
3821 | 0 | sys_offset = typesystem->offset; |
3822 | 0 | sys_jitter = typesystem->jitter; |
3823 | 0 | DPRINTF(1, ("select: pps offset %.9f jitter %.9f\n", |
3824 | 0 | sys_offset, sys_jitter)); |
3825 | 0 | } |
3826 | 0 | #endif /* REFCLOCK */ |
3827 | | |
3828 | | /* |
3829 | | * If there are no survivors at this point, there is no |
3830 | | * system peer. If so and this is an old update, keep the |
3831 | | * current statistics, but do not update the clock. |
3832 | | */ |
3833 | 0 | if (typesystem == NULL) { |
3834 | 0 | if (osys_peer != NULL) { |
3835 | 0 | if (sys_orphwait > 0) |
3836 | 0 | orphwait = current_time + sys_orphwait; |
3837 | 0 | report_event(EVNT_NOPEER, NULL, NULL); |
3838 | 0 | } |
3839 | 0 | sys_peer = NULL; |
3840 | 0 | for (peer = peer_list; peer != NULL; peer = peer->p_link) |
3841 | 0 | peer->status = peer->new_status; |
3842 | 0 | return; |
3843 | 0 | } |
3844 | | |
3845 | | /* |
3846 | | * Do not use old data, as this may mess up the clock discipline |
3847 | | * stability. |
3848 | | */ |
3849 | 0 | if (typesystem->epoch <= sys_epoch) |
3850 | 0 | return; |
3851 | | |
3852 | | /* |
3853 | | * We have found the alpha male. Wind the clock. |
3854 | | */ |
3855 | 0 | if (osys_peer != typesystem) |
3856 | 0 | report_event(PEVNT_NEWPEER, typesystem, NULL); |
3857 | 0 | for (peer = peer_list; peer != NULL; peer = peer->p_link) |
3858 | 0 | peer->status = peer->new_status; |
3859 | 0 | clock_update(typesystem); |
3860 | 0 | } |
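/*
 * A stripped-down sketch of the intersection (selection) step commented
 * above: each candidate contributes the interval
 * [offset - rootdist, offset + rootdist]; the loop widens the allowed
 * falseticker count until an interval covered by at least
 * (candidates - falsetickers) sources is found. This is a toy model of
 * clock_select(), not the ntpd code itself (no cluster pruning, no
 * special peers, qsort() instead of the in-place selection sort).
 */
#include <stdio.h>
#include <stdlib.h>

struct ep_demo {
	double	val;
	int	type;		/* -1 = lower edge, +1 = upper edge */
};

static int
cmp_ep(const void *a, const void *b)
{
	double d = ((const struct ep_demo *)a)->val -
		   ((const struct ep_demo *)b)->val;
	return (d > 0) - (d < 0);
}

/* Return 1 and set *lo/*hi if a majority intersection exists. */
static int
intersect_demo(const double *off, const double *dist, int n,
	       double *lo, double *hi)
{
	struct ep_demo *ep = malloc(2 * n * sizeof(*ep));
	int i, allow, count, found = 0;

	if (ep == NULL)
		return 0;
	for (i = 0; i < n; i++) {
		ep[2 * i].val = off[i] - dist[i];
		ep[2 * i].type = -1;
		ep[2 * i + 1].val = off[i] + dist[i];
		ep[2 * i + 1].type = 1;
	}
	qsort(ep, 2 * n, sizeof(*ep), cmp_ep);
	for (allow = 0; 2 * allow < n && !found; allow++) {
		count = 0;
		for (i = 0; i < 2 * n; i++) {	/* scan upward for low */
			count -= ep[i].type;
			*lo = ep[i].val;
			if (count >= n - allow)
				break;
		}
		count = 0;
		for (i = 2 * n - 1; i >= 0; i--) {	/* scan down for high */
			count += ep[i].type;
			*hi = ep[i].val;
			if (count >= n - allow)
				break;
		}
		if (*hi > *lo)
			found = 1;
	}
	free(ep);
	return found;
}

int
main(void)
{
	double off[] = { 0.001, 0.002, 0.500 };	/* third one is a falseticker */
	double dist[] = { 0.010, 0.010, 0.010 };
	double lo, hi;

	if (intersect_demo(off, dist, 3, &lo, &hi))
		printf("truechimer interval [%.3f, %.3f]\n", lo, hi);
	return 0;
}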
3861 | | |
3862 | | |
3863 | | static void |
3864 | | clock_combine( |
3865 | | peer_select * peers, /* survivor list */ |
3866 | | int npeers, /* number of survivors */ |
3867 | | int syspeer /* index of sys.peer */ |
3868 | | ) |
3869 | 0 | { |
3870 | 0 | int i; |
3871 | 0 | double x, y, z, w; |
3872 | |
3873 | 0 | y = z = w = 0; |
3874 | 0 | for (i = 0; i < npeers; i++) { |
3875 | 0 | x = 1. / peers[i].synch; |
3876 | 0 | y += x; |
3877 | 0 | z += x * peers[i].peer->offset; |
3878 | 0 | w += x * DIFF(peers[i].peer->offset, |
3879 | 0 | peers[syspeer].peer->offset); |
3880 | 0 | } |
3881 | 0 | sys_offset = z / y; |
3882 | 0 | sys_jitter = SQRT(w / y + SQUARE(peers[syspeer].seljit)); |
3883 | 0 | } |
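/*
 * A worked example of the combining weights used in clock_combine():
 * each survivor is weighted by the reciprocal of its root distance, and
 * the system jitter adds the system peer's selection jitter in
 * quadrature. Plain doubles stand in for the peer_select entries; the
 * first entry plays the role of the system peer.
 */
#include <math.h>
#include <stdio.h>

int
main(void)
{
	/* two survivors: offsets (s) and root distances (s) */
	double offset[2] = { 0.0010, 0.0040 };
	double synch[2]  = { 0.0100, 0.0400 };	/* weights 100 and 25 */
	double seljit_sys = 0.0005;		/* sys peer selection jitter */
	double x, y = 0, z = 0, w = 0;
	int i;

	for (i = 0; i < 2; i++) {
		x = 1.0 / synch[i];
		y += x;
		z += x * offset[i];
		w += x * (offset[i] - offset[0]) * (offset[i] - offset[0]);
	}
	/* the offset is pulled toward the survivor with the shorter distance */
	printf("combined offset %.6f s\n", z / y);	/* 0.001600 */
	printf("combined jitter %.6f s\n",
	    sqrt(w / y + seljit_sys * seljit_sys));
	return 0;
}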
3884 | | |
3885 | | |
3886 | | /* |
3887 | | * root_distance - compute synchronization distance from peer to root |
3888 | | */ |
3889 | | static double |
3890 | | root_distance( |
3891 | | struct peer *peer /* peer structure pointer */ |
3892 | | ) |
3893 | 0 | { |
3894 | 0 | double dtemp; |
3895 | | |
3896 | | /* |
3897 | | * Root Distance (LAMBDA) is defined as: |
3898 | | * (delta + DELTA)/2 + epsilon + EPSILON + D |
3899 | | * |
3900 | | * where: |
3901 | | * delta is the round-trip delay |
3902 | | * DELTA is the root delay |
3903 | | * epsilon is the peer dispersion |
3904 | | * + (15 usec each second) |
3905 | | * EPSILON is the root dispersion |
3906 | | * D is sys_jitter |
3907 | | * |
3908 | | * NB: Think hard about why we are using these values, and what |
3909 | | * the alternatives are, and the various pros/cons. |
3910 | | * |
3911 | | * DLM thinks these are probably the best choices from any of the |
3912 | | * other worse choices. |
3913 | | */ |
3914 | 0 | dtemp = (peer->delay + peer->rootdelay) / 2 |
3915 | 0 | + peer->disp |
3916 | 0 | + clock_phi * (current_time - peer->update) |
3917 | 0 | + peer->rootdisp |
3918 | 0 | + peer->jitter; |
3919 | | /* |
3920 | | * Careful squeak here. The value returned must be greater than |
3921 | | * the minimum root dispersion in order to avoid clockhop with |
3922 | | * highly precise reference clocks. Note that the root distance |
3923 | | * cannot exceed the sys_maxdist, as this is the cutoff by the |
3924 | | * selection algorithm. |
3925 | | */ |
3926 | 0 | if (dtemp < sys_mindisp) |
3927 | 0 | dtemp = sys_mindisp; |
3928 | 0 | return (dtemp); |
3929 | 0 | } |
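/*
 * A direct transcription of the root distance formula documented above,
 * with the inputs passed as plain arguments. phi corresponds to ntpd's
 * clock_phi (15 PPM by default) and mindisp to the sys_mindisp floor;
 * the function is illustrative, since ntpd reads these from the peer and
 * system variables.
 */
#include <stdio.h>

static double
root_distance_demo(double delay, double rootdelay, double disp,
		   double rootdisp, double jitter,
		   double age, double phi, double mindisp)
{
	double d;

	d = (delay + rootdelay) / 2 + disp + phi * age + rootdisp + jitter;
	return (d < mindisp) ? mindisp : d;
}

int
main(void)
{
	/* 20 ms RTT to a stratum-1 server polled 64 s ago */
	printf("lambda %.6f s\n",
	    root_distance_demo(0.020, 0.0, 0.002, 0.001, 0.0005,
		64.0, 15e-6, 0.001));
	return 0;
}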
3930 | | |
3931 | | |
3932 | | /* |
3933 | | * peer_xmit - send packet for persistent association. |
3934 | | */ |
3935 | | static void |
3936 | | peer_xmit( |
3937 | | struct peer *peer /* peer structure pointer */ |
3938 | | ) |
3939 | 0 | { |
3940 | 0 | struct pkt xpkt; /* transmit packet */ |
3941 | 0 | size_t sendlen, authlen; |
3942 | 0 | keyid_t xkeyid = 0; /* transmit key ID */ |
3943 | 0 | l_fp xmt_tx, xmt_ty; |
3944 | |
3945 | 0 | if (!peer->dstadr) /* drop peers without interface */ |
3946 | 0 | return; |
3947 | | |
3948 | 0 | xpkt.li_vn_mode = PKT_LI_VN_MODE(sys_leap, peer->version, |
3949 | 0 | peer->hmode); |
3950 | 0 | xpkt.stratum = STRATUM_TO_PKT(sys_stratum); |
3951 | 0 | xpkt.ppoll = peer->hpoll; |
3952 | 0 | xpkt.precision = sys_precision; |
3953 | 0 | xpkt.refid = sys_refid; |
3954 | 0 | xpkt.rootdelay = HTONS_FP(DTOFP(sys_rootdelay)); |
3955 | 0 | xpkt.rootdisp = HTONS_FP(DTOUFP(sys_rootdisp)); |
3956 | 0 | HTONL_FP(&sys_reftime, &xpkt.reftime); |
3957 | 0 | HTONL_FP(&peer->rec, &xpkt.org); |
3958 | 0 | HTONL_FP(&peer->dst, &xpkt.rec); |
3959 | | |
3960 | | /* |
3961 | | * If the received packet contains a MAC, the transmitted packet |
3962 | | * is authenticated and contains a MAC. If not, the transmitted |
3963 | | * packet is not authenticated. |
3964 | | * |
3965 | | * It is most important when autokey is in use that the local |
3966 | | * interface IP address be known before the first packet is |
3967 | | * sent. Otherwise, it is not possible to compute a correct MAC |
3968 | | * the recipient will accept. Thus, the I/O semantics have to do |
3969 | | * a little more work. In particular, the wildcard interface |
3970 | | * might not be usable. |
3971 | | */ |
3972 | 0 | sendlen = LEN_PKT_NOMAC; |
3973 | 0 | if ( |
3974 | | #ifdef AUTOKEY |
3975 | | !(peer->flags & FLAG_SKEY) && |
3976 | | #endif /* !AUTOKEY */ |
3977 | 0 | peer->keyid == 0) { |
3978 | | |
3979 | | /* |
3980 | | * Transmit a-priori timestamps |
3981 | | */ |
3982 | 0 | get_systime(&xmt_tx); |
3983 | 0 | if (peer->flip == 0) { /* basic mode */ |
3984 | 0 | peer->aorg = xmt_tx; |
3985 | 0 | HTONL_FP(&xmt_tx, &xpkt.xmt); |
3986 | 0 | } else { /* interleaved modes */ |
3987 | 0 | if (peer->hmode == MODE_BROADCAST) { /* bcst */ |
3988 | 0 | HTONL_FP(&xmt_tx, &xpkt.xmt); |
3989 | 0 | if (peer->flip > 0) |
3990 | 0 | HTONL_FP(&peer->borg, |
3991 | 0 | &xpkt.org); |
3992 | 0 | else |
3993 | 0 | HTONL_FP(&peer->aorg, |
3994 | 0 | &xpkt.org); |
3995 | 0 | } else { /* symmetric */ |
3996 | 0 | if (peer->flip > 0) |
3997 | 0 | HTONL_FP(&peer->borg, |
3998 | 0 | &xpkt.xmt); |
3999 | 0 | else |
4000 | 0 | HTONL_FP(&peer->aorg, |
4001 | 0 | &xpkt.xmt); |
4002 | 0 | } |
4003 | 0 | } |
4004 | 0 | peer->t21_bytes = sendlen; |
4005 | 0 | sendpkt(&peer->srcadr, peer->dstadr, |
4006 | 0 | sys_ttl[(peer->ttl >= sys_ttlmax) ? sys_ttlmax : peer->ttl], |
4007 | 0 | &xpkt, sendlen); |
4008 | 0 | peer->sent++; |
4009 | 0 | peer->throttle += (1 << peer->minpoll) - 2; |
4010 | | |
4011 | | /* |
4012 | | * Capture a-posteriori timestamps |
4013 | | */ |
4014 | 0 | get_systime(&xmt_ty); |
4015 | 0 | if (peer->flip != 0) { /* interleaved modes */ |
4016 | 0 | if (peer->flip > 0) |
4017 | 0 | peer->aorg = xmt_ty; |
4018 | 0 | else |
4019 | 0 | peer->borg = xmt_ty; |
4020 | 0 | peer->flip = -peer->flip; |
4021 | 0 | } |
4022 | 0 | L_SUB(&xmt_ty, &xmt_tx); |
4023 | 0 | LFPTOD(&xmt_ty, peer->xleave); |
4024 | 0 | DPRINTF(1, ("peer_xmit: at %ld %s->%s mode %d len %zu xmt %#010x.%08x\n", |
4025 | 0 | current_time, |
4026 | 0 | peer->dstadr ? stoa(&peer->dstadr->sin) : "-", |
4027 | 0 | stoa(&peer->srcadr), peer->hmode, sendlen, |
4028 | 0 | xmt_tx.l_ui, xmt_tx.l_uf)); |
4029 | 0 | return; |
4030 | 0 | } |
4031 | | |
4032 | | /* |
4033 | | * Authentication is enabled, so the transmitted packet must be |
4034 | | * authenticated. If autokey is enabled, fuss with the various |
4035 | | * modes; otherwise, symmetric key cryptography is used. |
4036 | | */ |
4037 | | #ifdef AUTOKEY |
4038 | | if (peer->flags & FLAG_SKEY) { |
4039 | | struct exten *exten; /* extension field */ |
4040 | | |
4041 | | /* |
4042 | | * The Public Key Dance (PKD): Cryptographic credentials |
4043 | | * are contained in extension fields, each including a |
4044 | | * 4-octet length/code word followed by a 4-octet |
4045 | | * association ID and optional additional data. Optional |
4046 | | * data includes a 4-octet data length field followed by |
4047 | | * the data itself. Request messages are sent from a |
4048 | | * configured association; response messages can be sent |
4049 | | * from a configured association or can take the fast |
4050 | | * path without ever matching an association. Response |
4051 | | * messages have the same code as the request, but have |
4052 | | * a response bit and possibly an error bit set. In this |
4053 | | * implementation, a message may contain no more than |
4054 | | * one command and one or more responses. |
4055 | | * |
4056 | | * Cryptographic session keys include both a public and |
4057 | | * a private component. Request and response messages |
4058 | | * using extension fields are always sent with the |
4059 | | * private component set to zero. Packets without |
4060 | | * extension fields include the private component when |
4061 | | * the session key is generated. |
4062 | | */ |
4063 | | while (1) { |
4064 | | |
4065 | | /* |
4066 | | * Allocate and initialize a keylist if not |
4067 | | * already done. Then, use the list in inverse |
4068 | | * order, discarding keys once used. Keep the |
4069 | | * latest key around until the next one, so |
4070 | | * clients can use client/server packets to |
4071 | | * compute propagation delay. |
4072 | | * |
4073 | | * Note that once a key is used from the list, |
4074 | | * it is retained in the key cache until the |
4075 | | * next key is used. This is to allow a client |
4076 | | * to retrieve the encrypted session key |
4077 | | * identifier to verify authenticity. |
4078 | | * |
4079 | | * If for some reason a key is no longer in the |
4080 | | * key cache, a birthday has happened or the key |
4081 | | * has expired, so the pseudo-random sequence is |
4082 | | * broken. In that case, purge the keylist and |
4083 | | * regenerate it. |
4084 | | */ |
4085 | | if (peer->keynumber == 0) |
4086 | | make_keylist(peer, peer->dstadr); |
4087 | | else |
4088 | | peer->keynumber--; |
4089 | | xkeyid = peer->keylist[peer->keynumber]; |
4090 | | if (authistrusted(xkeyid)) |
4091 | | break; |
4092 | | else |
4093 | | key_expire(peer); |
4094 | | } |
4095 | | peer->keyid = xkeyid; |
4096 | | exten = NULL; |
4097 | | switch (peer->hmode) { |
4098 | | |
4099 | | /* |
4100 | | * In broadcast server mode the autokey values are |
4101 | | * required by the broadcast clients. Push them when a |
4102 | | * new keylist is generated; otherwise, push the |
4103 | | * association message so the client can request them at |
4104 | | * other times. |
4105 | | */ |
4106 | | case MODE_BROADCAST: |
4107 | | if (peer->flags & FLAG_ASSOC) |
4108 | | exten = crypto_args(peer, CRYPTO_AUTO | |
4109 | | CRYPTO_RESP, peer->associd, NULL); |
4110 | | else |
4111 | | exten = crypto_args(peer, CRYPTO_ASSOC | |
4112 | | CRYPTO_RESP, peer->associd, NULL); |
4113 | | break; |
4114 | | |
4115 | | /* |
4116 | | * In symmetric modes the parameter, certificate, |
4117 | | * identity, cookie and autokey exchanges are |
4118 | | * required. The leapsecond exchange is optional. But, a |
4119 | | * peer will not believe the other peer until the other |
4120 | | * peer has synchronized, so the certificate exchange |
4121 | | * might loop until then. If a peer finds a broken |
4122 | | * autokey sequence, it uses the autokey exchange to |
4123 | | * retrieve the autokey values. In any case, if a new |
4124 | | * keylist is generated, the autokey values are pushed. |
4125 | | */ |
4126 | | case MODE_ACTIVE: |
4127 | | case MODE_PASSIVE: |
4128 | | |
4129 | | /* |
4130 | | * Parameter, certificate and identity. |
4131 | | */ |
4132 | | if (!peer->crypto) |
4133 | | exten = crypto_args(peer, CRYPTO_ASSOC, |
4134 | | peer->associd, hostval.ptr); |
4135 | | else if (!(peer->crypto & CRYPTO_FLAG_CERT)) |
4136 | | exten = crypto_args(peer, CRYPTO_CERT, |
4137 | | peer->associd, peer->issuer); |
4138 | | else if (!(peer->crypto & CRYPTO_FLAG_VRFY)) |
4139 | | exten = crypto_args(peer, |
4140 | | crypto_ident(peer), peer->associd, |
4141 | | NULL); |
4142 | | |
4143 | | /* |
4144 | | * Cookie and autokey. We request the cookie |
4145 | | * only when this peer and the other peer |
4146 | | * are synchronized. But, this peer needs the |
4147 | | * autokey values when the cookie is zero. Any |
4148 | | * time we regenerate the key list, we offer the |
4149 | | * autokey values without being asked. If for |
4150 | | * some reason either peer finds a broken |
4151 | | * autokey sequence, the autokey exchange is |
4152 | | * used to retrieve the autokey values. |
4153 | | */ |
4154 | | else if ( sys_leap != LEAP_NOTINSYNC |
4155 | | && peer->leap != LEAP_NOTINSYNC |
4156 | | && !(peer->crypto & CRYPTO_FLAG_COOK)) |
4157 | | exten = crypto_args(peer, CRYPTO_COOK, |
4158 | | peer->associd, NULL); |
4159 | | else if (!(peer->crypto & CRYPTO_FLAG_AUTO)) |
4160 | | exten = crypto_args(peer, CRYPTO_AUTO, |
4161 | | peer->associd, NULL); |
4162 | | else if ( peer->flags & FLAG_ASSOC |
4163 | | && peer->crypto & CRYPTO_FLAG_SIGN) |
4164 | | exten = crypto_args(peer, CRYPTO_AUTO | |
4165 | | CRYPTO_RESP, peer->assoc, NULL); |
4166 | | |
4167 | | /* |
4168 | | * Wait for clock sync, then sign the |
4169 | | * certificate and retrieve the leapsecond |
4170 | | * values. |
4171 | | */ |
4172 | | else if (sys_leap == LEAP_NOTINSYNC) |
4173 | | break; |
4174 | | |
4175 | | else if (!(peer->crypto & CRYPTO_FLAG_SIGN)) |
4176 | | exten = crypto_args(peer, CRYPTO_SIGN, |
4177 | | peer->associd, hostval.ptr); |
4178 | | else if (!(peer->crypto & CRYPTO_FLAG_LEAP)) |
4179 | | exten = crypto_args(peer, CRYPTO_LEAP, |
4180 | | peer->associd, NULL); |
4181 | | break; |
4182 | | |
4183 | | /* |
4184 | | * In client mode the parameter, certificate, identity, |
4185 | | * cookie and sign exchanges are required. The |
4186 | | * leapsecond exchange is optional. If broadcast client |
4187 | | * leapsecond exchange is optional. In broadcast client |
4188 | | * mode the same exchanges are required, except that the |
4189 | | * autokey exchange substitutes for the cookie |
4190 | | * broadcast client finds a broken autokey sequence, it |
4191 | | * uses the autokey exchange to retrieve the autokey |
4192 | | * values. |
4193 | | */ |
4194 | | case MODE_CLIENT: |
4195 | | |
4196 | | /* |
4197 | | * Parameter, certificate and identity. |
4198 | | */ |
4199 | | if (!peer->crypto) |
4200 | | exten = crypto_args(peer, CRYPTO_ASSOC, |
4201 | | peer->associd, hostval.ptr); |
4202 | | else if (!(peer->crypto & CRYPTO_FLAG_CERT)) |
4203 | | exten = crypto_args(peer, CRYPTO_CERT, |
4204 | | peer->associd, peer->issuer); |
4205 | | else if (!(peer->crypto & CRYPTO_FLAG_VRFY)) |
4206 | | exten = crypto_args(peer, |
4207 | | crypto_ident(peer), peer->associd, |
4208 | | NULL); |
4209 | | |
4210 | | /* |
4211 | | * Cookie and autokey. These are requests, but |
4212 | | * we use the peer association ID with autokey |
4213 | | * rather than our own. |
4214 | | */ |
4215 | | else if (!(peer->crypto & CRYPTO_FLAG_COOK)) |
4216 | | exten = crypto_args(peer, CRYPTO_COOK, |
4217 | | peer->associd, NULL); |
4218 | | else if (!(peer->crypto & CRYPTO_FLAG_AUTO)) |
4219 | | exten = crypto_args(peer, CRYPTO_AUTO, |
4220 | | peer->assoc, NULL); |
4221 | | |
4222 | | /* |
4223 | | * Wait for clock sync, then sign the |
4224 | | * certificate and retrieve the leapsecond |
4225 | | * values. |
4226 | | */ |
4227 | | else if (sys_leap == LEAP_NOTINSYNC) |
4228 | | break; |
4229 | | |
4230 | | else if (!(peer->crypto & CRYPTO_FLAG_SIGN)) |
4231 | | exten = crypto_args(peer, CRYPTO_SIGN, |
4232 | | peer->associd, hostval.ptr); |
4233 | | else if (!(peer->crypto & CRYPTO_FLAG_LEAP)) |
4234 | | exten = crypto_args(peer, CRYPTO_LEAP, |
4235 | | peer->associd, NULL); |
4236 | | break; |
4237 | | } |
4238 | | |
4239 | | /* |
4240 | | * Add a queued extension field if present. This is |
4241 | | * always a request message, so the reply ID is already |
4242 | | * in the message. If an error occurs, the error bit is |
4243 | | * lit in the response. |
4244 | | */ |
4245 | | if (peer->cmmd != NULL) { |
4246 | | u_int32 temp32; |
4247 | | |
4248 | | temp32 = CRYPTO_RESP; |
4249 | | peer->cmmd->opcode |= htonl(temp32); |
4250 | | sendlen += crypto_xmit(peer, &xpkt, NULL, |
4251 | | sendlen, peer->cmmd, 0); |
4252 | | free(peer->cmmd); |
4253 | | peer->cmmd = NULL; |
4254 | | } |
4255 | | |
4256 | | /* |
4257 | | * Add an extension field created above. All but the |
4258 | | * autokey response message are request messages. |
4259 | | */ |
4260 | | if (exten != NULL) { |
4261 | | if (exten->opcode != 0) |
4262 | | sendlen += crypto_xmit(peer, &xpkt, |
4263 | | NULL, sendlen, exten, 0); |
4264 | | free(exten); |
4265 | | } |
4266 | | |
4267 | | /* |
4268 | | * Calculate the next session key. Since extension |
4269 | | * fields are present, the cookie value is zero. |
4270 | | */ |
4271 | | if (sendlen > (int)LEN_PKT_NOMAC) { |
4272 | | session_key(&peer->dstadr->sin, &peer->srcadr, |
4273 | | xkeyid, 0, 2); |
4274 | | } |
4275 | | } |
4276 | | #endif /* AUTOKEY */ |
4277 | | |
4278 | | /* |
4279 | | * Transmit a-priori timestamps |
4280 | | */ |
4281 | 0 | get_systime(&xmt_tx); |
4282 | 0 | if (peer->flip == 0) { /* basic mode */ |
4283 | 0 | peer->aorg = xmt_tx; |
4284 | 0 | HTONL_FP(&xmt_tx, &xpkt.xmt); |
4285 | 0 | } else { /* interleaved modes */ |
4286 | 0 | if (peer->hmode == MODE_BROADCAST) { /* bcst */ |
4287 | 0 | HTONL_FP(&xmt_tx, &xpkt.xmt); |
4288 | 0 | if (peer->flip > 0) |
4289 | 0 | HTONL_FP(&peer->borg, &xpkt.org); |
4290 | 0 | else |
4291 | 0 | HTONL_FP(&peer->aorg, &xpkt.org); |
4292 | 0 | } else { /* symmetric */ |
4293 | 0 | if (peer->flip > 0) |
4294 | 0 | HTONL_FP(&peer->borg, &xpkt.xmt); |
4295 | 0 | else |
4296 | 0 | HTONL_FP(&peer->aorg, &xpkt.xmt); |
4297 | 0 | } |
4298 | 0 | } |
4299 | 0 | xkeyid = peer->keyid; |
4300 | 0 | authlen = authencrypt(xkeyid, (u_int32 *)&xpkt, sendlen); |
4301 | 0 | if (authlen == 0) { |
4302 | 0 | report_event(PEVNT_AUTH, peer, "no key"); |
4303 | 0 | peer->flash |= TEST5; /* auth error */ |
4304 | 0 | peer->badauth++; |
4305 | 0 | return; |
4306 | 0 | } |
4307 | 0 | sendlen += authlen; |
4308 | | #ifdef AUTOKEY |
4309 | | if (xkeyid > NTP_MAXKEY) |
4310 | | authtrust(xkeyid, 0); |
4311 | | #endif /* AUTOKEY */ |
4312 | 0 | if (sendlen > sizeof(xpkt)) { |
4313 | 0 | msyslog(LOG_ERR, "peer_xmit: buffer overflow %zu", sendlen); |
4314 | 0 | exit (-1); |
4315 | 0 | } |
4316 | 0 | peer->t21_bytes = sendlen; |
4317 | 0 | sendpkt(&peer->srcadr, peer->dstadr, |
4318 | 0 | sys_ttl[(peer->ttl >= sys_ttlmax) ? sys_ttlmax : peer->ttl], |
4319 | 0 | &xpkt, sendlen); |
4320 | 0 | peer->sent++; |
4321 | 0 | peer->throttle += (1 << peer->minpoll) - 2; |
4322 | | |
4323 | | /* |
4324 | | * Capture a-posteriori timestamps |
4325 | | */ |
4326 | 0 | get_systime(&xmt_ty); |
4327 | 0 | if (peer->flip != 0) { /* interleaved modes */ |
4328 | 0 | if (peer->flip > 0) |
4329 | 0 | peer->aorg = xmt_ty; |
4330 | 0 | else |
4331 | 0 | peer->borg = xmt_ty; |
4332 | 0 | peer->flip = -peer->flip; |
4333 | 0 | } |
4334 | 0 | L_SUB(&xmt_ty, &xmt_tx); |
4335 | 0 | LFPTOD(&xmt_ty, peer->xleave); |
4336 | | #ifdef AUTOKEY |
4337 | | DPRINTF(1, ("peer_xmit: at %ld %s->%s mode %d keyid %08x len %zu index %d\n", |
4338 | | current_time, latoa(peer->dstadr), |
4339 | | ntoa(&peer->srcadr), peer->hmode, xkeyid, sendlen, |
4340 | | peer->keynumber)); |
4341 | | #else /* !AUTOKEY follows */ |
4342 | 0 | DPRINTF(1, ("peer_xmit: at %ld %s->%s mode %d keyid %08x len %zu\n", |
4343 | 0 | current_time, peer->dstadr ? |
4344 | 0 | ntoa(&peer->dstadr->sin) : "-", |
4345 | 0 | ntoa(&peer->srcadr), peer->hmode, xkeyid, sendlen)); |
4346 | 0 | #endif /* !AUTOKEY */ |
4347 | |
4348 | 0 | return; |
4349 | 0 | } |
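/*
 * A minimal sketch of the header byte packing used when the transmit
 * packet is built above: the leap indicator, version and mode share one
 * octet (LI in the top two bits, VN in the next three, mode in the low
 * three), which is what ntpd's PKT_LI_VN_MODE() macro produces. The
 * helper name is illustrative.
 */
#include <stdio.h>

static unsigned char
li_vn_mode_demo(unsigned leap, unsigned version, unsigned mode)
{
	return (unsigned char)(((leap & 0x3) << 6) |
			       ((version & 0x7) << 3) |
			       (mode & 0x7));
}

int
main(void)
{
	/* no warning (0), NTPv4, client mode (3) -> 0x23 */
	printf("0x%02x\n", li_vn_mode_demo(0, 4, 3));
	return 0;
}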
4350 | | |
4351 | | |
4352 | | #ifdef LEAP_SMEAR |
4353 | | |
4354 | | static void |
4355 | | leap_smear_add_offs( |
4356 | | l_fp *t, |
4357 | | l_fp *t_recv |
4358 | | ) |
4359 | | { |
4360 | | |
4361 | | L_ADD(t, &leap_smear.offset); |
4362 | | |
4363 | | /* |
4364 | | ** XXX: Should the smear be added to the root dispersion? |
4365 | | */ |
4366 | | |
4367 | | return; |
4368 | | } |
4369 | | |
4370 | | #endif /* LEAP_SMEAR */ |
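/*
 * A toy illustration of the smearing idea behind leap_smear_add_offs():
 * during the smear interval the server advertises an offset that moves
 * smoothly from 0 toward one full second instead of stepping. This is
 * only a sketch with a linear ramp and made-up parameters; ntpd's actual
 * smear interpolation and refid encoding (leap_smear, convertLFPToRefID())
 * live elsewhere.
 */
#include <stdio.h>

/* fraction of the leap second applied at time t, linear ramp */
static double
smear_offset_demo(double t, double t_start, double interval)
{
	if (t <= t_start)
		return 0.0;
	if (t >= t_start + interval)
		return 1.0;
	return (t - t_start) / interval;
}

int
main(void)
{
	/* halfway through a one-day (86400 s) smear interval */
	printf("%.3f s\n", smear_offset_demo(43200.0, 0.0, 86400.0));
	return 0;
}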
4371 | | |
4372 | | |
4373 | | /* |
4374 | | * fast_xmit - Send packet for nonpersistent association. Note that |
4375 | | * neither the source nor the destination can be a broadcast address. |
4376 | | */ |
4377 | | static void |
4378 | | fast_xmit( |
4379 | | struct recvbuf *rbufp, /* receive packet pointer */ |
4380 | | int xmode, /* receive mode */ |
4381 | | keyid_t xkeyid, /* transmit key ID */ |
4382 | | int flags /* restrict mask */ |
4383 | | ) |
4384 | 167 | { |
4385 | 167 | struct pkt xpkt; /* transmit packet structure */ |
4386 | 167 | struct pkt *rpkt; /* receive packet structure */ |
4387 | 167 | l_fp xmt_tx, xmt_ty; |
4388 | 167 | size_t sendlen; |
4389 | | #ifdef AUTOKEY |
4390 | | u_int32 temp32; |
4391 | | #endif |
4392 | | |
4393 | | /* |
4394 | | * Initialize transmit packet header fields from the receive |
4395 | | * buffer provided. We leave the fields intact as received, but |
4396 | | * set the peer poll at the maximum of the receive peer poll and |
4397 | | * the system minimum poll (ntp_minpoll). This is for KoD rate |
4398 | | * control and not strictly specification compliant, but doesn't |
4399 | | * break anything. |
4400 | | * |
4401 | | * If the gazinta was from a multicast address, the gazoutta |
4402 | | * must go out another way. |
4403 | | */ |
4404 | 167 | rpkt = &rbufp->recv_pkt; |
4405 | 167 | if (rbufp->dstadr->flags & INT_MCASTOPEN) |
4406 | 0 | rbufp->dstadr = findinterface(&rbufp->recv_srcadr); |
4407 | | |
4408 | | /* |
4409 | | * If this is a kiss-o'-death (KoD) packet, show leap |
4410 | | * unsynchronized, stratum zero, reference ID the four-character |
4411 | | * kiss code and system root delay. Note we don't reveal the |
4412 | | * local time, so these packets can't be used for |
4413 | | * synchronization. |
4414 | | */ |
4415 | 167 | if (flags & RES_KOD) { |
4416 | 0 | sys_kodsent++; |
4417 | 0 | xpkt.li_vn_mode = PKT_LI_VN_MODE(LEAP_NOTINSYNC, |
4418 | 0 | PKT_VERSION(rpkt->li_vn_mode), xmode); |
4419 | 0 | xpkt.stratum = STRATUM_PKT_UNSPEC; |
4420 | 0 | xpkt.ppoll = max(rpkt->ppoll, ntp_minpoll); |
4421 | 0 | xpkt.precision = rpkt->precision; |
4422 | 0 | memcpy(&xpkt.refid, "RATE", 4); |
4423 | 0 | xpkt.rootdelay = rpkt->rootdelay; |
4424 | 0 | xpkt.rootdisp = rpkt->rootdisp; |
4425 | 0 | xpkt.reftime = rpkt->reftime; |
4426 | 0 | xpkt.org = rpkt->xmt; |
4427 | 0 | xpkt.rec = rpkt->xmt; |
4428 | 0 | xpkt.xmt = rpkt->xmt; |
4429 | | |
4430 | | /* |
4431 | | * This is a normal packet. Use the system variables. |
4432 | | */ |
4433 | 167 | } else { |
4434 | | #ifdef LEAP_SMEAR |
4435 | | /* |
4436 | | * Make copies of the variables which can be affected by smearing. |
4437 | | */ |
4438 | | l_fp this_ref_time; |
4439 | | l_fp this_recv_time; |
4440 | | #endif |
4441 | | |
4442 | | /* |
4443 | | * If we are inside the leap smear interval, add the current smear offset to |
4444 | | * the packet receive time, to the packet transmit time, and, if needed, to the |
4445 | | * reftime so that the reftime is never later than the transmit/receive times. |
4446 | | */ |
4447 | 167 | xpkt.li_vn_mode = PKT_LI_VN_MODE(xmt_leap, |
4448 | 167 | PKT_VERSION(rpkt->li_vn_mode), xmode); |
4449 | | |
4450 | 167 | xpkt.stratum = STRATUM_TO_PKT(sys_stratum); |
4451 | 167 | xpkt.ppoll = max(rpkt->ppoll, ntp_minpoll); |
4452 | 167 | xpkt.precision = sys_precision; |
4453 | 167 | xpkt.refid = sys_refid; |
4454 | 167 | xpkt.rootdelay = HTONS_FP(DTOFP(sys_rootdelay)); |
4455 | 167 | xpkt.rootdisp = HTONS_FP(DTOUFP(sys_rootdisp)); |
4456 | | |
4457 | | #ifdef LEAP_SMEAR |
4458 | | this_ref_time = sys_reftime; |
4459 | | if (leap_smear.in_progress) { |
4460 | | leap_smear_add_offs(&this_ref_time, NULL); |
4461 | | xpkt.refid = convertLFPToRefID(leap_smear.offset); |
4462 | | DPRINTF(2, ("fast_xmit: leap_smear.in_progress: refid %8x, smear %s\n", |
4463 | | ntohl(xpkt.refid), |
4464 | | lfptoa(&leap_smear.offset, 8) |
4465 | | )); |
4466 | | } |
4467 | | HTONL_FP(&this_ref_time, &xpkt.reftime); |
4468 | | #else |
4469 | 167 | HTONL_FP(&sys_reftime, &xpkt.reftime); |
4470 | 167 | #endif |
4471 | | |
4472 | 167 | xpkt.org = rpkt->xmt; |
4473 | | |
4474 | | #ifdef LEAP_SMEAR |
4475 | | this_recv_time = rbufp->recv_time; |
4476 | | if (leap_smear.in_progress) |
4477 | | leap_smear_add_offs(&this_recv_time, NULL); |
4478 | | HTONL_FP(&this_recv_time, &xpkt.rec); |
4479 | | #else |
4480 | 167 | HTONL_FP(&rbufp->recv_time, &xpkt.rec); |
4481 | 167 | #endif |
4482 | | |
4483 | 167 | get_systime(&xmt_tx); |
4484 | | #ifdef LEAP_SMEAR |
4485 | | if (leap_smear.in_progress) |
4486 | | leap_smear_add_offs(&xmt_tx, &this_recv_time); |
4487 | | #endif |
4488 | 167 | HTONL_FP(&xmt_tx, &xpkt.xmt); |
4489 | 167 | } |
4490 | | |
4491 | | #ifdef HAVE_NTP_SIGND |
4492 | | if (flags & RES_MSSNTP) { |
4493 | | send_via_ntp_signd(rbufp, xmode, xkeyid, flags, &xpkt); |
4494 | | return; |
4495 | | } |
4496 | | #endif /* HAVE_NTP_SIGND */ |
4497 | | |
4498 | | /* |
4499 | | * If the received packet contains a MAC, the transmitted packet |
4500 | | * is authenticated and contains a MAC. If not, the transmitted |
4501 | | * packet is not authenticated. |
4502 | | */ |
4503 | 167 | sendlen = LEN_PKT_NOMAC; |
4504 | 167 | if (rbufp->recv_length == sendlen) { |
4505 | 24 | sendpkt(&rbufp->recv_srcadr, rbufp->dstadr, 0, &xpkt, |
4506 | 24 | sendlen); |
4507 | 24 | DPRINTF(1, ("fast_xmit: at %ld %s->%s mode %d len %lu\n", |
4508 | 24 | current_time, stoa(&rbufp->dstadr->sin), |
4509 | 24 | stoa(&rbufp->recv_srcadr), xmode, |
4510 | 24 | (u_long)sendlen)); |
4511 | 24 | return; |
4512 | 24 | } |
4513 | | |
4514 | | /* |
4515 | | * The received packet contains a MAC, so the transmitted packet |
4516 | | * must be authenticated. For symmetric key cryptography, use |
4517 | | * the predefined and trusted symmetric keys to generate the |
4518 | | * cryptosum. For autokey cryptography, use the server private |
4519 | | * value to generate the cookie, which is unique for every |
4520 | | * source-destination-key ID combination. |
4521 | | */ |
4522 | | #ifdef AUTOKEY |
4523 | | if (xkeyid > NTP_MAXKEY) { |
4524 | | keyid_t cookie; |
4525 | | |
4526 | | /* |
4527 | | * The only way to get here is a reply to a legitimate |
4528 | | * client request message, so the mode must be |
4529 | | * MODE_SERVER. If an extension field is present, there |
4530 | | * can be only one and that must be a command. Do what |
4531 | | * is needed, but with a private value of zero so the poor |
4532 | | * jerk can decode it. If no extension field is present, |
4533 | | * use the cookie to generate the session key. |
4534 | | */ |
4535 | | cookie = session_key(&rbufp->recv_srcadr, |
4536 | | &rbufp->dstadr->sin, 0, sys_private, 0); |
4537 | | if ((size_t)rbufp->recv_length > sendlen + MAX_MAC_LEN) { |
4538 | | session_key(&rbufp->dstadr->sin, |
4539 | | &rbufp->recv_srcadr, xkeyid, 0, 2); |
4540 | | temp32 = CRYPTO_RESP; |
4541 | | rpkt->exten[0] |= htonl(temp32); |
4542 | | sendlen += crypto_xmit(NULL, &xpkt, rbufp, |
4543 | | sendlen, (struct exten *)rpkt->exten, |
4544 | | cookie); |
4545 | | } else { |
4546 | | session_key(&rbufp->dstadr->sin, |
4547 | | &rbufp->recv_srcadr, xkeyid, cookie, 2); |
4548 | | } |
4549 | | } |
4550 | | #endif /* AUTOKEY */ |
4551 | 143 | get_systime(&xmt_tx); |
4552 | 143 | sendlen += authencrypt(xkeyid, (u_int32 *)&xpkt, sendlen); |
4553 | | #ifdef AUTOKEY |
4554 | | if (xkeyid > NTP_MAXKEY) |
4555 | | authtrust(xkeyid, 0); |
4556 | | #endif /* AUTOKEY */ |
4557 | 143 | sendpkt(&rbufp->recv_srcadr, rbufp->dstadr, 0, &xpkt, sendlen); |
4558 | 143 | get_systime(&xmt_ty); |
4559 | 143 | L_SUB(&xmt_ty, &xmt_tx); |
4560 | 143 | sys_authdelay = xmt_ty; |
4561 | 143 | DPRINTF(1, ("fast_xmit: at %ld %s->%s mode %d keyid %08x len %lu\n", |
4562 | 143 | current_time, ntoa(&rbufp->dstadr->sin), |
4563 | 143 | ntoa(&rbufp->recv_srcadr), xmode, xkeyid, |
4564 | 143 | (u_long)sendlen)); |
4565 | 143 | } |
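The authentication decision above turns entirely on the received length: exactly LEN_PKT_NOMAC (48 octets) means the request carried no MAC and the reply goes out in the clear; anything longer is treated as authenticated and gets a MAC appended by authencrypt(). A self-contained sketch of that length classification, using the common digest sizes as assumptions:

/* Sketch only: classify an NTP packet by length.  48 is the RFC 5905
 * base header (LEN_PKT_NOMAC); a MAC is a 4-octet key ID plus a 16- or
 * 20-octet digest; a crypto-NAK is a lone zero key ID. */
#include <stddef.h>

#define NTP_HDR_LEN	48

typedef enum { PKT_PLAIN, PKT_CRYPTO_NAK, PKT_MAC, PKT_OTHER } pkt_kind;

static pkt_kind
classify_length(size_t len)
{
	if (len == NTP_HDR_LEN)
		return PKT_PLAIN;		/* no MAC: answer unauthenticated */
	if (len == NTP_HDR_LEN + 4)
		return PKT_CRYPTO_NAK;		/* key ID only, no digest */
	if (len == NTP_HDR_LEN + 4 + 16 ||	/* MD5 (or AES-128-CMAC) MAC */
	    len == NTP_HDR_LEN + 4 + 20)	/* SHA1 MAC */
		return PKT_MAC;			/* answer must carry a MAC */
	return PKT_OTHER;			/* extension fields, Autokey, ... */
}

int
main(void)
{
	return classify_length(NTP_HDR_LEN + 4 + 16) == PKT_MAC ? 0 : 1;
}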
4566 | | |
4567 | | |
4568 | | /* |
4569 | | * pool_xmit - resolve hostname or send unicast solicitation for pool. |
4570 | | */ |
4571 | | static void |
4572 | | pool_xmit( |
4573 | | struct peer *pool /* pool solicitor association */ |
4574 | | ) |
4575 | 0 | { |
4576 | 0 | #ifdef WORKER |
4577 | 0 | struct pkt xpkt; /* transmit packet structure */ |
4578 | 0 | struct addrinfo hints; |
4579 | 0 | int rc; |
4580 | 0 | struct interface * lcladr; |
4581 | 0 | sockaddr_u * rmtadr; |
4582 | 0 | r4addr r4a; |
4583 | 0 | int restrict_mask; |
4584 | 0 | struct peer * p; |
4585 | 0 | l_fp xmt_tx; |
4586 | |
4587 | 0 | if (NULL == pool->ai) { |
4588 | 0 | if (pool->addrs != NULL) { |
4589 | | /* free() is used with copy_addrinfo_list() */ |
4590 | 0 | free(pool->addrs); |
4591 | 0 | pool->addrs = NULL; |
4592 | 0 | } |
4593 | 0 | ZERO(hints); |
4594 | 0 | hints.ai_family = AF(&pool->srcadr); |
4595 | 0 | hints.ai_socktype = SOCK_DGRAM; |
4596 | 0 | hints.ai_protocol = IPPROTO_UDP; |
4597 | | /* ignore getaddrinfo_sometime() errors, we will retry */ |
4598 | 0 | rc = getaddrinfo_sometime( |
4599 | 0 | pool->hostname, |
4600 | 0 | "ntp", |
4601 | 0 | &hints, |
4602 | 0 | 0, /* no retry */ |
4603 | 0 | &pool_name_resolved, |
4604 | 0 | (void *)(intptr_t)pool->associd); |
4605 | 0 | if (!rc) |
4606 | 0 | DPRINTF(1, ("pool DNS lookup %s started\n", |
4607 | 0 | pool->hostname)); |
4608 | 0 | else |
4609 | 0 | msyslog(LOG_ERR, |
4610 | 0 | "unable to start pool DNS %s: %m", |
4611 | 0 | pool->hostname); |
4612 | 0 | return; |
4613 | 0 | } |
4614 | | |
4615 | 0 | do { |
4616 | | /* copy_addrinfo_list ai_addr points to a sockaddr_u */ |
4617 | 0 | rmtadr = (sockaddr_u *)(void *)pool->ai->ai_addr; |
4618 | 0 | pool->ai = pool->ai->ai_next; |
4619 | 0 | p = findexistingpeer(rmtadr, NULL, NULL, MODE_CLIENT, 0, NULL); |
4620 | 0 | } while (p != NULL && pool->ai != NULL); |
4621 | 0 | if (p != NULL) |
4622 | 0 | return; /* out of addresses, re-query DNS next poll */ |
4623 | 0 | restrictions(rmtadr, &r4a); |
4624 | 0 | restrict_mask = r4a.rflags; |
4625 | 0 | if (RES_FLAGS & restrict_mask) |
4626 | 0 | restrict_source(rmtadr, 0, |
4627 | 0 | current_time + POOL_SOLICIT_WINDOW + 1); |
4628 | 0 | lcladr = findinterface(rmtadr); |
4629 | 0 | memset(&xpkt, 0, sizeof(xpkt)); |
4630 | 0 | xpkt.li_vn_mode = PKT_LI_VN_MODE(sys_leap, pool->version, |
4631 | 0 | MODE_CLIENT); |
4632 | 0 | xpkt.stratum = STRATUM_TO_PKT(sys_stratum); |
4633 | 0 | xpkt.ppoll = pool->hpoll; |
4634 | 0 | xpkt.precision = sys_precision; |
4635 | 0 | xpkt.refid = sys_refid; |
4636 | 0 | xpkt.rootdelay = HTONS_FP(DTOFP(sys_rootdelay)); |
4637 | 0 | xpkt.rootdisp = HTONS_FP(DTOUFP(sys_rootdisp)); |
4638 | 0 | HTONL_FP(&sys_reftime, &xpkt.reftime); |
4639 | 0 | get_systime(&xmt_tx); |
4640 | 0 | pool->aorg = xmt_tx; |
4641 | 0 | HTONL_FP(&xmt_tx, &xpkt.xmt); |
4642 | 0 | sendpkt(rmtadr, lcladr, |
4643 | 0 | sys_ttl[(pool->ttl >= sys_ttlmax) ? sys_ttlmax : pool->ttl], |
4644 | 0 | &xpkt, LEN_PKT_NOMAC); |
4645 | 0 | pool->sent++; |
4646 | 0 | pool->throttle += (1 << pool->minpoll) - 2; |
4647 | 0 | DPRINTF(1, ("pool_xmit: at %ld %s->%s pool\n", |
4648 | 0 | current_time, latoa(lcladr), stoa(rmtadr))); |
4649 | 0 | msyslog(LOG_INFO, "Soliciting pool server %s", stoa(rmtadr)); |
4650 | 0 | #endif /* WORKER */ |
4651 | 0 | } |
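pool_xmit() resolves the pool hostname asynchronously through getaddrinfo_sometime() with UDP hints and the "ntp" service. For illustration, a blocking counterpart using the standard getaddrinfo() with the same hints; the AF_UNSPEC family here is an assumption, since ntpd passes the family of the pool prototype association.

/* Sketch only: synchronous counterpart of the lookup pool_xmit()
 * starts.  The caller walks the ai_next chain and finally calls
 * freeaddrinfo(), much as ntpd walks its copied list above. */
#include <string.h>
#include <netdb.h>
#include <netinet/in.h>
#include <sys/socket.h>

static int
resolve_pool(const char *hostname, struct addrinfo **res)
{
	struct addrinfo hints;

	memset(&hints, 0, sizeof(hints));
	hints.ai_family = AF_UNSPEC;
	hints.ai_socktype = SOCK_DGRAM;
	hints.ai_protocol = IPPROTO_UDP;
	return getaddrinfo(hostname, "ntp", &hints, res);	/* 0 on success */
}

int
main(void)
{
	struct addrinfo *res = NULL;

	if (resolve_pool("pool.ntp.org", &res) == 0)	/* example hostname */
		freeaddrinfo(res);
	return 0;
}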
4652 | | |
4653 | | |
4654 | | #ifdef AUTOKEY |
4655 | | /* |
4656 | | * group_test - test if this is the same group |
4657 | | * |
4658 | | * host assoc return action |
4659 | | * none none 0 mobilize * |
4660 | | * none group 0 mobilize * |
4661 | | * group none 0 mobilize * |
4662 | | * group group 1 mobilize |
4663 | | * group different 1 ignore |
4664 | | * * ignore if notrust |
4665 | | */ |
4666 | | int |
4667 | | group_test( |
4668 | | char *grp, |
4669 | | char *ident |
4670 | | ) |
4671 | | { |
4672 | | if (grp == NULL) |
4673 | | return (0); |
4674 | | |
4675 | | if (strcmp(grp, sys_groupname) == 0) |
4676 | | return (0); |
4677 | | |
4678 | | if (ident == NULL) |
4679 | | return (1); |
4680 | | |
4681 | | if (strcmp(grp, ident) == 0) |
4682 | | return (0); |
4683 | | |
4684 | | return (1); |
4685 | | } |
4686 | | #endif /* AUTOKEY */ |
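The decision table above maps directly onto the checks in group_test(). A standalone restatement for illustration, with sys_groupname passed in explicitly and a NULL guard added purely so the sketch is self-contained:

/* Sketch only: same decision logic as group_test(), parameterized on
 * the local group name instead of the sys_groupname global. */
#include <stdio.h>
#include <string.h>

static int
group_test_demo(const char *grp, const char *ident, const char *sys_group)
{
	if (grp == NULL)
		return 0;				/* mobilize */
	if (sys_group != NULL && strcmp(grp, sys_group) == 0)
		return 0;				/* same group: mobilize */
	if (ident == NULL)
		return 1;				/* ignore */
	return (strcmp(grp, ident) == 0) ? 0 : 1;	/* mobilize / ignore */
}

int
main(void)
{
	/* the "group different -> ignore" row of the table above */
	printf("%d\n", group_test_demo("groupA", "groupB", "groupC"));	/* 1 */
	return 0;
}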
4687 | | |
4688 | | |
4689 | | #ifdef WORKER |
4690 | | void |
4691 | | pool_name_resolved( |
4692 | | int rescode, |
4693 | | int gai_errno, |
4694 | | void * context, |
4695 | | const char * name, |
4696 | | const char * service, |
4697 | | const struct addrinfo * hints, |
4698 | | const struct addrinfo * res |
4699 | | ) |
4700 | 0 | { |
4701 | 0 | struct peer * pool; /* pool solicitor association */ |
4702 | 0 | associd_t assoc; |
4703 | |
4704 | 0 | if (rescode) { |
4705 | 0 | msyslog(LOG_ERR, |
4706 | 0 | "error resolving pool %s: %s (%d)", |
4707 | 0 | name, gai_strerror(rescode), rescode); |
4708 | 0 | return; |
4709 | 0 | } |
4710 | | |
4711 | 0 | assoc = (associd_t)(intptr_t)context; |
4712 | 0 | pool = findpeerbyassoc(assoc); |
4713 | 0 | if (NULL == pool) { |
4714 | 0 | msyslog(LOG_ERR, |
4715 | 0 | "Could not find assoc %u for pool DNS %s", |
4716 | 0 | assoc, name); |
4717 | 0 | return; |
4718 | 0 | } |
4719 | 0 | DPRINTF(1, ("pool DNS %s completed\n", name)); |
4720 | 0 | pool->addrs = copy_addrinfo_list(res); |
4721 | 0 | pool->ai = pool->addrs; |
4722 | 0 | pool_xmit(pool); |
4723 | |
4724 | 0 | } |
4725 | | #endif /* WORKER */ |
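The association ID travels through the resolver's opaque context pointer via intptr_t casts: handed in by pool_xmit() and recovered here by pool_name_resolved(). The round trip in isolation (associd_t is redeclared as a 32-bit unsigned only to keep the sketch self-contained):

/* Sketch only: smuggle a small integer through a void * context, the
 * same pattern used by pool_xmit()/pool_name_resolved() above. */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t associd_t;

static void
demo_callback(void *context)
{
	associd_t assoc = (associd_t)(intptr_t)context;

	printf("callback for assoc %u\n", (unsigned)assoc);
}

int
main(void)
{
	associd_t assoc = 1234;

	demo_callback((void *)(intptr_t)assoc);
	return 0;
}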
4726 | | |
4727 | | |
4728 | | #ifdef AUTOKEY |
4729 | | /* |
4730 | | * key_expire - purge the key list |
4731 | | */ |
4732 | | void |
4733 | | key_expire( |
4734 | | struct peer *peer /* peer structure pointer */ |
4735 | | ) |
4736 | | { |
4737 | | int i; |
4738 | | |
4739 | | if (peer->keylist != NULL) { |
4740 | | for (i = 0; i <= peer->keynumber; i++) |
4741 | | authtrust(peer->keylist[i], 0); |
4742 | | free(peer->keylist); |
4743 | | peer->keylist = NULL; |
4744 | | } |
4745 | | value_free(&peer->sndval); |
4746 | | peer->keynumber = 0; |
4747 | | peer->flags &= ~FLAG_ASSOC; |
4748 | | DPRINTF(1, ("key_expire: at %lu associd %d\n", current_time, |
4749 | | peer->associd)); |
4750 | | } |
4751 | | #endif /* AUTOKEY */ |
4752 | | |
4753 | | |
4754 | | /* |
4755 | | * local_refid(peer) - check peer refid to avoid selecting peers |
4756 | | * currently synced to this ntpd. |
4757 | | */ |
4758 | | static int |
4759 | | local_refid( |
4760 | | struct peer * p |
4761 | | ) |
4762 | 0 | { |
4763 | 0 | endpt * unicast_ep; |
4764 | |
4765 | 0 | if (p->dstadr != NULL && !(INT_MCASTIF & p->dstadr->flags)) |
4766 | 0 | unicast_ep = p->dstadr; |
4767 | 0 | else |
4768 | 0 | unicast_ep = findinterface(&p->srcadr); |
4769 | |
4770 | 0 | if (unicast_ep != NULL && p->refid == unicast_ep->addr_refid) |
4771 | 0 | return TRUE; |
4772 | 0 | else |
4773 | 0 | return FALSE; |
4774 | 0 | } |
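local_refid() works because an endpoint's addr_refid is derived from its own address: for IPv4 it is the four address octets themselves, and for IPv6 the first four octets of the MD5 hash of the address (RFC 5905). A peer advertising a refid equal to one of our unicast addresses is therefore, directly or indirectly, taking time from us. The IPv4 case in isolation:

/* Sketch only: an IPv4 refid is just the address in network byte
 * order; the IPv6 MD5-hash case is omitted here. */
#include <stdint.h>
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

static uint32_t
ipv4_refid(const struct in_addr *addr)
{
	return (uint32_t)addr->s_addr;	/* already in network byte order */
}

int
main(void)
{
	struct in_addr a;

	inet_pton(AF_INET, "192.0.2.1", &a);	/* documentation address */
	printf("refid = 0x%08x\n", (unsigned)ntohl(ipv4_refid(&a)));
	return 0;
}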
4775 | | |
4776 | | |
4777 | | /* |
4778 | | * Determine if the peer is unfit for synchronization |
4779 | | * |
4780 | | * A peer is unfit for synchronization if |
4781 | | * > TEST10 bad leap or stratum below floor or at or above ceiling |
4782 | | * > TEST11 root distance exceeded for remote peer |
4783 | | * > TEST12 a direct or indirect synchronization loop would form |
4784 | | * > TEST13 unreachable or noselect |
4785 | | */ |
4786 | | int /* FALSE if fit, TRUE if unfit */ |
4787 | | peer_unfit( |
4788 | | struct peer *peer /* peer structure pointer */ |
4789 | | ) |
4790 | 0 | { |
4791 | 0 | int rval = 0; |
4792 | | |
4793 | | /* |
4794 | | * A stratum error occurs if (1) the server has never been |
4795 | | * synchronized, or (2) the server stratum is below the floor or |
4796 | | * greater than or equal to the ceiling. |
4797 | | */ |
4798 | 0 | if ( peer->leap == LEAP_NOTINSYNC |
4799 | 0 | || peer->stratum < sys_floor |
4800 | 0 | || peer->stratum >= sys_ceiling) { |
4801 | 0 | rval |= TEST10; /* bad synch or stratum */ |
4802 | 0 | } |
4803 | | |
4804 | | /* |
4805 | | * A distance error for a remote peer occurs if the root |
4806 | | * distance is greater than or equal to the distance threshold |
4807 | | * plus the increment due to one host poll interval. |
4808 | | */ |
4809 | 0 | if ( !(peer->flags & FLAG_REFCLOCK) |
4810 | 0 | && root_distance(peer) >= sys_maxdist |
4811 | 0 | + clock_phi * ULOGTOD(peer->hpoll)) { |
4812 | 0 | rval |= TEST11; /* distance exceeded */ |
4813 | 0 | } |
4814 | | |
4815 | | /* |
4816 | | * A loop error occurs if the remote peer is synchronized to the |
4817 | | * local peer or if the remote peer is synchronized to the same |
4818 | | * server as the local peer but only if the remote peer is |
4819 | | * neither a reference clock nor an orphan. |
4820 | | */ |
4821 | 0 | if (peer->stratum > 1 && local_refid(peer)) { |
4822 | 0 | rval |= TEST12; /* synchronization loop */ |
4823 | 0 | } |
4824 | | |
4825 | | /* |
4826 | | * An unreachable error occurs if the server is unreachable or |
4827 | | * the noselect bit is set. |
4828 | | */ |
4829 | 0 | if (!peer->reach || (peer->flags & FLAG_NOSELECT)) { |
4830 | 0 | rval |= TEST13; /* unreachable */ |
4831 | 0 | } |
4832 | |
4833 | 0 | peer->flash &= ~PEER_TEST_MASK; |
4834 | 0 | peer->flash |= rval; |
4835 | 0 | return (rval); |
4836 | 0 | } |
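A worked instance of the TEST11 bound: with the usual defaults assumed below (sys_maxdist 1.5 s, clock_phi 15 PPM) and a 64 s host poll, a remote peer is declared unfit once its root distance reaches roughly 1.501 s.

/* Sketch only: evaluate the TEST11 distance limit for one poll
 * interval; the two defaults are assumptions, not read from ntpd. */
#include <stdio.h>

int
main(void)
{
	double sys_maxdist = 1.5;	/* assumed "tos maxdist" default (s) */
	double clock_phi = 15e-6;	/* assumed frequency tolerance (s/s) */
	int hpoll = 6;			/* 2^6 = 64 s host poll */
	double limit = sys_maxdist + clock_phi * (double)(1 << hpoll);

	printf("unfit when root distance >= %.6f s\n", limit);	/* 1.500960 */
	return 0;
}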
4837 | | |
4838 | | |
4839 | | /* |
4840 | | * Find the precision of this particular machine |
4841 | | */ |
4842 | 13 | #define MINSTEP 20e-9 /* minimum clock increment (s) */ |
4843 | 1 | #define MAXSTEP 1 /* maximum clock increment (s) */ |
4844 | 14 | #define MINCHANGES 12 /* minimum number of step samples */ |
4845 | 26 | #define MAXLOOPS ((int)(1. / MINSTEP)) /* avoid infinite loop */ |
4846 | | |
4847 | | /* |
4848 | | * This routine measures the system precision defined as the minimum of |
4849 | | * a sequence of differences between successive readings of the system |
4850 | | * clock. However, if a difference is less than MINSTEP, the clock has |
4851 | | * been read more than once during a clock tick and the difference is |
4852 | | * ignored. We set MINSTEP greater than zero in case something happens |
4853 | | * like a cache miss, and to tolerate underlying system clocks which |
4854 | | * ensure each reading is strictly greater than prior readings while |
4855 | | * using an underlying stepping (not interpolated) clock. |
4856 | | * |
4857 | | * sys_tick and sys_precision represent the time to read the clock for |
4858 | | * systems with high-precision clocks, and the tick interval or step |
4859 | | * size for lower-precision stepping clocks. |
4860 | | * |
4861 | | * This routine also measures the time to read the clock on stepping |
4862 | | * system clocks by counting the number of readings between changes of |
4863 | | * the underlying clock. With either type of clock, the minimum time |
4864 | | * to read the clock is saved as sys_fuzz, and used to ensure the |
4865 | | * get_systime() readings always increase and are fuzzed below sys_fuzz. |
4866 | | */ |
4867 | | void |
4868 | | measure_precision(void) |
4869 | 1 | { |
4870 | | /* |
4871 | | * With sys_fuzz set to zero, get_systime() fuzzing of low bits |
4872 | | * is effectively disabled. trunc_os_clock is FALSE to disable |
4873 | | * get_ostime() simulation of a low-precision system clock. |
4874 | | */ |
4875 | 1 | set_sys_fuzz(0.); |
4876 | 1 | trunc_os_clock = FALSE; |
4877 | 1 | measured_tick = measure_tick_fuzz(); |
4878 | 1 | set_sys_tick_precision(measured_tick); |
4879 | 1 | msyslog(LOG_INFO, "proto: precision = %.3f usec (%d)", |
4880 | 1 | sys_tick * 1e6, sys_precision); |
4881 | 1 | if (sys_fuzz < sys_tick) { |
4882 | 0 | msyslog(LOG_NOTICE, "proto: fuzz beneath %.3f usec", |
4883 | 0 | sys_fuzz * 1e6); |
4884 | 0 | } |
4885 | 1 | } |
4886 | | |
4887 | | |
4888 | | /* |
4889 | | * measure_tick_fuzz() |
4890 | | * |
4891 | | * measures the minimum time to read the clock (stored in sys_fuzz) |
4892 | | * and returns the tick, the larger of the minimum increment observed |
4893 | | * between successive clock readings and the time to read the clock. |
4894 | | */ |
4895 | | double |
4896 | | measure_tick_fuzz(void) |
4897 | 1 | { |
4898 | 1 | l_fp minstep; /* MINSTEP as l_fp */ |
4899 | 1 | l_fp val; /* current seconds fraction */ |
4900 | 1 | l_fp last; /* last seconds fraction */ |
4901 | 1 | l_fp ldiff; /* val - last */ |
4902 | 1 | double tick; /* computed tick value */ |
4903 | 1 | double diff; |
4904 | 1 | long repeats; |
4905 | 1 | long max_repeats; |
4906 | 1 | int changes; |
4907 | 1 | int i; /* log2 precision */ |
4908 | | |
4909 | 1 | tick = MAXSTEP; |
4910 | 1 | max_repeats = 0; |
4911 | 1 | repeats = 0; |
4912 | 1 | changes = 0; |
4913 | 1 | DTOLFP(MINSTEP, &minstep); |
4914 | 1 | get_systime(&last); |
4915 | 13 | for (i = 0; i < MAXLOOPS && changes < MINCHANGES; i++) { |
4916 | 12 | get_systime(&val); |
4917 | 12 | ldiff = val; |
4918 | 12 | L_SUB(&ldiff, &last); |
4919 | 12 | last = val; |
4920 | 12 | if (L_ISGT(&ldiff, &minstep)) { |
4921 | 12 | max_repeats = max(repeats, max_repeats); |
4922 | 12 | repeats = 0; |
4923 | 12 | changes++; |
4924 | 12 | LFPTOD(&ldiff, diff); |
4925 | 12 | tick = min(diff, tick); |
4926 | 12 | } else { |
4927 | 0 | repeats++; |
4928 | 0 | } |
4929 | 12 | } |
4930 | 1 | if (changes < MINCHANGES) { |
4931 | 0 | msyslog(LOG_ERR, "Fatal error: precision could not be measured (MINSTEP too large?)"); |
4932 | 0 | exit(1); |
4933 | 0 | } |
4934 | | |
4935 | 1 | if (0 == max_repeats) { |
4936 | 1 | set_sys_fuzz(tick); |
4937 | 1 | } else { |
4938 | 0 | set_sys_fuzz(tick / max_repeats); |
4939 | 0 | } |
4940 | | |
4941 | 1 | return tick; |
4942 | 1 | } |
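The same minimum-difference idea, sketched against the POSIX clock: read repeatedly, discard deltas below MINSTEP, and keep the smallest remaining delta as an estimate of the tick (or of the time to read the clock on a high-resolution system).

/* Sketch only: crude tick estimate with clock_gettime(); ntpd's real
 * measurement is the l_fp loop in measure_tick_fuzz() above. */
#include <stdio.h>
#include <time.h>

int
main(void)
{
	struct timespec prev, cur;
	double d, tick = 1.0;		/* start at MAXSTEP */
	int i;

	clock_gettime(CLOCK_REALTIME, &prev);
	for (i = 0; i < 1000; i++) {
		clock_gettime(CLOCK_REALTIME, &cur);
		d = (cur.tv_sec - prev.tv_sec) +
		    (cur.tv_nsec - prev.tv_nsec) / 1e9;
		prev = cur;
		if (d > 20e-9 && d < tick)	/* ignore sub-MINSTEP deltas */
			tick = d;
	}
	printf("approx tick: %.9f s\n", tick);
	return 0;
}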
4943 | | |
4944 | | |
4945 | | void |
4946 | | set_sys_tick_precision( |
4947 | | double tick |
4948 | | ) |
4949 | 1 | { |
4950 | 1 | int i; |
4951 | | |
4952 | 1 | if (tick > 1.) { |
4953 | 0 | msyslog(LOG_ERR, |
4954 | 0 | "unsupported tick %.3f > 1s ignored", tick); |
4955 | 0 | return; |
4956 | 0 | } |
4957 | 1 | if (tick < measured_tick) { |
4958 | 0 | msyslog(LOG_ERR, |
4959 | 0 | "proto: tick %.3f less than measured tick %.3f, ignored", |
4960 | 0 | tick, measured_tick); |
4961 | 0 | return; |
4962 | 1 | } else if (tick > measured_tick) { |
4963 | 0 | trunc_os_clock = TRUE; |
4964 | 0 | msyslog(LOG_NOTICE, |
4965 | 0 | "proto: truncating system clock to multiples of %.9f", |
4966 | 0 | tick); |
4967 | 0 | } |
4968 | 1 | sys_tick = tick; |
4969 | | |
4970 | | /* |
4971 | | * Find the nearest power of two. |
4972 | | */ |
4973 | 25 | for (i = 0; tick <= 1; i--) |
4974 | 24 | tick *= 2; |
4975 | 1 | if (tick - 1 > 1 - tick / 2) |
4976 | 0 | i++; |
4977 | | |
4978 | 1 | sys_precision = (s_char)i; |
4979 | 1 | } |
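A worked example of the rounding above: a 1 microsecond tick doubles 20 times before exceeding 1 s, and the midpoint test does not bump the exponent, so sys_precision comes out as -20 (2^-20 s is about 0.95 us, the nearest power of two).

/* Sketch only: the same nearest-power-of-two computation for a
 * hypothetical 1 us tick; prints -20. */
#include <stdio.h>

int
main(void)
{
	double tick = 1e-6;
	int i;

	for (i = 0; tick <= 1; i--)
		tick *= 2;
	if (tick - 1 > 1 - tick / 2)
		i++;
	printf("precision = %d\n", i);
	return 0;
}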
4980 | | |
4981 | | |
4982 | | /* |
4983 | | * init_proto - initialize the protocol module's data |
4984 | | */ |
4985 | | void |
4986 | | init_proto(void) |
4987 | 1 | { |
4988 | 1 | l_fp dummy; |
4989 | 1 | int i; |
4990 | | |
4991 | | /* |
4992 | | * Fill in the sys_* stuff. Default is don't listen to |
4993 | | * broadcasting, require authentication. |
4994 | | */ |
4995 | 1 | set_sys_leap(LEAP_NOTINSYNC); |
4996 | 1 | sys_stratum = STRATUM_UNSPEC; |
4997 | 1 | memcpy(&sys_refid, "INIT", 4); |
4998 | 1 | sys_peer = NULL; |
4999 | 1 | sys_rootdelay = 0; |
5000 | 1 | sys_rootdisp = 0; |
5001 | 1 | L_CLR(&sys_reftime); |
5002 | 1 | sys_jitter = 0; |
5003 | 1 | measure_precision(); |
5004 | 1 | get_systime(&dummy); |
5005 | 1 | sys_survivors = 0; |
5006 | 1 | sys_manycastserver = 0; |
5007 | 1 | sys_bclient = 0; |
5008 | 1 | sys_bdelay = BDELAY_DEFAULT; /*[Bug 3031] delay cutoff */ |
5009 | 1 | sys_authenticate = 1; |
5010 | 1 | sys_stattime = current_time; |
5011 | 1 | orphwait = current_time + sys_orphwait; |
5012 | 1 | proto_clr_stats(); |
5013 | 9 | for (i = 0; i < MAX_TTL; ++i) |
5014 | 8 | sys_ttl[i] = (u_char)((i * 256) / MAX_TTL); |
5015 | 1 | sys_ttlmax = (MAX_TTL - 1); |
5016 | 1 | hardpps_enable = 0; |
5017 | 1 | stats_control = 1; |
5018 | 1 | } |
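The loop counts above imply MAX_TTL is 8 in this build, so the sys_ttl table init_proto() fills comes out as 0, 32, 64, ..., 224; written out below for reference.

/* Sketch only: the TTL/hop table as built by init_proto() when
 * MAX_TTL == 8 (inferred from the coverage counts, not asserted). */
#include <stdio.h>

int
main(void)
{
	unsigned char ttl[8];
	int i;

	for (i = 0; i < 8; i++) {
		ttl[i] = (unsigned char)((i * 256) / 8);
		printf("sys_ttl[%d] = %u\n", i, ttl[i]);
	}
	return 0;
}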
5019 | | |
5020 | | |
5021 | | /* |
5022 | | * proto_config - configure the protocol module |
5023 | | */ |
5024 | | void |
5025 | | proto_config( |
5026 | | int item, |
5027 | | u_long value, |
5028 | | double dvalue, |
5029 | | sockaddr_u *svalue |
5030 | | ) |
5031 | 0 | { |
5032 | | /* |
5033 | | * Figure out what he wants to change, then do it |
5034 | | */ |
5035 | 0 | DPRINTF(2, ("proto_config: code %d value %lu dvalue %lf\n", |
5036 | 0 | item, value, dvalue)); |
5037 | |
5038 | 0 | switch (item) { |
5039 | | |
5040 | | /* |
5041 | | * enable and disable commands - arguments are Boolean. |
5042 | | */ |
5043 | 0 | case PROTO_AUTHENTICATE: /* authentication (auth) */ |
5044 | 0 | sys_authenticate = value; |
5045 | 0 | break; |
5046 | | |
5047 | 0 | case PROTO_BROADCLIENT: /* broadcast client (bclient) */ |
5048 | 0 | sys_bclient = (int)value; |
5049 | 0 | if (sys_bclient == 0) |
5050 | 0 | io_unsetbclient(); |
5051 | 0 | else |
5052 | 0 | io_setbclient(); |
5053 | 0 | break; |
5054 | | |
5055 | 0 | #ifdef REFCLOCK |
5056 | 0 | case PROTO_CAL: /* refclock calibrate (calibrate) */ |
5057 | 0 | cal_enable = value; |
5058 | 0 | break; |
5059 | 0 | #endif /* REFCLOCK */ |
5060 | | |
5061 | 0 | case PROTO_KERNEL: /* kernel discipline (kernel) */ |
5062 | 0 | select_loop(value); |
5063 | 0 | break; |
5064 | | |
5065 | 0 | case PROTO_MONITOR: /* monitoring (monitor) */ |
5066 | 0 | if (value) |
5067 | 0 | mon_start(MON_ON); |
5068 | 0 | else { |
5069 | 0 | mon_stop(MON_ON); |
5070 | 0 | if (mon_enabled) |
5071 | 0 | msyslog(LOG_WARNING, |
5072 | 0 | "restrict: 'monitor' cannot be disabled while 'limited' is enabled"); |
5073 | 0 | } |
5074 | 0 | break; |
5075 | | |
5076 | 0 | case PROTO_NTP: /* NTP discipline (ntp) */ |
5077 | 0 | ntp_enable = value; |
5078 | 0 | break; |
5079 | | |
5080 | 0 | case PROTO_MODE7: /* mode7 management (ntpdc) */ |
5081 | 0 | ntp_mode7 = value; |
5082 | 0 | break; |
5083 | | |
5084 | 0 | case PROTO_PPS: /* PPS discipline (pps) */ |
5085 | 0 | hardpps_enable = value; |
5086 | 0 | break; |
5087 | | |
5088 | 0 | case PROTO_FILEGEN: /* statistics (stats) */ |
5089 | 0 | stats_control = value; |
5090 | 0 | break; |
5091 | | |
5092 | | /* |
5093 | | * tos command - arguments are double, sometimes cast to int |
5094 | | */ |
5095 | | |
5096 | 0 | case PROTO_BCPOLLBSTEP: /* Broadcast Poll Backstep gate (bcpollbstep) */ |
5097 | 0 | sys_bcpollbstep = (u_char)dvalue; |
5098 | 0 | break; |
5099 | | |
5100 | 0 | case PROTO_BEACON: /* manycast beacon (beacon) */ |
5101 | 0 | sys_beacon = (int)dvalue; |
5102 | 0 | break; |
5103 | | |
5104 | 0 | case PROTO_BROADDELAY: /* default broadcast delay (bdelay) */ |
5105 | 0 | sys_bdelay = (dvalue ? dvalue : BDELAY_DEFAULT); |
5106 | 0 | break; |
5107 | | |
5108 | 0 | case PROTO_CEILING: /* stratum ceiling (ceiling) */ |
5109 | 0 | sys_ceiling = (int)dvalue; |
5110 | 0 | break; |
5111 | | |
5112 | 0 | case PROTO_COHORT: /* cohort switch (cohort) */ |
5113 | 0 | sys_cohort = (int)dvalue; |
5114 | 0 | break; |
5115 | | |
5116 | 0 | case PROTO_FLOOR: /* stratum floor (floor) */ |
5117 | 0 | sys_floor = (int)dvalue; |
5118 | 0 | break; |
5119 | | |
5120 | 0 | case PROTO_MAXCLOCK: /* maximum candidates (maxclock) */ |
5121 | 0 | sys_maxclock = (int)dvalue; |
5122 | 0 | break; |
5123 | | |
5124 | 0 | case PROTO_MAXDIST: /* select threshold (maxdist) */ |
5125 | 0 | sys_maxdist = dvalue; |
5126 | 0 | break; |
5127 | | |
5128 | 0 | case PROTO_CALLDELAY: /* modem call delay (mdelay) */ |
5129 | 0 | break; /* NOT USED */ |
5130 | | |
5131 | 0 | case PROTO_MINCLOCK: /* minimum candidates (minclock) */ |
5132 | 0 | sys_minclock = (int)dvalue; |
5133 | 0 | break; |
5134 | | |
5135 | 0 | case PROTO_MINDISP: /* minimum distance (mindist) */ |
5136 | 0 | sys_mindisp = dvalue; |
5137 | 0 | break; |
5138 | | |
5139 | 0 | case PROTO_MINSANE: /* minimum survivors (minsane) */ |
5140 | 0 | sys_minsane = (int)dvalue; |
5141 | 0 | break; |
5142 | | |
5143 | 0 | case PROTO_ORPHAN: /* orphan stratum (orphan) */ |
5144 | 0 | sys_orphan = (int)dvalue; |
5145 | 0 | break; |
5146 | | |
5147 | 0 | case PROTO_ORPHWAIT: /* orphan wait (orphwait) */ |
5148 | 0 | orphwait -= sys_orphwait; |
5149 | 0 | sys_orphwait = (int)dvalue; |
5150 | 0 | orphwait += sys_orphwait; |
5151 | 0 | break; |
5152 | | |
5153 | | /* |
5154 | | * Miscellaneous commands |
5155 | | */ |
5156 | 0 | case PROTO_MULTICAST_ADD: /* add group address */ |
5157 | 0 | if (svalue != NULL) |
5158 | 0 | io_multicast_add(svalue); |
5159 | 0 | sys_bclient = 1; |
5160 | 0 | break; |
5161 | | |
5162 | 0 | case PROTO_MULTICAST_DEL: /* delete group address */ |
5163 | 0 | if (svalue != NULL) |
5164 | 0 | io_multicast_del(svalue); |
5165 | 0 | break; |
5166 | | |
5167 | | /* |
5168 | | * Peer_clear Early policy choices |
5169 | | */ |
5170 | | |
5171 | 0 | case PROTO_PCEDIGEST: /* Digest */ |
5172 | 0 | peer_clear_digest_early = value; |
5173 | 0 | break; |
5174 | | |
5175 | | /* |
5176 | | * Unpeer Early policy choices |
5177 | | */ |
5178 | | |
5179 | 0 | case PROTO_UECRYPTO: /* Crypto */ |
5180 | 0 | unpeer_crypto_early = value; |
5181 | 0 | break; |
5182 | | |
5183 | 0 | case PROTO_UECRYPTONAK: /* Crypto_NAK */ |
5184 | 0 | unpeer_crypto_nak_early = value; |
5185 | 0 | break; |
5186 | | |
5187 | 0 | case PROTO_UEDIGEST: /* Digest */ |
5188 | 0 | unpeer_digest_early = value; |
5189 | 0 | break; |
5190 | | |
5191 | 0 | default: |
5192 | 0 | msyslog(LOG_NOTICE, |
5193 | 0 | "proto: unsupported option %d", item); |
5194 | 0 | } |
5195 | 0 | } |
5196 | | |
5197 | | |
5198 | | /* |
5199 | | * proto_clr_stats - clear protocol stat counters |
5200 | | */ |
5201 | | void |
5202 | | proto_clr_stats(void) |
5203 | 1 | { |
5204 | 1 | sys_stattime = current_time; |
5205 | 1 | sys_received = 0; |
5206 | 1 | sys_processed = 0; |
5207 | 1 | sys_newversion = 0; |
5208 | 1 | sys_oldversion = 0; |
5209 | 1 | sys_declined = 0; |
5210 | 1 | sys_restricted = 0; |
5211 | 1 | sys_badlength = 0; |
5212 | 1 | sys_badauth = 0; |
5213 | 1 | sys_limitrejected = 0; |
5214 | 1 | sys_kodsent = 0; |
5215 | 1 | sys_lamport = 0; |
5216 | 1 | sys_tsrounding = 0; |
5217 | 1 | } |