/src/ntp-dev/ntpd/ntp_refclock.c
Line | Count | Source |
1 | | /* |
2 | | * ntp_refclock - processing support for reference clocks |
3 | | */ |
4 | | #ifdef HAVE_CONFIG_H |
5 | | # include <config.h> |
6 | | #endif |
7 | | |
8 | | #include "ntpd.h" |
9 | | #include "ntp_io.h" |
10 | | #include "ntp_unixtime.h" |
11 | | #include "ntp_tty.h" |
12 | | #include "ntp_refclock.h" |
13 | | #include "ntp_clockdev.h" |
14 | | #include "ntp_stdlib.h" |
15 | | #include "ntp_assert.h" |
16 | | #include "timespecops.h" |
17 | | |
18 | | #include <stdio.h> |
19 | | |
20 | | #ifdef HAVE_SYS_IOCTL_H |
21 | | # include <sys/ioctl.h> |
22 | | #endif /* HAVE_SYS_IOCTL_H */ |
23 | | |
24 | | #ifdef REFCLOCK |
25 | | |
26 | | #ifdef KERNEL_PLL |
27 | | #include "ntp_syscall.h" |
28 | | #endif /* KERNEL_PLL */ |
29 | | |
30 | | #ifdef HAVE_PPSAPI |
31 | | #include "ppsapi_timepps.h" |
32 | | #include "refclock_atom.h" |
33 | | #endif /* HAVE_PPSAPI */ |
34 | | |
35 | | /* |
36 | | * Reference clock support is provided here by maintaining the fiction |
37 | | * that the clock is actually a peer. As no packets are exchanged with |
38 | | * a reference clock, however, we replace the transmit, receive and |
39 | | * packet procedures with separate code to simulate them. Routines |
40 | | * refclock_transmit() and refclock_receive() maintain the peer |
41 | | * variables in a state analogous to an actual peer and pass reference |
42 | | * clock data on through the filters. Routines refclock_peer() and |
43 | | * refclock_unpeer() are called to initialize and terminate reference |
44 | | * clock associations. A set of utility routines is included to open |
45 | | * serial devices, process sample data, and to perform various debugging |
46 | | * functions. |
47 | | * |
48 | | * The main interface used by these routines is the refclockproc |
49 | | * structure, which contains for most drivers the decimal equivalants |
50 | | * of the year, day, month, hour, second and millisecond/microsecond |
51 | | * decoded from the ASCII timecode. Additional information includes |
52 | | * the receive timestamp, exception report, statistics tallies, etc. |
53 | | * In addition, there may be a driver-specific unit structure used for |
54 | | * local control of the device. |
55 | | * |
56 | | * The support routines are passed a pointer to the peer structure, |
57 | | * which is used for all peer-specific processing and contains a |
58 | | * pointer to the refclockproc structure, which in turn contains a |
59 | | * pointer to the unit structure, if used. The peer structure is |
60 | | * identified by an interface address in the dotted quad form |
61 | | * 127.127.t.u, where t is the clock type and u the unit. |
62 | | */ |
63 | 0 | #define FUDGEFAC .1 /* fudge correction factor */ |
64 | | #define LF 0x0a /* ASCII LF */ |
65 | | |
66 | | int cal_enable; /* enable refclock calibrate */ |
67 | | |
68 | | /* |
69 | | * Forward declarations |
70 | | */ |
71 | | static int refclock_cmpl_fp (const void *, const void *); |
72 | | static int refclock_sample (struct refclockproc *); |
73 | | static int refclock_ioctl(int, u_int); |
74 | | static void refclock_checkburst(struct peer *, struct refclockproc *); |
75 | | |
76 | | /* circular buffer functions |
77 | | * |
78 | | * circular buffer management comes in two flovours: |
79 | | * for powers of two, and all others. |
80 | | */ |
81 | | |
82 | | #if MAXSTAGE & (MAXSTAGE - 1) |
83 | | |
/* Append one offset sample to the circular buffer (modulo variant for
 * non-power-of-two MAXSTAGE).  When the buffer is full, the oldest
 * sample is silently overwritten by advancing 'codeproc'.
 */
static void clk_add_sample(
	struct refclockproc * const pp,
	double                      sv
	)
{
	pp->coderecv = (pp->coderecv + 1) % MAXSTAGE;
	if (pp->coderecv == pp->codeproc)
		/* buffer full -- drop the oldest entry */
		pp->codeproc = (pp->codeproc + 1) % MAXSTAGE;
	pp->filter[pp->coderecv] = sv;
}
94 | | |
/* Remove and return the oldest queued sample (modulo variant).
 * Returns 0 when the buffer is empty.
 */
static double clk_pop_sample(
	struct refclockproc * const pp
	)
{
	if (pp->coderecv == pp->codeproc)
		return 0; /* Maybe a NaN would be better? */
	pp->codeproc = (pp->codeproc + 1) % MAXSTAGE;
	return pp->filter[pp->codeproc];
}
104 | | |
/* Count of samples currently queued (modulo variant).
 * Both indices are in [0, MAXSTAGE); if 'coderecv' has wrapped behind
 * 'codeproc' the unsigned subtraction underflows to a huge value, which
 * the '> MAXSTAGE' test detects and '+= MAXSTAGE' corrects (mod 2^N
 * wrap-around brings it back to the true distance).
 */
static inline u_int clk_cnt_sample(
	struct refclockproc * const pp
	)
{
	u_int retv = pp->coderecv - pp->codeproc;
	if (retv > MAXSTAGE)
		retv += MAXSTAGE;
	return retv;
}
114 | | |
115 | | #else |
116 | | |
/* Append one offset sample to the circular buffer (mask variant for
 * power-of-two MAXSTAGE).  When the buffer is full, the oldest sample
 * is silently overwritten by advancing 'codeproc'.
 */
static inline void clk_add_sample(
	struct refclockproc * const pp,
	double                      sv
	)
{
	pp->coderecv = (pp->coderecv + 1) & (MAXSTAGE - 1);
	if (pp->coderecv == pp->codeproc)
		/* buffer full -- drop the oldest entry */
		pp->codeproc = (pp->codeproc + 1) & (MAXSTAGE - 1);
	pp->filter[pp->coderecv] = sv;
}
127 | | |
/* Remove and return the oldest queued sample (mask variant).
 * Returns 0 when the buffer is empty.
 */
static inline double clk_pop_sample(
	struct refclockproc * const pp
	)
{
	if (pp->coderecv == pp->codeproc)
		return 0; /* Maybe a NaN would be better? */
	pp->codeproc = (pp->codeproc + 1) & (MAXSTAGE - 1);
	return pp->filter[pp->codeproc];
}
137 | | |
/* Count of samples currently queued (mask variant); the AND folds any
 * unsigned wrap-around back into [0, MAXSTAGE).
 */
static inline u_int clk_cnt_sample(
	struct refclockproc * const pp
	)
{
	return (pp->coderecv - pp->codeproc) & (MAXSTAGE - 1);
}
144 | | |
145 | | #endif |
146 | | |
147 | | /* |
148 | | * refclock_report - note the occurance of an event |
149 | | * |
150 | | * This routine presently just remembers the report and logs it, but |
151 | | * does nothing heroic for the trap handler. It tries to be a good |
152 | | * citizen and bothers the system log only if things change. |
153 | | */ |
154 | | void |
155 | | refclock_report( |
156 | | struct peer *peer, |
157 | | int code |
158 | | ) |
159 | 0 | { |
160 | 0 | struct refclockproc *pp; |
161 | |
|
162 | 0 | pp = peer->procptr; |
163 | 0 | if (pp == NULL) |
164 | 0 | return; |
165 | | |
166 | 0 | switch (code) { |
167 | | |
168 | 0 | case CEVNT_TIMEOUT: |
169 | 0 | pp->noreply++; |
170 | 0 | break; |
171 | | |
172 | 0 | case CEVNT_BADREPLY: |
173 | 0 | pp->badformat++; |
174 | 0 | break; |
175 | | |
176 | 0 | case CEVNT_FAULT: |
177 | 0 | break; |
178 | | |
179 | 0 | case CEVNT_BADDATE: |
180 | 0 | case CEVNT_BADTIME: |
181 | 0 | pp->baddata++; |
182 | 0 | break; |
183 | | |
184 | 0 | default: |
185 | | /* ignore others */ |
186 | 0 | break; |
187 | 0 | } |
188 | 0 | if ((code != CEVNT_NOMINAL) && (pp->lastevent < 15)) |
189 | 0 | pp->lastevent++; |
190 | 0 | if (pp->currentstatus != code) { |
191 | 0 | pp->currentstatus = (u_char)code; |
192 | 0 | report_event(PEVNT_CLOCK, peer, ceventstr(code)); |
193 | 0 | } |
194 | 0 | } |
195 | | |
196 | | |
/*
 * init_refclock - initialize the reference clock drivers
 *
 * This routine calls each of the drivers in turn to initialize internal
 * variables, if necessary. Most drivers have nothing to say at this
 * point.
 */
void
init_refclock(void)
{
	int i;

	/* walk the compiled-in driver table, skipping drivers that
	 * supply no init hook (marked with 'noentry')
	 */
	for (i = 0; i < (int)num_refclock_conf; i++)
		if (refclock_conf[i]->clock_init != noentry)
			(refclock_conf[i]->clock_init)();
}
213 | | |
214 | | |
/*
 * refclock_newpeer - initialize and start a reference clock
 *
 * This routine allocates and initializes the interface structure which
 * supports a reference clock in the form of an ordinary NTP peer. A
 * driver-specific support routine completes the initialization, if
 * used. Default peer variables which identify the clock and establish
 * its reference ID and stratum are set here. It returns one if success
 * and zero if the clock address is invalid or already running,
 * insufficient resources are available or the driver declares a bum
 * rap.
 */
int
refclock_newpeer(
	struct peer *peer	/* peer structure pointer */
	)
{
	struct refclockproc *pp;
	u_char clktype;
	int unit;

	/*
	 * Check for valid clock address. If already running, shut it
	 * down first.
	 */
	if (!ISREFCLOCKADR(&peer->srcadr)) {
		msyslog(LOG_ERR,
			"refclock_newpeer: clock address %s invalid",
			stoa(&peer->srcadr));
		return (0);
	}
	/* decode type/unit from the magic 127.127.t.u address */
	clktype = (u_char)REFCLOCKTYPE(&peer->srcadr);
	unit = REFCLOCKUNIT(&peer->srcadr);
	if (clktype >= num_refclock_conf ||
	    refclock_conf[clktype]->clock_start == noentry) {
		msyslog(LOG_ERR,
			"refclock_newpeer: clock type %d invalid\n",
			clktype);
		return (0);
	}

	/*
	 * Allocate and initialize interface structure
	 */
	pp = emalloc_zero(sizeof(*pp));
	peer->procptr = pp;

	/*
	 * Initialize structures
	 */
	peer->refclktype = clktype;
	peer->refclkunit = (u_char)unit;
	peer->flags |= FLAG_REFCLOCK;
	peer->leap = LEAP_NOTINSYNC;	/* unsynchronized until driver says otherwise */
	peer->stratum = STRATUM_REFCLOCK;
	peer->ppoll = peer->maxpoll;
	pp->type = clktype;
	pp->conf = refclock_conf[clktype];
	pp->timestarted = current_time;
	pp->io.fd = -1;			/* no device open yet */

	/*
	 * Set peer.pmode based on the hmode. For appearances only.
	 */
	switch (peer->hmode) {
	case MODE_ACTIVE:
		peer->pmode = MODE_PASSIVE;
		break;

	default:
		peer->pmode = MODE_SERVER;
		break;
	}

	/*
	 * Do driver dependent initialization. The above defaults
	 * can be wiggled, then finish up for consistency.
	 */
	if (!((refclock_conf[clktype]->clock_start)(unit, peer))) {
		/* driver refused -- tear down what we built above */
		refclock_unpeer(peer);
		return (0);
	}
	/* driver may have set its refid during clock_start */
	peer->refid = pp->refid;
	return (1);
}
300 | | |
301 | | |
302 | | /* |
303 | | * refclock_unpeer - shut down a clock |
304 | | */ |
305 | | void |
306 | | refclock_unpeer( |
307 | | struct peer *peer /* peer structure pointer */ |
308 | | ) |
309 | 0 | { |
310 | 0 | u_char clktype; |
311 | 0 | int unit; |
312 | | |
313 | | /* |
314 | | * Wiggle the driver to release its resources, then give back |
315 | | * the interface structure. |
316 | | */ |
317 | 0 | if (NULL == peer->procptr) |
318 | 0 | return; |
319 | | |
320 | 0 | clktype = peer->refclktype; |
321 | 0 | unit = peer->refclkunit; |
322 | 0 | if (refclock_conf[clktype]->clock_shutdown != noentry) |
323 | 0 | (refclock_conf[clktype]->clock_shutdown)(unit, peer); |
324 | 0 | free(peer->procptr); |
325 | 0 | peer->procptr = NULL; |
326 | 0 | } |
327 | | |
328 | | |
/*
 * refclock_timer - called once per second for housekeeping.
 *
 * Dispatches to the driver's per-second timer hook (if any) and fires
 * the deferred 'action' callback once its scheduled time has arrived.
 */
void
refclock_timer(
	struct peer *p
	)
{
	struct refclockproc *	pp;
	int			unit;

	unit = p->refclkunit;
	pp = p->procptr;	/* NOTE(review): assumed non-NULL for refclock peers -- confirm callers */
	if (pp->conf->clock_timer != noentry)
		(*pp->conf->clock_timer)(unit, p);
	if (pp->action != NULL && pp->nextaction <= current_time)
		(*pp->action)(p);
}
347 | | |
348 | | |
/*
 * refclock_transmit - simulate the transmit procedure
 *
 * This routine implements the NTP transmit procedure for a reference
 * clock. This provides a mechanism to call the driver at the NTP poll
 * interval, as well as provides a reachability mechanism to detect a
 * broken radio or other madness.
 */
void
refclock_transmit(
	struct peer *peer	/* peer structure pointer */
	)
{
	u_char clktype;
	int unit;

	clktype = peer->refclktype;
	unit = peer->refclkunit;
	peer->sent++;
	get_systime(&peer->xmt);

	/*
	 * This is a ripoff of the peer transmit routine, but
	 * specialized for reference clocks. We do a little less
	 * protocol here and call the driver-specific transmit routine.
	 */
	if (peer->burst == 0) {
		u_char oreach;
#ifdef DEBUG
		if (debug)
			printf("refclock_transmit: at %ld %s\n",
			    current_time, stoa(&(peer->srcadr)));
#endif

		/*
		 * Update reachability and poll variables like the
		 * network code.
		 */
		oreach = peer->reach & 0xfe;	/* reach sans newest bit */
		peer->reach <<= 1;
		/* no response in the last four polls: flush the filter */
		if (!(peer->reach & 0x0f))
			clock_filter(peer, 0., 0., MAXDISPERSE);
		peer->outdate = current_time;
		if (!peer->reach) {
			/* just became unreachable -- report once */
			if (oreach) {
				report_event(PEVNT_UNREACH, peer, NULL);
				peer->timereachable = current_time;
			}
		} else {
			/* reachable: start a burst if so configured */
			if (peer->flags & FLAG_BURST)
				peer->burst = NSTAGE;
		}
	} else {
		peer->burst--;
	}
	/* hand off to the driver's poll routine, if it has one */
	peer->procptr->inpoll = TRUE;
	if (refclock_conf[clktype]->clock_poll != noentry)
		(refclock_conf[clktype]->clock_poll)(unit, peer);
	poll_update(peer, peer->hpoll, 0);
}
409 | | |
410 | | |
/*
 * refclock_cmpl_fp - three-way comparison of two doubles, for qsort().
 *
 * Returns -1, 0 or 1 as *p1 is less than, equal to, or greater than
 * *p2, giving an ascending sort order.
 */
static int
refclock_cmpl_fp(
	const void *p1,
	const void *p2
	)
{
	const double a = *(const double *)p1;
	const double b = *(const double *)p2;

	/* branch-free three-way compare */
	return (a > b) - (a < b);
}
429 | | |
/*
 * Get number of available samples
 *
 * Returns the count of unprocessed samples in the median filter ring,
 * using modulo or mask arithmetic depending on whether MAXSTAGE is a
 * power of two (chosen at compile time).
 */
int
refclock_samples_avail(
	struct refclockproc const * pp
	)
{
	u_int	na;

#   if MAXSTAGE & (MAXSTAGE - 1)

	na = pp->coderecv - pp->codeproc;
	if (na > MAXSTAGE)	/* unsigned wrap: coderecv behind codeproc */
		na += MAXSTAGE;

#   else

	na = (pp->coderecv - pp->codeproc) & (MAXSTAGE - 1);

#   endif
	return na;
}
453 | | |
454 | | /* |
455 | | * Expire (remove) samples from the tail (oldest samples removed) |
456 | | * |
457 | | * Returns number of samples deleted |
458 | | */ |
459 | | int |
460 | | refclock_samples_expire( |
461 | | struct refclockproc * pp, |
462 | | int nd |
463 | | ) |
464 | 0 | { |
465 | 0 | u_int na; |
466 | |
|
467 | 0 | if (nd <= 0) |
468 | 0 | return 0; |
469 | | |
470 | | # if MAXSTAGE & (MAXSTAGE - 1) |
471 | | |
472 | | na = pp->coderecv - pp->codeproc; |
473 | | if (na > MAXSTAGE) |
474 | | na += MAXSTAGE; |
475 | | if ((u_int)nd < na) |
476 | | nd = na; |
477 | | pp->codeproc = (pp->codeproc + nd) % MAXSTAGE; |
478 | | |
479 | | # else |
480 | | |
481 | 0 | na = (pp->coderecv - pp->codeproc) & (MAXSTAGE - 1); |
482 | 0 | if ((u_int)nd > na) |
483 | 0 | nd = (int)na; |
484 | 0 | pp->codeproc = (pp->codeproc + nd) & (MAXSTAGE - 1); |
485 | |
|
486 | 0 | # endif |
487 | 0 | return nd; |
488 | 0 | } |
489 | | |
/*
 * refclock_process_offset - update median filter
 *
 * This routine uses the given offset and timestamps to construct a new
 * entry in the median filter circular buffer. Samples that overflow the
 * filter are quietly discarded.
 */
void
refclock_process_offset(
	struct refclockproc *pp,	/* refclock structure pointer */
	l_fp lasttim,			/* last timecode timestamp */
	l_fp lastrec,			/* last receive timestamp */
	double fudge
	)
{
	l_fp lftemp;
	double doffset;

	pp->lastrec = lastrec;
	/* offset = timecode time - receive time, as a double */
	lftemp = lasttim;
	L_SUB(&lftemp, &lastrec);
	LFPTOD(&lftemp, doffset);
	clk_add_sample(pp, doffset + fudge);
	/* may terminate a burst early if enough samples are queued */
	refclock_checkburst(pp->io.srcclock, pp);
}
515 | | |
516 | | |
/*
 * refclock_process - process a sample from the clock
 * refclock_process_f - refclock_process with other than time1 fudge
 *
 * This routine converts the timecode in the form days, hours, minutes,
 * seconds and milliseconds/microseconds to internal timestamp format,
 * then constructs a new entry in the median filter circular buffer.
 * Return success (1) if the data are correct and consistent with the
 * conventional calendar.
 *
 * Important for PPS users: Normally, the pp->lastrec is set to the
 * system time when the on-time character is received and the pp->year,
 * ..., pp->second decoded and the seconds fraction pp->nsec in
 * nanoseconds). When a PPS offset is available, pp->nsec is forced to
 * zero and the fraction for pp->lastrec is set to the PPS offset.
 */
int
refclock_process_f(
	struct refclockproc *pp,	/* refclock structure pointer */
	double fudge
	)
{
	l_fp offset, ltemp;

	/*
	 * Compute the timecode timestamp from the days, hours, minutes,
	 * seconds and milliseconds/microseconds of the timecode. Use
	 * clocktime() for the aggregate seconds and the msec/usec for
	 * the fraction, when present. Note that this code relies on the
	 * file system time for the years and does not use the years of
	 * the timecode.
	 */
	if (!clocktime(pp->day, pp->hour, pp->minute, pp->second, GMT,
		       pp->lastrec.l_ui, &pp->yearstart, &offset.l_ui))
		return (0);	/* inconsistent with the calendar */

	/* add the sub-second part from pp->nsec */
	offset.l_uf = 0;
	DTOLFP(pp->nsec / 1e9, &ltemp);
	L_ADD(&offset, &ltemp);
	refclock_process_offset(pp, offset, pp->lastrec, fudge);
	return (1);
}
559 | | |
560 | | |
/* Convenience wrapper: refclock_process_f() with the standard time1
 * fudge factor.
 */
int
refclock_process(
	struct refclockproc *pp		/* refclock structure pointer */
	)
{
	return refclock_process_f(pp, pp->fudgetime1);
}
568 | | |
569 | | |
/*
 * refclock_sample - process a pile of samples from the clock
 *
 * This routine implements a recursive median filter to suppress spikes
 * in the data, as well as determine a performance statistic. It
 * calculates the mean offset and RMS jitter. A time adjustment
 * fudgetime1 can be added to the final offset to compensate for various
 * systematic errors. The routine returns the number of samples
 * processed, which could be zero.
 */
static int
refclock_sample(
	struct refclockproc *pp		/* refclock structure pointer */
	)
{
	size_t	i, j, k, m, n;
	double	off[MAXSTAGE];

	/*
	 * Copy the raw offsets and sort into ascending order. Don't do
	 * anything if the buffer is empty.
	 */
	n = 0;
	while (pp->codeproc != pp->coderecv)
		off[n++] = clk_pop_sample(pp);
	if (n == 0)
		return (0);

	if (n > 1)
		qsort(off, n, sizeof(off[0]), refclock_cmpl_fp);

	/*
	 * Reject the furthest from the median of the samples until
	 * approximately 60 percent of the samples remain.
	 *
	 * [Bug 3672] The elimination is now based on the proper
	 * definition of the median. The true median is not calculated
	 * directly, though.
	 */
	i = 0; j = n;		/* [i, j) is the surviving window */
	m = n - (n * 4) / 10;	/* keep ~60 percent of the samples */
	while ((k = j - i) > m) {
		k = (k - 1) >> 1;
		/* drop whichever end is further from the median */
		if ((off[j - 1] - off[j - k - 1]) < (off[i + k] - off[i]))
			i++;	/* reject low end */
		else
			j--;	/* reject high end */
	}

	/*
	 * Determine the offset and jitter.
	 */
	pp->offset = off[i];
	pp->jitter = 0;
	for (k = i + 1; k < j; k++) {
		pp->offset += off[k];
		pp->jitter += SQUARE(off[k] - off[k - 1]);
	}
	pp->offset /= m;
	m -= (m > 1);	/* only (m-1) terms attribute to jitter! */
	pp->jitter = max(SQRT(pp->jitter / m), LOGTOD(sys_precision));

	/*
	 * If the source has a jitter that cannot be estimated, because
	 * it is not statistic jitter, the source will be detected as
	 * falseticker sooner or later. Enforcing a minimal jitter value
	 * avoids a too low estimation while still detecting higher jitter.
	 *
	 * Note that this changes the refclock samples and ends up in the
	 * clock dispersion, not the clock jitter, despite being called
	 * jitter. To see the modified values, check the NTP clock variable
	 * "filtdisp", not "jitter".
	 */
	pp->jitter = max(pp->jitter, pp->fudgeminjitter);

#ifdef DEBUG
	if (debug)
		printf(
		    "refclock_sample: n %d offset %.6f disp %.6f jitter %.6f\n",
		    (int)n, pp->offset, pp->disp, pp->jitter);
#endif
	return (int)n;
}
653 | | |
654 | | |
/*
 * refclock_receive - simulate the receive and packet procedures
 *
 * This routine simulates the NTP receive and packet procedures for a
 * reference clock. This provides a mechanism in which the ordinary NTP
 * filter, selection and combining algorithms can be used to suppress
 * misbehaving radios and to mitigate between them when more than one is
 * available for backup.
 */
void
refclock_receive(
	struct peer *peer	/* peer structure pointer */
	)
{
	struct refclockproc *pp;

#ifdef DEBUG
	if (debug)
		printf("refclock_receive: at %lu %s\n",
		    current_time, stoa(&peer->srcadr));
#endif

	/*
	 * Do a little sanity dance and update the peer structure. Groom
	 * the median filter samples and give the data to the clock
	 * filter.
	 */
	pp = peer->procptr;
	pp->inpoll = FALSE;
	peer->leap = pp->leap;
	if (peer->leap == LEAP_NOTINSYNC)
		return;		/* driver has no valid time yet */

	peer->received++;
	peer->timereceived = current_time;
	if (!peer->reach) {
		/* first response after being unreachable */
		report_event(PEVNT_REACH, peer, NULL);
		peer->timereachable = current_time;
	}
	peer->reach = (peer->reach << (peer->reach & 1)) | 1;
	peer->reftime = pp->lastref;
	peer->aorg = pp->lastrec;
	peer->rootdisp = pp->disp;
	get_systime(&peer->dst);
	if (!refclock_sample(pp))
		return;		/* no samples to process */

	clock_filter(peer, pp->offset, 0., pp->jitter);
	/* optional automatic fudge calibration against the PPS peer */
	if (cal_enable && fabs(last_offset) < sys_mindisp && sys_peer !=
	    NULL) {
		if (sys_peer->refclktype == REFCLK_ATOM_PPS &&
		    peer->refclktype != REFCLK_ATOM_PPS)
			pp->fudgetime1 -= pp->offset * FUDGEFAC;
	}
}
710 | | |
711 | | |
/*
 * refclock_gtlin - groom next input line and extract timestamp
 *
 * This routine processes the timecode received from the clock and
 * strips the parity bit and control characters. It returns the number
 * of characters in the line followed by a NULL character ('\0'), which
 * is not included in the count. In case of an empty line, the previous
 * line is preserved.
 */
int
refclock_gtlin(
	struct recvbuf *rbufp,	/* receive buffer pointer */
	char	*lineptr,	/* current line pointer */
	int	bmax,		/* remaining characters in line */
	l_fp	*tsptr		/* pointer to timestamp returned */
	)
{
	const char *sp, *spend;
	char	   *dp, *dpend;
	int         dlen;

	if (bmax <= 0)
		return (0);

	dp    = lineptr;
	dpend = dp + bmax - 1;	/* leave room for NUL pad */
	sp    = (const char *)rbufp->recv_buffer;
	spend = sp + rbufp->recv_length;

	while (sp != spend && dp != dpend) {
		char c;

		/* strip parity bit, keep only printable ASCII */
		c = *sp++ & 0x7f;
		if (c >= 0x20 && c < 0x7f)
			*dp++ = c;
	}
	/* Get length of data written to the destination buffer. If
	 * zero, do *not* place a NUL byte to preserve the previous
	 * buffer content.
	 */
	dlen = dp - lineptr;
	if (dlen)
	    *dp  = '\0';
	*tsptr = rbufp->recv_time;
	DPRINTF(2, ("refclock_gtlin: fd %d time %s timecode %d %s\n",
		    rbufp->fd, ulfptoa(&rbufp->recv_time, 6), dlen,
		    (dlen != 0)
			? lineptr
			: ""));
	return (dlen);
}
763 | | |
764 | | |
/*
 * refclock_gtraw - get next line/chunk of data
 *
 * This routine returns the raw data received from the clock in both
 * canonical or raw modes. The terminal interface routines map CR to LF.
 * In canonical mode this results in two lines, one containing data
 * followed by LF and another containing only LF. In raw mode the
 * interface routines can deliver arbitraty chunks of data from one
 * character to a maximum specified by the calling routine. In either
 * mode the routine returns the number of characters in the line
 * followed by a NULL character ('\0'), which is not included in the
 * count.
 *
 * *tsptr receives a copy of the buffer timestamp.
 */
int
refclock_gtraw(
	struct recvbuf *rbufp,	/* receive buffer pointer */
	char	*lineptr,	/* current line pointer */
	int	bmax,		/* remaining characters in line */
	l_fp	*tsptr		/* pointer to timestamp returned */
	)
{
	if (bmax <= 0)
		return (0);
	bmax -= 1; /* leave room for trailing NUL */
	/* copy no more than the buffer actually holds */
	if (bmax > rbufp->recv_length)
		bmax = rbufp->recv_length;
	memcpy(lineptr, rbufp->recv_buffer, bmax);
	lineptr[bmax] = '\0';

	*tsptr = rbufp->recv_time;
	DPRINTF(2, ("refclock_gtraw: fd %d time %s timecode %d %s\n",
		    rbufp->fd, ulfptoa(&rbufp->recv_time, 6), bmax,
		    lineptr));
	return (bmax);
}
802 | | |
/*
 * refclock_fdwrite()
 *
 * Write data to a clock device. Does the necessary result checks and
 * logging, and encapsulates OS dependencies.
 *
 * 'what' names the payload for log messages; pass NULL to suppress
 * logging entirely.  Returns the byte count written, or FDWRITE_ERROR.
 */
#ifdef SYS_WINNT
extern int async_write(int fd, const void * buf, unsigned int len);
#endif

size_t
refclock_fdwrite(
	const struct peer *	peer,
	int			fd,
	const void *		buf,
	size_t			len,
	const char *		what
	)
{
	size_t	nret, nout;
	int	nerr;

	/* clamp the request to what write() can express */
	nout = (INT_MAX > len) ? len : INT_MAX;
# ifdef SYS_WINNT
	nret = (size_t)async_write(fd, buf, (unsigned int)nout);
# else
	nret = (size_t)write(fd, buf, nout);
# endif
	if (NULL != what) {
		if (nret == FDWRITE_ERROR) {
			/* preserve errno across msyslog for the caller */
			nerr = errno;
			msyslog(LOG_INFO,
				"%s: write %s failed, fd=%d, %m",
				refnumtoa(&peer->srcadr), what,
				fd);
			errno = nerr;
		} else if (nret != len) {
			nerr = errno;
			msyslog(LOG_NOTICE,
				"%s: %s shortened, fd=%d, wrote %u of %u bytes",
				refnumtoa(&peer->srcadr), what,
				fd, (u_int)nret, (u_int)len);
			errno = nerr;
		}
	}
	return nret;
}
850 | | |
851 | | size_t |
852 | | refclock_write( |
853 | | const struct peer * peer, |
854 | | const void * buf, |
855 | | size_t len, |
856 | | const char * what |
857 | | ) |
858 | 0 | { |
859 | 0 | if ( ! (peer && peer->procptr)) { |
860 | 0 | if (NULL != what) |
861 | 0 | msyslog(LOG_INFO, |
862 | 0 | "%s: write %s failed, invalid clock peer", |
863 | 0 | refnumtoa(&peer->srcadr), what); |
864 | 0 | errno = EINVAL; |
865 | 0 | return FDWRITE_ERROR; |
866 | 0 | } |
867 | 0 | return refclock_fdwrite(peer, peer->procptr->io.fd, |
868 | 0 | buf, len, what); |
869 | 0 | } |
870 | | |
/*
 * indicate_refclock_packet()
 *
 * Passes a fragment of refclock input read from the device to the
 * driver direct input routine, which may consume it (batch it for
 * queuing once a logical unit is assembled).  If it is not so
 * consumed, queue it for the driver's receive entrypoint.
 *
 * The return value is TRUE if the data has been consumed as a fragment
 * and should not be counted as a received packet.
 */
int
indicate_refclock_packet(
	struct refclockio *	rio,
	struct recvbuf *	rb
	)
{
	/* Does this refclock use direct input routine? */
	if (rio->io_input != NULL && (*rio->io_input)(rb) == 0) {
		/*
		 * data was consumed - nothing to pass up
		 * into block input machine
		 */
		freerecvbuf(rb);

		return TRUE;
	}
	/* not consumed: hand the buffer to the input queue */
	add_full_recv_buffer(rb);

	return FALSE;
}
902 | | |
903 | | |
/*
 * process_refclock_packet()
 *
 * Used for deferred processing of 'io_input' on systems where threading
 * is used (notably Windows). This is acting as a trampoline to make the
 * real calls to the refclock functions.
 */
#ifdef HAVE_IO_COMPLETION_PORT
void
process_refclock_packet(
	struct recvbuf * rb
	)
{
	struct refclockio * rio;

	/* get the refclockio structure from the receive buffer */
	rio = &rb->recv_peer->procptr->io;

	/* call 'clock_recv' if either there is no input function or the
	 * raw input function tells us to feed the packet to the
	 * receiver.
	 */
	if (rio->io_input == NULL || (*rio->io_input)(rb) != 0) {
		/* count the packet before delivering it */
		rio->recvcount++;
		packets_received++;
		handler_pkts++;
		(*rio->clock_recv)(rb);
	}
}
#endif	/* HAVE_IO_COMPLETION_PORT */
934 | | |
935 | | |
/*
 * The following code does not apply to WINNT & VMS ...
 */
#if !defined(SYS_VXWORKS) && !defined(SYS_WINNT)
#if defined(HAVE_TERMIOS) || defined(HAVE_SYSV_TTYS) || defined(HAVE_BSD_TTYS)

/*
 * refclock_open - open serial port for reference clock
 *
 * This routine opens a serial port for I/O and sets default options
 * (via refclock_setup() and refclock_ioctl()). It returns the file
 * descriptor if successful, or logs an error and returns -1. The
 * descriptor returned is guaranteed to be nonzero; any configured
 * clock-device override for 'srcadr' takes precedence over 'dev'.
 */
int
refclock_open(
	const sockaddr_u *srcadr,
 	const char	*dev,	/* device name pointer */
	u_int		speed,	/* serial port speed (code) */
	u_int		lflags	/* line discipline flags */
	)
{
	const char *cdev;
	int	fd;
	int	omode;
#ifdef O_NONBLOCK
	char	trash[128];	/* litter bin for old input data */
#endif

	/*
	 * Open serial port and set default options
	 */
	omode = O_RDWR;
#ifdef O_NONBLOCK
	omode |= O_NONBLOCK;	/* so we can drain stale input below */
#endif
#ifdef O_NOCTTY
	omode |= O_NOCTTY;	/* never become the controlling tty */
#endif

	/* a 'device' config entry for this address overrides 'dev' */
	if (NULL != (cdev = clockdev_lookup(srcadr, 0)))
		dev = cdev;

	fd = open(dev, omode, 0777);
	/* refclock_open() long returned 0 on failure, avoid it. */
	if (0 == fd) {
		/* move the descriptor off 0 so callers can keep
		 * treating 0 as the failure/unused marker */
		fd = dup(0);
		SAVE_ERRNO(
			close(0);
		)
	}
	if (fd < 0) {
		SAVE_ERRNO(
			msyslog(LOG_ERR, "refclock_open %s: %m", dev);
		)
		return -1;
	}
	if (!refclock_setup(fd, speed, lflags)) {
		close(fd);
		return -1;
	}
	if (!refclock_ioctl(fd, lflags)) {
		close(fd);
		return -1;
	}
	msyslog(LOG_NOTICE, "%s serial %s open at %d bps",
		refnumtoa(srcadr), dev, symBaud2numBaud(speed));

#ifdef O_NONBLOCK
	/*
	 * We want to make sure there is no pending trash in the input
	 * buffer. Since we have non-blocking IO available, this is a
	 * good moment to read and dump all available outdated stuff
	 * that might have become toxic for the driver.
	 * (EINTR just means "try again"; any other error or EOF ends
	 * the drain loop.)
	 */
	while (read(fd, trash, sizeof(trash)) > 0 || errno == EINTR)
		/*NOP*/;
#endif
	return fd;
}
1015 | | |
1016 | | |
/*
 * refclock_setup - initialize terminal interface structure
 *
 * Configures the serial line referenced by 'fd' for refclock use:
 * canonical (line-oriented) input by default, 8N1 at the given speed,
 * CR mapped to NL, break ignored, with the LDISC_* bits in 'lflags'
 * selecting raw mode, echo, remote (modem) connection or 7O1 framing.
 * Exactly one of the termios / termio / sgtty sections below is
 * compiled in. Returns nonzero (1/TRUE) on success, FALSE on failure
 * after logging the reason.
 */
int
refclock_setup(
	int	fd,		/* file descriptor */
	u_int	speed,		/* serial port speed (code) */
	u_int	lflags		/* line discipline flags */
	)
{
	int	i;
	TTY	ttyb, *ttyp;

	/*
	 * By default, the serial line port is initialized in canonical
	 * (line-oriented) mode at specified line speed, 8 bits and no
	 * parity. LF ends the line and CR is mapped to LF. The break,
	 * erase and kill functions are disabled. There is a different
	 * section for each terminal interface, as selected at compile
	 * time. The flag bits can be used to set raw mode and echo.
	 */
	ttyp = &ttyb;
#ifdef HAVE_TERMIOS

	/*
	 * POSIX serial line parameters (termios interface)
	 */
	if (tcgetattr(fd, ttyp) < 0) {
		SAVE_ERRNO(
			msyslog(LOG_ERR,
				"refclock_setup fd %d tcgetattr: %m",
				fd);
		)
		return FALSE;
	}

	/*
	 * Set canonical mode and local connection; set specified speed,
	 * 8 bits and no parity; map CR to NL; ignore break.
	 * A zero 'speed' leaves the line parameters untouched.
	 */
	if (speed) {
		u_int	ltemp = 0;

		ttyp->c_iflag = IGNBRK | IGNPAR | ICRNL;
		ttyp->c_oflag = 0;
		ttyp->c_cflag = CS8 | CLOCAL | CREAD;
		if (lflags & LDISC_7O1) {
			/* HP Z3801A needs 7-bit, odd parity */
			ttyp->c_cflag = CS7 | PARENB | PARODD | CLOCAL | CREAD;
		}
		cfsetispeed(&ttyb, speed);
		cfsetospeed(&ttyb, speed);
		/* disable all control characters (erase, kill, ...) */
		for (i = 0; i < NCCS; ++i)
			ttyp->c_cc[i] = '\0';

#if defined(TIOCMGET) && !defined(SCO5_CLOCK)

		/*
		 * If we have modem control, check to see if modem leads
		 * are active; if so, set remote connection. This is
		 * necessary for the kernel pps mods to work.
		 */
		if (ioctl(fd, TIOCMGET, (char *)&ltemp) < 0)
			msyslog(LOG_ERR,
				"refclock_setup fd %d TIOCMGET: %m", fd);
#ifdef DEBUG
		if (debug)
			printf("refclock_setup fd %d modem status: 0x%x\n",
			    fd, ltemp);
#endif
		if (ltemp & TIOCM_DSR && lflags & LDISC_REMOTE)
			ttyp->c_cflag &= ~CLOCAL;
#endif /* TIOCMGET */
	}

	/*
	 * Set raw and echo modes. These can be changed on-fly.
	 */
	ttyp->c_lflag = ICANON;
	if (lflags & LDISC_RAW) {
		ttyp->c_lflag = 0;
		ttyp->c_iflag = 0;
		ttyp->c_cc[VMIN] = 1;	/* deliver each byte as it arrives */
	}
	if (lflags & LDISC_ECHO)
		ttyp->c_lflag |= ECHO;
	if (tcsetattr(fd, TCSANOW, ttyp) < 0) {
		SAVE_ERRNO(
			msyslog(LOG_ERR,
				"refclock_setup fd %d TCSANOW: %m",
				fd);
		)
		return FALSE;
	}

	/*
	 * flush input and output buffers to discard any outdated stuff
	 * that might have become toxic for the driver. Failing to do so
	 * is logged, but we keep our fingers crossed otherwise.
	 */
	if (tcflush(fd, TCIOFLUSH) < 0)
		msyslog(LOG_ERR, "refclock_setup fd %d tcflush(): %m",
			fd);
#endif /* HAVE_TERMIOS */

#ifdef HAVE_SYSV_TTYS

	/*
	 * System V serial line parameters (termio interface)
	 *
	 */
	if (ioctl(fd, TCGETA, ttyp) < 0) {
		SAVE_ERRNO(
			msyslog(LOG_ERR,
				"refclock_setup fd %d TCGETA: %m",
				fd);
		)
		return FALSE;
	}

	/*
	 * Set canonical mode and local connection; set specified speed,
	 * 8 bits and no parity; map CR to NL; ignore break.
	 * (termio encodes the speed directly in c_cflag.)
	 */
	if (speed) {
		u_int	ltemp = 0;

		ttyp->c_iflag = IGNBRK | IGNPAR | ICRNL;
		ttyp->c_oflag = 0;
		ttyp->c_cflag = speed | CS8 | CLOCAL | CREAD;
		for (i = 0; i < NCCS; ++i)
			ttyp->c_cc[i] = '\0';

#if defined(TIOCMGET) && !defined(SCO5_CLOCK)

		/*
		 * If we have modem control, check to see if modem leads
		 * are active; if so, set remote connection. This is
		 * necessary for the kernel pps mods to work.
		 */
		if (ioctl(fd, TIOCMGET, (char *)&ltemp) < 0)
			msyslog(LOG_ERR,
				"refclock_setup fd %d TIOCMGET: %m", fd);
#ifdef DEBUG
		if (debug)
			printf("refclock_setup fd %d modem status: %x\n",
			    fd, ltemp);
#endif
		if (ltemp & TIOCM_DSR)
			ttyp->c_cflag &= ~CLOCAL;
#endif /* TIOCMGET */
	}

	/*
	 * Set raw and echo modes. These can be changed on-fly.
	 */
	ttyp->c_lflag = ICANON;
	if (lflags & LDISC_RAW) {
		ttyp->c_lflag = 0;
		ttyp->c_iflag = 0;
		ttyp->c_cc[VMIN] = 1;
	}
	if (ioctl(fd, TCSETA, ttyp) < 0) {
		SAVE_ERRNO(
			msyslog(LOG_ERR,
				"refclock_setup fd %d TCSETA: %m", fd);
		)
		return FALSE;
	}
#endif /* HAVE_SYSV_TTYS */

#ifdef HAVE_BSD_TTYS

	/*
	 * 4.3bsd serial line parameters (sgttyb interface)
	 */
	if (ioctl(fd, TIOCGETP, (char *)ttyp) < 0) {
		SAVE_ERRNO(
			msyslog(LOG_ERR,
				"refclock_setup fd %d TIOCGETP: %m",
				fd);
		)
		return FALSE;
	}
	if (speed)
		ttyp->sg_ispeed = ttyp->sg_ospeed = speed;
	ttyp->sg_flags = EVENP | ODDP | CRMOD;
	if (ioctl(fd, TIOCSETP, (char *)ttyp) < 0) {
		SAVE_ERRNO(
			msyslog(LOG_ERR, "refclock_setup TIOCSETP: %m");
		)
		return FALSE;
	}
#endif /* HAVE_BSD_TTYS */
	return(1);
}
1213 | | #endif /* HAVE_TERMIOS || HAVE_SYSV_TTYS || HAVE_BSD_TTYS */ |
1214 | | |
1215 | | |
1216 | | /* |
1217 | | * refclock_ioctl - set serial port control functions |
1218 | | * |
1219 | | * This routine attempts to hide the internal, system-specific details |
1220 | | * of serial ports. It can handle POSIX (termios), SYSV (termio) and BSD |
1221 | | * (sgtty) interfaces with varying degrees of success. The routine sets |
1222 | | * up optional features such as tty_clk. The routine returns TRUE if |
1223 | | * successful. |
1224 | | */ |
1225 | | int |
1226 | | refclock_ioctl( |
1227 | | int fd, /* file descriptor */ |
1228 | | u_int lflags /* line discipline flags */ |
1229 | | ) |
1230 | 0 | { |
1231 | | /* |
1232 | | * simply return TRUE if no UNIX line discipline is supported |
1233 | | */ |
1234 | 0 | DPRINTF(1, ("refclock_ioctl: fd %d flags 0x%x\n", fd, lflags)); |
1235 | |
|
1236 | 0 | return TRUE; |
1237 | 0 | } |
1238 | | #endif /* !defined(SYS_VXWORKS) && !defined(SYS_WINNT) */ |
1239 | | |
1240 | | |
/*
 * refclock_control - set and/or return clock values
 *
 * This routine is used mainly for debugging. It returns designated
 * values from the interface structure that can be displayed using
 * ntpdc and the clockstat command. It can also be used to initialize
 * configuration variables, such as fudgetimes, fudgevalues, reference
 * ID and stratum. 'in' and 'out' may each be NULL to skip the
 * corresponding direction. Finally the driver's own clock_control
 * hook, if any, gets a chance to act on the same data.
 */
void
refclock_control(
	sockaddr_u *srcadr,
	const struct refclockstat *in,
	struct refclockstat *out
	)
{
	struct peer *peer;
	struct refclockproc *pp;
	u_char clktype;
	int unit;

	/*
	 * Check for valid address and running peer
	 */
	if (!ISREFCLOCKADR(srcadr))
		return;

	clktype = (u_char)REFCLOCKTYPE(srcadr);
	unit = REFCLOCKUNIT(srcadr);

	peer = findexistingpeer(srcadr, NULL, NULL, -1, 0, NULL);

	if (NULL == peer)
		return;

	INSIST(peer->procptr != NULL);
	pp = peer->procptr;

	/*
	 * Initialize requested data: copy only the fields whose
	 * CLK_HAVE* bit is lit in in->haveflags.
	 */
	if (in != NULL) {
		if (in->haveflags & CLK_HAVETIME1)
			pp->fudgetime1 = in->fudgetime1;
		if (in->haveflags & CLK_HAVETIME2)
			pp->fudgetime2 = in->fudgetime2;
		if (in->haveflags & CLK_HAVEVAL1)
			/* fudge val1 sets the advertised stratum */
			peer->stratum = pp->stratum = (u_char)in->fudgeval1;
		if (in->haveflags & CLK_HAVEVAL2)
			peer->refid = pp->refid = in->fudgeval2;
		/* each flag bit is cleared, then re-set from 'in' */
		if (in->haveflags & CLK_HAVEFLAG1) {
			pp->sloppyclockflag &= ~CLK_FLAG1;
			pp->sloppyclockflag |= in->flags & CLK_FLAG1;
		}
		if (in->haveflags & CLK_HAVEFLAG2) {
			pp->sloppyclockflag &= ~CLK_FLAG2;
			pp->sloppyclockflag |= in->flags & CLK_FLAG2;
		}
		if (in->haveflags & CLK_HAVEFLAG3) {
			pp->sloppyclockflag &= ~CLK_FLAG3;
			pp->sloppyclockflag |= in->flags & CLK_FLAG3;
		}
		if (in->haveflags & CLK_HAVEFLAG4) {
			pp->sloppyclockflag &= ~CLK_FLAG4;
			pp->sloppyclockflag |= in->flags & CLK_FLAG4;
		}
		if (in->haveflags & CLK_HAVEMINJIT)
			pp->fudgeminjitter = in->fudgeminjitter;
	}

	/*
	 * Readback requested data; haveflags reports which fudge
	 * values are non-default (nonzero) and which flags are set.
	 */
	if (out != NULL) {
		out->fudgeval1 = pp->stratum;
		out->fudgeval2 = pp->refid;
		out->haveflags = CLK_HAVEVAL1 | CLK_HAVEVAL2;
		out->fudgetime1 = pp->fudgetime1;
		if (0.0 != out->fudgetime1)
			out->haveflags |= CLK_HAVETIME1;
		out->fudgetime2 = pp->fudgetime2;
		if (0.0 != out->fudgetime2)
			out->haveflags |= CLK_HAVETIME2;
		out->flags = (u_char) pp->sloppyclockflag;
		if (CLK_FLAG1 & out->flags)
			out->haveflags |= CLK_HAVEFLAG1;
		if (CLK_FLAG2 & out->flags)
			out->haveflags |= CLK_HAVEFLAG2;
		if (CLK_FLAG3 & out->flags)
			out->haveflags |= CLK_HAVEFLAG3;
		if (CLK_FLAG4 & out->flags)
			out->haveflags |= CLK_HAVEFLAG4;
		out->fudgeminjitter = pp->fudgeminjitter;
		if (0.0 != out->fudgeminjitter)
			out->haveflags |= CLK_HAVEMINJIT;

		/* statistics counters and status snapshot */
		out->timereset = current_time - pp->timestarted;
		out->polls = pp->polls;
		out->noresponse = pp->noreply;
		out->badformat = pp->badformat;
		out->baddata = pp->baddata;

		out->lastevent = pp->lastevent;
		out->currentstatus = pp->currentstatus;
		out->type = pp->type;
		out->clockdesc = pp->clockdesc;
		out->lencode = (u_short)pp->lencode;
		out->p_lastcode = pp->a_lastcode;
	}

	/*
	 * Give the stuff to the clock
	 */
	if (refclock_conf[clktype]->clock_control != noentry)
		(refclock_conf[clktype]->clock_control)(unit, in, out, peer);
}
1357 | | |
1358 | | |
/*
 * refclock_buginfo - return debugging info
 *
 * This routine is used mainly for debugging. It returns designated
 * values from the interface structure that can be displayed using
 * ntpdc and the clkbug command. The driver's own clock_buginfo hook,
 * if any, may then add or override entries.
 */
void
refclock_buginfo(
	sockaddr_u *srcadr,	/* clock address */
	struct refclockbug *bug	/* output structure */
	)
{
	struct peer *peer;
	struct refclockproc *pp;
	int clktype;
	int unit;
	unsigned u;

	/*
	 * Check for valid address and peer structure
	 */
	if (!ISREFCLOCKADR(srcadr))
		return;

	clktype = (u_char) REFCLOCKTYPE(srcadr);
	unit = REFCLOCKUNIT(srcadr);

	peer = findexistingpeer(srcadr, NULL, NULL, -1, 0, NULL);

	if (NULL == peer || NULL == peer->procptr)
		return;

	pp = peer->procptr;

	/*
	 * Copy structure values
	 */
	bug->nvalues = 8;
	/* NOTE(review): svalues (0x3f) flags only values[0..5] as
	 * signed while nvalues is 8 -- presumably yearstart/coderecv
	 * are meant to display unsigned; confirm against ntpdc. */
	bug->svalues = 0x0000003f;
	bug->values[0] = pp->year;
	bug->values[1] = pp->day;
	bug->values[2] = pp->hour;
	bug->values[3] = pp->minute;
	bug->values[4] = pp->second;
	bug->values[5] = pp->nsec;
	bug->values[6] = pp->yearstart;
	bug->values[7] = pp->coderecv;
	bug->stimes = 0xfffffffc;
	bug->times[0] = pp->lastref;
	bug->times[1] = pp->lastrec;
	/* NOTE(review): bug->ntimes is read but never set in this
	 * function; it appears the caller must initialize it -- verify
	 * against the request-handling code. */
	for (u = 2; u < bug->ntimes; u++)
		DTOLFP(pp->filter[u - 2], &bug->times[u]);

	/*
	 * Give the stuff to the clock
	 */
	if (refclock_conf[clktype]->clock_buginfo != noentry)
		(refclock_conf[clktype]->clock_buginfo)(unit, bug, peer);
}
1419 | | |
1420 | | |
1421 | | #ifdef HAVE_PPSAPI |
1422 | | /* |
1423 | | * refclock_ppsapi - initialize/update ppsapi |
1424 | | * |
1425 | | * This routine is called after the fudge command to open the PPSAPI |
1426 | | * interface for later parameter setting after the fudge command. |
1427 | | */ |
1428 | | int |
1429 | | refclock_ppsapi( |
1430 | | int fddev, /* fd device */ |
1431 | | struct refclock_atom *ap /* atom structure pointer */ |
1432 | | ) |
1433 | | { |
1434 | | if (ap->handle == 0) { |
1435 | | if (time_pps_create(fddev, &ap->handle) < 0) { |
1436 | | msyslog(LOG_ERR, |
1437 | | "refclock_ppsapi: time_pps_create: %m"); |
1438 | | return (0); |
1439 | | } |
1440 | | ZERO(ap->ts); /* [Bug 2689] defined INIT state */ |
1441 | | } |
1442 | | return (1); |
1443 | | } |
1444 | | |
1445 | | |
1446 | | /* |
1447 | | * refclock_params - set ppsapi parameters |
1448 | | * |
1449 | | * This routine is called to set the PPSAPI parameters after the fudge |
1450 | | * command. |
1451 | | */ |
1452 | | int |
1453 | | refclock_params( |
1454 | | int mode, /* mode bits */ |
1455 | | struct refclock_atom *ap /* atom structure pointer */ |
1456 | | ) |
1457 | | { |
1458 | | ZERO(ap->pps_params); |
1459 | | ap->pps_params.api_version = PPS_API_VERS_1; |
1460 | | |
1461 | | /* |
1462 | | * Solaris serial ports provide PPS pulse capture only on the |
1463 | | * assert edge. FreeBSD serial ports provide capture on the |
1464 | | * clear edge, while FreeBSD parallel ports provide capture |
1465 | | * on the assert edge. Your mileage may vary. |
1466 | | */ |
1467 | | if (mode & CLK_FLAG2) |
1468 | | ap->pps_params.mode = PPS_TSFMT_TSPEC | PPS_CAPTURECLEAR; |
1469 | | else |
1470 | | ap->pps_params.mode = PPS_TSFMT_TSPEC | PPS_CAPTUREASSERT; |
1471 | | if (time_pps_setparams(ap->handle, &ap->pps_params) < 0) { |
1472 | | msyslog(LOG_ERR, |
1473 | | "refclock_params: time_pps_setparams: %m"); |
1474 | | return (0); |
1475 | | } |
1476 | | |
1477 | | /* |
1478 | | * If flag3 is lit, select the kernel PPS if we can. |
1479 | | * |
1480 | | * Note: EOPNOTSUPP is the only 'legal' error code we deal with; |
1481 | | * it is part of the 'if we can' strategy. Any other error |
1482 | | * indicates something more sinister and makes this function fail. |
1483 | | */ |
1484 | | if (mode & CLK_FLAG3) { |
1485 | | if (time_pps_kcbind(ap->handle, PPS_KC_HARDPPS, |
1486 | | ap->pps_params.mode & ~PPS_TSFMT_TSPEC, |
1487 | | PPS_TSFMT_TSPEC) < 0) |
1488 | | { |
1489 | | if (errno != EOPNOTSUPP) { |
1490 | | msyslog(LOG_ERR, |
1491 | | "refclock_params: time_pps_kcbind: %m"); |
1492 | | return (0); |
1493 | | } |
1494 | | } else { |
1495 | | hardpps_enable = 1; |
1496 | | } |
1497 | | } |
1498 | | return (1); |
1499 | | } |
1500 | | |
1501 | | |
/*
 * refclock_pps - called once per second
 *
 * This routine is called once per second. It snatches the PPS
 * timestamp from the kernel and saves the sign-extended fraction in
 * a circular buffer for processing at the next poll event.
 * Returns 1 if a fresh, plausible sample was recorded, 0 otherwise.
 *
 * NOTE(review): the 'mode' parameter is not referenced in this body
 * (flag bits are taken from pp->sloppyclockflag instead) -- possibly
 * retained for interface compatibility; confirm with callers.
 */
int
refclock_pps(
	struct peer *peer,	/* peer structure pointer */
	struct refclock_atom *ap,	/* atom structure pointer */
	int	mode		/* mode bits */
	)
{
	struct refclockproc *pp;
	pps_info_t pps_info;
	struct timespec timeout;
	double	dtemp, dcorr, trash;

	/*
	 * We require the clock to be synchronized before setting the
	 * parameters. When the parameters have been set, fetch the
	 * most recent PPS timestamp.
	 */
	pp = peer->procptr;
	if (ap->handle == 0)
		return (0);

	if (ap->pps_params.mode == 0 && sys_leap != LEAP_NOTINSYNC) {
		if (refclock_params(pp->sloppyclockflag, ap) < 1)
			return (0);
	}
	ZERO(timeout);
	ZERO(pps_info);
	/* zero timeout: poll, do not block waiting for an edge */
	if (time_pps_fetch(ap->handle, PPS_TSFMT_TSPEC, &pps_info,
			   &timeout) < 0) {
		refclock_report(peer, CEVNT_FAULT);
		return (0);
	}
	timeout = ap->ts;	/* save old timestamp for check */
	if (ap->pps_params.mode & PPS_CAPTUREASSERT)
		ap->ts = pps_info.assert_timestamp;
	else if (ap->pps_params.mode & PPS_CAPTURECLEAR)
		ap->ts = pps_info.clear_timestamp;
	else
		return (0);

	/* [Bug 2689] Discard the first sample we read -- if the PPS
	 * source is currently down / disconnected, we have read a
	 * potentially *very* stale value here. So if our old TS value
	 * is all-zero, we consider this sample unrealiable and drop it.
	 *
	 * Note 1: a better check would compare the PPS time stamp to
	 * the current system time and drop it if it's more than say 3s
	 * away.
	 *
	 * Note 2: If we ever again get an all-zero PPS sample, the next
	 * one will be discarded. This can happen every 136yrs and is
	 * unlikely to be ever observed.
	 */
	if (0 == (timeout.tv_sec | timeout.tv_nsec))
		return (0);

	/* If the PPS source fails to deliver a new sample between
	 * polls, it regurgitates the last sample. We do not want to
	 * process the same sample multiple times.
	 */
	if (0 == memcmp(&timeout, &ap->ts, sizeof(timeout)))
		return (0);

	/*
	 * Convert to signed fraction offset, apply fudge and properly
	 * fold the correction into the [-0.5s,0.5s] range. Handle
	 * excessive fudge times, too.
	 */
	dtemp = ap->ts.tv_nsec / 1e9;
	dcorr = modf((pp->fudgetime1 - dtemp), &trash);
	if (dcorr > 0.5)
		dcorr -= 1.0;
	else if (dcorr < -0.5)
		dcorr += 1.0;

	/* phase gate check: avoid wobbling by +/-1s when too close to
	 * the switch-over point. We allow +/-400ms max phase deviation.
	 * The trade-off is clear: The smaller the limit, the less
	 * sensitive to sampling noise the clock becomes. OTOH the
	 * system must get into phase gate range by other means for the
	 * PPS clock to lock in.
	 */
	if (fabs(dcorr) > 0.4)
		return (0);

	/*
	 * record this time stamp and stuff in median filter
	 */
	pp->lastrec.l_ui = (u_int32)ap->ts.tv_sec + JAN_1970;
	pp->lastrec.l_uf = (u_int32)(dtemp * FRAC);
	clk_add_sample(pp, dcorr);
	/* possibly schedule an early poll to speed up acquisition */
	refclock_checkburst(peer, pp);

#ifdef DEBUG
	if (debug > 1)
		printf("refclock_pps: %lu %f %f\n", current_time,
		       dcorr, pp->fudgetime1);
#endif
	return (1);
}
#endif /* HAVE_PPSAPI */
1610 | | |
1611 | | |
/*
 * -------------------------------------------------------------------
 * refclock_ppsaugment(...) -- correlate with PPS edge
 *
 * This function is used to correlate a receive time stamp with a PPS
 * edge time stamp. It applies the necessary fudges and then tries to
 * move the receive time stamp to the corresponding edge. This can warp
 * into future, if a transmission delay of more than 500ms is not
 * compensated with a corresponding fudge time2 value, because then the
 * next PPS edge is nearer than the last. (Similiar to what the PPS ATOM
 * driver does, but we deal with full time stamps here, not just phase
 * shift information.) Likewise, a negative fudge time2 value must be
 * used if the reference time stamp correlates with the *following* PPS
 * pulse.
 *
 * Note that the receive time fudge value only needs to move the receive
 * stamp near a PPS edge but that close proximity is not required;
 * +/-100ms precision should be enough. But since the fudge value will
 * probably also be used to compensate the transmission delay when no
 * PPS edge can be related to the time stamp, it's best to get it as
 * close as possible.
 *
 * It should also be noted that the typical use case is matching to the
 * preceeding edge, as most units relate their sentences to the current
 * second.
 *
 * The function returns FALSE if there is no correlation possible, TRUE
 * otherwise. Reason for failures are:
 *
 * - no PPS/ATOM unit given
 * - PPS stamp is stale (that is, the difference between the PPS stamp
 * and the corrected time stamp would exceed two seconds)
 * - The phase difference is too close to 0.5, and the decision wether
 * to move up or down is too sensitive to noise.
 *
 * On output, the receive time stamp is updated with the 'fixed' receive
 * time.
 * -------------------------------------------------------------------
 */

int
refclock_ppsaugment(
	const struct refclock_atom * ap	,	/* for PPS io */
	l_fp *			rcvtime	,
	double			rcvfudge,	/* i/o read fudge */
	double			ppsfudge	/* pps fudge */
	)
{
	l_fp		delta[1];

#ifdef HAVE_PPSAPI

	pps_info_t	pps_info;
	struct timespec timeout;
	l_fp		stamp[1];
	uint32_t	phase;

	/* Noise lock gap bounds: 32-bit fixed-point fractions of one
	 * second, 1932735284/2^32 ~= 0.45 and 2362232013/2^32 ~= 0.55.
	 */
	static const uint32_t s_plim_hi = UINT32_C(1932735284);
	static const uint32_t s_plim_lo = UINT32_C(2362232013);

	/* fixup receive time in case we have to bail out early */
	DTOLFP(rcvfudge, delta);
	L_SUB(rcvtime, delta);

	if (NULL == ap)
		return FALSE;

	ZERO(timeout);
	ZERO(pps_info);

	/* fetch PPS stamp from ATOM block (zero timeout: no blocking) */
	if (time_pps_fetch(ap->handle, PPS_TSFMT_TSPEC,
			   &pps_info, &timeout) < 0)
		return FALSE; /* can't get time stamps */

	/* get last active PPS edge before receive */
	if (ap->pps_params.mode & PPS_CAPTUREASSERT)
		timeout = pps_info.assert_timestamp;
	else if (ap->pps_params.mode & PPS_CAPTURECLEAR)
		timeout = pps_info.clear_timestamp;
	else
		return FALSE; /* WHICH edge, please?!? */

	/* convert PPS stamp to l_fp and apply fudge */
	*stamp = tspec_stamp_to_lfp(timeout);
	DTOLFP(ppsfudge, delta);
	L_SUB(stamp, delta);

	/* Get difference between PPS stamp (--> yield) and receive time
	 * (--> base)
	 */
	*delta = *stamp;
	L_SUB(delta, rcvtime);

	/* check if either the PPS or the STAMP is stale in relation
	 * to each other. Bail if it is so... (integer seconds outside
	 * [-2,+2) mean the stamps are more than ~2s apart)
	 */
	phase = delta->l_ui;
	if (phase >= 2 && phase < (uint32_t)-2)
		return FALSE;	/* PPS is stale, don't use it */

	/* If the phase is too close to 0.5, the decision whether to
	 * move up or down is becoming noise sensitive. That is, we
	 * might amplify usec noise between samples into seconds with a
	 * simple threshold. This can be solved by a Schmitt Trigger
	 * characteristic, but that would also require additional state
	 * where we could remember previous decisions. Easier to play
	 * dead duck and wait for the conditions to become clear.
	 */
	phase = delta->l_uf;
	if (phase > s_plim_hi && phase < s_plim_lo)
		return FALSE;	/* we're in the noise lock gap */

	/* sign-extend fraction into seconds */
	delta->l_ui = UINT32_C(0) - ((phase >> 31) & 1);
	/* add it up now */
	L_ADD(rcvtime, delta);
	return TRUE;

# else /* have no PPS support at all */

	/* just fixup receive time and fail */
	UNUSED_ARG(ap);
	UNUSED_ARG(ppsfudge);

	DTOLFP(rcvfudge, delta);
	L_SUB(rcvtime, delta);
	return FALSE;

# endif
}
1743 | | |
1744 | | /* |
1745 | | * ------------------------------------------------------------------- |
1746 | | * check if it makes sense to schedule an 'early' poll to get the clock |
1747 | | * up fast after start or longer signal dropout. |
1748 | | */ |
1749 | | static void |
1750 | | refclock_checkburst( |
1751 | | struct peer * peer, |
1752 | | struct refclockproc * pp |
1753 | | ) |
1754 | 0 | { |
1755 | 0 | uint32_t limit; /* when we should poll */ |
1756 | 0 | u_int needs; /* needed number of samples */ |
1757 | | |
1758 | | /* Paranoia: stop here if peer and clockproc don't match up. |
1759 | | * And when a poll is actually pending, we don't have to do |
1760 | | * anything, either. Likewise if the reach mask is full, of |
1761 | | * course, and if the filter has stabilized. |
1762 | | */ |
1763 | 0 | if (pp->inpoll || (peer->procptr != pp) || |
1764 | 0 | ((peer->reach == 0xFF) && (peer->disp <= MAXDISTANCE))) |
1765 | 0 | return; |
1766 | | |
1767 | | /* If the next poll is soon enough, bail out, too: */ |
1768 | 0 | limit = current_time + 1; |
1769 | 0 | if (peer->nextdate <= limit) |
1770 | 0 | return; |
1771 | | |
1772 | | /* Derive the number of samples needed from the popcount of the |
1773 | | * reach mask. With less samples available, we break away. |
1774 | | */ |
1775 | 0 | needs = peer->reach; |
1776 | 0 | needs -= (needs >> 1) & 0x55; |
1777 | 0 | needs = (needs & 0x33) + ((needs >> 2) & 0x33); |
1778 | 0 | needs = (needs + (needs >> 4)) & 0x0F; |
1779 | 0 | if (needs > 6) |
1780 | 0 | needs = 6; |
1781 | 0 | else if (needs < 3) |
1782 | 0 | needs = 3; |
1783 | 0 | if (clk_cnt_sample(pp) < needs) |
1784 | 0 | return; |
1785 | | |
1786 | | /* Get serious. Reduce the poll to minimum and schedule early. |
1787 | | * (Changing the peer poll is probably in vain, as it will be |
1788 | | * re-adjusted, but maybe some time the hint will work...) |
1789 | | */ |
1790 | 0 | peer->hpoll = peer->minpoll; |
1791 | 0 | peer->nextdate = limit; |
1792 | 0 | } |
1793 | | |
1794 | | /* |
1795 | | * ------------------------------------------------------------------- |
1796 | | * Save the last timecode string, making sure it's properly truncated |
1797 | | * if necessary and NUL terminated in any case. |
1798 | | */ |
1799 | | void |
1800 | | refclock_save_lcode( |
1801 | | struct refclockproc * pp, |
1802 | | char const * tc, |
1803 | | size_t len |
1804 | | ) |
1805 | 0 | { |
1806 | 0 | if (len == (size_t)-1) |
1807 | 0 | len = strnlen(tc, sizeof(pp->a_lastcode) - 1); |
1808 | 0 | else if (len >= sizeof(pp->a_lastcode)) |
1809 | 0 | len = sizeof(pp->a_lastcode) - 1; |
1810 | |
|
1811 | 0 | pp->lencode = (u_short)len; |
1812 | 0 | memcpy(pp->a_lastcode, tc, len); |
1813 | 0 | pp->a_lastcode[len] = '\0'; |
1814 | 0 | } |
1815 | | |
1816 | | /* format data into a_lastcode */ |
1817 | | void |
1818 | | refclock_vformat_lcode( |
1819 | | struct refclockproc * pp, |
1820 | | char const * fmt, |
1821 | | va_list va |
1822 | | ) |
1823 | 0 | { |
1824 | 0 | long len; |
1825 | |
|
1826 | 0 | len = vsnprintf(pp->a_lastcode, sizeof(pp->a_lastcode), fmt, va); |
1827 | 0 | if (len <= 0) { |
1828 | 0 | len = 0; |
1829 | 0 | } else if (len >= sizeof(pp->a_lastcode)) { |
1830 | 0 | len = sizeof(pp->a_lastcode) - 1; |
1831 | 0 | } |
1832 | |
|
1833 | 0 | pp->lencode = (u_short)len; |
1834 | 0 | pp->a_lastcode[len] = '\0'; |
1835 | | /* !note! the NUL byte is needed in case vsnprintf() really fails */ |
1836 | 0 | } |
1837 | | |
/*
 * Varargs front end for refclock_vformat_lcode(): printf-style update
 * of the last timecode string.
 */
void
refclock_format_lcode(
	struct refclockproc *	pp,
	char const *		fmt,
	...
	)
{
	va_list	args;

	va_start(args, fmt);
	refclock_vformat_lcode(pp, fmt, args);
	va_end(args);
}
1851 | | |
1852 | | #endif /* REFCLOCK */ |