/src/ntp-dev/ntpd/ntp_scanner.c
Line | Count | Source |
1 | | |
2 | | /* ntp_scanner.c |
3 | | * |
4 | | * The source code for a simple lexical analyzer. |
5 | | * |
6 | | * Written By: Sachin Kamboj |
7 | | * University of Delaware |
8 | | * Newark, DE 19711 |
9 | | * Copyright (c) 2006 |
10 | | */ |
11 | | |
12 | | #ifdef HAVE_CONFIG_H |
13 | | # include <config.h> |
14 | | #endif |
15 | | |
16 | | #include <stdio.h> |
17 | | #include <ctype.h> |
18 | | #include <stdlib.h> |
19 | | #include <errno.h> |
20 | | #include <string.h> |
21 | | |
22 | | #include "ntpd.h" |
23 | | #include "ntp_config.h" |
24 | | #include "ntpsim.h" |
25 | | #include "ntp_scanner.h" |
26 | | #include "ntp_parser.h" |
27 | | |
28 | | /* ntp_keyword.h declares finite state machine and token text */ |
29 | | #include "ntp_keyword.h" |
30 | | |
31 | | |
32 | | |
33 | | /* SCANNER GLOBAL VARIABLES |
34 | | * ------------------------ |
35 | | */ |
36 | | |
#define MAX_LEXEME 128	/* The maximum size of a lexeme */
char yytext[MAX_LEXEME];	/* Buffer for storing the input text/lexeme */
u_int32 conf_file_sum;		/* Simple sum of characters read */

/* Stack of nested input sources; the head is the source currently
 * being scanned.  NULL outside a lex_init_stack()/lex_drop_stack()
 * bracket.
 */
static struct FILE_INFO * lex_stack = NULL;
42 | | |
43 | | |
44 | | |
45 | | /* CONSTANTS |
46 | | * --------- |
47 | | */ |
48 | | |
49 | | |
50 | | /* SCANNER GLOBAL VARIABLES |
51 | | * ------------------------ |
52 | | */ |
/* Characters that form single-character tokens of the grammar. */
const char special_chars[] = "{}(),;|=";


/* FUNCTIONS
 * ---------
 */

static int is_keyword(char *lexeme, follby *pfollowedby);
61 | | |
62 | | |
63 | | /* |
64 | | * keyword() - Return the keyword associated with token T_ identifier. |
65 | | * See also token_name() for the string-ized T_ identifier. |
66 | | * Example: keyword(T_Server) returns "server" |
67 | | * token_name(T_Server) returns "T_Server" |
68 | | */ |
69 | | const char * |
70 | | keyword( |
71 | | int token |
72 | | ) |
73 | 0 | { |
74 | 0 | size_t i; |
75 | 0 | const char *text; |
76 | 0 | static char sbuf[64]; |
77 | |
|
78 | 0 | i = token - LOWEST_KEYWORD_ID; |
79 | |
|
80 | 0 | switch (token) { |
81 | 0 | case T_ServerresponseFuzz: |
82 | 0 | text = "serverresponse fuzz"; |
83 | 0 | break; |
84 | | |
85 | 0 | default: |
86 | 0 | if (i < COUNTOF(keyword_text)) { |
87 | 0 | text = keyword_text[i]; |
88 | 0 | } else { |
89 | 0 | snprintf(sbuf, sizeof sbuf, |
90 | 0 | "(keyword #%u not found)", token); |
91 | 0 | text = sbuf; |
92 | 0 | } |
93 | 0 | } |
94 | | |
95 | 0 | return text; |
96 | 0 | } |
97 | | |
98 | | |
99 | | /* FILE & STRING BUFFER INTERFACE |
100 | | * ------------------------------ |
101 | | * |
102 | | * This set out as a couple of wrapper functions around the standard C |
103 | | * fgetc and ungetc functions in order to include positional |
104 | | * bookkeeping. Alas, this is no longer a good solution with nested |
105 | | * input files and the possibility to send configuration commands via |
106 | | * 'ntpdc' and 'ntpq'. |
107 | | * |
108 | | * Now there are a few functions to maintain a stack of nested input |
109 | | * sources (though nesting is only allowd for disk files) and from the |
110 | | * scanner / parser point of view there's no difference between both |
111 | | * types of sources. |
112 | | * |
113 | | * The 'fgetc()' / 'ungetc()' replacements now operate on a FILE_INFO |
114 | | * structure. Instead of trying different 'ungetc()' strategies for file |
115 | | * and buffer based parsing, we keep the backup char in our own |
116 | | * FILE_INFO structure. This is sufficient, as the parser does *not* |
117 | | * jump around via 'seek' or the like, and there's no need to |
118 | | * check/clear the backup store in other places than 'lex_getch()'. |
119 | | */ |
120 | | |
121 | | /* |
122 | | * Allocate an info structure and attach it to a file. |
123 | | * |
124 | | * Note: When 'mode' is NULL, then the INFO block will be set up to |
125 | | * contain a NULL file pointer, as suited for remote config command |
126 | | * parsing. Otherwise having a NULL file pointer is considered an error, |
127 | | * and a NULL info block pointer is returned to indicate failure! |
128 | | * |
129 | | * Note: We use a variable-sized structure to hold a copy of the file |
130 | | * name (or, more proper, the input source description). This is more |
131 | | * secure than keeping a reference to some other storage that might go |
132 | | * out of scope. |
133 | | */ |
134 | | static struct FILE_INFO * |
135 | | lex_open( |
136 | | const char *path, |
137 | | const char *mode |
138 | | ) |
139 | 0 | { |
140 | 0 | struct FILE_INFO *stream; |
141 | 0 | size_t nnambuf; |
142 | |
|
143 | 0 | nnambuf = strlen(path); |
144 | 0 | stream = emalloc_zero(sizeof(*stream) + nnambuf); |
145 | 0 | stream->curpos.nline = 1; |
146 | 0 | stream->backch = EOF; |
147 | | /* copy name with memcpy -- trailing NUL already there! */ |
148 | 0 | memcpy(stream->fname, path, nnambuf); |
149 | |
|
150 | 0 | if (NULL != mode) { |
151 | 0 | stream->fpi = fopen(path, mode); |
152 | 0 | if (NULL == stream->fpi) { |
153 | 0 | free(stream); |
154 | 0 | stream = NULL; |
155 | 0 | } |
156 | 0 | } |
157 | 0 | return stream; |
158 | 0 | } |
159 | | |
160 | | /* get next character from buffer or file. This will return any putback |
161 | | * character first; it will also make sure the last line is at least |
162 | | * virtually terminated with a '\n'. |
163 | | */ |
/* get next character from buffer or file. This will return any putback
 * character first; it will also make sure the last line is at least
 * virtually terminated with a '\n'.
 *
 * Characters above SCHAR_MAX (non-7-bit-ASCII) are silently skipped
 * while still being counted in the column position; each file character
 * delivered is folded into 'conf_file_sum'.  Returns EOF once the
 * source is exhausted or force_eof is set.
 */
static int
lex_getch(
	struct FILE_INFO *stream
	)
{
	int ch;

	if (NULL == stream || stream->force_eof)
		return EOF;

	if (EOF != stream->backch) {
		/* deliver the single pushed-back character first */
		ch = stream->backch;
		stream->backch = EOF;
		if (stream->fpi)
			conf_file_sum += ch;	/* re-add; lex_ungetch() subtracted it */
		stream->curpos.ncol++;
	} else if (stream->fpi) {
		/* fetch next 7-bit ASCII char (or EOF) from file */
		while ((ch = fgetc(stream->fpi)) != EOF && ch > SCHAR_MAX)
			stream->curpos.ncol++;
		if (EOF != ch) {
			conf_file_sum += ch;
			stream->curpos.ncol++;
		}
	} else {
		/* fetch next 7-bit ASCII char from buffer */
		const char * scan;
		scan = &remote_config.buffer[remote_config.pos];
		while ((ch = (u_char)*scan) > SCHAR_MAX) {
			scan++;
			stream->curpos.ncol++;
		}
		if ('\0' != ch) {
			scan++;
			stream->curpos.ncol++;
		} else {
			ch = EOF;	/* NUL terminates the buffer source */
		}
		remote_config.pos = (int)(scan - remote_config.buffer);
	}

	/* If the last line ends without '\n', generate one. This
	 * happens most likely on Windows, where editors often have a
	 * sloppy concept of a line.
	 */
	if (EOF == ch && stream->curpos.ncol != 0)
		ch = '\n';

	/* update scan position tallies */
	if (ch == '\n') {
		/* keep the old position so one '\n' can be pushed back */
		stream->bakpos = stream->curpos;
		stream->curpos.nline++;
		stream->curpos.ncol = 0;
	}

	return ch;
}
221 | | |
222 | | /* Note: lex_ungetch will fail to track more than one line of push |
223 | | * back. But since it guarantees only one char of back storage anyway, |
224 | | * this should not be a problem. |
225 | | */ |
226 | | static int |
227 | | lex_ungetch( |
228 | | int ch, |
229 | | struct FILE_INFO *stream |
230 | | ) |
231 | 0 | { |
232 | | /* check preconditions */ |
233 | 0 | if (NULL == stream || stream->force_eof) |
234 | 0 | return EOF; |
235 | 0 | if (EOF != stream->backch || EOF == ch) |
236 | 0 | return EOF; |
237 | | |
238 | | /* keep for later reference and update checksum */ |
239 | 0 | stream->backch = (u_char)ch; |
240 | 0 | if (stream->fpi) |
241 | 0 | conf_file_sum -= stream->backch; |
242 | | |
243 | | /* update position */ |
244 | 0 | if (stream->backch == '\n') { |
245 | 0 | stream->curpos = stream->bakpos; |
246 | 0 | stream->bakpos.ncol = -1; |
247 | 0 | } |
248 | 0 | stream->curpos.ncol--; |
249 | 0 | return stream->backch; |
250 | 0 | } |
251 | | |
252 | | /* dispose of an input structure. If the file pointer is not NULL, close |
253 | | * the file. This function does not check the result of 'fclose()'. |
254 | | */ |
255 | | static void |
256 | | lex_close( |
257 | | struct FILE_INFO *stream |
258 | | ) |
259 | 0 | { |
260 | 0 | if (NULL != stream) { |
261 | 0 | if (NULL != stream->fpi) |
262 | 0 | fclose(stream->fpi); |
263 | 0 | free(stream); |
264 | 0 | } |
265 | 0 | } |
266 | | |
267 | | /* INPUT STACK |
268 | | * ----------- |
269 | | * |
270 | | * Nested input sources are a bit tricky at first glance. We deal with |
271 | | * this problem using a stack of input sources, that is, a forward |
272 | | * linked list of FILE_INFO structs. |
273 | | * |
274 | | * This stack is never empty during parsing; while an encounter with EOF |
275 | | * can and will remove nested input sources, removing the last element |
276 | | * in the stack will not work during parsing, and the EOF condition of |
277 | | * the outermost input file remains until the parser folds up. |
278 | | */ |
279 | | |
280 | | static struct FILE_INFO * |
281 | | drop_stack_do( |
282 | | struct FILE_INFO * head |
283 | | ) |
284 | 0 | { |
285 | 0 | struct FILE_INFO * tail; |
286 | 0 | while (NULL != head) { |
287 | 0 | tail = head->st_next; |
288 | 0 | lex_close(head); |
289 | 0 | head = tail; |
290 | 0 | } |
291 | 0 | return head; |
292 | 0 | } |
293 | | |
294 | | |
295 | | |
296 | | /* Create a singleton input source on an empty lexer stack. This will |
297 | | * fail if there is already an input source, or if the underlying disk |
298 | | * file cannot be opened. |
299 | | * |
300 | | * Returns TRUE if a new input object was successfully created. |
301 | | */ |
302 | | int/*BOOL*/ |
303 | | lex_init_stack( |
304 | | const char * path, |
305 | | const char * mode |
306 | | ) |
307 | 0 | { |
308 | 0 | if (NULL != lex_stack || NULL == path) |
309 | 0 | return FALSE; |
310 | | |
311 | 0 | lex_stack = lex_open(path, mode); |
312 | 0 | return (NULL != lex_stack); |
313 | 0 | } |
314 | | |
315 | | /* This removes *all* input sources from the stack, leaving the head |
316 | | * pointer as NULL. Any attempt to parse in that state is likely to bomb |
317 | | * with segmentation faults or the like. |
318 | | * |
319 | | * In other words: Use this to clean up after parsing, and do not parse |
320 | | * anything until the next 'lex_init_stack()' succeeded. |
321 | | */ |
void
lex_drop_stack(void)
{
	/* drop_stack_do() always returns NULL after freeing the list */
	lex_stack = drop_stack_do(lex_stack);
}
327 | | |
328 | | /* Flush the lexer input stack: This will nip all input objects on the |
329 | | * stack (but keeps the current top-of-stack) and marks the top-of-stack |
330 | | * as inactive. Any further calls to lex_getch yield only EOF, and it's |
331 | | * no longer possible to push something back. |
332 | | * |
333 | | * Returns TRUE if there is a head element (top-of-stack) that was not |
334 | | * in the force-eof mode before this call. |
335 | | */ |
336 | | int/*BOOL*/ |
337 | | lex_flush_stack(void) |
338 | 0 | { |
339 | 0 | int retv = FALSE; |
340 | |
|
341 | 0 | if (NULL != lex_stack) { |
342 | 0 | retv = !lex_stack->force_eof; |
343 | 0 | lex_stack->force_eof = TRUE; |
344 | 0 | lex_stack->st_next = drop_stack_do( |
345 | 0 | lex_stack->st_next); |
346 | 0 | } |
347 | 0 | return retv; |
348 | 0 | } |
349 | | |
350 | | /* Push another file on the parsing stack. If the mode is NULL, create a |
351 | | * FILE_INFO suitable for in-memory parsing; otherwise, create a |
352 | | * FILE_INFO that is bound to a local/disc file. Note that 'path' must |
353 | | * not be NULL, or the function will fail. |
354 | | * |
355 | | * Returns TRUE if a new info record was pushed onto the stack. |
356 | | */ |
357 | | int/*BOOL*/ lex_push_file( |
358 | | const char * path, |
359 | | const char * mode |
360 | | ) |
361 | 0 | { |
362 | 0 | struct FILE_INFO * next = NULL; |
363 | |
|
364 | 0 | if (NULL != path) { |
365 | 0 | next = lex_open(path, mode); |
366 | 0 | if (NULL != next) { |
367 | 0 | next->st_next = lex_stack; |
368 | 0 | lex_stack = next; |
369 | 0 | } |
370 | 0 | } |
371 | 0 | return (NULL != next); |
372 | 0 | } |
373 | | |
374 | | /* Pop, close & free the top of the include stack, unless the stack |
375 | | * contains only a singleton input object. In that case the function |
376 | | * fails, because the parser does not expect the input stack to be |
377 | | * empty. |
378 | | * |
379 | | * Returns TRUE if an object was successfuly popped from the stack. |
380 | | */ |
381 | | int/*BOOL*/ |
382 | | lex_pop_file(void) |
383 | 0 | { |
384 | 0 | struct FILE_INFO * head = lex_stack; |
385 | 0 | struct FILE_INFO * tail = NULL; |
386 | | |
387 | 0 | if (NULL != head) { |
388 | 0 | tail = head->st_next; |
389 | 0 | if (NULL != tail) { |
390 | 0 | lex_stack = tail; |
391 | 0 | lex_close(head); |
392 | 0 | } |
393 | 0 | } |
394 | 0 | return (NULL != tail); |
395 | 0 | } |
396 | | |
397 | | /* Get include nesting level. This currently loops over the stack and |
398 | | * counts elements; but since this is of concern only with an include |
399 | | * statement and the nesting depth has a small limit, there's no |
400 | | * bottleneck expected here. |
401 | | * |
402 | | * Returns the nesting level of includes, that is, the current depth of |
403 | | * the lexer input stack. |
404 | | * |
405 | | * Note: |
406 | | */ |
407 | | size_t |
408 | | lex_level(void) |
409 | 0 | { |
410 | 0 | size_t cnt = 0; |
411 | 0 | struct FILE_INFO *ipf = lex_stack; |
412 | |
|
413 | 0 | while (NULL != ipf) { |
414 | 0 | cnt++; |
415 | 0 | ipf = ipf->st_next; |
416 | 0 | } |
417 | 0 | return cnt; |
418 | 0 | } |
419 | | |
420 | | /* check if the current input is from a file */ |
421 | | int/*BOOL*/ |
422 | | lex_from_file(void) |
423 | 0 | { |
424 | 0 | return (NULL != lex_stack) && (NULL != lex_stack->fpi); |
425 | 0 | } |
426 | | |
/* Expose the current top-of-stack input source; may be NULL when no
 * parse is in progress.
 */
struct FILE_INFO *
lex_current(void)
{
	/* this became so simple, it could be a macro. But then,
	 * lex_stack needed to be global...
	 */
	return lex_stack;
}
435 | | |
436 | | |
437 | | /* STATE MACHINES |
438 | | * -------------- |
439 | | */ |
440 | | |
441 | | /* Keywords */ |
/* Keywords
 *
 * Match 'lexeme' against the keyword finite state machine.  The FSM is
 * the generated 'sst[]' table from ntp_keyword.h; the SS_CH, SS_FB,
 * SS_OTHER_N and SS_MATCH_N accessors presumably unpack per-state
 * fields (match char, followed-by class, sibling link, child link) --
 * see ntp_keyword.h for the encoding.
 *
 * On a full match of an accepting state, stores the keyword's
 * followed-by class through 'pfollowedby' and returns the token
 * (== final state index).  Returns 0 when 'lexeme' is not a keyword;
 * '*pfollowedby' is untouched in that case.
 */
static int
is_keyword(
	char *lexeme,
	follby *pfollowedby
	)
{
	follby fb;
	int curr_s;		/* current state index */
	int token;
	int i;

	curr_s = SCANNER_INIT_S;
	token = 0;

	for (i = 0; lexeme[i]; i++) {
		/* scan the sibling chain for a state matching this char */
		while (curr_s && (lexeme[i] != SS_CH(sst[curr_s])))
			curr_s = SS_OTHER_N(sst[curr_s]);

		if (curr_s && (lexeme[i] == SS_CH(sst[curr_s]))) {
			/* accept only at end of lexeme in an accepting state */
			if ('\0' == lexeme[i + 1]
			    && FOLLBY_NON_ACCEPTING
			       != SS_FB(sst[curr_s])) {
				fb = SS_FB(sst[curr_s]);
				*pfollowedby = fb;
				token = curr_s;
				break;
			}
			/* descend to the states for the next character */
			curr_s = SS_MATCH_N(sst[curr_s]);
		} else
			break;
	}

	return token;
}
476 | | |
477 | | |
478 | | /* Integer */ |
479 | | static int |
480 | | is_integer( |
481 | | char *lexeme |
482 | | ) |
483 | 0 | { |
484 | 0 | int i; |
485 | 0 | int is_neg; |
486 | 0 | u_int u_val; |
487 | | |
488 | 0 | i = 0; |
489 | | |
490 | | /* Allow a leading minus sign */ |
491 | 0 | if (lexeme[i] == '-') { |
492 | 0 | i++; |
493 | 0 | is_neg = TRUE; |
494 | 0 | } else { |
495 | 0 | is_neg = FALSE; |
496 | 0 | } |
497 | | |
498 | | /* Check that all the remaining characters are digits */ |
499 | 0 | for (; lexeme[i] != '\0'; i++) { |
500 | 0 | if (!isdigit((u_char)lexeme[i])) |
501 | 0 | return FALSE; |
502 | 0 | } |
503 | | |
504 | 0 | if (is_neg) |
505 | 0 | return TRUE; |
506 | | |
507 | | /* Reject numbers that fit in unsigned but not in signed int */ |
508 | 0 | if (1 == sscanf(lexeme, "%u", &u_val)) |
509 | 0 | return (u_val <= INT_MAX); |
510 | 0 | else |
511 | 0 | return FALSE; |
512 | 0 | } |
513 | | |
514 | | |
515 | | /* U_int -- assumes is_integer() has returned FALSE */ |
516 | | static int |
517 | | is_u_int( |
518 | | char *lexeme |
519 | | ) |
520 | 0 | { |
521 | 0 | int i; |
522 | 0 | int is_hex; |
523 | | |
524 | 0 | i = 0; |
525 | 0 | if ('0' == lexeme[i] && 'x' == tolower((u_char)lexeme[i + 1])) { |
526 | 0 | i += 2; |
527 | 0 | is_hex = TRUE; |
528 | 0 | } else { |
529 | 0 | is_hex = FALSE; |
530 | 0 | } |
531 | | |
532 | | /* Check that all the remaining characters are digits */ |
533 | 0 | for (; lexeme[i] != '\0'; i++) { |
534 | 0 | if (is_hex && !isxdigit((u_char)lexeme[i])) |
535 | 0 | return FALSE; |
536 | 0 | if (!is_hex && !isdigit((u_char)lexeme[i])) |
537 | 0 | return FALSE; |
538 | 0 | } |
539 | | |
540 | 0 | return TRUE; |
541 | 0 | } |
542 | | |
543 | | |
544 | | /* Double */ |
545 | | static int |
546 | | is_double( |
547 | | char *lexeme |
548 | | ) |
549 | 0 | { |
550 | 0 | u_int num_digits = 0; /* Number of digits read */ |
551 | 0 | u_int i; |
552 | |
|
553 | 0 | i = 0; |
554 | | |
555 | | /* Check for an optional '+' or '-' */ |
556 | 0 | if ('+' == lexeme[i] || '-' == lexeme[i]) |
557 | 0 | i++; |
558 | | |
559 | | /* Read the integer part */ |
560 | 0 | for (; lexeme[i] && isdigit((u_char)lexeme[i]); i++) |
561 | 0 | num_digits++; |
562 | | |
563 | | /* Check for the optional decimal point */ |
564 | 0 | if ('.' == lexeme[i]) { |
565 | 0 | i++; |
566 | | /* Check for any digits after the decimal point */ |
567 | 0 | for (; lexeme[i] && isdigit((u_char)lexeme[i]); i++) |
568 | 0 | num_digits++; |
569 | 0 | } |
570 | | |
571 | | /* |
572 | | * The number of digits in both the decimal part and the |
573 | | * fraction part must not be zero at this point |
574 | | */ |
575 | 0 | if (!num_digits) |
576 | 0 | return 0; |
577 | | |
578 | | /* Check if we are done */ |
579 | 0 | if (!lexeme[i]) |
580 | 0 | return 1; |
581 | | |
582 | | /* There is still more input, read the exponent */ |
583 | 0 | if ('e' == tolower((u_char)lexeme[i])) |
584 | 0 | i++; |
585 | 0 | else |
586 | 0 | return 0; |
587 | | |
588 | | /* Read an optional Sign */ |
589 | 0 | if ('+' == lexeme[i] || '-' == lexeme[i]) |
590 | 0 | i++; |
591 | | |
592 | | /* Now read the exponent part */ |
593 | 0 | while (lexeme[i] && isdigit((u_char)lexeme[i])) |
594 | 0 | i++; |
595 | | |
596 | | /* Check if we are done */ |
597 | 0 | if (!lexeme[i]) |
598 | 0 | return 1; |
599 | 0 | else |
600 | 0 | return 0; |
601 | 0 | } |
602 | | |
603 | | |
604 | | /* is_special() - Test whether a character is a token */ |
605 | | static inline int |
606 | | is_special( |
607 | | int ch |
608 | | ) |
609 | 0 | { |
610 | 0 | return strchr(special_chars, ch) != NULL; |
611 | 0 | } |
612 | | |
613 | | |
614 | | static int |
615 | | is_EOC( |
616 | | int ch |
617 | | ) |
618 | 0 | { |
619 | 0 | if ((old_config_style && (ch == '\n')) || |
620 | 0 | (!old_config_style && (ch == ';'))) |
621 | 0 | return 1; |
622 | 0 | return 0; |
623 | 0 | } |
624 | | |
625 | | |
626 | | char * |
627 | | quote_if_needed(char *str) |
628 | 0 | { |
629 | 0 | char *ret; |
630 | 0 | size_t len; |
631 | 0 | size_t octets; |
632 | |
|
633 | 0 | len = strlen(str); |
634 | 0 | octets = len + 2 + 1; |
635 | 0 | ret = emalloc(octets); |
636 | 0 | if ('"' != str[0] |
637 | 0 | && (strcspn(str, special_chars) < len |
638 | 0 | || strchr(str, ' ') != NULL)) { |
639 | 0 | snprintf(ret, octets, "\"%s\"", str); |
640 | 0 | } else |
641 | 0 | strlcpy(ret, str, octets); |
642 | |
|
643 | 0 | return ret; |
644 | 0 | } |
645 | | |
646 | | |
647 | | static int |
648 | | create_string_token( |
649 | | char *lexeme |
650 | | ) |
651 | 0 | { |
652 | 0 | char *pch; |
653 | | |
654 | | /* |
655 | | * ignore end of line whitespace |
656 | | */ |
657 | 0 | pch = lexeme; |
658 | 0 | while (*pch && isspace((u_char)*pch)) |
659 | 0 | pch++; |
660 | |
|
661 | 0 | if (!*pch) { |
662 | 0 | yylval.Integer = T_EOC; |
663 | 0 | return yylval.Integer; |
664 | 0 | } |
665 | | |
666 | 0 | yylval.String = estrdup(lexeme); |
667 | 0 | return T_String; |
668 | 0 | } |
669 | | |
670 | | |
671 | | /* |
672 | | * yylex() - function that does the actual scanning. |
673 | | * Bison expects this function to be called yylex and for it to take no |
674 | | * input and return an int. |
675 | | * Conceptually yylex "returns" yylval as well as the actual return |
676 | | * value representing the token or type. |
677 | | */ |
678 | | int |
679 | | yylex(void) |
680 | 0 | { |
681 | 0 | static follby followedby = FOLLBY_TOKEN; |
682 | 0 | size_t i; |
683 | 0 | int instring; |
684 | 0 | int yylval_was_set; |
685 | 0 | int converted; |
686 | 0 | int token; /* The return value */ |
687 | 0 | int ch; |
688 | |
|
689 | 0 | instring = FALSE; |
690 | 0 | yylval_was_set = FALSE; |
691 | |
|
692 | 0 | do { |
693 | | /* Ignore whitespace at the beginning */ |
694 | 0 | while (EOF != (ch = lex_getch(lex_stack)) && |
695 | 0 | isspace(ch) && |
696 | 0 | !is_EOC(ch)) |
697 | | |
698 | 0 | ; /* Null Statement */ |
699 | |
|
700 | 0 | if (EOF == ch) { |
701 | |
|
702 | 0 | if ( ! lex_pop_file()) |
703 | 0 | return 0; |
704 | 0 | token = T_EOC; |
705 | 0 | goto normal_return; |
706 | |
|
707 | 0 | } else if (is_EOC(ch)) { |
708 | | |
709 | | /* end FOLLBY_STRINGS_TO_EOC effect */ |
710 | 0 | followedby = FOLLBY_TOKEN; |
711 | 0 | token = T_EOC; |
712 | 0 | goto normal_return; |
713 | |
|
714 | 0 | } else if (is_special(ch) && FOLLBY_TOKEN == followedby) { |
715 | | /* special chars are their own token values */ |
716 | 0 | token = ch; |
717 | | /* |
718 | | * '=' outside simulator configuration implies |
719 | | * a single string following as in: |
720 | | * setvar Owner = "The Boss" default |
721 | | */ |
722 | 0 | if ('=' == ch && old_config_style) |
723 | 0 | followedby = FOLLBY_STRING; |
724 | 0 | yytext[0] = (char)ch; |
725 | 0 | yytext[1] = '\0'; |
726 | 0 | goto normal_return; |
727 | 0 | } else |
728 | 0 | lex_ungetch(ch, lex_stack); |
729 | | |
730 | | /* save the position of start of the token */ |
731 | 0 | lex_stack->tokpos = lex_stack->curpos; |
732 | | |
733 | | /* Read in the lexeme */ |
734 | 0 | i = 0; |
735 | 0 | while (EOF != (ch = lex_getch(lex_stack))) { |
736 | |
|
737 | 0 | yytext[i] = (char)ch; |
738 | | |
739 | | /* Break on whitespace or a special character */ |
740 | 0 | if (isspace(ch) || is_EOC(ch) |
741 | 0 | || '"' == ch |
742 | 0 | || (FOLLBY_TOKEN == followedby |
743 | 0 | && is_special(ch))) |
744 | 0 | break; |
745 | | |
746 | | /* Read the rest of the line on reading a start |
747 | | of comment character */ |
748 | 0 | if ('#' == ch) { |
749 | 0 | while (EOF != (ch = lex_getch(lex_stack)) |
750 | 0 | && '\n' != ch) |
751 | 0 | ; /* Null Statement */ |
752 | 0 | break; |
753 | 0 | } |
754 | | |
755 | 0 | i++; |
756 | 0 | if (i >= COUNTOF(yytext)) |
757 | 0 | goto lex_too_long; |
758 | 0 | } |
759 | | /* Pick up all of the string inside between " marks, to |
760 | | * end of line. If we make it to EOL without a |
761 | | * terminating " assume it for them. |
762 | | * |
763 | | * XXX - HMS: I'm not sure we want to assume the closing " |
764 | | */ |
765 | 0 | if ('"' == ch) { |
766 | 0 | instring = TRUE; |
767 | 0 | while (EOF != (ch = lex_getch(lex_stack)) && |
768 | 0 | ch != '"' && ch != '\n') { |
769 | 0 | yytext[i++] = (char)ch; |
770 | 0 | if (i >= COUNTOF(yytext)) |
771 | 0 | goto lex_too_long; |
772 | 0 | } |
773 | | /* |
774 | | * yytext[i] will be pushed back as not part of |
775 | | * this lexeme, but any closing quote should |
776 | | * not be pushed back, so we read another char. |
777 | | */ |
778 | 0 | if ('"' == ch) |
779 | 0 | ch = lex_getch(lex_stack); |
780 | 0 | } |
781 | | /* Pushback the last character read that is not a part |
782 | | * of this lexeme. This fails silently if ch is EOF, |
783 | | * but then the EOF condition persists and is handled on |
784 | | * the next turn by the include stack mechanism. |
785 | | */ |
786 | 0 | lex_ungetch(ch, lex_stack); |
787 | |
|
788 | 0 | yytext[i] = '\0'; |
789 | 0 | } while (i == 0); |
790 | | |
791 | | /* Now return the desired token */ |
792 | | |
793 | | /* First make sure that the parser is *not* expecting a string |
794 | | * as the next token (based on the previous token that was |
795 | | * returned) and that we haven't read a string. |
796 | | */ |
797 | | |
798 | 0 | if (followedby == FOLLBY_TOKEN && !instring) { |
799 | 0 | token = is_keyword(yytext, &followedby); |
800 | 0 | if (token) { |
801 | | /* |
802 | | * T_Server is exceptional as it forces the |
803 | | * following token to be a string in the |
804 | | * non-simulator parts of the configuration, |
805 | | * but in the simulator configuration section, |
806 | | * "server" is followed by "=" which must be |
807 | | * recognized as a token not a string. |
808 | | */ |
809 | 0 | if (T_Server == token && !old_config_style) |
810 | 0 | followedby = FOLLBY_TOKEN; |
811 | 0 | goto normal_return; |
812 | 0 | } else if (is_integer(yytext)) { |
813 | 0 | yylval_was_set = TRUE; |
814 | 0 | errno = 0; |
815 | 0 | if ((yylval.Integer = strtol(yytext, NULL, 10)) == 0 |
816 | 0 | && ((errno == EINVAL) || (errno == ERANGE))) { |
817 | 0 | msyslog(LOG_ERR, |
818 | 0 | "Integer cannot be represented: %s", |
819 | 0 | yytext); |
820 | 0 | if (lex_from_file()) { |
821 | 0 | exit(1); |
822 | 0 | } else { |
823 | | /* force end of parsing */ |
824 | 0 | yylval.Integer = 0; |
825 | 0 | return 0; |
826 | 0 | } |
827 | 0 | } |
828 | 0 | token = T_Integer; |
829 | 0 | goto normal_return; |
830 | 0 | } else if (is_u_int(yytext)) { |
831 | 0 | yylval_was_set = TRUE; |
832 | 0 | if ('0' == yytext[0] && |
833 | 0 | 'x' == tolower((unsigned long)yytext[1])) |
834 | 0 | converted = sscanf(&yytext[2], "%x", |
835 | 0 | &yylval.U_int); |
836 | 0 | else |
837 | 0 | converted = sscanf(yytext, "%u", |
838 | 0 | &yylval.U_int); |
839 | 0 | if (1 != converted) { |
840 | 0 | msyslog(LOG_ERR, |
841 | 0 | "U_int cannot be represented: %s", |
842 | 0 | yytext); |
843 | 0 | if (lex_from_file()) { |
844 | 0 | exit(1); |
845 | 0 | } else { |
846 | | /* force end of parsing */ |
847 | 0 | yylval.Integer = 0; |
848 | 0 | return 0; |
849 | 0 | } |
850 | 0 | } |
851 | 0 | token = T_U_int; |
852 | 0 | goto normal_return; |
853 | 0 | } else if (is_double(yytext)) { |
854 | 0 | yylval_was_set = TRUE; |
855 | 0 | errno = 0; |
856 | 0 | if ((yylval.Double = atof(yytext)) == 0 && errno == ERANGE) { |
857 | 0 | msyslog(LOG_ERR, |
858 | 0 | "Double too large to represent: %s", |
859 | 0 | yytext); |
860 | 0 | exit(1); |
861 | 0 | } else { |
862 | 0 | token = T_Double; |
863 | 0 | goto normal_return; |
864 | 0 | } |
865 | 0 | } else { |
866 | | /* Default: Everything is a string */ |
867 | 0 | yylval_was_set = TRUE; |
868 | 0 | token = create_string_token(yytext); |
869 | 0 | goto normal_return; |
870 | 0 | } |
871 | 0 | } |
872 | | |
873 | | /* |
874 | | * Either followedby is not FOLLBY_TOKEN or this lexeme is part |
875 | | * of a string. Hence, we need to return T_String. |
876 | | * |
877 | | * _Except_ we might have a -4 or -6 flag on a an association |
878 | | * configuration line (server, peer, pool, etc.). |
879 | | * |
880 | | * This is a terrible hack, but the grammar is ambiguous so we |
881 | | * don't have a choice. [SK] |
882 | | * |
883 | | * The ambiguity is in the keyword scanner, not ntp_parser.y. |
884 | | * We do not require server addresses be quoted in ntp.conf, |
885 | | * complicating the scanner's job. To avoid trying (and |
886 | | * failing) to match an IP address or DNS name to a keyword, |
887 | | * the association keywords use FOLLBY_STRING in the keyword |
888 | | * table, which tells the scanner to force the next token to be |
889 | | * a T_String, so it does not try to match a keyword but rather |
890 | | * expects a string when -4/-6 modifiers to server, peer, etc. |
891 | | * are encountered. |
892 | | * restrict -4 and restrict -6 parsing works correctly without |
893 | | * this hack, as restrict uses FOLLBY_TOKEN. [DH] |
894 | | */ |
895 | 0 | if ('-' == yytext[0]) { |
896 | 0 | if ('4' == yytext[1]) { |
897 | 0 | token = T_Ipv4_flag; |
898 | 0 | goto normal_return; |
899 | 0 | } else if ('6' == yytext[1]) { |
900 | 0 | token = T_Ipv6_flag; |
901 | 0 | goto normal_return; |
902 | 0 | } |
903 | 0 | } |
904 | | |
905 | 0 | if (FOLLBY_STRING == followedby) |
906 | 0 | followedby = FOLLBY_TOKEN; |
907 | |
|
908 | 0 | yylval_was_set = TRUE; |
909 | 0 | token = create_string_token(yytext); |
910 | |
|
911 | 0 | normal_return: |
912 | 0 | if (T_EOC == token) |
913 | 0 | DPRINTF(10, ("\t<end of command>\n")); |
914 | 0 | else |
915 | 0 | DPRINTF(10, ("yylex: lexeme '%s' -> %s\n", yytext, |
916 | 0 | token_name(token))); |
917 | |
|
918 | 0 | if (!yylval_was_set) |
919 | 0 | yylval.Integer = token; |
920 | |
|
921 | 0 | return token; |
922 | | |
923 | 0 | lex_too_long: |
924 | | /* |
925 | | * DLH: What is the purpose of the limit of 50? |
926 | | * Is there any reason for yytext[] to be bigger? |
927 | | */ |
928 | 0 | yytext[min(sizeof(yytext) - 1, 50)] = 0; |
929 | 0 | msyslog(LOG_ERR, |
930 | 0 | "configuration item on line %d longer than limit of %lu, began with '%s'", |
931 | 0 | lex_stack->curpos.nline, (u_long)min(sizeof(yytext) - 1, 50), |
932 | 0 | yytext); |
933 | | |
934 | | /* |
935 | | * If we hit the length limit reading the startup configuration |
936 | | * file, abort. |
937 | | */ |
938 | 0 | if (lex_from_file()) |
939 | 0 | exit(sizeof(yytext) - 1); |
940 | | |
941 | | /* |
942 | | * If it's runtime configuration via ntpq :config treat it as |
943 | | * if the configuration text ended before the too-long lexeme, |
944 | | * hostname, or string. |
945 | | */ |
946 | 0 | yylval.Integer = 0; |
947 | 0 | return 0; |
948 | 0 | } |