Coverage Report

Created: 2026-04-12 06:37

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/libpcap/optimize.c
Line
Count
Source
1
/*
2
 * Copyright (c) 1988, 1989, 1990, 1991, 1993, 1994, 1995, 1996
3
 *  The Regents of the University of California.  All rights reserved.
4
 *
5
 * Redistribution and use in source and binary forms, with or without
6
 * modification, are permitted provided that: (1) source code distributions
7
 * retain the above copyright notice and this paragraph in its entirety, (2)
8
 * distributions including binary code include the above copyright notice and
9
 * this paragraph in its entirety in the documentation or other materials
10
 * provided with the distribution, and (3) all advertising materials mentioning
11
 * features or use of this software display the following acknowledgement:
12
 * ``This product includes software developed by the University of California,
13
 * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
14
 * the University nor the names of its contributors may be used to endorse
15
 * or promote products derived from this software without specific prior
16
 * written permission.
17
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
18
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
19
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
20
 *
21
 *  Optimization module for BPF code intermediate representation.
22
 */
23
24
#include <config.h>
25
26
#include <pcap-types.h>
27
28
#include <stdio.h>
29
#include <stdlib.h>
30
#include <memory.h>
31
#include <setjmp.h>
32
#include <string.h>
33
#include <limits.h> /* for SIZE_MAX */
34
#include <errno.h>
35
36
#include "pcap-int.h"
37
38
#include "gencode.h"
39
#include "optimize.h"
40
#include "diag-control.h"
41
42
#ifdef HAVE_OS_PROTO_H
43
#include "os-proto.h"
44
#endif
45
46
#ifdef BDEBUG
47
/*
48
 * The internal "debug printout" flag for the filter expression optimizer.
49
 * The code to print that stuff is present only if BDEBUG is defined, so
50
 * the flag, and the routine to set it, are defined only if BDEBUG is
51
 * defined.
52
 */
53
static int pcap_optimizer_debug;
54
55
/*
56
 * Routine to set that flag.
57
 *
58
 * This is intended for libpcap developers, not for general use.
59
 * If you want to set these in a program, you'll have to declare this
60
 * routine yourself, with the appropriate DLL import attribute on Windows;
61
 * it's not declared in any header file, and won't be declared in any
62
 * header file provided by libpcap.
63
 */
64
PCAP_API void pcap_set_optimizer_debug(int value);
65
66
PCAP_API_DEF void
67
pcap_set_optimizer_debug(int value)
68
{
69
  pcap_optimizer_debug = value;
70
}
71
72
/*
73
 * The internal "print dot graph" flag for the filter expression optimizer.
74
 * The code to print that stuff is present only if BDEBUG is defined, so
75
 * the flag, and the routine to set it, are defined only if BDEBUG is
76
 * defined.
77
 */
78
static int pcap_print_dot_graph;
79
80
/*
81
 * Routine to set that flag.
82
 *
83
 * This is intended for libpcap developers, not for general use.
84
 * If you want to set these in a program, you'll have to declare this
85
 * routine yourself, with the appropriate DLL import attribute on Windows;
86
 * it's not declared in any header file, and won't be declared in any
87
 * header file provided by libpcap.
88
 */
89
PCAP_API void pcap_set_print_dot_graph(int value);
90
91
PCAP_API_DEF void
92
pcap_set_print_dot_graph(int value)
93
{
94
  pcap_print_dot_graph = value;
95
}
96
97
#endif
98
99
/*
100
 * lowest_set_bit().
101
 *
102
 * Takes a 32-bit integer as an argument.
103
 *
104
 * If handed a non-zero value, returns the index of the lowest set bit,
105
 * counting upwards from zero.
106
 *
107
 * If handed zero, the results are platform- and compiler-dependent.
108
 * Keep it out of the light, don't give it any water, don't feed it
109
 * after midnight, and don't pass zero to it.
110
 *
111
 * This is the same as the count of trailing zeroes in the word.
112
 */
113
#if PCAP_IS_AT_LEAST_GNUC_VERSION(3,4)
114
  /*
115
   * GCC 3.4 and later; we have __builtin_ctz().
116
   */
117
0
  #define lowest_set_bit(mask) ((u_int)__builtin_ctz(mask))
118
#elif defined(_MSC_VER)
119
  /*
120
   * Visual Studio; we support only 2015 and later, so use
121
   * _BitScanForward().
122
   */
123
#include <intrin.h>
124
125
#ifndef __clang__
126
#pragma intrinsic(_BitScanForward)
127
#endif
128
129
static __forceinline u_int
130
lowest_set_bit(int mask)
131
{
132
  unsigned long bit;
133
134
  /*
135
   * Don't sign-extend mask if long is longer than int.
136
   * (It's currently not, in MSVC, even on 64-bit platforms, but....)
137
   */
138
  if (_BitScanForward(&bit, (unsigned int)mask) == 0)
139
    abort();  /* mask is zero */
140
  return (u_int)bit;
141
}
142
#else
143
  /*
144
   * POSIX.1-2001 says ffs() is in <strings.h>.  Every supported non-Windows OS
145
   * (including Linux with musl libc and uclibc-ng) has the header and (except
146
   * HP-UX) declares the function there.  HP-UX declares the function in
147
   * <string.h>, which has already been included.
148
   */
149
  #include <strings.h>
150
  #define lowest_set_bit(mask)  ((u_int)(ffs((mask)) - 1))
151
#endif
152
153
/*
154
 * Represents a deleted instruction.
155
 */
156
0
#define NOP -1
157
158
/*
159
 * Register numbers for use-def values.
160
 * 0 through BPF_MEMWORDS-1 represent the corresponding scratch memory
161
 * location.  A_ATOM is the accumulator and X_ATOM is the index
162
 * register.
163
 */
164
0
#define A_ATOM BPF_MEMWORDS
165
0
#define X_ATOM (BPF_MEMWORDS+1)
166
167
/*
168
 * This define is used to represent *both* the accumulator and
169
 * x register in use-def computations.
170
 * Currently, the use-def code assumes only one definition per instruction.
171
 */
172
0
#define AX_ATOM N_ATOMS
173
174
/*
175
 * These data structures are used in a Cocke and Schwartz style
176
 * value numbering scheme.  Since the flowgraph is acyclic,
177
 * exit values can be propagated from a node's predecessors
178
 * provided it is uniquely defined.
179
 */
180
struct valnode {
181
  int code;
182
  bpf_u_int32 v0, v1;
183
  int val;    /* the value number */
184
  struct valnode *next;
185
};
186
187
/* Integer constants mapped with the load immediate opcode. */
188
0
#define K(i) F(opt_state, BPF_LD|BPF_IMM|BPF_W, i, 0U)
189
190
struct vmapinfo {
191
  int is_const;
192
  bpf_u_int32 const_val;
193
};
194
195
typedef struct {
196
  /*
197
   * Place to longjmp to on an error.
198
   */
199
  jmp_buf top_ctx;
200
201
  /*
202
   * The buffer into which to put error message.
203
   */
204
  char *errbuf;
205
206
  /*
207
   * A flag to indicate that further optimization is needed.
208
   * Iterative passes are continued until a given pass yields no
209
   * code simplification or branch movement.
210
   */
211
  int done;
212
213
  /*
214
   * XXX - detect loops that do nothing but repeated AND/OR pullups
215
   * and edge moves.
216
   * If 100 passes in a row do nothing but that, treat that as a
217
   * sign that we're in a loop that just shuffles in a cycle in
218
   * which each pass just shuffles the code and we eventually
219
   * get back to the original configuration.
220
   *
221
   * XXX - we need a non-heuristic way of detecting, or preventing,
222
   * such a cycle.
223
   */
224
  int non_branch_movement_performed;
225
226
  u_int n_blocks;   /* number of blocks in the CFG; guaranteed to be > 0, as it's a RET instruction at a minimum */
227
  struct block **blocks;
228
  u_int n_edges;    /* twice n_blocks, so guaranteed to be > 0 */
229
  struct edge **edges;
230
231
  /*
232
   * A bit vector set representation of the dominators.
233
   * We round up the set size to the next power of two.
234
   */
235
  u_int nodewords;  /* number of 32-bit words for a bit vector of "number of nodes" bits; guaranteed to be > 0 */
236
  u_int edgewords;  /* number of 32-bit words for a bit vector of "number of edges" bits; guaranteed to be > 0 */
237
  struct block **levels;
238
  bpf_u_int32 *space;
239
240
0
#define BITS_PER_WORD (8*sizeof(bpf_u_int32))
241
/*
242
 * True if a is in uset {p}
243
 */
244
0
#define SET_MEMBER(p, a) \
245
0
((p)[(unsigned)(a) / BITS_PER_WORD] & ((bpf_u_int32)1 << ((unsigned)(a) % BITS_PER_WORD)))
246
247
/*
248
 * Add 'a' to uset p.
249
 */
250
0
#define SET_INSERT(p, a) \
251
0
(p)[(unsigned)(a) / BITS_PER_WORD] |= ((bpf_u_int32)1 << ((unsigned)(a) % BITS_PER_WORD))
252
253
/*
254
 * Delete 'a' from uset p.
255
 */
256
#define SET_DELETE(p, a) \
257
(p)[(unsigned)(a) / BITS_PER_WORD] &= ~((bpf_u_int32)1 << ((unsigned)(a) % BITS_PER_WORD))
258
259
/*
260
 * a := a intersect b
261
 * n must be guaranteed to be > 0
262
 */
263
0
#define SET_INTERSECT(a, b, n)\
264
0
{\
265
0
  bpf_u_int32 *_x = a, *_y = b;\
266
0
  u_int _n = n;\
267
0
  do *_x++ &= *_y++; while (--_n != 0);\
268
0
}
269
270
/*
271
 * a := a - b
272
 * n must be guaranteed to be > 0
273
 */
274
#define SET_SUBTRACT(a, b, n)\
275
{\
276
  bpf_u_int32 *_x = a, *_y = b;\
277
  u_int _n = n;\
278
  do *_x++ &=~ *_y++; while (--_n != 0);\
279
}
280
281
/*
282
 * a := a union b
283
 * n must be guaranteed to be > 0
284
 */
285
0
#define SET_UNION(a, b, n)\
286
0
{\
287
0
  bpf_u_int32 *_x = a, *_y = b;\
288
0
  u_int _n = n;\
289
0
  do *_x++ |= *_y++; while (--_n != 0);\
290
0
}
291
292
  uset all_dom_sets;
293
  uset all_closure_sets;
294
  uset all_edge_sets;
295
296
0
#define MODULUS 213
297
  struct valnode *hashtbl[MODULUS];
298
  bpf_u_int32 curval;
299
  bpf_u_int32 maxval;
300
301
  struct vmapinfo *vmap;
302
  struct valnode *vnode_base;
303
  struct valnode *next_vnode;
304
} opt_state_t;
305
306
typedef struct {
307
  /*
308
   * Place to longjmp to on an error.
309
   */
310
  jmp_buf top_ctx;
311
312
  /*
313
   * The buffer into which to put error message.
314
   */
315
  char *errbuf;
316
317
  /*
318
   * Some pointers used to convert the basic block form of the code,
319
   * into the array form that BPF requires.  'fstart' will point to
320
   * the malloc'd array while 'ftail' is used during the recursive
321
   * traversal.
322
   */
323
  struct bpf_insn *fstart;
324
  struct bpf_insn *ftail;
325
} conv_state_t;
326
327
static void opt_init(opt_state_t *, struct icode *);
328
static void opt_cleanup(opt_state_t *);
329
static void PCAP_NORETURN opt_error(opt_state_t *, const char *, ...)
330
    PCAP_PRINTFLIKE(2, 3);
331
static void PCAP_NORETURN conv_error(conv_state_t *, const char *, ...)
332
    PCAP_PRINTFLIKE(2, 3);
333
334
static void intern_blocks(opt_state_t *, struct icode *);
335
336
static void find_inedges(opt_state_t *, struct block *);
337
#ifdef BDEBUG
338
static void opt_dump(opt_state_t *, struct icode *);
339
#endif
340
341
static void
342
find_levels_r(opt_state_t *opt_state, struct icode *ic, struct block *b)
343
0
{
344
0
  int level;
345
346
0
  if (isMarked(ic, b))
347
0
    return;
348
349
0
  Mark(ic, b);
350
0
  b->link = 0;
351
352
0
  if (JT(b)) {
353
0
    find_levels_r(opt_state, ic, JT(b));
354
0
    find_levels_r(opt_state, ic, JF(b));
355
0
    level = max(JT(b)->level, JF(b)->level) + 1;
356
0
  } else
357
0
    level = 0;
358
0
  b->level = level;
359
0
  b->link = opt_state->levels[level];
360
0
  opt_state->levels[level] = b;
361
0
}
362
363
/*
364
 * Level graph.  The levels go from 0 at the leaves to
365
 * N_LEVELS at the root.  The opt_state->levels[] array points to the
366
 * first node of the level list, whose elements are linked
367
 * with the 'link' field of the struct block.
368
 */
369
static void
370
find_levels(opt_state_t *opt_state, struct icode *ic)
371
0
{
372
0
  memset((char *)opt_state->levels, 0, opt_state->n_blocks * sizeof(*opt_state->levels));
373
0
  unMarkAll(ic);
374
0
  find_levels_r(opt_state, ic, ic->root);
375
0
}
376
377
/*
378
 * Find dominator relationships.
379
 * Assumes graph has been leveled.
380
 */
381
static void
382
find_dom(opt_state_t *opt_state, struct block *root)
383
0
{
384
0
  u_int i;
385
0
  int level;
386
0
  struct block *b;
387
0
  bpf_u_int32 *x;
388
389
  /*
390
   * Initialize sets to contain all nodes.
391
   */
392
0
  x = opt_state->all_dom_sets;
393
  /*
394
   * In opt_init(), we've made sure the product doesn't overflow.
395
   */
396
0
  i = opt_state->n_blocks * opt_state->nodewords;
397
0
  while (i != 0) {
398
0
    --i;
399
0
    *x++ = 0xFFFFFFFFU;
400
0
  }
401
  /* Root starts off empty. */
402
0
  for (i = opt_state->nodewords; i != 0;) {
403
0
    --i;
404
0
    root->dom[i] = 0;
405
0
  }
406
407
  /* root->level is the highest level number found. */
408
0
  for (level = root->level; level >= 0; --level) {
409
0
    for (b = opt_state->levels[level]; b; b = b->link) {
410
0
      SET_INSERT(b->dom, b->id);
411
0
      if (JT(b) == 0)
412
0
        continue;
413
0
      SET_INTERSECT(JT(b)->dom, b->dom, opt_state->nodewords);
414
0
      SET_INTERSECT(JF(b)->dom, b->dom, opt_state->nodewords);
415
0
    }
416
0
  }
417
0
}
418
419
static void
420
propedom(opt_state_t *opt_state, struct edge *ep)
421
0
{
422
0
  SET_INSERT(ep->edom, ep->id);
423
0
  if (ep->succ) {
424
0
    SET_INTERSECT(ep->succ->et.edom, ep->edom, opt_state->edgewords);
425
0
    SET_INTERSECT(ep->succ->ef.edom, ep->edom, opt_state->edgewords);
426
0
  }
427
0
}
428
429
/*
430
 * Compute edge dominators.
431
 * Assumes graph has been leveled and predecessors established.
432
 */
433
static void
434
find_edom(opt_state_t *opt_state, struct block *root)
435
0
{
436
0
  u_int i;
437
0
  uset x;
438
0
  int level;
439
0
  struct block *b;
440
441
0
  x = opt_state->all_edge_sets;
442
  /*
443
   * In opt_init(), we've made sure the product doesn't overflow.
444
   */
445
0
  for (i = opt_state->n_edges * opt_state->edgewords; i != 0; ) {
446
0
    --i;
447
0
    x[i] = 0xFFFFFFFFU;
448
0
  }
449
450
  /* root->level is the highest level number found. */
451
0
  memset(root->et.edom, 0, opt_state->edgewords * sizeof(*(uset)0));
452
0
  memset(root->ef.edom, 0, opt_state->edgewords * sizeof(*(uset)0));
453
0
  for (level = root->level; level >= 0; --level) {
454
0
    for (b = opt_state->levels[level]; b != 0; b = b->link) {
455
0
      propedom(opt_state, &b->et);
456
0
      propedom(opt_state, &b->ef);
457
0
    }
458
0
  }
459
0
}
460
461
/*
462
 * Find the backwards transitive closure of the flow graph.  These sets
463
 * are backwards in the sense that we find the set of nodes that reach
464
 * a given node, not the set of nodes that can be reached by a node.
465
 *
466
 * Assumes graph has been leveled.
467
 */
468
static void
469
find_closure(opt_state_t *opt_state, struct block *root)
470
0
{
471
0
  int level;
472
0
  struct block *b;
473
474
  /*
475
   * Initialize sets to contain no nodes.
476
   */
477
0
  memset((char *)opt_state->all_closure_sets, 0,
478
0
        opt_state->n_blocks * opt_state->nodewords * sizeof(*opt_state->all_closure_sets));
479
480
  /* root->level is the highest level number found. */
481
0
  for (level = root->level; level >= 0; --level) {
482
0
    for (b = opt_state->levels[level]; b; b = b->link) {
483
0
      SET_INSERT(b->closure, b->id);
484
0
      if (JT(b) == 0)
485
0
        continue;
486
0
      SET_UNION(JT(b)->closure, b->closure, opt_state->nodewords);
487
0
      SET_UNION(JF(b)->closure, b->closure, opt_state->nodewords);
488
0
    }
489
0
  }
490
0
}
491
492
/*
493
 * Return the register number that is used by s.
494
 *
495
 * Returns ATOM_A if A is used, ATOM_X if X is used, AX_ATOM if both A and X
496
 * are used, the scratch memory location's number if a scratch memory
497
 * location is used (e.g., 0 for M[0]), or -1 if none of those are used.
498
 *
499
 * The implementation should probably change to an array access.
500
 */
501
static int
502
atomuse(struct stmt *s)
503
0
{
504
0
  int c = s->code;
505
506
0
  if (c == NOP)
507
0
    return -1;
508
509
0
  switch (BPF_CLASS(c)) {
510
511
0
  case BPF_RET:
512
0
    return (BPF_RVAL(c) == BPF_A) ? A_ATOM :
513
0
      (BPF_RVAL(c) == BPF_X) ? X_ATOM : -1;
514
515
0
  case BPF_LD:
516
0
  case BPF_LDX:
517
    /*
518
     * As there are fewer than 2^31 memory locations,
519
     * s->k should be convertible to int without problems.
520
     */
521
0
    return (BPF_MODE(c) == BPF_IND) ? X_ATOM :
522
0
      (BPF_MODE(c) == BPF_MEM) ? (int)s->k : -1;
523
524
0
  case BPF_ST:
525
0
    return A_ATOM;
526
527
0
  case BPF_STX:
528
0
    return X_ATOM;
529
530
0
  case BPF_JMP:
531
0
  case BPF_ALU:
532
0
    if (BPF_SRC(c) == BPF_X)
533
0
      return AX_ATOM;
534
0
    return A_ATOM;
535
536
0
  case BPF_MISC:
537
0
    return BPF_MISCOP(c) == BPF_TXA ? X_ATOM : A_ATOM;
538
0
  }
539
0
  abort();
540
  /* NOTREACHED */
541
0
}
542
543
/*
544
 * Return the register number that is defined by 's'.  We assume that
545
 * a single stmt cannot define more than one register.  If no register
546
 * is defined, return -1.
547
 *
548
 * The implementation should probably change to an array access.
549
 */
550
static int
551
atomdef(struct stmt *s)
552
0
{
553
0
  if (s->code == NOP)
554
0
    return -1;
555
556
0
  switch (BPF_CLASS(s->code)) {
557
558
0
  case BPF_LD:
559
0
  case BPF_ALU:
560
0
    return A_ATOM;
561
562
0
  case BPF_LDX:
563
0
    return X_ATOM;
564
565
0
  case BPF_ST:
566
0
  case BPF_STX:
567
0
    return s->k;
568
569
0
  case BPF_MISC:
570
0
    return BPF_MISCOP(s->code) == BPF_TAX ? X_ATOM : A_ATOM;
571
0
  }
572
0
  return -1;
573
0
}
574
575
/*
576
 * Compute the sets of registers used, defined, and killed by 'b'.
577
 *
578
 * "Used" means that a statement in 'b' uses the register before any
579
 * statement in 'b' defines it, i.e. it uses the value left in
580
 * that register by a predecessor block of this block.
581
 * "Defined" means that a statement in 'b' defines it.
582
 * "Killed" means that a statement in 'b' defines it before any
583
 * statement in 'b' uses it, i.e. it kills the value left in that
584
 * register by a predecessor block of this block.
585
 */
586
static void
587
compute_local_ud(struct block *b)
588
0
{
589
0
  struct slist *s;
590
0
  atomset def = 0, use = 0, killed = 0;
591
0
  int atom;
592
593
0
  for (s = b->stmts; s; s = s->next) {
594
0
    if (s->s.code == NOP)
595
0
      continue;
596
0
    atom = atomuse(&s->s);
597
0
    if (atom >= 0) {
598
0
      if (atom == AX_ATOM) {
599
0
        if (!ATOMELEM(def, X_ATOM))
600
0
          use |= ATOMMASK(X_ATOM);
601
0
        if (!ATOMELEM(def, A_ATOM))
602
0
          use |= ATOMMASK(A_ATOM);
603
0
      }
604
0
      else if (atom < N_ATOMS) {
605
0
        if (!ATOMELEM(def, atom))
606
0
          use |= ATOMMASK(atom);
607
0
      }
608
0
      else
609
0
        abort();
610
0
    }
611
0
    atom = atomdef(&s->s);
612
0
    if (atom >= 0) {
613
0
      if (!ATOMELEM(use, atom))
614
0
        killed |= ATOMMASK(atom);
615
0
      def |= ATOMMASK(atom);
616
0
    }
617
0
  }
618
0
  if (BPF_CLASS(b->s.code) == BPF_JMP) {
619
    /*
620
     * XXX - what about RET?
621
     */
622
0
    atom = atomuse(&b->s);
623
0
    if (atom >= 0) {
624
0
      if (atom == AX_ATOM) {
625
0
        if (!ATOMELEM(def, X_ATOM))
626
0
          use |= ATOMMASK(X_ATOM);
627
0
        if (!ATOMELEM(def, A_ATOM))
628
0
          use |= ATOMMASK(A_ATOM);
629
0
      }
630
0
      else if (atom < N_ATOMS) {
631
0
        if (!ATOMELEM(def, atom))
632
0
          use |= ATOMMASK(atom);
633
0
      }
634
0
      else
635
0
        abort();
636
0
    }
637
0
  }
638
639
0
  b->def = def;
640
0
  b->kill = killed;
641
0
  b->in_use = use;
642
0
}
643
644
/*
645
 * Assume graph is already leveled.
646
 */
647
static void
648
find_ud(opt_state_t *opt_state, struct block *root)
649
0
{
650
0
  int i, maxlevel;
651
0
  struct block *p;
652
653
  /*
654
 * root->level is the highest level number found;
655
   * count down from there.
656
   */
657
0
  maxlevel = root->level;
658
0
  for (i = maxlevel; i >= 0; --i)
659
0
    for (p = opt_state->levels[i]; p; p = p->link) {
660
0
      compute_local_ud(p);
661
0
      p->out_use = 0;
662
0
    }
663
664
0
  for (i = 1; i <= maxlevel; ++i) {
665
0
    for (p = opt_state->levels[i]; p; p = p->link) {
666
0
      p->out_use |= JT(p)->in_use | JF(p)->in_use;
667
0
      p->in_use |= p->out_use &~ p->kill;
668
0
    }
669
0
  }
670
0
}
671
static void
672
init_val(opt_state_t *opt_state)
673
0
{
674
0
  opt_state->curval = 0;
675
0
  opt_state->next_vnode = opt_state->vnode_base;
676
0
  memset((char *)opt_state->vmap, 0, opt_state->maxval * sizeof(*opt_state->vmap));
677
0
  memset((char *)opt_state->hashtbl, 0, sizeof opt_state->hashtbl);
678
0
}
679
680
/*
681
 * Because we really don't have an IR, this stuff is a little messy.
682
 *
683
 * This routine looks in the table of existing value numbers for a value
684
 * generated from an operation with the specified opcode and
685
 * the specified values.  If it finds it, it returns its value number,
686
 * otherwise it makes a new entry in the table and returns the
687
 * value number of that entry.
688
 */
689
static bpf_u_int32
690
F(opt_state_t *opt_state, int code, bpf_u_int32 v0, bpf_u_int32 v1)
691
0
{
692
0
  u_int hash;
693
0
  bpf_u_int32 val;
694
0
  struct valnode *p;
695
696
0
  hash = (u_int)code ^ (v0 << 4) ^ (v1 << 8);
697
0
  hash %= MODULUS;
698
699
0
  for (p = opt_state->hashtbl[hash]; p; p = p->next)
700
0
    if (p->code == code && p->v0 == v0 && p->v1 == v1)
701
0
      return p->val;
702
703
  /*
704
   * Not found.  Allocate a new value, and assign it a new
705
   * value number.
706
   *
707
   * opt_state->curval starts out as 0, which means VAL_UNKNOWN; we
708
   * increment it before using it as the new value number, which
709
   * means we never assign VAL_UNKNOWN.
710
   *
711
   * XXX - unless we overflow, but we probably won't have 2^32-1
712
   * values; we treat 32 bits as effectively infinite.
713
   */
714
0
  val = ++opt_state->curval;
715
0
  if (BPF_MODE(code) == BPF_IMM &&
716
0
      (BPF_CLASS(code) == BPF_LD || BPF_CLASS(code) == BPF_LDX)) {
717
0
    opt_state->vmap[val].const_val = v0;
718
0
    opt_state->vmap[val].is_const = 1;
719
0
  }
720
0
  p = opt_state->next_vnode++;
721
0
  p->val = val;
722
0
  p->code = code;
723
0
  p->v0 = v0;
724
0
  p->v1 = v1;
725
0
  p->next = opt_state->hashtbl[hash];
726
0
  opt_state->hashtbl[hash] = p;
727
728
0
  return val;
729
0
}
730
731
static inline void
732
vstore(struct stmt *s, bpf_u_int32 *valp, bpf_u_int32 newval, int alter)
733
0
{
734
0
  if (alter && newval != VAL_UNKNOWN && *valp == newval)
735
0
    s->code = NOP;
736
0
  else
737
0
    *valp = newval;
738
0
}
739
740
/*
741
 * Do constant-folding on binary operators.
742
 * (Unary operators are handled elsewhere.)
743
 */
744
static void
745
fold_op(opt_state_t *opt_state, struct stmt *s, bpf_u_int32 v0, bpf_u_int32 v1)
746
0
{
747
0
  bpf_u_int32 a, b;
748
749
0
  a = opt_state->vmap[v0].const_val;
750
0
  b = opt_state->vmap[v1].const_val;
751
752
0
  switch (BPF_OP(s->code)) {
753
0
  case BPF_ADD:
754
0
    a += b;
755
0
    break;
756
757
0
  case BPF_SUB:
758
0
    a -= b;
759
0
    break;
760
761
0
  case BPF_MUL:
762
0
    a *= b;
763
0
    break;
764
765
0
  case BPF_DIV:
766
0
    if (b == 0)
767
0
      opt_error(opt_state, "division by zero");
768
0
    a /= b;
769
0
    break;
770
771
0
  case BPF_MOD:
772
0
    if (b == 0)
773
0
      opt_error(opt_state, "modulus by zero");
774
0
    a %= b;
775
0
    break;
776
777
0
  case BPF_AND:
778
0
    a &= b;
779
0
    break;
780
781
0
  case BPF_OR:
782
0
    a |= b;
783
0
    break;
784
785
0
  case BPF_XOR:
786
0
    a ^= b;
787
0
    break;
788
789
0
  case BPF_LSH:
790
    /*
791
     * A left shift of more than the width of the type
792
     * is undefined in C; we'll just treat it as shifting
793
     * all the bits out.
794
     *
795
     * XXX - the BPF interpreter doesn't check for this,
796
     * so its behavior is dependent on the behavior of
797
     * the processor on which it's running.  There are
798
     * processors on which it shifts all the bits out
799
     * and processors on which it does no shift.
800
     */
801
0
    if (b < 32)
802
0
      a <<= b;
803
0
    else
804
0
      a = 0;
805
0
    break;
806
807
0
  case BPF_RSH:
808
    /*
809
     * A right shift of more than the width of the type
810
     * is undefined in C; we'll just treat it as shifting
811
     * all the bits out.
812
     *
813
     * XXX - the BPF interpreter doesn't check for this,
814
     * so its behavior is dependent on the behavior of
815
     * the processor on which it's running.  There are
816
     * processors on which it shifts all the bits out
817
     * and processors on which it does no shift.
818
     */
819
0
    if (b < 32)
820
0
      a >>= b;
821
0
    else
822
0
      a = 0;
823
0
    break;
824
825
0
  default:
826
0
    abort();
827
0
  }
828
0
  s->k = a;
829
0
  s->code = BPF_LD|BPF_IMM;
830
0
  opt_state->done = 0;
831
  /*
832
   * XXX - optimizer loop detection.
833
   */
834
0
  opt_state->non_branch_movement_performed = 1;
835
0
}
836
837
static inline struct slist *
838
this_op(struct slist *s)
839
0
{
840
0
  while (s != 0 && s->s.code == NOP)
841
0
    s = s->next;
842
0
  return s;
843
0
}
844
845
static void
846
opt_not(struct block *b)
847
0
{
848
0
  struct block *tmp = JT(b);
849
850
0
  JT(b) = JF(b);
851
0
  JF(b) = tmp;
852
0
}
853
854
static void
855
opt_peep(opt_state_t *opt_state, struct block *b)
856
0
{
857
0
  struct slist *s;
858
0
  struct slist *next, *last;
859
0
  bpf_u_int32 val;
860
861
0
  s = b->stmts;
862
0
  if (s == 0)
863
0
    return;
864
865
0
  last = s;
866
0
  for (/*empty*/; /*empty*/; s = next) {
867
    /*
868
     * Skip over nops.
869
     */
870
0
    s = this_op(s);
871
0
    if (s == 0)
872
0
      break; /* nothing left in the block */
873
874
    /*
875
     * Find the next real instruction after that one
876
     * (skipping nops).
877
     */
878
0
    next = this_op(s->next);
879
0
    if (next == 0)
880
0
      break; /* no next instruction */
881
0
    last = next;
882
883
    /*
884
     * st  M[k] --> st  M[k]
885
     * ldx M[k]   tax
886
     */
887
0
    if (s->s.code == BPF_ST &&
888
0
        next->s.code == (BPF_LDX|BPF_MEM) &&
889
0
        s->s.k == next->s.k) {
890
0
      opt_state->done = 0;
891
0
      next->s.code = BPF_MISC|BPF_TAX;
892
      /*
893
       * XXX - optimizer loop detection.
894
       */
895
0
      opt_state->non_branch_movement_performed = 1;
896
0
    }
897
    /*
898
     * ld  #k --> ldx  #k
899
     * tax      txa
900
     */
901
0
    if (s->s.code == (BPF_LD|BPF_IMM) &&
902
0
        next->s.code == (BPF_MISC|BPF_TAX)) {
903
0
      s->s.code = BPF_LDX|BPF_IMM;
904
0
      next->s.code = BPF_MISC|BPF_TXA;
905
0
      opt_state->done = 0;
906
      /*
907
       * XXX - optimizer loop detection.
908
       */
909
0
      opt_state->non_branch_movement_performed = 1;
910
0
    }
911
    /*
912
     * This is an ugly special case, but it happens
913
     * when you say tcp[k] or udp[k] where k is a constant.
914
     */
915
0
    if (s->s.code == (BPF_LD|BPF_IMM)) {
916
0
      struct slist *add, *tax, *ild;
917
918
      /*
919
       * Check that X isn't used on exit from this
920
       * block (which the optimizer might cause).
921
       * We know the code generator won't generate
922
       * any local dependencies.
923
       */
924
0
      if (ATOMELEM(b->out_use, X_ATOM))
925
0
        continue;
926
927
      /*
928
       * Check that the instruction following the ldi
929
       * is an addx, or it's an ldxms with an addx
930
       * following it (with 0 or more nops between the
931
       * ldxms and addx).
932
       */
933
0
      if (next->s.code != (BPF_LDX|BPF_MSH|BPF_B))
934
0
        add = next;
935
0
      else
936
0
        add = this_op(next->next);
937
0
      if (add == 0 || add->s.code != (BPF_ALU|BPF_ADD|BPF_X))
938
0
        continue;
939
940
      /*
941
       * Check that a tax follows that (with 0 or more
942
       * nops between them).
943
       */
944
0
      tax = this_op(add->next);
945
0
      if (tax == 0 || tax->s.code != (BPF_MISC|BPF_TAX))
946
0
        continue;
947
948
      /*
949
       * Check that an ild follows that (with 0 or more
950
       * nops between them).
951
       */
952
0
      ild = this_op(tax->next);
953
0
      if (ild == 0 || BPF_CLASS(ild->s.code) != BPF_LD ||
954
0
          BPF_MODE(ild->s.code) != BPF_IND)
955
0
        continue;
956
      /*
957
       * We want to turn this sequence:
958
       *
959
       * (004) ldi     #0x2   {s}
960
       * (005) ldxms   [14]   {next}  -- optional
961
       * (006) addx     {add}
962
       * (007) tax      {tax}
963
       * (008) ild     [x+0]    {ild}
964
       *
965
       * into this sequence:
966
       *
967
       * (004) nop
968
       * (005) ldxms   [14]
969
       * (006) nop
970
       * (007) nop
971
       * (008) ild     [x+2]
972
       *
973
       * XXX We need to check that X is not
974
       * subsequently used, because we want to change
975
       * what'll be in it after this sequence.
976
       *
977
       * We know we can eliminate the accumulator
978
       * modifications earlier in the sequence since
979
       * it is defined by the last stmt of this sequence
980
       * (i.e., the last statement of the sequence loads
981
       * a value into the accumulator, so we can eliminate
982
       * earlier operations on the accumulator).
983
       */
984
0
      ild->s.k += s->s.k;
985
0
      s->s.code = NOP;
986
0
      add->s.code = NOP;
987
0
      tax->s.code = NOP;
988
0
      opt_state->done = 0;
989
      /*
990
       * XXX - optimizer loop detection.
991
       */
992
0
      opt_state->non_branch_movement_performed = 1;
993
0
    }
994
0
  }
995
  /*
996
   * If the comparison at the end of a block is an equality
997
   * comparison against a constant, and nobody uses the value
998
   * we leave in the A register at the end of a block, and
999
   * the operation preceding the comparison is an arithmetic
1000
   * operation, we can sometimes optimize it away.
1001
   */
1002
0
  if (b->s.code == (BPF_JMP|BPF_JEQ|BPF_K) &&
1003
0
      !ATOMELEM(b->out_use, A_ATOM)) {
1004
    /*
1005
     * We can optimize away certain subtractions of the
1006
     * X register.
1007
     */
1008
0
    if (last->s.code == (BPF_ALU|BPF_SUB|BPF_X)) {
1009
0
      val = b->val[X_ATOM];
1010
0
      if (opt_state->vmap[val].is_const) {
1011
        /*
1012
         * If we have a subtract to do a comparison,
1013
         * and the X register is a known constant,
1014
         * we can merge this value into the
1015
         * comparison:
1016
         *
1017
         * sub x  ->  nop
1018
         * jeq #y jeq #(x+y)
1019
         */
1020
0
        b->s.k += opt_state->vmap[val].const_val;
1021
0
        last->s.code = NOP;
1022
0
        opt_state->done = 0;
1023
        /*
1024
         * XXX - optimizer loop detection.
1025
         */
1026
0
        opt_state->non_branch_movement_performed = 1;
1027
0
      } else if (b->s.k == 0) {
1028
        /*
1029
         * If the X register isn't a constant,
1030
         * and the comparison in the test is
1031
         * against 0, we can compare with the
1032
         * X register, instead:
1033
         *
1034
         * sub x  ->  nop
1035
         * jeq #0 jeq x
1036
         */
1037
0
        last->s.code = NOP;
1038
0
        b->s.code = BPF_JMP|BPF_JEQ|BPF_X;
1039
0
        opt_state->done = 0;
1040
        /*
1041
         * XXX - optimizer loop detection.
1042
         */
1043
0
        opt_state->non_branch_movement_performed = 1;
1044
0
      }
1045
0
    }
1046
    /*
1047
     * Likewise, a constant subtract can be simplified:
1048
     *
1049
     * sub #x ->  nop
1050
     * jeq #y ->  jeq #(x+y)
1051
     */
1052
0
    else if (last->s.code == (BPF_ALU|BPF_SUB|BPF_K)) {
1053
0
      last->s.code = NOP;
1054
0
      b->s.k += last->s.k;
1055
0
      opt_state->done = 0;
1056
      /*
1057
       * XXX - optimizer loop detection.
1058
       */
1059
0
      opt_state->non_branch_movement_performed = 1;
1060
0
    }
1061
    /*
1062
     * And, similarly, a constant AND can be simplified
1063
     * if we're testing against 0, i.e.:
1064
     *
1065
     * and #k nop
1066
     * jeq #0  -> jset #k
1067
     */
1068
0
    else if (last->s.code == (BPF_ALU|BPF_AND|BPF_K) &&
1069
0
        b->s.k == 0) {
1070
0
      b->s.k = last->s.k;
1071
0
      b->s.code = BPF_JMP|BPF_K|BPF_JSET;
1072
0
      last->s.code = NOP;
1073
0
      opt_state->done = 0;
1074
0
      opt_not(b);
1075
      /*
1076
       * XXX - optimizer loop detection.
1077
       */
1078
0
      opt_state->non_branch_movement_performed = 1;
1079
0
    }
1080
0
  }
1081
  /*
1082
   * jset #0        ->   never
1083
   * jset #ffffffff ->   always
1084
   */
1085
0
  if (b->s.code == (BPF_JMP|BPF_K|BPF_JSET)) {
1086
0
    if (b->s.k == 0)
1087
0
      JT(b) = JF(b);
1088
0
    if (b->s.k == 0xffffffffU)
1089
0
      JF(b) = JT(b);
1090
0
  }
1091
  /*
1092
   * If we're comparing against the index register, and the index
1093
   * register is a known constant, we can just compare against that
1094
   * constant.
1095
   */
1096
0
  val = b->val[X_ATOM];
1097
0
  if (opt_state->vmap[val].is_const && BPF_SRC(b->s.code) == BPF_X) {
1098
0
    bpf_u_int32 v = opt_state->vmap[val].const_val;
1099
0
    b->s.code &= ~BPF_X;
1100
0
    b->s.k = v;
1101
0
  }
1102
  /*
1103
   * If the accumulator is a known constant, we can compute the
1104
   * comparison result.
1105
   */
1106
0
  val = b->val[A_ATOM];
1107
0
  if (opt_state->vmap[val].is_const && BPF_SRC(b->s.code) == BPF_K) {
1108
0
    bpf_u_int32 v = opt_state->vmap[val].const_val;
1109
0
    switch (BPF_OP(b->s.code)) {
1110
1111
0
    case BPF_JEQ:
1112
0
      v = v == b->s.k;
1113
0
      break;
1114
1115
0
    case BPF_JGT:
1116
0
      v = v > b->s.k;
1117
0
      break;
1118
1119
0
    case BPF_JGE:
1120
0
      v = v >= b->s.k;
1121
0
      break;
1122
1123
0
    case BPF_JSET:
1124
0
      v &= b->s.k;
1125
0
      break;
1126
1127
0
    default:
1128
0
      abort();
1129
0
    }
1130
0
    if (JF(b) != JT(b)) {
1131
0
      opt_state->done = 0;
1132
      /*
1133
       * XXX - optimizer loop detection.
1134
       */
1135
0
      opt_state->non_branch_movement_performed = 1;
1136
0
    }
1137
0
    if (v)
1138
0
      JF(b) = JT(b);
1139
0
    else
1140
0
      JT(b) = JF(b);
1141
0
  }
1142
0
}
1143
1144
/*
1145
 * Compute the symbolic value of expression of 's', and update
1146
 * anything it defines in the value table 'val'.  If 'alter' is true,
1147
 * do various optimizations.  This code would be cleaner if symbolic
1148
 * evaluation and code transformations weren't folded together.
1149
 */
1150
static void
opt_stmt(opt_state_t *opt_state, struct stmt *s, bpf_u_int32 val[], int alter)
{
	int op;
	bpf_u_int32 v;

	switch (s->code) {

	/*
	 * Absolute loads: the value depends only on the opcode and
	 * offset, so give it a value number based on those.
	 */
	case BPF_LD|BPF_ABS|BPF_W:
	case BPF_LD|BPF_ABS|BPF_H:
	case BPF_LD|BPF_ABS|BPF_B:
		v = F(opt_state, s->code, s->k, 0L);
		vstore(s, &val[A_ATOM], v, alter);
		break;

	/*
	 * Indexed loads: if X is a known constant, rewrite as an
	 * absolute load at offset k + X.
	 */
	case BPF_LD|BPF_IND|BPF_W:
	case BPF_LD|BPF_IND|BPF_H:
	case BPF_LD|BPF_IND|BPF_B:
		v = val[X_ATOM];
		if (alter && opt_state->vmap[v].is_const) {
			s->code = BPF_LD|BPF_ABS|BPF_SIZE(s->code);
			s->k += opt_state->vmap[v].const_val;
			v = F(opt_state, s->code, s->k, 0L);
			opt_state->done = 0;
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
		}
		else
			v = F(opt_state, s->code, s->k, v);
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_LD|BPF_LEN:
		v = F(opt_state, s->code, 0L, 0L);
		vstore(s, &val[A_ATOM], v, alter);
		break;

	/* Immediate loads produce a known-constant value number. */
	case BPF_LD|BPF_IMM:
		v = K(s->k);
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_LDX|BPF_IMM:
		v = K(s->k);
		vstore(s, &val[X_ATOM], v, alter);
		break;

	case BPF_LDX|BPF_MSH|BPF_B:
		v = F(opt_state, s->code, s->k, 0L);
		vstore(s, &val[X_ATOM], v, alter);
		break;

	/*
	 * Negation of a known constant folds to an immediate load.
	 */
	case BPF_ALU|BPF_NEG:
		if (alter && opt_state->vmap[val[A_ATOM]].is_const) {
			s->code = BPF_LD|BPF_IMM;
			/*
			 * Do this negation as unsigned arithmetic; that's
			 * what modern BPF engines do, and it guarantees
			 * that all possible values can be negated.  (Yeah,
			 * negating 0x80000000, the minimum signed 32-bit
			 * two's-complement value, results in 0x80000000,
			 * so it's still negative, but we *should* be doing
			 * all unsigned arithmetic here, to match what
			 * modern BPF engines do.)
			 *
			 * Express it as 0U - (unsigned value) so that we
			 * don't get compiler warnings about negating an
			 * unsigned value and don't get UBSan warnings
			 * about the result of negating 0x80000000 being
			 * undefined.
			 */
			s->k = 0U - opt_state->vmap[val[A_ATOM]].const_val;
			val[A_ATOM] = K(s->k);
		}
		else
			val[A_ATOM] = F(opt_state, s->code, val[A_ATOM], 0L);
		break;

	/*
	 * ALU operations with a constant operand.
	 */
	case BPF_ALU|BPF_ADD|BPF_K:
	case BPF_ALU|BPF_SUB|BPF_K:
	case BPF_ALU|BPF_MUL|BPF_K:
	case BPF_ALU|BPF_DIV|BPF_K:
	case BPF_ALU|BPF_MOD|BPF_K:
	case BPF_ALU|BPF_AND|BPF_K:
	case BPF_ALU|BPF_OR|BPF_K:
	case BPF_ALU|BPF_XOR|BPF_K:
	case BPF_ALU|BPF_LSH|BPF_K:
	case BPF_ALU|BPF_RSH|BPF_K:
		op = BPF_OP(s->code);
		if (alter) {
			if (s->k == 0) {
				/*
				 * Optimize operations where the constant
				 * is zero.
				 *
				 * Don't optimize away "sub #0"
				 * as it may be needed later to
				 * fixup the generated math code.
				 *
				 * Fail if we're dividing by zero or taking
				 * a modulus by zero.
				 */
				if (op == BPF_ADD ||
				    op == BPF_LSH || op == BPF_RSH ||
				    op == BPF_OR || op == BPF_XOR) {
					s->code = NOP;
					break;
				}
				if (op == BPF_MUL || op == BPF_AND) {
					s->code = BPF_LD|BPF_IMM;
					val[A_ATOM] = K(s->k);
					break;
				}
				if (op == BPF_DIV)
					opt_error(opt_state,
					    "division by zero");
				if (op == BPF_MOD)
					opt_error(opt_state,
					    "modulus by zero");
			}
			/* If A is also a constant, fold at compile time. */
			if (opt_state->vmap[val[A_ATOM]].is_const) {
				fold_op(opt_state, s, val[A_ATOM], K(s->k));
				val[A_ATOM] = K(s->k);
				break;
			}
		}
		val[A_ATOM] = F(opt_state, s->code, val[A_ATOM], K(s->k));
		break;

	/*
	 * ALU operations with the X register as operand.
	 */
	case BPF_ALU|BPF_ADD|BPF_X:
	case BPF_ALU|BPF_SUB|BPF_X:
	case BPF_ALU|BPF_MUL|BPF_X:
	case BPF_ALU|BPF_DIV|BPF_X:
	case BPF_ALU|BPF_MOD|BPF_X:
	case BPF_ALU|BPF_AND|BPF_X:
	case BPF_ALU|BPF_OR|BPF_X:
	case BPF_ALU|BPF_XOR|BPF_X:
	case BPF_ALU|BPF_LSH|BPF_X:
	case BPF_ALU|BPF_RSH|BPF_X:
		op = BPF_OP(s->code);
		if (alter && opt_state->vmap[val[X_ATOM]].is_const) {
			if (opt_state->vmap[val[A_ATOM]].is_const) {
				/* Both operands constant: fold completely. */
				fold_op(opt_state, s, val[A_ATOM], val[X_ATOM]);
				val[A_ATOM] = K(s->k);
			}
			else {
				/* X constant: convert to the BPF_K form. */
				s->code = BPF_ALU|BPF_K|op;
				s->k = opt_state->vmap[val[X_ATOM]].const_val;
				if ((op == BPF_LSH || op == BPF_RSH) &&
				    s->k > 31)
					opt_error(opt_state,
					    "shift by more than 31 bits");
				opt_state->done = 0;
				val[A_ATOM] =
					F(opt_state, s->code, val[A_ATOM], K(s->k));
				/*
				 * XXX - optimizer loop detection.
				 */
				opt_state->non_branch_movement_performed = 1;
			}
			break;
		}
		/*
		 * Check if we're doing something to an accumulator
		 * that is 0, and simplify.  This may not seem like
		 * much of a simplification but it could open up further
		 * optimizations.
		 * XXX We could also check for mul by 1, etc.
		 */
		if (alter && opt_state->vmap[val[A_ATOM]].is_const
		    && opt_state->vmap[val[A_ATOM]].const_val == 0) {
			if (op == BPF_ADD || op == BPF_OR || op == BPF_XOR) {
				/* 0 op X == X: just copy X into A. */
				s->code = BPF_MISC|BPF_TXA;
				vstore(s, &val[A_ATOM], val[X_ATOM], alter);
				break;
			}
			else if (op == BPF_MUL || op == BPF_DIV || op == BPF_MOD ||
				 op == BPF_AND || op == BPF_LSH || op == BPF_RSH) {
				/* 0 op X == 0: result is always zero. */
				s->code = BPF_LD|BPF_IMM;
				s->k = 0;
				vstore(s, &val[A_ATOM], K(s->k), alter);
				break;
			}
			else if (op == BPF_NEG) {
				s->code = NOP;
				break;
			}
		}
		val[A_ATOM] = F(opt_state, s->code, val[A_ATOM], val[X_ATOM]);
		break;

	case BPF_MISC|BPF_TXA:
		vstore(s, &val[A_ATOM], val[X_ATOM], alter);
		break;

	/*
	 * Loads from scratch memory: if the stored value is a known
	 * constant, turn the load into an immediate load.
	 */
	case BPF_LD|BPF_MEM:
		v = val[s->k];
		if (alter && opt_state->vmap[v].is_const) {
			s->code = BPF_LD|BPF_IMM;
			s->k = opt_state->vmap[v].const_val;
			opt_state->done = 0;
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
		}
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_MISC|BPF_TAX:
		vstore(s, &val[X_ATOM], val[A_ATOM], alter);
		break;

	case BPF_LDX|BPF_MEM:
		v = val[s->k];
		if (alter && opt_state->vmap[v].is_const) {
			s->code = BPF_LDX|BPF_IMM;
			s->k = opt_state->vmap[v].const_val;
			opt_state->done = 0;
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
		}
		vstore(s, &val[X_ATOM], v, alter);
		break;

	/* Stores just propagate the register's value number. */
	case BPF_ST:
		vstore(s, &val[s->k], val[A_ATOM], alter);
		break;

	case BPF_STX:
		vstore(s, &val[s->k], val[X_ATOM], alter);
		break;
	}
}
1388
1389
static void
1390
deadstmt(opt_state_t *opt_state, struct stmt *s, struct stmt *last[])
1391
0
{
1392
0
  int atom;
1393
1394
0
  atom = atomuse(s);
1395
0
  if (atom >= 0) {
1396
0
    if (atom == AX_ATOM) {
1397
0
      last[X_ATOM] = 0;
1398
0
      last[A_ATOM] = 0;
1399
0
    }
1400
0
    else
1401
0
      last[atom] = 0;
1402
0
  }
1403
0
  atom = atomdef(s);
1404
0
  if (atom >= 0) {
1405
0
    if (last[atom]) {
1406
0
      opt_state->done = 0;
1407
0
      last[atom]->code = NOP;
1408
      /*
1409
       * XXX - optimizer loop detection.
1410
       */
1411
0
      opt_state->non_branch_movement_performed = 1;
1412
0
    }
1413
0
    last[atom] = s;
1414
0
  }
1415
0
}
1416
1417
static void
1418
opt_deadstores(opt_state_t *opt_state, struct block *b)
1419
0
{
1420
0
  struct slist *s;
1421
0
  int atom;
1422
0
  struct stmt *last[N_ATOMS];
1423
1424
0
  memset((char *)last, 0, sizeof last);
1425
1426
0
  for (s = b->stmts; s != 0; s = s->next)
1427
0
    deadstmt(opt_state, &s->s, last);
1428
0
  deadstmt(opt_state, &b->s, last);
1429
1430
0
  for (atom = 0; atom < N_ATOMS; ++atom)
1431
0
    if (last[atom] && !ATOMELEM(b->out_use, atom)) {
1432
0
      last[atom]->code = NOP;
1433
      /*
1434
       * The store was removed as it's dead,
1435
       * so the value stored into now has
1436
       * an unknown value.
1437
       */
1438
0
      vstore(0, &b->val[atom], VAL_UNKNOWN, 0);
1439
0
      opt_state->done = 0;
1440
      /*
1441
       * XXX - optimizer loop detection.
1442
       */
1443
0
      opt_state->non_branch_movement_performed = 1;
1444
0
    }
1445
0
}
1446
1447
/*
 * Optimize a single block: merge the value numbers flowing in from all
 * predecessors, symbolically evaluate (and, if do_stmts, transform) each
 * statement, then either eliminate the block's statements entirely or run
 * the peephole and dead-store passes on them, and finally record the
 * operands for the branch optimizer.
 */
static void
opt_blk(opt_state_t *opt_state, struct block *b, int do_stmts)
{
	struct slist *s;
	struct edge *p;
	int i;
	bpf_u_int32 aval, xval;

#if 0
	for (s = b->stmts; s && s->next; s = s->next)
		if (BPF_CLASS(s->s.code) == BPF_JMP) {
			do_stmts = 0;
			break;
		}
#endif

	/*
	 * Initialize the atom values.
	 */
	p = b->in_edges;
	if (p == 0) {
		/*
		 * We have no predecessors, so everything is undefined
		 * upon entry to this block.
		 */
		memset((char *)b->val, 0, sizeof(b->val));
	} else {
		/*
		 * Inherit values from our predecessors.
		 *
		 * First, get the values from the predecessor along the
		 * first edge leading to this node.
		 */
		memcpy((char *)b->val, (char *)p->pred->val, sizeof(b->val));
		/*
		 * Now look at all the other nodes leading to this node.
		 * If, for the predecessor along that edge, a register
		 * has a different value from the one we have (i.e.,
		 * control paths are merging, and the merging paths
		 * assign different values to that register), give the
		 * register the undefined value of 0.
		 */
		while ((p = p->next) != NULL) {
			for (i = 0; i < N_ATOMS; ++i)
				if (b->val[i] != p->pred->val[i])
					b->val[i] = 0;
		}
	}
	/* Remember A and X on entry, to detect whether the block changes them. */
	aval = b->val[A_ATOM];
	xval = b->val[X_ATOM];
	for (s = b->stmts; s; s = s->next)
		opt_stmt(opt_state, &s->s, b->val, do_stmts);

	/*
	 * This is a special case: if we don't use anything from this
	 * block, and we load the accumulator or index register with a
	 * value that is already there, or if this block is a return,
	 * eliminate all the statements.
	 *
	 * XXX - what if it does a store?  Presumably that falls under
	 * the heading of "if we don't use anything from this block",
	 * i.e., if we use any memory location set to a different
	 * value by this block, then we use something from this block.
	 *
	 * XXX - why does it matter whether we use anything from this
	 * block?  If the accumulator or index register doesn't change
	 * its value, isn't that OK even if we use that value?
	 *
	 * XXX - if we load the accumulator with a different value,
	 * and the block ends with a conditional branch, we obviously
	 * can't eliminate it, as the branch depends on that value.
	 * For the index register, the conditional branch only depends
	 * on the index register value if the test is against the index
	 * register value rather than a constant; if nothing uses the
	 * value we put into the index register, and we're not testing
	 * against the index register's value, and there aren't any
	 * other problems that would keep us from eliminating this
	 * block, can we eliminate it?
	 */
	if (do_stmts &&
	    ((b->out_use == 0 &&
	      aval != VAL_UNKNOWN && b->val[A_ATOM] == aval &&
	      xval != VAL_UNKNOWN && b->val[X_ATOM] == xval) ||
	     BPF_CLASS(b->s.code) == BPF_RET)) {
		if (b->stmts != 0) {
			b->stmts = 0;
			opt_state->done = 0;
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
		}
	} else {
		opt_peep(opt_state, b);
		opt_deadstores(opt_state, b);
	}
	/*
	 * Set up values for branch optimizer.
	 */
	if (BPF_SRC(b->s.code) == BPF_K)
		b->oval = K(b->s.k);
	else
		b->oval = b->val[X_ATOM];
	/* The false edge carries the negated opcode (see fold_edge()). */
	b->et.code = b->s.code;
	b->ef.code = -b->s.code;
}
1553
1554
/*
1555
 * Return true if any register that is used on exit from 'succ', has
1556
 * an exit value that is different from the corresponding exit value
1557
 * from 'b'.
1558
 */
1559
static int
1560
use_conflict(struct block *b, struct block *succ)
1561
0
{
1562
0
  int atom;
1563
0
  atomset use = succ->out_use;
1564
1565
0
  if (use == 0)
1566
0
    return 0;
1567
1568
0
  for (atom = 0; atom < N_ATOMS; ++atom)
1569
0
    if (ATOMELEM(use, atom))
1570
0
      if (b->val[atom] != succ->val[atom])
1571
0
        return 1;
1572
0
  return 0;
1573
0
}
1574
1575
/*
1576
 * Given a block that is the successor of an edge, and an edge that
1577
 * dominates that edge, return either a pointer to a child of that
1578
 * block (a block to which that block jumps) if that block is a
1579
 * candidate to replace the successor of the latter edge or NULL
1580
 * if neither of the children of the first block are candidates.
1581
 */
1582
static struct block *
fold_edge(struct block *child, struct edge *ep)
{
	int sense;
	bpf_u_int32 aval0, aval1, oval0, oval1;
	int code = ep->code;

	/*
	 * Edge codes are the branch opcode for true edges and its
	 * negation for false edges (see opt_blk()).
	 */
	if (code < 0) {
		/*
		 * This edge is a "branch if false" edge.
		 */
		code = -code;
		sense = 0;
	} else {
		/*
		 * This edge is a "branch if true" edge.
		 */
		sense = 1;
	}

	/*
	 * If the opcode for the branch at the end of the block we
	 * were handed isn't the same as the opcode for the branch
	 * to which the edge we were handed corresponds, the tests
	 * for those branches aren't testing the same conditions,
	 * so the blocks to which the first block branches aren't
	 * candidates to replace the successor of the edge.
	 */
	if (child->s.code != code)
		return 0;

	aval0 = child->val[A_ATOM];
	oval0 = child->oval;
	aval1 = ep->pred->val[A_ATOM];
	oval1 = ep->pred->oval;

	/*
	 * If the A register value on exit from the successor block
	 * isn't the same as the A register value on exit from the
	 * predecessor of the edge, the blocks to which the first
	 * block branches aren't candidates to replace the successor
	 * of the edge.
	 */
	if (aval0 != aval1)
		return 0;

	if (oval0 == oval1)
		/*
		 * The operands of the branch instructions are
		 * identical, so the branches are testing the
		 * same condition, and the result is true if a true
		 * branch was taken to get here, otherwise false.
		 */
		return sense ? JT(child) : JF(child);

	if (sense && code == (BPF_JMP|BPF_JEQ|BPF_K))
		/*
		 * At this point, we only know the comparison if we
		 * came down the true branch, and it was an equality
		 * comparison with a constant.
		 *
		 * I.e., if we came down the true branch, and the branch
		 * was an equality comparison with a constant, we know the
		 * accumulator contains that constant.  If we came down
		 * the false branch, or the comparison wasn't with a
		 * constant, we don't know what was in the accumulator.
		 *
		 * We rely on the fact that distinct constants have distinct
		 * value numbers.
		 */
		return JF(child);

	/* No determination could be made; nothing to fold. */
	return 0;
}
1656
1657
/*
1658
 * If we can make this edge go directly to a child of the edge's current
1659
 * successor, do so.
1660
 */
1661
static void
opt_j(opt_state_t *opt_state, struct edge *ep)
{
	u_int i, k;
	struct block *target;

	/*
	 * Does this edge go to a block where, if the test
	 * at the end of it succeeds, it goes to a block
	 * that's a leaf node of the DAG, i.e. a return
	 * statement?
	 * If so, there's nothing to optimize.
	 */
	if (JT(ep->succ) == 0)
		return;

	/*
	 * Does this edge go to a block that goes, in turn, to
	 * the same block regardless of whether the test at the
	 * end succeeds or fails?
	 */
	if (JT(ep->succ) == JF(ep->succ)) {
		/*
		 * Common branch targets can be eliminated, provided
		 * there is no data dependency.
		 *
		 * Check whether any register used on exit from the
		 * block to which the successor of this edge goes
		 * has a value at that point that's different from
		 * the value it has on exit from the predecessor of
		 * this edge.  If not, the predecessor of this edge
		 * can just go to the block to which the successor
		 * of this edge goes, bypassing the successor of this
		 * edge, as the successor of this edge isn't doing
		 * any calculations whose results are different
		 * from what the blocks before it did and isn't
		 * doing any tests the results of which matter.
		 */
		if (!use_conflict(ep->pred, JT(ep->succ))) {
			/*
			 * No, there isn't.
			 * Make this edge go to the block to
			 * which the successor of that edge
			 * goes.
			 */
			opt_state->done = 0;
			ep->succ = JT(ep->succ);
			/*
			 * XXX - optimizer loop detection.
			 */
			opt_state->non_branch_movement_performed = 1;
		}
	}
	/*
	 * For each edge dominator that matches the successor of this
	 * edge, promote the edge successor to the its grandchild.
	 *
	 * XXX We violate the set abstraction here in favor a reasonably
	 * efficient loop.
	 */
 top:
	for (i = 0; i < opt_state->edgewords; ++i) {
		/* i'th word in the bitset of dominators */
		bpf_u_int32 x = ep->edom[i];

		while (x != 0) {
			/* Find the next dominator in that word and mark it as found */
			k = lowest_set_bit(x);
			x &=~ ((bpf_u_int32)1 << k);
			/* Convert the bit position to an edge index. */
			k += i * BITS_PER_WORD;

			target = fold_edge(ep->succ, opt_state->edges[k]);
			/*
			 * We have a candidate to replace the successor
			 * of ep.
			 *
			 * Check that there is no data dependency between
			 * nodes that will be violated if we move the edge;
			 * i.e., if any register used on exit from the
			 * candidate has a value at that point different
			 * from the value it has when we exit the
			 * predecessor of that edge, there's a data
			 * dependency that will be violated.
			 */
			if (target != 0 && !use_conflict(ep->pred, target)) {
				/*
				 * It's safe to replace the successor of
				 * ep; do so, and note that we've made
				 * at least one change.
				 *
				 * XXX - this is one of the operations that
				 * happens when the optimizer gets into
				 * one of those infinite loops.
				 */
				opt_state->done = 0;
				ep->succ = target;
				if (JT(target) != 0)
					/*
					 * Start over unless we hit a leaf.
					 */
					goto top;
				return;
			}
		}
	}
}
1767
1768
/*
1769
 * XXX - is this, and and_pullup(), what's described in section 6.1.2
1770
 * "Predicate Assertion Propagation" in the BPF+ paper?
1771
 *
1772
 * Note that this looks at block dominators, not edge dominators.
1773
 * Don't think so.
1774
 *
1775
 * "A or B" compiles into
1776
 *
1777
 *          A
1778
 *       t / \ f
1779
 *        /   B
1780
 *       / t / \ f
1781
 *      \   /
1782
 *       \ /
1783
 *        X
1784
 *
1785
 *
1786
 */
1787
static void
or_pullup(opt_state_t *opt_state, struct block *b, struct block *root)
{
	bpf_u_int32 val;
	int at_top;
	struct block *pull;
	struct block **diffp, **samep;
	struct edge *ep;

	ep = b->in_edges;
	if (ep == 0)
		return;

	/*
	 * Make sure each predecessor loads the same value.
	 * XXX why?
	 */
	val = ep->pred->val[A_ATOM];
	for (ep = ep->next; ep != 0; ep = ep->next)
		if (val != ep->pred->val[A_ATOM])
			return;

	/*
	 * For the first edge in the list of edges coming into this block,
	 * see whether the predecessor of that edge comes here via a true
	 * branch or a false branch.
	 */
	if (JT(b->in_edges->pred) == b)
		diffp = &JT(b->in_edges->pred);	/* jt */
	else
		diffp = &JF(b->in_edges->pred);	/* jf */

	/*
	 * diffp is a pointer to a pointer to the block.
	 *
	 * Go down the false chain looking as far as you can,
	 * making sure that each jump-compare is doing the
	 * same as the original block.
	 *
	 * If you reach the bottom before you reach a
	 * different jump-compare, just exit.  There's nothing
	 * to do here.  XXX - no, this version is checking for
	 * the value leaving the block; that's from the BPF+
	 * pullup routine.
	 */
	at_top = 1;
	for (;;) {
		/*
		 * Done if that's not going anywhere XXX
		 */
		if (*diffp == 0)
			return;

		/*
		 * Done if that predecessor blah blah blah isn't
		 * going the same place we're going XXX
		 *
		 * Does the true edge of this block point to the same
		 * location as the true edge of b?
		 */
		if (JT(*diffp) != JT(b))
			return;

		/*
		 * Done if this node isn't a dominator of that
		 * node blah blah blah XXX
		 *
		 * Does b dominate diffp?
		 */
		if (!SET_MEMBER((*diffp)->dom, b->id))
			return;

		/*
		 * Break out of the loop if that node's value of A
		 * isn't the value of A above XXX
		 */
		if ((*diffp)->val[A_ATOM] != val)
			break;

		/*
		 * Get the JF for that node XXX
		 * Go down the false path.
		 */
		diffp = &JF(*diffp);
		at_top = 0;
	}

	/*
	 * Now that we've found a different jump-compare in a chain
	 * below b, search further down until we find another
	 * jump-compare that looks at the original value.  This
	 * jump-compare should get pulled up.  XXX again we're
	 * comparing values not jump-compares.
	 */
	samep = &JF(*diffp);
	for (;;) {
		/*
		 * Done if that's not going anywhere XXX
		 */
		if (*samep == 0)
			return;

		/*
		 * Done if that predecessor blah blah blah isn't
		 * going the same place we're going XXX
		 */
		if (JT(*samep) != JT(b))
			return;

		/*
		 * Done if this node isn't a dominator of that
		 * node blah blah blah XXX
		 *
		 * Does b dominate samep?
		 */
		if (!SET_MEMBER((*samep)->dom, b->id))
			return;

		/*
		 * Break out of the loop if that node's value of A
		 * is the value of A above XXX
		 */
		if ((*samep)->val[A_ATOM] == val)
			break;

		/* XXX Need to check that there are no data dependencies
		   between dp0 and dp1.  Currently, the code generator
		   will not produce such dependencies. */
		samep = &JF(*samep);
	}
#ifdef notdef
	/* XXX This doesn't cover everything. */
	for (i = 0; i < N_ATOMS; ++i)
		if ((*samep)->val[i] != pred->val[i])
			return;
#endif
	/* Pull up the node: splice it out of its chain position... */
	pull = *samep;
	*samep = JF(pull);
	/* ...and re-insert it at the head of the chain. */
	JF(pull) = *diffp;

	/*
	 * At the top of the chain, each predecessor needs to point at the
	 * pulled up node.  Inside the chain, there is only one predecessor
	 * to worry about.
	 */
	if (at_top) {
		for (ep = b->in_edges; ep != 0; ep = ep->next) {
			if (JT(ep->pred) == b)
				JT(ep->pred) = pull;
			else
				JF(ep->pred) = pull;
		}
	}
	else
		*diffp = pull;

	/*
	 * XXX - this is one of the operations that happens when the
	 * optimizer gets into one of those infinite loops.
	 */
	opt_state->done = 0;

	/*
	 * Recompute dominator sets as control flow graph has changed.
	 */
	find_dom(opt_state, root);
}
1955
1956
/*
 * Mirror image of or_pullup(): walks the true chain instead of the
 * false chain, pulling up a later jump-compare on the same A value.
 */
static void
and_pullup(opt_state_t *opt_state, struct block *b, struct block *root)
{
	bpf_u_int32 val;
	int at_top;
	struct block *pull;
	struct block **diffp, **samep;
	struct edge *ep;

	ep = b->in_edges;
	if (ep == 0)
		return;

	/*
	 * Make sure each predecessor loads the same value.
	 */
	val = ep->pred->val[A_ATOM];
	for (ep = ep->next; ep != 0; ep = ep->next)
		if (val != ep->pred->val[A_ATOM])
			return;

	/* Which edge of the first predecessor leads here? */
	if (JT(b->in_edges->pred) == b)
		diffp = &JT(b->in_edges->pred);
	else
		diffp = &JF(b->in_edges->pred);

	/*
	 * Walk down the true chain as long as each block has the same
	 * false target as b, is dominated by b, and carries the same
	 * A value; stop at the first block with a different A value.
	 */
	at_top = 1;
	for (;;) {
		if (*diffp == 0)
			return;

		if (JF(*diffp) != JF(b))
			return;

		if (!SET_MEMBER((*diffp)->dom, b->id))
			return;

		if ((*diffp)->val[A_ATOM] != val)
			break;

		diffp = &JT(*diffp);
		at_top = 0;
	}
	/*
	 * Continue down the true chain until we find another block
	 * testing the original A value; that one gets pulled up.
	 */
	samep = &JT(*diffp);
	for (;;) {
		if (*samep == 0)
			return;

		if (JF(*samep) != JF(b))
			return;

		if (!SET_MEMBER((*samep)->dom, b->id))
			return;

		if ((*samep)->val[A_ATOM] == val)
			break;

		/* XXX Need to check that there are no data dependencies
		   between diffp and samep.  Currently, the code generator
		   will not produce such dependencies. */
		samep = &JT(*samep);
	}
#ifdef notdef
	/* XXX This doesn't cover everything. */
	for (i = 0; i < N_ATOMS; ++i)
		if ((*samep)->val[i] != pred->val[i])
			return;
#endif
	/* Pull up the node. */
	pull = *samep;
	*samep = JT(pull);
	JT(pull) = *diffp;

	/*
	 * At the top of the chain, each predecessor needs to point at the
	 * pulled up node.  Inside the chain, there is only one predecessor
	 * to worry about.
	 */
	if (at_top) {
		for (ep = b->in_edges; ep != 0; ep = ep->next) {
			if (JT(ep->pred) == b)
				JT(ep->pred) = pull;
			else
				JF(ep->pred) = pull;
		}
	}
	else
		*diffp = pull;

	/*
	 * XXX - this is one of the operations that happens when the
	 * optimizer gets into one of those infinite loops.
	 */
	opt_state->done = 0;

	/*
	 * Recompute dominator sets as control flow graph has changed.
	 */
	find_dom(opt_state, root);
}
2056
2057
/*
 * One optimizer pass over all blocks: run per-block optimization from the
 * leaves up, then (unless do_stmts) move branches and pull up common
 * jump-compares from the root down.
 */
static void
opt_blks(opt_state_t *opt_state, struct icode *ic, int do_stmts)
{
	int i, maxlevel;
	struct block *p;

	init_val(opt_state);
	maxlevel = ic->root->level;

	find_inedges(opt_state, ic->root);
	/* Highest level first: process predecessors before successors. */
	for (i = maxlevel; i >= 0; --i)
		for (p = opt_state->levels[i]; p; p = p->link)
			opt_blk(opt_state, p, do_stmts);

	if (do_stmts)
		/*
		 * No point trying to move branches; it can't possibly
		 * make a difference at this point.
		 *
		 * XXX - this might be after we detect a loop where
		 * we were just looping infinitely moving branches
		 * in such a fashion that we went through two or more
		 * versions of the machine code, eventually returning
		 * to the first version.  (We're really not doing a
		 * full loop detection, we're just testing for two
		 * passes in a row where we do nothing but
		 * move branches.)
		 */
		return;

	/*
	 * Is this what the BPF+ paper describes in sections 6.1.1,
	 * 6.1.2, and 6.1.3?
	 */
	for (i = 1; i <= maxlevel; ++i) {
		for (p = opt_state->levels[i]; p; p = p->link) {
			opt_j(opt_state, &p->et);
			opt_j(opt_state, &p->ef);
		}
	}

	/* opt_j() may have moved edges; rebuild predecessor lists. */
	find_inedges(opt_state, ic->root);
	for (i = 1; i <= maxlevel; ++i) {
		for (p = opt_state->levels[i]; p; p = p->link) {
			or_pullup(opt_state, p, ic->root);
			and_pullup(opt_state, p, ic->root);
		}
	}
}
2106
2107
static inline void
2108
link_inedge(struct edge *parent, struct block *child)
2109
0
{
2110
0
  parent->next = child->in_edges;
2111
0
  child->in_edges = parent;
2112
0
}
2113
2114
static void
2115
find_inedges(opt_state_t *opt_state, struct block *root)
2116
0
{
2117
0
  u_int i;
2118
0
  int level;
2119
0
  struct block *b;
2120
2121
0
  for (i = 0; i < opt_state->n_blocks; ++i)
2122
0
    opt_state->blocks[i]->in_edges = 0;
2123
2124
  /*
2125
   * Traverse the graph, adding each edge to the predecessor
2126
   * list of its successors.  Skip the leaves (i.e. level 0).
2127
   */
2128
0
  for (level = root->level; level > 0; --level) {
2129
0
    for (b = opt_state->levels[level]; b != 0; b = b->link) {
2130
0
      link_inedge(&b->et, JT(b));
2131
0
      link_inedge(&b->ef, JF(b));
2132
0
    }
2133
0
  }
2134
0
}
2135
2136
/*
 * Simplify the graph rooted at *b: skip root branches whose true and
 * false successors are identical (they decide nothing), carrying the
 * original root's statements down to the new root, and drop all
 * statements entirely if the root turns out to be a return.
 * May change *b to point at a different block.
 */
static void
opt_root(struct block **b)
{
	struct slist *tmp, *s;

	/* Detach the current root's statements before walking past it. */
	s = (*b)->stmts;
	(*b)->stmts = 0;
	/* A jump whose two targets coincide is a no-op; follow it. */
	while (BPF_CLASS((*b)->s.code) == BPF_JMP && JT(*b) == JF(*b))
		*b = JT(*b);

	/* Prepend the saved statements to the new root's statement list. */
	tmp = (*b)->stmts;
	if (tmp != 0)
		sappend(s, tmp);
	(*b)->stmts = s;

	/*
	 * If the root node is a return, then there is no
	 * point executing any statements (since the bpf machine
	 * has no side effects).
	 */
	if (BPF_CLASS((*b)->s.code) == BPF_RET)
		(*b)->stmts = 0;
}
2159
2160
/*
 * Run the optimizer passes over the intermediate code until a fixed
 * point is reached, with a heuristic guard against endlessly moving
 * branches back and forth without real progress.
 */
static void
opt_loop(opt_state_t *opt_state, struct icode *ic, int do_stmts)
{

#ifdef BDEBUG
	if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
		printf("%s(root, %d) begin\n", __func__, do_stmts);
		opt_dump(opt_state, ic);
	}
#endif

	/*
	 * XXX - optimizer loop detection.
	 */
	int loop_count = 0;
	for (;;) {
		/*
		 * XXX - optimizer loop detection.
		 */
		opt_state->non_branch_movement_performed = 0;
		/* Assume nothing changes; opt_blks() clears this if it does. */
		opt_state->done = 1;
		/* Recompute graph analyses invalidated by the previous pass. */
		find_levels(opt_state, ic);
		find_dom(opt_state, ic->root);
		find_closure(opt_state, ic->root);
		find_ud(opt_state, ic->root);
		find_edom(opt_state, ic->root);
		opt_blks(opt_state, ic, do_stmts);
#ifdef BDEBUG
		if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
			printf("%s(root, %d) bottom, done=%d\n", __func__, do_stmts, opt_state->done);
			opt_dump(opt_state, ic);
		}
#endif

		/*
		 * Was anything done in this optimizer pass?
		 */
		if (opt_state->done) {
			/*
			 * No, so we've reached a fixed point.
			 * We're done.
			 */
			break;
		}

		/*
		 * XXX - was anything done other than branch movement
		 * in this pass?
		 */
		if (opt_state->non_branch_movement_performed) {
			/*
			 * Yes.  Clear any loop-detection counter;
			 * we're making some form of progress (assuming
			 * we can't get into a cycle doing *other*
			 * optimizations...).
			 */
			loop_count = 0;
		} else {
			/*
			 * No - increment the counter, and quit if
			 * it's up to 100.
			 */
			loop_count++;
			if (loop_count >= 100) {
				/*
				 * We've done nothing but branch movement
				 * for 100 passes; we're probably
				 * in a cycle and will never reach a
				 * fixed point.
				 *
				 * XXX - yes, we really need a non-
				 * heuristic way of detecting a cycle.
				 */
				opt_state->done = 1;
				break;
			}
		}
	}
}
2239
2240
/*
 * Optimize the filter code in its dag representation.
 * Return 0 on success, -1 on error.
 *
 * On error, an error message is left in "errbuf" (if non-NULL); the
 * worker routines report errors by longjmp()ing back here, so all
 * cleanup happens in exactly one place.
 */
int
bpf_optimize(struct icode *ic, char *errbuf)
{
	opt_state_t opt_state;

	memset(&opt_state, 0, sizeof(opt_state));
	opt_state.errbuf = errbuf;
	/* Error recovery point: opt_error() longjmp()s back to here. */
	if (setjmp(opt_state.top_ctx)) {
		opt_cleanup(&opt_state);
		return -1;
	}
	opt_init(&opt_state, ic);
	/* Two rounds: first without statement optimization, then with. */
	opt_loop(&opt_state, ic, 0);
	opt_loop(&opt_state, ic, 1);
	/* Merge equivalent blocks into one. */
	intern_blocks(&opt_state, ic);
#ifdef BDEBUG
	if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
		printf("after intern_blocks()\n");
		opt_dump(&opt_state, ic);
	}
#endif
	/* Finally simplify the root of the graph. */
	opt_root(&ic->root);
#ifdef BDEBUG
	if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
		printf("after opt_root()\n");
		opt_dump(&opt_state, ic);
	}
#endif
	opt_cleanup(&opt_state);
	return 0;
}
2275
2276
static void
2277
make_marks(struct icode *ic, struct block *p)
2278
0
{
2279
0
  if (!isMarked(ic, p)) {
2280
0
    Mark(ic, p);
2281
0
    if (BPF_CLASS(p->s.code) != BPF_RET) {
2282
0
      make_marks(ic, JT(p));
2283
0
      make_marks(ic, JF(p));
2284
0
    }
2285
0
  }
2286
0
}
2287
2288
/*
2289
 * Mark code array such that isMarked(ic->cur_mark, i) is true
2290
 * only for nodes that are alive.
2291
 */
2292
static void
2293
mark_code(struct icode *ic)
2294
0
{
2295
0
  ic->cur_mark += 1;
2296
0
  make_marks(ic, ic->root);
2297
0
}
2298
2299
/*
2300
 * True iff the two stmt lists load the same value from the packet into
2301
 * the accumulator.
2302
 */
2303
static int
2304
eq_slist(struct slist *x, struct slist *y)
2305
0
{
2306
0
  for (;;) {
2307
0
    while (x && x->s.code == NOP)
2308
0
      x = x->next;
2309
0
    while (y && y->s.code == NOP)
2310
0
      y = y->next;
2311
0
    if (x == 0)
2312
0
      return y == 0;
2313
0
    if (y == 0)
2314
0
      return x == 0;
2315
0
    if (x->s.code != y->s.code || x->s.k != y->s.k)
2316
0
      return 0;
2317
0
    x = x->next;
2318
0
    y = y->next;
2319
0
  }
2320
0
}
2321
2322
static inline int
2323
eq_blk(struct block *b0, struct block *b1)
2324
0
{
2325
0
  if (b0->s.code == b1->s.code &&
2326
0
      b0->s.k == b1->s.k &&
2327
0
      b0->et.succ == b1->et.succ &&
2328
0
      b0->ef.succ == b1->ef.succ)
2329
0
    return eq_slist(b0->stmts, b1->stmts);
2330
0
  return 0;
2331
0
}
2332
2333
/*
 * Merge duplicate blocks: for each pair of equivalent live blocks,
 * point the later one's "link" at the earlier survivor, then retarget
 * all jumps through those links.  Repeat until no retargeting occurs,
 * since merging can expose new equivalences.
 */
static void
intern_blocks(opt_state_t *opt_state, struct icode *ic)
{
	struct block *p;
	u_int i, j;
	int done1; /* don't shadow global */
 top:
	done1 = 1;
	/* Clear previous merge links. */
	for (i = 0; i < opt_state->n_blocks; ++i)
		opt_state->blocks[i]->link = 0;

	/* Refresh liveness marks; only live blocks participate. */
	mark_code(ic);

	/*
	 * Walk the blocks from highest ID downward; for each live block,
	 * link it to an equivalent live block with a higher ID (or to
	 * that block's own merge target, to collapse chains).
	 */
	for (i = opt_state->n_blocks - 1; i != 0; ) {
		--i;
		if (!isMarked(ic, opt_state->blocks[i]))
			continue;
		for (j = i + 1; j < opt_state->n_blocks; ++j) {
			if (!isMarked(ic, opt_state->blocks[j]))
				continue;
			if (eq_blk(opt_state->blocks[i], opt_state->blocks[j])) {
				opt_state->blocks[i]->link = opt_state->blocks[j]->link ?
					opt_state->blocks[j]->link : opt_state->blocks[j];
				break;
			}
		}
	}
	/* Redirect every jump to a merged block at its survivor. */
	for (i = 0; i < opt_state->n_blocks; ++i) {
		p = opt_state->blocks[i];
		if (JT(p) == 0)
			continue;
		if (JT(p)->link) {
			done1 = 0;
			JT(p) = JT(p)->link;
		}
		if (JF(p)->link) {
			done1 = 0;
			JF(p) = JF(p)->link;
		}
	}
	if (!done1)
		goto top;
}
2376
2377
static void
2378
opt_cleanup(opt_state_t *opt_state)
2379
0
{
2380
0
  free((void *)opt_state->vnode_base);
2381
0
  free((void *)opt_state->vmap);
2382
0
  free((void *)opt_state->edges);
2383
0
  free((void *)opt_state->space);
2384
0
  free((void *)opt_state->levels);
2385
0
  free((void *)opt_state->blocks);
2386
0
}
2387
2388
/*
 * For optimizer errors.
 *
 * Formats the message into opt_state->errbuf (if any) and then
 * longjmp()s back to the recovery point set up in bpf_optimize();
 * it never returns to the caller.
 */
static void PCAP_NORETURN
opt_error(opt_state_t *opt_state, const char *fmt, ...)
{
	va_list ap;

	if (opt_state->errbuf != NULL) {
		va_start(ap, fmt);
		(void)vsnprintf(opt_state->errbuf,
		    PCAP_ERRBUF_SIZE, fmt, ap);
		va_end(ap);
	}
	longjmp(opt_state->top_ctx, 1);
	/* NOTREACHED */
#ifdef _AIX
	PCAP_UNREACHABLE
#endif /* _AIX */
}
2408
2409
/*
2410
 * Return the number of stmts in 's'.
2411
 */
2412
static u_int
2413
slength(struct slist *s)
2414
0
{
2415
0
  u_int n = 0;
2416
2417
0
  for (; s; s = s->next)
2418
0
    if (s->s.code != NOP)
2419
0
      ++n;
2420
0
  return n;
2421
0
}
2422
2423
/*
 * Return the number of nodes reachable by 'p'.
 * All nodes should be initially unmarked.
 */
static int
count_blocks(struct icode *ic, struct block *p)
{
	if (p == 0)
		return 0;
	if (isMarked(ic, p))
		return 0;
	Mark(ic, p);
	return 1 + count_blocks(ic, JT(p)) + count_blocks(ic, JF(p));
}
2435
2436
/*
 * Do a depth first search on the flow graph, numbering the
 * basic blocks, and entering them into the 'blocks' array.
 */
static void
number_blks_r(opt_state_t *opt_state, struct icode *ic, struct block *p)
{
	u_int n;

	/* Stop at graph leaves and at blocks already numbered. */
	if (p == 0 || isMarked(ic, p))
		return;

	Mark(ic, p);
	n = opt_state->n_blocks++;
	if (opt_state->n_blocks == 0) {
		/*
		 * Overflow of the u_int block counter.
		 */
		opt_error(opt_state, "filter is too complex to optimize");
	}
	p->id = n;
	opt_state->blocks[n] = p;

	number_blks_r(opt_state, ic, JT(p));
	number_blks_r(opt_state, ic, JF(p));
}
2462
2463
/*
2464
 * Return the number of stmts in the flowgraph reachable by 'p'.
2465
 * The nodes should be unmarked before calling.
2466
 *
2467
 * Note that "stmts" means "instructions", and that this includes
2468
 *
2469
 *  side-effect statements in 'p' (slength(p->stmts));
2470
 *
2471
 *  statements in the true branch from 'p' (count_stmts(JT(p)));
2472
 *
2473
 *  statements in the false branch from 'p' (count_stmts(JF(p)));
2474
 *
2475
 *  the conditional jump itself (1);
2476
 *
2477
 *  an extra long jump if the true branch requires it (p->longjt);
2478
 *
2479
 *  an extra long jump if the false branch requires it (p->longjf).
2480
 */
2481
static u_int
2482
count_stmts(struct icode *ic, struct block *p)
2483
0
{
2484
0
  u_int n;
2485
2486
0
  if (p == 0 || isMarked(ic, p))
2487
0
    return 0;
2488
0
  Mark(ic, p);
2489
0
  n = count_stmts(ic, JT(p)) + count_stmts(ic, JF(p));
2490
0
  return slength(p->stmts) + n + 1 + p->longjt + p->longjf;
2491
0
}
2492
2493
/*
 * Allocate memory.  All allocation is done before optimization
 * is begun.  A linear bound on the size of all data structures is computed
 * from the total number of blocks and/or statements.
 *
 * Every size computation below is checked for overflow before the
 * corresponding allocation; on any failure this longjmp()s out via
 * opt_error(), so opt_cleanup() must be able to cope with a partially
 * initialized state (it can: the state is zero-filled by the caller).
 */
static void
opt_init(opt_state_t *opt_state, struct icode *ic)
{
	bpf_u_int32 *p;
	int i, n, max_stmts;
	u_int product;
	size_t block_memsize, edge_memsize;

	/*
	 * First, count the blocks, so we can malloc an array to map
	 * block number to block.  Then, put the blocks into the array.
	 */
	unMarkAll(ic);
	n = count_blocks(ic, ic->root);
	opt_state->blocks = (struct block **)calloc(n, sizeof(*opt_state->blocks));
	if (opt_state->blocks == NULL)
		opt_error(opt_state, "malloc");
	unMarkAll(ic);
	opt_state->n_blocks = 0;
	number_blks_r(opt_state, ic, ic->root);

	/*
	 * This "should not happen".
	 */
	if (opt_state->n_blocks == 0)
		opt_error(opt_state, "filter has no instructions; please report this as a libpcap issue");

	/* Each block has exactly two outgoing edges (true and false). */
	opt_state->n_edges = 2 * opt_state->n_blocks;
	if ((opt_state->n_edges / 2) != opt_state->n_blocks) {
		/*
		 * Overflow.
		 */
		opt_error(opt_state, "filter is too complex to optimize");
	}
	opt_state->edges = (struct edge **)calloc(opt_state->n_edges, sizeof(*opt_state->edges));
	if (opt_state->edges == NULL) {
		opt_error(opt_state, "malloc");
	}

	/*
	 * The number of levels is bounded by the number of nodes.
	 */
	opt_state->levels = (struct block **)calloc(opt_state->n_blocks, sizeof(*opt_state->levels));
	if (opt_state->levels == NULL) {
		opt_error(opt_state, "malloc");
	}

	/* Bit-set sizes, in words, for edge sets and node sets. */
	opt_state->edgewords = opt_state->n_edges / BITS_PER_WORD + 1;
	opt_state->nodewords = opt_state->n_blocks / BITS_PER_WORD + 1;

	/*
	 * Make sure opt_state->n_blocks * opt_state->nodewords fits
	 * in a u_int; we use it as a u_int number-of-iterations
	 * value.
	 */
	product = opt_state->n_blocks * opt_state->nodewords;
	if ((product / opt_state->n_blocks) != opt_state->nodewords) {
		/*
		 * XXX - just punt and don't try to optimize?
		 * In practice, this is unlikely to happen with
		 * a normal filter.
		 */
		opt_error(opt_state, "filter is too complex to optimize");
	}

	/*
	 * Make sure the total memory required for that doesn't
	 * overflow.  The factor of 2 covers the dom and closure sets.
	 */
	block_memsize = (size_t)2 * product * sizeof(*opt_state->space);
	if ((block_memsize / product) != 2 * sizeof(*opt_state->space)) {
		opt_error(opt_state, "filter is too complex to optimize");
	}

	/*
	 * Make sure opt_state->n_edges * opt_state->edgewords fits
	 * in a u_int; we use it as a u_int number-of-iterations
	 * value.
	 */
	product = opt_state->n_edges * opt_state->edgewords;
	if ((product / opt_state->n_edges) != opt_state->edgewords) {
		opt_error(opt_state, "filter is too complex to optimize");
	}

	/*
	 * Make sure the total memory required for that doesn't
	 * overflow.
	 */
	edge_memsize = (size_t)product * sizeof(*opt_state->space);
	if (edge_memsize / product != sizeof(*opt_state->space)) {
		opt_error(opt_state, "filter is too complex to optimize");
	}

	/*
	 * Make sure the total memory required for both of them doesn't
	 * overflow.
	 */
	if (block_memsize > SIZE_MAX - edge_memsize) {
		opt_error(opt_state, "filter is too complex to optimize");
	}

	/* XXX */
	opt_state->space = (bpf_u_int32 *)malloc(block_memsize + edge_memsize);
	if (opt_state->space == NULL) {
		opt_error(opt_state, "malloc");
	}
	/* Carve the single arena into per-block and per-edge bit sets. */
	p = opt_state->space;
	opt_state->all_dom_sets = p;
	for (i = 0; i < n; ++i) {
		opt_state->blocks[i]->dom = p;
		p += opt_state->nodewords;
	}
	opt_state->all_closure_sets = p;
	for (i = 0; i < n; ++i) {
		opt_state->blocks[i]->closure = p;
		p += opt_state->nodewords;
	}
	opt_state->all_edge_sets = p;
	for (i = 0; i < n; ++i) {
		struct block *b = opt_state->blocks[i];

		b->et.edom = p;
		p += opt_state->edgewords;
		b->ef.edom = p;
		p += opt_state->edgewords;
		/* True edges use IDs 0..n_blocks-1, false edges the rest. */
		b->et.id = i;
		opt_state->edges[i] = &b->et;
		b->ef.id = opt_state->n_blocks + i;
		opt_state->edges[opt_state->n_blocks + i] = &b->ef;
		b->et.pred = b;
		b->ef.pred = b;
	}
	max_stmts = 0;
	for (i = 0; i < n; ++i)
		max_stmts += slength(opt_state->blocks[i]->stmts) + 1;
	/*
	 * We allocate at most 3 value numbers per statement,
	 * so this is an upper bound on the number of valnodes
	 * we'll need.
	 */
	opt_state->maxval = 3 * max_stmts;
	opt_state->vmap = (struct vmapinfo *)calloc(opt_state->maxval, sizeof(*opt_state->vmap));
	if (opt_state->vmap == NULL) {
		opt_error(opt_state, "malloc");
	}
	opt_state->vnode_base = (struct valnode *)calloc(opt_state->maxval, sizeof(*opt_state->vnode_base));
	if (opt_state->vnode_base == NULL) {
		opt_error(opt_state, "malloc");
	}
}
2648
2649
/*
2650
 * This is only used when supporting optimizer debugging.  It is
2651
 * global state, so do *not* do more than one compile in parallel
2652
 * and expect it to provide meaningful information.
2653
 */
2654
#ifdef BDEBUG
2655
int bids[NBIDS];
2656
#endif
2657
2658
/*
2659
 * Returns true if successful.  Returns false if a branch has
2660
 * an offset that is too large.  If so, we have marked that
2661
 * branch so that on a subsequent iteration, it will be treated
2662
 * properly.
2663
 */
2664
static int
2665
convert_code_r(conv_state_t *conv_state, struct icode *ic, struct block *p)
2666
0
{
2667
0
  struct bpf_insn *dst;
2668
0
  struct slist *src;
2669
0
  u_int slen;
2670
0
  u_int off;
2671
0
  struct slist **offset = NULL;
2672
2673
0
  if (p == 0 || isMarked(ic, p))
2674
0
    return (1);
2675
0
  Mark(ic, p);
2676
2677
0
  if (convert_code_r(conv_state, ic, JF(p)) == 0)
2678
0
    return (0);
2679
0
  if (convert_code_r(conv_state, ic, JT(p)) == 0)
2680
0
    return (0);
2681
2682
0
  slen = slength(p->stmts);
2683
0
  dst = conv_state->ftail -= (slen + 1 + p->longjt + p->longjf);
2684
    /* inflate length by any extra jumps */
2685
2686
0
  p->offset = (int)(dst - conv_state->fstart);
2687
2688
  /* generate offset[] for convenience  */
2689
0
  if (slen) {
2690
0
    offset = (struct slist **)calloc(slen, sizeof(struct slist *));
2691
0
    if (!offset) {
2692
0
      conv_error(conv_state, "not enough core");
2693
      /*NOTREACHED*/
2694
0
    }
2695
0
  }
2696
0
  src = p->stmts;
2697
0
  for (off = 0; off < slen && src; off++) {
2698
#if 0
2699
    printf("off=%d src=%x\n", off, src);
2700
#endif
2701
0
    offset[off] = src;
2702
0
    src = src->next;
2703
0
  }
2704
2705
0
  off = 0;
2706
0
  for (src = p->stmts; src; src = src->next) {
2707
0
    if (src->s.code == NOP)
2708
0
      continue;
2709
0
    dst->code = (u_short)src->s.code;
2710
0
    dst->k = src->s.k;
2711
2712
    /* fill block-local relative jump */
2713
0
    if (BPF_CLASS(src->s.code) != BPF_JMP || src->s.code == (BPF_JMP|BPF_JA)) {
2714
#if 0
2715
      if (src->s.jt || src->s.jf) {
2716
        free(offset);
2717
        conv_error(conv_state, "illegal jmp destination");
2718
        /*NOTREACHED*/
2719
      }
2720
#endif
2721
0
      goto filled;
2722
0
    }
2723
0
    if (off == slen - 2) /*???*/
2724
0
      goto filled;
2725
2726
0
      {
2727
0
    u_int i;
2728
0
    int jt, jf;
2729
0
    const char ljerr[] = "%s for block-local relative jump: off=%d";
2730
2731
#if 0
2732
    printf("code=%x off=%d %x %x\n", src->s.code,
2733
      off, src->s.jt, src->s.jf);
2734
#endif
2735
2736
0
    if (!src->s.jt || !src->s.jf) {
2737
0
      free(offset);
2738
0
      conv_error(conv_state, ljerr, "no jmp destination", off);
2739
      /*NOTREACHED*/
2740
0
    }
2741
2742
0
    jt = jf = 0;
2743
0
    for (i = 0; i < slen; i++) {
2744
0
      if (offset[i] == src->s.jt) {
2745
0
        if (jt) {
2746
0
          free(offset);
2747
0
          conv_error(conv_state, ljerr, "multiple matches", off);
2748
          /*NOTREACHED*/
2749
0
        }
2750
2751
0
        if (i - off - 1 >= 256) {
2752
0
          free(offset);
2753
0
          conv_error(conv_state, ljerr, "out-of-range jump", off);
2754
          /*NOTREACHED*/
2755
0
        }
2756
0
        dst->jt = (u_char)(i - off - 1);
2757
0
        jt++;
2758
0
      }
2759
0
      if (offset[i] == src->s.jf) {
2760
0
        if (jf) {
2761
0
          free(offset);
2762
0
          conv_error(conv_state, ljerr, "multiple matches", off);
2763
          /*NOTREACHED*/
2764
0
        }
2765
0
        if (i - off - 1 >= 256) {
2766
0
          free(offset);
2767
0
          conv_error(conv_state, ljerr, "out-of-range jump", off);
2768
          /*NOTREACHED*/
2769
0
        }
2770
0
        dst->jf = (u_char)(i - off - 1);
2771
0
        jf++;
2772
0
      }
2773
0
    }
2774
0
    if (!jt || !jf) {
2775
0
      free(offset);
2776
0
      conv_error(conv_state, ljerr, "no destination found", off);
2777
      /*NOTREACHED*/
2778
0
    }
2779
0
      }
2780
0
filled:
2781
0
    ++dst;
2782
0
    ++off;
2783
0
  }
2784
0
  if (offset)
2785
0
    free(offset);
2786
2787
#ifdef BDEBUG
2788
  if (dst - conv_state->fstart < NBIDS)
2789
    bids[dst - conv_state->fstart] = p->id + 1;
2790
#endif
2791
0
  dst->code = (u_short)p->s.code;
2792
0
  dst->k = p->s.k;
2793
0
  if (JT(p)) {
2794
    /* number of extra jumps inserted */
2795
0
    u_char extrajmps = 0;
2796
0
    off = JT(p)->offset - (p->offset + slen) - 1;
2797
0
    if (off >= 256) {
2798
        /* offset too large for branch, must add a jump */
2799
0
        if (p->longjt == 0) {
2800
      /* mark this instruction and retry */
2801
0
      p->longjt++;
2802
0
      return(0);
2803
0
        }
2804
0
        dst->jt = extrajmps;
2805
0
        extrajmps++;
2806
0
        dst[extrajmps].code = BPF_JMP|BPF_JA;
2807
0
        dst[extrajmps].k = off - extrajmps;
2808
0
    }
2809
0
    else
2810
0
        dst->jt = (u_char)off;
2811
0
    off = JF(p)->offset - (p->offset + slen) - 1;
2812
0
    if (off >= 256) {
2813
        /* offset too large for branch, must add a jump */
2814
0
        if (p->longjf == 0) {
2815
      /* mark this instruction and retry */
2816
0
      p->longjf++;
2817
0
      return(0);
2818
0
        }
2819
        /* branch if F to following jump */
2820
        /* if two jumps are inserted, F goes to second one */
2821
0
        dst->jf = extrajmps;
2822
0
        extrajmps++;
2823
0
        dst[extrajmps].code = BPF_JMP|BPF_JA;
2824
0
        dst[extrajmps].k = off - extrajmps;
2825
0
    }
2826
0
    else
2827
0
        dst->jf = (u_char)off;
2828
0
  }
2829
0
  return (1);
2830
0
}
2831
2832
2833
/*
 * Convert flowgraph intermediate representation to the
 * BPF array representation.  Set *lenp to the number of instructions.
 *
 * This routine does *NOT* leak the memory pointed to by fp.  It *must
 * not* do free(fp) before returning fp; doing so would make no sense,
 * as the BPF array pointed to by the return value of icode_to_fcode()
 * must be valid - it's being returned for use in a bpf_program structure.
 *
 * If it appears that icode_to_fcode() is leaking, the problem is that
 * the program using pcap_compile() is failing to free the memory in
 * the BPF program when it's done - the leak is in the program, not in
 * the routine that happens to be allocating the memory.  (By analogy, if
 * a program calls fopen() without ever calling fclose() on the FILE *,
 * it will leak the FILE structure; the leak is not in fopen(), it's in
 * the program.)  Change the program to use pcap_freecode() when it's
 * done with the filter program.  See the pcap man page.
 */
struct bpf_insn *
icode_to_fcode(struct icode *ic, struct block *root, u_int *lenp,
    char *errbuf)
{
	u_int n;
	struct bpf_insn *fp;
	conv_state_t conv_state;

	conv_state.fstart = NULL;
	conv_state.errbuf = errbuf;
	/* conv_error() longjmp()s back here; free any partial output. */
	if (setjmp(conv_state.top_ctx) != 0) {
		free(conv_state.fstart);
		return NULL;
	}

	/*
	 * Loop doing convert_code_r() until no branches remain
	 * with too-large offsets.  Each failed attempt marks the
	 * offending branch (longjt/longjf), which grows the count
	 * returned by count_stmts() on the next iteration.
	 */
	for (;;) {
	    unMarkAll(ic);
	    n = *lenp = count_stmts(ic, root);

	    fp = (struct bpf_insn *)malloc(sizeof(*fp) * n);
	    if (fp == NULL) {
		(void)snprintf(errbuf, PCAP_ERRBUF_SIZE,
		    "malloc");
		return NULL;
	    }
	    memset((char *)fp, 0, sizeof(*fp) * n);
	    conv_state.fstart = fp;
	    conv_state.ftail = fp + n;

	    unMarkAll(ic);
	    if (convert_code_r(&conv_state, ic, root))
		break;
	    /* A branch was out of range; discard and retry. */
	    free(fp);
	}

	return fp;
}
2892
2893
/*
 * For icode_to_fcode() errors.
 *
 * Formats the message into conv_state->errbuf and longjmp()s back to
 * the recovery point set up in icode_to_fcode(); it never returns.
 */
static void PCAP_NORETURN
conv_error(conv_state_t *conv_state, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	(void)vsnprintf(conv_state->errbuf,
	    PCAP_ERRBUF_SIZE, fmt, ap);
	va_end(ap);
	longjmp(conv_state->top_ctx, 1);
	/* NOTREACHED */
#ifdef _AIX
	PCAP_UNREACHABLE
#endif /* _AIX */
}
2911
2912
/*
2913
 * Make a copy of a BPF program and put it in the "fcode" member of
2914
 * a "pcap_t".
2915
 *
2916
 * If we fail to allocate memory for the copy, fill in the "errbuf"
2917
 * member of the "pcap_t" with an error message, and return -1;
2918
 * otherwise, return 0.
2919
 */
2920
int
2921
pcapint_install_bpf_program(pcap_t *p, struct bpf_program *fp)
2922
0
{
2923
0
  size_t prog_size;
2924
2925
  /*
2926
   * Validate the program.
2927
   */
2928
0
  if (!pcapint_validate_filter(fp->bf_insns, fp->bf_len)) {
2929
0
    snprintf(p->errbuf, sizeof(p->errbuf),
2930
0
      "BPF program is not valid");
2931
0
    return (-1);
2932
0
  }
2933
2934
  /*
2935
   * Free up any already installed program.
2936
   */
2937
0
  pcap_freecode(&p->fcode);
2938
2939
0
  prog_size = sizeof(*fp->bf_insns) * fp->bf_len;
2940
0
  p->fcode.bf_len = fp->bf_len;
2941
0
  p->fcode.bf_insns = (struct bpf_insn *)malloc(prog_size);
2942
0
  if (p->fcode.bf_insns == NULL) {
2943
0
    pcapint_fmt_errmsg_for_errno(p->errbuf, sizeof(p->errbuf),
2944
0
        errno, "malloc");
2945
0
    return (-1);
2946
0
  }
2947
0
  memcpy(p->fcode.bf_insns, fp->bf_insns, prog_size);
2948
0
  return (0);
2949
0
}
2950
2951
#ifdef BDEBUG
2952
/*
 * Emit one DOT node per block reachable from "block": an ellipse whose
 * label lists the block's generated instructions and whose tooltip
 * shows the value numbers live at block exit.  Terminal (return)
 * blocks get a double border.  Blocks must be unmarked before the
 * initial call; marking prevents revisiting shared subgraphs.
 */
static void
dot_dump_node(struct icode *ic, struct block *block, struct bpf_program *prog,
    FILE *out)
{
	int icount, noffset;
	int i;

	if (block == NULL || isMarked(ic, block))
		return;
	Mark(ic, block);

	/* Instruction span of this block within the generated program. */
	icount = slength(block->stmts) + 1 + block->longjt + block->longjf;
	noffset = min(block->offset + icount, (int)prog->bf_len);

	fprintf(out, "\tblock%u [shape=ellipse, id=\"block-%u\" label=\"BLOCK%u\\n", block->id, block->id, block->id);
	for (i = block->offset; i < noffset; i++) {
		fprintf(out, "\\n%s", bpf_image(prog->bf_insns + i, i));
	}
	fprintf(out, "\" tooltip=\"");
	for (i = 0; i < BPF_MEMWORDS; i++)
		if (block->val[i] != VAL_UNKNOWN)
			fprintf(out, "val[%d]=%d ", i, block->val[i]);
	fprintf(out, "val[A]=%d ", block->val[A_ATOM]);
	fprintf(out, "val[X]=%d", block->val[X_ATOM]);
	fprintf(out, "\"");
	if (JT(block) == NULL)
		fprintf(out, ", peripheries=2");
	fprintf(out, "];\n");

	dot_dump_node(ic, JT(block), prog, out);
	dot_dump_node(ic, JF(block), prog, out);
}
2984
2985
/*
 * Emit the DOT edges for every block reachable from "block": a "T"
 * edge to the true successor and an "F" edge to the false successor.
 * Blocks must be unmarked before the initial call.
 */
static void
dot_dump_edge(struct icode *ic, struct block *block, FILE *out)
{
	if (block == NULL || isMarked(ic, block))
		return;
	Mark(ic, block);

	if (JT(block)) {
		fprintf(out, "\t\"block%u\":se -> \"block%u\":n [label=\"T\"]; \n",
			block->id, JT(block)->id);
		fprintf(out, "\t\"block%u\":sw -> \"block%u\":n [label=\"F\"]; \n",
			 block->id, JF(block)->id);
	}
	dot_dump_edge(ic, JT(block), out);
	dot_dump_edge(ic, JF(block), out);
}
3001
3002
/* Output the block CFG using graphviz/DOT language
 * In the CFG, block's code, value index for each registers at EXIT,
 * and the jump relationship is show.
 *
 * example DOT for BPF `ip src host 1.1.1.1' is:
    digraph BPF {
	block0 [shape=ellipse, id="block-0" label="BLOCK0\n\n(000) ldh      [12]\n(001) jeq      #0x800           jt 2	jf 5" tooltip="val[A]=0 val[X]=0"];
	block1 [shape=ellipse, id="block-1" label="BLOCK1\n\n(002) ld       [26]\n(003) jeq      #0x1010101       jt 4	jf 5" tooltip="val[A]=0 val[X]=0"];
	block2 [shape=ellipse, id="block-2" label="BLOCK2\n\n(004) ret      #68" tooltip="val[A]=0 val[X]=0", peripheries=2];
	block3 [shape=ellipse, id="block-3" label="BLOCK3\n\n(005) ret      #0" tooltip="val[A]=0 val[X]=0", peripheries=2];
	"block0":se -> "block1":n [label="T"];
	"block0":sw -> "block3":n [label="F"];
	"block1":se -> "block2":n [label="T"];
	"block1":sw -> "block3":n [label="F"];
    }
 *
 *  After install graphviz on https://www.graphviz.org/, save it as bpf.dot
 *  and run `dot -Tpng -O bpf.dot' to draw the graph.
 *
 *  Returns 0 on success, -1 (with a message in errbuf) if the
 *  intermediate code could not be converted.
 */
static int
dot_dump(struct icode *ic, char *errbuf)
{
	struct bpf_program f;
	FILE *out = stdout;

	/* Generate the instruction array so labels can show real code. */
	memset(bids, 0, sizeof bids);
	f.bf_insns = icode_to_fcode(ic, ic->root, &f.bf_len, errbuf);
	if (f.bf_insns == NULL)
		return -1;

	fprintf(out, "digraph BPF {\n");
	/* Two traversals: nodes first, then edges; remark in between. */
	unMarkAll(ic);
	dot_dump_node(ic, ic->root, &f, out);
	unMarkAll(ic);
	dot_dump_edge(ic, ic->root, out);
	fprintf(out, "}\n");

	free((char *)f.bf_insns);
	return 0;
}
3042
3043
/*
 * Dump the generated BPF program as plain disassembly to stdout.
 * Returns 0 on success, -1 (with a message in errbuf) on conversion
 * failure.
 */
static int
plain_dump(struct icode *ic, char *errbuf)
{
	struct bpf_program f;

	memset(bids, 0, sizeof bids);
	f.bf_insns = icode_to_fcode(ic, ic->root, &f.bf_len, errbuf);
	if (f.bf_insns == NULL)
		return -1;
	bpf_dump(&f, 1);
	putchar('\n');
	free((char *)f.bf_insns);
	return 0;
}
3057
3058
/*
 * Debug dump of the current optimizer state, either as a DOT graph or
 * as plain disassembly depending on the global pcap_print_dot_graph
 * flag.  On conversion failure this raises an optimizer error (which
 * longjmp()s out via opt_error()).
 */
static void
opt_dump(opt_state_t *opt_state, struct icode *ic)
{
	int status;
	char errbuf[PCAP_ERRBUF_SIZE];

	/*
	 * If the CFG, in DOT format, is requested, output it rather than
	 * the code that would be generated from that graph.
	 */
	if (pcap_print_dot_graph)
		status = dot_dump(ic, errbuf);
	else
		status = plain_dump(ic, errbuf);
	if (status == -1)
		opt_error(opt_state, "%s: icode_to_fcode failed: %s", __func__, errbuf);
}
3075
#endif