Coverage Report

Created: 2024-03-28 05:35

/src/libpcap/optimize.c
Line  Count  Source
1
/*
2
 * Copyright (c) 1988, 1989, 1990, 1991, 1993, 1994, 1995, 1996
3
 *  The Regents of the University of California.  All rights reserved.
4
 *
5
 * Redistribution and use in source and binary forms, with or without
6
 * modification, are permitted provided that: (1) source code distributions
7
 * retain the above copyright notice and this paragraph in its entirety, (2)
8
 * distributions including binary code include the above copyright notice and
9
 * this paragraph in its entirety in the documentation or other materials
10
 * provided with the distribution, and (3) all advertising materials mentioning
11
 * features or use of this software display the following acknowledgement:
12
 * ``This product includes software developed by the University of California,
13
 * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
14
 * the University nor the names of its contributors may be used to endorse
15
 * or promote products derived from this software without specific prior
16
 * written permission.
17
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
18
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
19
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
20
 *
21
 *  Optimization module for BPF code intermediate representation.
22
 */
23
24
#ifdef HAVE_CONFIG_H
25
#include <config.h>
26
#endif
27
28
#include <pcap-types.h>
29
30
#include <stdio.h>
31
#include <stdlib.h>
32
#include <memory.h>
33
#include <setjmp.h>
34
#include <string.h>
35
#include <limits.h> /* for SIZE_MAX */
36
#include <errno.h>
37
38
#include "pcap-int.h"
39
40
#include "gencode.h"
41
#include "optimize.h"
42
#include "diag-control.h"
43
44
#ifdef HAVE_OS_PROTO_H
45
#include "os-proto.h"
46
#endif
47
48
#ifdef BDEBUG
49
/*
50
 * The internal "debug printout" flag for the filter expression optimizer.
51
 * The code to print that stuff is present only if BDEBUG is defined, so
52
 * the flag, and the routine to set it, are defined only if BDEBUG is
53
 * defined.
54
 */
55
static int pcap_optimizer_debug;
56
57
/*
58
 * Routine to set that flag.
59
 *
60
 * This is intended for libpcap developers, not for general use.
61
 * If you want to set these in a program, you'll have to declare this
62
 * routine yourself, with the appropriate DLL import attribute on Windows;
63
 * it's not declared in any header file, and won't be declared in any
64
 * header file provided by libpcap.
65
 */
66
PCAP_API void pcap_set_optimizer_debug(int value);
67
68
PCAP_API_DEF void
69
pcap_set_optimizer_debug(int value)
70
{
71
  pcap_optimizer_debug = value;
72
}
73
74
/*
75
 * The internal "print dot graph" flag for the filter expression optimizer.
76
 * The code to print that stuff is present only if BDEBUG is defined, so
77
 * the flag, and the routine to set it, are defined only if BDEBUG is
78
 * defined.
79
 */
80
static int pcap_print_dot_graph;
81
82
/*
83
 * Routine to set that flag.
84
 *
85
 * This is intended for libpcap developers, not for general use.
86
 * If you want to set these in a program, you'll have to declare this
87
 * routine yourself, with the appropriate DLL import attribute on Windows;
88
 * it's not declared in any header file, and won't be declared in any
89
 * header file provided by libpcap.
90
 */
91
PCAP_API void pcap_set_print_dot_graph(int value);
92
93
PCAP_API_DEF void
94
pcap_set_print_dot_graph(int value)
95
{
96
  pcap_print_dot_graph = value;
97
}
98
99
#endif
100
101
/*
102
 * lowest_set_bit().
103
 *
104
 * Takes a 32-bit integer as an argument.
105
 *
106
 * If handed a non-zero value, returns the index of the lowest set bit,
107
 * counting upwards from zero.
108
 *
109
 * If handed zero, the results are platform- and compiler-dependent.
110
 * Keep it out of the light, don't give it any water, don't feed it
111
 * after midnight, and don't pass zero to it.
112
 *
113
 * This is the same as the count of trailing zeroes in the word.
114
 */
115
#if PCAP_IS_AT_LEAST_GNUC_VERSION(3,4)
116
  /*
117
   * GCC 3.4 and later; we have __builtin_ctz().
118
   */
119
4.30M
  #define lowest_set_bit(mask) ((u_int)__builtin_ctz(mask))
120
#elif defined(_MSC_VER)
121
  /*
122
   * Visual Studio; we support only 2005 and later, so use
123
   * _BitScanForward().
124
   */
125
#include <intrin.h>
126
127
#ifndef __clang__
128
#pragma intrinsic(_BitScanForward)
129
#endif
130
131
static __forceinline u_int
132
lowest_set_bit(int mask)
133
{
134
  unsigned long bit;
135
136
  /*
137
   * Don't sign-extend mask if long is longer than int.
138
   * (It's currently not, in MSVC, even on 64-bit platforms, but....)
139
   */
140
  if (_BitScanForward(&bit, (unsigned int)mask) == 0)
141
    abort();  /* mask is zero */
142
  return (u_int)bit;
143
}
144
#elif defined(MSDOS) && defined(__DJGPP__)
145
  /*
146
   * MS-DOS with DJGPP, which declares ffs() in <string.h>, which
147
   * we've already included.
148
   */
149
  #define lowest_set_bit(mask)  ((u_int)(ffs((mask)) - 1))
150
#elif (defined(MSDOS) && defined(__WATCOMC__)) || defined(STRINGS_H_DECLARES_FFS)
151
  /*
152
   * MS-DOS with Watcom C, which has <strings.h> and declares ffs() there,
153
   * or some other platform (UN*X conforming to a sufficiently recent version
154
   * of the Single UNIX Specification).
155
   */
156
  #include <strings.h>
157
  #define lowest_set_bit(mask)  (u_int)((ffs((mask)) - 1))
158
#else
159
/*
160
 * None of the above.
161
 * Use a perfect-hash-function-based function.
162
 */
163
static u_int
164
lowest_set_bit(int mask)
165
{
166
  unsigned int v = (unsigned int)mask;
167
168
  static const u_int MultiplyDeBruijnBitPosition[32] = {
169
    0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
170
    31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
171
  };
172
173
  /*
174
   * We strip off all but the lowermost set bit (v & -v),
175
   * and perform a minimal perfect hash on it to look up the
176
   * number of low-order zero bits in a table.
177
   *
178
   * See:
179
   *
180
   *  http://7ooo.mooo.com/text/ComputingTrailingZerosHOWTO.pdf
181
   *
182
   *  http://supertech.csail.mit.edu/papers/debruijn.pdf
183
   */
184
  return (MultiplyDeBruijnBitPosition[((v & -v) * 0x077CB531U) >> 27]);
185
}
186
#endif
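/*
 * Editorial worked example of the De Bruijn fallback above (not part
 * of optimize.c).  For mask = 40 (binary 101000):
 *
 *	v & -v           = 8            (isolates the lowest set bit)
 *	8 * 0x077CB531U  = 0x3BE5A988
 *	0x3BE5A988 >> 27 = 7            (the top 5 bits index the table)
 *	MultiplyDeBruijnBitPosition[7] = 3
 *
 * and bit 3 is indeed the lowest set bit of 40.  A standalone sanity
 * check against a naive loop:
 */
#if 0	/* illustration only; never compiled */
#include <assert.h>

static u_int
naive_ctz(unsigned int v)
{
	u_int n = 0;

	while ((v & 1) == 0) {	/* v must be non-zero, as documented */
		v >>= 1;
		n++;
	}
	return n;
}

static void
check_lowest_set_bit(void)
{
	unsigned int v;

	for (v = 1; v != 0; v <<= 1)
		assert(lowest_set_bit((int)v) == naive_ctz(v));
	assert(lowest_set_bit(40) == 3);
}
#endif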
187
188
/*
189
 * Represents a deleted instruction.
190
 */
191
48.8M
#define NOP -1
192
193
/*
194
 * Register numbers for use-def values.
195
 * 0 through BPF_MEMWORDS-1 represent the corresponding scratch memory
196
 * location.  A_ATOM is the accumulator and X_ATOM is the index
197
 * register.
198
 */
199
26.6M
#define A_ATOM BPF_MEMWORDS
200
5.42M
#define X_ATOM (BPF_MEMWORDS+1)
201
202
/*
203
 * This define is used to represent *both* the accumulator and
204
 * x register in use-def computations.
205
 * Currently, the use-def code assumes only one definition per instruction.
206
 */
207
8.00M
#define AX_ATOM N_ATOMS
208
209
/*
210
 * These data structures are used in a Cocke and Schwartz style
211
 * value numbering scheme.  Since the flowgraph is acyclic,
212
 * exit values can be propagated from a node's predecessors
213
 * provided it is uniquely defined.
214
 */
215
struct valnode {
216
  int code;
217
  bpf_u_int32 v0, v1;
218
  int val;    /* the value number */
219
  struct valnode *next;
220
};
221
222
/* Integer constants mapped with the load immediate opcode. */
223
2.41M
#define K(i) F(opt_state, BPF_LD|BPF_IMM|BPF_W, i, 0U)
224
225
struct vmapinfo {
226
  int is_const;
227
  bpf_u_int32 const_val;
228
};
229
230
typedef struct {
231
  /*
232
   * Place to longjmp to on an error.
233
   */
234
  jmp_buf top_ctx;
235
236
  /*
237
   * The buffer into which to put the error message.
238
   */
239
  char *errbuf;
240
241
  /*
242
   * A flag to indicate that further optimization is needed.
243
   * Iterative passes are continued until a given pass yields no
244
   * code simplification or branch movement.
245
   */
246
  int done;
247
248
  /*
249
   * XXX - detect loops that do nothing but repeated AND/OR pullups
250
   * and edge moves.
251
   * If 100 passes in a row do nothing but that, treat that as a
252
   * sign that we're in a cycle in which each pass just
253
   * reshuffles the code and we eventually get back to the
254
   * original configuration.
255
   *
256
   * XXX - we need a non-heuristic way of detecting, or preventing,
257
   * such a cycle.
258
   */
259
  int non_branch_movement_performed;
260
261
  u_int n_blocks;   /* number of blocks in the CFG; guaranteed to be > 0, as the program is at minimum a single RET instruction */
262
  struct block **blocks;
263
  u_int n_edges;    /* twice n_blocks, so guaranteed to be > 0 */
264
  struct edge **edges;
265
266
  /*
267
   * A bit vector set representation of the dominators.
268
   * We round up the set size to the next power of two.
269
   */
270
  u_int nodewords;  /* number of 32-bit words for a bit vector of "number of nodes" bits; guaranteed to be > 0 */
271
  u_int edgewords;  /* number of 32-bit words for a bit vector of "number of edges" bits; guaranteed to be > 0 */
272
  struct block **levels;
273
  bpf_u_int32 *space;
274
275
18.2M
#define BITS_PER_WORD (8*sizeof(bpf_u_int32))
276
/*
277
 * True if 'a' is in uset 'p'.
278
 */
279
1.23M
#define SET_MEMBER(p, a) \
280
1.23M
((p)[(unsigned)(a) / BITS_PER_WORD] & ((bpf_u_int32)1 << ((unsigned)(a) % BITS_PER_WORD)))
281
282
/*
283
 * Add 'a' to uset p.
284
 */
285
5.68M
#define SET_INSERT(p, a) \
286
5.68M
(p)[(unsigned)(a) / BITS_PER_WORD] |= ((bpf_u_int32)1 << ((unsigned)(a) % BITS_PER_WORD))
287
288
/*
289
 * Delete 'a' from uset p.
290
 */
291
#define SET_DELETE(p, a) \
292
(p)[(unsigned)(a) / BITS_PER_WORD] &= ~((bpf_u_int32)1 << ((unsigned)(a) % BITS_PER_WORD))
293
294
/*
295
 * a := a intersect b
296
 * n must be guaranteed to be > 0
297
 */
298
7.05M
#define SET_INTERSECT(a, b, n)\
299
7.05M
{\
300
7.05M
  register bpf_u_int32 *_x = a, *_y = b;\
301
7.05M
  register u_int _n = n;\
302
25.3M
  do *_x++ &= *_y++; while (--_n != 0);\
303
7.05M
}
304
305
/*
306
 * a := a - b
307
 * n must be guaranteed to be > 0
308
 */
309
#define SET_SUBTRACT(a, b, n)\
310
{\
311
  register bpf_u_int32 *_x = a, *_y = b;\
312
  register u_int _n = n;\
313
  do *_x++ &=~ *_y++; while (--_n != 0);\
314
}
315
316
/*
317
 * a := a union b
318
 * n must be guaranteed to be > 0
319
 */
320
2.35M
#define SET_UNION(a, b, n)\
321
2.35M
{\
322
2.35M
  register bpf_u_int32 *_x = a, *_y = b;\
323
2.35M
  register u_int _n = n;\
324
5.59M
  do *_x++ |= *_y++; while (--_n != 0);\
325
2.35M
}
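/*
 * Editorial sketch of how the uset macros compose (not part of
 * optimize.c; a uset is a bpf_u_int32 * bit vector, as used below).
 * Two sets over 64 elements need 64/BITS_PER_WORD = 2 words each:
 *
 *	bpf_u_int32 a[2] = { 0, 0 }, b[2] = { 0, 0 };
 *
 *	SET_INSERT(a, 3);
 *	SET_INSERT(a, 40);
 *	SET_INSERT(b, 40);
 *	SET_INTERSECT(a, b, 2);		// a is now just {40}
 *	// SET_MEMBER(a, 40) != 0, SET_MEMBER(a, 3) == 0
 *
 * Element i lives in word i / BITS_PER_WORD at bit i % BITS_PER_WORD,
 * so intersection, subtraction and union cost one pass over the
 * words, independent of how many elements are set.
 */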
326
327
  uset all_dom_sets;
328
  uset all_closure_sets;
329
  uset all_edge_sets;
330
331
3.98M
#define MODULUS 213
332
  struct valnode *hashtbl[MODULUS];
333
  bpf_u_int32 curval;
334
  bpf_u_int32 maxval;
335
336
  struct vmapinfo *vmap;
337
  struct valnode *vnode_base;
338
  struct valnode *next_vnode;
339
} opt_state_t;
340
341
typedef struct {
342
  /*
343
   * Place to longjmp to on an error.
344
   */
345
  jmp_buf top_ctx;
346
347
  /*
348
   * The buffer into which to put the error message.
349
   */
350
  char *errbuf;
351
352
  /*
353
   * Some pointers used to convert the basic block form of the code,
354
   * into the array form that BPF requires.  'fstart' will point to
355
   * the malloc'd array while 'ftail' is used during the recursive
356
   * traversal.
357
   */
358
  struct bpf_insn *fstart;
359
  struct bpf_insn *ftail;
360
} conv_state_t;
361
362
static void opt_init(opt_state_t *, struct icode *);
363
static void opt_cleanup(opt_state_t *);
364
static void PCAP_NORETURN opt_error(opt_state_t *, const char *, ...)
365
    PCAP_PRINTFLIKE(2, 3);
366
367
static void intern_blocks(opt_state_t *, struct icode *);
368
369
static void find_inedges(opt_state_t *, struct block *);
370
#ifdef BDEBUG
371
static void opt_dump(opt_state_t *, struct icode *);
372
#endif
373
374
#ifndef MAX
375
1.17M
#define MAX(a,b) ((a)>(b)?(a):(b))
376
#endif
377
378
static void
379
find_levels_r(opt_state_t *opt_state, struct icode *ic, struct block *b)
380
2.49M
{
381
2.49M
  int level;
382
383
2.49M
  if (isMarked(ic, b))
384
1.07M
    return;
385
386
1.42M
  Mark(ic, b);
387
1.42M
  b->link = 0;
388
389
1.42M
  if (JT(b)) {
390
1.17M
    find_levels_r(opt_state, ic, JT(b));
391
1.17M
    find_levels_r(opt_state, ic, JF(b));
392
1.17M
    level = MAX(JT(b)->level, JF(b)->level) + 1;
393
1.17M
  } else
394
246k
    level = 0;
395
1.42M
  b->level = level;
396
1.42M
  b->link = opt_state->levels[level];
397
1.42M
  opt_state->levels[level] = b;
398
1.42M
}
399
400
/*
401
 * Level graph.  The levels go from 0 at the leaves to
402
 * N_LEVELS at the root.  The opt_state->levels[] array points to the
403
 * first node of the level list, whose elements are linked
404
 * with the 'link' field of the struct block.
405
 */
406
static void
407
find_levels(opt_state_t *opt_state, struct icode *ic)
408
147k
{
409
147k
  memset((char *)opt_state->levels, 0, opt_state->n_blocks * sizeof(*opt_state->levels));
410
147k
  unMarkAll(ic);
411
147k
  find_levels_r(opt_state, ic, ic->root);
412
147k
}
413
414
/*
415
 * Find dominator relationships.
416
 * Assumes graph has been leveled.
417
 */
418
static void
419
find_dom(opt_state_t *opt_state, struct block *root)
420
147k
{
421
147k
  u_int i;
422
147k
  int level;
423
147k
  struct block *b;
424
147k
  bpf_u_int32 *x;
425
426
  /*
427
   * Initialize sets to contain all nodes.
428
   */
429
147k
  x = opt_state->all_dom_sets;
430
  /*
431
   * In opt_init(), we've made sure the product doesn't overflow.
432
   */
433
147k
  i = opt_state->n_blocks * opt_state->nodewords;
434
7.02M
  while (i != 0) {
435
6.87M
    --i;
436
6.87M
    *x++ = 0xFFFFFFFFU;
437
6.87M
  }
438
  /* Root starts off empty. */
439
334k
  for (i = opt_state->nodewords; i != 0;) {
440
186k
    --i;
441
186k
    root->dom[i] = 0;
442
186k
  }
443
444
  /* root->level is the highest level number found. */
445
1.38M
  for (level = root->level; level >= 0; --level) {
446
2.66M
    for (b = opt_state->levels[level]; b; b = b->link) {
447
1.42M
      SET_INSERT(b->dom, b->id);
448
1.42M
      if (JT(b) == 0)
449
246k
        continue;
450
1.17M
      SET_INTERSECT(JT(b)->dom, b->dom, opt_state->nodewords);
451
1.17M
      SET_INTERSECT(JF(b)->dom, b->dom, opt_state->nodewords);
452
1.17M
    }
453
1.23M
  }
454
147k
}
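/*
 * Editorial illustration of the pass above (not part of optimize.c).
 * For the diamond CFG
 *
 *	B0 -> B1, B0 -> B2, B1 -> B3, B2 -> B3
 *
 * every dom set starts full except dom(B0) = {}.  Walking the levels
 * from the root downward:
 *
 *	B0: insert B0 -> dom(B0) = {B0}; dom(B1) &= {B0}; dom(B2) &= {B0}
 *	B1: insert B1 -> dom(B1) = {B0,B1}; dom(B3) &= {B0,B1}
 *	B2: insert B2 -> dom(B2) = {B0,B2}; dom(B3) &= {B0}
 *	B3: insert B3 -> dom(B3) = {B0,B3}
 *
 * B1 and B2 are intersected away at the merge point, leaving only the
 * true dominators; one pass suffices because the graph is acyclic and
 * leveled.
 */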
455
456
static void
457
propedom(opt_state_t *opt_state, struct edge *ep)
458
2.84M
{
459
2.84M
  SET_INSERT(ep->edom, ep->id);
460
2.84M
  if (ep->succ) {
461
2.35M
    SET_INTERSECT(ep->succ->et.edom, ep->edom, opt_state->edgewords);
462
2.35M
    SET_INTERSECT(ep->succ->ef.edom, ep->edom, opt_state->edgewords);
463
2.35M
  }
464
2.84M
}
465
466
/*
467
 * Compute edge dominators.
468
 * Assumes graph has been leveled and predecessors established.
469
 */
470
static void
471
find_edom(opt_state_t *opt_state, struct block *root)
472
147k
{
473
147k
  u_int i;
474
147k
  uset x;
475
147k
  int level;
476
147k
  struct block *b;
477
478
147k
  x = opt_state->all_edge_sets;
479
  /*
480
   * In opt_init(), we've made sure the product doesn't overflow.
481
   */
482
24.5M
  for (i = opt_state->n_edges * opt_state->edgewords; i != 0; ) {
483
24.4M
    --i;
484
24.4M
    x[i] = 0xFFFFFFFFU;
485
24.4M
  }
486
487
  /* root->level is the highest level number found. */
488
147k
  memset(root->et.edom, 0, opt_state->edgewords * sizeof(*(uset)0));
489
147k
  memset(root->ef.edom, 0, opt_state->edgewords * sizeof(*(uset)0));
490
1.38M
  for (level = root->level; level >= 0; --level) {
491
2.66M
    for (b = opt_state->levels[level]; b != 0; b = b->link) {
492
1.42M
      propedom(opt_state, &b->et);
493
1.42M
      propedom(opt_state, &b->ef);
494
1.42M
    }
495
1.23M
  }
496
147k
}
497
498
/*
499
 * Find the backwards transitive closure of the flow graph.  These sets
500
 * are backwards in the sense that we find the set of nodes that reach
501
 * a given node, not the set of nodes that can be reached by a node.
502
 *
503
 * Assumes graph has been leveled.
504
 */
505
static void
506
find_closure(opt_state_t *opt_state, struct block *root)
507
147k
{
508
147k
  int level;
509
147k
  struct block *b;
510
511
  /*
512
   * Initialize sets to contain no nodes.
513
   */
514
147k
  memset((char *)opt_state->all_closure_sets, 0,
515
147k
        opt_state->n_blocks * opt_state->nodewords * sizeof(*opt_state->all_closure_sets));
516
517
  /* root->level is the highest level number found. */
518
1.38M
  for (level = root->level; level >= 0; --level) {
519
2.66M
    for (b = opt_state->levels[level]; b; b = b->link) {
520
1.42M
      SET_INSERT(b->closure, b->id);
521
1.42M
      if (JT(b) == 0)
522
246k
        continue;
523
1.17M
      SET_UNION(JT(b)->closure, b->closure, opt_state->nodewords);
524
1.17M
      SET_UNION(JF(b)->closure, b->closure, opt_state->nodewords);
525
1.17M
    }
526
1.23M
  }
527
147k
}
528
529
/*
530
 * Return the register number that is used by s.
531
 *
532
 * Returns A_ATOM if A is used, X_ATOM if X is used, AX_ATOM if both A and X
533
 * are used, the scratch memory location's number if a scratch memory
534
 * location is used (e.g., 0 for M[0]), or -1 if none of those are used.
535
 *
536
 * The implementation should probably change to an array access.
537
 */
538
static int
539
atomuse(struct stmt *s)
540
12.6M
{
541
12.6M
  register int c = s->code;
542
543
12.6M
  if (c == NOP)
544
1.92M
    return -1;
545
546
10.6M
  switch (BPF_CLASS(c)) {
547
548
151k
  case BPF_RET:
549
151k
    return (BPF_RVAL(c) == BPF_A) ? A_ATOM :
550
151k
      (BPF_RVAL(c) == BPF_X) ? X_ATOM : -1;
551
552
4.25M
  case BPF_LD:
553
4.86M
  case BPF_LDX:
554
    /*
555
     * As there are fewer than 2^31 memory locations,
556
     * s->k should be convertible to int without problems.
557
     */
558
4.86M
    return (BPF_MODE(c) == BPF_IND) ? X_ATOM :
559
4.86M
      (BPF_MODE(c) == BPF_MEM) ? (int)s->k : -1;
560
561
1.48M
  case BPF_ST:
562
1.48M
    return A_ATOM;
563
564
0
  case BPF_STX:
565
0
    return X_ATOM;
566
567
2.33M
  case BPF_JMP:
568
3.63M
  case BPF_ALU:
569
3.63M
    if (BPF_SRC(c) == BPF_X)
570
770k
      return AX_ATOM;
571
2.86M
    return A_ATOM;
572
573
541k
  case BPF_MISC:
574
541k
    return BPF_MISCOP(c) == BPF_TXA ? X_ATOM : A_ATOM;
575
10.6M
  }
576
0
  abort();
577
  /* NOTREACHED */
578
10.6M
}
579
580
/*
581
 * Return the register number that is defined by 's'.  We assume that
582
 * a single stmt cannot define more than one register.  If no register
583
 * is defined, return -1.
584
 *
585
 * The implementation should probably change to an array access.
586
 */
587
static int
588
atomdef(struct stmt *s)
589
11.4M
{
590
11.4M
  if (s->code == NOP)
591
1.92M
    return -1;
592
593
9.50M
  switch (BPF_CLASS(s->code)) {
594
595
4.25M
  case BPF_LD:
596
5.55M
  case BPF_ALU:
597
5.55M
    return A_ATOM;
598
599
617k
  case BPF_LDX:
600
617k
    return X_ATOM;
601
602
1.48M
  case BPF_ST:
603
1.48M
  case BPF_STX:
604
1.48M
    return s->k;
605
606
541k
  case BPF_MISC:
607
541k
    return BPF_MISCOP(s->code) == BPF_TAX ? X_ATOM : A_ATOM;
608
9.50M
  }
609
1.30M
  return -1;
610
9.50M
}
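/*
 * A few concrete cases of atomuse()/atomdef(), as editorial examples
 * (not part of optimize.c):
 *
 *	ld [14]    (BPF_LD|BPF_ABS|BPF_W)  uses nothing,  defines A_ATOM
 *	ld [x+2]   (BPF_LD|BPF_IND|BPF_B)  uses X_ATOM,   defines A_ATOM
 *	st M[3]    (BPF_ST)                uses A_ATOM,   defines atom 3
 *	tax        (BPF_MISC|BPF_TAX)      uses A_ATOM,   defines X_ATOM
 *	add x      (BPF_ALU|BPF_ADD|BPF_X) uses AX_ATOM,  defines A_ATOM
 *	jeq #k     (BPF_JMP|BPF_JEQ|BPF_K) uses A_ATOM,   defines nothing
 */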
611
612
/*
613
 * Compute the sets of registers used, defined, and killed by 'b'.
614
 *
615
 * "Used" means that a statement in 'b' uses the register before any
616
 * statement in 'b' defines it, i.e. it uses the value left in
617
 * that register by a predecessor block of this block.
618
 * "Defined" means that a statement in 'b' defines it.
619
 * "Killed" means that a statement in 'b' defines it before any
620
 * statement in 'b' uses it, i.e. it kills the value left in that
621
 * register by a predecessor block of this block.
622
 */
623
static void
624
compute_local_ud(struct block *b)
625
1.42M
{
626
1.42M
  struct slist *s;
627
1.42M
  atomset def = 0, use = 0, killed = 0;
628
1.42M
  int atom;
629
630
7.44M
  for (s = b->stmts; s; s = s->next) {
631
6.01M
    if (s->s.code == NOP)
632
1.78M
      continue;
633
4.23M
    atom = atomuse(&s->s);
634
4.23M
    if (atom >= 0) {
635
2.60M
      if (atom == AX_ATOM) {
636
348k
        if (!ATOMELEM(def, X_ATOM))
637
11
          use |= ATOMMASK(X_ATOM);
638
348k
        if (!ATOMELEM(def, A_ATOM))
639
1
          use |= ATOMMASK(A_ATOM);
640
348k
      }
641
2.26M
      else if (atom < N_ATOMS) {
642
2.26M
        if (!ATOMELEM(def, atom))
643
93.8k
          use |= ATOMMASK(atom);
644
2.26M
      }
645
0
      else
646
0
        abort();
647
2.60M
    }
648
4.23M
    atom = atomdef(&s->s);
649
4.23M
    if (atom >= 0) {
650
4.23M
      if (!ATOMELEM(use, atom))
651
4.23M
        killed |= ATOMMASK(atom);
652
4.23M
      def |= ATOMMASK(atom);
653
4.23M
    }
654
4.23M
  }
655
1.42M
  if (BPF_CLASS(b->s.code) == BPF_JMP) {
656
    /*
657
     * XXX - what about RET?
658
     */
659
1.17M
    atom = atomuse(&b->s);
660
1.17M
    if (atom >= 0) {
661
1.17M
      if (atom == AX_ATOM) {
662
89.3k
        if (!ATOMELEM(def, X_ATOM))
663
1.27k
          use |= ATOMMASK(X_ATOM);
664
89.3k
        if (!ATOMELEM(def, A_ATOM))
665
1.27k
          use |= ATOMMASK(A_ATOM);
666
89.3k
      }
667
1.08M
      else if (atom < N_ATOMS) {
668
1.08M
        if (!ATOMELEM(def, atom))
669
34.0k
          use |= ATOMMASK(atom);
670
1.08M
      }
671
0
      else
672
0
        abort();
673
1.17M
    }
674
1.17M
  }
675
676
1.42M
  b->def = def;
677
1.42M
  b->kill = killed;
678
1.42M
  b->in_use = use;
679
1.42M
}
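/*
 * Editorial worked example for compute_local_ud() (not part of
 * optimize.c).  For a block containing
 *
 *	ldx M[0]	; uses M[0] before this block defines it
 *	ld [x+2]	; uses X, but X was defined just above
 *	st M[1]		; uses A (defined here), defines M[1]
 *
 * the computed sets are
 *
 *	in_use = { M[0] }                 -- inherited from predecessors
 *	def    = { X_ATOM, A_ATOM, M[1] }
 *	kill   = { X_ATOM, A_ATOM, M[1] } -- defined before any use here
 */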
680
681
/*
682
 * Assume graph is already leveled.
683
 */
684
static void
685
find_ud(opt_state_t *opt_state, struct block *root)
686
147k
{
687
147k
  int i, maxlevel;
688
147k
  struct block *p;
689
690
  /*
691
   * root->level is the highest level number found;
692
   * count down from there.
693
   */
694
147k
  maxlevel = root->level;
695
1.38M
  for (i = maxlevel; i >= 0; --i)
696
2.66M
    for (p = opt_state->levels[i]; p; p = p->link) {
697
1.42M
      compute_local_ud(p);
698
1.42M
      p->out_use = 0;
699
1.42M
    }
700
701
1.23M
  for (i = 1; i <= maxlevel; ++i) {
702
2.26M
    for (p = opt_state->levels[i]; p; p = p->link) {
703
1.17M
      p->out_use |= JT(p)->in_use | JF(p)->in_use;
704
1.17M
      p->in_use |= p->out_use &~ p->kill;
705
1.17M
    }
706
1.09M
  }
707
147k
}
708
static void
709
init_val(opt_state_t *opt_state)
710
147k
{
711
147k
  opt_state->curval = 0;
712
147k
  opt_state->next_vnode = opt_state->vnode_base;
713
147k
  memset((char *)opt_state->vmap, 0, opt_state->maxval * sizeof(*opt_state->vmap));
714
147k
  memset((char *)opt_state->hashtbl, 0, sizeof opt_state->hashtbl);
715
147k
}
716
717
/*
718
 * Because we really don't have an IR, this stuff is a little messy.
719
 *
720
 * This routine looks in the table of existing value numbers for a value
721
 * generated from an operation with the specified opcode and
722
 * the specified values.  If it finds it, it returns its value number,
723
 * otherwise it makes a new entry in the table and returns the
724
 * value number of that entry.
725
 */
726
static bpf_u_int32
727
F(opt_state_t *opt_state, int code, bpf_u_int32 v0, bpf_u_int32 v1)
728
3.98M
{
729
3.98M
  u_int hash;
730
3.98M
  bpf_u_int32 val;
731
3.98M
  struct valnode *p;
732
733
3.98M
  hash = (u_int)code ^ (v0 << 4) ^ (v1 << 8);
734
3.98M
  hash %= MODULUS;
735
736
4.23M
  for (p = opt_state->hashtbl[hash]; p; p = p->next)
737
2.44M
    if (p->code == code && p->v0 == v0 && p->v1 == v1)
738
2.19M
      return p->val;
739
740
  /*
741
   * Not found.  Allocate a new value, and assign it a new
742
   * value number.
743
   *
744
   * opt_state->curval starts out as 0, which means VAL_UNKNOWN; we
745
   * increment it before using it as the new value number, which
746
   * means we never assign VAL_UNKNOWN.
747
   *
748
   * XXX - unless we overflow, but we probably won't have 2^32-1
749
   * values; we treat 32 bits as effectively infinite.
750
   */
751
1.79M
  val = ++opt_state->curval;
752
1.79M
  if (BPF_MODE(code) == BPF_IMM &&
753
1.79M
      (BPF_CLASS(code) == BPF_LD || BPF_CLASS(code) == BPF_LDX)) {
754
1.05M
    opt_state->vmap[val].const_val = v0;
755
1.05M
    opt_state->vmap[val].is_const = 1;
756
1.05M
  }
757
1.79M
  p = opt_state->next_vnode++;
758
1.79M
  p->val = val;
759
1.79M
  p->code = code;
760
1.79M
  p->v0 = v0;
761
1.79M
  p->v1 = v1;
762
1.79M
  p->next = opt_state->hashtbl[hash];
763
1.79M
  opt_state->hashtbl[hash] = p;
764
765
1.79M
  return val;
766
3.98M
}
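/*
 * Editorial sketch of the hash-consing behavior of F() (not part of
 * optimize.c).  Identical (code, v0, v1) triples always map to the
 * same value number, so a repeated computation is recognizable by a
 * simple value-number comparison:
 */
#if 0	/* illustration only; never compiled */
	bpf_u_int32 va, vb;

	va = F(opt_state, BPF_LD|BPF_ABS|BPF_B, 23, 0);	/* first  ld [23] */
	vb = F(opt_state, BPF_LD|BPF_ABS|BPF_B, 23, 0);	/* second ld [23] */
	/* va == vb: the second call hits the hash table and returns the
	   existing number.  Likewise K(0x800) == K(0x800), since K() is
	   just F() on BPF_LD|BPF_IMM|BPF_W; distinct constants therefore
	   get distinct value numbers, a fact fold_edge() relies on. */
#endif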
767
768
static inline void
769
vstore(struct stmt *s, bpf_u_int32 *valp, bpf_u_int32 newval, int alter)
770
3.50M
{
771
3.50M
  if (alter && newval != VAL_UNKNOWN && *valp == newval)
772
180k
    s->code = NOP;
773
3.32M
  else
774
3.32M
    *valp = newval;
775
3.50M
}
776
777
/*
778
 * Do constant-folding on binary operators.
779
 * (Unary operators are handled elsewhere.)
780
 */
781
static void
782
fold_op(opt_state_t *opt_state, struct stmt *s, bpf_u_int32 v0, bpf_u_int32 v1)
783
61.0k
{
784
61.0k
  bpf_u_int32 a, b;
785
786
61.0k
  a = opt_state->vmap[v0].const_val;
787
61.0k
  b = opt_state->vmap[v1].const_val;
788
789
61.0k
  switch (BPF_OP(s->code)) {
790
4.79k
  case BPF_ADD:
791
4.79k
    a += b;
792
4.79k
    break;
793
794
3.59k
  case BPF_SUB:
795
3.59k
    a -= b;
796
3.59k
    break;
797
798
11.8k
  case BPF_MUL:
799
11.8k
    a *= b;
800
11.8k
    break;
801
802
8.52k
  case BPF_DIV:
803
8.52k
    if (b == 0)
804
184
      opt_error(opt_state, "division by zero");
805
8.34k
    a /= b;
806
8.34k
    break;
807
808
8.77k
  case BPF_MOD:
809
8.77k
    if (b == 0)
810
1.21k
      opt_error(opt_state, "modulus by zero");
811
7.55k
    a %= b;
812
7.55k
    break;
813
814
12.6k
  case BPF_AND:
815
12.6k
    a &= b;
816
12.6k
    break;
817
818
6.90k
  case BPF_OR:
819
6.90k
    a |= b;
820
6.90k
    break;
821
822
2.09k
  case BPF_XOR:
823
2.09k
    a ^= b;
824
2.09k
    break;
825
826
563
  case BPF_LSH:
827
    /*
828
     * A left shift of more than the width of the type
829
     * is undefined in C; we'll just treat it as shifting
830
     * all the bits out.
831
     *
832
     * XXX - the BPF interpreter doesn't check for this,
833
     * so its behavior is dependent on the behavior of
834
     * the processor on which it's running.  There are
835
     * processors on which it shifts all the bits out
836
     * and processors on which it does no shift.
837
     */
838
563
    if (b < 32)
839
413
      a <<= b;
840
150
    else
841
150
      a = 0;
842
563
    break;
843
844
1.31k
  case BPF_RSH:
845
    /*
846
     * A right shift of more than the width of the type
847
     * is undefined in C; we'll just treat it as shifting
848
     * all the bits out.
849
     *
850
     * XXX - the BPF interpreter doesn't check for this,
851
     * so its behavior is dependent on the behavior of
852
     * the processor on which it's running.  There are
853
     * processors on which it shifts all the bits out
854
     * and processors on which it does no shift.
855
     */
856
1.31k
    if (b < 32)
857
1.22k
      a >>= b;
858
86
    else
859
86
      a = 0;
860
1.31k
    break;
861
862
0
  default:
863
0
    abort();
864
61.0k
  }
865
59.5k
  s->k = a;
866
59.5k
  s->code = BPF_LD|BPF_IMM;
867
  /*
868
   * XXX - optimizer loop detection.
869
   */
870
59.5k
  opt_state->non_branch_movement_performed = 1;
871
59.5k
  opt_state->done = 0;
872
59.5k
}
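/*
 * Editorial example of fold_op() in action (not part of optimize.c).
 * If value numbering has shown both operands to be constants, e.g.
 *
 *	ld #6
 *	ldx #3
 *	div x
 *
 * then the "div x" statement is rewritten in place as "ld #2"
 * (s->code = BPF_LD|BPF_IMM, s->k = 6 / 3), and the now-unreferenced
 * loads are left for dead-store elimination to turn into NOPs.
 */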
873
874
static inline struct slist *
875
this_op(struct slist *s)
876
8.00M
{
877
9.91M
  while (s != 0 && s->s.code == NOP)
878
1.90M
    s = s->next;
879
8.00M
  return s;
880
8.00M
}
881
882
static void
883
opt_not(struct block *b)
884
2.24k
{
885
2.24k
  struct block *tmp = JT(b);
886
887
2.24k
  JT(b) = JF(b);
888
2.24k
  JF(b) = tmp;
889
2.24k
}
890
891
static void
892
opt_peep(opt_state_t *opt_state, struct block *b)
893
1.30M
{
894
1.30M
  struct slist *s;
895
1.30M
  struct slist *next, *last;
896
1.30M
  bpf_u_int32 val;
897
898
1.30M
  s = b->stmts;
899
1.30M
  if (s == 0)
900
163k
    return;
901
902
1.14M
  last = s;
903
4.02M
  for (/*empty*/; /*empty*/; s = next) {
904
    /*
905
     * Skip over nops.
906
     */
907
4.02M
    s = this_op(s);
908
4.02M
    if (s == 0)
909
47.9k
      break;  /* nothing left in the block */
910
911
    /*
912
     * Find the next real instruction after that one
913
     * (skipping nops).
914
     */
915
3.97M
    next = this_op(s->next);
916
3.97M
    if (next == 0)
917
1.09M
      break;  /* no next instruction */
918
2.88M
    last = next;
919
920
    /*
921
     * st  M[k] --> st  M[k]
922
     * ldx M[k]   tax
923
     */
924
2.88M
    if (s->s.code == BPF_ST &&
925
2.88M
        next->s.code == (BPF_LDX|BPF_MEM) &&
926
2.88M
        s->s.k == next->s.k) {
927
      /*
928
       * XXX - optimizer loop detection.
929
       */
930
115k
      opt_state->non_branch_movement_performed = 1;
931
115k
      opt_state->done = 0;
932
115k
      next->s.code = BPF_MISC|BPF_TAX;
933
115k
    }
934
    /*
935
     * ld  #k --> ldx  #k
936
     * tax      txa
937
     */
938
2.88M
    if (s->s.code == (BPF_LD|BPF_IMM) &&
939
2.88M
        next->s.code == (BPF_MISC|BPF_TAX)) {
940
70.8k
      s->s.code = BPF_LDX|BPF_IMM;
941
70.8k
      next->s.code = BPF_MISC|BPF_TXA;
942
      /*
943
       * XXX - optimizer loop detection.
944
       */
945
70.8k
      opt_state->non_branch_movement_performed = 1;
946
70.8k
      opt_state->done = 0;
947
70.8k
    }
948
    /*
949
     * This is an ugly special case, but it happens
950
     * when you say tcp[k] or udp[k] where k is a constant.
951
     */
952
2.88M
    if (s->s.code == (BPF_LD|BPF_IMM)) {
953
551k
      struct slist *add, *tax, *ild;
954
955
      /*
956
       * Check that X isn't used on exit from this
957
       * block (which the optimizer might cause).
958
       * We know the code generator won't generate
959
       * any local dependencies.
960
       */
961
551k
      if (ATOMELEM(b->out_use, X_ATOM))
962
2.12k
        continue;
963
964
      /*
965
       * Check that the instruction following the ldi
966
       * is an addx, or it's an ldxms with an addx
967
       * following it (with 0 or more nops between the
968
       * ldxms and addx).
969
       */
970
549k
      if (next->s.code != (BPF_LDX|BPF_MSH|BPF_B))
971
549k
        add = next;
972
0
      else
973
0
        add = this_op(next->next);
974
549k
      if (add == 0 || add->s.code != (BPF_ALU|BPF_ADD|BPF_X))
975
549k
        continue;
976
977
      /*
978
       * Check that a tax follows that (with 0 or more
979
       * nops between them).
980
       */
981
807
      tax = this_op(add->next);
982
807
      if (tax == 0 || tax->s.code != (BPF_MISC|BPF_TAX))
983
384
        continue;
984
985
      /*
986
       * Check that an ild follows that (with 0 or more
987
       * nops between them).
988
       */
989
423
      ild = this_op(tax->next);
990
423
      if (ild == 0 || BPF_CLASS(ild->s.code) != BPF_LD ||
991
423
          BPF_MODE(ild->s.code) != BPF_IND)
992
423
        continue;
993
      /*
994
       * We want to turn this sequence:
995
       *
996
       * (004) ldi     #0x2   {s}
997
       * (005) ldxms   [14]   {next}  -- optional
998
       * (006) addx     {add}
999
       * (007) tax      {tax}
1000
       * (008) ild     [x+0]    {ild}
1001
       *
1002
       * into this sequence:
1003
       *
1004
       * (004) nop
1005
       * (005) ldxms   [14]
1006
       * (006) nop
1007
       * (007) nop
1008
       * (008) ild     [x+2]
1009
       *
1010
       * XXX We need to check that X is not
1011
       * subsequently used, because we want to change
1012
       * what'll be in it after this sequence.
1013
       *
1014
       * We know we can eliminate the accumulator
1015
       * modifications earlier in the sequence since
1016
       * it is defined by the last stmt of this sequence
1017
       * (i.e., the last statement of the sequence loads
1018
       * a value into the accumulator, so we can eliminate
1019
       * earlier operations on the accumulator).
1020
       */
1021
0
      ild->s.k += s->s.k;
1022
0
      s->s.code = NOP;
1023
0
      add->s.code = NOP;
1024
0
      tax->s.code = NOP;
1025
      /*
1026
       * XXX - optimizer loop detection.
1027
       */
1028
0
      opt_state->non_branch_movement_performed = 1;
1029
0
      opt_state->done = 0;
1030
0
    }
1031
2.88M
  }
1032
  /*
1033
   * If the comparison at the end of a block is an equality
1034
   * comparison against a constant, and nobody uses the value
1035
   * we leave in the A register at the end of a block, and
1036
   * the operation preceding the comparison is an arithmetic
1037
   * operation, we can sometimes optimize it away.
1038
   */
1039
1.14M
  if (b->s.code == (BPF_JMP|BPF_JEQ|BPF_K) &&
1040
1.14M
      !ATOMELEM(b->out_use, A_ATOM)) {
1041
    /*
1042
     * We can optimize away certain subtractions of the
1043
     * X register.
1044
     */
1045
950k
    if (last->s.code == (BPF_ALU|BPF_SUB|BPF_X)) {
1046
16.2k
      val = b->val[X_ATOM];
1047
16.2k
      if (opt_state->vmap[val].is_const) {
1048
        /*
1049
         * If we have a subtract to do a comparison,
1050
         * and the X register is a known constant,
1051
         * we can merge this value into the
1052
         * comparison:
1053
         *
1054
         * sub x  ->  nop
1055
         * jeq #y jeq #(x+y)
1056
         */
1057
6.54k
        b->s.k += opt_state->vmap[val].const_val;
1058
6.54k
        last->s.code = NOP;
1059
        /*
1060
         * XXX - optimizer loop detection.
1061
         */
1062
6.54k
        opt_state->non_branch_movement_performed = 1;
1063
6.54k
        opt_state->done = 0;
1064
9.74k
      } else if (b->s.k == 0) {
1065
        /*
1066
         * If the X register isn't a constant,
1067
         * and the comparison in the test is
1068
         * against 0, we can compare with the
1069
         * X register, instead:
1070
         *
1071
         * sub x  ->  nop
1072
         * jeq #0 jeq x
1073
         */
1074
9.73k
        last->s.code = NOP;
1075
9.73k
        b->s.code = BPF_JMP|BPF_JEQ|BPF_X;
1076
        /*
1077
         * XXX - optimizer loop detection.
1078
         */
1079
9.73k
        opt_state->non_branch_movement_performed = 1;
1080
9.73k
        opt_state->done = 0;
1081
9.73k
      }
1082
16.2k
    }
1083
    /*
1084
     * Likewise, a constant subtract can be simplified:
1085
     *
1086
     * sub #x ->  nop
1087
     * jeq #y ->  jeq #(x+y)
1088
     */
1089
934k
    else if (last->s.code == (BPF_ALU|BPF_SUB|BPF_K)) {
1090
6
      last->s.code = NOP;
1091
6
      b->s.k += last->s.k;
1092
      /*
1093
       * XXX - optimizer loop detection.
1094
       */
1095
6
      opt_state->non_branch_movement_performed = 1;
1096
6
      opt_state->done = 0;
1097
6
    }
1098
    /*
1099
     * And, similarly, a constant AND can be simplified
1100
     * if we're testing against 0, i.e.:
1101
     *
1102
     * and #k nop
1103
     * jeq #0  -> jset #k
1104
     */
1105
934k
    else if (last->s.code == (BPF_ALU|BPF_AND|BPF_K) &&
1106
934k
        b->s.k == 0) {
1107
2.24k
      b->s.k = last->s.k;
1108
2.24k
      b->s.code = BPF_JMP|BPF_K|BPF_JSET;
1109
2.24k
      last->s.code = NOP;
1110
      /*
1111
       * XXX - optimizer loop detection.
1112
       */
1113
2.24k
      opt_state->non_branch_movement_performed = 1;
1114
2.24k
      opt_state->done = 0;
1115
2.24k
      opt_not(b);
1116
2.24k
    }
1117
950k
  }
1118
  /*
1119
   * jset #0        ->   never
1120
   * jset #ffffffff ->   always
1121
   */
1122
1.14M
  if (b->s.code == (BPF_JMP|BPF_K|BPF_JSET)) {
1123
6.26k
    if (b->s.k == 0)
1124
462
      JT(b) = JF(b);
1125
6.26k
    if (b->s.k == 0xffffffffU)
1126
3
      JF(b) = JT(b);
1127
6.26k
  }
1128
  /*
1129
   * If we're comparing against the index register, and the index
1130
   * register is a known constant, we can just compare against that
1131
   * constant.
1132
   */
1133
1.14M
  val = b->val[X_ATOM];
1134
1.14M
  if (opt_state->vmap[val].is_const && BPF_SRC(b->s.code) == BPF_X) {
1135
26.8k
    bpf_u_int32 v = opt_state->vmap[val].const_val;
1136
26.8k
    b->s.code &= ~BPF_X;
1137
26.8k
    b->s.k = v;
1138
26.8k
  }
1139
  /*
1140
   * If the accumulator is a known constant, we can compute the
1141
   * comparison result.
1142
   */
1143
1.14M
  val = b->val[A_ATOM];
1144
1.14M
  if (opt_state->vmap[val].is_const && BPF_SRC(b->s.code) == BPF_K) {
1145
123k
    bpf_u_int32 v = opt_state->vmap[val].const_val;
1146
123k
    switch (BPF_OP(b->s.code)) {
1147
1148
67.4k
    case BPF_JEQ:
1149
67.4k
      v = v == b->s.k;
1150
67.4k
      break;
1151
1152
28.0k
    case BPF_JGT:
1153
28.0k
      v = v > b->s.k;
1154
28.0k
      break;
1155
1156
28.4k
    case BPF_JGE:
1157
28.4k
      v = v >= b->s.k;
1158
28.4k
      break;
1159
1160
0
    case BPF_JSET:
1161
0
      v &= b->s.k;
1162
0
      break;
1163
1164
0
    default:
1165
0
      abort();
1166
123k
    }
1167
123k
    if (JF(b) != JT(b)) {
1168
      /*
1169
       * XXX - optimizer loop detection.
1170
       */
1171
57.2k
      opt_state->non_branch_movement_performed = 1;
1172
57.2k
      opt_state->done = 0;
1173
57.2k
    }
1174
123k
    if (v)
1175
27.7k
      JF(b) = JT(b);
1176
96.1k
    else
1177
96.1k
      JT(b) = JF(b);
1178
123k
  }
1179
1.14M
}
1180
1181
/*
1182
 * Compute the symbolic value of the expression of 's', and update
1183
 * anything it defines in the value table 'val'.  If 'alter' is true,
1184
 * do various optimizations.  This code would be cleaner if symbolic
1185
 * evaluation and code transformations weren't folded together.
1186
 */
1187
static void
1188
opt_stmt(opt_state_t *opt_state, struct stmt *s, bpf_u_int32 val[], int alter)
1189
5.98M
{
1190
5.98M
  int op;
1191
5.98M
  bpf_u_int32 v;
1192
1193
5.98M
  switch (s->code) {
1194
1195
241k
  case BPF_LD|BPF_ABS|BPF_W:
1196
411k
  case BPF_LD|BPF_ABS|BPF_H:
1197
843k
  case BPF_LD|BPF_ABS|BPF_B:
1198
843k
    v = F(opt_state, s->code, s->k, 0L);
1199
843k
    vstore(s, &val[A_ATOM], v, alter);
1200
843k
    break;
1201
1202
20.0k
  case BPF_LD|BPF_IND|BPF_W:
1203
20.0k
  case BPF_LD|BPF_IND|BPF_H:
1204
92.6k
  case BPF_LD|BPF_IND|BPF_B:
1205
92.6k
    v = val[X_ATOM];
1206
92.6k
    if (alter && opt_state->vmap[v].is_const) {
1207
6.66k
      s->code = BPF_LD|BPF_ABS|BPF_SIZE(s->code);
1208
6.66k
      s->k += opt_state->vmap[v].const_val;
1209
6.66k
      v = F(opt_state, s->code, s->k, 0L);
1210
      /*
1211
       * XXX - optimizer loop detection.
1212
       */
1213
6.66k
      opt_state->non_branch_movement_performed = 1;
1214
6.66k
      opt_state->done = 0;
1215
6.66k
    }
1216
85.9k
    else
1217
85.9k
      v = F(opt_state, s->code, s->k, v);
1218
92.6k
    vstore(s, &val[A_ATOM], v, alter);
1219
92.6k
    break;
1220
1221
15.0k
  case BPF_LD|BPF_LEN:
1222
15.0k
    v = F(opt_state, s->code, 0L, 0L);
1223
15.0k
    vstore(s, &val[A_ATOM], v, alter);
1224
15.0k
    break;
1225
1226
632k
  case BPF_LD|BPF_IMM:
1227
632k
    v = K(s->k);
1228
632k
    vstore(s, &val[A_ATOM], v, alter);
1229
632k
    break;
1230
1231
132k
  case BPF_LDX|BPF_IMM:
1232
132k
    v = K(s->k);
1233
132k
    vstore(s, &val[X_ATOM], v, alter);
1234
132k
    break;
1235
1236
0
  case BPF_LDX|BPF_MSH|BPF_B:
1237
0
    v = F(opt_state, s->code, s->k, 0L);
1238
0
    vstore(s, &val[X_ATOM], v, alter);
1239
0
    break;
1240
1241
146k
  case BPF_ALU|BPF_NEG:
1242
146k
    if (alter && opt_state->vmap[val[A_ATOM]].is_const) {
1243
30.2k
      s->code = BPF_LD|BPF_IMM;
1244
      /*
1245
       * Do this negation as unsigned arithmetic; that's
1246
       * what modern BPF engines do, and it guarantees
1247
       * that all possible values can be negated.  (Yeah,
1248
       * negating 0x80000000, the minimum signed 32-bit
1249
       * two's-complement value, results in 0x80000000,
1250
       * so it's still negative, but we *should* be doing
1251
       * all unsigned arithmetic here, to match what
1252
       * modern BPF engines do.)
1253
       *
1254
       * Express it as 0U - (unsigned value) so that we
1255
       * don't get compiler warnings about negating an
1256
       * unsigned value and don't get UBSan warnings
1257
       * about the result of negating 0x80000000 being
1258
       * undefined.
1259
       */
1260
30.2k
      s->k = 0U - opt_state->vmap[val[A_ATOM]].const_val;
1261
30.2k
      val[A_ATOM] = K(s->k);
1262
30.2k
    }
1263
116k
    else
1264
116k
      val[A_ATOM] = F(opt_state, s->code, val[A_ATOM], 0L);
1265
146k
    break;
1266
1267
17.6k
  case BPF_ALU|BPF_ADD|BPF_K:
1268
18.4k
  case BPF_ALU|BPF_SUB|BPF_K:
1269
20.5k
  case BPF_ALU|BPF_MUL|BPF_K:
1270
21.0k
  case BPF_ALU|BPF_DIV|BPF_K:
1271
21.3k
  case BPF_ALU|BPF_MOD|BPF_K:
1272
210k
  case BPF_ALU|BPF_AND|BPF_K:
1273
213k
  case BPF_ALU|BPF_OR|BPF_K:
1274
213k
  case BPF_ALU|BPF_XOR|BPF_K:
1275
214k
  case BPF_ALU|BPF_LSH|BPF_K:
1276
214k
  case BPF_ALU|BPF_RSH|BPF_K:
1277
214k
    op = BPF_OP(s->code);
1278
214k
    if (alter) {
1279
42.8k
      if (s->k == 0) {
1280
        /*
1281
         * Optimize operations where the constant
1282
         * is zero.
1283
         *
1284
         * Don't optimize away "sub #0"
1285
         * as it may be needed later to
1286
         * fixup the generated math code.
1287
         *
1288
         * Fail if we're dividing by zero or taking
1289
         * a modulus by zero.
1290
         */
1291
1.65k
        if (op == BPF_ADD ||
1292
1.65k
            op == BPF_LSH || op == BPF_RSH ||
1293
1.65k
            op == BPF_OR || op == BPF_XOR) {
1294
349
          s->code = NOP;
1295
349
          break;
1296
349
        }
1297
1.30k
        if (op == BPF_MUL || op == BPF_AND) {
1298
788
          s->code = BPF_LD|BPF_IMM;
1299
788
          val[A_ATOM] = K(s->k);
1300
788
          break;
1301
788
        }
1302
515
        if (op == BPF_DIV)
1303
2
          opt_error(opt_state,
1304
2
              "division by zero");
1305
513
        if (op == BPF_MOD)
1306
13
          opt_error(opt_state,
1307
13
              "modulus by zero");
1308
513
      }
1309
41.6k
      if (opt_state->vmap[val[A_ATOM]].is_const) {
1310
1.26k
        fold_op(opt_state, s, val[A_ATOM], K(s->k));
1311
1.26k
        val[A_ATOM] = K(s->k);
1312
1.26k
        break;
1313
1.26k
      }
1314
41.6k
    }
1315
211k
    val[A_ATOM] = F(opt_state, s->code, val[A_ATOM], K(s->k));
1316
211k
    break;
1317
1318
25.2k
  case BPF_ALU|BPF_ADD|BPF_X:
1319
59.4k
  case BPF_ALU|BPF_SUB|BPF_X:
1320
120k
  case BPF_ALU|BPF_MUL|BPF_X:
1321
163k
  case BPF_ALU|BPF_DIV|BPF_X:
1322
215k
  case BPF_ALU|BPF_MOD|BPF_X:
1323
283k
  case BPF_ALU|BPF_AND|BPF_X:
1324
324k
  case BPF_ALU|BPF_OR|BPF_X:
1325
334k
  case BPF_ALU|BPF_XOR|BPF_X:
1326
338k
  case BPF_ALU|BPF_LSH|BPF_X:
1327
344k
  case BPF_ALU|BPF_RSH|BPF_X:
1328
344k
    op = BPF_OP(s->code);
1329
344k
    if (alter && opt_state->vmap[val[X_ATOM]].is_const) {
1330
63.9k
      if (opt_state->vmap[val[A_ATOM]].is_const) {
1331
59.7k
        fold_op(opt_state, s, val[A_ATOM], val[X_ATOM]);
1332
59.7k
        val[A_ATOM] = K(s->k);
1333
59.7k
      }
1334
4.26k
      else {
1335
4.26k
        s->code = BPF_ALU|BPF_K|op;
1336
4.26k
        s->k = opt_state->vmap[val[X_ATOM]].const_val;
1337
4.26k
        if ((op == BPF_LSH || op == BPF_RSH) &&
1338
4.26k
            s->k > 31)
1339
7
          opt_error(opt_state,
1340
7
              "shift by more than 31 bits");
1341
        /*
1342
         * XXX - optimizer loop detection.
1343
         */
1344
4.25k
        opt_state->non_branch_movement_performed = 1;
1345
4.25k
        opt_state->done = 0;
1346
4.25k
        val[A_ATOM] =
1347
4.25k
          F(opt_state, s->code, val[A_ATOM], K(s->k));
1348
4.25k
      }
1349
63.9k
      break;
1350
63.9k
    }
1351
    /*
1352
     * Check if we're doing something to an accumulator
1353
     * that is 0, and simplify.  This may not seem like
1354
     * much of a simplification but it could open up further
1355
     * optimizations.
1356
     * XXX We could also check for mul by 1, etc.
1357
     */
1358
280k
    if (alter && opt_state->vmap[val[A_ATOM]].is_const
1359
280k
        && opt_state->vmap[val[A_ATOM]].const_val == 0) {
1360
537
      if (op == BPF_ADD || op == BPF_OR || op == BPF_XOR) {
1361
130
        s->code = BPF_MISC|BPF_TXA;
1362
130
        vstore(s, &val[A_ATOM], val[X_ATOM], alter);
1363
130
        break;
1364
130
      }
1365
407
      else if (op == BPF_MUL || op == BPF_DIV || op == BPF_MOD ||
1366
407
         op == BPF_AND || op == BPF_LSH || op == BPF_RSH) {
1367
305
        s->code = BPF_LD|BPF_IMM;
1368
305
        s->k = 0;
1369
305
        vstore(s, &val[A_ATOM], K(s->k), alter);
1370
305
        break;
1371
305
      }
1372
102
      else if (op == BPF_NEG) {
1373
0
        s->code = NOP;
1374
0
        break;
1375
0
      }
1376
537
    }
1377
279k
    val[A_ATOM] = F(opt_state, s->code, val[A_ATOM], val[X_ATOM]);
1378
279k
    break;
1379
1380
2.06k
  case BPF_MISC|BPF_TXA:
1381
2.06k
    vstore(s, &val[A_ATOM], val[X_ATOM], alter);
1382
2.06k
    break;
1383
1384
612k
  case BPF_LD|BPF_MEM:
1385
612k
    v = val[s->k];
1386
612k
    if (alter && opt_state->vmap[v].is_const) {
1387
117k
      s->code = BPF_LD|BPF_IMM;
1388
117k
      s->k = opt_state->vmap[v].const_val;
1389
      /*
1390
       * XXX - optimizer loop detection.
1391
       */
1392
117k
      opt_state->non_branch_movement_performed = 1;
1393
117k
      opt_state->done = 0;
1394
117k
    }
1395
612k
    vstore(s, &val[A_ATOM], v, alter);
1396
612k
    break;
1397
1398
214k
  case BPF_MISC|BPF_TAX:
1399
214k
    vstore(s, &val[X_ATOM], val[A_ATOM], alter);
1400
214k
    break;
1401
1402
207k
  case BPF_LDX|BPF_MEM:
1403
207k
    v = val[s->k];
1404
207k
    if (alter && opt_state->vmap[v].is_const) {
1405
6.66k
      s->code = BPF_LDX|BPF_IMM;
1406
6.66k
      s->k = opt_state->vmap[v].const_val;
1407
      /*
1408
       * XXX - optimizer loop detection.
1409
       */
1410
6.66k
      opt_state->non_branch_movement_performed = 1;
1411
6.66k
      opt_state->done = 0;
1412
6.66k
    }
1413
207k
    vstore(s, &val[X_ATOM], v, alter);
1414
207k
    break;
1415
1416
751k
  case BPF_ST:
1417
751k
    vstore(s, &val[s->k], val[A_ATOM], alter);
1418
751k
    break;
1419
1420
0
  case BPF_STX:
1421
0
    vstore(s, &val[s->k], val[X_ATOM], alter);
1422
0
    break;
1423
5.98M
  }
1424
5.98M
}
1425
1426
static void
1427
deadstmt(opt_state_t *opt_state, register struct stmt *s, register struct stmt *last[])
1428
7.19M
{
1429
7.19M
  register int atom;
1430
1431
7.19M
  atom = atomuse(s);
1432
7.19M
  if (atom >= 0) {
1433
3.45M
    if (atom == AX_ATOM) {
1434
332k
      last[X_ATOM] = 0;
1435
332k
      last[A_ATOM] = 0;
1436
332k
    }
1437
3.12M
    else
1438
3.12M
      last[atom] = 0;
1439
3.45M
  }
1440
7.19M
  atom = atomdef(s);
1441
7.19M
  if (atom >= 0) {
1442
3.96M
    if (last[atom]) {
1443
      /*
1444
       * XXX - optimizer loop detection.
1445
       */
1446
326k
      opt_state->non_branch_movement_performed = 1;
1447
326k
      opt_state->done = 0;
1448
326k
      last[atom]->code = NOP;
1449
326k
    }
1450
3.96M
    last[atom] = s;
1451
3.96M
  }
1452
7.19M
}
1453
1454
static void
1455
opt_deadstores(opt_state_t *opt_state, register struct block *b)
1456
1.30M
{
1457
1.30M
  register struct slist *s;
1458
1.30M
  register int atom;
1459
1.30M
  struct stmt *last[N_ATOMS];
1460
1461
1.30M
  memset((char *)last, 0, sizeof last);
1462
1463
7.19M
  for (s = b->stmts; s != 0; s = s->next)
1464
5.88M
    deadstmt(opt_state, &s->s, last);
1465
1.30M
  deadstmt(opt_state, &b->s, last);
1466
1467
24.8M
  for (atom = 0; atom < N_ATOMS; ++atom)
1468
23.5M
    if (last[atom] && !ATOMELEM(b->out_use, atom)) {
1469
163k
      last[atom]->code = NOP;
1470
      /*
1471
       * XXX - optimizer loop detection.
1472
       */
1473
163k
      opt_state->non_branch_movement_performed = 1;
1474
163k
      opt_state->done = 0;
1475
163k
    }
1476
1.30M
}
1477
1478
static void
1479
opt_blk(opt_state_t *opt_state, struct block *b, int do_stmts)
1480
1.41M
{
1481
1.41M
  struct slist *s;
1482
1.41M
  struct edge *p;
1483
1.41M
  int i;
1484
1.41M
  bpf_u_int32 aval, xval;
1485
1486
#if 0
1487
  for (s = b->stmts; s && s->next; s = s->next)
1488
    if (BPF_CLASS(s->s.code) == BPF_JMP) {
1489
      do_stmts = 0;
1490
      break;
1491
    }
1492
#endif
1493
1494
  /*
1495
   * Initialize the atom values.
1496
   */
1497
1.41M
  p = b->in_edges;
1498
1.41M
  if (p == 0) {
1499
    /*
1500
     * We have no predecessors, so everything is undefined
1501
     * upon entry to this block.
1502
     */
1503
147k
    memset((char *)b->val, 0, sizeof(b->val));
1504
1.26M
  } else {
1505
    /*
1506
     * Inherit values from our predecessors.
1507
     *
1508
     * First, get the values from the predecessor along the
1509
     * first edge leading to this node.
1510
     */
1511
1.26M
    memcpy((char *)b->val, (char *)p->pred->val, sizeof(b->val));
1512
    /*
1513
     * Now look at all the other nodes leading to this node.
1514
     * If, for the predecessor along that edge, a register
1515
     * has a different value from the one we have (i.e.,
1516
     * control paths are merging, and the merging paths
1517
     * assign different values to that register), give the
1518
     * register the undefined value of 0.
1519
     */
1520
2.34M
    while ((p = p->next) != NULL) {
1521
20.4M
      for (i = 0; i < N_ATOMS; ++i)
1522
19.3M
        if (b->val[i] != p->pred->val[i])
1523
1.20M
          b->val[i] = 0;
1524
1.07M
    }
1525
1.26M
  }
1526
1.41M
  aval = b->val[A_ATOM];
1527
1.41M
  xval = b->val[X_ATOM];
1528
7.40M
  for (s = b->stmts; s; s = s->next)
1529
5.98M
    opt_stmt(opt_state, &s->s, b->val, do_stmts);
1530
1531
  /*
1532
   * This is a special case: if we don't use anything from this
1533
   * block, and we load the accumulator or index register with a
1534
   * value that is already there, or if this block is a return,
1535
   * eliminate all the statements.
1536
   *
1537
   * XXX - what if it does a store?  Presumably that falls under
1538
   * the heading of "if we don't use anything from this block",
1539
   * i.e., if we use any memory location set to a different
1540
   * value by this block, then we use something from this block.
1541
   *
1542
   * XXX - why does it matter whether we use anything from this
1543
   * block?  If the accumulator or index register doesn't change
1544
   * its value, isn't that OK even if we use that value?
1545
   *
1546
   * XXX - if we load the accumulator with a different value,
1547
   * and the block ends with a conditional branch, we obviously
1548
   * can't eliminate it, as the branch depends on that value.
1549
   * For the index register, the conditional branch only depends
1550
   * on the index register value if the test is against the index
1551
   * register value rather than a constant; if nothing uses the
1552
   * value we put into the index register, and we're not testing
1553
   * against the index register's value, and there aren't any
1554
   * other problems that would keep us from eliminating this
1555
   * block, can we eliminate it?
1556
   */
1557
1.41M
  if (do_stmts &&
1558
1.41M
      ((b->out_use == 0 &&
1559
379k
        aval != VAL_UNKNOWN && b->val[A_ATOM] == aval &&
1560
379k
        xval != VAL_UNKNOWN && b->val[X_ATOM] == xval) ||
1561
379k
       BPF_CLASS(b->s.code) == BPF_RET)) {
1562
108k
    if (b->stmts != 0) {
1563
12.0k
      b->stmts = 0;
1564
      /*
1565
       * XXX - optimizer loop detection.
1566
       */
1567
12.0k
      opt_state->non_branch_movement_performed = 1;
1568
12.0k
      opt_state->done = 0;
1569
12.0k
    }
1570
1.30M
  } else {
1571
1.30M
    opt_peep(opt_state, b);
1572
1.30M
    opt_deadstores(opt_state, b);
1573
1.30M
  }
1574
  /*
1575
   * Set up values for branch optimizer.
1576
   */
1577
1.41M
  if (BPF_SRC(b->s.code) == BPF_K)
1578
1.34M
    b->oval = K(b->s.k);
1579
72.2k
  else
1580
72.2k
    b->oval = b->val[X_ATOM];
1581
1.41M
  b->et.code = b->s.code;
1582
1.41M
  b->ef.code = -b->s.code;
1583
1.41M
}
1584
1585
/*
1586
 * Return true if any register that is used on exit from 'succ' has
1587
 * an exit value that is different from the corresponding exit value
1588
 * from 'b'.
1589
 */
1590
static int
1591
use_conflict(struct block *b, struct block *succ)
1592
728k
{
1593
728k
  int atom;
1594
728k
  atomset use = succ->out_use;
1595
1596
728k
  if (use == 0)
1597
688k
    return 0;
1598
1599
616k
  for (atom = 0; atom < N_ATOMS; ++atom)
1600
589k
    if (ATOMELEM(use, atom))
1601
40.1k
      if (b->val[atom] != succ->val[atom])
1602
13.2k
        return 1;
1603
26.9k
  return 0;
1604
40.1k
}
1605
1606
/*
1607
 * Given a block that is the successor of an edge, and an edge that
1608
 * dominates that edge, return either a pointer to a child of that
1609
 * block (a block to which that block jumps) if that block is a
1610
 * candidate to replace the successor of the latter edge or NULL
1611
 * if neither of the children of the first block are candidates.
1612
 */
1613
static struct block *
1614
fold_edge(struct block *child, struct edge *ep)
1615
4.30M
{
1616
4.30M
  int sense;
1617
4.30M
  bpf_u_int32 aval0, aval1, oval0, oval1;
1618
4.30M
  int code = ep->code;
1619
1620
4.30M
  if (code < 0) {
1621
    /*
1622
     * This edge is a "branch if false" edge.
1623
     */
1624
1.78M
    code = -code;
1625
1.78M
    sense = 0;
1626
2.51M
  } else {
1627
    /*
1628
     * This edge is a "branch if true" edge.
1629
     */
1630
2.51M
    sense = 1;
1631
2.51M
  }
1632
1633
  /*
1634
   * If the opcode for the branch at the end of the block we
1635
   * were handed isn't the same as the opcode for the branch
1636
   * to which the edge we were handed corresponds, the tests
1637
   * for those branches aren't testing the same conditions,
1638
   * so the blocks to which the first block branches aren't
1639
   * candidates to replace the successor of the edge.
1640
   */
1641
4.30M
  if (child->s.code != code)
1642
1.50M
    return 0;
1643
1644
2.79M
  aval0 = child->val[A_ATOM];
1645
2.79M
  oval0 = child->oval;
1646
2.79M
  aval1 = ep->pred->val[A_ATOM];
1647
2.79M
  oval1 = ep->pred->oval;
1648
1649
  /*
1650
   * If the A register value on exit from the successor block
1651
   * isn't the same as the A register value on exit from the
1652
   * predecessor of the edge, the blocks to which the first
1653
   * block branches aren't candidates to replace the successor
1654
   * of the edge.
1655
   */
1656
2.79M
  if (aval0 != aval1)
1657
1.86M
    return 0;
1658
1659
931k
  if (oval0 == oval1)
1660
    /*
1661
     * The operands of the branch instructions are
1662
     * identical, so the branches are testing the
1663
     * same condition, and the result is true if a true
1664
     * branch was taken to get here, otherwise false.
1665
     */
1666
423k
    return sense ? JT(child) : JF(child);
1667
1668
507k
  if (sense && code == (BPF_JMP|BPF_JEQ|BPF_K))
1669
    /*
1670
     * At this point, we only know the comparison if we
1671
     * came down the true branch, and it was an equality
1672
     * comparison with a constant.
1673
     *
1674
     * I.e., if we came down the true branch, and the branch
1675
     * was an equality comparison with a constant, we know the
1676
     * accumulator contains that constant.  If we came down
1677
     * the false branch, or the comparison wasn't with a
1678
     * constant, we don't know what was in the accumulator.
1679
     *
1680
     * We rely on the fact that distinct constants have distinct
1681
     * value numbers.
1682
     */
1683
104k
    return JF(child);
1684
1685
403k
  return 0;
1686
507k
}
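/*
 * Editorial illustration of fold_edge() (not part of optimize.c).
 * Suppose the edge 'ep' leaves a block ending in "jeq #0x800" and it
 * dominates 'child', which ends in the same "jeq #0x800" computed on
 * the same A value (aval0 == aval1) with the same operand
 * (oval0 == oval1).  Then the second test is redundant:
 *
 *	- coming down a true edge (sense = 1), it must succeed again,
 *	  so JT(child) can replace the successor of ep;
 *	- coming down a false edge, it must fail, so JF(child) can.
 *
 * The trailing JEQ special case handles oval0 != oval1: a true
 * "jeq #k" edge pins the accumulator to the constant k, so the same
 * kind of equality test against a different constant is known false,
 * and JF(child) is returned; distinct constants are guaranteed
 * distinct value numbers by F().
 */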
1687
1688
/*
1689
 * If we can make this edge go directly to a child of the edge's current
1690
 * successor, do so.
1691
 */
1692
static void
1693
opt_j(opt_state_t *opt_state, struct edge *ep)
1694
1.76M
{
1695
1.76M
  register u_int i, k;
1696
1.76M
  register struct block *target;
1697
1698
  /*
1699
   * Does this edge go to a block where, if the test
1700
   * at the end of it succeeds, it goes to a block
1701
   * that's a leaf node of the DAG, i.e. a return
1702
   * statement?
1703
   * If so, there's nothing to optimize.
1704
   */
1705
1.76M
  if (JT(ep->succ) == 0)
1706
579k
    return;
1707
1708
  /*
1709
   * Does this edge go to a block that goes, in turn, to
1710
   * the same block regardless of whether the test at the
1711
   * end succeeds or fails?
1712
   */
1713
1.18M
  if (JT(ep->succ) == JF(ep->succ)) {
1714
    /*
1715
     * Common branch targets can be eliminated, provided
1716
     * there is no data dependency.
1717
     *
1718
     * Check whether any register used on exit from the
1719
     * block to which the successor of this edge goes
1720
     * has a value at that point that's different from
1721
     * the value it has on exit from the predecessor of
1722
     * this edge.  If not, the predecessor of this edge
1723
     * can just go to the block to which the successor
1724
     * of this edge goes, bypassing the successor of this
1725
     * edge, as the successor of this edge isn't doing
1726
     * any calculations whose results are different
1727
     * from what the blocks before it did and isn't
1728
     * doing any tests the results of which matter.
1729
     */
1730
200k
    if (!use_conflict(ep->pred, JT(ep->succ))) {
1731
      /*
1732
       * No, there isn't.
1733
       * Make this edge go to the block to
1734
       * which the successor of that edge
1735
       * goes.
1736
       *
1737
       * XXX - optimizer loop detection.
1738
       */
1739
192k
      opt_state->non_branch_movement_performed = 1;
1740
192k
      opt_state->done = 0;
1741
192k
      ep->succ = JT(ep->succ);
1742
192k
    }
1743
200k
  }
1744
  /*
1745
   * For each edge dominator that matches the successor of this
1746
   * edge, promote the edge successor to its grandchild.
1747
   *
1748
   * XXX We violate the set abstraction here in favor of a reasonably
1749
   * efficient loop.
1750
   */
1751
1.56M
 top:
1752
7.88M
  for (i = 0; i < opt_state->edgewords; ++i) {
1753
    /* i'th word in the bitset of dominators */
1754
6.84M
    register bpf_u_int32 x = ep->edom[i];
1755
1756
10.6M
    while (x != 0) {
1757
      /* Find the next dominator in that word and mark it as found */
1758
4.30M
      k = lowest_set_bit(x);
1759
4.30M
      x &=~ ((bpf_u_int32)1 << k);
1760
4.30M
      k += i * BITS_PER_WORD;
1761
1762
4.30M
      target = fold_edge(ep->succ, opt_state->edges[k]);
1763
      /*
1764
       * We have a candidate to replace the successor
1765
       * of ep.
1766
       *
1767
       * Check that there is no data dependency between
1768
       * nodes that will be violated if we move the edge;
1769
       * i.e., if any register used on exit from the
1770
       * candidate has a value at that point different
1771
       * from the value it has when we exit the
1772
       * predecessor of that edge, there's a data
1773
       * dependency that will be violated.
1774
       */
1775
4.30M
      if (target != 0 && !use_conflict(ep->pred, target)) {
1776
        /*
1777
         * It's safe to replace the successor of
1778
         * ep; do so, and note that we've made
1779
         * at least one change.
1780
         *
1781
         * XXX - this is one of the operations that
1782
         * happens when the optimizer gets into
1783
         * one of those infinite loops.
1784
         */
1785
523k
        opt_state->done = 0;
1786
523k
        ep->succ = target;
1787
523k
        if (JT(target) != 0)
1788
          /*
1789
           * Start over unless we hit a leaf.
1790
           */
1791
375k
          goto top;
1792
147k
        return;
1793
523k
      }
1794
4.30M
    }
1795
6.84M
  }
1796
1.56M
}
1797
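/*
 * The loop above walks a bitset of edge dominators one word at a time,
 * peeling off the lowest set bit until the word is empty.  A
 * self-contained sketch of the same idiom (names are illustrative;
 * __builtin_ctz is the gcc/clang builtin standing in for
 * lowest_set_bit(), and 32-bit words are assumed):
 */
#include <stdint.h>

static void
for_each_set_bit(const uint32_t *words, unsigned int nwords,
    void (*fn)(unsigned int bit))
{
  unsigned int i, k;
  uint32_t x;

  for (i = 0; i < nwords; i++) {
    x = words[i];
    while (x != 0) {
      k = (unsigned int)__builtin_ctz(x); /* lowest set bit */
      x &= ~((uint32_t)1 << k);           /* mark it as found */
      fn(i * 32 + k);                     /* global bit index */
    }
  }
}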
1798
/*
1799
 * XXX - is this, and and_pullup(), what's described in section 6.1.2
1800
 * "Predicate Assertion Propagation" in the BPF+ paper?
1801
 *
1802
 * Probably not: this looks at block dominators, not edge
1803
 * dominators.
1804
 *
1805
 * "A or B" compiles into
1806
 *
1807
 *          A
1808
 *       t / \ f
1809
 *        /   B
1810
 *       / t / \ f
1811
 *      \   /
1812
 *       \ /
1813
 *        X
1814
 *
1815
 *
1816
 */
1817
static void
1818
or_pullup(opt_state_t *opt_state, struct block *b)
1819
884k
{
1820
884k
  bpf_u_int32 val;
1821
884k
  int at_top;
1822
884k
  struct block *pull;
1823
884k
  struct block **diffp, **samep;
1824
884k
  struct edge *ep;
1825
1826
884k
  ep = b->in_edges;
1827
884k
  if (ep == 0)
1828
294k
    return;
1829
1830
  /*
1831
   * Make sure each predecessor loads the same value.
1832
   * XXX why?
1833
   */
1834
589k
  val = ep->pred->val[A_ATOM];
1835
676k
  for (ep = ep->next; ep != 0; ep = ep->next)
1836
197k
    if (val != ep->pred->val[A_ATOM])
1837
110k
      return;
1838
1839
  /*
1840
   * For the first edge in the list of edges coming into this block,
1841
   * see whether the predecessor of that edge comes here via a true
1842
   * branch or a false branch.
1843
   */
1844
478k
  if (JT(b->in_edges->pred) == b)
1845
266k
    diffp = &JT(b->in_edges->pred); /* jt */
1846
212k
  else
1847
212k
    diffp = &JF(b->in_edges->pred);  /* jf */
1848
1849
  /*
1850
   * diffp is a pointer to a pointer to the block.
1851
   *
1852
   * Go down the false chain looking as far as you can,
1853
   * making sure that each jump-compare is doing the
1854
   * same as the original block.
1855
   *
1856
   * If you reach the bottom before you reach a
1857
   * different jump-compare, just exit.  There's nothing
1858
   * to do here.  XXX - no, this version is checking for
1859
   * the value leaving the block; that's from the BPF+
1860
   * pullup routine.
1861
   */
1862
478k
  at_top = 1;
1863
678k
  for (;;) {
1864
    /*
1865
     * Done if that's not going anywhere XXX
1866
     */
1867
678k
    if (*diffp == 0)
1868
0
      return;
1869
1870
    /*
1871
     * Done if that predecessor blah blah blah isn't
1872
     * going the same place we're going XXX
1873
     *
1874
     * Does the true edge of this block point to the same
1875
     * location as the true edge of b?
1876
     */
1877
678k
    if (JT(*diffp) != JT(b))
1878
138k
      return;
1879
1880
    /*
1881
     * Done if this node in the chain isn't
1882
     * dominated by b.
1883
     *
1884
     * Does b dominate diffp?
1885
     */
1886
539k
    if (!SET_MEMBER((*diffp)->dom, b->id))
1887
1.99k
      return;
1888
1889
    /*
1890
     * Break out of the loop if that node's value of A
1891
     * isn't the value of A above XXX
1892
     */
1893
537k
    if ((*diffp)->val[A_ATOM] != val)
1894
338k
      break;
1895
1896
    /*
1897
     * Get the JF for that node XXX
1898
     * Go down the false path.
1899
     */
1900
199k
    diffp = &JF(*diffp);
1901
199k
    at_top = 0;
1902
199k
  }
1903
1904
  /*
1905
   * Now that we've found a different jump-compare in a chain
1906
   * below b, search further down until we find another
1907
   * jump-compare that looks at the original value.  This
1908
   * jump-compare should get pulled up.  XXX again we're
1909
   * comparing values not jump-compares.
1910
   */
1911
338k
  samep = &JF(*diffp);
1912
414k
  for (;;) {
1913
    /*
1914
     * Done if that's not going anywhere XXX
1915
     */
1916
414k
    if (*samep == 0)
1917
0
      return;
1918
1919
    /*
1920
     * Done if this block's true edge isn't going
1921
     * the same place we're going.
1922
     */
1923
414k
    if (JT(*samep) != JT(b))
1924
315k
      return;
1925
1926
    /*
1927
     * Done if this node in the chain isn't
1928
     * dominated by b.
1929
     *
1930
     * Does b dominate samep?
1931
     */
1932
98.7k
    if (!SET_MEMBER((*samep)->dom, b->id))
1933
19.7k
      return;
1934
1935
    /*
1936
     * Break out of the loop if that node's value of A
1937
     * is the value of A above XXX
1938
     */
1939
78.9k
    if ((*samep)->val[A_ATOM] == val)
1940
3.09k
      break;
1941
1942
    /* XXX Need to check that there are no data dependencies
1943
       between diffp and samep.  Currently, the code generator
1944
       will not produce such dependencies. */
1945
75.8k
    samep = &JF(*samep);
1946
75.8k
  }
1947
#ifdef notdef
1948
  /* XXX This doesn't cover everything. */
1949
  for (i = 0; i < N_ATOMS; ++i)
1950
    if ((*samep)->val[i] != pred->val[i])
1951
      return;
1952
#endif
1953
  /* Pull up the node. */
1954
3.09k
  pull = *samep;
1955
3.09k
  *samep = JF(pull);
1956
3.09k
  JF(pull) = *diffp;
1957
1958
  /*
1959
   * At the top of the chain, each predecessor needs to point at the
1960
   * pulled up node.  Inside the chain, there is only one predecessor
1961
   * to worry about.
1962
   */
1963
3.09k
  if (at_top) {
1964
7.04k
    for (ep = b->in_edges; ep != 0; ep = ep->next) {
1965
4.15k
      if (JT(ep->pred) == b)
1966
1.45k
        JT(ep->pred) = pull;
1967
2.69k
      else
1968
2.69k
        JF(ep->pred) = pull;
1969
4.15k
    }
1970
2.89k
  }
1971
201
  else
1972
201
    *diffp = pull;
1973
1974
  /*
1975
   * XXX - this is one of the operations that happens when the
1976
   * optimizer gets into one of those infinite loops.
1977
   */
1978
3.09k
  opt_state->done = 0;
1979
3.09k
}
1980
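/*
 * A schematic of the splice performed above (block letters are
 * illustrative).  All blocks shown share b's true-branch target; v is
 * the A-register value number at the top of the chain:
 *
 *   before:                          after:
 *
 *      b  (val == v)                    b  (val == v)
 *      |f                               |f
 *      D  (val != v)                    P  (val == v)
 *      |f                               |f
 *     ...                               D  (val != v)
 *      |f                               |f
 *      P  (val == v)                   ...
 *      |f                               |f
 *     rest                             rest
 *
 * Moving P up beside the other val == v tests gives fold_edge() and
 * opt_j() adjacent comparisons on the same value to collapse on a
 * later pass.
 */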
1981
static void
1982
and_pullup(opt_state_t *opt_state, struct block *b)
1983
884k
{
1984
884k
  bpf_u_int32 val;
1985
884k
  int at_top;
1986
884k
  struct block *pull;
1987
884k
  struct block **diffp, **samep;
1988
884k
  struct edge *ep;
1989
1990
884k
  ep = b->in_edges;
1991
884k
  if (ep == 0)
1992
294k
    return;
1993
1994
  /*
1995
   * Make sure each predecessor loads the same value.
1996
   */
1997
589k
  val = ep->pred->val[A_ATOM];
1998
676k
  for (ep = ep->next; ep != 0; ep = ep->next)
1999
197k
    if (val != ep->pred->val[A_ATOM])
2000
110k
      return;
2001
2002
478k
  if (JT(b->in_edges->pred) == b)
2003
266k
    diffp = &JT(b->in_edges->pred);
2004
212k
  else
2005
212k
    diffp = &JF(b->in_edges->pred);
2006
2007
478k
  at_top = 1;
2008
622k
  for (;;) {
2009
622k
    if (*diffp == 0)
2010
0
      return;
2011
2012
622k
    if (JF(*diffp) != JF(b))
2013
103k
      return;
2014
2015
518k
    if (!SET_MEMBER((*diffp)->dom, b->id))
2016
15.0k
      return;
2017
2018
503k
    if ((*diffp)->val[A_ATOM] != val)
2019
360k
      break;
2020
2021
143k
    diffp = &JT(*diffp);
2022
143k
    at_top = 0;
2023
143k
  }
2024
360k
  samep = &JT(*diffp);
2025
422k
  for (;;) {
2026
422k
    if (*samep == 0)
2027
0
      return;
2028
2029
422k
    if (JF(*samep) != JF(b))
2030
349k
      return;
2031
2032
72.8k
    if (!SET_MEMBER((*samep)->dom, b->id))
2033
9.44k
      return;
2034
2035
63.4k
    if ((*samep)->val[A_ATOM] == val)
2036
1.51k
      break;
2037
2038
    /* XXX Need to check that there are no data dependencies
2039
       between diffp and samep.  Currently, the code generator
2040
       will not produce such dependencies. */
2041
61.9k
    samep = &JT(*samep);
2042
61.9k
  }
2043
#ifdef notdef
2044
  /* XXX This doesn't cover everything. */
2045
  for (i = 0; i < N_ATOMS; ++i)
2046
    if ((*samep)->val[i] != pred->val[i])
2047
      return;
2048
#endif
2049
  /* Pull up the node. */
2050
1.51k
  pull = *samep;
2051
1.51k
  *samep = JT(pull);
2052
1.51k
  JT(pull) = *diffp;
2053
2054
  /*
2055
   * At the top of the chain, each predecessor needs to point at the
2056
   * pulled up node.  Inside the chain, there is only one predecessor
2057
   * to worry about.
2058
   */
2059
1.51k
  if (at_top) {
2060
3.21k
    for (ep = b->in_edges; ep != 0; ep = ep->next) {
2061
1.70k
      if (JT(ep->pred) == b)
2062
629
        JT(ep->pred) = pull;
2063
1.07k
      else
2064
1.07k
        JF(ep->pred) = pull;
2065
1.70k
    }
2066
1.51k
  }
2067
5
  else
2068
5
    *diffp = pull;
2069
2070
  /*
2071
   * XXX - this is one of the operations that happens when the
2072
   * optimizer gets into one of those infinite loops.
2073
   */
2074
1.51k
  opt_state->done = 0;
2075
1.51k
}
2076
2077
static void
2078
opt_blks(opt_state_t *opt_state, struct icode *ic, int do_stmts)
2079
147k
{
2080
147k
  int i, maxlevel;
2081
147k
  struct block *p;
2082
2083
147k
  init_val(opt_state);
2084
147k
  maxlevel = ic->root->level;
2085
2086
147k
  find_inedges(opt_state, ic->root);
2087
1.38M
  for (i = maxlevel; i >= 0; --i)
2088
2.65M
    for (p = opt_state->levels[i]; p; p = p->link)
2089
1.41M
      opt_blk(opt_state, p, do_stmts);
2090
2091
147k
  if (do_stmts)
2092
    /*
2093
     * No point trying to move branches; it can't possibly
2094
     * make a difference at this point.
2095
     *
2096
     * XXX - this might be after we detect a loop where
2097
     * we were just looping infinitely moving branches
2098
     * in such a fashion that we went through two or more
2099
     * versions of the machine code, eventually returning
2100
     * to the first version.  (We're really not doing a
2101
     * full loop detection, we're just testing for two
2102
     * passes in a row where we do nothing but
2103
     * move branches.)
2104
     */
2105
61.5k
    return;
2106
2107
  /*
2108
   * Is this what the BPF+ paper describes in sections 6.1.1,
2109
   * 6.1.2, and 6.1.3?
2110
   */
2111
912k
  for (i = 1; i <= maxlevel; ++i) {
2112
1.71M
    for (p = opt_state->levels[i]; p; p = p->link) {
2113
884k
      opt_j(opt_state, &p->et);
2114
884k
      opt_j(opt_state, &p->ef);
2115
884k
    }
2116
827k
  }
2117
2118
85.9k
  find_inedges(opt_state, ic->root);
2119
912k
  for (i = 1; i <= maxlevel; ++i) {
2120
1.71M
    for (p = opt_state->levels[i]; p; p = p->link) {
2121
884k
      or_pullup(opt_state, p);
2122
884k
      and_pullup(opt_state, p);
2123
884k
    }
2124
827k
  }
2125
85.9k
}
2126
2127
static inline void
2128
link_inedge(struct edge *parent, struct block *child)
2129
4.12M
{
2130
4.12M
  parent->next = child->in_edges;
2131
4.12M
  child->in_edges = parent;
2132
4.12M
}
2133
2134
static void
2135
find_inedges(opt_state_t *opt_state, struct block *root)
2136
231k
{
2137
231k
  u_int i;
2138
231k
  int level;
2139
231k
  struct block *b;
2140
2141
4.73M
  for (i = 0; i < opt_state->n_blocks; ++i)
2142
4.50M
    opt_state->blocks[i]->in_edges = 0;
2143
2144
  /*
2145
   * Traverse the graph, adding each edge to the predecessor
2146
   * list of its successors.  Skip the leaves (i.e. level 0).
2147
   */
2148
2.15M
  for (level = root->level; level > 0; --level) {
2149
3.97M
    for (b = opt_state->levels[level]; b != 0; b = b->link) {
2150
2.06M
      link_inedge(&b->et, JT(b));
2151
2.06M
      link_inedge(&b->ef, JF(b));
2152
2.06M
    }
2153
1.91M
  }
2154
231k
}
2155
2156
static void
2157
opt_root(struct block **b)
2158
28.7k
{
2159
28.7k
  struct slist *tmp, *s;
2160
2161
28.7k
  s = (*b)->stmts;
2162
28.7k
  (*b)->stmts = 0;
2163
47.8k
  while (BPF_CLASS((*b)->s.code) == BPF_JMP && JT(*b) == JF(*b))
2164
19.1k
    *b = JT(*b);
2165
2166
28.7k
  tmp = (*b)->stmts;
2167
28.7k
  if (tmp != 0)
2168
3.25k
    sappend(s, tmp);
2169
28.7k
  (*b)->stmts = s;
2170
2171
  /*
2172
   * If the root node is a return, then there is no
2173
   * point executing any statements (since the bpf machine
2174
   * has no side effects).
2175
   */
2176
28.7k
  if (BPF_CLASS((*b)->s.code) == BPF_RET)
2177
15.4k
    (*b)->stmts = 0;
2178
28.7k
}
2179
2180
static void
2181
opt_loop(opt_state_t *opt_state, struct icode *ic, int do_stmts)
2182
60.2k
{
2183
2184
#ifdef BDEBUG
2185
  if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
2186
    printf("opt_loop(root, %d) begin\n", do_stmts);
2187
    opt_dump(opt_state, ic);
2188
  }
2189
#endif
2190
2191
  /*
2192
   * XXX - optimizer loop detection.
2193
   */
2194
60.2k
  int loop_count = 0;
2195
147k
  for (;;) {
2196
147k
    opt_state->done = 1;
2197
    /*
2198
     * XXX - optimizer loop detection.
2199
     */
2200
147k
    opt_state->non_branch_movement_performed = 0;
2201
147k
    find_levels(opt_state, ic);
2202
147k
    find_dom(opt_state, ic->root);
2203
147k
    find_closure(opt_state, ic->root);
2204
147k
    find_ud(opt_state, ic->root);
2205
147k
    find_edom(opt_state, ic->root);
2206
147k
    opt_blks(opt_state, ic, do_stmts);
2207
#ifdef BDEBUG
2208
    if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
2209
      printf("opt_loop(root, %d) bottom, done=%d\n", do_stmts, opt_state->done);
2210
      opt_dump(opt_state, ic);
2211
    }
2212
#endif
2213
2214
    /*
2215
     * Was anything done in this optimizer pass?
2216
     */
2217
147k
    if (opt_state->done) {
2218
      /*
2219
       * No, so we've reached a fixed point.
2220
       * We're done.
2221
       */
2222
58.8k
      break;
2223
58.8k
    }
2224
2225
    /*
2226
     * XXX - was anything done other than branch movement
2227
     * in this pass?
2228
     */
2229
88.6k
    if (opt_state->non_branch_movement_performed) {
2230
      /*
2231
       * Yes.  Clear any loop-detection counter;
2232
       * we're making some form of progress (assuming
2233
       * we can't get into a cycle doing *other*
2234
       * optimizations...).
2235
       */
2236
81.2k
      loop_count = 0;
2237
81.2k
    } else {
2238
      /*
2239
       * No - increment the counter, and quit if
2240
       * it's up to 100.
2241
       */
2242
7.42k
      loop_count++;
2243
7.42k
      if (loop_count >= 100) {
2244
        /*
2245
         * We've done nothing but branch movement
2246
         * for 100 passes; we're probably
2247
         * in a cycle and will never reach a
2248
         * fixed point.
2249
         *
2250
         * XXX - yes, we really need a non-
2251
         * heuristic way of detecting a cycle.
2252
         */
2253
0
        opt_state->done = 1;
2254
0
        break;
2255
0
      }
2256
7.42k
    }
2257
88.6k
  }
2258
60.2k
}
2259
2260
/*
2261
 * Optimize the filter code in its DAG representation.
2262
 * Return 0 on success, -1 on error.
2263
 */
2264
int
2265
bpf_optimize(struct icode *ic, char *errbuf)
2266
30.1k
{
2267
30.1k
  opt_state_t opt_state;
2268
2269
30.1k
  memset(&opt_state, 0, sizeof(opt_state));
2270
30.1k
  opt_state.errbuf = errbuf;
2271
30.1k
  opt_state.non_branch_movement_performed = 0;
2272
30.1k
  if (setjmp(opt_state.top_ctx)) {
2273
1.42k
    opt_cleanup(&opt_state);
2274
1.42k
    return -1;
2275
1.42k
  }
2276
28.7k
  opt_init(&opt_state, ic);
2277
28.7k
  opt_loop(&opt_state, ic, 0);
2278
28.7k
  opt_loop(&opt_state, ic, 1);
2279
28.7k
  intern_blocks(&opt_state, ic);
2280
#ifdef BDEBUG
2281
  if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
2282
    printf("after intern_blocks()\n");
2283
    opt_dump(&opt_state, ic);
2284
  }
2285
#endif
2286
28.7k
  opt_root(&ic->root);
2287
#ifdef BDEBUG
2288
  if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
2289
    printf("after opt_root()\n");
2290
    opt_dump(&opt_state, ic);
2291
  }
2292
#endif
2293
28.7k
  opt_cleanup(&opt_state);
2294
28.7k
  return 0;
2295
30.1k
}
2296
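/*
 * The setjmp()/longjmp() pair above is the optimizer's error-recovery
 * path: opt_error() (defined later) longjmps straight back to the
 * setjmp() in bpf_optimize(), which cleans up and returns -1.  A
 * minimal self-contained sketch of the same idiom (all names here are
 * illustrative, not libpcap's):
 */
#include <setjmp.h>
#include <stdio.h>

static jmp_buf top_ctx;

static void
fail(const char *msg)
{
  fprintf(stderr, "error: %s\n", msg);
  longjmp(top_ctx, 1);  /* unwind straight back to setjmp() */
}

static int
run(int too_complex)
{
  if (setjmp(top_ctx))
    return (-1);        /* reached via longjmp(): clean up, fail */
  if (too_complex)
    fail("filter is too complex");  /* bail out deep in the work */
  return (0);           /* normal completion */
}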
2297
static void
2298
make_marks(struct icode *ic, struct block *p)
2299
632k
{
2300
632k
  if (!isMarked(ic, p)) {
2301
347k
    Mark(ic, p);
2302
347k
    if (BPF_CLASS(p->s.code) != BPF_RET) {
2303
299k
      make_marks(ic, JT(p));
2304
299k
      make_marks(ic, JF(p));
2305
299k
    }
2306
347k
  }
2307
632k
}
2308
2309
/*
2310
 * Mark code array such that isMarked(ic->cur_mark, i) is true
2311
 * only for nodes that are alive.
2312
 */
2313
static void
2314
mark_code(struct icode *ic)
2315
33.0k
{
2316
33.0k
  ic->cur_mark += 1;
2317
33.0k
  make_marks(ic, ic->root);
2318
33.0k
}
2319
2320
/*
2321
 * True iff the two stmt lists load the same value from the packet into
2322
 * the accumulator.
2323
 */
2324
static int
2325
eq_slist(struct slist *x, struct slist *y)
2326
15.1k
{
2327
23.7k
  for (;;) {
2328
27.2k
    while (x && x->s.code == NOP)
2329
3.55k
      x = x->next;
2330
27.4k
    while (y && y->s.code == NOP)
2331
3.73k
      y = y->next;
2332
23.7k
    if (x == 0)
2333
6.43k
      return y == 0;
2334
17.2k
    if (y == 0)
2335
153
      return x == 0;
2336
17.1k
    if (x->s.code != y->s.code || x->s.k != y->s.k)
2337
8.54k
      return 0;
2338
8.57k
    x = x->next;
2339
8.57k
    y = y->next;
2340
8.57k
  }
2341
15.1k
}
2342
2343
static inline int
2344
eq_blk(struct block *b0, struct block *b1)
2345
9.73M
{
2346
9.73M
  if (b0->s.code == b1->s.code &&
2347
9.73M
      b0->s.k == b1->s.k &&
2348
9.73M
      b0->et.succ == b1->et.succ &&
2349
9.73M
      b0->ef.succ == b1->ef.succ)
2350
15.1k
    return eq_slist(b0->stmts, b1->stmts);
2351
9.71M
  return 0;
2352
9.73M
}
2353
2354
static void
2355
intern_blocks(opt_state_t *opt_state, struct icode *ic)
2356
28.7k
{
2357
28.7k
  struct block *p;
2358
28.7k
  u_int i, j;
2359
28.7k
  int done1; /* don't shadow global */
2360
33.0k
 top:
2361
33.0k
  done1 = 1;
2362
915k
  for (i = 0; i < opt_state->n_blocks; ++i)
2363
882k
    opt_state->blocks[i]->link = 0;
2364
2365
33.0k
  mark_code(ic);
2366
2367
882k
  for (i = opt_state->n_blocks - 1; i != 0; ) {
2368
849k
    --i;
2369
849k
    if (!isMarked(ic, opt_state->blocks[i]))
2370
525k
      continue;
2371
17.6M
    for (j = i + 1; j < opt_state->n_blocks; ++j) {
2372
17.3M
      if (!isMarked(ic, opt_state->blocks[j]))
2373
7.64M
        continue;
2374
9.73M
      if (eq_blk(opt_state->blocks[i], opt_state->blocks[j])) {
2375
6.22k
        opt_state->blocks[i]->link = opt_state->blocks[j]->link ?
2376
4.86k
          opt_state->blocks[j]->link : opt_state->blocks[j];
2377
6.22k
        break;
2378
6.22k
      }
2379
9.73M
    }
2380
323k
  }
2381
915k
  for (i = 0; i < opt_state->n_blocks; ++i) {
2382
882k
    p = opt_state->blocks[i];
2383
882k
    if (JT(p) == 0)
2384
62.2k
      continue;
2385
820k
    if (JT(p)->link) {
2386
7.82k
      done1 = 0;
2387
7.82k
      JT(p) = JT(p)->link;
2388
7.82k
    }
2389
820k
    if (JF(p)->link) {
2390
3.70k
      done1 = 0;
2391
3.70k
      JF(p) = JF(p)->link;
2392
3.70k
    }
2393
820k
  }
2394
33.0k
  if (!done1)
2395
4.34k
    goto top;
2396
33.0k
}
2397
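/*
 * A hypothetical example of what intern_blocks() merges: two blocks
 * whose statements, branch opcode and operand, and successors are all
 * identical (the block numbers are illustrative):
 *
 *   B4:  ld [26]; jeq #0x1010101 -> (B6, B7)
 *   B9:  ld [26]; jeq #0x1010101 -> (B6, B7)
 *
 * eq_blk(B4, B9) is true, so B4->link is set to B9 and every branch
 * into B4 is redirected to B9; B4 then becomes unreachable and drops
 * out when the code is emitted.
 */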
2398
static void
2399
opt_cleanup(opt_state_t *opt_state)
2400
30.1k
{
2401
30.1k
  free((void *)opt_state->vnode_base);
2402
30.1k
  free((void *)opt_state->vmap);
2403
30.1k
  free((void *)opt_state->edges);
2404
30.1k
  free((void *)opt_state->space);
2405
30.1k
  free((void *)opt_state->levels);
2406
30.1k
  free((void *)opt_state->blocks);
2407
30.1k
}
2408
2409
/*
2410
 * For optimizer errors.
2411
 */
2412
static void PCAP_NORETURN
2413
opt_error(opt_state_t *opt_state, const char *fmt, ...)
2414
1.42k
{
2415
1.42k
  va_list ap;
2416
2417
1.42k
  if (opt_state->errbuf != NULL) {
2418
1.42k
    va_start(ap, fmt);
2419
1.42k
    (void)vsnprintf(opt_state->errbuf,
2420
1.42k
        PCAP_ERRBUF_SIZE, fmt, ap);
2421
1.42k
    va_end(ap);
2422
1.42k
  }
2423
1.42k
  longjmp(opt_state->top_ctx, 1);
2424
  /* NOTREACHED */
2425
#ifdef _AIX
2426
  PCAP_UNREACHABLE
2427
#endif /* _AIX */
2428
1.42k
}
2429
2430
/*
2431
 * Return the number of stmts in 's'.
2432
 */
2433
static u_int
2434
slength(struct slist *s)
2435
2.98M
{
2436
2.98M
  u_int n = 0;
2437
2438
9.95M
  for (; s; s = s->next)
2439
6.96M
    if (s->s.code != NOP)
2440
6.39M
      ++n;
2441
2.98M
  return n;
2442
2.98M
}
2443
2444
/*
2445
 * Return the number of nodes reachable by 'p'.
2446
 * All nodes should be initially unmarked.
2447
 */
2448
static int
2449
count_blocks(struct icode *ic, struct block *p)
2450
1.11M
{
2451
1.11M
  if (p == 0 || isMarked(ic, p))
2452
571k
    return 0;
2453
541k
  Mark(ic, p);
2454
541k
  return count_blocks(ic, JT(p)) + count_blocks(ic, JF(p)) + 1;
2455
1.11M
}
2456
2457
/*
2458
 * Do a depth first search on the flow graph, numbering the
2459
 * the basic blocks, and entering them into the 'blocks' array.`
2460
 */
2461
static void
2462
number_blks_r(opt_state_t *opt_state, struct icode *ic, struct block *p)
2463
1.11M
{
2464
1.11M
  u_int n;
2465
2466
1.11M
  if (p == 0 || isMarked(ic, p))
2467
571k
    return;
2468
2469
541k
  Mark(ic, p);
2470
541k
  n = opt_state->n_blocks++;
2471
541k
  if (opt_state->n_blocks == 0) {
2472
    /*
2473
     * Overflow.
2474
     */
2475
0
    opt_error(opt_state, "filter is too complex to optimize");
2476
0
  }
2477
541k
  p->id = n;
2478
541k
  opt_state->blocks[n] = p;
2479
2480
541k
  number_blks_r(opt_state, ic, JT(p));
2481
541k
  number_blks_r(opt_state, ic, JF(p));
2482
541k
}
2483
2484
/*
2485
 * Return the number of stmts in the flowgraph reachable by 'p'.
2486
 * The nodes should be unmarked before calling.
2487
 *
2488
 * Note that "stmts" means "instructions", and that this includes
2489
 *
2490
 *  side-effect statements in 'p' (slength(p->stmts));
2491
 *
2492
 *  statements in the true branch from 'p' (count_stmts(JT(p)));
2493
 *
2494
 *  statements in the false branch from 'p' (count_stmts(JF(p)));
2495
 *
2496
 *  the conditional jump itself (1);
2497
 *
2498
 *  an extra long jump if the true branch requires it (p->longjt);
2499
 *
2500
 *  an extra long jump if the false branch requires it (p->longjf).
2501
 */
2502
static u_int
2503
count_stmts(struct icode *ic, struct block *p)
2504
2.81M
{
2505
2.81M
  u_int n;
2506
2507
2.81M
  if (p == 0 || isMarked(ic, p))
2508
1.42M
    return 0;
2509
1.39M
  Mark(ic, p);
2510
1.39M
  n = count_stmts(ic, JT(p)) + count_stmts(ic, JF(p));
2511
1.39M
  return slength(p->stmts) + n + 1 + p->longjt + p->longjf;
2512
2.81M
}
2513
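As a worked example (with hypothetical numbers): a reachable block with three non-NOP statements, longjt == 1, and longjf == 0 contributes slength() + 1 + longjt + longjf = 3 + 1 + 1 + 0 = 5 instructions to the total, the two extra slots being the conditional jump itself and the long jump inserted for its true branch.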
2514
/*
2515
 * Allocate memory.  All allocation is done before optimization
2516
 * is begun.  A linear bound on the size of all data structures is computed
2517
 * from the total number of blocks and/or statements.
2518
 */
2519
static void
2520
opt_init(opt_state_t *opt_state, struct icode *ic)
2521
30.1k
{
2522
30.1k
  bpf_u_int32 *p;
2523
30.1k
  int i, n, max_stmts;
2524
30.1k
  u_int product;
2525
30.1k
  size_t block_memsize, edge_memsize;
2526
2527
  /*
2528
   * First, count the blocks, so we can malloc an array to map
2529
   * block number to block.  Then, put the blocks into the array.
2530
   */
2531
30.1k
  unMarkAll(ic);
2532
30.1k
  n = count_blocks(ic, ic->root);
2533
30.1k
  opt_state->blocks = (struct block **)calloc(n, sizeof(*opt_state->blocks));
2534
30.1k
  if (opt_state->blocks == NULL)
2535
0
    opt_error(opt_state, "malloc");
2536
30.1k
  unMarkAll(ic);
2537
30.1k
  opt_state->n_blocks = 0;
2538
30.1k
  number_blks_r(opt_state, ic, ic->root);
2539
2540
  /*
2541
   * This "should not happen".
2542
   */
2543
30.1k
  if (opt_state->n_blocks == 0)
2544
0
    opt_error(opt_state, "filter has no instructions; please report this as a libpcap issue");
2545
2546
30.1k
  opt_state->n_edges = 2 * opt_state->n_blocks;
2547
30.1k
  if ((opt_state->n_edges / 2) != opt_state->n_blocks) {
2548
    /*
2549
     * Overflow.
2550
     */
2551
0
    opt_error(opt_state, "filter is too complex to optimize");
2552
0
  }
2553
30.1k
  opt_state->edges = (struct edge **)calloc(opt_state->n_edges, sizeof(*opt_state->edges));
2554
30.1k
  if (opt_state->edges == NULL) {
2555
0
    opt_error(opt_state, "malloc");
2556
0
  }
2557
2558
  /*
2559
   * The number of levels is bounded by the number of nodes.
2560
   */
2561
30.1k
  opt_state->levels = (struct block **)calloc(opt_state->n_blocks, sizeof(*opt_state->levels));
2562
30.1k
  if (opt_state->levels == NULL) {
2563
0
    opt_error(opt_state, "malloc");
2564
0
  }
2565
2566
30.1k
  opt_state->edgewords = opt_state->n_edges / BITS_PER_WORD + 1;
2567
30.1k
  opt_state->nodewords = opt_state->n_blocks / BITS_PER_WORD + 1;
2568
2569
  /*
2570
   * Make sure opt_state->n_blocks * opt_state->nodewords fits
2571
   * in a u_int; we use it as a u_int number-of-iterations
2572
   * value.
2573
   */
2574
30.1k
  product = opt_state->n_blocks * opt_state->nodewords;
2575
30.1k
  if ((product / opt_state->n_blocks) != opt_state->nodewords) {
2576
    /*
2577
     * XXX - just punt and don't try to optimize?
2578
     * In practice, this is unlikely to happen with
2579
     * a normal filter.
2580
     */
2581
0
    opt_error(opt_state, "filter is too complex to optimize");
2582
0
  }
2583
2584
  /*
2585
   * Make sure the total memory required for that doesn't
2586
   * overflow.
2587
   */
2588
30.1k
  block_memsize = (size_t)2 * product * sizeof(*opt_state->space);
2589
30.1k
  if ((block_memsize / product) != 2 * sizeof(*opt_state->space)) {
2590
0
    opt_error(opt_state, "filter is too complex to optimize");
2591
0
  }
2592
2593
  /*
2594
   * Make sure opt_state->n_edges * opt_state->edgewords fits
2595
   * in a u_int; we use it as a u_int number-of-iterations
2596
   * value.
2597
   */
2598
30.1k
  product = opt_state->n_edges * opt_state->edgewords;
2599
30.1k
  if ((product / opt_state->n_edges) != opt_state->edgewords) {
2600
0
    opt_error(opt_state, "filter is too complex to optimize");
2601
0
  }
2602
2603
  /*
2604
   * Make sure the total memory required for that doesn't
2605
   * overflow.
2606
   */
2607
30.1k
  edge_memsize = (size_t)product * sizeof(*opt_state->space);
2608
30.1k
  if (edge_memsize / product != sizeof(*opt_state->space)) {
2609
0
    opt_error(opt_state, "filter is too complex to optimize");
2610
0
  }
2611
2612
  /*
2613
   * Make sure the total memory required for both of them doesn't
2614
   * overflow.
2615
   */
2616
30.1k
  if (block_memsize > SIZE_MAX - edge_memsize) {
2617
0
    opt_error(opt_state, "filter is too complex to optimize");
2618
0
  }
2619
2620
  /* XXX */
2621
30.1k
  opt_state->space = (bpf_u_int32 *)malloc(block_memsize + edge_memsize);
2622
30.1k
  if (opt_state->space == NULL) {
2623
0
    opt_error(opt_state, "malloc");
2624
0
  }
2625
30.1k
  p = opt_state->space;
2626
30.1k
  opt_state->all_dom_sets = p;
2627
571k
  for (i = 0; i < n; ++i) {
2628
541k
    opt_state->blocks[i]->dom = p;
2629
541k
    p += opt_state->nodewords;
2630
541k
  }
2631
30.1k
  opt_state->all_closure_sets = p;
2632
571k
  for (i = 0; i < n; ++i) {
2633
541k
    opt_state->blocks[i]->closure = p;
2634
541k
    p += opt_state->nodewords;
2635
541k
  }
2636
30.1k
  opt_state->all_edge_sets = p;
2637
571k
  for (i = 0; i < n; ++i) {
2638
541k
    register struct block *b = opt_state->blocks[i];
2639
2640
541k
    b->et.edom = p;
2641
541k
    p += opt_state->edgewords;
2642
541k
    b->ef.edom = p;
2643
541k
    p += opt_state->edgewords;
2644
541k
    b->et.id = i;
2645
541k
    opt_state->edges[i] = &b->et;
2646
541k
    b->ef.id = opt_state->n_blocks + i;
2647
541k
    opt_state->edges[opt_state->n_blocks + i] = &b->ef;
2648
541k
    b->et.pred = b;
2649
541k
    b->ef.pred = b;
2650
541k
  }
2651
30.1k
  max_stmts = 0;
2652
571k
  for (i = 0; i < n; ++i)
2653
541k
    max_stmts += slength(opt_state->blocks[i]->stmts) + 1;
2654
  /*
2655
   * We allocate at most 3 value numbers per statement,
2656
   * so this is an upper bound on the number of valnodes
2657
   * we'll need.
2658
   */
2659
30.1k
  opt_state->maxval = 3 * max_stmts;
2660
30.1k
  opt_state->vmap = (struct vmapinfo *)calloc(opt_state->maxval, sizeof(*opt_state->vmap));
2661
30.1k
  if (opt_state->vmap == NULL) {
2662
0
    opt_error(opt_state, "malloc");
2663
0
  }
2664
30.1k
  opt_state->vnode_base = (struct valnode *)calloc(opt_state->maxval, sizeof(*opt_state->vnode_base));
2665
30.1k
  if (opt_state->vnode_base == NULL) {
2666
0
    opt_error(opt_state, "malloc");
2667
0
  }
2668
30.1k
}
2669
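/*
 * The repeated "(product / a) != b" tests above are the classic
 * division-back check for unsigned multiplication overflow: unsigned
 * arithmetic wraps, and for a != 0 the quotient equals b exactly when
 * no wrap occurred.  A generic sketch (the helper name is
 * illustrative):
 */
#include <stddef.h>

static int
mul_overflows(size_t a, size_t b, size_t *res)
{
  size_t product = a * b;   /* may wrap modulo SIZE_MAX + 1 */

  if (a != 0 && product / a != b)
    return (1);             /* wrapped: report overflow */
  *res = product;
  return (0);
}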
2670
/*
2671
 * This is only used when supporting optimizer debugging.  It is
2672
 * global state, so do *not* do more than one compile in parallel
2673
 * and expect it to provide meaningful information.
2674
 */
2675
#ifdef BDEBUG
2676
int bids[NBIDS];
2677
#endif
2678
2679
static void PCAP_NORETURN conv_error(conv_state_t *, const char *, ...)
2680
    PCAP_PRINTFLIKE(2, 3);
2681
2682
/*
2683
 * Returns true if successful.  Returns false if a branch has
2684
 * an offset that is too large.  If so, we have marked that
2685
 * branch so that on a subsequent iteration, it will be treated
2686
 * properly.
2687
 */
2688
static int
2689
convert_code_r(conv_state_t *conv_state, struct icode *ic, struct block *p)
2690
2.35M
{
2691
2.35M
  struct bpf_insn *dst;
2692
2.35M
  struct slist *src;
2693
2.35M
  u_int slen;
2694
2.35M
  u_int off;
2695
2.35M
  struct slist **offset = NULL;
2696
2697
2.35M
  if (p == 0 || isMarked(ic, p))
2698
1.11M
    return (1);
2699
1.23M
  Mark(ic, p);
2700
2701
1.23M
  if (convert_code_r(conv_state, ic, JF(p)) == 0)
2702
151k
    return (0);
2703
1.08M
  if (convert_code_r(conv_state, ic, JT(p)) == 0)
2704
38.5k
    return (0);
2705
2706
1.04M
  slen = slength(p->stmts);
2707
1.04M
  dst = conv_state->ftail -= (slen + 1 + p->longjt + p->longjf);
2708
    /* inflate length by any extra jumps */
2709
2710
1.04M
  p->offset = (int)(dst - conv_state->fstart);
2711
2712
  /* generate offset[] for convenience  */
2713
1.04M
  if (slen) {
2714
951k
    offset = (struct slist **)calloc(slen, sizeof(struct slist *));
2715
951k
    if (!offset) {
2716
0
      conv_error(conv_state, "not enough core");
2717
      /*NOTREACHED*/
2718
0
    }
2719
951k
  }
2720
1.04M
  src = p->stmts;
2721
3.13M
  for (off = 0; off < slen && src; off++) {
2722
#if 0
2723
    printf("off=%d src=%x\n", off, src);
2724
#endif
2725
2.08M
    offset[off] = src;
2726
2.08M
    src = src->next;
2727
2.08M
  }
2728
2729
1.04M
  off = 0;
2730
3.41M
  for (src = p->stmts; src; src = src->next) {
2731
2.36M
    if (src->s.code == NOP)
2732
286k
      continue;
2733
2.08M
    dst->code = (u_short)src->s.code;
2734
2.08M
    dst->k = src->s.k;
2735
2736
    /* fill block-local relative jump */
2737
2.08M
    if (BPF_CLASS(src->s.code) != BPF_JMP || src->s.code == (BPF_JMP|BPF_JA)) {
2738
#if 0
2739
      if (src->s.jt || src->s.jf) {
2740
        free(offset);
2741
        conv_error(conv_state, "illegal jmp destination");
2742
        /*NOTREACHED*/
2743
      }
2744
#endif
2745
2.06M
      goto filled;
2746
2.06M
    }
2747
16.9k
    if (off == slen - 2)  /*???*/
2748
0
      goto filled;
2749
2750
16.9k
      {
2751
16.9k
    u_int i;
2752
16.9k
    int jt, jf;
2753
16.9k
    const char ljerr[] = "%s for block-local relative jump: off=%d";
2754
2755
#if 0
2756
    printf("code=%x off=%d %x %x\n", src->s.code,
2757
      off, src->s.jt, src->s.jf);
2758
#endif
2759
2760
16.9k
    if (!src->s.jt || !src->s.jf) {
2761
0
      free(offset);
2762
0
      conv_error(conv_state, ljerr, "no jmp destination", off);
2763
      /*NOTREACHED*/
2764
0
    }
2765
2766
16.9k
    jt = jf = 0;
2767
558k
    for (i = 0; i < slen; i++) {
2768
541k
      if (offset[i] == src->s.jt) {
2769
16.9k
        if (jt) {
2770
0
          free(offset);
2771
0
          conv_error(conv_state, ljerr, "multiple matches", off);
2772
          /*NOTREACHED*/
2773
0
        }
2774
2775
16.9k
        if (i - off - 1 >= 256) {
2776
0
          free(offset);
2777
0
          conv_error(conv_state, ljerr, "out-of-range jump", off);
2778
          /*NOTREACHED*/
2779
0
        }
2780
16.9k
        dst->jt = (u_char)(i - off - 1);
2781
16.9k
        jt++;
2782
16.9k
      }
2783
541k
      if (offset[i] == src->s.jf) {
2784
16.9k
        if (jf) {
2785
0
          free(offset);
2786
0
          conv_error(conv_state, ljerr, "multiple matches", off);
2787
          /*NOTREACHED*/
2788
0
        }
2789
16.9k
        if (i - off - 1 >= 256) {
2790
0
          free(offset);
2791
0
          conv_error(conv_state, ljerr, "out-of-range jump", off);
2792
          /*NOTREACHED*/
2793
0
        }
2794
16.9k
        dst->jf = (u_char)(i - off - 1);
2795
16.9k
        jf++;
2796
16.9k
      }
2797
541k
    }
2798
16.9k
    if (!jt || !jf) {
2799
0
      free(offset);
2800
0
      conv_error(conv_state, ljerr, "no destination found", off);
2801
      /*NOTREACHED*/
2802
0
    }
2803
16.9k
      }
2804
2.08M
filled:
2805
2.08M
    ++dst;
2806
2.08M
    ++off;
2807
2.08M
  }
2808
1.04M
  if (offset)
2809
951k
    free(offset);
2810
2811
#ifdef BDEBUG
2812
  if (dst - conv_state->fstart < NBIDS)
2813
    bids[dst - conv_state->fstart] = p->id + 1;
2814
#endif
2815
1.04M
  dst->code = (u_short)p->s.code;
2816
1.04M
  dst->k = p->s.k;
2817
1.04M
  if (JT(p)) {
2818
    /* number of extra jumps inserted */
2819
998k
    u_char extrajmps = 0;
2820
998k
    off = JT(p)->offset - (p->offset + slen) - 1;
2821
998k
    if (off >= 256) {
2822
        /* offset too large for branch, must add a jump */
2823
33.6k
        if (p->longjt == 0) {
2824
      /* mark this instruction and retry */
2825
2.65k
      p->longjt++;
2826
2.65k
      return(0);
2827
2.65k
        }
2828
31.0k
        dst->jt = extrajmps;
2829
31.0k
        extrajmps++;
2830
31.0k
        dst[extrajmps].code = BPF_JMP|BPF_JA;
2831
31.0k
        dst[extrajmps].k = off - extrajmps;
2832
31.0k
    }
2833
964k
    else
2834
964k
        dst->jt = (u_char)off;
2835
995k
    off = JF(p)->offset - (p->offset + slen) - 1;
2836
995k
    if (off >= 256) {
2837
        /* offset too large for branch, must add a jump */
2838
42.2k
        if (p->longjf == 0) {
2839
      /* mark this instruction and retry */
2840
2.59k
      p->longjf++;
2841
2.59k
      return(0);
2842
2.59k
        }
2843
        /* branch if F to following jump */
2844
        /* if two jumps are inserted, F goes to second one */
2845
39.6k
        dst->jf = extrajmps;
2846
39.6k
        extrajmps++;
2847
39.6k
        dst[extrajmps].code = BPF_JMP|BPF_JA;
2848
39.6k
        dst[extrajmps].k = off - extrajmps;
2849
39.6k
    }
2850
953k
    else
2851
953k
        dst->jf = (u_char)off;
2852
995k
  }
2853
1.04M
  return (1);
2854
1.04M
}
2855
2856
2857
/*
2858
 * Convert flowgraph intermediate representation to the
2859
 * BPF array representation.  Set *lenp to the number of instructions.
2860
 *
2861
 * This routine does *NOT* leak the memory pointed to by fp.  It *must
2862
 * not* do free(fp) before returning fp; doing so would make no sense,
2863
 * as the BPF array pointed to by the return value of icode_to_fcode()
2864
 * must be valid - it's being returned for use in a bpf_program structure.
2865
 *
2866
 * If it appears that icode_to_fcode() is leaking, the problem is that
2867
 * the program using pcap_compile() is failing to free the memory in
2868
 * the BPF program when it's done - the leak is in the program, not in
2869
 * the routine that happens to be allocating the memory.  (By analogy, if
2870
 * a program calls fopen() without ever calling fclose() on the FILE *,
2871
 * it will leak the FILE structure; the leak is not in fopen(), it's in
2872
 * the program.)  Change the program to use pcap_freecode() when it's
2873
 * done with the filter program.  See the pcap man page.
2874
 */
2875
struct bpf_insn *
2876
icode_to_fcode(struct icode *ic, struct block *root, u_int *lenp,
2877
    char *errbuf)
2878
24.2k
{
2879
24.2k
  u_int n;
2880
24.2k
  struct bpf_insn *fp;
2881
24.2k
  conv_state_t conv_state;
2882
2883
24.2k
  conv_state.fstart = NULL;
2884
24.2k
  conv_state.errbuf = errbuf;
2885
24.2k
  if (setjmp(conv_state.top_ctx) != 0) {
2886
0
    free(conv_state.fstart);
2887
0
    return NULL;
2888
0
  }
2889
2890
  /*
2891
   * Loop doing convert_code_r() until no branches remain
2892
   * with too-large offsets.
2893
   */
2894
29.5k
  for (;;) {
2895
29.5k
      unMarkAll(ic);
2896
29.5k
      n = *lenp = count_stmts(ic, root);
2897
2898
29.5k
      fp = (struct bpf_insn *)malloc(sizeof(*fp) * n);
2899
29.5k
      if (fp == NULL) {
2900
0
    (void)snprintf(errbuf, PCAP_ERRBUF_SIZE,
2901
0
        "malloc");
2902
0
    return NULL;
2903
0
      }
2904
29.5k
      memset((char *)fp, 0, sizeof(*fp) * n);
2905
29.5k
      conv_state.fstart = fp;
2906
29.5k
      conv_state.ftail = fp + n;
2907
2908
29.5k
      unMarkAll(ic);
2909
29.5k
      if (convert_code_r(&conv_state, ic, root))
2910
24.2k
    break;
2911
5.24k
      free(fp);
2912
5.24k
  }
2913
2914
24.2k
  return fp;
2915
24.2k
}
2916
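/*
 * To reinforce the ownership note above: the usual calling pattern, in
 * which the application - not libpcap - frees the generated program
 * with pcap_freecode().  A sketch; apply_filter() is a hypothetical
 * helper, and 'p' is assumed to be an already-activated handle:
 */
#include <pcap.h>

static int
apply_filter(pcap_t *p, const char *expr)
{
  struct bpf_program prog;

  /* pcap_compile() ends up in icode_to_fcode(); the insn array
   * it allocates is owned by the caller from here on. */
  if (pcap_compile(p, &prog, expr, 1, PCAP_NETMASK_UNKNOWN) == -1)
    return (-1);
  if (pcap_setfilter(p, &prog) == -1) {
    pcap_freecode(&prog);   /* still ours to free on failure */
    return (-1);
  }
  pcap_freecode(&prog);     /* pcap_setfilter() keeps its own copy */
  return (0);
}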
2917
/*
2918
 * For icode_to_fcode() errors.
2919
 */
2920
static void PCAP_NORETURN
2921
conv_error(conv_state_t *conv_state, const char *fmt, ...)
2922
0
{
2923
0
  va_list ap;
2924
2925
0
  va_start(ap, fmt);
2926
0
  (void)vsnprintf(conv_state->errbuf,
2927
0
      PCAP_ERRBUF_SIZE, fmt, ap);
2928
0
  va_end(ap);
2929
0
  longjmp(conv_state->top_ctx, 1);
2930
  /* NOTREACHED */
2931
#ifdef _AIX
2932
  PCAP_UNREACHABLE
2933
#endif /* _AIX */
2934
0
}
2935
2936
/*
2937
 * Make a copy of a BPF program and put it in the "fcode" member of
2938
 * a "pcap_t".
2939
 *
2940
 * If we fail to allocate memory for the copy, fill in the "errbuf"
2941
 * member of the "pcap_t" with an error message, and return -1;
2942
 * otherwise, return 0.
2943
 */
2944
int
2945
install_bpf_program(pcap_t *p, struct bpf_program *fp)
2946
0
{
2947
0
  size_t prog_size;
2948
2949
  /*
2950
   * Validate the program.
2951
   */
2952
0
  if (!pcap_validate_filter(fp->bf_insns, fp->bf_len)) {
2953
0
    snprintf(p->errbuf, sizeof(p->errbuf),
2954
0
      "BPF program is not valid");
2955
0
    return (-1);
2956
0
  }
2957
2958
  /*
2959
   * Free up any already installed program.
2960
   */
2961
0
  pcap_freecode(&p->fcode);
2962
2963
0
  prog_size = sizeof(*fp->bf_insns) * fp->bf_len;
2964
0
  p->fcode.bf_len = fp->bf_len;
2965
0
  p->fcode.bf_insns = (struct bpf_insn *)malloc(prog_size);
2966
0
  if (p->fcode.bf_insns == NULL) {
2967
0
    pcap_fmt_errmsg_for_errno(p->errbuf, sizeof(p->errbuf),
2968
0
        errno, "malloc");
2969
0
    return (-1);
2970
0
  }
2971
0
  memcpy(p->fcode.bf_insns, fp->bf_insns, prog_size);
2972
0
  return (0);
2973
0
}
2974
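/*
 * install_bpf_program() is the fallback that capture modules use when
 * the device can't filter in the kernel: the program is copied into
 * p->fcode and applied to each packet in userland.  A module's
 * setfilter operation can then be little more than this (a
 * hypothetical module sketch):
 */
static int
my_pcap_setfilter(pcap_t *p, struct bpf_program *fp)
{
  return (install_bpf_program(p, fp));
}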
2975
#ifdef BDEBUG
2976
static void
2977
dot_dump_node(struct icode *ic, struct block *block, struct bpf_program *prog,
2978
    FILE *out)
2979
{
2980
  int icount, noffset;
2981
  int i;
2982
2983
  if (block == NULL || isMarked(ic, block))
2984
    return;
2985
  Mark(ic, block);
2986
2987
  icount = slength(block->stmts) + 1 + block->longjt + block->longjf;
2988
  noffset = min(block->offset + icount, (int)prog->bf_len);
2989
2990
  fprintf(out, "\tblock%u [shape=ellipse, id=\"block-%u\" label=\"BLOCK%u\\n", block->id, block->id, block->id);
2991
  for (i = block->offset; i < noffset; i++) {
2992
    fprintf(out, "\\n%s", bpf_image(prog->bf_insns + i, i));
2993
  }
2994
  fprintf(out, "\" tooltip=\"");
2995
  for (i = 0; i < BPF_MEMWORDS; i++)
2996
    if (block->val[i] != VAL_UNKNOWN)
2997
      fprintf(out, "val[%d]=%d ", i, block->val[i]);
2998
  fprintf(out, "val[A]=%d ", block->val[A_ATOM]);
2999
  fprintf(out, "val[X]=%d", block->val[X_ATOM]);
3000
  fprintf(out, "\"");
3001
  if (JT(block) == NULL)
3002
    fprintf(out, ", peripheries=2");
3003
  fprintf(out, "];\n");
3004
3005
  dot_dump_node(ic, JT(block), prog, out);
3006
  dot_dump_node(ic, JF(block), prog, out);
3007
}
3008
3009
static void
3010
dot_dump_edge(struct icode *ic, struct block *block, FILE *out)
3011
{
3012
  if (block == NULL || isMarked(ic, block))
3013
    return;
3014
  Mark(ic, block);
3015
3016
  if (JT(block)) {
3017
    fprintf(out, "\t\"block%u\":se -> \"block%u\":n [label=\"T\"]; \n",
3018
        block->id, JT(block)->id);
3019
    fprintf(out, "\t\"block%u\":sw -> \"block%u\":n [label=\"F\"]; \n",
3020
         block->id, JF(block)->id);
3021
  }
3022
  dot_dump_edge(ic, JT(block), out);
3023
  dot_dump_edge(ic, JF(block), out);
3024
}
3025
3026
/* Output the block CFG using the graphviz/DOT language.
3027
 * In the CFG, each block's code, the value index for each register at EXIT,
3028
 * and the jump relationships are shown.
3029
 *
3030
 * example DOT for BPF `ip src host 1.1.1.1' is:
3031
    digraph BPF {
3032
      block0 [shape=ellipse, id="block-0" label="BLOCK0\n\n(000) ldh      [12]\n(001) jeq      #0x800           jt 2  jf 5" tooltip="val[A]=0 val[X]=0"];
3033
      block1 [shape=ellipse, id="block-1" label="BLOCK1\n\n(002) ld       [26]\n(003) jeq      #0x1010101       jt 4  jf 5" tooltip="val[A]=0 val[X]=0"];
3034
      block2 [shape=ellipse, id="block-2" label="BLOCK2\n\n(004) ret      #68" tooltip="val[A]=0 val[X]=0", peripheries=2];
3035
      block3 [shape=ellipse, id="block-3" label="BLOCK3\n\n(005) ret      #0" tooltip="val[A]=0 val[X]=0", peripheries=2];
3036
      "block0":se -> "block1":n [label="T"];
3037
      "block0":sw -> "block3":n [label="F"];
3038
      "block1":se -> "block2":n [label="T"];
3039
      "block1":sw -> "block3":n [label="F"];
3040
    }
3041
 *
3042
 *  After installing graphviz from https://www.graphviz.org/, save it as bpf.dot
3043
 *  and run `dot -Tpng -O bpf.dot' to draw the graph.
3044
 */
3045
static int
3046
dot_dump(struct icode *ic, char *errbuf)
3047
{
3048
  struct bpf_program f;
3049
  FILE *out = stdout;
3050
3051
  memset(bids, 0, sizeof bids);
3052
  f.bf_insns = icode_to_fcode(ic, ic->root, &f.bf_len, errbuf);
3053
  if (f.bf_insns == NULL)
3054
    return -1;
3055
3056
  fprintf(out, "digraph BPF {\n");
3057
  unMarkAll(ic);
3058
  dot_dump_node(ic, ic->root, &f, out);
3059
  unMarkAll(ic);
3060
  dot_dump_edge(ic, ic->root, out);
3061
  fprintf(out, "}\n");
3062
3063
  free((char *)f.bf_insns);
3064
  return 0;
3065
}
3066
3067
static int
3068
plain_dump(struct icode *ic, char *errbuf)
3069
{
3070
  struct bpf_program f;
3071
3072
  memset(bids, 0, sizeof bids);
3073
  f.bf_insns = icode_to_fcode(ic, ic->root, &f.bf_len, errbuf);
3074
  if (f.bf_insns == NULL)
3075
    return -1;
3076
  bpf_dump(&f, 1);
3077
  putchar('\n');
3078
  free((char *)f.bf_insns);
3079
  return 0;
3080
}
3081
3082
static void
3083
opt_dump(opt_state_t *opt_state, struct icode *ic)
3084
{
3085
  int status;
3086
  char errbuf[PCAP_ERRBUF_SIZE];
3087
3088
  /*
3089
   * If the CFG, in DOT format, is requested, output it rather than
3090
   * the code that would be generated from that graph.
3091
   */
3092
  if (pcap_print_dot_graph)
3093
    status = dot_dump(ic, errbuf);
3094
  else
3095
    status = plain_dump(ic, errbuf);
3096
  if (status == -1)
3097
    opt_error(opt_state, "opt_dump: icode_to_fcode failed: %s", errbuf);
3098
}
3099
#endif