Coverage Report

Created: 2023-12-14 14:08

/src/libpcap/optimize.c
Line
Count
Source
1
/*
2
 * Copyright (c) 1988, 1989, 1990, 1991, 1993, 1994, 1995, 1996
3
 *  The Regents of the University of California.  All rights reserved.
4
 *
5
 * Redistribution and use in source and binary forms, with or without
6
 * modification, are permitted provided that: (1) source code distributions
7
 * retain the above copyright notice and this paragraph in its entirety, (2)
8
 * distributions including binary code include the above copyright notice and
9
 * this paragraph in its entirety in the documentation or other materials
10
 * provided with the distribution, and (3) all advertising materials mentioning
11
 * features or use of this software display the following acknowledgement:
12
 * ``This product includes software developed by the University of California,
13
 * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
14
 * the University nor the names of its contributors may be used to endorse
15
 * or promote products derived from this software without specific prior
16
 * written permission.
17
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
18
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
19
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
20
 *
21
 *  Optimization module for BPF code intermediate representation.
22
 */
23
24
#ifdef HAVE_CONFIG_H
25
#include <config.h>
26
#endif
27
28
#include <pcap-types.h>
29
30
#include <stdio.h>
31
#include <stdlib.h>
32
#include <memory.h>
33
#include <setjmp.h>
34
#include <string.h>
35
#include <limits.h> /* for SIZE_MAX */
36
#include <errno.h>
37
38
#include "pcap-int.h"
39
40
#include "gencode.h"
41
#include "optimize.h"
42
#include "diag-control.h"
43
44
#ifdef HAVE_OS_PROTO_H
45
#include "os-proto.h"
46
#endif
47
48
#ifdef BDEBUG
49
/*
50
 * The internal "debug printout" flag for the filter expression optimizer.
51
 * The code to print that stuff is present only if BDEBUG is defined, so
52
 * the flag, and the routine to set it, are defined only if BDEBUG is
53
 * defined.
54
 */
55
static int pcap_optimizer_debug;
56
57
/*
58
 * Routine to set that flag.
59
 *
60
 * This is intended for libpcap developers, not for general use.
61
 * If you want to set these in a program, you'll have to declare this
62
 * routine yourself, with the appropriate DLL import attribute on Windows;
63
 * it's not declared in any header file, and won't be declared in any
64
 * header file provided by libpcap.
65
 */
66
PCAP_API void pcap_set_optimizer_debug(int value);
67
68
PCAP_API_DEF void
69
pcap_set_optimizer_debug(int value)
70
{
71
  pcap_optimizer_debug = value;
72
}
73
74
/*
75
 * The internal "print dot graph" flag for the filter expression optimizer.
76
 * The code to print that stuff is present only if BDEBUG is defined, so
77
 * the flag, and the routine to set it, are defined only if BDEBUG is
78
 * defined.
79
 */
80
static int pcap_print_dot_graph;
81
82
/*
83
 * Routine to set that flag.
84
 *
85
 * This is intended for libpcap developers, not for general use.
86
 * If you want to set these in a program, you'll have to declare this
87
 * routine yourself, with the appropriate DLL import attribute on Windows;
88
 * it's not declared in any header file, and won't be declared in any
89
 * header file provided by libpcap.
90
 */
91
PCAP_API void pcap_set_print_dot_graph(int value);
92
93
PCAP_API_DEF void
94
pcap_set_print_dot_graph(int value)
95
{
96
  pcap_print_dot_graph = value;
97
}
98
99
#endif
100
101
/*
102
 * lowest_set_bit().
103
 *
104
 * Takes a 32-bit integer as an argument.
105
 *
106
 * If handed a non-zero value, returns the index of the lowest set bit,
107
 * counting upwards from zero.
108
 *
109
 * If handed zero, the results are platform- and compiler-dependent.
110
 * Keep it out of the light, don't give it any water, don't feed it
111
 * after midnight, and don't pass zero to it.
112
 *
113
 * This is the same as the count of trailing zeroes in the word.
114
 */
115
#if PCAP_IS_AT_LEAST_GNUC_VERSION(3,4)
116
  /*
117
   * GCC 3.4 and later; we have __builtin_ctz().
118
   */
119
14.7M
  #define lowest_set_bit(mask) ((u_int)__builtin_ctz(mask))
120
#elif defined(_MSC_VER)
121
  /*
122
   * Visual Studio; we support only 2005 and later, so use
123
   * _BitScanForward().
124
   */
125
#include <intrin.h>
126
127
#ifndef __clang__
128
#pragma intrinsic(_BitScanForward)
129
#endif
130
131
static __forceinline u_int
132
lowest_set_bit(int mask)
133
{
134
  unsigned long bit;
135
136
  /*
137
   * Don't sign-extend mask if long is longer than int.
138
   * (It's currently not, in MSVC, even on 64-bit platforms, but....)
139
   */
140
  if (_BitScanForward(&bit, (unsigned int)mask) == 0)
141
    abort();  /* mask is zero */
142
  return (u_int)bit;
143
}
144
#elif defined(MSDOS) && defined(__DJGPP__)
145
  /*
146
   * MS-DOS with DJGPP, which declares ffs() in <string.h>, which
147
   * we've already included.
148
   */
149
  #define lowest_set_bit(mask)  ((u_int)(ffs((mask)) - 1))
150
#elif (defined(MSDOS) && defined(__WATCOMC__)) || defined(STRINGS_H_DECLARES_FFS)
151
  /*
152
   * MS-DOS with Watcom C, which has <strings.h> and declares ffs() there,
153
 * or some other platform (UN*X conforming to a sufficiently recent version
154
   * of the Single UNIX Specification).
155
   */
156
  #include <strings.h>
157
  #define lowest_set_bit(mask)  (u_int)((ffs((mask)) - 1))
158
#else
159
/*
160
 * None of the above.
161
 * Use a perfect-hash-function-based function.
162
 */
163
static u_int
164
lowest_set_bit(int mask)
165
{
166
  unsigned int v = (unsigned int)mask;
167
168
  static const u_int MultiplyDeBruijnBitPosition[32] = {
169
    0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
170
    31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
171
  };
172
173
  /*
174
   * We strip off all but the lowermost set bit (v & ~v),
175
   * and perform a minimal perfect hash on it to look up the
176
   * number of low-order zero bits in a table.
177
   *
178
   * See:
179
   *
180
   *  http://7ooo.mooo.com/text/ComputingTrailingZerosHOWTO.pdf
181
   *
182
   *  http://supertech.csail.mit.edu/papers/debruijn.pdf
183
   */
184
  return (MultiplyDeBruijnBitPosition[((v & -v) * 0x077CB531U) >> 27]);
185
}
186
#endif
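
The De Bruijn fallback above deserves a concrete check. Below is a minimal, hedged sketch (a hypothetical standalone test harness, not part of libpcap) that compares the same table and multiplier against a naive trailing-zero loop; it assumes a 32-bit unsigned int, as the code above does:

    /* Hedged sketch: check the De Bruijn lowest-set-bit lookup against a
     * naive trailing-zero count.  Assumes 32-bit unsigned int. */
    #include <assert.h>
    #include <stdio.h>

    static unsigned int debruijn_ctz(unsigned int v)
    {
        static const unsigned int tbl[32] = {
            0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
            31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
        };
        /* v & -v isolates the lowest set bit; the multiply sends each of
         * the 32 possible one-bit values to a distinct 5-bit index. */
        return tbl[((v & -v) * 0x077CB531U) >> 27];
    }

    static unsigned int naive_ctz(unsigned int v)
    {
        unsigned int n = 0;
        while ((v & 1) == 0) {
            v >>= 1;
            n++;
        }
        return n;
    }

    int main(void)
    {
        for (unsigned int bit = 0; bit < 32; bit++) {
            unsigned int v = 1U << bit;
            assert(debruijn_ctz(v) == naive_ctz(v));
            assert(debruijn_ctz(v | 0x80000000U) == naive_ctz(v | 0x80000000U));
        }
        printf("De Bruijn lookup agrees with the naive count\n");
        return 0;
    }

The multiply by 0x077CB531U maps each of the 32 one-bit values to a distinct top-5-bit index, which is why a 32-entry table suffices.
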
187
188
/*
189
 * Represents a deleted instruction.
190
 */
191
136M
#define NOP -1
192
193
/*
194
 * Register numbers for use-def values.
195
 * 0 through BPF_MEMWORDS-1 represent the corresponding scratch memory
196
 * location.  A_ATOM is the accumulator and X_ATOM is the index
197
 * register.
198
 */
199
68.5M
#define A_ATOM BPF_MEMWORDS
200
14.3M
#define X_ATOM (BPF_MEMWORDS+1)
201
202
/*
203
 * This define is used to represent *both* the accumulator and
204
 * x register in use-def computations.
205
 * Currently, the use-def code assumes only one definition per instruction.
206
 */
207
23.3M
#define AX_ATOM N_ATOMS
208
209
/*
210
 * These data structures are used in a Cocke and Schwartz style
211
 * value numbering scheme.  Since the flowgraph is acyclic,
212
 * exit values can be propagated from a node's predecessors
213
 * provided it is uniquely defined.
214
 */
215
struct valnode {
216
  int code;
217
  bpf_u_int32 v0, v1;
218
  int val;    /* the value number */
219
  struct valnode *next;
220
};
221
222
/* Integer constants mapped with the load immediate opcode. */
223
5.50M
#define K(i) F(opt_state, BPF_LD|BPF_IMM|BPF_W, i, 0U)
224
225
struct vmapinfo {
226
  int is_const;
227
  bpf_u_int32 const_val;
228
};
229
230
typedef struct {
231
  /*
232
   * Place to longjmp to on an error.
233
   */
234
  jmp_buf top_ctx;
235
236
  /*
237
   * The buffer into which to put error message.
238
   */
239
  char *errbuf;
240
241
  /*
242
   * A flag to indicate that further optimization is needed.
243
   * Iterative passes are continued until a given pass yields no
244
   * code simplification or branch movement.
245
   */
246
  int done;
247
248
  /*
249
   * XXX - detect loops that do nothing but repeated AND/OR pullups
250
   * and edge moves.
251
   * If 100 passes in a row do nothing but that, treat that as a
252
   * sign that we're in a loop that just shuffles in a cycle in
253
   * which each pass just shuffles the code and we eventually
254
   * get back to the original configuration.
255
   *
256
   * XXX - we need a non-heuristic way of detecting, or preventing,
257
   * such a cycle.
258
   */
259
  int non_branch_movement_performed;
260
261
  u_int n_blocks;   /* number of blocks in the CFG; guaranteed to be > 0, as it's a RET instruction at a minimum */
262
  struct block **blocks;
263
  u_int n_edges;    /* twice n_blocks, so guaranteed to be > 0 */
264
  struct edge **edges;
265
266
  /*
267
   * A bit vector set representation of the dominators.
268
   * We round up the set size to the next power of two.
269
   */
270
  u_int nodewords;  /* number of 32-bit words for a bit vector of "number of nodes" bits; guaranteed to be > 0 */
271
  u_int edgewords;  /* number of 32-bit words for a bit vector of "number of edges" bits; guaranteed to be > 0 */
272
  struct block **levels;
273
  bpf_u_int32 *space;
274
275
41.7M
#define BITS_PER_WORD (8*sizeof(bpf_u_int32))
276
/*
277
 * True if 'a' is in uset p.
278
 */
279
2.94M
#define SET_MEMBER(p, a) \
280
2.94M
((p)[(unsigned)(a) / BITS_PER_WORD] & ((bpf_u_int32)1 << ((unsigned)(a) % BITS_PER_WORD)))
281
282
/*
283
 * Add 'a' to uset p.
284
 */
285
10.5M
#define SET_INSERT(p, a) \
286
10.5M
(p)[(unsigned)(a) / BITS_PER_WORD] |= ((bpf_u_int32)1 << ((unsigned)(a) % BITS_PER_WORD))
287
288
/*
289
 * Delete 'a' from uset p.
290
 */
291
#define SET_DELETE(p, a) \
292
(p)[(unsigned)(a) / BITS_PER_WORD] &= ~((bpf_u_int32)1 << ((unsigned)(a) % BITS_PER_WORD))
293
294
/*
295
 * a := a intersect b
296
 * n must be guaranteed to be > 0
297
 */
298
13.3M
#define SET_INTERSECT(a, b, n)\
299
13.3M
{\
300
13.3M
  register bpf_u_int32 *_x = a, *_y = b;\
301
13.3M
  register u_int _n = n;\
302
58.6M
  do *_x++ &= *_y++; while (--_n != 0);\
303
13.3M
}
304
305
/*
306
 * a := a - b
307
 * n must be guaranteed to be > 0
308
 */
309
#define SET_SUBTRACT(a, b, n)\
310
{\
311
  register bpf_u_int32 *_x = a, *_y = b;\
312
  register u_int _n = n;\
313
  do *_x++ &=~ *_y++; while (--_n != 0);\
314
}
315
316
/*
317
 * a := a union b
318
 * n must be guaranteed to be > 0
319
 */
320
4.44M
#define SET_UNION(a, b, n)\
321
4.44M
{\
322
4.44M
  register bpf_u_int32 *_x = a, *_y = b;\
323
4.44M
  register u_int _n = n;\
324
12.7M
  do *_x++ |= *_y++; while (--_n != 0);\
325
4.44M
}
326
327
  uset all_dom_sets;
328
  uset all_closure_sets;
329
  uset all_edge_sets;
330
331
8.58M
#define MODULUS 213
332
  struct valnode *hashtbl[MODULUS];
333
  bpf_u_int32 curval;
334
  bpf_u_int32 maxval;
335
336
  struct vmapinfo *vmap;
337
  struct valnode *vnode_base;
338
  struct valnode *next_vnode;
339
} opt_state_t;
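
To make the uset bit-vector idiom above concrete, here is a minimal hedged sketch; the bpf_u_int32 typedef and the two macros are local copies so it compiles on its own, and the explicit word loop plays the role of SET_INTERSECT:

    /* Hedged sketch: node-ID sets as arrays of 32-bit words.  Membership
     * and insertion are single-bit operations on one word. */
    #include <stdio.h>

    typedef unsigned int bpf_u_int32;   /* local stand-in for the pcap type */

    #define BITS_PER_WORD (8 * sizeof(bpf_u_int32))
    #define SET_MEMBER(p, a) \
        ((p)[(a) / BITS_PER_WORD] & ((bpf_u_int32)1 << ((a) % BITS_PER_WORD)))
    #define SET_INSERT(p, a) \
        ((p)[(a) / BITS_PER_WORD] |= ((bpf_u_int32)1 << ((a) % BITS_PER_WORD)))

    int main(void)
    {
        bpf_u_int32 dom_a[2] = { 0, 0 };    /* room for 64 node IDs */
        bpf_u_int32 dom_b[2] = { 0, 0 };
        unsigned int i;

        SET_INSERT(dom_a, 3);
        SET_INSERT(dom_a, 40);
        SET_INSERT(dom_b, 40);
        SET_INSERT(dom_b, 63);

        for (i = 0; i < 2; i++)     /* a := a intersect b, word by word */
            dom_a[i] &= dom_b[i];

        for (i = 0; i < 64; i++)
            if (SET_MEMBER(dom_a, i))
                printf("node %u survives the intersection\n", i);
        return 0;                   /* prints only node 40 */
    }
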
340
341
typedef struct {
342
  /*
343
   * Place to longjmp to on an error.
344
   */
345
  jmp_buf top_ctx;
346
347
  /*
348
   * The buffer into which to put error message.
349
   */
350
  char *errbuf;
351
352
  /*
353
   * Some pointers used to convert the basic block form of the code,
354
   * into the array form that BPF requires.  'fstart' will point to
355
   * the malloc'd array while 'ftail' is used during the recursive
356
   * traversal.
357
   */
358
  struct bpf_insn *fstart;
359
  struct bpf_insn *ftail;
360
} conv_state_t;
361
362
static void opt_init(opt_state_t *, struct icode *);
363
static void opt_cleanup(opt_state_t *);
364
static void PCAP_NORETURN opt_error(opt_state_t *, const char *, ...)
365
    PCAP_PRINTFLIKE(2, 3);
366
367
static void intern_blocks(opt_state_t *, struct icode *);
368
369
static void find_inedges(opt_state_t *, struct block *);
370
#ifdef BDEBUG
371
static void opt_dump(opt_state_t *, struct icode *);
372
#endif
373
374
#ifndef MAX
375
2.22M
#define MAX(a,b) ((a)>(b)?(a):(b))
376
#endif
377
378
static void
379
find_levels_r(opt_state_t *opt_state, struct icode *ic, struct block *b)
380
4.68M
{
381
4.68M
  int level;
382
383
4.68M
  if (isMarked(ic, b))
384
2.05M
    return;
385
386
2.63M
  Mark(ic, b);
387
2.63M
  b->link = 0;
388
389
2.63M
  if (JT(b)) {
390
2.22M
    find_levels_r(opt_state, ic, JT(b));
391
2.22M
    find_levels_r(opt_state, ic, JF(b));
392
2.22M
    level = MAX(JT(b)->level, JF(b)->level) + 1;
393
2.22M
  } else
394
411k
    level = 0;
395
2.63M
  b->level = level;
396
2.63M
  b->link = opt_state->levels[level];
397
2.63M
  opt_state->levels[level] = b;
398
2.63M
}
399
400
/*
401
 * Level graph.  The levels go from 0 at the leaves to
402
 * N_LEVELS at the root.  The opt_state->levels[] array points to the
403
 * first node of the level list, whose elements are linked
404
 * with the 'link' field of the struct block.
405
 */
406
static void
407
find_levels(opt_state_t *opt_state, struct icode *ic)
408
248k
{
409
248k
  memset((char *)opt_state->levels, 0, opt_state->n_blocks * sizeof(*opt_state->levels));
410
248k
  unMarkAll(ic);
411
248k
  find_levels_r(opt_state, ic, ic->root);
412
248k
}
413
414
/*
415
 * Find dominator relationships.
416
 * Assumes graph has been leveled.
417
 */
418
static void
419
find_dom(opt_state_t *opt_state, struct block *root)
420
248k
{
421
248k
  u_int i;
422
248k
  int level;
423
248k
  struct block *b;
424
248k
  bpf_u_int32 *x;
425
426
  /*
427
   * Initialize sets to contain all nodes.
428
   */
429
248k
  x = opt_state->all_dom_sets;
430
  /*
431
   * In opt_init(), we've made sure the product doesn't overflow.
432
   */
433
248k
  i = opt_state->n_blocks * opt_state->nodewords;
434
23.0M
  while (i != 0) {
435
22.7M
    --i;
436
22.7M
    *x++ = 0xFFFFFFFFU;
437
22.7M
  }
438
  /* Root starts off empty. */
439
635k
  for (i = opt_state->nodewords; i != 0;) {
440
387k
    --i;
441
387k
    root->dom[i] = 0;
442
387k
  }
443
444
  /* root->level is the highest level number found. */
445
2.66M
  for (level = root->level; level >= 0; --level) {
446
5.04M
    for (b = opt_state->levels[level]; b; b = b->link) {
447
2.63M
      SET_INSERT(b->dom, b->id);
448
2.63M
      if (JT(b) == 0)
449
411k
        continue;
450
2.22M
      SET_INTERSECT(JT(b)->dom, b->dom, opt_state->nodewords);
451
2.22M
      SET_INTERSECT(JF(b)->dom, b->dom, opt_state->nodewords);
452
2.22M
    }
453
2.41M
  }
454
248k
}
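
For a hedged, worked instance of this scheme, consider a four-node diamond CFG (0 -> 1, 0 -> 2, 1 -> 3, 2 -> 3); the node IDs, the hard-coded level order, and the one-word sets below are illustrative assumptions, small enough to avoid the multi-word loops:

    /* Hedged sketch: dominator sets on a diamond CFG, one 32-bit word
     * per set.  All sets start "full" except the root, which starts
     * empty, exactly as find_dom() initializes them. */
    #include <stdio.h>

    int main(void)
    {
        /* dom[n] = bit mask of nodes dominating node n */
        unsigned int dom[4] = { 0x0U, ~0x0U, ~0x0U, ~0x0U };

        /* Walk in level order from the root, as find_dom() does. */
        dom[0] |= 1U << 0;              /* SET_INSERT(b->dom, b->id) */
        dom[1] &= dom[0];               /* edge 0 -> 1 */
        dom[2] &= dom[0];               /* edge 0 -> 2 */
        dom[1] |= 1U << 1;
        dom[3] &= dom[1];               /* edge 1 -> 3 */
        dom[2] |= 1U << 2;
        dom[3] &= dom[2];               /* edge 2 -> 3 */
        dom[3] |= 1U << 3;

        for (int n = 0; n < 4; n++)
            printf("dom(%d) = 0x%x\n", n, dom[n]);
        /* dom(3) = 0x9: the join node is dominated by the root and by
         * itself, but by neither branch node. */
        return 0;
    }

Because the flowgraph is acyclic and leveled, this single root-to-leaves pass reaches the fixed point; no iteration is needed.
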
455
456
static void
457
propedom(opt_state_t *opt_state, struct edge *ep)
458
5.26M
{
459
5.26M
  SET_INSERT(ep->edom, ep->id);
460
5.26M
  if (ep->succ) {
461
4.44M
    SET_INTERSECT(ep->succ->et.edom, ep->edom, opt_state->edgewords);
462
4.44M
    SET_INTERSECT(ep->succ->ef.edom, ep->edom, opt_state->edgewords);
463
4.44M
  }
464
5.26M
}
465
466
/*
467
 * Compute edge dominators.
468
 * Assumes graph has been leveled and predecessors established.
469
 */
470
static void
471
find_edom(opt_state_t *opt_state, struct block *root)
472
248k
{
473
248k
  u_int i;
474
248k
  uset x;
475
248k
  int level;
476
248k
  struct block *b;
477
478
248k
  x = opt_state->all_edge_sets;
479
  /*
480
   * In opt_init(), we've made sure the product doesn't overflow.
481
   */
482
84.0M
  for (i = opt_state->n_edges * opt_state->edgewords; i != 0; ) {
483
83.8M
    --i;
484
83.8M
    x[i] = 0xFFFFFFFFU;
485
83.8M
  }
486
487
  /* root->level is the highest level number found. */
488
248k
  memset(root->et.edom, 0, opt_state->edgewords * sizeof(*(uset)0));
489
248k
  memset(root->ef.edom, 0, opt_state->edgewords * sizeof(*(uset)0));
490
2.66M
  for (level = root->level; level >= 0; --level) {
491
5.04M
    for (b = opt_state->levels[level]; b != 0; b = b->link) {
492
2.63M
      propedom(opt_state, &b->et);
493
2.63M
      propedom(opt_state, &b->ef);
494
2.63M
    }
495
2.41M
  }
496
248k
}
497
498
/*
499
 * Find the backwards transitive closure of the flow graph.  These sets
500
 * are backwards in the sense that we find the set of nodes that reach
501
 * a given node, not the set of nodes that can be reached by a node.
502
 *
503
 * Assumes graph has been leveled.
504
 */
505
static void
506
find_closure(opt_state_t *opt_state, struct block *root)
507
248k
{
508
248k
  int level;
509
248k
  struct block *b;
510
511
  /*
512
   * Initialize sets to contain no nodes.
513
   */
514
248k
  memset((char *)opt_state->all_closure_sets, 0,
515
248k
        opt_state->n_blocks * opt_state->nodewords * sizeof(*opt_state->all_closure_sets));
516
517
  /* root->level is the highest level number found. */
518
2.66M
  for (level = root->level; level >= 0; --level) {
519
5.04M
    for (b = opt_state->levels[level]; b; b = b->link) {
520
2.63M
      SET_INSERT(b->closure, b->id);
521
2.63M
      if (JT(b) == 0)
522
411k
        continue;
523
2.22M
      SET_UNION(JT(b)->closure, b->closure, opt_state->nodewords);
524
2.22M
      SET_UNION(JF(b)->closure, b->closure, opt_state->nodewords);
525
2.22M
    }
526
2.41M
  }
527
248k
}
528
529
/*
530
 * Return the register number that is used by s.
531
 *
532
 * Returns A_ATOM if A is used, X_ATOM if X is used, AX_ATOM if both A and X
533
 * are used, the scratch memory location's number if a scratch memory
534
 * location is used (e.g., 0 for M[0]), or -1 if none of those are used.
535
 *
536
 * The implementation should probably change to an array access.
537
 */
538
static int
539
atomuse(struct stmt *s)
540
34.5M
{
541
34.5M
  register int c = s->code;
542
543
34.5M
  if (c == NOP)
544
5.65M
    return -1;
545
546
28.8M
  switch (BPF_CLASS(c)) {
547
548
266k
  case BPF_RET:
549
266k
    return (BPF_RVAL(c) == BPF_A) ? A_ATOM :
550
266k
      (BPF_RVAL(c) == BPF_X) ? X_ATOM : -1;
551
552
11.3M
  case BPF_LD:
553
13.4M
  case BPF_LDX:
554
    /*
555
     * As there are fewer than 2^31 memory locations,
556
     * s->k should be convertible to int without problems.
557
     */
558
13.4M
    return (BPF_MODE(c) == BPF_IND) ? X_ATOM :
559
13.4M
      (BPF_MODE(c) == BPF_MEM) ? (int)s->k : -1;
560
561
5.35M
  case BPF_ST:
562
5.35M
    return A_ATOM;
563
564
0
  case BPF_STX:
565
0
    return X_ATOM;
566
567
4.37M
  case BPF_JMP:
568
7.90M
  case BPF_ALU:
569
7.90M
    if (BPF_SRC(c) == BPF_X)
570
3.04M
      return AX_ATOM;
571
4.86M
    return A_ATOM;
572
573
1.92M
  case BPF_MISC:
574
1.92M
    return BPF_MISCOP(c) == BPF_TXA ? X_ATOM : A_ATOM;
575
28.8M
  }
576
0
  abort();
577
  /* NOTREACHED */
578
28.8M
}
579
580
/*
581
 * Return the register number that is defined by 's'.  We assume that
582
 * a single stmt cannot define more than one register.  If no register
583
 * is defined, return -1.
584
 *
585
 * The implementation should probably change to an array access.
586
 */
587
static int
588
atomdef(struct stmt *s)
589
32.2M
{
590
32.2M
  if (s->code == NOP)
591
5.65M
    return -1;
592
593
26.6M
  switch (BPF_CLASS(s->code)) {
594
595
11.3M
  case BPF_LD:
596
14.9M
  case BPF_ALU:
597
14.9M
    return A_ATOM;
598
599
2.03M
  case BPF_LDX:
600
2.03M
    return X_ATOM;
601
602
5.35M
  case BPF_ST:
603
5.35M
  case BPF_STX:
604
5.35M
    return s->k;
605
606
1.92M
  case BPF_MISC:
607
1.92M
    return BPF_MISCOP(s->code) == BPF_TAX ? X_ATOM : A_ATOM;
608
26.6M
  }
609
2.42M
  return -1;
610
26.6M
}
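
The two switches above key on the standard BPF opcode fields. A hedged, self-contained sketch of that decoding follows; the mask and class values are copied from the usual <pcap/bpf.h> encodings so no pcap header is required:

    /* Hedged sketch: decode BPF opcodes with the standard field masks,
     * mirroring the switches in atomuse()/atomdef(). */
    #include <stdio.h>

    #define BPF_CLASS(code) ((code) & 0x07)
    #define BPF_LD          0x00
    #define BPF_ALU         0x04
    #define BPF_MODE(code)  ((code) & 0xe0)
    #define BPF_MEM         0x60
    #define BPF_SRC(code)   ((code) & 0x08)
    #define BPF_X           0x08

    int main(void)
    {
        int ld_mem = BPF_LD | BPF_MEM;          /* "ld M[k]" */
        int add_x = BPF_ALU | 0x00 /* BPF_ADD */ | BPF_X;

        /* "ld M[k]" uses scratch slot k: class LD, mode MEM. */
        printf("ld M[k]: class=%#x mode=%#x\n",
            BPF_CLASS(ld_mem), BPF_MODE(ld_mem));
        /* "add x" reads both A and X, which atomuse() reports as AX_ATOM. */
        printf("add x:   class=%#x src=%#x\n",
            BPF_CLASS(add_x), BPF_SRC(add_x));
        return 0;
    }
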
611
612
/*
613
 * Compute the sets of registers used, defined, and killed by 'b'.
614
 *
615
 * "Used" means that a statement in 'b' uses the register before any
616
 * statement in 'b' defines it, i.e. it uses the value left in
617
 * that register by a predecessor block of this block.
618
 * "Defined" means that a statement in 'b' defines it.
619
 * "Killed" means that a statement in 'b' defines it before any
620
 * statement in 'b' uses it, i.e. it kills the value left in that
621
 * register by a predecessor block of this block.
622
 */
623
static void
624
compute_local_ud(struct block *b)
625
2.63M
{
626
2.63M
  struct slist *s;
627
2.63M
  atomset def = 0, use = 0, killed = 0;
628
2.63M
  int atom;
629
630
20.4M
  for (s = b->stmts; s; s = s->next) {
631
17.7M
    if (s->s.code == NOP)
632
5.31M
      continue;
633
12.4M
    atom = atomuse(&s->s);
634
12.4M
    if (atom >= 0) {
635
8.41M
      if (atom == AX_ATOM) {
636
1.35M
        if (!ATOMELEM(def, X_ATOM))
637
0
          use |= ATOMMASK(X_ATOM);
638
1.35M
        if (!ATOMELEM(def, A_ATOM))
639
0
          use |= ATOMMASK(A_ATOM);
640
1.35M
      }
641
7.05M
      else if (atom < N_ATOMS) {
642
7.05M
        if (!ATOMELEM(def, atom))
643
124k
          use |= ATOMMASK(atom);
644
7.05M
      }
645
0
      else
646
0
        abort();
647
8.41M
    }
648
12.4M
    atom = atomdef(&s->s);
649
12.4M
    if (atom >= 0) {
650
12.4M
      if (!ATOMELEM(use, atom))
651
12.4M
        killed |= ATOMMASK(atom);
652
12.4M
      def |= ATOMMASK(atom);
653
12.4M
    }
654
12.4M
  }
655
2.63M
  if (BPF_CLASS(b->s.code) == BPF_JMP) {
656
    /*
657
     * XXX - what about RET?
658
     */
659
2.22M
    atom = atomuse(&b->s);
660
2.22M
    if (atom >= 0) {
661
2.22M
      if (atom == AX_ATOM) {
662
334k
        if (!ATOMELEM(def, X_ATOM))
663
6.76k
          use |= ATOMMASK(X_ATOM);
664
334k
        if (!ATOMELEM(def, A_ATOM))
665
6.76k
          use |= ATOMMASK(A_ATOM);
666
334k
      }
667
1.88M
      else if (atom < N_ATOMS) {
668
1.88M
        if (!ATOMELEM(def, atom))
669
76.2k
          use |= ATOMMASK(atom);
670
1.88M
      }
671
0
      else
672
0
        abort();
673
2.22M
    }
674
2.22M
  }
675
676
2.63M
  b->def = def;
677
2.63M
  b->kill = killed;
678
2.63M
  b->in_use = use;
679
2.63M
}
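
As a hedged illustration of the three rules in the comment above, the sketch below runs them over a toy two-register block; the toy_stmt records and the register numbering are illustrative assumptions, not libpcap's atom encoding:

    /* Hedged sketch: local use/def/kill over a toy two-register block.
     * A register is "used" if read before any local definition,
     * "killed" if defined before any local use, "defined" if written. */
    #include <stdio.h>

    enum { REG_A, REG_X };

    struct toy_stmt {
        int uses;   /* bit mask of registers read */
        int defs;   /* bit mask of registers written */
    };

    int main(void)
    {
        /* "tax" reads A and writes X; "ld #4" then overwrites A. */
        struct toy_stmt block[] = {
            { 1 << REG_A, 1 << REG_X },
            { 0,          1 << REG_A },
        };
        int use = 0, def = 0, kill = 0;
        int i, n = (int)(sizeof(block) / sizeof(block[0]));

        for (i = 0; i < n; i++) {
            /* reads of not-yet-defined registers are upward uses */
            use |= block[i].uses & ~def;
            /* writes over not-yet-used registers kill inherited values */
            kill |= block[i].defs & ~use;
            def |= block[i].defs;
        }
        printf("use=%#x def=%#x kill=%#x\n", use, def, kill);
        /* use=0x1: A is read first; kill=0x2: X is overwritten unread. */
        return 0;
    }
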
680
681
/*
682
 * Assume graph is already leveled.
683
 */
684
static void
685
find_ud(opt_state_t *opt_state, struct block *root)
686
248k
{
687
248k
  int i, maxlevel;
688
248k
  struct block *p;
689
690
  /*
691
   * root->level is the highest level number found;
692
   * count down from there.
693
   */
694
248k
  maxlevel = root->level;
695
2.66M
  for (i = maxlevel; i >= 0; --i)
696
5.04M
    for (p = opt_state->levels[i]; p; p = p->link) {
697
2.63M
      compute_local_ud(p);
698
2.63M
      p->out_use = 0;
699
2.63M
    }
700
701
2.41M
  for (i = 1; i <= maxlevel; ++i) {
702
4.38M
    for (p = opt_state->levels[i]; p; p = p->link) {
703
2.22M
      p->out_use |= JT(p)->in_use | JF(p)->in_use;
704
2.22M
      p->in_use |= p->out_use &~ p->kill;
705
2.22M
    }
706
2.16M
  }
707
248k
}
708
static void
709
init_val(opt_state_t *opt_state)
710
248k
{
711
248k
  opt_state->curval = 0;
712
248k
  opt_state->next_vnode = opt_state->vnode_base;
713
248k
  memset((char *)opt_state->vmap, 0, opt_state->maxval * sizeof(*opt_state->vmap));
714
248k
  memset((char *)opt_state->hashtbl, 0, sizeof opt_state->hashtbl);
715
248k
}
716
717
/*
718
 * Because we really don't have an IR, this stuff is a little messy.
719
 *
720
 * This routine looks in the table of existing value number for a value
721
 * with generated from an operation with the specified opcode and
722
 * the specified values.  If it finds it, it returns its value number,
723
 * otherwise it makes a new entry in the table and returns the
724
 * value number of that entry.
725
 */
726
static bpf_u_int32
727
F(opt_state_t *opt_state, int code, bpf_u_int32 v0, bpf_u_int32 v1)
728
8.58M
{
729
8.58M
  u_int hash;
730
8.58M
  bpf_u_int32 val;
731
8.58M
  struct valnode *p;
732
733
8.58M
  hash = (u_int)code ^ (v0 << 4) ^ (v1 << 8);
734
8.58M
  hash %= MODULUS;
735
736
9.24M
  for (p = opt_state->hashtbl[hash]; p; p = p->next)
737
5.31M
    if (p->code == code && p->v0 == v0 && p->v1 == v1)
738
4.65M
      return p->val;
739
740
  /*
741
   * Not found.  Allocate a new value, and assign it a new
742
   * value number.
743
   *
744
   * opt_state->curval starts out as 0, which means VAL_UNKNOWN; we
745
   * increment it before using it as the new value number, which
746
   * means we never assign VAL_UNKNOWN.
747
   *
748
   * XXX - unless we overflow, but we probably won't have 2^32-1
749
   * values; we treat 32 bits as effectively infinite.
750
   */
751
3.93M
  val = ++opt_state->curval;
752
3.93M
  if (BPF_MODE(code) == BPF_IMM &&
753
3.93M
      (BPF_CLASS(code) == BPF_LD || BPF_CLASS(code) == BPF_LDX)) {
754
2.32M
    opt_state->vmap[val].const_val = v0;
755
2.32M
    opt_state->vmap[val].is_const = 1;
756
2.32M
  }
757
3.93M
  p = opt_state->next_vnode++;
758
3.93M
  p->val = val;
759
3.93M
  p->code = code;
760
3.93M
  p->v0 = v0;
761
3.93M
  p->v1 = v1;
762
3.93M
  p->next = opt_state->hashtbl[hash];
763
3.93M
  opt_state->hashtbl[hash] = p;
764
765
3.93M
  return val;
766
8.58M
}
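
A hedged sketch of the same value-numbering idea: identical (code, v0, v1) triples get identical numbers, so a recomputation is recognized without comparing instruction sequences. The open-addressed table below is a simplified stand-in for the chained MODULUS-213 table above:

    /* Hedged sketch: tiny value-numbering table keyed on (code, v0, v1). */
    #include <stdio.h>

    #define TBL 64

    struct vn { int code; unsigned int v0, v1; unsigned int val; int used; };
    static struct vn tbl[TBL];
    static unsigned int curval;

    static unsigned int F(int code, unsigned int v0, unsigned int v1)
    {
        unsigned int h = ((unsigned int)code ^ (v0 << 4) ^ (v1 << 8)) % TBL;

        /* Linear probing instead of the chained buckets in optimize.c. */
        while (tbl[h].used) {
            if (tbl[h].code == code && tbl[h].v0 == v0 && tbl[h].v1 == v1)
                return tbl[h].val;      /* seen before: same number */
            h = (h + 1) % TBL;
        }
        tbl[h] = (struct vn){ code, v0, v1, ++curval, 1 };
        return curval;
    }

    int main(void)
    {
        unsigned int a = F(0x00 /* ld #k */, 2, 0);
        unsigned int b = F(0x00 /* ld #k */, 2, 0);
        unsigned int c = F(0x00 /* ld #k */, 3, 0);

        printf("a=%u b=%u c=%u\n", a, b, c);    /* a == b, c differs */
        return 0;
    }

As in the real code, numbering starts at 1 so that 0 can serve as VAL_UNKNOWN.
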
767
768
static inline void
769
vstore(struct stmt *s, bpf_u_int32 *valp, bpf_u_int32 newval, int alter)
770
10.4M
{
771
10.4M
  if (alter && newval != VAL_UNKNOWN && *valp == newval)
772
483k
    s->code = NOP;
773
9.98M
  else
774
9.98M
    *valp = newval;
775
10.4M
}
776
777
/*
778
 * Do constant-folding on binary operators.
779
 * (Unary operators are handled elsewhere.)
780
 */
781
static void
782
fold_op(opt_state_t *opt_state, struct stmt *s, bpf_u_int32 v0, bpf_u_int32 v1)
783
227k
{
784
227k
  bpf_u_int32 a, b;
785
786
227k
  a = opt_state->vmap[v0].const_val;
787
227k
  b = opt_state->vmap[v1].const_val;
788
789
227k
  switch (BPF_OP(s->code)) {
790
30.8k
  case BPF_ADD:
791
30.8k
    a += b;
792
30.8k
    break;
793
794
10.4k
  case BPF_SUB:
795
10.4k
    a -= b;
796
10.4k
    break;
797
798
59.4k
  case BPF_MUL:
799
59.4k
    a *= b;
800
59.4k
    break;
801
802
22.1k
  case BPF_DIV:
803
22.1k
    if (b == 0)
804
322
      opt_error(opt_state, "division by zero");
805
21.8k
    a /= b;
806
21.8k
    break;
807
808
19.7k
  case BPF_MOD:
809
19.7k
    if (b == 0)
810
1.54k
      opt_error(opt_state, "modulus by zero");
811
18.2k
    a %= b;
812
18.2k
    break;
813
814
54.6k
  case BPF_AND:
815
54.6k
    a &= b;
816
54.6k
    break;
817
818
11.8k
  case BPF_OR:
819
11.8k
    a |= b;
820
11.8k
    break;
821
822
17.2k
  case BPF_XOR:
823
17.2k
    a ^= b;
824
17.2k
    break;
825
826
708
  case BPF_LSH:
827
    /*
828
     * A left shift of more than the width of the type
829
     * is undefined in C; we'll just treat it as shifting
830
     * all the bits out.
831
     *
832
     * XXX - the BPF interpreter doesn't check for this,
833
     * so its behavior is dependent on the behavior of
834
     * the processor on which it's running.  There are
835
     * processors on which it shifts all the bits out
836
     * and processors on which it does no shift.
837
     */
838
708
    if (b < 32)
839
465
      a <<= b;
840
243
    else
841
243
      a = 0;
842
708
    break;
843
844
669
  case BPF_RSH:
845
    /*
846
     * A right shift of more than the width of the type
847
     * is undefined in C; we'll just treat it as shifting
848
     * all the bits out.
849
     *
850
     * XXX - the BPF interpreter doesn't check for this,
851
     * so its behavior is dependent on the behavior of
852
     * the processor on which it's running.  There are
853
     * processors on which it shifts all the bits out
854
     * and processors on which it does no shift.
855
     */
856
669
    if (b < 32)
857
455
      a >>= b;
858
214
    else
859
214
      a = 0;
860
669
    break;
861
862
0
  default:
863
0
    abort();
864
227k
  }
865
225k
  s->k = a;
866
225k
  s->code = BPF_LD|BPF_IMM;
867
  /*
868
   * XXX - optimizer loop detection.
869
   */
870
225k
  opt_state->non_branch_movement_performed = 1;
871
225k
  opt_state->done = 0;
872
225k
}
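
The clamping above matters because a C shift by 32 or more bits is undefined; this hedged standalone check (fold_lsh is a hypothetical helper, not a libpcap function) shows the ADD wraparound and the LSH clamp that fold_op() implements:

    /* Hedged sketch: fold_op()-style constant folding with unsigned
     * 32-bit wraparound and a shift clamp instead of undefined C
     * behavior for shift counts of 32 or more. */
    #include <stdio.h>
    #include <inttypes.h>

    static uint32_t fold_lsh(uint32_t a, uint32_t b)
    {
        return (b < 32) ? (uint32_t)(a << b) : 0;
    }

    int main(void)
    {
        uint32_t a = 0xFFFFFFF0U;

        printf("add 0x20: 0x%" PRIx32 "\n", (uint32_t)(a + 0x20U)); /* wraps to 0x10 */
        printf("lsh 4:    0x%" PRIx32 "\n", fold_lsh(a, 4));        /* 0xffffff00 */
        printf("lsh 40:   0x%" PRIx32 "\n", fold_lsh(a, 40));       /* clamped: 0 */
        return 0;
    }
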
873
874
static inline struct slist *
875
this_op(struct slist *s)
876
23.6M
{
877
29.2M
  while (s != 0 && s->s.code == NOP)
878
5.59M
    s = s->next;
879
23.6M
  return s;
880
23.6M
}
881
882
static void
883
opt_not(struct block *b)
884
0
{
885
0
  struct block *tmp = JT(b);
886
887
0
  JT(b) = JF(b);
888
0
  JF(b) = tmp;
889
0
}
890
891
static void
892
opt_peep(opt_state_t *opt_state, struct block *b)
893
2.42M
{
894
2.42M
  struct slist *s;
895
2.42M
  struct slist *next, *last;
896
2.42M
  bpf_u_int32 val;
897
898
2.42M
  s = b->stmts;
899
2.42M
  if (s == 0)
900
312k
    return;
901
902
2.10M
  last = s;
903
11.8M
  for (/*empty*/; /*empty*/; s = next) {
904
    /*
905
     * Skip over nops.
906
     */
907
11.8M
    s = this_op(s);
908
11.8M
    if (s == 0)
909
49.4k
      break;  /* nothing left in the block */
910
911
    /*
912
     * Find the next real instruction after that one
913
     * (skipping nops).
914
     */
915
11.7M
    next = this_op(s->next);
916
11.7M
    if (next == 0)
917
2.05M
      break;  /* no next instruction */
918
9.73M
    last = next;
919
920
    /*
921
     * st  M[k] --> st  M[k]
922
     * ldx M[k]   tax
923
     */
924
9.73M
    if (s->s.code == BPF_ST &&
925
9.73M
        next->s.code == (BPF_LDX|BPF_MEM) &&
926
9.73M
        s->s.k == next->s.k) {
927
      /*
928
       * XXX - optimizer loop detection.
929
       */
930
385k
      opt_state->non_branch_movement_performed = 1;
931
385k
      opt_state->done = 0;
932
385k
      next->s.code = BPF_MISC|BPF_TAX;
933
385k
    }
934
    /*
935
     * ld  #k --> ldx  #k
936
     * tax      txa
937
     */
938
9.73M
    if (s->s.code == (BPF_LD|BPF_IMM) &&
939
9.73M
        next->s.code == (BPF_MISC|BPF_TAX)) {
940
251k
      s->s.code = BPF_LDX|BPF_IMM;
941
251k
      next->s.code = BPF_MISC|BPF_TXA;
942
      /*
943
       * XXX - optimizer loop detection.
944
       */
945
251k
      opt_state->non_branch_movement_performed = 1;
946
251k
      opt_state->done = 0;
947
251k
    }
948
    /*
949
     * This is an ugly special case, but it happens
950
     * when you say tcp[k] or udp[k] where k is a constant.
951
     */
952
9.73M
    if (s->s.code == (BPF_LD|BPF_IMM)) {
953
1.98M
      struct slist *add, *tax, *ild;
954
955
      /*
956
       * Check that X isn't used on exit from this
957
       * block (which the optimizer might cause).
958
       * We know the code generator won't generate
959
       * any local dependencies.
960
       */
961
1.98M
      if (ATOMELEM(b->out_use, X_ATOM))
962
10.4k
        continue;
963
964
      /*
965
       * Check that the instruction following the ldi
966
       * is an addx, or it's an ldxms with an addx
967
       * following it (with 0 or more nops between the
968
       * ldxms and addx).
969
       */
970
1.97M
      if (next->s.code != (BPF_LDX|BPF_MSH|BPF_B))
971
1.97M
        add = next;
972
0
      else
973
0
        add = this_op(next->next);
974
1.97M
      if (add == 0 || add->s.code != (BPF_ALU|BPF_ADD|BPF_X))
975
1.97M
        continue;
976
977
      /*
978
       * Check that a tax follows that (with 0 or more
979
       * nops between them).
980
       */
981
0
      tax = this_op(add->next);
982
0
      if (tax == 0 || tax->s.code != (BPF_MISC|BPF_TAX))
983
0
        continue;
984
985
      /*
986
       * Check that an ild follows that (with 0 or more
987
       * nops between them).
988
       */
989
0
      ild = this_op(tax->next);
990
0
      if (ild == 0 || BPF_CLASS(ild->s.code) != BPF_LD ||
991
0
          BPF_MODE(ild->s.code) != BPF_IND)
992
0
        continue;
993
      /*
994
       * We want to turn this sequence:
995
       *
996
       * (004) ldi     #0x2   {s}
997
       * (005) ldxms   [14]   {next}  -- optional
998
       * (006) addx     {add}
999
       * (007) tax      {tax}
1000
       * (008) ild     [x+0]    {ild}
1001
       *
1002
       * into this sequence:
1003
       *
1004
       * (004) nop
1005
       * (005) ldxms   [14]
1006
       * (006) nop
1007
       * (007) nop
1008
       * (008) ild     [x+2]
1009
       *
1010
       * XXX We need to check that X is not
1011
       * subsequently used, because we want to change
1012
       * what'll be in it after this sequence.
1013
       *
1014
       * We know we can eliminate the accumulator
1015
       * modifications earlier in the sequence since
1016
       * it is defined by the last stmt of this sequence
1017
       * (i.e., the last statement of the sequence loads
1018
       * a value into the accumulator, so we can eliminate
1019
       * earlier operations on the accumulator).
1020
       */
1021
0
      ild->s.k += s->s.k;
1022
0
      s->s.code = NOP;
1023
0
      add->s.code = NOP;
1024
0
      tax->s.code = NOP;
1025
      /*
1026
       * XXX - optimizer loop detection.
1027
       */
1028
0
      opt_state->non_branch_movement_performed = 1;
1029
0
      opt_state->done = 0;
1030
0
    }
1031
9.73M
  }
1032
  /*
1033
   * If the comparison at the end of a block is an equality
1034
   * comparison against a constant, and nobody uses the value
1035
   * we leave in the A register at the end of a block, and
1036
   * the operation preceding the comparison is an arithmetic
1037
   * operation, we can sometime optimize it away.
1038
   */
1039
2.10M
  if (b->s.code == (BPF_JMP|BPF_JEQ|BPF_K) &&
1040
2.10M
      !ATOMELEM(b->out_use, A_ATOM)) {
1041
    /*
1042
     * We can optimize away certain subtractions of the
1043
     * X register.
1044
     */
1045
1.55M
    if (last->s.code == (BPF_ALU|BPF_SUB|BPF_X)) {
1046
54.0k
      val = b->val[X_ATOM];
1047
54.0k
      if (opt_state->vmap[val].is_const) {
1048
        /*
1049
         * If we have a subtract to do a comparison,
1050
         * and the X register is a known constant,
1051
         * we can merge this value into the
1052
         * comparison:
1053
         *
1054
         * sub x  ->  nop
1055
         * jeq #y jeq #(x+y)
1056
         */
1057
26.1k
        b->s.k += opt_state->vmap[val].const_val;
1058
26.1k
        last->s.code = NOP;
1059
        /*
1060
         * XXX - optimizer loop detection.
1061
         */
1062
26.1k
        opt_state->non_branch_movement_performed = 1;
1063
26.1k
        opt_state->done = 0;
1064
27.8k
      } else if (b->s.k == 0) {
1065
        /*
1066
         * If the X register isn't a constant,
1067
         * and the comparison in the test is
1068
         * against 0, we can compare with the
1069
         * X register, instead:
1070
         *
1071
         * sub x  ->  nop
1072
         * jeq #0 jeq x
1073
         */
1074
27.8k
        last->s.code = NOP;
1075
27.8k
        b->s.code = BPF_JMP|BPF_JEQ|BPF_X;
1076
        /*
1077
         * XXX - optimizer loop detection.
1078
         */
1079
27.8k
        opt_state->non_branch_movement_performed = 1;
1080
27.8k
        opt_state->done = 0;
1081
27.8k
      }
1082
54.0k
    }
1083
    /*
1084
     * Likewise, a constant subtract can be simplified:
1085
     *
1086
     * sub #x ->  nop
1087
     * jeq #y ->  jeq #(x+y)
1088
     */
1089
1.50M
    else if (last->s.code == (BPF_ALU|BPF_SUB|BPF_K)) {
1090
0
      last->s.code = NOP;
1091
0
      b->s.k += last->s.k;
1092
      /*
1093
       * XXX - optimizer loop detection.
1094
       */
1095
0
      opt_state->non_branch_movement_performed = 1;
1096
0
      opt_state->done = 0;
1097
0
    }
1098
    /*
1099
     * And, similarly, a constant AND can be simplified
1100
     * if we're testing against 0, i.e.:
1101
     *
1102
     * and #k nop
1103
     * jeq #0  -> jset #k
1104
     */
1105
1.50M
    else if (last->s.code == (BPF_ALU|BPF_AND|BPF_K) &&
1106
1.50M
        b->s.k == 0) {
1107
0
      b->s.k = last->s.k;
1108
0
      b->s.code = BPF_JMP|BPF_K|BPF_JSET;
1109
0
      last->s.code = NOP;
1110
      /*
1111
       * XXX - optimizer loop detection.
1112
       */
1113
0
      opt_state->non_branch_movement_performed = 1;
1114
0
      opt_state->done = 0;
1115
0
      opt_not(b);
1116
0
    }
1117
1.55M
  }
1118
  /*
1119
   * jset #0        ->   never
1120
   * jset #ffffffff ->   always
1121
   */
1122
2.10M
  if (b->s.code == (BPF_JMP|BPF_K|BPF_JSET)) {
1123
93
    if (b->s.k == 0)
1124
0
      JT(b) = JF(b);
1125
93
    if (b->s.k == 0xffffffffU)
1126
0
      JF(b) = JT(b);
1127
93
  }
1128
  /*
1129
   * If we're comparing against the index register, and the index
1130
   * register is a known constant, we can just compare against that
1131
   * constant.
1132
   */
1133
2.10M
  val = b->val[X_ATOM];
1134
2.10M
  if (opt_state->vmap[val].is_const && BPF_SRC(b->s.code) == BPF_X) {
1135
73.1k
    bpf_u_int32 v = opt_state->vmap[val].const_val;
1136
73.1k
    b->s.code &= ~BPF_X;
1137
73.1k
    b->s.k = v;
1138
73.1k
  }
1139
  /*
1140
   * If the accumulator is a known constant, we can compute the
1141
   * comparison result.
1142
   */
1143
2.10M
  val = b->val[A_ATOM];
1144
2.10M
  if (opt_state->vmap[val].is_const && BPF_SRC(b->s.code) == BPF_K) {
1145
268k
    bpf_u_int32 v = opt_state->vmap[val].const_val;
1146
268k
    switch (BPF_OP(b->s.code)) {
1147
1148
150k
    case BPF_JEQ:
1149
150k
      v = v == b->s.k;
1150
150k
      break;
1151
1152
43.6k
    case BPF_JGT:
1153
43.6k
      v = v > b->s.k;
1154
43.6k
      break;
1155
1156
74.6k
    case BPF_JGE:
1157
74.6k
      v = v >= b->s.k;
1158
74.6k
      break;
1159
1160
0
    case BPF_JSET:
1161
0
      v &= b->s.k;
1162
0
      break;
1163
1164
0
    default:
1165
0
      abort();
1166
268k
    }
1167
268k
    if (JF(b) != JT(b)) {
1168
      /*
1169
       * XXX - optimizer loop detection.
1170
       */
1171
138k
      opt_state->non_branch_movement_performed = 1;
1172
138k
      opt_state->done = 0;
1173
138k
    }
1174
268k
    if (v)
1175
79.6k
      JF(b) = JT(b);
1176
188k
    else
1177
188k
      JT(b) = JF(b);
1178
268k
  }
1179
2.10M
}
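
A hedged, self-contained rendering of the first peephole above (st M[k] immediately followed by ldx M[k] becomes st M[k]; tax); the insn records and OP_* opcodes are illustrative stand-ins for libpcap's slist and real BPF opcodes:

    /* Hedged sketch: the "st M[k]; ldx M[k] -> st M[k]; tax" peephole
     * on a toy instruction array. */
    #include <stdio.h>

    enum { OP_ST, OP_LDX_MEM, OP_TAX };

    struct insn { int op; unsigned int k; };

    static void peep(struct insn *p, int n)
    {
        for (int i = 0; i + 1 < n; i++) {
            /* A load from the slot just stored to can read A directly. */
            if (p[i].op == OP_ST && p[i + 1].op == OP_LDX_MEM &&
                p[i].k == p[i + 1].k)
                p[i + 1].op = OP_TAX;
        }
    }

    int main(void)
    {
        struct insn prog[] = { { OP_ST, 5 }, { OP_LDX_MEM, 5 } };

        peep(prog, 2);
        printf("second insn is now %s\n",
            prog[1].op == OP_TAX ? "tax" : "ldx M[k]");
        return 0;
    }

The real routine additionally has to skip NOPs between the two instructions, which is what this_op() does for it.
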
1180
1181
/*
1182
 * Compute the symbolic value of the expression of 's', and update
1183
 * anything it defines in the value table 'val'.  If 'alter' is true,
1184
 * do various optimizations.  This code would be cleaner if symbolic
1185
 * evaluation and code transformations weren't folded together.
1186
 */
1187
static void
1188
opt_stmt(opt_state_t *opt_state, struct stmt *s, bpf_u_int32 val[], int alter)
1189
17.7M
{
1190
17.7M
  int op;
1191
17.7M
  bpf_u_int32 v;
1192
1193
17.7M
  switch (s->code) {
1194
1195
138k
  case BPF_LD|BPF_ABS|BPF_W:
1196
378k
  case BPF_LD|BPF_ABS|BPF_H:
1197
1.33M
  case BPF_LD|BPF_ABS|BPF_B:
1198
1.33M
    v = F(opt_state, s->code, s->k, 0L);
1199
1.33M
    vstore(s, &val[A_ATOM], v, alter);
1200
1.33M
    break;
1201
1202
0
  case BPF_LD|BPF_IND|BPF_W:
1203
0
  case BPF_LD|BPF_IND|BPF_H:
1204
124k
  case BPF_LD|BPF_IND|BPF_B:
1205
124k
    v = val[X_ATOM];
1206
124k
    if (alter && opt_state->vmap[v].is_const) {
1207
7.10k
      s->code = BPF_LD|BPF_ABS|BPF_SIZE(s->code);
1208
7.10k
      s->k += opt_state->vmap[v].const_val;
1209
7.10k
      v = F(opt_state, s->code, s->k, 0L);
1210
      /*
1211
       * XXX - optimizer loop detection.
1212
       */
1213
7.10k
      opt_state->non_branch_movement_performed = 1;
1214
7.10k
      opt_state->done = 0;
1215
7.10k
    }
1216
117k
    else
1217
117k
      v = F(opt_state, s->code, s->k, v);
1218
124k
    vstore(s, &val[A_ATOM], v, alter);
1219
124k
    break;
1220
1221
0
  case BPF_LD|BPF_LEN:
1222
0
    v = F(opt_state, s->code, 0L, 0L);
1223
0
    vstore(s, &val[A_ATOM], v, alter);
1224
0
    break;
1225
1226
2.12M
  case BPF_LD|BPF_IMM:
1227
2.12M
    v = K(s->k);
1228
2.12M
    vstore(s, &val[A_ATOM], v, alter);
1229
2.12M
    break;
1230
1231
595k
  case BPF_LDX|BPF_IMM:
1232
595k
    v = K(s->k);
1233
595k
    vstore(s, &val[X_ATOM], v, alter);
1234
595k
    break;
1235
1236
0
  case BPF_LDX|BPF_MSH|BPF_B:
1237
0
    v = F(opt_state, s->code, s->k, 0L);
1238
0
    vstore(s, &val[X_ATOM], v, alter);
1239
0
    break;
1240
1241
462k
  case BPF_ALU|BPF_NEG:
1242
462k
    if (alter && opt_state->vmap[val[A_ATOM]].is_const) {
1243
86.8k
      s->code = BPF_LD|BPF_IMM;
1244
      /*
1245
       * Do this negation as unsigned arithmetic; that's
1246
       * what modern BPF engines do, and it guarantees
1247
       * that all possible values can be negated.  (Yeah,
1248
       * negating 0x80000000, the minimum signed 32-bit
1249
       * two's-complement value, results in 0x80000000,
1250
       * so it's still negative, but we *should* be doing
1251
       * all unsigned arithmetic here, to match what
1252
       * modern BPF engines do.)
1253
       *
1254
       * Express it as 0U - (unsigned value) so that we
1255
       * don't get compiler warnings about negating an
1256
       * unsigned value and don't get UBSan warnings
1257
       * about the result of negating 0x80000000 being
1258
       * undefined.
1259
       */
1260
86.8k
      s->k = 0U - opt_state->vmap[val[A_ATOM]].const_val;
1261
86.8k
      val[A_ATOM] = K(s->k);
1262
86.8k
    }
1263
375k
    else
1264
375k
      val[A_ATOM] = F(opt_state, s->code, val[A_ATOM], 0L);
1265
462k
    break;
1266
1267
17.6k
  case BPF_ALU|BPF_ADD|BPF_K:
1268
17.6k
  case BPF_ALU|BPF_SUB|BPF_K:
1269
17.6k
  case BPF_ALU|BPF_MUL|BPF_K:
1270
17.6k
  case BPF_ALU|BPF_DIV|BPF_K:
1271
17.6k
  case BPF_ALU|BPF_MOD|BPF_K:
1272
131k
  case BPF_ALU|BPF_AND|BPF_K:
1273
131k
  case BPF_ALU|BPF_OR|BPF_K:
1274
131k
  case BPF_ALU|BPF_XOR|BPF_K:
1275
131k
  case BPF_ALU|BPF_LSH|BPF_K:
1276
131k
  case BPF_ALU|BPF_RSH|BPF_K:
1277
131k
    op = BPF_OP(s->code);
1278
131k
    if (alter) {
1279
11.7k
      if (s->k == 0) {
1280
        /*
1281
         * Optimize operations where the constant
1282
         * is zero.
1283
         *
1284
         * Don't optimize away "sub #0"
1285
         * as it may be needed later to
1286
         * fixup the generated math code.
1287
         *
1288
         * Fail if we're dividing by zero or taking
1289
         * a modulus by zero.
1290
         */
1291
0
        if (op == BPF_ADD ||
1292
0
            op == BPF_LSH || op == BPF_RSH ||
1293
0
            op == BPF_OR || op == BPF_XOR) {
1294
0
          s->code = NOP;
1295
0
          break;
1296
0
        }
1297
0
        if (op == BPF_MUL || op == BPF_AND) {
1298
0
          s->code = BPF_LD|BPF_IMM;
1299
0
          val[A_ATOM] = K(s->k);
1300
0
          break;
1301
0
        }
1302
0
        if (op == BPF_DIV)
1303
0
          opt_error(opt_state,
1304
0
              "division by zero");
1305
0
        if (op == BPF_MOD)
1306
0
          opt_error(opt_state,
1307
0
              "modulus by zero");
1308
0
      }
1309
11.7k
      if (opt_state->vmap[val[A_ATOM]].is_const) {
1310
0
        fold_op(opt_state, s, val[A_ATOM], K(s->k));
1311
0
        val[A_ATOM] = K(s->k);
1312
0
        break;
1313
0
      }
1314
11.7k
    }
1315
131k
    val[A_ATOM] = F(opt_state, s->code, val[A_ATOM], K(s->k));
1316
131k
    break;
1317
1318
163k
  case BPF_ALU|BPF_ADD|BPF_X:
1319
267k
  case BPF_ALU|BPF_SUB|BPF_X:
1320
603k
  case BPF_ALU|BPF_MUL|BPF_X:
1321
716k
  case BPF_ALU|BPF_DIV|BPF_X:
1322
837k
  case BPF_ALU|BPF_MOD|BPF_X:
1323
1.14M
  case BPF_ALU|BPF_AND|BPF_X:
1324
1.22M
  case BPF_ALU|BPF_OR|BPF_X:
1325
1.34M
  case BPF_ALU|BPF_XOR|BPF_X:
1326
1.34M
  case BPF_ALU|BPF_LSH|BPF_X:
1327
1.35M
  case BPF_ALU|BPF_RSH|BPF_X:
1328
1.35M
    op = BPF_OP(s->code);
1329
1.35M
    if (alter && opt_state->vmap[val[X_ATOM]].is_const) {
1330
227k
      if (opt_state->vmap[val[A_ATOM]].is_const) {
1331
227k
        fold_op(opt_state, s, val[A_ATOM], val[X_ATOM]);
1332
227k
        val[A_ATOM] = K(s->k);
1333
227k
      }
1334
0
      else {
1335
0
        s->code = BPF_ALU|BPF_K|op;
1336
0
        s->k = opt_state->vmap[val[X_ATOM]].const_val;
1337
0
        if ((op == BPF_LSH || op == BPF_RSH) &&
1338
0
            s->k > 31)
1339
0
          opt_error(opt_state,
1340
0
              "shift by more than 31 bits");
1341
        /*
1342
         * XXX - optimizer loop detection.
1343
         */
1344
0
        opt_state->non_branch_movement_performed = 1;
1345
0
        opt_state->done = 0;
1346
0
        val[A_ATOM] =
1347
0
          F(opt_state, s->code, val[A_ATOM], K(s->k));
1348
0
      }
1349
227k
      break;
1350
227k
    }
1351
    /*
1352
     * Check if we're doing something to an accumulator
1353
     * that is 0, and simplify.  This may not seem like
1354
     * much of a simplification but it could open up further
1355
     * optimizations.
1356
     * XXX We could also check for mul by 1, etc.
1357
     */
1358
1.12M
    if (alter && opt_state->vmap[val[A_ATOM]].is_const
1359
1.12M
        && opt_state->vmap[val[A_ATOM]].const_val == 0) {
1360
0
      if (op == BPF_ADD || op == BPF_OR || op == BPF_XOR) {
1361
0
        s->code = BPF_MISC|BPF_TXA;
1362
0
        vstore(s, &val[A_ATOM], val[X_ATOM], alter);
1363
0
        break;
1364
0
      }
1365
0
      else if (op == BPF_MUL || op == BPF_DIV || op == BPF_MOD ||
1366
0
         op == BPF_AND || op == BPF_LSH || op == BPF_RSH) {
1367
0
        s->code = BPF_LD|BPF_IMM;
1368
0
        s->k = 0;
1369
0
        vstore(s, &val[A_ATOM], K(s->k), alter);
1370
0
        break;
1371
0
      }
1372
0
      else if (op == BPF_NEG) {
1373
0
        s->code = NOP;
1374
0
        break;
1375
0
      }
1376
0
    }
1377
1.12M
    val[A_ATOM] = F(opt_state, s->code, val[A_ATOM], val[X_ATOM]);
1378
1.12M
    break;
1379
1380
7.51k
  case BPF_MISC|BPF_TXA:
1381
7.51k
    vstore(s, &val[A_ATOM], val[X_ATOM], alter);
1382
7.51k
    break;
1383
1384
2.29M
  case BPF_LD|BPF_MEM:
1385
2.29M
    v = val[s->k];
1386
2.29M
    if (alter && opt_state->vmap[v].is_const) {
1387
391k
      s->code = BPF_LD|BPF_IMM;
1388
391k
      s->k = opt_state->vmap[v].const_val;
1389
      /*
1390
       * XXX - optimizer loop detection.
1391
       */
1392
391k
      opt_state->non_branch_movement_performed = 1;
1393
391k
      opt_state->done = 0;
1394
391k
    }
1395
2.29M
    vstore(s, &val[A_ATOM], v, alter);
1396
2.29M
    break;
1397
1398
778k
  case BPF_MISC|BPF_TAX:
1399
778k
    vstore(s, &val[X_ATOM], val[A_ATOM], alter);
1400
778k
    break;
1401
1402
509k
  case BPF_LDX|BPF_MEM:
1403
509k
    v = val[s->k];
1404
509k
    if (alter && opt_state->vmap[v].is_const) {
1405
7.10k
      s->code = BPF_LDX|BPF_IMM;
1406
7.10k
      s->k = opt_state->vmap[v].const_val;
1407
      /*
1408
       * XXX - optimizer loop detection.
1409
       */
1410
7.10k
      opt_state->non_branch_movement_performed = 1;
1411
7.10k
      opt_state->done = 0;
1412
7.10k
    }
1413
509k
    vstore(s, &val[X_ATOM], v, alter);
1414
509k
    break;
1415
1416
2.70M
  case BPF_ST:
1417
2.70M
    vstore(s, &val[s->k], val[A_ATOM], alter);
1418
2.70M
    break;
1419
1420
0
  case BPF_STX:
1421
0
    vstore(s, &val[s->k], val[X_ATOM], alter);
1422
0
    break;
1423
17.7M
  }
1424
17.7M
}
1425
1426
static void
1427
deadstmt(opt_state_t *opt_state, register struct stmt *s, register struct stmt *last[])
1428
19.8M
{
1429
19.8M
  register int atom;
1430
1431
19.8M
  atom = atomuse(s);
1432
19.8M
  if (atom >= 0) {
1433
9.63M
    if (atom == AX_ATOM) {
1434
1.34M
      last[X_ATOM] = 0;
1435
1.34M
      last[A_ATOM] = 0;
1436
1.34M
    }
1437
8.28M
    else
1438
8.28M
      last[atom] = 0;
1439
9.63M
  }
1440
19.8M
  atom = atomdef(s);
1441
19.8M
  if (atom >= 0) {
1442
11.7M
    if (last[atom]) {
1443
      /*
1444
       * XXX - optimizer loop detection.
1445
       */
1446
1.07M
      opt_state->non_branch_movement_performed = 1;
1447
1.07M
      opt_state->done = 0;
1448
1.07M
      last[atom]->code = NOP;
1449
1.07M
    }
1450
11.7M
    last[atom] = s;
1451
11.7M
  }
1452
19.8M
}
1453
1454
static void
1455
opt_deadstores(opt_state_t *opt_state, register struct block *b)
1456
2.42M
{
1457
2.42M
  register struct slist *s;
1458
2.42M
  register int atom;
1459
2.42M
  struct stmt *last[N_ATOMS];
1460
1461
2.42M
  memset((char *)last, 0, sizeof last);
1462
1463
19.8M
  for (s = b->stmts; s != 0; s = s->next)
1464
17.3M
    deadstmt(opt_state, &s->s, last);
1465
2.42M
  deadstmt(opt_state, &b->s, last);
1466
1467
45.9M
  for (atom = 0; atom < N_ATOMS; ++atom)
1468
43.5M
    if (last[atom] && !ATOMELEM(b->out_use, atom)) {
1469
483k
      last[atom]->code = NOP;
1470
      /*
1471
       * XXX - optimizer loop detection.
1472
       */
1473
483k
      opt_state->non_branch_movement_performed = 1;
1474
483k
      opt_state->done = 0;
1475
483k
    }
1476
2.42M
}
1477
1478
static void
1479
opt_blk(opt_state_t *opt_state, struct block *b, int do_stmts)
1480
2.62M
{
1481
2.62M
  struct slist *s;
1482
2.62M
  struct edge *p;
1483
2.62M
  int i;
1484
2.62M
  bpf_u_int32 aval, xval;
1485
1486
#if 0
1487
  for (s = b->stmts; s && s->next; s = s->next)
1488
    if (BPF_CLASS(s->s.code) == BPF_JMP) {
1489
      do_stmts = 0;
1490
      break;
1491
    }
1492
#endif
1493
1494
  /*
1495
   * Initialize the atom values.
1496
   */
1497
2.62M
  p = b->in_edges;
1498
2.62M
  if (p == 0) {
1499
    /*
1500
     * We have no predecessors, so everything is undefined
1501
     * upon entry to this block.
1502
     */
1503
248k
    memset((char *)b->val, 0, sizeof(b->val));
1504
2.37M
  } else {
1505
    /*
1506
     * Inherit values from our predecessors.
1507
     *
1508
     * First, get the values from the predecessor along the
1509
     * first edge leading to this node.
1510
     */
1511
2.37M
    memcpy((char *)b->val, (char *)p->pred->val, sizeof(b->val));
1512
    /*
1513
     * Now look at all the other nodes leading to this node.
1514
     * If, for the predecessor along that edge, a register
1515
     * has a different value from the one we have (i.e.,
1516
     * control paths are merging, and the merging paths
1517
     * assign different values to that register), give the
1518
     * register the undefined value of 0.
1519
     */
1520
4.42M
    while ((p = p->next) != NULL) {
1521
38.9M
      for (i = 0; i < N_ATOMS; ++i)
1522
36.9M
        if (b->val[i] != p->pred->val[i])
1523
4.38M
          b->val[i] = 0;
1524
2.05M
    }
1525
2.37M
  }
1526
2.62M
  aval = b->val[A_ATOM];
1527
2.62M
  xval = b->val[X_ATOM];
1528
20.3M
  for (s = b->stmts; s; s = s->next)
1529
17.7M
    opt_stmt(opt_state, &s->s, b->val, do_stmts);
1530
1531
  /*
1532
   * This is a special case: if we don't use anything from this
1533
   * block, and we load the accumulator or index register with a
1534
   * value that is already there, or if this block is a return,
1535
   * eliminate all the statements.
1536
   *
1537
   * XXX - what if it does a store?  Presumably that falls under
1538
   * the heading of "if we don't use anything from this block",
1539
   * i.e., if we use any memory location set to a different
1540
   * value by this block, then we use something from this block.
1541
   *
1542
   * XXX - why does it matter whether we use anything from this
1543
   * block?  If the accumulator or index register doesn't change
1544
   * its value, isn't that OK even if we use that value?
1545
   *
1546
   * XXX - if we load the accumulator with a different value,
1547
   * and the block ends with a conditional branch, we obviously
1548
   * can't eliminate it, as the branch depends on that value.
1549
   * For the index register, the conditional branch only depends
1550
   * on the index register value if the test is against the index
1551
   * register value rather than a constant; if nothing uses the
1552
   * value we put into the index register, and we're not testing
1553
   * against the index register's value, and there aren't any
1554
   * other problems that would keep us from eliminating this
1555
   * block, can we eliminate it?
1556
   */
1557
2.62M
  if (do_stmts &&
1558
2.62M
      ((b->out_use == 0 &&
1559
596k
        aval != VAL_UNKNOWN && b->val[A_ATOM] == aval &&
1560
596k
        xval != VAL_UNKNOWN && b->val[X_ATOM] == xval) ||
1561
596k
       BPF_CLASS(b->s.code) == BPF_RET)) {
1562
202k
    if (b->stmts != 0) {
1563
49.0k
      b->stmts = 0;
1564
      /*
1565
       * XXX - optimizer loop detection.
1566
       */
1567
49.0k
      opt_state->non_branch_movement_performed = 1;
1568
49.0k
      opt_state->done = 0;
1569
49.0k
    }
1570
2.42M
  } else {
1571
2.42M
    opt_peep(opt_state, b);
1572
2.42M
    opt_deadstores(opt_state, b);
1573
2.42M
  }
1574
  /*
1575
   * Set up values for branch optimizer.
1576
   */
1577
2.62M
  if (BPF_SRC(b->s.code) == BPF_K)
1578
2.33M
    b->oval = K(b->s.k);
1579
288k
  else
1580
288k
    b->oval = b->val[X_ATOM];
1581
2.62M
  b->et.code = b->s.code;
1582
2.62M
  b->ef.code = -b->s.code;
1583
2.62M
}
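
The merge rule above (any register whose value numbers disagree across incoming edges becomes VAL_UNKNOWN, i.e. 0) fits in a hedged few lines; the atom count and value numbers below are made up for illustration:

    /* Hedged sketch: merging register value numbers at a join point,
     * as opt_blk() does; 0 plays the role of VAL_UNKNOWN. */
    #include <stdio.h>

    #define N_ATOMS 4

    int main(void)
    {
        unsigned int pred1[N_ATOMS] = { 7, 9, 3, 3 };
        unsigned int pred2[N_ATOMS] = { 7, 8, 3, 0 };
        unsigned int val[N_ATOMS];
        int i;

        /* Start from the first predecessor's exit values... */
        for (i = 0; i < N_ATOMS; i++)
            val[i] = pred1[i];
        /* ...and zap any atom on which another predecessor disagrees. */
        for (i = 0; i < N_ATOMS; i++)
            if (val[i] != pred2[i])
                val[i] = 0;

        for (i = 0; i < N_ATOMS; i++)
            printf("val[%d] = %u\n", i, val[i]);    /* 7, 0, 3, 0 */
        return 0;
    }
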
1584
1585
/*
1586
 * Return true if any register that is used on exit from 'succ' has
1587
 * an exit value that is different from the corresponding exit value
1588
 * from 'b'.
1589
 */
1590
static int
1591
use_conflict(struct block *b, struct block *succ)
1592
1.79M
{
1593
1.79M
  int atom;
1594
1.79M
  atomset use = succ->out_use;
1595
1596
1.79M
  if (use == 0)
1597
1.72M
    return 0;
1598
1599
954k
  for (atom = 0; atom < N_ATOMS; ++atom)
1600
913k
    if (ATOMELEM(use, atom))
1601
62.6k
      if (b->val[atom] != succ->val[atom])
1602
21.5k
        return 1;
1603
41.0k
  return 0;
1604
62.6k
}
1605
1606
/*
1607
 * Given a block that is the successor of an edge, and an edge that
1608
 * dominates that edge, return either a pointer to a child of that
1609
 * block (a block to which that block jumps) if that block is a
1610
 * candidate to replace the successor of the latter edge or NULL
1611
 * if neither of the children of the first block is a candidate.
1612
 */
1613
static struct block *
1614
fold_edge(struct block *child, struct edge *ep)
1615
14.7M
{
1616
14.7M
  int sense;
1617
14.7M
  bpf_u_int32 aval0, aval1, oval0, oval1;
1618
14.7M
  int code = ep->code;
1619
1620
14.7M
  if (code < 0) {
1621
    /*
1622
     * This edge is a "branch if false" edge.
1623
     */
1624
5.67M
    code = -code;
1625
5.67M
    sense = 0;
1626
9.03M
  } else {
1627
    /*
1628
     * This edge is a "branch if true" edge.
1629
     */
1630
9.03M
    sense = 1;
1631
9.03M
  }
1632
1633
  /*
1634
   * If the opcode for the branch at the end of the block we
1635
   * were handed isn't the same as the opcode for the branch
1636
   * to which the edge we were handed corresponds, the tests
1637
   * for those branches aren't testing the same conditions,
1638
   * so the blocks to which the first block branches aren't
1639
   * candidates to replace the successor of the edge.
1640
   */
1641
14.7M
  if (child->s.code != code)
1642
7.03M
    return 0;
1643
1644
7.67M
  aval0 = child->val[A_ATOM];
1645
7.67M
  oval0 = child->oval;
1646
7.67M
  aval1 = ep->pred->val[A_ATOM];
1647
7.67M
  oval1 = ep->pred->oval;
1648
1649
  /*
1650
   * If the A register value on exit from the successor block
1651
   * isn't the same as the A register value on exit from the
1652
   * predecessor of the edge, the blocks to which the first
1653
   * block branches aren't candidates to replace the successor
1654
   * of the edge.
1655
   */
1656
7.67M
  if (aval0 != aval1)
1657
5.22M
    return 0;
1658
1659
2.44M
  if (oval0 == oval1)
1660
    /*
1661
     * The operands of the branch instructions are
1662
     * identical, so the branches are testing the
1663
     * same condition, and the result is true if a true
1664
     * branch was taken to get here, otherwise false.
1665
     */
1666
1.17M
    return sense ? JT(child) : JF(child);
1667
1668
1.27M
  if (sense && code == (BPF_JMP|BPF_JEQ|BPF_K))
1669
    /*
1670
     * At this point, we only know the comparison if we
1671
     * came down the true branch, and it was an equality
1672
     * comparison with a constant.
1673
     *
1674
     * I.e., if we came down the true branch, and the branch
1675
     * was an equality comparison with a constant, we know the
1676
     * accumulator contains that constant.  If we came down
1677
     * the false branch, or the comparison wasn't with a
1678
     * constant, we don't know what was in the accumulator.
1679
     *
1680
     * We rely on the fact that distinct constants have distinct
1681
     * value numbers.
1682
     */
1683
270k
    return JF(child);
1684
1685
1.00M
  return 0;
1686
1.27M
}
1687
1688
/*
1689
 * If we can make this edge go directly to a child of the edge's current
1690
 * successor, do so.
1691
 */
1692
static void
1693
opt_j(opt_state_t *opt_state, struct edge *ep)
1694
3.51M
{
1695
3.51M
  register u_int i, k;
1696
3.51M
  register struct block *target;
1697
1698
  /*
1699
   * Does this edge go to a block where, if the test
1700
   * at the end of it succeeds, it goes to a block
1701
   * that's a leaf node of the DAG, i.e. a return
1702
   * statement?
1703
   * If so, there's nothing to optimize.
1704
   */
1705
3.51M
  if (JT(ep->succ) == 0)
1706
1.02M
    return;
1707
1708
  /*
1709
   * Does this edge go to a block that goes, in turn, to
1710
   * the same block regardless of whether the test at the
1711
   * end succeeds or fails?
1712
   */
1713
2.49M
  if (JT(ep->succ) == JF(ep->succ)) {
1714
    /*
1715
     * Common branch targets can be eliminated, provided
1716
     * there is no data dependency.
1717
     *
1718
     * Check whether any register used on exit from the
1719
     * block to which the successor of this edge goes
1720
     * has a value at that point that's different from
1721
     * the value it has on exit from the predecessor of
1722
     * this edge.  If not, the predecessor of this edge
1723
     * can just go to the block to which the successor
1724
     * of this edge goes, bypassing the successor of this
1725
     * edge, as the successor of this edge isn't doing
1726
     * any calculations whose results are different
1727
     * from what the blocks before it did and isn't
1728
     * doing any tests the results of which matter.
1729
     */
1730
346k
    if (!use_conflict(ep->pred, JT(ep->succ))) {
1731
      /*
1732
       * No, there isn't.
1733
       * Make this edge go to the block to
1734
       * which the successor of this edge
1735
       * goes.
1736
       *
1737
       * XXX - optimizer loop detection.
1738
       */
1739
337k
      opt_state->non_branch_movement_performed = 1;
1740
337k
      opt_state->done = 0;
1741
337k
      ep->succ = JT(ep->succ);
1742
337k
    }
1743
346k
  }
1744
  /*
1745
   * For each edge dominator that matches the successor of this
1746
   * edge, promote the edge successor to its grandchild.
1747
   *
1748
   * XXX We violate the set abstraction here in favor of a reasonably
1749
   * efficient loop.
1750
   */
1751
3.56M
 top:
1752
19.5M
  for (i = 0; i < opt_state->edgewords; ++i) {
1753
    /* i'th word in the bitset of dominators */
1754
17.4M
    register bpf_u_int32 x = ep->edom[i];
1755
1756
30.6M
    while (x != 0) {
1757
      /* Find the next dominator in that word and mark it as found */
1758
14.7M
      k = lowest_set_bit(x);
1759
14.7M
      x &=~ ((bpf_u_int32)1 << k);
1760
14.7M
      k += i * BITS_PER_WORD;
1761
1762
14.7M
      target = fold_edge(ep->succ, opt_state->edges[k]);
1763
      /*
1764
       * We have a candidate to replace the successor
1765
       * of ep.
1766
       *
1767
       * Check that there is no data dependency between
1768
       * nodes that will be violated if we move the edge;
1769
       * i.e., if any register used on exit from the
1770
       * candidate has a value at that point different
1771
       * from the value it has when we exit the
1772
       * predecessor of that edge, there's a data
1773
       * dependency that will be violated.
1774
       */
1775
14.7M
      if (target != 0 && !use_conflict(ep->pred, target)) {
1776
        /*
1777
         * It's safe to replace the successor of
1778
         * ep; do so, and note that we've made
1779
         * at least one change.
1780
         *
1781
         * XXX - this is one of the operations that
1782
         * happens when the optimizer gets into
1783
         * one of those infinite loops.
1784
         */
1785
1.43M
        opt_state->done = 0;
1786
1.43M
        ep->succ = target;
1787
1.43M
        if (JT(target) != 0)
1788
          /*
1789
           * Start over unless we hit a leaf.
1790
           */
1791
1.06M
          goto top;
1792
362k
        return;
1793
1.43M
      }
1794
14.7M
    }
1795
17.4M
  }
1796
3.56M
}
1797
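The top: loop above is a standard walk over a packed bitset: pick off the lowest set bit of each word, clear it, and turn (word index, bit index) back into an edge id. A self-contained sketch, assuming 32-bit set words and a portable lowest_set_bit() (libpcap uses a compiler intrinsic for this where one is available):

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_WORD 32

static unsigned int
lowest_set_bit(uint32_t x)      /* precondition: x != 0 */
{
    unsigned int k = 0;

    while ((x & 1) == 0) {
        x >>= 1;
        ++k;
    }
    return k;
}

int
main(void)
{
    uint32_t edom[2] = { 0x00000012, 0x80000001 };  /* bits 1, 4, 32, 63 */

    for (unsigned int i = 0; i < 2; ++i) {
        uint32_t x = edom[i];

        while (x != 0) {
            unsigned int k = lowest_set_bit(x);

            x &= ~((uint32_t)1 << k);       /* mark this bit as handled */
            printf("edge id %u\n", k + i * BITS_PER_WORD);
        }
    }
    return 0;
}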
1798
/*
1799
 * XXX - is this, and and_pullup(), what's described in section 6.1.2
1800
 * "Predicate Assertion Propagation" in the BPF+ paper?
1801
 *
1802
 * Note that this looks at block dominators, not edge dominators,
1803
 * so the answer is probably no.
1804
 *
1805
 * "A or B" compiles into
1806
 *
1807
 *          A
1808
 *       t / \ f
1809
 *        /   B
1810
 *       / t / \ f
1811
 *      \   /
1812
 *       \ /
1813
 *        X
1814
 *
1815
 *
1816
 */
1817
static void
1818
or_pullup(opt_state_t *opt_state, struct block *b)
1819
1.75M
{
1820
1.75M
  bpf_u_int32 val;
1821
1.75M
  int at_top;
1822
1.75M
  struct block *pull;
1823
1.75M
  struct block **diffp, **samep;
1824
1.75M
  struct edge *ep;
1825
1826
1.75M
  ep = b->in_edges;
1827
1.75M
  if (ep == 0)
1828
525k
    return;
1829
1830
  /*
1831
   * Make sure each predecessor loads the same value.
1832
   * XXX why?
1833
   */
1834
1.23M
  val = ep->pred->val[A_ATOM];
1835
1.45M
  for (ep = ep->next; ep != 0; ep = ep->next)
1836
442k
    if (val != ep->pred->val[A_ATOM])
1837
216k
      return;
1838
1839
  /*
1840
   * For the first edge in the list of edges coming into this block,
1841
   * see whether the predecessor of that edge comes here via a true
1842
   * branch or a false branch.
1843
   */
1844
1.01M
  if (JT(b->in_edges->pred) == b)
1845
551k
    diffp = &JT(b->in_edges->pred); /* jt */
1846
466k
  else
1847
466k
    diffp = &JF(b->in_edges->pred);  /* jf */
1848
1849
  /*
1850
   * diffp is a pointer to a pointer to the block.
1851
   *
1852
   * Go down the false chain looking as far as you can,
1853
   * making sure that each jump-compare is doing the
1854
   * same as the original block.
1855
   *
1856
   * If you reach the bottom before you reach a
1857
   * different jump-compare, just exit.  There's nothing
1858
   * to do here.  XXX - no, this version is checking for
1859
   * the value leaving the block; that's from the BPF+
1860
   * pullup routine.
1861
   */
1862
1.01M
  at_top = 1;
1863
1.59M
  for (;;) {
1864
    /*
1865
     * Done if that's not going anywhere XXX
1866
     */
1867
1.59M
    if (*diffp == 0)
1868
0
      return;
1869
1870
    /*
1871
     * Done if that predecessor blah blah blah isn't
1872
     * going the same place we're going XXX
1873
     *
1874
     * Does the true edge of this block point to the same
1875
     * location as the true edge of b?
1876
     */
1877
1.59M
    if (JT(*diffp) != JT(b))
1878
267k
      return;
1879
1880
    /*
1881
     * Done if this node isn't a dominator of that
1882
     * node blah blah blah XXX
1883
     *
1884
     * Does b dominate diffp?
1885
     */
1886
1.32M
    if (!SET_MEMBER((*diffp)->dom, b->id))
1887
12.1k
      return;
1888
1889
    /*
1890
     * Break out of the loop if that node's value of A
1891
     * isn't the value of A above XXX
1892
     */
1893
1.31M
    if ((*diffp)->val[A_ATOM] != val)
1894
737k
      break;
1895
1896
    /*
1897
     * Get the JF for that node XXX
1898
     * Go down the false path.
1899
     */
1900
580k
    diffp = &JF(*diffp);
1901
580k
    at_top = 0;
1902
580k
  }
1903
1904
  /*
1905
   * Now that we've found a different jump-compare in a chain
1906
   * below b, search further down until we find another
1907
   * jump-compare that looks at the original value.  This
1908
   * jump-compare should get pulled up.  XXX again we're
1909
   * comparing values not jump-compares.
1910
   */
1911
737k
  samep = &JF(*diffp);
1912
963k
  for (;;) {
1913
    /*
1914
     * Done if that's not going anywhere XXX
1915
     */
1916
963k
    if (*samep == 0)
1917
0
      return;
1918
1919
    /*
1920
     * Done if that predecessor blah blah blah isn't
1921
     * going the same place we're going XXX
1922
     */
1923
963k
    if (JT(*samep) != JT(b))
1924
668k
      return;
1925
1926
    /*
1927
     * Done if this node isn't a dominator of that
1928
     * node blah blah blah XXX
1929
     *
1930
     * Does b dominate samep?
1931
     */
1932
294k
    if (!SET_MEMBER((*samep)->dom, b->id))
1933
51.0k
      return;
1934
1935
    /*
1936
     * Break out of the loop if that node's value of A
1937
     * is the value of A above XXX
1938
     */
1939
243k
    if ((*samep)->val[A_ATOM] == val)
1940
17.3k
      break;
1941
1942
    /* XXX Need to check that there are no data dependencies
1943
       between diffp and samep.  Currently, the code generator
1944
       will not produce such dependencies. */
1945
226k
    samep = &JF(*samep);
1946
226k
  }
1947
#ifdef notdef
1948
  /* XXX This doesn't cover everything. */
1949
  for (i = 0; i < N_ATOMS; ++i)
1950
    if ((*samep)->val[i] != pred->val[i])
1951
      return;
1952
#endif
1953
  /* Pull up the node. */
1954
17.3k
  pull = *samep;
1955
17.3k
  *samep = JF(pull);
1956
17.3k
  JF(pull) = *diffp;
1957
1958
  /*
1959
   * At the top of the chain, each predecessor needs to point at the
1960
   * pulled up node.  Inside the chain, there is only one predecessor
1961
   * to worry about.
1962
   */
1963
17.3k
  if (at_top) {
1964
70.6k
    for (ep = b->in_edges; ep != 0; ep = ep->next) {
1965
53.9k
      if (JT(ep->pred) == b)
1966
40.3k
        JT(ep->pred) = pull;
1967
13.5k
      else
1968
13.5k
        JF(ep->pred) = pull;
1969
53.9k
    }
1970
16.7k
  }
1971
569
  else
1972
569
    *diffp = pull;
1973
1974
  /*
1975
   * XXX - this is one of the operations that happens when the
1976
   * optimizer gets into one of those infinite loops.
1977
   */
1978
17.3k
  opt_state->done = 0;
1979
17.3k
}
1980
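The three assignments in the "pull up" step are a splice on the chain of false edges. Working through pointers-to-pointers lets one body handle both the head of the chain and an interior link; the at_top case differs only in that every in-edge of b, rather than *diffp, must be repointed. A minimal sketch of the interior case with stand-in types (pull_up() is hypothetical, not the libpcap code):

struct blk { struct blk *jf; };

static void
pull_up(struct blk **diffp, struct blk **samep)
{
    struct blk *pull = *samep;  /* the node to move forward */

    *samep = pull->jf;          /* unlink it from its old position */
    pull->jf = *diffp;          /* the old chain head now follows it */
    *diffp = pull;              /* the pulled node heads the chain */
}

int
main(void)
{
    struct blk n1, n2, n3;
    struct blk *head = &n1;

    n1.jf = &n2; n2.jf = &n3; n3.jf = 0;
    /* Move n2 to the front of the false chain. */
    pull_up(&head, &n1.jf);
    return head == &n2 && n2.jf == &n1 && n1.jf == &n3 ? 0 : 1;
}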
1981
static void
1982
and_pullup(opt_state_t *opt_state, struct block *b)
1983
1.75M
{
1984
1.75M
  bpf_u_int32 val;
1985
1.75M
  int at_top;
1986
1.75M
  struct block *pull;
1987
1.75M
  struct block **diffp, **samep;
1988
1.75M
  struct edge *ep;
1989
1990
1.75M
  ep = b->in_edges;
1991
1.75M
  if (ep == 0)
1992
525k
    return;
1993
1994
  /*
1995
   * Make sure each predecessor loads the same value.
1996
   */
1997
1.23M
  val = ep->pred->val[A_ATOM];
1998
1.45M
  for (ep = ep->next; ep != 0; ep = ep->next)
1999
442k
    if (val != ep->pred->val[A_ATOM])
2000
216k
      return;
2001
2002
1.01M
  if (JT(b->in_edges->pred) == b)
2003
547k
    diffp = &JT(b->in_edges->pred);
2004
469k
  else
2005
469k
    diffp = &JF(b->in_edges->pred);
2006
2007
1.01M
  at_top = 1;
2008
1.31M
  for (;;) {
2009
1.31M
    if (*diffp == 0)
2010
0
      return;
2011
2012
1.31M
    if (JF(*diffp) != JF(b))
2013
262k
      return;
2014
2015
1.04M
    if (!SET_MEMBER((*diffp)->dom, b->id))
2016
9.08k
      return;
2017
2018
1.04M
    if ((*diffp)->val[A_ATOM] != val)
2019
745k
      break;
2020
2021
294k
    diffp = &JT(*diffp);
2022
294k
    at_top = 0;
2023
294k
  }
2024
745k
  samep = &JT(*diffp);
2025
966k
  for (;;) {
2026
966k
    if (*samep == 0)
2027
0
      return;
2028
2029
966k
    if (JF(*samep) != JF(b))
2030
697k
      return;
2031
2032
269k
    if (!SET_MEMBER((*samep)->dom, b->id))
2033
37.7k
      return;
2034
2035
231k
    if ((*samep)->val[A_ATOM] == val)
2036
10.7k
      break;
2037
2038
    /* XXX Need to check that there are no data dependencies
2039
       between diffp and samep.  Currently, the code generator
2040
       will not produce such dependencies. */
2041
220k
    samep = &JT(*samep);
2042
220k
  }
2043
#ifdef notdef
2044
  /* XXX This doesn't cover everything. */
2045
  for (i = 0; i < N_ATOMS; ++i)
2046
    if ((*samep)->val[i] != pred->val[i])
2047
      return;
2048
#endif
2049
  /* Pull up the node. */
2050
10.7k
  pull = *samep;
2051
10.7k
  *samep = JT(pull);
2052
10.7k
  JT(pull) = *diffp;
2053
2054
  /*
2055
   * At the top of the chain, each predecessor needs to point at the
2056
   * pulled up node.  Inside the chain, there is only one predecessor
2057
   * to worry about.
2058
   */
2059
10.7k
  if (at_top) {
2060
21.3k
    for (ep = b->in_edges; ep != 0; ep = ep->next) {
2061
10.9k
      if (JT(ep->pred) == b)
2062
10.3k
        JT(ep->pred) = pull;
2063
600
      else
2064
600
        JF(ep->pred) = pull;
2065
10.9k
    }
2066
10.3k
  }
2067
333
  else
2068
333
    *diffp = pull;
2069
2070
  /*
2071
   * XXX - this is one of the operations that happens when the
2072
   * optimizer gets into one of those infinite loops.
2073
   */
2074
10.7k
  opt_state->done = 0;
2075
10.7k
}
2076
2077
static void
2078
opt_blks(opt_state_t *opt_state, struct icode *ic, int do_stmts)
2079
248k
{
2080
248k
  int i, maxlevel;
2081
248k
  struct block *p;
2082
2083
248k
  init_val(opt_state);
2084
248k
  maxlevel = ic->root->level;
2085
2086
248k
  find_inedges(opt_state, ic->root);
2087
2.65M
  for (i = maxlevel; i >= 0; --i)
2088
5.03M
    for (p = opt_state->levels[i]; p; p = p->link)
2089
2.62M
      opt_blk(opt_state, p, do_stmts);
2090
2091
248k
  if (do_stmts)
2092
    /*
2093
     * No point trying to move branches; it can't possibly
2094
     * make a difference at this point.
2095
     *
2096
     * XXX - this might be after we detect a loop where
2097
     * we were just looping infinitely moving branches
2098
     * in such a fashion that we went through two or more
2099
     * versions of the machine code, eventually returning
2100
     * to the first version.  (We're really not doing a
2101
     * full loop detection, we're just testing for two
2102
     * passes in a row where we do nothing but
2103
     * move branches.)
2104
     */
2105
101k
    return;
2106
2107
  /*
2108
   * Is this what the BPF+ paper describes in sections 6.1.1,
2109
   * 6.1.2, and 6.1.3?
2110
   */
2111
1.87M
  for (i = 1; i <= maxlevel; ++i) {
2112
3.48M
    for (p = opt_state->levels[i]; p; p = p->link) {
2113
1.75M
      opt_j(opt_state, &p->et);
2114
1.75M
      opt_j(opt_state, &p->ef);
2115
1.75M
    }
2116
1.72M
  }
2117
2118
147k
  find_inedges(opt_state, ic->root);
2119
1.87M
  for (i = 1; i <= maxlevel; ++i) {
2120
3.48M
    for (p = opt_state->levels[i]; p; p = p->link) {
2121
1.75M
      or_pullup(opt_state, p);
2122
1.75M
      and_pullup(opt_state, p);
2123
1.75M
    }
2124
1.72M
  }
2125
147k
}
2126
2127
static inline void
2128
link_inedge(struct edge *parent, struct block *child)
2129
7.96M
{
2130
7.96M
  parent->next = child->in_edges;
2131
7.96M
  child->in_edges = parent;
2132
7.96M
}
2133
2134
static void
2135
find_inedges(opt_state_t *opt_state, struct block *root)
2136
393k
{
2137
393k
  u_int i;
2138
393k
  int level;
2139
393k
  struct block *b;
2140
2141
11.5M
  for (i = 0; i < opt_state->n_blocks; ++i)
2142
11.1M
    opt_state->blocks[i]->in_edges = 0;
2143
2144
  /*
2145
   * Traverse the graph, adding each edge to the predecessor
2146
   * list of its successors.  Skip the leaves (i.e. level 0).
2147
   */
2148
4.28M
  for (level = root->level; level > 0; --level) {
2149
7.87M
    for (b = opt_state->levels[level]; b != 0; b = b->link) {
2150
3.98M
      link_inedge(&b->et, JT(b));
2151
3.98M
      link_inedge(&b->ef, JF(b));
2152
3.98M
    }
2153
3.89M
  }
2154
393k
}
2155
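Because each block embeds its own two edge records (et and ef), these predecessor lists need no allocation: rebuilding them is one push-front per edge, O(blocks + edges) overall, which is why find_inedges() can afford to run from scratch on every pass. A standalone sketch with minimal stand-in types (not the libpcap ones):

#include <stdio.h>

struct block;
struct edge  { struct block *pred, *succ; struct edge *next; };
struct block { struct edge et, ef; struct edge *in_edges; };

static void
push_inedge(struct edge *e, struct block *child)
{
    e->next = child->in_edges;  /* push-front: O(1), no allocation */
    child->in_edges = e;
}

int
main(void)
{
    struct block a = {0}, b = {0};
    int n = 0;

    a.et.pred = &a; a.et.succ = &b;     /* a's true edge goes to b */
    a.ef.pred = &a; a.ef.succ = &b;     /* so does a's false edge */
    push_inedge(&a.et, &b);
    push_inedge(&a.ef, &b);
    for (struct edge *ep = b.in_edges; ep != 0; ep = ep->next)
        ++n;
    printf("b has %d in-edges\n", n);   /* prints 2 */
    return 0;
}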
2156
static void
2157
opt_root(struct block **b)
2158
43.0k
{
2159
43.0k
  struct slist *tmp, *s;
2160
2161
43.0k
  s = (*b)->stmts;
2162
43.0k
  (*b)->stmts = 0;
2163
83.1k
  while (BPF_CLASS((*b)->s.code) == BPF_JMP && JT(*b) == JF(*b))
2164
40.1k
    *b = JT(*b);
2165
2166
43.0k
  tmp = (*b)->stmts;
2167
43.0k
  if (tmp != 0)
2168
5.01k
    sappend(s, tmp);
2169
43.0k
  (*b)->stmts = s;
2170
2171
  /*
2172
   * If the root node is a return, then there is no
2173
   * point executing any statements (since the bpf machine
2174
   * has no side effects).
2175
   */
2176
43.0k
  if (BPF_CLASS((*b)->s.code) == BPF_RET)
2177
29.7k
    (*b)->stmts = 0;
2178
43.0k
}
2179
2180
static void
2181
opt_loop(opt_state_t *opt_state, struct icode *ic, int do_stmts)
2182
89.8k
{
2183
2184
#ifdef BDEBUG
2185
  if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
2186
    printf("opt_loop(root, %d) begin\n", do_stmts);
2187
    opt_dump(opt_state, ic);
2188
  }
2189
#endif
2190
2191
  /*
2192
   * XXX - optimizer loop detection.
2193
   */
2194
89.8k
  int loop_count = 0;
2195
248k
  for (;;) {
2196
248k
    opt_state->done = 1;
2197
    /*
2198
     * XXX - optimizer loop detection.
2199
     */
2200
248k
    opt_state->non_branch_movement_performed = 0;
2201
248k
    find_levels(opt_state, ic);
2202
248k
    find_dom(opt_state, ic->root);
2203
248k
    find_closure(opt_state, ic->root);
2204
248k
    find_ud(opt_state, ic->root);
2205
248k
    find_edom(opt_state, ic->root);
2206
248k
    opt_blks(opt_state, ic, do_stmts);
2207
#ifdef BDEBUG
2208
    if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
2209
      printf("opt_loop(root, %d) bottom, done=%d\n", do_stmts, opt_state->done);
2210
      opt_dump(opt_state, ic);
2211
    }
2212
#endif
2213
2214
    /*
2215
     * Was anything done in this optimizer pass?
2216
     */
2217
248k
    if (opt_state->done) {
2218
      /*
2219
       * No, so we've reached a fixed point.
2220
       * We're done.
2221
       */
2222
87.8k
      break;
2223
87.8k
    }
2224
2225
    /*
2226
     * XXX - was anything done other than branch movement
2227
     * in this pass?
2228
     */
2229
160k
    if (opt_state->non_branch_movement_performed) {
2230
      /*
2231
       * Yes.  Clear any loop-detection counter;
2232
       * we're making some form of progress (assuming
2233
       * we can't get into a cycle doing *other*
2234
       * optimizations...).
2235
       */
2236
134k
      loop_count = 0;
2237
134k
    } else {
2238
      /*
2239
       * No - increment the counter, and quit if
2240
       * it's up to 100.
2241
       */
2242
25.9k
      loop_count++;
2243
25.9k
      if (loop_count >= 100) {
2244
        /*
2245
         * We've done nothing but branch movement
2246
         * for 100 passes; we're probably
2247
         * in a cycle and will never reach a
2248
         * fixed point.
2249
         *
2250
         * XXX - yes, we really need a non-
2251
         * heuristic way of detecting a cycle.
2252
         */
2253
120
        opt_state->done = 1;
2254
120
        break;
2255
120
      }
2256
25.9k
    }
2257
160k
  }
2258
89.8k
}
2259
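The loop above seeks a fixed point but settles for a heuristic: any pass that does more than move branches resets the stall counter, and 100 consecutive branch-movement-only passes are taken as evidence of a cycle. A standalone sketch of that policy, with pass() as a hypothetical stand-in that simulates three productive passes followed by endless branch movement:

#include <stdbool.h>
#include <stdio.h>

struct pass_result { bool changed; bool real_progress; };

/* Stand-in for one optimizer pass (not a libpcap function). */
static struct pass_result
pass(void)
{
    static int n;
    struct pass_result r;

    ++n;
    r.changed = true;
    r.real_progress = (n <= 3);
    return r;
}

int
main(void)
{
    int stalls = 0, passes = 0;

    for (;;) {
        struct pass_result r = pass();

        ++passes;
        if (!r.changed)
            break;          /* genuine fixed point reached */
        if (r.real_progress)
            stalls = 0;     /* did more than move branches */
        else if (++stalls >= 100)
            break;          /* assume a cycle; give up */
    }
    printf("stopped after %d passes\n", passes);    /* 103 */
    return 0;
}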
2260
/*
2261
 * Optimize the filter code in its dag representation.
2262
 * Return 0 on success, -1 on error.
2263
 */
2264
int
2265
bpf_optimize(struct icode *ic, char *errbuf)
2266
44.9k
{
2267
44.9k
  opt_state_t opt_state;
2268
2269
44.9k
  memset(&opt_state, 0, sizeof(opt_state));
2270
44.9k
  opt_state.errbuf = errbuf;
2271
44.9k
  opt_state.non_branch_movement_performed = 0;
2272
44.9k
  if (setjmp(opt_state.top_ctx)) {
2273
1.86k
    opt_cleanup(&opt_state);
2274
1.86k
    return -1;
2275
1.86k
  }
2276
43.0k
  opt_init(&opt_state, ic);
2277
43.0k
  opt_loop(&opt_state, ic, 0);
2278
43.0k
  opt_loop(&opt_state, ic, 1);
2279
43.0k
  intern_blocks(&opt_state, ic);
2280
#ifdef BDEBUG
2281
  if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
2282
    printf("after intern_blocks()\n");
2283
    opt_dump(&opt_state, ic);
2284
  }
2285
#endif
2286
43.0k
  opt_root(&ic->root);
2287
#ifdef BDEBUG
2288
  if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
2289
    printf("after opt_root()\n");
2290
    opt_dump(&opt_state, ic);
2291
  }
2292
#endif
2293
43.0k
  opt_cleanup(&opt_state);
2294
43.0k
  return 0;
2295
44.9k
}
2296
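Error handling here follows a setjmp()/longjmp() protocol: opt_error(), however deep in the optimizer it fires, lands back at the setjmp() above, and opt_cleanup() runs on both the failure and the success path. Note that opt_state is zeroed before setjmp() so the cleanup only ever sees initialized pointers. A minimal standalone sketch of the same shape (illustrative names, not the libpcap ones):

#include <setjmp.h>
#include <stdio.h>

static jmp_buf top_ctx;

static void
deep_work(int fail)
{
    if (fail)
        longjmp(top_ctx, 1);    /* unwinds straight to setjmp() */
}

static int
guarded_entry(int fail)
{
    if (setjmp(top_ctx)) {      /* nonzero only when reached via longjmp() */
        puts("cleanup after error");
        return -1;
    }
    deep_work(fail);
    puts("cleanup after success");
    return 0;
}

int
main(void)
{
    guarded_entry(0);   /* prints "cleanup after success" */
    guarded_entry(1);   /* prints "cleanup after error" */
    return 0;
}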
2297
static void
2298
make_marks(struct icode *ic, struct block *p)
2299
413k
{
2300
413k
  if (!isMarked(ic, p)) {
2301
239k
    Mark(ic, p);
2302
239k
    if (BPF_CLASS(p->s.code) != BPF_RET) {
2303
183k
      make_marks(ic, JT(p));
2304
183k
      make_marks(ic, JF(p));
2305
183k
    }
2306
239k
  }
2307
413k
}
2308
2309
/*
2310
 * Mark code array such that isMarked(ic->cur_mark, i) is true
2311
 * only for nodes that are alive.
2312
 */
2313
static void
2314
mark_code(struct icode *ic)
2315
45.1k
{
2316
45.1k
  ic->cur_mark += 1;
2317
45.1k
  make_marks(ic, ic->root);
2318
45.1k
}
2319
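mark_code() can "unmark" every node in O(1) because marks are generation stamps: a node counts as marked only while its stamp equals ic->cur_mark, so bumping the counter invalidates all outstanding marks at once. A sketch of the idiom with stand-in types (libpcap's Mark()/isMarked() macros presumably reduce to comparisons like these):

#include <stdio.h>

struct node  { int mark; };
struct graph { int cur_mark; };

static void unmark_all(struct graph *g)               { g->cur_mark++; }
static void mark_node(struct graph *g, struct node *n) { n->mark = g->cur_mark; }
static int  is_marked(struct graph *g, struct node *n) { return n->mark == g->cur_mark; }

int
main(void)
{
    struct graph g = { 1 };
    struct node a = { 0 };

    mark_node(&g, &a);
    printf("%d", is_marked(&g, &a));    /* 1 */
    unmark_all(&g);                     /* O(1): no node is touched */
    printf("%d\n", is_marked(&g, &a));  /* 0 */
    return 0;
}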
2320
/*
2321
 * True iff the two stmt lists load the same value from the packet into
2322
 * the accumulator.
2323
 */
2324
static int
2325
eq_slist(struct slist *x, struct slist *y)
2326
5.28k
{
2327
6.40k
  for (;;) {
2328
19.1k
    while (x && x->s.code == NOP)
2329
12.6k
      x = x->next;
2330
18.1k
    while (y && y->s.code == NOP)
2331
11.6k
      y = y->next;
2332
6.40k
    if (x == 0)
2333
2.51k
      return y == 0;
2334
3.88k
    if (y == 0)
2335
129
      return x == 0;
2336
3.75k
    if (x->s.code != y->s.code || x->s.k != y->s.k)
2337
2.63k
      return 0;
2338
1.12k
    x = x->next;
2339
1.12k
    y = y->next;
2340
1.12k
  }
2341
5.28k
}
2342
2343
static inline int
2344
eq_blk(struct block *b0, struct block *b1)
2345
1.67M
{
2346
1.67M
  if (b0->s.code == b1->s.code &&
2347
1.67M
      b0->s.k == b1->s.k &&
2348
1.67M
      b0->et.succ == b1->et.succ &&
2349
1.67M
      b0->ef.succ == b1->ef.succ)
2350
5.28k
    return eq_slist(b0->stmts, b1->stmts);
2351
1.66M
  return 0;
2352
1.67M
}
2353
2354
static void
2355
intern_blocks(opt_state_t *opt_state, struct icode *ic)
2356
43.0k
{
2357
43.0k
  struct block *p;
2358
43.0k
  u_int i, j;
2359
43.0k
  int done1; /* don't shadow global */
2360
45.1k
 top:
2361
45.1k
  done1 = 1;
2362
1.15M
  for (i = 0; i < opt_state->n_blocks; ++i)
2363
1.11M
    opt_state->blocks[i]->link = 0;
2364
2365
45.1k
  mark_code(ic);
2366
2367
1.11M
  for (i = opt_state->n_blocks - 1; i != 0; ) {
2368
1.06M
    --i;
2369
1.06M
    if (!isMarked(ic, opt_state->blocks[i]))
2370
857k
      continue;
2371
6.76M
    for (j = i + 1; j < opt_state->n_blocks; ++j) {
2372
6.55M
      if (!isMarked(ic, opt_state->blocks[j]))
2373
4.88M
        continue;
2374
1.67M
      if (eq_blk(opt_state->blocks[i], opt_state->blocks[j])) {
2375
2.25k
        opt_state->blocks[i]->link = opt_state->blocks[j]->link ?
2376
2.20k
          opt_state->blocks[j]->link : opt_state->blocks[j];
2377
2.25k
        break;
2378
2.25k
      }
2379
1.67M
    }
2380
210k
  }
2381
1.15M
  for (i = 0; i < opt_state->n_blocks; ++i) {
2382
1.11M
    p = opt_state->blocks[i];
2383
1.11M
    if (JT(p) == 0)
2384
80.9k
      continue;
2385
1.03M
    if (JT(p)->link) {
2386
3.19k
      done1 = 0;
2387
3.19k
      JT(p) = JT(p)->link;
2388
3.19k
    }
2389
1.03M
    if (JF(p)->link) {
2390
3.40k
      done1 = 0;
2391
3.40k
      JF(p) = JF(p)->link;
2392
3.40k
    }
2393
1.03M
  }
2394
45.1k
  if (!done1)
2395
2.12k
    goto top;
2396
45.1k
}
2397
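The ->link fields turn duplicate elimination into repeated pointer redirection: every jump target with a representative is replaced by it, and the goto top loop reruns until nothing moves, which also collapses chains of duplicates found on different passes. A sketch of the redirection step over minimal stand-in types (redirect() is hypothetical):

#include <stddef.h>

struct blk { struct blk *link, *jt, *jf; };

/* Returns 1 if any target moved; the caller repeats until it returns 0. */
static int
redirect(struct blk **blocks, unsigned int n)
{
    int changed = 0;

    for (unsigned int i = 0; i < n; ++i) {
        struct blk *p = blocks[i];

        if (p->jt == NULL)
            continue;       /* return blocks have no successors */
        if (p->jt->link != NULL) { p->jt = p->jt->link; changed = 1; }
        if (p->jf->link != NULL) { p->jf = p->jf->link; changed = 1; }
    }
    return changed;
}

int
main(void)
{
    struct blk dup = { NULL, NULL, NULL };
    struct blk rep = { NULL, NULL, NULL };
    struct blk top = { NULL, &dup, &dup };
    struct blk *blocks[] = { &top, &dup, &rep };

    dup.link = &rep;        /* dup's representative is rep */
    while (redirect(blocks, 3))
        continue;
    return top.jt == &rep && top.jf == &rep ? 0 : 1;
}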
2398
static void
2399
opt_cleanup(opt_state_t *opt_state)
2400
44.9k
{
2401
44.9k
  free((void *)opt_state->vnode_base);
2402
44.9k
  free((void *)opt_state->vmap);
2403
44.9k
  free((void *)opt_state->edges);
2404
44.9k
  free((void *)opt_state->space);
2405
44.9k
  free((void *)opt_state->levels);
2406
44.9k
  free((void *)opt_state->blocks);
2407
44.9k
}
2408
2409
/*
2410
 * For optimizer errors.
2411
 */
2412
static void PCAP_NORETURN
2413
opt_error(opt_state_t *opt_state, const char *fmt, ...)
2414
1.86k
{
2415
1.86k
  va_list ap;
2416
2417
1.86k
  if (opt_state->errbuf != NULL) {
2418
1.86k
    va_start(ap, fmt);
2419
1.86k
    (void)vsnprintf(opt_state->errbuf,
2420
1.86k
        PCAP_ERRBUF_SIZE, fmt, ap);
2421
1.86k
    va_end(ap);
2422
1.86k
  }
2423
1.86k
  longjmp(opt_state->top_ctx, 1);
2424
  /* NOTREACHED */
2425
#ifdef _AIX
2426
  PCAP_UNREACHABLE
2427
#endif /* _AIX */
2428
1.86k
}
2429
2430
/*
2431
 * Return the number of stmts in 's'.
2432
 */
2433
static u_int
2434
slength(struct slist *s)
2435
6.51M
{
2436
6.51M
  u_int n = 0;
2437
2438
23.4M
  for (; s; s = s->next)
2439
16.9M
    if (s->s.code != NOP)
2440
15.9M
      ++n;
2441
6.51M
  return n;
2442
6.51M
}
2443
2444
/*
2445
 * Return the number of nodes reachable by 'p'.
2446
 * All nodes should be initially unmarked.
2447
 */
2448
static int
2449
count_blocks(struct icode *ic, struct block *p)
2450
2.04M
{
2451
2.04M
  if (p == 0 || isMarked(ic, p))
2452
1.04M
    return 0;
2453
1.00M
  Mark(ic, p);
2454
1.00M
  return count_blocks(ic, JT(p)) + count_blocks(ic, JF(p)) + 1;
2455
2.04M
}
2456
2457
/*
2458
 * Do a depth first search on the flow graph, numbering the
2459
 * basic blocks, and entering them into the 'blocks' array.
2460
 */
2461
static void
2462
number_blks_r(opt_state_t *opt_state, struct icode *ic, struct block *p)
2463
2.04M
{
2464
2.04M
  u_int n;
2465
2466
2.04M
  if (p == 0 || isMarked(ic, p))
2467
1.04M
    return;
2468
2469
1.00M
  Mark(ic, p);
2470
1.00M
  n = opt_state->n_blocks++;
2471
1.00M
  if (opt_state->n_blocks == 0) {
2472
    /*
2473
     * Overflow.
2474
     */
2475
0
    opt_error(opt_state, "filter is too complex to optimize");
2476
0
  }
2477
1.00M
  p->id = n;
2478
1.00M
  opt_state->blocks[n] = p;
2479
2480
1.00M
  number_blks_r(opt_state, ic, JT(p));
2481
1.00M
  number_blks_r(opt_state, ic, JF(p));
2482
1.00M
}
2483
2484
/*
2485
 * Return the number of stmts in the flowgraph reachable by 'p'.
2486
 * The nodes should be unmarked before calling.
2487
 *
2488
 * Note that "stmts" means "instructions", and that this includes
2489
 *
2490
 *  side-effect statements in 'p' (slength(p->stmts));
2491
 *
2492
 *  statements in the true branch from 'p' (count_stmts(JT(p)));
2493
 *
2494
 *  statements in the false branch from 'p' (count_stmts(JF(p)));
2495
 *
2496
 *  the conditional jump itself (1);
2497
 *
2498
 *  an extra long jump if the true branch requires it (p->longjt);
2499
 *
2500
 *  an extra long jump if the false branch requires it (p->longjf).
2501
 */
2502
static u_int
2503
count_stmts(struct icode *ic, struct block *p)
2504
6.49M
{
2505
6.49M
  u_int n;
2506
2507
6.49M
  if (p == 0 || isMarked(ic, p))
2508
3.27M
    return 0;
2509
3.22M
  Mark(ic, p);
2510
3.22M
  n = count_stmts(ic, JT(p)) + count_stmts(ic, JF(p));
2511
3.22M
  return slength(p->stmts) + n + 1 + p->longjt + p->longjf;
2512
6.49M
}
2513
2514
/*
2515
 * Allocate memory.  All allocation is done before optimization
2516
 * is begun.  A linear bound on the size of all data structures is computed
2517
 * from the total number of blocks and/or statements.
2518
 */
2519
static void
2520
opt_init(opt_state_t *opt_state, struct icode *ic)
2521
44.9k
{
2522
44.9k
  bpf_u_int32 *p;
2523
44.9k
  int i, n, max_stmts;
2524
44.9k
  u_int product;
2525
44.9k
  size_t block_memsize, edge_memsize;
2526
2527
  /*
2528
   * First, count the blocks, so we can malloc an array to map
2529
   * block number to block.  Then, put the blocks into the array.
2530
   */
2531
44.9k
  unMarkAll(ic);
2532
44.9k
  n = count_blocks(ic, ic->root);
2533
44.9k
  opt_state->blocks = (struct block **)calloc(n, sizeof(*opt_state->blocks));
2534
44.9k
  if (opt_state->blocks == NULL)
2535
0
    opt_error(opt_state, "malloc");
2536
44.9k
  unMarkAll(ic);
2537
44.9k
  opt_state->n_blocks = 0;
2538
44.9k
  number_blks_r(opt_state, ic, ic->root);
2539
2540
  /*
2541
   * This "should not happen".
2542
   */
2543
44.9k
  if (opt_state->n_blocks == 0)
2544
0
    opt_error(opt_state, "filter has no instructions; please report this as a libpcap issue");
2545
2546
44.9k
  opt_state->n_edges = 2 * opt_state->n_blocks;
2547
44.9k
  if ((opt_state->n_edges / 2) != opt_state->n_blocks) {
2548
    /*
2549
     * Overflow.
2550
     */
2551
0
    opt_error(opt_state, "filter is too complex to optimize");
2552
0
  }
2553
44.9k
  opt_state->edges = (struct edge **)calloc(opt_state->n_edges, sizeof(*opt_state->edges));
2554
44.9k
  if (opt_state->edges == NULL) {
2555
0
    opt_error(opt_state, "malloc");
2556
0
  }
2557
2558
  /*
2559
   * The number of levels is bounded by the number of nodes.
2560
   */
2561
44.9k
  opt_state->levels = (struct block **)calloc(opt_state->n_blocks, sizeof(*opt_state->levels));
2562
44.9k
  if (opt_state->levels == NULL) {
2563
0
    opt_error(opt_state, "malloc");
2564
0
  }
2565
2566
44.9k
  opt_state->edgewords = opt_state->n_edges / BITS_PER_WORD + 1;
2567
44.9k
  opt_state->nodewords = opt_state->n_blocks / BITS_PER_WORD + 1;
2568
2569
  /*
2570
   * Make sure opt_state->n_blocks * opt_state->nodewords fits
2571
   * in a u_int; we use it as a u_int number-of-iterations
2572
   * value.
2573
   */
2574
44.9k
  product = opt_state->n_blocks * opt_state->nodewords;
2575
44.9k
  if ((product / opt_state->n_blocks) != opt_state->nodewords) {
2576
    /*
2577
     * XXX - just punt and don't try to optimize?
2578
     * In practice, this is unlikely to happen with
2579
     * a normal filter.
2580
     */
2581
0
    opt_error(opt_state, "filter is too complex to optimize");
2582
0
  }
2583
2584
  /*
2585
   * Make sure the total memory required for that doesn't
2586
   * overflow.
2587
   */
2588
44.9k
  block_memsize = (size_t)2 * product * sizeof(*opt_state->space);
2589
44.9k
  if ((block_memsize / product) != 2 * sizeof(*opt_state->space)) {
2590
0
    opt_error(opt_state, "filter is too complex to optimize");
2591
0
  }
2592
2593
  /*
2594
   * Make sure opt_state->n_edges * opt_state->edgewords fits
2595
   * in a u_int; we use it as a u_int number-of-iterations
2596
   * value.
2597
   */
2598
44.9k
  product = opt_state->n_edges * opt_state->edgewords;
2599
44.9k
  if ((product / opt_state->n_edges) != opt_state->edgewords) {
2600
0
    opt_error(opt_state, "filter is too complex to optimize");
2601
0
  }
2602
2603
  /*
2604
   * Make sure the total memory required for that doesn't
2605
   * overflow.
2606
   */
2607
44.9k
  edge_memsize = (size_t)product * sizeof(*opt_state->space);
2608
44.9k
  if (edge_memsize / product != sizeof(*opt_state->space)) {
2609
0
    opt_error(opt_state, "filter is too complex to optimize");
2610
0
  }
2611
2612
  /*
2613
   * Make sure the total memory required for both of them doesn't
2614
   * overflow.
2615
   */
2616
44.9k
  if (block_memsize > SIZE_MAX - edge_memsize) {
2617
0
    opt_error(opt_state, "filter is too complex to optimize");
2618
0
  }
2619
2620
  /* XXX */
2621
44.9k
  opt_state->space = (bpf_u_int32 *)malloc(block_memsize + edge_memsize);
2622
44.9k
  if (opt_state->space == NULL) {
2623
0
    opt_error(opt_state, "malloc");
2624
0
  }
2625
44.9k
  p = opt_state->space;
2626
44.9k
  opt_state->all_dom_sets = p;
2627
1.04M
  for (i = 0; i < n; ++i) {
2628
1.00M
    opt_state->blocks[i]->dom = p;
2629
1.00M
    p += opt_state->nodewords;
2630
1.00M
  }
2631
44.9k
  opt_state->all_closure_sets = p;
2632
1.04M
  for (i = 0; i < n; ++i) {
2633
1.00M
    opt_state->blocks[i]->closure = p;
2634
1.00M
    p += opt_state->nodewords;
2635
1.00M
  }
2636
44.9k
  opt_state->all_edge_sets = p;
2637
1.04M
  for (i = 0; i < n; ++i) {
2638
1.00M
    register struct block *b = opt_state->blocks[i];
2639
2640
1.00M
    b->et.edom = p;
2641
1.00M
    p += opt_state->edgewords;
2642
1.00M
    b->ef.edom = p;
2643
1.00M
    p += opt_state->edgewords;
2644
1.00M
    b->et.id = i;
2645
1.00M
    opt_state->edges[i] = &b->et;
2646
1.00M
    b->ef.id = opt_state->n_blocks + i;
2647
1.00M
    opt_state->edges[opt_state->n_blocks + i] = &b->ef;
2648
1.00M
    b->et.pred = b;
2649
1.00M
    b->ef.pred = b;
2650
1.00M
  }
2651
44.9k
  max_stmts = 0;
2652
1.04M
  for (i = 0; i < n; ++i)
2653
1.00M
    max_stmts += slength(opt_state->blocks[i]->stmts) + 1;
2654
  /*
2655
   * We allocate at most 3 value numbers per statement,
2656
   * so this is an upper bound on the number of valnodes
2657
   * we'll need.
2658
   */
2659
44.9k
  opt_state->maxval = 3 * max_stmts;
2660
44.9k
  opt_state->vmap = (struct vmapinfo *)calloc(opt_state->maxval, sizeof(*opt_state->vmap));
2661
44.9k
  if (opt_state->vmap == NULL) {
2662
0
    opt_error(opt_state, "malloc");
2663
0
  }
2664
44.9k
  opt_state->vnode_base = (struct valnode *)calloc(opt_state->maxval, sizeof(*opt_state->vnode_base));
2665
44.9k
  if (opt_state->vnode_base == NULL) {
2666
0
    opt_error(opt_state, "malloc");
2667
0
  }
2668
44.9k
}
2669
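The sizing checks above all use the same portable idiom: unsigned multiplication wraps modulo 2^N rather than trapping, so dividing the product by one factor and comparing with the other detects the wrap after the fact. A standalone sketch (assuming the usual 32-bit unsigned int); opt_init() can omit the zero guard because n_blocks and n_edges are already known to be nonzero at those points:

#include <stdio.h>

/* Returns 1 if a * b wrapped; on success stores the product. */
static int
mul_overflows(unsigned int a, unsigned int b, unsigned int *prod)
{
    unsigned int p = a * b;     /* wraps modulo 2^32, never traps */

    if (a != 0 && p / a != b)
        return 1;
    *prod = p;
    return 0;
}

int
main(void)
{
    unsigned int p;

    printf("%d\n", mul_overflows(1000u, 1000u, &p));        /* 0: fits */
    printf("%d\n", mul_overflows(0x10000u, 0x10000u, &p));  /* 1: 2^32 wraps */
    return 0;
}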
2670
/*
2671
 * This is only used when supporting optimizer debugging.  It is
2672
 * global state, so do *not* do more than one compile in parallel
2673
 * and expect it to provide meaningful information.
2674
 */
2675
#ifdef BDEBUG
2676
int bids[NBIDS];
2677
#endif
2678
2679
static void PCAP_NORETURN conv_error(conv_state_t *, const char *, ...)
2680
    PCAP_PRINTFLIKE(2, 3);
2681
2682
/*
2683
 * Returns true if successful.  Returns false if a branch has
2684
 * an offset that is too large.  If so, we have marked that
2685
 * branch so that on a subsequent iteration, it will be treated
2686
 * properly.
2687
 */
2688
static int
2689
convert_code_r(conv_state_t *conv_state, struct icode *ic, struct block *p)
2690
5.24M
{
2691
5.24M
  struct bpf_insn *dst;
2692
5.24M
  struct slist *src;
2693
5.24M
  u_int slen;
2694
5.24M
  u_int off;
2695
5.24M
  struct slist **offset = NULL;
2696
2697
5.24M
  if (p == 0 || isMarked(ic, p))
2698
2.46M
    return (1);
2699
2.77M
  Mark(ic, p);
2700
2701
2.77M
  if (convert_code_r(conv_state, ic, JF(p)) == 0)
2702
349k
    return (0);
2703
2.42M
  if (convert_code_r(conv_state, ic, JT(p)) == 0)
2704
131k
    return (0);
2705
2706
2.29M
  slen = slength(p->stmts);
2707
2.29M
  dst = conv_state->ftail -= (slen + 1 + p->longjt + p->longjf);
2708
    /* inflate length by any extra jumps */
2709
2710
2.29M
  p->offset = (int)(dst - conv_state->fstart);
2711
2712
  /* generate offset[] for convenience  */
2713
2.29M
  if (slen) {
2714
2.17M
    offset = (struct slist **)calloc(slen, sizeof(struct slist *));
2715
2.17M
    if (!offset) {
2716
0
      conv_error(conv_state, "not enough core");
2717
      /*NOTREACHED*/
2718
0
    }
2719
2.17M
  }
2720
2.29M
  src = p->stmts;
2721
7.28M
  for (off = 0; off < slen && src; off++) {
2722
#if 0
2723
    printf("off=%d src=%x\n", off, src);
2724
#endif
2725
4.99M
    offset[off] = src;
2726
4.99M
    src = src->next;
2727
4.99M
  }
2728
2729
2.29M
  off = 0;
2730
7.79M
  for (src = p->stmts; src; src = src->next) {
2731
5.49M
    if (src->s.code == NOP)
2732
502k
      continue;
2733
4.99M
    dst->code = (u_short)src->s.code;
2734
4.99M
    dst->k = src->s.k;
2735
2736
    /* fill block-local relative jump */
2737
4.99M
    if (BPF_CLASS(src->s.code) != BPF_JMP || src->s.code == (BPF_JMP|BPF_JA)) {
2738
#if 0
2739
      if (src->s.jt || src->s.jf) {
2740
        free(offset);
2741
        conv_error(conv_state, "illegal jmp destination");
2742
        /*NOTREACHED*/
2743
      }
2744
#endif
2745
4.97M
      goto filled;
2746
4.97M
    }
2747
18.3k
    if (off == slen - 2)  /*???*/
2748
0
      goto filled;
2749
2750
18.3k
      {
2751
18.3k
    u_int i;
2752
18.3k
    int jt, jf;
2753
18.3k
    const char ljerr[] = "%s for block-local relative jump: off=%d";
2754
2755
#if 0
2756
    printf("code=%x off=%d %x %x\n", src->s.code,
2757
      off, src->s.jt, src->s.jf);
2758
#endif
2759
2760
18.3k
    if (!src->s.jt || !src->s.jf) {
2761
0
      free(offset);
2762
0
      conv_error(conv_state, ljerr, "no jmp destination", off);
2763
      /*NOTREACHED*/
2764
0
    }
2765
2766
18.3k
    jt = jf = 0;
2767
672k
    for (i = 0; i < slen; i++) {
2768
654k
      if (offset[i] == src->s.jt) {
2769
18.3k
        if (jt) {
2770
0
          free(offset);
2771
0
          conv_error(conv_state, ljerr, "multiple matches", off);
2772
          /*NOTREACHED*/
2773
0
        }
2774
2775
18.3k
        if (i - off - 1 >= 256) {
2776
0
          free(offset);
2777
0
          conv_error(conv_state, ljerr, "out-of-range jump", off);
2778
          /*NOTREACHED*/
2779
0
        }
2780
18.3k
        dst->jt = (u_char)(i - off - 1);
2781
18.3k
        jt++;
2782
18.3k
      }
2783
654k
      if (offset[i] == src->s.jf) {
2784
18.3k
        if (jf) {
2785
0
          free(offset);
2786
0
          conv_error(conv_state, ljerr, "multiple matches", off);
2787
          /*NOTREACHED*/
2788
0
        }
2789
18.3k
        if (i - off - 1 >= 256) {
2790
0
          free(offset);
2791
0
          conv_error(conv_state, ljerr, "out-of-range jump", off);
2792
          /*NOTREACHED*/
2793
0
        }
2794
18.3k
        dst->jf = (u_char)(i - off - 1);
2795
18.3k
        jf++;
2796
18.3k
      }
2797
654k
    }
2798
18.3k
    if (!jt || !jf) {
2799
0
      free(offset);
2800
0
      conv_error(conv_state, ljerr, "no destination found", off);
2801
      /*NOTREACHED*/
2802
0
    }
2803
18.3k
      }
2804
4.99M
filled:
2805
4.99M
    ++dst;
2806
4.99M
    ++off;
2807
4.99M
  }
2808
2.29M
  if (offset)
2809
2.17M
    free(offset);
2810
2811
#ifdef BDEBUG
2812
  if (dst - conv_state->fstart < NBIDS)
2813
    bids[dst - conv_state->fstart] = p->id + 1;
2814
#endif
2815
2.29M
  dst->code = (u_short)p->s.code;
2816
2.29M
  dst->k = p->s.k;
2817
2.29M
  if (JT(p)) {
2818
    /* number of extra jumps inserted */
2819
2.22M
    u_char extrajmps = 0;
2820
2.22M
    off = JT(p)->offset - (p->offset + slen) - 1;
2821
2.22M
    if (off >= 256) {
2822
        /* offset too large for branch, must add a jump */
2823
72.3k
        if (p->longjt == 0) {
2824
      /* mark this instruction and retry */
2825
4.40k
      p->longjt++;
2826
4.40k
      return(0);
2827
4.40k
        }
2828
67.9k
        dst->jt = extrajmps;
2829
67.9k
        extrajmps++;
2830
67.9k
        dst[extrajmps].code = BPF_JMP|BPF_JA;
2831
67.9k
        dst[extrajmps].k = off - extrajmps;
2832
67.9k
    }
2833
2.14M
    else
2834
2.14M
        dst->jt = (u_char)off;
2835
2.21M
    off = JF(p)->offset - (p->offset + slen) - 1;
2836
2.21M
    if (off >= 256) {
2837
        /* offset too large for branch, must add a jump */
2838
146k
        if (p->longjf == 0) {
2839
      /* mark this instruction and retry */
2840
7.90k
      p->longjf++;
2841
7.90k
      return(0);
2842
7.90k
        }
2843
        /* branch if F to following jump */
2844
        /* if two jumps are inserted, F goes to second one */
2845
138k
        dst->jf = extrajmps;
2846
138k
        extrajmps++;
2847
138k
        dst[extrajmps].code = BPF_JMP|BPF_JA;
2848
138k
        dst[extrajmps].k = off - extrajmps;
2849
138k
    }
2850
2.06M
    else
2851
2.06M
        dst->jf = (u_char)off;
2852
2.21M
  }
2853
2.28M
  return (1);
2854
2.29M
}
2855
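The jt and jf fields of a classic BPF conditional are one byte wide, which is why convert_code_r() returns 0 and forces a re-layout when a target is more than 255 instructions away: on the retry, the conditional hops zero steps to an adjacent BPF_JA that carries the full 32-bit offset. A sketch of the emitted pair using the real struct bpf_insn but a hypothetical helper name:

#include <pcap.h>

/*
 * After the retry, the "true" branch lands on the next instruction,
 * an unconditional long jump; the JA's operand is reduced by one
 * because the inserted jump itself occupies a slot.
 */
static void
emit_long_true_branch(struct bpf_insn *dst, bpf_u_int32 off)
{
    dst->jt = 0;                    /* hop to the very next insn */
    dst[1].code = BPF_JMP | BPF_JA;
    dst[1].k = off - 1;             /* account for the extra slot */
}

int
main(void)
{
    struct bpf_insn ins[2] = { { BPF_JMP | BPF_JEQ | BPF_K, 0, 0, 0x800 },
                               { 0, 0, 0, 0 } };

    emit_long_true_branch(ins, 300);
    return ins[0].jt == 0 && ins[1].k == 299 ? 0 : 1;
}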
2856
2857
/*
2858
 * Convert flowgraph intermediate representation to the
2859
 * BPF array representation.  Set *lenp to the number of instructions.
2860
 *
2861
 * This routine does *NOT* leak the memory pointed to by fp.  It *must
2862
 * not* do free(fp) before returning fp; doing so would make no sense,
2863
 * as the BPF array pointed to by the return value of icode_to_fcode()
2864
 * must be valid - it's being returned for use in a bpf_program structure.
2865
 *
2866
 * If it appears that icode_to_fcode() is leaking, the problem is that
2867
 * the program using pcap_compile() is failing to free the memory in
2868
 * the BPF program when it's done - the leak is in the program, not in
2869
 * the routine that happens to be allocating the memory.  (By analogy, if
2870
 * a program calls fopen() without ever calling fclose() on the FILE *,
2871
 * it will leak the FILE structure; the leak is not in fopen(), it's in
2872
 * the program.)  Change the program to use pcap_freecode() when it's
2873
 * done with the filter program.  See the pcap man page.
2874
 */
2875
struct bpf_insn *
2876
icode_to_fcode(struct icode *ic, struct block *root, u_int *lenp,
2877
    char *errbuf)
2878
33.1k
{
2879
33.1k
  u_int n;
2880
33.1k
  struct bpf_insn *fp;
2881
33.1k
  conv_state_t conv_state;
2882
2883
33.1k
  conv_state.fstart = NULL;
2884
33.1k
  conv_state.errbuf = errbuf;
2885
33.1k
  if (setjmp(conv_state.top_ctx) != 0) {
2886
0
    free(conv_state.fstart);
2887
0
    return NULL;
2888
0
  }
2889
2890
  /*
2891
   * Loop doing convert_code_r() until no branches remain
2892
   * with too-large offsets.
2893
   */
2894
45.5k
  for (;;) {
2895
45.5k
      unMarkAll(ic);
2896
45.5k
      n = *lenp = count_stmts(ic, root);
2897
2898
45.5k
      fp = (struct bpf_insn *)malloc(sizeof(*fp) * n);
2899
45.5k
      if (fp == NULL) {
2900
0
    (void)snprintf(errbuf, PCAP_ERRBUF_SIZE,
2901
0
        "malloc");
2902
0
    return NULL;
2903
0
      }
2904
45.5k
      memset((char *)fp, 0, sizeof(*fp) * n);
2905
45.5k
      conv_state.fstart = fp;
2906
45.5k
      conv_state.ftail = fp + n;
2907
2908
45.5k
      unMarkAll(ic);
2909
45.5k
      if (convert_code_r(&conv_state, ic, root))
2910
33.1k
    break;
2911
12.3k
      free(fp);
2912
12.3k
  }
2913
2914
33.1k
  return fp;
2915
33.1k
}
2916
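A short usage example with the public API: pcap_compile() runs this optimizer when its optimize argument is nonzero, and, per the ownership note above, the caller releases the resulting instruction array with pcap_freecode():

#include <pcap.h>
#include <stdio.h>

int
main(void)
{
    pcap_t *p = pcap_open_dead(DLT_EN10MB, 65535);
    struct bpf_program fp;

    if (p == NULL)
        return 1;
    if (pcap_compile(p, &fp, "ip src host 1.1.1.1", 1,
        PCAP_NETMASK_UNKNOWN) == -1) {
        fprintf(stderr, "pcap_compile: %s\n", pcap_geterr(p));
        pcap_close(p);
        return 1;
    }
    bpf_dump(&fp, 1);   /* print the optimized instructions */
    pcap_freecode(&fp); /* the caller owns the program memory */
    pcap_close(p);
    return 0;
}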
2917
/*
2918
 * For icode_to_fcode() errors.
2919
 */
2920
static void PCAP_NORETURN
2921
conv_error(conv_state_t *conv_state, const char *fmt, ...)
2922
0
{
2923
0
  va_list ap;
2924
2925
0
  va_start(ap, fmt);
2926
0
  (void)vsnprintf(conv_state->errbuf,
2927
0
      PCAP_ERRBUF_SIZE, fmt, ap);
2928
0
  va_end(ap);
2929
0
  longjmp(conv_state->top_ctx, 1);
2930
  /* NOTREACHED */
2931
#ifdef _AIX
2932
  PCAP_UNREACHABLE
2933
#endif /* _AIX */
2934
0
}
2935
2936
/*
2937
 * Make a copy of a BPF program and put it in the "fcode" member of
2938
 * a "pcap_t".
2939
 *
2940
 * If we fail to allocate memory for the copy, fill in the "errbuf"
2941
 * member of the "pcap_t" with an error message, and return -1;
2942
 * otherwise, return 0.
2943
 */
2944
int
2945
install_bpf_program(pcap_t *p, struct bpf_program *fp)
2946
0
{
2947
0
  size_t prog_size;
2948
2949
  /*
2950
   * Validate the program.
2951
   */
2952
0
  if (!pcap_validate_filter(fp->bf_insns, fp->bf_len)) {
2953
0
    snprintf(p->errbuf, sizeof(p->errbuf),
2954
0
      "BPF program is not valid");
2955
0
    return (-1);
2956
0
  }
2957
2958
  /*
2959
   * Free up any already installed program.
2960
   */
2961
0
  pcap_freecode(&p->fcode);
2962
2963
0
  prog_size = sizeof(*fp->bf_insns) * fp->bf_len;
2964
0
  p->fcode.bf_len = fp->bf_len;
2965
0
  p->fcode.bf_insns = (struct bpf_insn *)malloc(prog_size);
2966
0
  if (p->fcode.bf_insns == NULL) {
2967
0
    pcap_fmt_errmsg_for_errno(p->errbuf, sizeof(p->errbuf),
2968
0
        errno, "malloc");
2969
0
    return (-1);
2970
0
  }
2971
0
  memcpy(p->fcode.bf_insns, fp->bf_insns, prog_size);
2972
0
  return (0);
2973
0
}
2974
2975
#ifdef BDEBUG
2976
static void
2977
dot_dump_node(struct icode *ic, struct block *block, struct bpf_program *prog,
2978
    FILE *out)
2979
{
2980
  int icount, noffset;
2981
  int i;
2982
2983
  if (block == NULL || isMarked(ic, block))
2984
    return;
2985
  Mark(ic, block);
2986
2987
  icount = slength(block->stmts) + 1 + block->longjt + block->longjf;
2988
  noffset = min(block->offset + icount, (int)prog->bf_len);
2989
2990
  fprintf(out, "\tblock%u [shape=ellipse, id=\"block-%u\" label=\"BLOCK%u\\n", block->id, block->id, block->id);
2991
  for (i = block->offset; i < noffset; i++) {
2992
    fprintf(out, "\\n%s", bpf_image(prog->bf_insns + i, i));
2993
  }
2994
  fprintf(out, "\" tooltip=\"");
2995
  for (i = 0; i < BPF_MEMWORDS; i++)
2996
    if (block->val[i] != VAL_UNKNOWN)
2997
      fprintf(out, "val[%d]=%d ", i, block->val[i]);
2998
  fprintf(out, "val[A]=%d ", block->val[A_ATOM]);
2999
  fprintf(out, "val[X]=%d", block->val[X_ATOM]);
3000
  fprintf(out, "\"");
3001
  if (JT(block) == NULL)
3002
    fprintf(out, ", peripheries=2");
3003
  fprintf(out, "];\n");
3004
3005
  dot_dump_node(ic, JT(block), prog, out);
3006
  dot_dump_node(ic, JF(block), prog, out);
3007
}
3008
3009
static void
3010
dot_dump_edge(struct icode *ic, struct block *block, FILE *out)
3011
{
3012
  if (block == NULL || isMarked(ic, block))
3013
    return;
3014
  Mark(ic, block);
3015
3016
  if (JT(block)) {
3017
    fprintf(out, "\t\"block%u\":se -> \"block%u\":n [label=\"T\"]; \n",
3018
        block->id, JT(block)->id);
3019
    fprintf(out, "\t\"block%u\":sw -> \"block%u\":n [label=\"F\"]; \n",
3020
         block->id, JF(block)->id);
3021
  }
3022
  dot_dump_edge(ic, JT(block), out);
3023
  dot_dump_edge(ic, JF(block), out);
3024
}
3025
3026
/* Output the block CFG using the graphviz/DOT language.
3026
 * In the CFG, each block's code, the value index of each register
3027
 * at EXIT, and the jump relationships are shown.
3029
 *
3030
 * example DOT for BPF `ip src host 1.1.1.1' is:
3031
    digraph BPF {
3032
      block0 [shape=ellipse, id="block-0" label="BLOCK0\n\n(000) ldh      [12]\n(001) jeq      #0x800           jt 2  jf 5" tooltip="val[A]=0 val[X]=0"];
3033
      block1 [shape=ellipse, id="block-1" label="BLOCK1\n\n(002) ld       [26]\n(003) jeq      #0x1010101       jt 4  jf 5" tooltip="val[A]=0 val[X]=0"];
3034
      block2 [shape=ellipse, id="block-2" label="BLOCK2\n\n(004) ret      #68" tooltip="val[A]=0 val[X]=0", peripheries=2];
3035
      block3 [shape=ellipse, id="block-3" label="BLOCK3\n\n(005) ret      #0" tooltip="val[A]=0 val[X]=0", peripheries=2];
3036
      "block0":se -> "block1":n [label="T"];
3037
      "block0":sw -> "block3":n [label="F"];
3038
      "block1":se -> "block2":n [label="T"];
3039
      "block1":sw -> "block3":n [label="F"];
3040
    }
3041
 *
3042
 *  After installing graphviz from https://www.graphviz.org/, save the output as bpf.dot
3043
 *  and run `dot -Tpng -O bpf.dot' to draw the graph.
3044
 */
3045
static int
3046
dot_dump(struct icode *ic, char *errbuf)
3047
{
3048
  struct bpf_program f;
3049
  FILE *out = stdout;
3050
3051
  memset(bids, 0, sizeof bids);
3052
  f.bf_insns = icode_to_fcode(ic, ic->root, &f.bf_len, errbuf);
3053
  if (f.bf_insns == NULL)
3054
    return -1;
3055
3056
  fprintf(out, "digraph BPF {\n");
3057
  unMarkAll(ic);
3058
  dot_dump_node(ic, ic->root, &f, out);
3059
  unMarkAll(ic);
3060
  dot_dump_edge(ic, ic->root, out);
3061
  fprintf(out, "}\n");
3062
3063
  free((char *)f.bf_insns);
3064
  return 0;
3065
}
3066
3067
static int
3068
plain_dump(struct icode *ic, char *errbuf)
3069
{
3070
  struct bpf_program f;
3071
3072
  memset(bids, 0, sizeof bids);
3073
  f.bf_insns = icode_to_fcode(ic, ic->root, &f.bf_len, errbuf);
3074
  if (f.bf_insns == NULL)
3075
    return -1;
3076
  bpf_dump(&f, 1);
3077
  putchar('\n');
3078
  free((char *)f.bf_insns);
3079
  return 0;
3080
}
3081
3082
static void
3083
opt_dump(opt_state_t *opt_state, struct icode *ic)
3084
{
3085
  int status;
3086
  char errbuf[PCAP_ERRBUF_SIZE];
3087
3088
  /*
3089
   * If the CFG, in DOT format, is requested, output it rather than
3090
   * the code that would be generated from that graph.
3091
   */
3092
  if (pcap_print_dot_graph)
3093
    status = dot_dump(ic, errbuf);
3094
  else
3095
    status = plain_dump(ic, errbuf);
3096
  if (status == -1)
3097
    opt_error(opt_state, "opt_dump: icode_to_fcode failed: %s", errbuf);
3098
}
3099
#endif