Coverage Report

Created: 2023-05-18 19:08

/src/libpcap/optimize.c
Line | Count | Source
1
/*
2
 * Copyright (c) 1988, 1989, 1990, 1991, 1993, 1994, 1995, 1996
3
 *  The Regents of the University of California.  All rights reserved.
4
 *
5
 * Redistribution and use in source and binary forms, with or without
6
 * modification, are permitted provided that: (1) source code distributions
7
 * retain the above copyright notice and this paragraph in its entirety, (2)
8
 * distributions including binary code include the above copyright notice and
9
 * this paragraph in its entirety in the documentation or other materials
10
 * provided with the distribution, and (3) all advertising materials mentioning
11
 * features or use of this software display the following acknowledgement:
12
 * ``This product includes software developed by the University of California,
13
 * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
14
 * the University nor the names of its contributors may be used to endorse
15
 * or promote products derived from this software without specific prior
16
 * written permission.
17
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
18
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
19
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
20
 *
21
 *  Optimization module for BPF code intermediate representation.
22
 */
23
24
#ifdef HAVE_CONFIG_H
25
#include <config.h>
26
#endif
27
28
#include <pcap-types.h>
29
30
#include <stdio.h>
31
#include <stdlib.h>
32
#include <memory.h>
33
#include <setjmp.h>
34
#include <string.h>
35
#include <limits.h> /* for SIZE_MAX */
36
#include <errno.h>
37
38
#include "pcap-int.h"
39
40
#include "gencode.h"
41
#include "optimize.h"
42
#include "diag-control.h"
43
44
#ifdef HAVE_OS_PROTO_H
45
#include "os-proto.h"
46
#endif
47
48
#ifdef BDEBUG
49
/*
50
 * The internal "debug printout" flag for the filter expression optimizer.
51
 * The code to print that stuff is present only if BDEBUG is defined, so
52
 * the flag, and the routine to set it, are defined only if BDEBUG is
53
 * defined.
54
 */
55
static int pcap_optimizer_debug;
56
57
/*
58
 * Routine to set that flag.
59
 *
60
 * This is intended for libpcap developers, not for general use.
61
 * If you want to set this flag in a program, you'll have to declare this
62
 * routine yourself, with the appropriate DLL import attribute on Windows;
63
 * it's not declared in any header file, and won't be declared in any
64
 * header file provided by libpcap.
65
 */
66
PCAP_API void pcap_set_optimizer_debug(int value);
67
68
PCAP_API_DEF void
69
pcap_set_optimizer_debug(int value)
70
{
71
  pcap_optimizer_debug = value;
72
}
73
74
/*
75
 * The internal "print dot graph" flag for the filter expression optimizer.
76
 * The code to print that stuff is present only if BDEBUG is defined, so
77
 * the flag, and the routine to set it, are defined only if BDEBUG is
78
 * defined.
79
 */
80
static int pcap_print_dot_graph;
81
82
/*
83
 * Routine to set that flag.
84
 *
85
 * This is intended for libpcap developers, not for general use.
86
 * If you want to set this flag in a program, you'll have to declare this
87
 * routine yourself, with the appropriate DLL import attribute on Windows;
88
 * it's not declared in any header file, and won't be declared in any
89
 * header file provided by libpcap.
90
 */
91
PCAP_API void pcap_set_print_dot_graph(int value);
92
93
PCAP_API_DEF void
94
pcap_set_print_dot_graph(int value)
95
{
96
  pcap_print_dot_graph = value;
97
}
98
99
#endif
100
101
/*
102
 * lowest_set_bit().
103
 *
104
 * Takes a 32-bit integer as an argument.
105
 *
106
 * If handed a non-zero value, returns the index of the lowest set bit,
107
 * counting upwards from zero.
108
 *
109
 * If handed zero, the results are platform- and compiler-dependent.
110
 * Keep it out of the light, don't give it any water, don't feed it
111
 * after midnight, and don't pass zero to it.
112
 *
113
 * This is the same as the count of trailing zeroes in the word.
114
 */
115
#if PCAP_IS_AT_LEAST_GNUC_VERSION(3,4)
116
  /*
117
   * GCC 3.4 and later; we have __builtin_ctz().
118
   */
119
2.50M
  #define lowest_set_bit(mask) ((u_int)__builtin_ctz(mask))
120
#elif defined(_MSC_VER)
121
  /*
122
   * Visual Studio; we support only 2005 and later, so use
123
   * _BitScanForward().
124
   */
125
#include <intrin.h>
126
127
#ifndef __clang__
128
#pragma intrinsic(_BitScanForward)
129
#endif
130
131
static __forceinline u_int
132
lowest_set_bit(int mask)
133
{
134
  unsigned long bit;
135
136
  /*
137
   * Don't sign-extend mask if long is longer than int.
138
   * (It's currently not, in MSVC, even on 64-bit platforms, but....)
139
   */
140
  if (_BitScanForward(&bit, (unsigned int)mask) == 0)
141
    abort();  /* mask is zero */
142
  return (u_int)bit;
143
}
144
#elif defined(MSDOS) && defined(__DJGPP__)
145
  /*
146
   * MS-DOS with DJGPP, which declares ffs() in <string.h>, which
147
   * we've already included.
148
   */
149
  #define lowest_set_bit(mask)  ((u_int)(ffs((mask)) - 1))
150
#elif (defined(MSDOS) && defined(__WATCOMC__)) || defined(STRINGS_H_DECLARES_FFS)
151
  /*
152
   * MS-DOS with Watcom C, which has <strings.h> and declares ffs() there,
153
 * or some other platform (UN*X conforming to a sufficiently recent version
154
   * of the Single UNIX Specification).
155
   */
156
  #include <strings.h>
157
  #define lowest_set_bit(mask)  (u_int)((ffs((mask)) - 1))
158
#else
159
/*
160
 * None of the above.
161
 * Use a perfect-hash-function-based function.
162
 */
163
static u_int
164
lowest_set_bit(int mask)
165
{
166
  unsigned int v = (unsigned int)mask;
167
168
  static const u_int MultiplyDeBruijnBitPosition[32] = {
169
    0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
170
    31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
171
  };
172
173
  /*
174
   * We strip off all but the lowermost set bit (v & ~v),
175
   * and perform a minimal perfect hash on it to look up the
176
   * number of low-order zero bits in a table.
177
   *
178
   * See:
179
   *
180
   *  http://7ooo.mooo.com/text/ComputingTrailingZerosHOWTO.pdf
181
   *
182
   *  http://supertech.csail.mit.edu/papers/debruijn.pdf
183
   */
184
  return (MultiplyDeBruijnBitPosition[((v & -v) * 0x077CB531U) >> 27]);
185
}
186
#endif
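
A quick standalone sanity check of the fallback above (a hypothetical demo program, not part of optimize.c; it reuses the same table and de Bruijn multiplier):

    #include <assert.h>

    static unsigned int
    ctz32_demo(unsigned int v)
    {
        /* same minimal-perfect-hash table as the fallback lowest_set_bit() */
        static const unsigned int tbl[32] = {
            0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
            31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
        };

        /* v & -v isolates the lowest set bit; multiplying by the de Bruijn
         * constant slides a distinct 5-bit pattern into the top bits for
         * each of the 32 possible single-bit values */
        return tbl[((v & -v) * 0x077CB531U) >> 27];
    }

    int main(void)
    {
        assert(ctz32_demo(1U) == 0);
        assert(ctz32_demo(40U) == 3);           /* 40 = 0b101000 */
        assert(ctz32_demo(0x80000000U) == 31);
        return 0;                               /* zero stays undefined here too */
    }
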
187
188
/*
189
 * Represents a deleted instruction.
190
 */
191
34.7M
#define NOP -1
192
193
/*
194
 * Register numbers for use-def values.
195
 * 0 through BPF_MEMWORDS-1 represent the corresponding scratch memory
196
 * location.  A_ATOM is the accumulator and X_ATOM is the index
197
 * register.
198
 */
199
18.4M
#define A_ATOM BPF_MEMWORDS
200
4.14M
#define X_ATOM (BPF_MEMWORDS+1)
201
202
/*
203
 * This define is used to represent *both* the accumulator and
204
 * x register in use-def computations.
205
 * Currently, the use-def code assumes only one definition per instruction.
206
 */
207
5.94M
#define AX_ATOM N_ATOMS
208
209
/*
210
 * These data structures are used in a Cocke and Schwartz style
211
 * value numbering scheme.  Since the flowgraph is acyclic,
212
 * exit values can be propagated from a node's predecessors
213
 * provided it is uniquely defined.
214
 */
215
struct valnode {
216
  int code;
217
  bpf_u_int32 v0, v1;
218
  int val;    /* the value number */
219
  struct valnode *next;
220
};
221
222
/* Integer constants mapped with the load immediate opcode. */
223
1.78M
#define K(i) F(opt_state, BPF_LD|BPF_IMM|BPF_W, i, 0U)
224
225
struct vmapinfo {
226
  int is_const;
227
  bpf_u_int32 const_val;
228
};
229
230
typedef struct {
231
  /*
232
   * Place to longjmp to on an error.
233
   */
234
  jmp_buf top_ctx;
235
236
  /*
237
   * The buffer into which to put error message.
238
   */
239
  char *errbuf;
240
241
  /*
242
   * A flag to indicate that further optimization is needed.
243
   * Iterative passes are continued until a given pass yields no
244
   * code simplification or branch movement.
245
   */
246
  int done;
247
248
  /*
249
   * XXX - detect loops that do nothing but repeated AND/OR pullups
250
   * and edge moves.
251
   * If 100 passes in a row do nothing but that, treat that as a
252
   * sign that we're in a cycle in which each pass just
253
   * shuffles the code around and we eventually
254
   * get back to the original configuration.
255
   *
256
   * XXX - we need a non-heuristic way of detecting, or preventing,
257
   * such a cycle.
258
   */
259
  int non_branch_movement_performed;
260
261
  u_int n_blocks;   /* number of blocks in the CFG; guaranteed to be > 0, as the program contains at least a RET instruction */
262
  struct block **blocks;
263
  u_int n_edges;    /* twice n_blocks, so guaranteed to be > 0 */
264
  struct edge **edges;
265
266
  /*
267
   * A bit vector set representation of the dominators.
268
   * We round up the set size to the next power of two.
269
   */
270
  u_int nodewords;  /* number of 32-bit words for a bit vector of "number of nodes" bits; guaranteed to be > 0 */
271
  u_int edgewords;  /* number of 32-bit words for a bit vector of "number of edges" bits; guaranteed to be > 0 */
272
  struct block **levels;
273
  bpf_u_int32 *space;
274
275
12.2M
#define BITS_PER_WORD (8*sizeof(bpf_u_int32))
276
/*
277
 * True if a is in uset {p}
278
 */
279
790k
#define SET_MEMBER(p, a) \
280
790k
((p)[(unsigned)(a) / BITS_PER_WORD] & ((bpf_u_int32)1 << ((unsigned)(a) % BITS_PER_WORD)))
281
282
/*
283
 * Add 'a' to uset p.
284
 */
285
4.06M
#define SET_INSERT(p, a) \
286
4.06M
(p)[(unsigned)(a) / BITS_PER_WORD] |= ((bpf_u_int32)1 << ((unsigned)(a) % BITS_PER_WORD))
287
288
/*
289
 * Delete 'a' from uset p.
290
 */
291
#define SET_DELETE(p, a) \
292
(p)[(unsigned)(a) / BITS_PER_WORD] &= ~((bpf_u_int32)1 << ((unsigned)(a) % BITS_PER_WORD))
293
294
/*
295
 * a := a intersect b
296
 * n must be guaranteed to be > 0
297
 */
298
4.63M
#define SET_INTERSECT(a, b, n)\
299
4.63M
{\
300
4.63M
  register bpf_u_int32 *_x = a, *_y = b;\
301
4.63M
  register u_int _n = n;\
302
11.0M
  do *_x++ &= *_y++; while (--_n != 0);\
303
4.63M
}
304
305
/*
306
 * a := a - b
307
 * n must be guaranteed to be > 0
308
 */
309
#define SET_SUBTRACT(a, b, n)\
310
{\
311
  register bpf_u_int32 *_x = a, *_y = b;\
312
  register u_int _n = n;\
313
  do *_x++ &=~ *_y++; while (--_n != 0);\
314
}
315
316
/*
317
 * a := a union b
318
 * n must be guaranteed to be > 0
319
 */
320
1.54M
#define SET_UNION(a, b, n)\
321
1.54M
{\
322
1.54M
  register bpf_u_int32 *_x = a, *_y = b;\
323
1.54M
  register u_int _n = n;\
324
2.60M
  do *_x++ |= *_y++; while (--_n != 0);\
325
1.54M
}
326
327
  uset all_dom_sets;
328
  uset all_closure_sets;
329
  uset all_edge_sets;
330
331
2.83M
#define MODULUS 213
332
  struct valnode *hashtbl[MODULUS];
333
  bpf_u_int32 curval;
334
  bpf_u_int32 maxval;
335
336
  struct vmapinfo *vmap;
337
  struct valnode *vnode_base;
338
  struct valnode *next_vnode;
339
} opt_state_t;
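
The uset macros above are plain fixed-width bit vectors, one bit per node or edge. A small hypothetical demo of the same idiom (standalone, not using the macros themselves):

    #include <assert.h>

    typedef unsigned int word;            /* stand-in for bpf_u_int32 */
    #define DEMO_BITS_PER_WORD (8 * sizeof(word))

    int main(void)
    {
        word a[2] = { 0, 0 }, b[2] = { 0, 0 };
        unsigned int i;

        /* SET_INSERT: bit 5 in both sets, bit 40 only in b */
        a[5 / DEMO_BITS_PER_WORD]  |= (word)1 << (5 % DEMO_BITS_PER_WORD);
        b[5 / DEMO_BITS_PER_WORD]  |= (word)1 << (5 % DEMO_BITS_PER_WORD);
        b[40 / DEMO_BITS_PER_WORD] |= (word)1 << (40 % DEMO_BITS_PER_WORD);

        /* SET_MEMBER: test bit 40 in b */
        assert(b[40 / DEMO_BITS_PER_WORD] & ((word)1 << (40 % DEMO_BITS_PER_WORD)));

        /* SET_INTERSECT(a, b, 2): word-by-word a &= b */
        for (i = 0; i < 2; i++)
            a[i] &= b[i];
        assert(a[0] == ((word)1 << 5) && a[1] == 0);
        return 0;
    }
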
340
341
typedef struct {
342
  /*
343
   * Place to longjmp to on an error.
344
   */
345
  jmp_buf top_ctx;
346
347
  /*
348
   * The buffer into which to put error message.
349
   */
350
  char *errbuf;
351
352
  /*
353
   * Some pointers used to convert the basic block form of the code,
354
   * into the array form that BPF requires.  'fstart' will point to
355
   * the malloc'd array while 'ftail' is used during the recursive
356
   * traversal.
357
   */
358
  struct bpf_insn *fstart;
359
  struct bpf_insn *ftail;
360
} conv_state_t;
361
362
static void opt_init(opt_state_t *, struct icode *);
363
static void opt_cleanup(opt_state_t *);
364
static void PCAP_NORETURN opt_error(opt_state_t *, const char *, ...)
365
    PCAP_PRINTFLIKE(2, 3);
366
367
static void intern_blocks(opt_state_t *, struct icode *);
368
369
static void find_inedges(opt_state_t *, struct block *);
370
#ifdef BDEBUG
371
static void opt_dump(opt_state_t *, struct icode *);
372
#endif
373
374
#ifndef MAX
375
772k
#define MAX(a,b) ((a)>(b)?(a):(b))
376
#endif
377
378
static void
379
find_levels_r(opt_state_t *opt_state, struct icode *ic, struct block *b)
380
1.69M
{
381
1.69M
  int level;
382
383
1.69M
  if (isMarked(ic, b))
384
674k
    return;
385
386
1.01M
  Mark(ic, b);
387
1.01M
  b->link = 0;
388
389
1.01M
  if (JT(b)) {
390
772k
    find_levels_r(opt_state, ic, JT(b));
391
772k
    find_levels_r(opt_state, ic, JF(b));
392
772k
    level = MAX(JT(b)->level, JF(b)->level) + 1;
393
772k
  } else
394
243k
    level = 0;
395
1.01M
  b->level = level;
396
1.01M
  b->link = opt_state->levels[level];
397
1.01M
  opt_state->levels[level] = b;
398
1.01M
}
399
400
/*
401
 * Level graph.  The levels go from 0 at the leaves to
402
 * N_LEVELS at the root.  The opt_state->levels[] array points to the
403
 * first node of the level list, whose elements are linked
404
 * with the 'link' field of the struct block.
405
 */
406
static void
407
find_levels(opt_state_t *opt_state, struct icode *ic)
408
145k
{
409
145k
  memset((char *)opt_state->levels, 0, opt_state->n_blocks * sizeof(*opt_state->levels));
410
145k
  unMarkAll(ic);
411
145k
  find_levels_r(opt_state, ic, ic->root);
412
145k
}
413
414
/*
415
 * Find dominator relationships.
416
 * Assumes graph has been leveled.
417
 */
418
static void
419
find_dom(opt_state_t *opt_state, struct block *root)
420
145k
{
421
145k
  u_int i;
422
145k
  int level;
423
145k
  struct block *b;
424
145k
  bpf_u_int32 *x;
425
426
  /*
427
   * Initialize sets to contain all nodes.
428
   */
429
145k
  x = opt_state->all_dom_sets;
430
  /*
431
   * In opt_init(), we've made sure the product doesn't overflow.
432
   */
433
145k
  i = opt_state->n_blocks * opt_state->nodewords;
434
3.39M
  while (i != 0) {
435
3.24M
    --i;
436
3.24M
    *x++ = 0xFFFFFFFFU;
437
3.24M
  }
438
  /* Root starts off empty. */
439
311k
  for (i = opt_state->nodewords; i != 0;) {
440
166k
    --i;
441
166k
    root->dom[i] = 0;
442
166k
  }
443
444
  /* root->level is the highest level number found. */
445
1.02M
  for (level = root->level; level >= 0; --level) {
446
1.89M
    for (b = opt_state->levels[level]; b; b = b->link) {
447
1.01M
      SET_INSERT(b->dom, b->id);
448
1.01M
      if (JT(b) == 0)
449
243k
        continue;
450
772k
      SET_INTERSECT(JT(b)->dom, b->dom, opt_state->nodewords);
451
772k
      SET_INTERSECT(JF(b)->dom, b->dom, opt_state->nodewords);
452
772k
    }
453
879k
  }
454
145k
}
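
Because the flowgraph is acyclic and visited level by level from the root, one insert-then-intersect pass is enough. A hypothetical worked example on a four-node diamond (node 0 branches to 1 and 2, which both fall into 3), with one machine word per dominator set:

    #include <assert.h>

    int main(void)
    {
        /* JT/JF successors; -1 marks the leaf (RET) block */
        int jt[4] = { 1, 3, 3, -1 };
        int jf[4] = { 2, 3, 3, -1 };
        unsigned int dom[4] = { 0xF, 0xF, 0xF, 0xF };   /* start all-full */
        int b;

        dom[0] = 0;                     /* root starts off empty */
        for (b = 0; b < 4; b++) {       /* level order: root first */
            dom[b] |= 1U << b;          /* SET_INSERT(b->dom, b->id) */
            if (jt[b] < 0)
                continue;
            dom[jt[b]] &= dom[b];       /* SET_INTERSECT into both children */
            dom[jf[b]] &= dom[b];
        }
        assert(dom[1] == 0x3);          /* {0,1} */
        assert(dom[2] == 0x5);          /* {0,2} */
        assert(dom[3] == 0x9);          /* {0,3}: neither 1 nor 2 dominates 3 */
        return 0;
    }
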
455
456
static void
457
propedom(opt_state_t *opt_state, struct edge *ep)
458
2.03M
{
459
2.03M
  SET_INSERT(ep->edom, ep->id);
460
2.03M
  if (ep->succ) {
461
1.54M
    SET_INTERSECT(ep->succ->et.edom, ep->edom, opt_state->edgewords);
462
1.54M
    SET_INTERSECT(ep->succ->ef.edom, ep->edom, opt_state->edgewords);
463
1.54M
  }
464
2.03M
}
465
466
/*
467
 * Compute edge dominators.
468
 * Assumes graph has been leveled and predecessors established.
469
 */
470
static void
471
find_edom(opt_state_t *opt_state, struct block *root)
472
145k
{
473
145k
  u_int i;
474
145k
  uset x;
475
145k
  int level;
476
145k
  struct block *b;
477
478
145k
  x = opt_state->all_edge_sets;
479
  /*
480
   * In opt_init(), we've made sure the product doesn't overflow.
481
   */
482
10.8M
  for (i = opt_state->n_edges * opt_state->edgewords; i != 0; ) {
483
10.7M
    --i;
484
10.7M
    x[i] = 0xFFFFFFFFU;
485
10.7M
  }
486
487
  /* root->level is the highest level number found. */
488
145k
  memset(root->et.edom, 0, opt_state->edgewords * sizeof(*(uset)0));
489
145k
  memset(root->ef.edom, 0, opt_state->edgewords * sizeof(*(uset)0));
490
1.02M
  for (level = root->level; level >= 0; --level) {
491
1.89M
    for (b = opt_state->levels[level]; b != 0; b = b->link) {
492
1.01M
      propedom(opt_state, &b->et);
493
1.01M
      propedom(opt_state, &b->ef);
494
1.01M
    }
495
879k
  }
496
145k
}
497
498
/*
499
 * Find the backwards transitive closure of the flow graph.  These sets
500
 * are backwards in the sense that we find the set of nodes that reach
501
 * a given node, not the set of nodes that can be reached by a node.
502
 *
503
 * Assumes graph has been leveled.
504
 */
505
static void
506
find_closure(opt_state_t *opt_state, struct block *root)
507
145k
{
508
145k
  int level;
509
145k
  struct block *b;
510
511
  /*
512
   * Initialize sets to contain no nodes.
513
   */
514
145k
  memset((char *)opt_state->all_closure_sets, 0,
515
145k
        opt_state->n_blocks * opt_state->nodewords * sizeof(*opt_state->all_closure_sets));
516
517
  /* root->level is the highest level number found. */
518
1.02M
  for (level = root->level; level >= 0; --level) {
519
1.89M
    for (b = opt_state->levels[level]; b; b = b->link) {
520
1.01M
      SET_INSERT(b->closure, b->id);
521
1.01M
      if (JT(b) == 0)
522
243k
        continue;
523
772k
      SET_UNION(JT(b)->closure, b->closure, opt_state->nodewords);
524
772k
      SET_UNION(JF(b)->closure, b->closure, opt_state->nodewords);
525
772k
    }
526
879k
  }
527
145k
}
528
529
/*
530
 * Return the register number that is used by s.
531
 *
532
 * Returns A_ATOM if A is used, X_ATOM if X is used, AX_ATOM if both A and X
533
 * are used, the scratch memory location's number if a scratch memory
534
 * location is used (e.g., 0 for M[0]), or -1 if none of those are used.
535
 *
536
 * The implementation should probably change to an array access.
537
 */
538
static int
539
atomuse(struct stmt *s)
540
9.50M
{
541
9.50M
  register int c = s->code;
542
543
9.50M
  if (c == NOP)
544
1.66M
    return -1;
545
546
7.84M
  switch (BPF_CLASS(c)) {
547
548
147k
  case BPF_RET:
549
147k
    return (BPF_RVAL(c) == BPF_A) ? A_ATOM :
550
147k
      (BPF_RVAL(c) == BPF_X) ? X_ATOM : -1;
551
552
3.10M
  case BPF_LD:
553
3.59M
  case BPF_LDX:
554
    /*
555
     * As there are fewer than 2^31 memory locations,
556
     * s->k should be convertible to int without problems.
557
     */
558
3.59M
    return (BPF_MODE(c) == BPF_IND) ? X_ATOM :
559
3.59M
      (BPF_MODE(c) == BPF_MEM) ? (int)s->k : -1;
560
561
1.17M
  case BPF_ST:
562
1.17M
    return A_ATOM;
563
564
0
  case BPF_STX:
565
0
    return X_ATOM;
566
567
1.53M
  case BPF_JMP:
568
2.44M
  case BPF_ALU:
569
2.44M
    if (BPF_SRC(c) == BPF_X)
570
660k
      return AX_ATOM;
571
1.78M
    return A_ATOM;
572
573
475k
  case BPF_MISC:
574
475k
    return BPF_MISCOP(c) == BPF_TXA ? X_ATOM : A_ATOM;
575
7.84M
  }
576
0
  abort();
577
  /* NOTREACHED */
578
7.84M
}
579
580
/*
581
 * Return the register number that is defined by 's'.  We assume that
582
 * a single stmt cannot define more than one register.  If no register
583
 * is defined, return -1.
584
 *
585
 * The implementation should probably change to an array access.
586
 */
587
static int
588
atomdef(struct stmt *s)
589
8.73M
{
590
8.73M
  if (s->code == NOP)
591
1.66M
    return -1;
592
593
7.07M
  switch (BPF_CLASS(s->code)) {
594
595
3.10M
  case BPF_LD:
596
4.02M
  case BPF_ALU:
597
4.02M
    return A_ATOM;
598
599
492k
  case BPF_LDX:
600
492k
    return X_ATOM;
601
602
1.17M
  case BPF_ST:
603
1.17M
  case BPF_STX:
604
1.17M
    return s->k;
605
606
475k
  case BPF_MISC:
607
475k
    return BPF_MISCOP(s->code) == BPF_TAX ? X_ATOM : A_ATOM;
608
7.07M
  }
609
908k
  return -1;
610
7.07M
}
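
For example (a hypothetical check, assuming the standard opcode macros from <pcap/bpf.h>): 'ld M[k]' uses scratch slot k and defines A, 'tax' uses A and defines X, and a 'jeq x' comparison uses both registers at once:

    #include <sys/types.h>
    #include <pcap/bpf.h>
    #include <assert.h>

    int main(void)
    {
        int ld_mem = BPF_LD|BPF_MEM;        /* atomuse: slot k; atomdef: A */
        int tax    = BPF_MISC|BPF_TAX;      /* atomuse: A; atomdef: X */
        int jeq_x  = BPF_JMP|BPF_JEQ|BPF_X; /* atomuse: both (AX_ATOM) */

        assert(BPF_CLASS(ld_mem) == BPF_LD && BPF_MODE(ld_mem) == BPF_MEM);
        assert(BPF_MISCOP(tax) != BPF_TXA); /* so atomdef() reports X_ATOM */
        assert(BPF_SRC(jeq_x) == BPF_X);    /* so atomuse() reports AX_ATOM */
        return 0;
    }
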
611
612
/*
613
 * Compute the sets of registers used, defined, and killed by 'b'.
614
 *
615
 * "Used" means that a statement in 'b' uses the register before any
616
 * statement in 'b' defines it, i.e. it uses the value left in
617
 * that register by a predecessor block of this block.
618
 * "Defined" means that a statement in 'b' defines it.
619
 * "Killed" means that a statement in 'b' defines it before any
620
 * statement in 'b' uses it, i.e. it kills the value left in that
621
 * register by a predecessor block of this block.
622
 */
623
static void
624
compute_local_ud(struct block *b)
625
1.01M
{
626
1.01M
  struct slist *s;
627
1.01M
  atomset def = 0, use = 0, killed = 0;
628
1.01M
  int atom;
629
630
5.76M
  for (s = b->stmts; s; s = s->next) {
631
4.75M
    if (s->s.code == NOP)
632
1.55M
      continue;
633
3.19M
    atom = atomuse(&s->s);
634
3.19M
    if (atom >= 0) {
635
1.99M
      if (atom == AX_ATOM) {
636
318k
        if (!ATOMELEM(def, X_ATOM))
637
116
          use |= ATOMMASK(X_ATOM);
638
318k
        if (!ATOMELEM(def, A_ATOM))
639
0
          use |= ATOMMASK(A_ATOM);
640
318k
      }
641
1.68M
      else if (atom < N_ATOMS) {
642
1.68M
        if (!ATOMELEM(def, atom))
643
54.4k
          use |= ATOMMASK(atom);
644
1.68M
      }
645
0
      else
646
0
        abort();
647
1.99M
    }
648
3.19M
    atom = atomdef(&s->s);
649
3.19M
    if (atom >= 0) {
650
3.19M
      if (!ATOMELEM(use, atom))
651
3.19M
        killed |= ATOMMASK(atom);
652
3.19M
      def |= ATOMMASK(atom);
653
3.19M
    }
654
3.19M
  }
655
1.01M
  if (BPF_CLASS(b->s.code) == BPF_JMP) {
656
    /*
657
     * XXX - what about RET?
658
     */
659
772k
    atom = atomuse(&b->s);
660
772k
    if (atom >= 0) {
661
772k
      if (atom == AX_ATOM) {
662
59.1k
        if (!ATOMELEM(def, X_ATOM))
663
1.20k
          use |= ATOMMASK(X_ATOM);
664
59.1k
        if (!ATOMELEM(def, A_ATOM))
665
1.18k
          use |= ATOMMASK(A_ATOM);
666
59.1k
      }
667
713k
      else if (atom < N_ATOMS) {
668
713k
        if (!ATOMELEM(def, atom))
669
17.0k
          use |= ATOMMASK(atom);
670
713k
      }
671
0
      else
672
0
        abort();
673
772k
    }
674
772k
  }
675
676
1.01M
  b->def = def;
677
1.01M
  b->kill = killed;
678
1.01M
  b->in_use = use;
679
1.01M
}
680
681
/*
682
 * Assume graph is already leveled.
683
 */
684
static void
685
find_ud(opt_state_t *opt_state, struct block *root)
686
145k
{
687
145k
  int i, maxlevel;
688
145k
  struct block *p;
689
690
  /*
691
   * root->level is the highest level number found;
692
   * count down from there.
693
   */
694
145k
  maxlevel = root->level;
695
1.02M
  for (i = maxlevel; i >= 0; --i)
696
1.89M
    for (p = opt_state->levels[i]; p; p = p->link) {
697
1.01M
      compute_local_ud(p);
698
1.01M
      p->out_use = 0;
699
1.01M
    }
700
701
879k
  for (i = 1; i <= maxlevel; ++i) {
702
1.50M
    for (p = opt_state->levels[i]; p; p = p->link) {
703
772k
      p->out_use |= JT(p)->in_use | JF(p)->in_use;
704
772k
      p->in_use |= p->out_use &~ p->kill;
705
772k
    }
706
733k
  }
707
145k
}
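
The propagation step is the classic backward-liveness equation, in_use = use | (out_use & ~kill). A hypothetical two-block sketch with atoms represented as bit masks:

    #include <assert.h>

    int main(void)
    {
        unsigned int A = 1U << 0, X = 1U << 1;  /* hypothetical atom masks */

        unsigned int succ_in_use = X;   /* successor reads X on entry */

        /* predecessor defines A before any use (kills A), uses nothing */
        unsigned int pred_use = 0, pred_kill = A;

        unsigned int pred_out_use = succ_in_use;    /* union over children */
        unsigned int pred_in_use  = pred_use | (pred_out_use & ~pred_kill);

        assert(pred_in_use == X);   /* X stays live across the block */
        return 0;
    }
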
708
static void
709
init_val(opt_state_t *opt_state)
710
145k
{
711
145k
  opt_state->curval = 0;
712
145k
  opt_state->next_vnode = opt_state->vnode_base;
713
145k
  memset((char *)opt_state->vmap, 0, opt_state->maxval * sizeof(*opt_state->vmap));
714
145k
  memset((char *)opt_state->hashtbl, 0, sizeof opt_state->hashtbl);
715
145k
}
716
717
/*
718
 * Because we really don't have an IR, this stuff is a little messy.
719
 *
720
 * This routine looks in the table of existing value numbers for a value
721
 * generated from an operation with the specified opcode and
722
 * the specified values.  If it finds it, it returns its value number,
723
 * otherwise it makes a new entry in the table and returns the
724
 * value number of that entry.
725
 */
726
static bpf_u_int32
727
F(opt_state_t *opt_state, int code, bpf_u_int32 v0, bpf_u_int32 v1)
728
2.83M
{
729
2.83M
  u_int hash;
730
2.83M
  bpf_u_int32 val;
731
2.83M
  struct valnode *p;
732
733
2.83M
  hash = (u_int)code ^ (v0 << 4) ^ (v1 << 8);
734
2.83M
  hash %= MODULUS;
735
736
3.00M
  for (p = opt_state->hashtbl[hash]; p; p = p->next)
737
1.47M
    if (p->code == code && p->v0 == v0 && p->v1 == v1)
738
1.30M
      return p->val;
739
740
  /*
741
   * Not found.  Allocate a new value, and assign it a new
742
   * value number.
743
   *
744
   * opt_state->curval starts out as 0, which means VAL_UNKNOWN; we
745
   * increment it before using it as the new value number, which
746
   * means we never assign VAL_UNKNOWN.
747
   *
748
   * XXX - unless we overflow, but we probably won't have 2^32-1
749
   * values; we treat 32 bits as effectively infinite.
750
   */
751
1.53M
  val = ++opt_state->curval;
752
1.53M
  if (BPF_MODE(code) == BPF_IMM &&
753
1.53M
      (BPF_CLASS(code) == BPF_LD || BPF_CLASS(code) == BPF_LDX)) {
754
940k
    opt_state->vmap[val].const_val = v0;
755
940k
    opt_state->vmap[val].is_const = 1;
756
940k
  }
757
1.53M
  p = opt_state->next_vnode++;
758
1.53M
  p->val = val;
759
1.53M
  p->code = code;
760
1.53M
  p->v0 = v0;
761
1.53M
  p->v1 = v1;
762
1.53M
  p->next = opt_state->hashtbl[hash];
763
1.53M
  opt_state->hashtbl[hash] = p;
764
765
1.53M
  return val;
766
2.83M
}
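
A stripped-down sketch of the same hash-consing idea (hypothetical names, fixed-size pool, no error handling): identical (code, v0, v1) triples share one value number, and 0 stays reserved for VAL_UNKNOWN:

    #include <assert.h>

    #define DEMO_MODULUS 213

    struct vn { int code; unsigned int v0, v1, val; struct vn *next; };
    static struct vn pool[64], *tbl[DEMO_MODULUS];
    static unsigned int nextfree, curval;

    static unsigned int
    demo_F(int code, unsigned int v0, unsigned int v1)
    {
        unsigned int hash = ((unsigned int)code ^ (v0 << 4) ^ (v1 << 8)) % DEMO_MODULUS;
        struct vn *p;

        for (p = tbl[hash]; p != 0; p = p->next)
            if (p->code == code && p->v0 == v0 && p->v1 == v1)
                return p->val;          /* seen this computation before */

        p = &pool[nextfree++];
        p->code = code; p->v0 = v0; p->v1 = v1;
        p->val = ++curval;              /* pre-increment skips VAL_UNKNOWN (0) */
        p->next = tbl[hash]; tbl[hash] = p;
        return p->val;
    }

    int main(void)
    {
        unsigned int a = demo_F(0x20, 14, 0);   /* e.g. some "ld [14]" */
        unsigned int b = demo_F(0x20, 14, 0);   /* same computation */
        unsigned int c = demo_F(0x20, 16, 0);   /* different operand */
        assert(a == b && a != c && a != 0);
        return 0;
    }
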
767
768
static inline void
769
vstore(struct stmt *s, bpf_u_int32 *valp, bpf_u_int32 newval, int alter)
770
2.67M
{
771
2.67M
  if (alter && newval != VAL_UNKNOWN && *valp == newval)
772
142k
    s->code = NOP;
773
2.53M
  else
774
2.53M
    *valp = newval;
775
2.67M
}
776
777
/*
778
 * Do constant-folding on binary operators.
779
 * (Unary operators are handled elsewhere.)
780
 */
781
static void
782
fold_op(opt_state_t *opt_state, struct stmt *s, bpf_u_int32 v0, bpf_u_int32 v1)
783
59.7k
{
784
59.7k
  bpf_u_int32 a, b;
785
786
59.7k
  a = opt_state->vmap[v0].const_val;
787
59.7k
  b = opt_state->vmap[v1].const_val;
788
789
59.7k
  switch (BPF_OP(s->code)) {
790
9.13k
  case BPF_ADD:
791
9.13k
    a += b;
792
9.13k
    break;
793
794
1.81k
  case BPF_SUB:
795
1.81k
    a -= b;
796
1.81k
    break;
797
798
13.2k
  case BPF_MUL:
799
13.2k
    a *= b;
800
13.2k
    break;
801
802
6.82k
  case BPF_DIV:
803
6.82k
    if (b == 0)
804
239
      opt_error(opt_state, "division by zero");
805
6.58k
    a /= b;
806
6.58k
    break;
807
808
8.79k
  case BPF_MOD:
809
8.79k
    if (b == 0)
810
1.05k
      opt_error(opt_state, "modulus by zero");
811
7.73k
    a %= b;
812
7.73k
    break;
813
814
13.1k
  case BPF_AND:
815
13.1k
    a &= b;
816
13.1k
    break;
817
818
1.87k
  case BPF_OR:
819
1.87k
    a |= b;
820
1.87k
    break;
821
822
3.37k
  case BPF_XOR:
823
3.37k
    a ^= b;
824
3.37k
    break;
825
826
481
  case BPF_LSH:
827
    /*
828
     * A left shift of more than the width of the type
829
     * is undefined in C; we'll just treat it as shifting
830
     * all the bits out.
831
     *
832
     * XXX - the BPF interpreter doesn't check for this,
833
     * so its behavior is dependent on the behavior of
834
     * the processor on which it's running.  There are
835
     * processors on which it shifts all the bits out
836
     * and processors on which it does no shift.
837
     */
838
481
    if (b < 32)
839
399
      a <<= b;
840
82
    else
841
82
      a = 0;
842
481
    break;
843
844
1.08k
  case BPF_RSH:
845
    /*
846
     * A right shift of more than the width of the type
847
     * is undefined in C; we'll just treat it as shifting
848
     * all the bits out.
849
     *
850
     * XXX - the BPF interpreter doesn't check for this,
851
     * so its behavior is dependent on the behavior of
852
     * the processor on which it's running.  There are
853
     * processors on which it shifts all the bits out
854
     * and processors on which it does no shift.
855
     */
856
1.08k
    if (b < 32)
857
742
      a >>= b;
858
339
    else
859
339
      a = 0;
860
1.08k
    break;
861
862
0
  default:
863
0
    abort();
864
59.7k
  }
865
58.4k
  s->k = a;
866
58.4k
  s->code = BPF_LD|BPF_IMM;
867
  /*
868
   * XXX - optimizer loop detection.
869
   */
870
58.4k
  opt_state->non_branch_movement_performed = 1;
871
58.4k
  opt_state->done = 0;
872
58.4k
}
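
So, for instance, a 'mul' whose two operands carry the constants 6 and 4 is rewritten in place into 'ld #24', and divide or modulus by a constant zero becomes a compile-time error instead of a broken filter. A hypothetical check of the arithmetic conventions the fold relies on (assuming 32-bit unsigned int):

    #include <assert.h>

    int main(void)
    {
        unsigned int a, b;

        a = 6; b = 4;
        assert(a * b == 24);            /* BPF_MUL fold */

        a = 0; b = 1;
        assert(a - b == 0xFFFFFFFFU);   /* BPF_SUB wraps modulo 2^32 */

        /* BPF_LSH/BPF_RSH by 32 or more: defined here as "all bits out" (0)
         * rather than relying on C's undefined over-wide shifts */
        a = 1; b = 33;
        assert((b < 32 ? a << b : 0) == 0);
        return 0;
    }
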
873
874
static inline struct slist *
875
this_op(struct slist *s)
876
5.99M
{
877
7.65M
  while (s != 0 && s->s.code == NOP)
878
1.65M
    s = s->next;
879
5.99M
  return s;
880
5.99M
}
881
882
static void
883
opt_not(struct block *b)
884
516
{
885
516
  struct block *tmp = JT(b);
886
887
516
  JT(b) = JF(b);
888
516
  JF(b) = tmp;
889
516
}
890
891
static void
892
opt_peep(opt_state_t *opt_state, struct block *b)
893
908k
{
894
908k
  struct slist *s;
895
908k
  struct slist *next, *last;
896
908k
  bpf_u_int32 val;
897
898
908k
  s = b->stmts;
899
908k
  if (s == 0)
900
153k
    return;
901
902
754k
  last = s;
903
3.01M
  for (/*empty*/; /*empty*/; s = next) {
904
    /*
905
     * Skip over nops.
906
     */
907
3.01M
    s = this_op(s);
908
3.01M
    if (s == 0)
909
32.9k
      break;  /* nothing left in the block */
910
911
    /*
912
     * Find the next real instruction after that one
913
     * (skipping nops).
914
     */
915
2.98M
    next = this_op(s->next);
916
2.98M
    if (next == 0)
917
721k
      break;  /* no next instruction */
918
2.25M
    last = next;
919
920
    /*
921
     * st  M[k] --> st  M[k]
922
     * ldx M[k]   tax
923
     */
924
2.25M
    if (s->s.code == BPF_ST &&
925
2.25M
        next->s.code == (BPF_LDX|BPF_MEM) &&
926
2.25M
        s->s.k == next->s.k) {
927
      /*
928
       * XXX - optimizer loop detection.
929
       */
930
100k
      opt_state->non_branch_movement_performed = 1;
931
100k
      opt_state->done = 0;
932
100k
      next->s.code = BPF_MISC|BPF_TAX;
933
100k
    }
934
    /*
935
     * ld  #k --> ldx  #k
936
     * tax      txa
937
     */
938
2.25M
    if (s->s.code == (BPF_LD|BPF_IMM) &&
939
2.25M
        next->s.code == (BPF_MISC|BPF_TAX)) {
940
65.4k
      s->s.code = BPF_LDX|BPF_IMM;
941
65.4k
      next->s.code = BPF_MISC|BPF_TXA;
942
      /*
943
       * XXX - optimizer loop detection.
944
       */
945
65.4k
      opt_state->non_branch_movement_performed = 1;
946
65.4k
      opt_state->done = 0;
947
65.4k
    }
948
    /*
949
     * This is an ugly special case, but it happens
950
     * when you say tcp[k] or udp[k] where k is a constant.
951
     */
952
2.25M
    if (s->s.code == (BPF_LD|BPF_IMM)) {
953
446k
      struct slist *add, *tax, *ild;
954
955
      /*
956
       * Check that X isn't used on exit from this
957
       * block (which the optimizer might cause).
958
       * We know the code generator won't generate
959
       * any local dependencies.
960
       */
961
446k
      if (ATOMELEM(b->out_use, X_ATOM))
962
1.55k
        continue;
963
964
      /*
965
       * Check that the instruction following the ldi
966
       * is an addx, or it's an ldxms with an addx
967
       * following it (with 0 or more nops between the
968
       * ldxms and addx).
969
       */
970
444k
      if (next->s.code != (BPF_LDX|BPF_MSH|BPF_B))
971
444k
        add = next;
972
229
      else
973
229
        add = this_op(next->next);
974
444k
      if (add == 0 || add->s.code != (BPF_ALU|BPF_ADD|BPF_X))
975
443k
        continue;
976
977
      /*
978
       * Check that a tax follows that (with 0 or more
979
       * nops between them).
980
       */
981
1.82k
      tax = this_op(add->next);
982
1.82k
      if (tax == 0 || tax->s.code != (BPF_MISC|BPF_TAX))
983
1.24k
        continue;
984
985
      /*
986
       * Check that an ild follows that (with 0 or more
987
       * nops between them).
988
       */
989
582
      ild = this_op(tax->next);
990
582
      if (ild == 0 || BPF_CLASS(ild->s.code) != BPF_LD ||
991
582
          BPF_MODE(ild->s.code) != BPF_IND)
992
265
        continue;
993
      /*
994
       * We want to turn this sequence:
995
       *
996
       * (004) ldi     #0x2   {s}
997
       * (005) ldxms   [14]   {next}  -- optional
998
       * (006) addx     {add}
999
       * (007) tax      {tax}
1000
       * (008) ild     [x+0]    {ild}
1001
       *
1002
       * into this sequence:
1003
       *
1004
       * (004) nop
1005
       * (005) ldxms   [14]
1006
       * (006) nop
1007
       * (007) nop
1008
       * (008) ild     [x+2]
1009
       *
1010
       * XXX We need to check that X is not
1011
       * subsequently used, because we want to change
1012
       * what'll be in it after this sequence.
1013
       *
1014
       * We know we can eliminate the accumulator
1015
       * modifications earlier in the sequence since
1016
       * it is defined by the last stmt of this sequence
1017
       * (i.e., the last statement of the sequence loads
1018
       * a value into the accumulator, so we can eliminate
1019
       * earlier operations on the accumulator).
1020
       */
1021
317
      ild->s.k += s->s.k;
1022
317
      s->s.code = NOP;
1023
317
      add->s.code = NOP;
1024
317
      tax->s.code = NOP;
1025
      /*
1026
       * XXX - optimizer loop detection.
1027
       */
1028
317
      opt_state->non_branch_movement_performed = 1;
1029
317
      opt_state->done = 0;
1030
317
    }
1031
2.25M
  }
1032
  /*
1033
   * If the comparison at the end of a block is an equality
1034
   * comparison against a constant, and nobody uses the value
1035
   * we leave in the A register at the end of a block, and
1036
   * the operation preceding the comparison is an arithmetic
1037
   * operation, we can sometime optimize it away.
1038
   */
1039
754k
  if (b->s.code == (BPF_JMP|BPF_JEQ|BPF_K) &&
1040
754k
      !ATOMELEM(b->out_use, A_ATOM)) {
1041
    /*
1042
     * We can optimize away certain subtractions of the
1043
     * X register.
1044
     */
1045
616k
    if (last->s.code == (BPF_ALU|BPF_SUB|BPF_X)) {
1046
9.71k
      val = b->val[X_ATOM];
1047
9.71k
      if (opt_state->vmap[val].is_const) {
1048
        /*
1049
         * If we have a subtract to do a comparison,
1050
         * and the X register is a known constant,
1051
         * we can merge this value into the
1052
         * comparison:
1053
         *
1054
         * sub x  ->  nop
1055
         * jeq #y jeq #(x+y)
1056
         */
1057
4.03k
        b->s.k += opt_state->vmap[val].const_val;
1058
4.03k
        last->s.code = NOP;
1059
        /*
1060
         * XXX - optimizer loop detection.
1061
         */
1062
4.03k
        opt_state->non_branch_movement_performed = 1;
1063
4.03k
        opt_state->done = 0;
1064
5.68k
      } else if (b->s.k == 0) {
1065
        /*
1066
         * If the X register isn't a constant,
1067
         * and the comparison in the test is
1068
         * against 0, we can compare with the
1069
         * X register, instead:
1070
         *
1071
         * sub x  ->  nop
1072
         * jeq #0 jeq x
1073
         */
1074
5.63k
        last->s.code = NOP;
1075
5.63k
        b->s.code = BPF_JMP|BPF_JEQ|BPF_X;
1076
        /*
1077
         * XXX - optimizer loop detection.
1078
         */
1079
5.63k
        opt_state->non_branch_movement_performed = 1;
1080
5.63k
        opt_state->done = 0;
1081
5.63k
      }
1082
9.71k
    }
1083
    /*
1084
     * Likewise, a constant subtract can be simplified:
1085
     *
1086
     * sub #x ->  nop
1087
     * jeq #y ->  jeq #(x+y)
1088
     */
1089
606k
    else if (last->s.code == (BPF_ALU|BPF_SUB|BPF_K)) {
1090
52
      last->s.code = NOP;
1091
52
      b->s.k += last->s.k;
1092
      /*
1093
       * XXX - optimizer loop detection.
1094
       */
1095
52
      opt_state->non_branch_movement_performed = 1;
1096
52
      opt_state->done = 0;
1097
52
    }
1098
    /*
1099
     * And, similarly, a constant AND can be simplified
1100
     * if we're testing against 0, i.e.:
1101
     *
1102
     * and #k nop
1103
     * jeq #0  -> jset #k
1104
     */
1105
606k
    else if (last->s.code == (BPF_ALU|BPF_AND|BPF_K) &&
1106
606k
        b->s.k == 0) {
1107
516
      b->s.k = last->s.k;
1108
516
      b->s.code = BPF_JMP|BPF_K|BPF_JSET;
1109
516
      last->s.code = NOP;
1110
      /*
1111
       * XXX - optimizer loop detection.
1112
       */
1113
516
      opt_state->non_branch_movement_performed = 1;
1114
516
      opt_state->done = 0;
1115
516
      opt_not(b);
1116
516
    }
1117
616k
  }
1118
  /*
1119
   * jset #0        ->   never
1120
   * jset #ffffffff ->   always
1121
   */
1122
754k
  if (b->s.code == (BPF_JMP|BPF_K|BPF_JSET)) {
1123
3.99k
    if (b->s.k == 0)
1124
260
      JT(b) = JF(b);
1125
3.99k
    if (b->s.k == 0xffffffffU)
1126
0
      JF(b) = JT(b);
1127
3.99k
  }
1128
  /*
1129
   * If we're comparing against the index register, and the index
1130
   * register is a known constant, we can just compare against that
1131
   * constant.
1132
   */
1133
754k
  val = b->val[X_ATOM];
1134
754k
  if (opt_state->vmap[val].is_const && BPF_SRC(b->s.code) == BPF_X) {
1135
19.9k
    bpf_u_int32 v = opt_state->vmap[val].const_val;
1136
19.9k
    b->s.code &= ~BPF_X;
1137
19.9k
    b->s.k = v;
1138
19.9k
  }
1139
  /*
1140
   * If the accumulator is a known constant, we can compute the
1141
   * comparison result.
1142
   */
1143
754k
  val = b->val[A_ATOM];
1144
754k
  if (opt_state->vmap[val].is_const && BPF_SRC(b->s.code) == BPF_K) {
1145
94.8k
    bpf_u_int32 v = opt_state->vmap[val].const_val;
1146
94.8k
    switch (BPF_OP(b->s.code)) {
1147
1148
47.6k
    case BPF_JEQ:
1149
47.6k
      v = v == b->s.k;
1150
47.6k
      break;
1151
1152
22.9k
    case BPF_JGT:
1153
22.9k
      v = v > b->s.k;
1154
22.9k
      break;
1155
1156
24.2k
    case BPF_JGE:
1157
24.2k
      v = v >= b->s.k;
1158
24.2k
      break;
1159
1160
0
    case BPF_JSET:
1161
0
      v &= b->s.k;
1162
0
      break;
1163
1164
0
    default:
1165
0
      abort();
1166
94.8k
    }
1167
94.8k
    if (JF(b) != JT(b)) {
1168
      /*
1169
       * XXX - optimizer loop detection.
1170
       */
1171
41.3k
      opt_state->non_branch_movement_performed = 1;
1172
41.3k
      opt_state->done = 0;
1173
41.3k
    }
1174
94.8k
    if (v)
1175
30.2k
      JF(b) = JT(b);
1176
64.5k
    else
1177
64.5k
      JT(b) = JF(b);
1178
94.8k
  }
1179
754k
}
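
As a concrete instance of the first rewrite above (hypothetical, assuming the opcode macros from <pcap/bpf.h>): after 'st M[2]' the accumulator already holds what 'ldx M[2]' would reload, so the load becomes the cheaper register move 'tax':

    #include <sys/types.h>
    #include <pcap/bpf.h>
    #include <assert.h>

    int main(void)
    {
        /* st M[2]; ldx M[2] */
        int st_code = BPF_ST;
        int ld_code = BPF_LDX|BPF_MEM;
        unsigned int st_k = 2, ld_k = 2;

        /* the opt_peep() condition and rewrite */
        if (st_code == BPF_ST && ld_code == (BPF_LDX|BPF_MEM) && st_k == ld_k)
            ld_code = BPF_MISC|BPF_TAX;     /* copy A instead of reloading */

        assert(ld_code == (BPF_MISC|BPF_TAX));
        return 0;
    }
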
1180
1181
/*
1182
 * Compute the symbolic value of the expression of 's', and update
1183
 * anything it defines in the value table 'val'.  If 'alter' is true,
1184
 * do various optimizations.  This code would be cleaner if symbolic
1185
 * evaluation and code transformations weren't folded together.
1186
 */
1187
static void
1188
opt_stmt(opt_state_t *opt_state, struct stmt *s, bpf_u_int32 val[], int alter)
1189
4.72M
{
1190
4.72M
  int op;
1191
4.72M
  bpf_u_int32 v;
1192
1193
4.72M
  switch (s->code) {
1194
1195
114k
  case BPF_LD|BPF_ABS|BPF_W:
1196
228k
  case BPF_LD|BPF_ABS|BPF_H:
1197
546k
  case BPF_LD|BPF_ABS|BPF_B:
1198
546k
    v = F(opt_state, s->code, s->k, 0L);
1199
546k
    vstore(s, &val[A_ATOM], v, alter);
1200
546k
    break;
1201
1202
7.09k
  case BPF_LD|BPF_IND|BPF_W:
1203
14.4k
  case BPF_LD|BPF_IND|BPF_H:
1204
63.1k
  case BPF_LD|BPF_IND|BPF_B:
1205
63.1k
    v = val[X_ATOM];
1206
63.1k
    if (alter && opt_state->vmap[v].is_const) {
1207
4.30k
      s->code = BPF_LD|BPF_ABS|BPF_SIZE(s->code);
1208
4.30k
      s->k += opt_state->vmap[v].const_val;
1209
4.30k
      v = F(opt_state, s->code, s->k, 0L);
1210
      /*
1211
       * XXX - optimizer loop detection.
1212
       */
1213
4.30k
      opt_state->non_branch_movement_performed = 1;
1214
4.30k
      opt_state->done = 0;
1215
4.30k
    }
1216
58.8k
    else
1217
58.8k
      v = F(opt_state, s->code, s->k, v);
1218
63.1k
    vstore(s, &val[A_ATOM], v, alter);
1219
63.1k
    break;
1220
1221
15.3k
  case BPF_LD|BPF_LEN:
1222
15.3k
    v = F(opt_state, s->code, 0L, 0L);
1223
15.3k
    vstore(s, &val[A_ATOM], v, alter);
1224
15.3k
    break;
1225
1226
513k
  case BPF_LD|BPF_IMM:
1227
513k
    v = K(s->k);
1228
513k
    vstore(s, &val[A_ATOM], v, alter);
1229
513k
    break;
1230
1231
113k
  case BPF_LDX|BPF_IMM:
1232
113k
    v = K(s->k);
1233
113k
    vstore(s, &val[X_ATOM], v, alter);
1234
113k
    break;
1235
1236
4.33k
  case BPF_LDX|BPF_MSH|BPF_B:
1237
4.33k
    v = F(opt_state, s->code, s->k, 0L);
1238
4.33k
    vstore(s, &val[X_ATOM], v, alter);
1239
4.33k
    break;
1240
1241
76.9k
  case BPF_ALU|BPF_NEG:
1242
76.9k
    if (alter && opt_state->vmap[val[A_ATOM]].is_const) {
1243
17.9k
      s->code = BPF_LD|BPF_IMM;
1244
      /*
1245
       * Do this negation as unsigned arithmetic; that's
1246
       * what modern BPF engines do, and it guarantees
1247
       * that all possible values can be negated.  (Yeah,
1248
       * negating 0x80000000, the minimum signed 32-bit
1249
       * two's-complement value, results in 0x80000000,
1250
       * so it's still negative, but we *should* be doing
1251
       * all unsigned arithmetic here, to match what
1252
       * modern BPF engines do.)
1253
       *
1254
       * Express it as 0U - (unsigned value) so that we
1255
       * don't get compiler warnings about negating an
1256
       * unsigned value and don't get UBSan warnings
1257
       * about the result of negating 0x80000000 being
1258
       * undefined.
1259
       */
1260
17.9k
      s->k = 0U - opt_state->vmap[val[A_ATOM]].const_val;
1261
17.9k
      val[A_ATOM] = K(s->k);
1262
17.9k
    }
1263
59.0k
    else
1264
59.0k
      val[A_ATOM] = F(opt_state, s->code, val[A_ATOM], 0L);
1265
76.9k
    break;
1266
1267
12.8k
  case BPF_ALU|BPF_ADD|BPF_K:
1268
13.9k
  case BPF_ALU|BPF_SUB|BPF_K:
1269
18.3k
  case BPF_ALU|BPF_MUL|BPF_K:
1270
18.9k
  case BPF_ALU|BPF_DIV|BPF_K:
1271
19.2k
  case BPF_ALU|BPF_MOD|BPF_K:
1272
104k
  case BPF_ALU|BPF_AND|BPF_K:
1273
105k
  case BPF_ALU|BPF_OR|BPF_K:
1274
105k
  case BPF_ALU|BPF_XOR|BPF_K:
1275
108k
  case BPF_ALU|BPF_LSH|BPF_K:
1276
108k
  case BPF_ALU|BPF_RSH|BPF_K:
1277
108k
    op = BPF_OP(s->code);
1278
108k
    if (alter) {
1279
29.1k
      if (s->k == 0) {
1280
        /*
1281
         * Optimize operations where the constant
1282
         * is zero.
1283
         *
1284
         * Don't optimize away "sub #0"
1285
         * as it may be needed later to
1286
         * fixup the generated math code.
1287
         *
1288
         * Fail if we're dividing by zero or taking
1289
         * a modulus by zero.
1290
         */
1291
1.50k
        if (op == BPF_ADD ||
1292
1.50k
            op == BPF_LSH || op == BPF_RSH ||
1293
1.50k
            op == BPF_OR || op == BPF_XOR) {
1294
301
          s->code = NOP;
1295
301
          break;
1296
301
        }
1297
1.20k
        if (op == BPF_MUL || op == BPF_AND) {
1298
972
          s->code = BPF_LD|BPF_IMM;
1299
972
          val[A_ATOM] = K(s->k);
1300
972
          break;
1301
972
        }
1302
233
        if (op == BPF_DIV)
1303
7
          opt_error(opt_state,
1304
7
              "division by zero");
1305
226
        if (op == BPF_MOD)
1306
24
          opt_error(opt_state,
1307
24
              "modulus by zero");
1308
226
      }
1309
27.8k
      if (opt_state->vmap[val[A_ATOM]].is_const) {
1310
958
        fold_op(opt_state, s, val[A_ATOM], K(s->k));
1311
958
        val[A_ATOM] = K(s->k);
1312
958
        break;
1313
958
      }
1314
27.8k
    }
1315
106k
    val[A_ATOM] = F(opt_state, s->code, val[A_ATOM], K(s->k));
1316
106k
    break;
1317
1318
50.7k
  case BPF_ALU|BPF_ADD|BPF_X:
1319
70.2k
  case BPF_ALU|BPF_SUB|BPF_X:
1320
138k
  case BPF_ALU|BPF_MUL|BPF_X:
1321
170k
  case BPF_ALU|BPF_DIV|BPF_X:
1322
214k
  case BPF_ALU|BPF_MOD|BPF_X:
1323
280k
  case BPF_ALU|BPF_AND|BPF_X:
1324
291k
  case BPF_ALU|BPF_OR|BPF_X:
1325
307k
  case BPF_ALU|BPF_XOR|BPF_X:
1326
309k
  case BPF_ALU|BPF_LSH|BPF_X:
1327
314k
  case BPF_ALU|BPF_RSH|BPF_X:
1328
314k
    op = BPF_OP(s->code);
1329
314k
    if (alter && opt_state->vmap[val[X_ATOM]].is_const) {
1330
63.5k
      if (opt_state->vmap[val[A_ATOM]].is_const) {
1331
58.7k
        fold_op(opt_state, s, val[A_ATOM], val[X_ATOM]);
1332
58.7k
        val[A_ATOM] = K(s->k);
1333
58.7k
      }
1334
4.79k
      else {
1335
4.79k
        s->code = BPF_ALU|BPF_K|op;
1336
4.79k
        s->k = opt_state->vmap[val[X_ATOM]].const_val;
1337
4.79k
        if ((op == BPF_LSH || op == BPF_RSH) &&
1338
4.79k
            s->k > 31)
1339
6
          opt_error(opt_state,
1340
6
              "shift by more than 31 bits");
1341
        /*
1342
         * XXX - optimizer loop detection.
1343
         */
1344
4.78k
        opt_state->non_branch_movement_performed = 1;
1345
4.78k
        opt_state->done = 0;
1346
4.78k
        val[A_ATOM] =
1347
4.78k
          F(opt_state, s->code, val[A_ATOM], K(s->k));
1348
4.78k
      }
1349
63.5k
      break;
1350
63.5k
    }
1351
    /*
1352
     * Check if we're doing something to an accumulator
1353
     * that is 0, and simplify.  This may not seem like
1354
     * much of a simplification but it could open up further
1355
     * optimizations.
1356
     * XXX We could also check for mul by 1, etc.
1357
     */
1358
251k
    if (alter && opt_state->vmap[val[A_ATOM]].is_const
1359
251k
        && opt_state->vmap[val[A_ATOM]].const_val == 0) {
1360
548
      if (op == BPF_ADD || op == BPF_OR || op == BPF_XOR) {
1361
130
        s->code = BPF_MISC|BPF_TXA;
1362
130
        vstore(s, &val[A_ATOM], val[X_ATOM], alter);
1363
130
        break;
1364
130
      }
1365
418
      else if (op == BPF_MUL || op == BPF_DIV || op == BPF_MOD ||
1366
418
         op == BPF_AND || op == BPF_LSH || op == BPF_RSH) {
1367
290
        s->code = BPF_LD|BPF_IMM;
1368
290
        s->k = 0;
1369
290
        vstore(s, &val[A_ATOM], K(s->k), alter);
1370
290
        break;
1371
290
      }
1372
128
      else if (op == BPF_NEG) {
1373
0
        s->code = NOP;
1374
0
        break;
1375
0
      }
1376
548
    }
1377
250k
    val[A_ATOM] = F(opt_state, s->code, val[A_ATOM], val[X_ATOM]);
1378
250k
    break;
1379
1380
2.27k
  case BPF_MISC|BPF_TXA:
1381
2.27k
    vstore(s, &val[A_ATOM], val[X_ATOM], alter);
1382
2.27k
    break;
1383
1384
477k
  case BPF_LD|BPF_MEM:
1385
477k
    v = val[s->k];
1386
477k
    if (alter && opt_state->vmap[v].is_const) {
1387
98.5k
      s->code = BPF_LD|BPF_IMM;
1388
98.5k
      s->k = opt_state->vmap[v].const_val;
1389
      /*
1390
       * XXX - optimizer loop detection.
1391
       */
1392
98.5k
      opt_state->non_branch_movement_performed = 1;
1393
98.5k
      opt_state->done = 0;
1394
98.5k
    }
1395
477k
    vstore(s, &val[A_ATOM], v, alter);
1396
477k
    break;
1397
1398
188k
  case BPF_MISC|BPF_TAX:
1399
188k
    vstore(s, &val[X_ATOM], val[A_ATOM], alter);
1400
188k
    break;
1401
1402
154k
  case BPF_LDX|BPF_MEM:
1403
154k
    v = val[s->k];
1404
154k
    if (alter && opt_state->vmap[v].is_const) {
1405
4.24k
      s->code = BPF_LDX|BPF_IMM;
1406
4.24k
      s->k = opt_state->vmap[v].const_val;
1407
      /*
1408
       * XXX - optimizer loop detection.
1409
       */
1410
4.24k
      opt_state->non_branch_movement_performed = 1;
1411
4.24k
      opt_state->done = 0;
1412
4.24k
    }
1413
154k
    vstore(s, &val[X_ATOM], v, alter);
1414
154k
    break;
1415
1416
596k
  case BPF_ST:
1417
596k
    vstore(s, &val[s->k], val[A_ATOM], alter);
1418
596k
    break;
1419
1420
0
  case BPF_STX:
1421
0
    vstore(s, &val[s->k], val[X_ATOM], alter);
1422
0
    break;
1423
4.72M
  }
1424
4.72M
}
1425
1426
static void
1427
deadstmt(opt_state_t *opt_state, register struct stmt *s, register struct stmt *last[])
1428
5.53M
{
1429
5.53M
  register int atom;
1430
1431
5.53M
  atom = atomuse(s);
1432
5.53M
  if (atom >= 0) {
1433
2.51M
    if (atom == AX_ATOM) {
1434
283k
      last[X_ATOM] = 0;
1435
283k
      last[A_ATOM] = 0;
1436
283k
    }
1437
2.22M
    else
1438
2.22M
      last[atom] = 0;
1439
2.51M
  }
1440
5.53M
  atom = atomdef(s);
1441
5.53M
  if (atom >= 0) {
1442
2.97M
    if (last[atom]) {
1443
      /*
1444
       * XXX - optimizer loop detection.
1445
       */
1446
289k
      opt_state->non_branch_movement_performed = 1;
1447
289k
      opt_state->done = 0;
1448
289k
      last[atom]->code = NOP;
1449
289k
    }
1450
2.97M
    last[atom] = s;
1451
2.97M
  }
1452
5.53M
}
1453
1454
static void
1455
opt_deadstores(opt_state_t *opt_state, register struct block *b)
1456
908k
{
1457
908k
  register struct slist *s;
1458
908k
  register int atom;
1459
908k
  struct stmt *last[N_ATOMS];
1460
1461
908k
  memset((char *)last, 0, sizeof last);
1462
1463
5.53M
  for (s = b->stmts; s != 0; s = s->next)
1464
4.63M
    deadstmt(opt_state, &s->s, last);
1465
908k
  deadstmt(opt_state, &b->s, last);
1466
1467
17.2M
  for (atom = 0; atom < N_ATOMS; ++atom)
1468
16.3M
    if (last[atom] && !ATOMELEM(b->out_use, atom)) {
1469
134k
      last[atom]->code = NOP;
1470
      /*
1471
       * XXX - optimizer loop detection.
1472
       */
1473
134k
      opt_state->non_branch_movement_performed = 1;
1474
134k
      opt_state->done = 0;
1475
134k
    }
1476
908k
}
1477
1478
static void
1479
opt_blk(opt_state_t *opt_state, struct block *b, int do_stmts)
1480
1.01M
{
1481
1.01M
  struct slist *s;
1482
1.01M
  struct edge *p;
1483
1.01M
  int i;
1484
1.01M
  bpf_u_int32 aval, xval;
1485
1486
#if 0
1487
  for (s = b->stmts; s && s->next; s = s->next)
1488
    if (BPF_CLASS(s->s.code) == BPF_JMP) {
1489
      do_stmts = 0;
1490
      break;
1491
    }
1492
#endif
1493
1494
  /*
1495
   * Initialize the atom values.
1496
   */
1497
1.01M
  p = b->in_edges;
1498
1.01M
  if (p == 0) {
1499
    /*
1500
     * We have no predecessors, so everything is undefined
1501
     * upon entry to this block.
1502
     */
1503
145k
    memset((char *)b->val, 0, sizeof(b->val));
1504
867k
  } else {
1505
    /*
1506
     * Inherit values from our predecessors.
1507
     *
1508
     * First, get the values from the predecessor along the
1509
     * first edge leading to this node.
1510
     */
1511
867k
    memcpy((char *)b->val, (char *)p->pred->val, sizeof(b->val));
1512
    /*
1513
     * Now look at all the other nodes leading to this node.
1514
     * If, for the predecessor along that edge, a register
1515
     * has a different value from the one we have (i.e.,
1516
     * control paths are merging, and the merging paths
1517
     * assign different values to that register), give the
1518
     * register the undefined value of 0.
1519
     */
1520
1.53M
    while ((p = p->next) != NULL) {
1521
12.7M
      for (i = 0; i < N_ATOMS; ++i)
1522
12.1M
        if (b->val[i] != p->pred->val[i])
1523
771k
          b->val[i] = 0;
1524
672k
    }
1525
867k
  }
1526
1.01M
  aval = b->val[A_ATOM];
1527
1.01M
  xval = b->val[X_ATOM];
1528
5.73M
  for (s = b->stmts; s; s = s->next)
1529
4.72M
    opt_stmt(opt_state, &s->s, b->val, do_stmts);
1530
1531
  /*
1532
   * This is a special case: if we don't use anything from this
1533
   * block, and we load the accumulator or index register with a
1534
   * value that is already there, or if this block is a return,
1535
   * eliminate all the statements.
1536
   *
1537
   * XXX - what if it does a store?  Presumably that falls under
1538
   * the heading of "if we don't use anything from this block",
1539
   * i.e., if we use any memory location set to a different
1540
   * value by this block, then we use something from this block.
1541
   *
1542
   * XXX - why does it matter whether we use anything from this
1543
   * block?  If the accumulator or index register doesn't change
1544
   * its value, isn't that OK even if we use that value?
1545
   *
1546
   * XXX - if we load the accumulator with a different value,
1547
   * and the block ends with a conditional branch, we obviously
1548
   * can't eliminate it, as the branch depends on that value.
1549
   * For the index register, the conditional branch only depends
1550
   * on the index register value if the test is against the index
1551
   * register value rather than a constant; if nothing uses the
1552
   * value we put into the index register, and we're not testing
1553
   * against the index register's value, and there aren't any
1554
   * other problems that would keep us from eliminating this
1555
   * block, can we eliminate it?
1556
   */
1557
1.01M
  if (do_stmts &&
1558
1.01M
      ((b->out_use == 0 &&
1559
293k
        aval != VAL_UNKNOWN && b->val[A_ATOM] == aval &&
1560
293k
        xval != VAL_UNKNOWN && b->val[X_ATOM] == xval) ||
1561
293k
       BPF_CLASS(b->s.code) == BPF_RET)) {
1562
103k
    if (b->stmts != 0) {
1563
6.79k
      b->stmts = 0;
1564
      /*
1565
       * XXX - optimizer loop detection.
1566
       */
1567
6.79k
      opt_state->non_branch_movement_performed = 1;
1568
6.79k
      opt_state->done = 0;
1569
6.79k
    }
1570
909k
  } else {
1571
909k
    opt_peep(opt_state, b);
1572
909k
    opt_deadstores(opt_state, b);
1573
909k
  }
1574
  /*
1575
   * Set up values for branch optimizer.
1576
   */
1577
1.01M
  if (BPF_SRC(b->s.code) == BPF_K)
1578
967k
    b->oval = K(b->s.k);
1579
44.9k
  else
1580
44.9k
    b->oval = b->val[X_ATOM];
1581
1.01M
  b->et.code = b->s.code;
1582
1.01M
  b->ef.code = -b->s.code;
1583
1.01M
}
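
The merge at the top of the block is a simple meet over in-edges: inherit the first predecessor's exit values, then reset any atom whose value differs along another edge to VAL_UNKNOWN (0). A hypothetical sketch:

    #include <assert.h>

    #define DEMO_N_ATOMS 18     /* stand-in for N_ATOMS */

    int main(void)
    {
        unsigned int pred1[DEMO_N_ATOMS] = { 0 }, pred2[DEMO_N_ATOMS] = { 0 };
        unsigned int val[DEMO_N_ATOMS];
        int i;

        pred1[0] = 5; pred2[0] = 5;     /* atom 0 agrees on both paths */
        pred1[1] = 7; pred2[1] = 9;     /* atom 1 conflicts */

        for (i = 0; i < DEMO_N_ATOMS; i++)  /* inherit from the first edge */
            val[i] = pred1[i];
        for (i = 0; i < DEMO_N_ATOMS; i++)  /* meet with every other edge */
            if (val[i] != pred2[i])
                val[i] = 0;                 /* VAL_UNKNOWN */

        assert(val[0] == 5 && val[1] == 0);
        return 0;
    }
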
1584
1585
/*
1586
 * Return true if any register that is used on exit from 'succ', has
1587
 * an exit value that is different from the corresponding exit value
1588
 * from 'b'.
1589
 */
1590
static int
1591
use_conflict(struct block *b, struct block *succ)
1592
478k
{
1593
478k
  int atom;
1594
478k
  atomset use = succ->out_use;
1595
1596
478k
  if (use == 0)
1597
459k
    return 0;
1598
1599
271k
  for (atom = 0; atom < N_ATOMS; ++atom)
1600
260k
    if (ATOMELEM(use, atom))
1601
18.9k
      if (b->val[atom] != succ->val[atom])
1602
7.75k
        return 1;
1603
11.2k
  return 0;
1604
18.9k
}
1605
1606
/*
1607
 * Given a block that is the successor of an edge, and an edge that
1608
 * dominates that edge, return either a pointer to a child of that
1609
 * block (a block to which that block jumps) if that child is a
1610
 * candidate to replace the successor of the latter edge, or NULL
1611
 * if neither of the first block's children is a candidate.
1612
 */
1613
static struct block *
1614
fold_edge(struct block *child, struct edge *ep)
1615
2.50M
{
1616
2.50M
  int sense;
1617
2.50M
  bpf_u_int32 aval0, aval1, oval0, oval1;
1618
2.50M
  int code = ep->code;
1619
1620
2.50M
  if (code < 0) {
1621
    /*
1622
     * This edge is a "branch if false" edge.
1623
     */
1624
905k
    code = -code;
1625
905k
    sense = 0;
1626
1.60M
  } else {
1627
    /*
1628
     * This edge is a "branch if true" edge.
1629
     */
1630
1.60M
    sense = 1;
1631
1.60M
  }
1632
1633
  /*
1634
   * If the opcode for the branch at the end of the block we
1635
   * were handed isn't the same as the opcode for the branch
1636
   * to which the edge we were handed corresponds, the tests
1637
   * for those branches aren't testing the same conditions,
1638
   * so the blocks to which the first block branches aren't
1639
   * candidates to replace the successor of the edge.
1640
   */
1641
2.50M
  if (child->s.code != code)
1642
738k
    return 0;
1643
1644
1.76M
  aval0 = child->val[A_ATOM];
1645
1.76M
  oval0 = child->oval;
1646
1.76M
  aval1 = ep->pred->val[A_ATOM];
1647
1.76M
  oval1 = ep->pred->oval;
1648
1649
  /*
1650
   * If the A register value on exit from the successor block
1651
   * isn't the same as the A register value on exit from the
1652
   * predecessor of the edge, the blocks to which the first
1653
   * block branches aren't candidates to replace the successor
1654
   * of the edge.
1655
   */
1656
1.76M
  if (aval0 != aval1)
1657
1.16M
    return 0;
1658
1659
603k
  if (oval0 == oval1)
1660
    /*
1661
     * The operands of the branch instructions are
1662
     * identical, so the branches are testing the
1663
     * same condition, and the result is true if a true
1664
     * branch was taken to get here, otherwise false.
1665
     */
1666
310k
    return sense ? JT(child) : JF(child);
1667
1668
292k
  if (sense && code == (BPF_JMP|BPF_JEQ|BPF_K))
1669
    /*
1670
     * At this point, we only know the comparison if we
1671
     * came down the true branch, and it was an equality
1672
     * comparison with a constant.
1673
     *
1674
     * I.e., if we came down the true branch, and the branch
1675
     * was an equality comparison with a constant, we know the
1676
     * accumulator contains that constant.  If we came down
1677
     * the false branch, or the comparison wasn't with a
1678
     * constant, we don't know what was in the accumulator.
1679
     *
1680
     * We rely on the fact that distinct constants have distinct
1681
     * value numbers.
1682
     */
1683
55.7k
    return JF(child);
1684
1685
236k
  return 0;
1686
292k
}
1687
1688
/*
1689
 * If we can make this edge go directly to a child of the edge's current
1690
 * successor, do so.
1691
 */
1692
static void
1693
opt_j(opt_state_t *opt_state, struct edge *ep)
1694
1.14M
{
1695
1.14M
  register u_int i, k;
1696
1.14M
  register struct block *target;
1697
1698
  /*
1699
   * Does this edge go to a block where, if the test
1700
   * at the end of it succeeds, it goes to a block
1701
   * that's a leaf node of the DAG, i.e. a return
1702
   * statement?
1703
   * If so, there's nothing to optimize.
1704
   */
1705
1.14M
  if (JT(ep->succ) == 0)
1706
431k
    return;
1707
1708
  /*
1709
   * Does this edge go to a block that goes, in turn, to
1710
   * the same block regardless of whether the test at the
1711
   * end succeeds or fails?
1712
   */
1713
708k
  if (JT(ep->succ) == JF(ep->succ)) {
1714
    /*
1715
     * Common branch targets can be eliminated, provided
1716
     * there is no data dependency.
1717
     *
1718
     * Check whether any register used on exit from the
1719
     * block to which the successor of this edge goes
1720
     * has a value at that point that's different from
1721
     * the value it has on exit from the predecessor of
1722
     * this edge.  If not, the predecessor of this edge
1723
     * can just go to the block to which the successor
1724
     * of this edge goes, bypassing the successor of this
1725
     * edge, as the successor of this edge isn't doing
1726
     * any calculations whose results are different
1727
     * from what the blocks before it did and isn't
1728
     * doing any tests the results of which matter.
1729
     */
1730
112k
    if (!use_conflict(ep->pred, JT(ep->succ))) {
1731
      /*
1732
       * No, there isn't.
1733
       * Make this edge go to the block to
1734
       * which the successor of that edge
1735
       * goes.
1736
       *
1737
       * XXX - optimizer loop detection.
1738
       */
1739
106k
      opt_state->non_branch_movement_performed = 1;
1740
106k
      opt_state->done = 0;
1741
106k
      ep->succ = JT(ep->succ);
1742
106k
    }
1743
112k
  }
1744
  /*
1745
   * For each edge dominator that matches the successor of this
1746
   * edge, promote the edge successor to its grandchild.
1747
   *
1748
   * XXX We violate the set abstraction here in favor of a reasonably
1749
   * efficient loop.
1750
   */
1751
961k
 top:
1752
3.29M
  for (i = 0; i < opt_state->edgewords; ++i) {
1753
    /* i'th word in the bitset of dominators */
1754
2.69M
    register bpf_u_int32 x = ep->edom[i];
1755
1756
4.83M
    while (x != 0) {
1757
      /* Find the next dominator in that word and mark it as found */
1758
2.50M
      k = lowest_set_bit(x);
1759
2.50M
      x &=~ ((bpf_u_int32)1 << k);
1760
2.50M
      k += i * BITS_PER_WORD;
1761
1762
2.50M
      target = fold_edge(ep->succ, opt_state->edges[k]);
1763
      /*
1764
       * We have a candidate to replace the successor
1765
       * of ep.
1766
       *
1767
       * Check that there is no data dependency between
1768
       * nodes that will be violated if we move the edge;
1769
       * i.e., if any register used on exit from the
1770
       * candidate has a value at that point different
1771
       * from the value it has when we exit the
1772
       * predecessor of that edge, there's a data
1773
       * dependency that will be violated.
1774
       */
1775
2.50M
      if (target != 0 && !use_conflict(ep->pred, target)) {
1776
        /*
1777
         * It's safe to replace the successor of
1778
         * ep; do so, and note that we've made
1779
         * at least one change.
1780
         *
1781
         * XXX - this is one of the operations that
1782
         * happens when the optimizer gets into
1783
         * one of those infinite loops.
1784
         */
1785
364k
        opt_state->done = 0;
1786
364k
        ep->succ = target;
1787
364k
        if (JT(target) != 0)
1788
          /*
1789
           * Start over unless we hit a leaf.
1790
           */
1791
252k
          goto top;
1792
111k
        return;
1793
364k
      }
1794
2.50M
    }
1795
2.69M
  }
1796
961k
}
1797
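
The dominator scan above uses a standard dense-bitset idiom: take the lowest set bit of a word, clear it, and convert the bit position to a global edge index.  A self-contained sketch of the same idiom follows; lowest_set_bit() here is a portable stand-in (libpcap's real one may use a compiler builtin), and visit_set_bits() is a hypothetical caller.

    #include <stdint.h>
    #include <stdio.h>

    #define BITS_PER_WORD 32u  /* bits in a uint32_t bitset word */

    /* Portable stand-in; assumes v != 0, as the caller guarantees. */
    static unsigned
    lowest_set_bit(uint32_t v)
    {
      unsigned n = 0;

      while ((v & 1) == 0) {
        v >>= 1;
        n++;
      }
      return n;
    }

    /* Visit the global index of every set bit in an array of words. */
    static void
    visit_set_bits(const uint32_t *words, unsigned nwords)
    {
      unsigned i, k;
      uint32_t x;

      for (i = 0; i < nwords; i++) {
        x = words[i];
        while (x != 0) {
          k = lowest_set_bit(x);
          x &= ~((uint32_t)1 << k);  /* mark this bit as handled */
          printf("bit %u is set\n", k + i * BITS_PER_WORD);
        }
      }
    }
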
1798
/*
1799
 * XXX - is this, and and_pullup(), what's described in section 6.1.2
1800
 * "Predicate Assertion Propagation" in the BPF+ paper?
1801
 *
1802
 * Note that this looks at block dominators, not edge dominators.
1803
 * Given that, probably not.
1804
 *
1805
 * "A or B" compiles into
1806
 *
1807
 *          A
1808
 *       t / \ f
1809
 *        /   B
1810
 *       / t / \ f
1811
 *      \   /
1812
 *       \ /
1813
 *        X
1814
 *
1815
 *
1816
 */
1817
static void
1818
or_pullup(opt_state_t *opt_state, struct block *b)
1819
570k
{
1820
570k
  bpf_u_int32 val;
1821
570k
  int at_top;
1822
570k
  struct block *pull;
1823
570k
  struct block **diffp, **samep;
1824
570k
  struct edge *ep;
1825
1826
570k
  ep = b->in_edges;
1827
570k
  if (ep == 0)
1828
205k
    return;
1829
1830
  /*
1831
   * Make sure each predecessor loads the same value.
1832
   * XXX why?
1833
   */
1834
364k
  val = ep->pred->val[A_ATOM];
1835
404k
  for (ep = ep->next; ep != 0; ep = ep->next)
1836
100k
    if (val != ep->pred->val[A_ATOM])
1837
60.1k
      return;
1838
1839
  /*
1840
   * For the first edge in the list of edges coming into this block,
1841
   * see whether the predecessor of that edge comes here via a true
1842
   * branch or a false branch.
1843
   */
1844
304k
  if (JT(b->in_edges->pred) == b)
1845
189k
    diffp = &JT(b->in_edges->pred); /* jt */
1846
115k
  else
1847
115k
    diffp = &JF(b->in_edges->pred);  /* jf */
1848
1849
  /*
1850
   * diffp is a pointer to a pointer to the block.
1851
   *
1852
   * Go down the false chain looking as far as you can,
1853
   * making sure that each jump-compare is doing the
1854
   * same as the original block.
1855
   *
1856
   * If you reach the bottom before you reach a
1857
   * different jump-compare, just exit.  There's nothing
1858
   * to do here.  XXX - no, this version is checking for
1859
   * the value leaving the block; that's from the BPF+
1860
   * pullup routine.
1861
   */
1862
304k
  at_top = 1;
1863
437k
  for (;;) {
1864
    /*
1865
     * Done if the chain has ended (null block pointer).
1866
     */
1867
437k
    if (*diffp == 0)
1868
0
      return;
1869
1870
    /*
1871
     * Done if this block in the chain isn't
1872
     * going the same place we're going.
1873
     *
1874
     * Does the true edge of this block point to the same
1875
     * location as the true edge of b?
1876
     */
1877
437k
    if (JT(*diffp) != JT(b))
1878
80.0k
      return;
1879
1880
    /*
1881
     * Done if b isn't a dominator of that node in the
1882
     * chain.
1883
     *
1884
     * Does b dominate *diffp?
1885
     */
1886
357k
    if (!SET_MEMBER((*diffp)->dom, b->id))
1887
2.60k
      return;
1888
1889
    /*
1890
     * Break out of the loop if that node's value of A
1891
     * isn't the value of A computed above.
1892
     */
1893
355k
    if ((*diffp)->val[A_ATOM] != val)
1894
221k
      break;
1895
1896
    /*
1897
     * Get the JF for that node and
1898
     * go down the false path.
1899
     */
1900
133k
    diffp = &JF(*diffp);
1901
133k
    at_top = 0;
1902
133k
  }
1903
1904
  /*
1905
   * Now that we've found a different jump-compare in a chain
1906
   * below b, search further down until we find another
1907
   * jump-compare that looks at the original value.  This
1908
   * jump-compare should get pulled up.  XXX again we're
1909
   * comparing values, not jump-compares.
1910
   */
1911
221k
  samep = &JF(*diffp);
1912
263k
  for (;;) {
1913
    /*
1914
     * Done if the chain has ended (null block pointer).
1915
     */
1916
263k
    if (*samep == 0)
1917
0
      return;
1918
1919
    /*
1920
     * Done if this block in the chain isn't
1921
     * going the same place we're going.
1922
     */
1923
263k
    if (JT(*samep) != JT(b))
1924
206k
      return;
1925
1926
    /*
1927
     * Done if b isn't a dominator of that node in the
1928
     * chain.
1929
     *
1930
     * Does b dominate *samep?
1931
     */
1932
56.4k
    if (!SET_MEMBER((*samep)->dom, b->id))
1933
14.3k
      return;
1934
1935
    /*
1936
     * Break out of the loop if that node's value of A
1937
     * is the value of A computed above.
1938
     */
1939
42.1k
    if ((*samep)->val[A_ATOM] == val)
1940
729
      break;
1941
1942
    /* XXX Need to check that there are no data dependencies
1943
       between dp0 and dp1.  Currently, the code generator
1944
       will not produce such dependencies. */
1945
41.3k
    samep = &JF(*samep);
1946
41.3k
  }
1947
#ifdef notdef
1948
  /* XXX This doesn't cover everything. */
1949
  for (i = 0; i < N_ATOMS; ++i)
1950
    if ((*samep)->val[i] != pred->val[i])
1951
      return;
1952
#endif
1953
  /* Pull up the node. */
1954
729
  pull = *samep;
1955
729
  *samep = JF(pull);
1956
729
  JF(pull) = *diffp;
1957
1958
  /*
1959
   * At the top of the chain, each predecessor needs to point at the
1960
   * pulled up node.  Inside the chain, there is only one predecessor
1961
   * to worry about.
1962
   */
1963
729
  if (at_top) {
1964
1.84k
    for (ep = b->in_edges; ep != 0; ep = ep->next) {
1965
1.11k
      if (JT(ep->pred) == b)
1966
474
        JT(ep->pred) = pull;
1967
645
      else
1968
645
        JF(ep->pred) = pull;
1969
1.11k
    }
1970
721
  }
1971
8
  else
1972
8
    *diffp = pull;
1973
1974
  /*
1975
   * XXX - this is one of the operations that happens when the
1976
   * optimizer gets into one of those infinite loops.
1977
   */
1978
729
  opt_state->done = 0;
1979
729
}
1980
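
A detail worth noting before and_pullup(), which is the true-branch dual of the routine above: both walk the chain through struct block ** (diffp, samep) rather than plain block pointers, so the final splice can overwrite whichever JT()/JF() slot the walk stopped at, with no special case for the head of the chain.  The same idiom on an ordinary singly linked list, as a sketch with hypothetical names:

    #include <stddef.h>

    struct node {
      int key;
      struct node *next;
    };

    /*
     * Unlink the first node with the given key and reinsert it at the
     * front.  Walking with a pointer-to-pointer means the head and the
     * interior of the list need no separate cases - the same reason
     * or_pullup()/and_pullup() carry diffp and samep as struct block **.
     */
    static void
    pull_to_front(struct node **head, int key)
    {
      struct node **pp, *pull;

      for (pp = head; *pp != NULL; pp = &(*pp)->next)
        if ((*pp)->key == key)
          break;
      if (*pp == NULL || pp == head)
        return;               /* not found, or already at the front */
      pull = *pp;
      *pp = pull->next;       /* splice it out of its old slot... */
      pull->next = *head;     /* ...and relink it at the front */
      *head = pull;
    }
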
1981
static void
1982
and_pullup(opt_state_t *opt_state, struct block *b)
1983
570k
{
1984
570k
  bpf_u_int32 val;
1985
570k
  int at_top;
1986
570k
  struct block *pull;
1987
570k
  struct block **diffp, **samep;
1988
570k
  struct edge *ep;
1989
1990
570k
  ep = b->in_edges;
1991
570k
  if (ep == 0)
1992
205k
    return;
1993
1994
  /*
1995
   * Make sure each predecessor loads the same value.
1996
   */
1997
364k
  val = ep->pred->val[A_ATOM];
1998
404k
  for (ep = ep->next; ep != 0; ep = ep->next)
1999
100k
    if (val != ep->pred->val[A_ATOM])
2000
60.1k
      return;
2001
2002
304k
  if (JT(b->in_edges->pred) == b)
2003
188k
    diffp = &JT(b->in_edges->pred);
2004
115k
  else
2005
115k
    diffp = &JF(b->in_edges->pred);
2006
2007
304k
  at_top = 1;
2008
388k
  for (;;) {
2009
388k
    if (*diffp == 0)
2010
0
      return;
2011
2012
388k
    if (JF(*diffp) != JF(b))
2013
63.0k
      return;
2014
2015
325k
    if (!SET_MEMBER((*diffp)->dom, b->id))
2016
5.45k
      return;
2017
2018
319k
    if ((*diffp)->val[A_ATOM] != val)
2019
235k
      break;
2020
2021
83.6k
    diffp = &JT(*diffp);
2022
83.6k
    at_top = 0;
2023
83.6k
  }
2024
235k
  samep = &JT(*diffp);
2025
282k
  for (;;) {
2026
282k
    if (*samep == 0)
2027
0
      return;
2028
2029
282k
    if (JF(*samep) != JF(b))
2030
231k
      return;
2031
2032
50.8k
    if (!SET_MEMBER((*samep)->dom, b->id))
2033
3.30k
      return;
2034
2035
47.5k
    if ((*samep)->val[A_ATOM] == val)
2036
640
      break;
2037
2038
    /* XXX Need to check that there are no data dependencies
2039
       between diffp and samep.  Currently, the code generator
2040
       will not produce such dependencies. */
2041
46.8k
    samep = &JT(*samep);
2042
46.8k
  }
2043
#ifdef notdef
2044
  /* XXX This doesn't cover everything. */
2045
  for (i = 0; i < N_ATOMS; ++i)
2046
    if ((*samep)->val[i] != pred->val[i])
2047
      return;
2048
#endif
2049
  /* Pull up the node. */
2050
640
  pull = *samep;
2051
640
  *samep = JT(pull);
2052
640
  JT(pull) = *diffp;
2053
2054
  /*
2055
   * At the top of the chain, each predecessor needs to point at the
2056
   * pulled up node.  Inside the chain, there is only one predecessor
2057
   * to worry about.
2058
   */
2059
640
  if (at_top) {
2060
1.34k
    for (ep = b->in_edges; ep != 0; ep = ep->next) {
2061
715
      if (JT(ep->pred) == b)
2062
273
        JT(ep->pred) = pull;
2063
442
      else
2064
442
        JF(ep->pred) = pull;
2065
715
    }
2066
634
  }
2067
6
  else
2068
6
    *diffp = pull;
2069
2070
  /*
2071
   * XXX - this is one of the operations that happens when the
2072
   * optimizer gets into one of those infinite loops.
2073
   */
2074
640
  opt_state->done = 0;
2075
640
}
2076
2077
static void
2078
opt_blks(opt_state_t *opt_state, struct icode *ic, int do_stmts)
2079
145k
{
2080
145k
  int i, maxlevel;
2081
145k
  struct block *p;
2082
2083
145k
  init_val(opt_state);
2084
145k
  maxlevel = ic->root->level;
2085
2086
145k
  find_inedges(opt_state, ic->root);
2087
1.02M
  for (i = maxlevel; i >= 0; --i)
2088
1.89M
    for (p = opt_state->levels[i]; p; p = p->link)
2089
1.01M
      opt_blk(opt_state, p, do_stmts);
2090
2091
145k
  if (do_stmts)
2092
    /*
2093
     * No point trying to move branches; it can't possibly
2094
     * make a difference at this point.
2095
     *
2096
     * XXX - this might be after we detect a loop where
2097
     * we were just looping infinitely moving branches
2098
     * in such a fashion that we went through two or more
2099
     * versions of the machine code, eventually returning
2100
     * to the first version.  (We're really not doing a
2101
     * full loop detection, we're just testing for two
2102
     * passes in a row where we do nothing but
2103
     * move branches.)
2104
     */
2105
62.4k
    return;
2106
2107
  /*
2108
   * Is this what the BPF+ paper describes in sections 6.1.1,
2109
   * 6.1.2, and 6.1.3?
2110
   */
2111
629k
  for (i = 1; i <= maxlevel; ++i) {
2112
1.11M
    for (p = opt_state->levels[i]; p; p = p->link) {
2113
570k
      opt_j(opt_state, &p->et);
2114
570k
      opt_j(opt_state, &p->ef);
2115
570k
    }
2116
546k
  }
2117
2118
83.3k
  find_inedges(opt_state, ic->root);
2119
629k
  for (i = 1; i <= maxlevel; ++i) {
2120
1.11M
    for (p = opt_state->levels[i]; p; p = p->link) {
2121
570k
      or_pullup(opt_state, p);
2122
570k
      and_pullup(opt_state, p);
2123
570k
    }
2124
546k
  }
2125
83.3k
}
2126
2127
static inline void
2128
link_inedge(struct edge *parent, struct block *child)
2129
2.68M
{
2130
2.68M
  parent->next = child->in_edges;
2131
2.68M
  child->in_edges = parent;
2132
2.68M
}
2133
2134
static void
2135
find_inedges(opt_state_t *opt_state, struct block *root)
2136
227k
{
2137
227k
  u_int i;
2138
227k
  int level;
2139
227k
  struct block *b;
2140
2141
3.22M
  for (i = 0; i < opt_state->n_blocks; ++i)
2142
2.99M
    opt_state->blocks[i]->in_edges = 0;
2143
2144
  /*
2145
   * Traverse the graph, adding each edge to the predecessor
2146
   * list of its successors.  Skip the leaves (i.e. level 0).
2147
   */
2148
1.50M
  for (level = root->level; level > 0; --level) {
2149
2.62M
    for (b = opt_state->levels[level]; b != 0; b = b->link) {
2150
1.34M
      link_inedge(&b->et, JT(b));
2151
1.34M
      link_inedge(&b->ef, JF(b));
2152
1.34M
    }
2153
1.27M
  }
2154
227k
}
2155
2156
static void
2157
opt_root(struct block **b)
2158
31.6k
{
2159
31.6k
  struct slist *tmp, *s;
2160
2161
31.6k
  s = (*b)->stmts;
2162
31.6k
  (*b)->stmts = 0;
2163
47.0k
  while (BPF_CLASS((*b)->s.code) == BPF_JMP && JT(*b) == JF(*b))
2164
15.4k
    *b = JT(*b);
2165
2166
31.6k
  tmp = (*b)->stmts;
2167
31.6k
  if (tmp != 0)
2168
2.34k
    sappend(s, tmp);
2169
31.6k
  (*b)->stmts = s;
2170
2171
  /*
2172
   * If the root node is a return, then there is no
2173
   * point executing any statements (since the bpf machine
2174
   * has no side effects).
2175
   */
2176
31.6k
  if (BPF_CLASS((*b)->s.code) == BPF_RET)
2177
16.0k
    (*b)->stmts = 0;
2178
31.6k
}
2179
2180
static void
2181
opt_loop(opt_state_t *opt_state, struct icode *ic, int do_stmts)
2182
65.9k
{
2183
2184
#ifdef BDEBUG
2185
  if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
2186
    printf("opt_loop(root, %d) begin\n", do_stmts);
2187
    opt_dump(opt_state, ic);
2188
  }
2189
#endif
2190
2191
  /*
2192
   * XXX - optimizer loop detection.
2193
   */
2194
65.9k
  int loop_count = 0;
2195
145k
  for (;;) {
2196
145k
    opt_state->done = 1;
2197
    /*
2198
     * XXX - optimizer loop detection.
2199
     */
2200
145k
    opt_state->non_branch_movement_performed = 0;
2201
145k
    find_levels(opt_state, ic);
2202
145k
    find_dom(opt_state, ic->root);
2203
145k
    find_closure(opt_state, ic->root);
2204
145k
    find_ud(opt_state, ic->root);
2205
145k
    find_edom(opt_state, ic->root);
2206
145k
    opt_blks(opt_state, ic, do_stmts);
2207
#ifdef BDEBUG
2208
    if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
2209
      printf("opt_loop(root, %d) bottom, done=%d\n", do_stmts, opt_state->done);
2210
      opt_dump(opt_state, ic);
2211
    }
2212
#endif
2213
2214
    /*
2215
     * Was anything done in this optimizer pass?
2216
     */
2217
145k
    if (opt_state->done) {
2218
      /*
2219
       * No, so we've reached a fixed point.
2220
       * We're done.
2221
       */
2222
64.6k
      break;
2223
64.6k
    }
2224
2225
    /*
2226
     * XXX - was anything done other than branch movement
2227
     * in this pass?
2228
     */
2229
81.1k
    if (opt_state->non_branch_movement_performed) {
2230
      /*
2231
       * Yes.  Clear any loop-detection counter;
2232
       * we're making some form of progress (assuming
2233
       * we can't get into a cycle doing *other*
2234
       * optimizations...).
2235
       */
2236
73.1k
      loop_count = 0;
2237
73.1k
    } else {
2238
      /*
2239
       * No - increment the counter, and quit if
2240
       * it's up to 100.
2241
       */
2242
8.00k
      loop_count++;
2243
8.00k
      if (loop_count >= 100) {
2244
        /*
2245
         * We've done nothing but branch movement
2246
         * for 100 passes; we're probably
2247
         * in a cycle and will never reach a
2248
         * fixed point.
2249
         *
2250
         * XXX - yes, we really need a non-
2251
         * heuristic way of detecting a cycle.
2252
         */
2253
0
        opt_state->done = 1;
2254
0
        break;
2255
0
      }
2256
8.00k
    }
2257
81.1k
  }
2258
65.9k
}
2259
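
Reduced to its shape, this is a fixed-point driver with a stall counter.  A minimal sketch (hypothetical names; one_pass() returns nonzero when a pass changed nothing, mirroring opt_state->done):

    /*
     * Run passes until one changes nothing, but bail out after 100
     * consecutive passes that made only "cosmetic" changes.
     */
    static int
    iterate_to_fixed_point(int (*one_pass)(int *made_real_progress))
    {
      int stall_count = 0;
      int real_progress;

      for (;;) {
        real_progress = 0;
        if (one_pass(&real_progress))
          return 0;                 /* fixed point reached */
        if (real_progress)
          stall_count = 0;          /* some non-cosmetic change */
        else if (++stall_count >= 100)
          return -1;                /* probably cycling; give up */
      }
    }
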
2260
/*
2261
 * Optimize the filter code in its dag representation.
2262
 * Return 0 on success, -1 on error.
2263
 */
2264
int
2265
bpf_optimize(struct icode *ic, char *errbuf)
2266
32.9k
{
2267
32.9k
  opt_state_t opt_state;
2268
2269
32.9k
  memset(&opt_state, 0, sizeof(opt_state));
2270
32.9k
  opt_state.errbuf = errbuf;
2271
32.9k
  opt_state.non_branch_movement_performed = 0;
2272
32.9k
  if (setjmp(opt_state.top_ctx)) {
2273
1.33k
    opt_cleanup(&opt_state);
2274
1.33k
    return -1;
2275
1.33k
  }
2276
31.6k
  opt_init(&opt_state, ic);
2277
31.6k
  opt_loop(&opt_state, ic, 0);
2278
31.6k
  opt_loop(&opt_state, ic, 1);
2279
31.6k
  intern_blocks(&opt_state, ic);
2280
#ifdef BDEBUG
2281
  if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
2282
    printf("after intern_blocks()\n");
2283
    opt_dump(&opt_state, ic);
2284
  }
2285
#endif
2286
31.6k
  opt_root(&ic->root);
2287
#ifdef BDEBUG
2288
  if (pcap_optimizer_debug > 1 || pcap_print_dot_graph) {
2289
    printf("after opt_root()\n");
2290
    opt_dump(&opt_state, ic);
2291
  }
2292
#endif
2293
31.6k
  opt_cleanup(&opt_state);
2294
31.6k
  return 0;
2295
32.9k
}
2296
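
The setjmp() above is the optimizer's single error-recovery point: opt_error() longjmp()s back to it from anywhere in the pass, and cleanup runs exactly once.  A minimal sketch of the pattern (hypothetical names; note the volatile, required for locals modified after setjmp() and read after longjmp()):

    #include <setjmp.h>
    #include <stdio.h>
    #include <stdlib.h>

    static jmp_buf top_ctx;

    static void
    fail(const char *msg)       /* never returns, like opt_error() */
    {
      fprintf(stderr, "%s\n", msg);
      longjmp(top_ctx, 1);
    }

    static int
    run(void)
    {
      /* volatile: modified after setjmp() and read after longjmp() */
      void * volatile scratch = NULL;

      if (setjmp(top_ctx)) {    /* re-entered via longjmp() */
        free(scratch);          /* one cleanup point, as with opt_cleanup() */
        return -1;
      }
      scratch = malloc(64);
      if (scratch == NULL)
        fail("malloc");
      /* ... deep call chain that may call fail() anywhere ... */
      free(scratch);
      return 0;
    }
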
2297
static void
2298
make_marks(struct icode *ic, struct block *p)
2299
344k
{
2300
344k
  if (!isMarked(ic, p)) {
2301
205k
    Mark(ic, p);
2302
205k
    if (BPF_CLASS(p->s.code) != BPF_RET) {
2303
155k
      make_marks(ic, JT(p));
2304
155k
      make_marks(ic, JF(p));
2305
155k
    }
2306
205k
  }
2307
344k
}
2308
2309
/*
2310
 * Mark the code graph such that isMarked(ic, p) is true
2311
 * only for nodes that are alive.
2312
 */
2313
static void
2314
mark_code(struct icode *ic)
2315
34.2k
{
2316
34.2k
  ic->cur_mark += 1;
2317
34.2k
  make_marks(ic, ic->root);
2318
34.2k
}
2319
2320
/*
2321
 * True iff the two stmt lists load the same value from the packet into
2322
 * the accumulator.
2323
 */
2324
static int
2325
eq_slist(struct slist *x, struct slist *y)
2326
7.59k
{
2327
11.9k
  for (;;) {
2328
14.1k
    while (x && x->s.code == NOP)
2329
2.16k
      x = x->next;
2330
14.9k
    while (y && y->s.code == NOP)
2331
2.95k
      y = y->next;
2332
11.9k
    if (x == 0)
2333
3.71k
      return y == 0;
2334
8.27k
    if (y == 0)
2335
117
      return x == 0;
2336
8.15k
    if (x->s.code != y->s.code || x->s.k != y->s.k)
2337
3.76k
      return 0;
2338
4.39k
    x = x->next;
2339
4.39k
    y = y->next;
2340
4.39k
  }
2341
7.59k
}
2342
2343
static inline int
2344
eq_blk(struct block *b0, struct block *b1)
2345
1.83M
{
2346
1.83M
  if (b0->s.code == b1->s.code &&
2347
1.83M
      b0->s.k == b1->s.k &&
2348
1.83M
      b0->et.succ == b1->et.succ &&
2349
1.83M
      b0->ef.succ == b1->ef.succ)
2350
7.59k
    return eq_slist(b0->stmts, b1->stmts);
2351
1.82M
  return 0;
2352
1.83M
}
2353
2354
static void
2355
intern_blocks(opt_state_t *opt_state, struct icode *ic)
2356
31.6k
{
2357
31.6k
  struct block *p;
2358
31.6k
  u_int i, j;
2359
31.6k
  int done1; /* don't shadow global */
2360
34.2k
 top:
2361
34.2k
  done1 = 1;
2362
564k
  for (i = 0; i < opt_state->n_blocks; ++i)
2363
530k
    opt_state->blocks[i]->link = 0;
2364
2365
34.2k
  mark_code(ic);
2366
2367
530k
  for (i = opt_state->n_blocks - 1; i != 0; ) {
2368
496k
    --i;
2369
496k
    if (!isMarked(ic, opt_state->blocks[i]))
2370
317k
      continue;
2371
3.90M
    for (j = i + 1; j < opt_state->n_blocks; ++j) {
2372
3.73M
      if (!isMarked(ic, opt_state->blocks[j]))
2373
1.89M
        continue;
2374
1.83M
      if (eq_blk(opt_state->blocks[i], opt_state->blocks[j])) {
2375
3.59k
        opt_state->blocks[i]->link = opt_state->blocks[j]->link ?
2376
2.90k
          opt_state->blocks[j]->link : opt_state->blocks[j];
2377
3.59k
        break;
2378
3.59k
      }
2379
1.83M
    }
2380
178k
  }
2381
564k
  for (i = 0; i < opt_state->n_blocks; ++i) {
2382
530k
    p = opt_state->blocks[i];
2383
530k
    if (JT(p) == 0)
2384
63.0k
      continue;
2385
467k
    if (JT(p)->link) {
2386
3.62k
      done1 = 0;
2387
3.62k
      JT(p) = JT(p)->link;
2388
3.62k
    }
2389
467k
    if (JF(p)->link) {
2390
2.32k
      done1 = 0;
2391
2.32k
      JF(p) = JF(p)->link;
2392
2.32k
    }
2393
467k
  }
2394
34.2k
  if (!done1)
2395
2.57k
    goto top;
2396
34.2k
}
2397
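
The link-based merging above reduces to a few lines: scan bottom-up, point each element at an equivalent one that appears later, and prefer an already-linked representative so chains of duplicates collapse.  A sketch on plain integers (hypothetical names, not the real block comparison):

    #include <stddef.h>

    struct item {
      int value;
      struct item *link;    /* equivalent later item, or NULL */
    };

    /*
     * Point each duplicate at the representative that survives,
     * just as intern_blocks() prefers blocks[j]->link when set.
     */
    static void
    intern_items(struct item *items, int n)
    {
      int i, j;

      if (n <= 0)
        return;
      items[n - 1].link = NULL;
      for (i = n - 2; i >= 0; i--) {
        items[i].link = NULL;
        for (j = i + 1; j < n; j++) {
          if (items[j].value == items[i].value) {
            items[i].link = items[j].link != NULL ?
                items[j].link : &items[j];
            break;
          }
        }
      }
    }
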
2398
static void
2399
opt_cleanup(opt_state_t *opt_state)
2400
32.9k
{
2401
32.9k
  free((void *)opt_state->vnode_base);
2402
32.9k
  free((void *)opt_state->vmap);
2403
32.9k
  free((void *)opt_state->edges);
2404
32.9k
  free((void *)opt_state->space);
2405
32.9k
  free((void *)opt_state->levels);
2406
32.9k
  free((void *)opt_state->blocks);
2407
32.9k
}
2408
2409
/*
2410
 * For optimizer errors.
2411
 */
2412
static void PCAP_NORETURN
2413
opt_error(opt_state_t *opt_state, const char *fmt, ...)
2414
1.33k
{
2415
1.33k
  va_list ap;
2416
2417
1.33k
  if (opt_state->errbuf != NULL) {
2418
1.33k
    va_start(ap, fmt);
2419
1.33k
    (void)vsnprintf(opt_state->errbuf,
2420
1.33k
        PCAP_ERRBUF_SIZE, fmt, ap);
2421
1.33k
    va_end(ap);
2422
1.33k
  }
2423
1.33k
  longjmp(opt_state->top_ctx, 1);
2424
  /* NOTREACHED */
2425
#ifdef _AIX
2426
  PCAP_UNREACHABLE
2427
#endif /* _AIX */
2428
1.33k
}
2429
2430
/*
2431
 * Return the number of stmts in 's'.
2432
 */
2433
static u_int
2434
slength(struct slist *s)
2435
1.23M
{
2436
1.23M
  u_int n = 0;
2437
2438
4.46M
  for (; s; s = s->next)
2439
3.23M
    if (s->s.code != NOP)
2440
2.71M
      ++n;
2441
1.23M
  return n;
2442
1.23M
}
2443
2444
/*
2445
 * Return the number of nodes reachable by 'p'.
2446
 * All nodes should be initially unmarked.
2447
 */
2448
static int
2449
count_blocks(struct icode *ic, struct block *p)
2450
822k
{
2451
822k
  if (p == 0 || isMarked(ic, p))
2452
427k
    return 0;
2453
394k
  Mark(ic, p);
2454
394k
  return count_blocks(ic, JT(p)) + count_blocks(ic, JF(p)) + 1;
2455
822k
}
2456
2457
/*
2458
 * Do a depth first search on the flow graph, numbering the
2459
 * basic blocks, and entering them into the 'blocks' array.
2460
 */
2461
static void
2462
number_blks_r(opt_state_t *opt_state, struct icode *ic, struct block *p)
2463
822k
{
2464
822k
  u_int n;
2465
2466
822k
  if (p == 0 || isMarked(ic, p))
2467
427k
    return;
2468
2469
394k
  Mark(ic, p);
2470
394k
  n = opt_state->n_blocks++;
2471
394k
  if (opt_state->n_blocks == 0) {
2472
    /*
2473
     * Overflow.
2474
     */
2475
0
    opt_error(opt_state, "filter is too complex to optimize");
2476
0
  }
2477
394k
  p->id = n;
2478
394k
  opt_state->blocks[n] = p;
2479
2480
394k
  number_blks_r(opt_state, ic, JT(p));
2481
394k
  number_blks_r(opt_state, ic, JF(p));
2482
394k
}
2483
2484
/*
2485
 * Return the number of stmts in the flowgraph reachable by 'p'.
2486
 * The nodes should be unmarked before calling.
2487
 *
2488
 * Note that "stmts" means "instructions", and that this includes
2489
 *
2490
 *  side-effect statements in 'p' (slength(p->stmts));
2491
 *
2492
 *  statements in the true branch from 'p' (count_stmts(JT(p)));
2493
 *
2494
 *  statements in the false branch from 'p' (count_stmts(JF(p)));
2495
 *
2496
 *  the conditional jump itself (1);
2497
 *
2498
 *  an extra long jump if the true branch requires it (p->longjt);
2499
 *
2500
 *  an extra long jump if the false branch requires it (p->longjf).
2501
 */
2502
static u_int
2503
count_stmts(struct icode *ic, struct block *p)
2504
913k
{
2505
913k
  u_int n;
2506
2507
913k
  if (p == 0 || isMarked(ic, p))
2508
472k
    return 0;
2509
441k
  Mark(ic, p);
2510
441k
  n = count_stmts(ic, JT(p)) + count_stmts(ic, JF(p));
2511
441k
  return slength(p->stmts) + n + 1 + p->longjt + p->longjf;
2512
913k
}
2513
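
A worked example with made-up numbers: a block whose list holds four statements, one of them already turned into a NOP, and whose true branch needed an extra long jump (longjt = 1, longjf = 0) contributes

    slength() + 1 + longjt + longjf  =  3 + 1 + 1 + 0  =  5

instructions, on top of whatever the subtrees reached through JT() and JF() contribute.
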
2514
/*
2515
 * Allocate memory.  All allocation is done before optimization
2516
 * is begun.  A linear bound on the size of all data structures is computed
2517
 * from the total number of blocks and/or statements.
2518
 */
2519
static void
2520
opt_init(opt_state_t *opt_state, struct icode *ic)
2521
32.9k
{
2522
32.9k
  bpf_u_int32 *p;
2523
32.9k
  int i, n, max_stmts;
2524
32.9k
  u_int product;
2525
32.9k
  size_t block_memsize, edge_memsize;
2526
2527
  /*
2528
   * First, count the blocks, so we can malloc an array to map
2529
   * block number to block.  Then, put the blocks into the array.
2530
   */
2531
32.9k
  unMarkAll(ic);
2532
32.9k
  n = count_blocks(ic, ic->root);
2533
32.9k
  opt_state->blocks = (struct block **)calloc(n, sizeof(*opt_state->blocks));
2534
32.9k
  if (opt_state->blocks == NULL)
2535
0
    opt_error(opt_state, "malloc");
2536
32.9k
  unMarkAll(ic);
2537
32.9k
  opt_state->n_blocks = 0;
2538
32.9k
  number_blks_r(opt_state, ic, ic->root);
2539
2540
  /*
2541
   * This "should not happen".
2542
   */
2543
32.9k
  if (opt_state->n_blocks == 0)
2544
0
    opt_error(opt_state, "filter has no instructions; please report this as a libpcap issue");
2545
2546
32.9k
  opt_state->n_edges = 2 * opt_state->n_blocks;
2547
32.9k
  if ((opt_state->n_edges / 2) != opt_state->n_blocks) {
2548
    /*
2549
     * Overflow.
2550
     */
2551
0
    opt_error(opt_state, "filter is too complex to optimize");
2552
0
  }
2553
32.9k
  opt_state->edges = (struct edge **)calloc(opt_state->n_edges, sizeof(*opt_state->edges));
2554
32.9k
  if (opt_state->edges == NULL) {
2555
0
    opt_error(opt_state, "malloc");
2556
0
  }
2557
2558
  /*
2559
   * The number of levels is bounded by the number of nodes.
2560
   */
2561
32.9k
  opt_state->levels = (struct block **)calloc(opt_state->n_blocks, sizeof(*opt_state->levels));
2562
32.9k
  if (opt_state->levels == NULL) {
2563
0
    opt_error(opt_state, "malloc");
2564
0
  }
2565
2566
32.9k
  opt_state->edgewords = opt_state->n_edges / BITS_PER_WORD + 1;
2567
32.9k
  opt_state->nodewords = opt_state->n_blocks / BITS_PER_WORD + 1;
2568
2569
  /*
2570
   * Make sure opt_state->n_blocks * opt_state->nodewords fits
2571
   * in a u_int; we use it as a u_int number-of-iterations
2572
   * value.
2573
   */
2574
32.9k
  product = opt_state->n_blocks * opt_state->nodewords;
2575
32.9k
  if ((product / opt_state->n_blocks) != opt_state->nodewords) {
2576
    /*
2577
     * XXX - just punt and don't try to optimize?
2578
     * In practice, this is unlikely to happen with
2579
     * a normal filter.
2580
     */
2581
0
    opt_error(opt_state, "filter is too complex to optimize");
2582
0
  }
2583
2584
  /*
2585
   * Make sure the total memory required for that doesn't
2586
   * overflow.
2587
   */
2588
32.9k
  block_memsize = (size_t)2 * product * sizeof(*opt_state->space);
2589
32.9k
  if ((block_memsize / product) != 2 * sizeof(*opt_state->space)) {
2590
0
    opt_error(opt_state, "filter is too complex to optimize");
2591
0
  }
2592
2593
  /*
2594
   * Make sure opt_state->n_edges * opt_state->edgewords fits
2595
   * in a u_int; we use it as a u_int number-of-iterations
2596
   * value.
2597
   */
2598
32.9k
  product = opt_state->n_edges * opt_state->edgewords;
2599
32.9k
  if ((product / opt_state->n_edges) != opt_state->edgewords) {
2600
0
    opt_error(opt_state, "filter is too complex to optimize");
2601
0
  }
2602
2603
  /*
2604
   * Make sure the total memory required for that doesn't
2605
   * overflow.
2606
   */
2607
32.9k
  edge_memsize = (size_t)product * sizeof(*opt_state->space);
2608
32.9k
  if (edge_memsize / product != sizeof(*opt_state->space)) {
2609
0
    opt_error(opt_state, "filter is too complex to optimize");
2610
0
  }
2611
2612
  /*
2613
   * Make sure the total memory required for both of them doesn't
2614
   * overflow.
2615
   */
2616
32.9k
  if (block_memsize > SIZE_MAX - edge_memsize) {
2617
0
    opt_error(opt_state, "filter is too complex to optimize");
2618
0
  }
2619
2620
  /* XXX */
2621
32.9k
  opt_state->space = (bpf_u_int32 *)malloc(block_memsize + edge_memsize);
2622
32.9k
  if (opt_state->space == NULL) {
2623
0
    opt_error(opt_state, "malloc");
2624
0
  }
2625
32.9k
  p = opt_state->space;
2626
32.9k
  opt_state->all_dom_sets = p;
2627
427k
  for (i = 0; i < n; ++i) {
2628
394k
    opt_state->blocks[i]->dom = p;
2629
394k
    p += opt_state->nodewords;
2630
394k
  }
2631
32.9k
  opt_state->all_closure_sets = p;
2632
427k
  for (i = 0; i < n; ++i) {
2633
394k
    opt_state->blocks[i]->closure = p;
2634
394k
    p += opt_state->nodewords;
2635
394k
  }
2636
32.9k
  opt_state->all_edge_sets = p;
2637
427k
  for (i = 0; i < n; ++i) {
2638
394k
    register struct block *b = opt_state->blocks[i];
2639
2640
394k
    b->et.edom = p;
2641
394k
    p += opt_state->edgewords;
2642
394k
    b->ef.edom = p;
2643
394k
    p += opt_state->edgewords;
2644
394k
    b->et.id = i;
2645
394k
    opt_state->edges[i] = &b->et;
2646
394k
    b->ef.id = opt_state->n_blocks + i;
2647
394k
    opt_state->edges[opt_state->n_blocks + i] = &b->ef;
2648
394k
    b->et.pred = b;
2649
394k
    b->ef.pred = b;
2650
394k
  }
2651
32.9k
  max_stmts = 0;
2652
427k
  for (i = 0; i < n; ++i)
2653
394k
    max_stmts += slength(opt_state->blocks[i]->stmts) + 1;
2654
  /*
2655
   * We allocate at most 3 value numbers per statement,
2656
   * so this is an upper bound on the number of valnodes
2657
   * we'll need.
2658
   */
2659
32.9k
  opt_state->maxval = 3 * max_stmts;
2660
32.9k
  opt_state->vmap = (struct vmapinfo *)calloc(opt_state->maxval, sizeof(*opt_state->vmap));
2661
32.9k
  if (opt_state->vmap == NULL) {
2662
0
    opt_error(opt_state, "malloc");
2663
0
  }
2664
32.9k
  opt_state->vnode_base = (struct valnode *)calloc(opt_state->maxval, sizeof(*opt_state->vnode_base));
2665
32.9k
  if (opt_state->vnode_base == NULL) {
2666
0
    opt_error(opt_state, "malloc");
2667
0
  }
2668
32.9k
}
2669
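
All of the size checks above follow one pattern: multiply, then divide back and compare, since C has no portable overflow flag for unsigned arithmetic.  Both that divide-back test and the equivalent up-front test, as a sketch:

    #include <stdint.h>
    #include <stddef.h>

    /* Up-front test: refuses to compute a wrapped product at all. */
    static int
    mul_overflows(size_t a, size_t b)
    {
      return a != 0 && b > SIZE_MAX / a;
    }

    /* Divide-back test, the form opt_init() uses on its products. */
    static int
    mul_overflowed(size_t a, size_t b, size_t product)
    {
      return a != 0 && product / a != b;
    }
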
2670
/*
2671
 * This is only used when supporting optimizer debugging.  It is
2672
 * global state, so do *not* do more than one compile in parallel
2673
 * and expect it to provide meaningful information.
2674
 */
2675
#ifdef BDEBUG
2676
int bids[NBIDS];
2677
#endif
2678
2679
static void PCAP_NORETURN conv_error(conv_state_t *, const char *, ...)
2680
    PCAP_PRINTFLIKE(2, 3);
2681
2682
/*
2683
 * Returns true if successful.  Returns false if a branch has
2684
 * an offset that is too large.  If so, we have marked that
2685
 * branch so that on a subsequent iteration, it will be treated
2686
 * properly.
2687
 */
2688
static int
2689
convert_code_r(conv_state_t *conv_state, struct icode *ic, struct block *p)
2690
850k
{
2691
850k
  struct bpf_insn *dst;
2692
850k
  struct slist *src;
2693
850k
  u_int slen;
2694
850k
  u_int off;
2695
850k
  struct slist **offset = NULL;
2696
2697
850k
  if (p == 0 || isMarked(ic, p))
2698
431k
    return (1);
2699
418k
  Mark(ic, p);
2700
2701
418k
  if (convert_code_r(conv_state, ic, JF(p)) == 0)
2702
18.1k
    return (0);
2703
400k
  if (convert_code_r(conv_state, ic, JT(p)) == 0)
2704
5.31k
    return (0);
2705
2706
395k
  slen = slength(p->stmts);
2707
395k
  dst = conv_state->ftail -= (slen + 1 + p->longjt + p->longjf);
2708
    /* inflate length by any extra jumps */
2709
2710
395k
  p->offset = (int)(dst - conv_state->fstart);
2711
2712
  /* generate offset[] for convenience  */
2713
395k
  if (slen) {
2714
317k
    offset = (struct slist **)calloc(slen, sizeof(struct slist *));
2715
317k
    if (!offset) {
2716
0
      conv_error(conv_state, "not enough core");
2717
      /*NOTREACHED*/
2718
0
    }
2719
317k
  }
2720
395k
  src = p->stmts;
2721
1.15M
  for (off = 0; off < slen && src; off++) {
2722
#if 0
2723
    printf("off=%d src=%x\n", off, src);
2724
#endif
2725
762k
    offset[off] = src;
2726
762k
    src = src->next;
2727
762k
  }
2728
2729
395k
  off = 0;
2730
1.42M
  for (src = p->stmts; src; src = src->next) {
2731
1.02M
    if (src->s.code == NOP)
2732
263k
      continue;
2733
762k
    dst->code = (u_short)src->s.code;
2734
762k
    dst->k = src->s.k;
2735
2736
    /* fill block-local relative jump */
2737
762k
    if (BPF_CLASS(src->s.code) != BPF_JMP || src->s.code == (BPF_JMP|BPF_JA)) {
2738
#if 0
2739
      if (src->s.jt || src->s.jf) {
2740
        free(offset);
2741
        conv_error(conv_state, "illegal jmp destination");
2742
        /*NOTREACHED*/
2743
      }
2744
#endif
2745
746k
      goto filled;
2746
746k
    }
2747
16.3k
    if (off == slen - 2)  /*???*/
2748
0
      goto filled;
2749
2750
16.3k
      {
2751
16.3k
    u_int i;
2752
16.3k
    int jt, jf;
2753
16.3k
    const char ljerr[] = "%s for block-local relative jump: off=%d";
2754
2755
#if 0
2756
    printf("code=%x off=%d %x %x\n", src->s.code,
2757
      off, src->s.jt, src->s.jf);
2758
#endif
2759
2760
16.3k
    if (!src->s.jt || !src->s.jf) {
2761
0
      free(offset);
2762
0
      conv_error(conv_state, ljerr, "no jmp destination", off);
2763
      /*NOTREACHED*/
2764
0
    }
2765
2766
16.3k
    jt = jf = 0;
2767
478k
    for (i = 0; i < slen; i++) {
2768
462k
      if (offset[i] == src->s.jt) {
2769
16.3k
        if (jt) {
2770
0
          free(offset);
2771
0
          conv_error(conv_state, ljerr, "multiple matches", off);
2772
          /*NOTREACHED*/
2773
0
        }
2774
2775
16.3k
        if (i - off - 1 >= 256) {
2776
0
          free(offset);
2777
0
          conv_error(conv_state, ljerr, "out-of-range jump", off);
2778
          /*NOTREACHED*/
2779
0
        }
2780
16.3k
        dst->jt = (u_char)(i - off - 1);
2781
16.3k
        jt++;
2782
16.3k
      }
2783
462k
      if (offset[i] == src->s.jf) {
2784
16.3k
        if (jf) {
2785
0
          free(offset);
2786
0
          conv_error(conv_state, ljerr, "multiple matches", off);
2787
          /*NOTREACHED*/
2788
0
        }
2789
16.3k
        if (i - off - 1 >= 256) {
2790
0
          free(offset);
2791
0
          conv_error(conv_state, ljerr, "out-of-range jump", off);
2792
          /*NOTREACHED*/
2793
0
        }
2794
16.3k
        dst->jf = (u_char)(i - off - 1);
2795
16.3k
        jf++;
2796
16.3k
      }
2797
462k
    }
2798
16.3k
    if (!jt || !jf) {
2799
0
      free(offset);
2800
0
      conv_error(conv_state, ljerr, "no destination found", off);
2801
      /*NOTREACHED*/
2802
0
    }
2803
16.3k
      }
2804
762k
filled:
2805
762k
    ++dst;
2806
762k
    ++off;
2807
762k
  }
2808
395k
  if (offset)
2809
317k
    free(offset);
2810
2811
#ifdef BDEBUG
2812
  if (dst - conv_state->fstart < NBIDS)
2813
    bids[dst - conv_state->fstart] = p->id + 1;
2814
#endif
2815
395k
  dst->code = (u_short)p->s.code;
2816
395k
  dst->k = p->s.k;
2817
395k
  if (JT(p)) {
2818
    /* number of extra jumps inserted */
2819
343k
    u_char extrajmps = 0;
2820
343k
    off = JT(p)->offset - (p->offset + slen) - 1;
2821
343k
    if (off >= 256) {
2822
        /* offset too large for branch, must add a jump */
2823
7.02k
        if (p->longjt == 0) {
2824
      /* mark this instruction and retry */
2825
887
      p->longjt++;
2826
887
      return(0);
2827
887
        }
2828
6.13k
        dst->jt = extrajmps;
2829
6.13k
        extrajmps++;
2830
6.13k
        dst[extrajmps].code = BPF_JMP|BPF_JA;
2831
6.13k
        dst[extrajmps].k = off - extrajmps;
2832
6.13k
    }
2833
336k
    else
2834
336k
        dst->jt = (u_char)off;
2835
342k
    off = JF(p)->offset - (p->offset + slen) - 1;
2836
342k
    if (off >= 256) {
2837
        /* offset too large for branch, must add a jump */
2838
5.16k
        if (p->longjf == 0) {
2839
      /* mark this instruction and retry */
2840
678
      p->longjf++;
2841
678
      return(0);
2842
678
        }
2843
        /* branch if F to following jump */
2844
        /* if two jumps are inserted, F goes to second one */
2845
4.48k
        dst->jf = extrajmps;
2846
4.48k
        extrajmps++;
2847
4.48k
        dst[extrajmps].code = BPF_JMP|BPF_JA;
2848
4.48k
        dst[extrajmps].k = off - extrajmps;
2849
4.48k
    }
2850
337k
    else
2851
337k
        dst->jf = (u_char)off;
2852
342k
  }
2853
393k
  return (1);
2854
395k
}
2855
2856
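
The longjt/longjf machinery exists because a conditional BPF jump stores its displacements in 8-bit jt/jf fields, while BPF_JA carries a 32-bit k.  A schematic before/after with made-up offsets (bpf_dump() notation, which shows absolute targets even though the fields hold displacements):

    wanted:   (100) jeq  #0x800    jt 500   jf 102   ; displacement 500-100-1 = 399,
                                                     ; too big for the 8-bit jt field
    emitted:  (100) jeq  #0x800    jt 101   jf 102   ; branch to the adjacent JA
              (101) ja   398                         ; 101 + 1 + 398 = 500
              (102) ...                              ; false path, unchanged

The first attempt returns 0 after setting p->longjt, and the caller re-runs the conversion with room reserved for the extra jump.
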
2857
/*
2858
 * Convert flowgraph intermediate representation to the
2859
 * BPF array representation.  Set *lenp to the number of instructions.
2860
 *
2861
 * This routine does *NOT* leak the memory pointed to by fp.  It *must
2862
 * not* do free(fp) before returning fp; doing so would make no sense,
2863
 * as the BPF array pointed to by the return value of icode_to_fcode()
2864
 * must be valid - it's being returned for use in a bpf_program structure.
2865
 *
2866
 * If it appears that icode_to_fcode() is leaking, the problem is that
2867
 * the program using pcap_compile() is failing to free the memory in
2868
 * the BPF program when it's done - the leak is in the program, not in
2869
 * the routine that happens to be allocating the memory.  (By analogy, if
2870
 * a program calls fopen() without ever calling fclose() on the FILE *,
2871
 * it will leak the FILE structure; the leak is not in fopen(), it's in
2872
 * the program.)  Change the program to use pcap_freecode() when it's
2873
 * done with the filter program.  See the pcap man page.
2874
 */
2875
struct bpf_insn *
2876
icode_to_fcode(struct icode *ic, struct block *root, u_int *lenp,
2877
    char *errbuf)
2878
29.6k
{
2879
29.6k
  u_int n;
2880
29.6k
  struct bpf_insn *fp;
2881
29.6k
  conv_state_t conv_state;
2882
2883
29.6k
  conv_state.fstart = NULL;
2884
29.6k
  conv_state.errbuf = errbuf;
2885
29.6k
  if (setjmp(conv_state.top_ctx) != 0) {
2886
0
    free(conv_state.fstart);
2887
0
    return NULL;
2888
0
  }
2889
2890
  /*
2891
   * Loop doing convert_code_r() until no branches remain
2892
   * with too-large offsets.
2893
   */
2894
31.1k
  for (;;) {
2895
31.1k
      unMarkAll(ic);
2896
31.1k
      n = *lenp = count_stmts(ic, root);
2897
2898
31.1k
      fp = (struct bpf_insn *)malloc(sizeof(*fp) * n);
2899
31.1k
      if (fp == NULL) {
2900
0
    (void)snprintf(errbuf, PCAP_ERRBUF_SIZE,
2901
0
        "malloc");
2902
0
    return NULL;
2903
0
      }
2904
31.1k
      memset((char *)fp, 0, sizeof(*fp) * n);
2905
31.1k
      conv_state.fstart = fp;
2906
31.1k
      conv_state.ftail = fp + n;
2907
2908
31.1k
      unMarkAll(ic);
2909
31.1k
      if (convert_code_r(&conv_state, ic, root))
2910
29.6k
    break;
2911
1.56k
      free(fp);
2912
1.56k
  }
2913
2914
29.6k
  return fp;
2915
29.6k
}
2916
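
In application terms, the ownership contract above looks like this; a minimal sketch using the public API (filter string and DLT chosen arbitrarily):

    #include <pcap.h>
    #include <stdio.h>

    int
    main(void)
    {
      pcap_t *p = pcap_open_dead(DLT_EN10MB, 65535);
      struct bpf_program prog;

      if (p == NULL)
        return 1;
      if (pcap_compile(p, &prog, "ip and tcp", 1, PCAP_NETMASK_UNKNOWN) == -1) {
        fprintf(stderr, "pcap_compile: %s\n", pcap_geterr(p));
        pcap_close(p);
        return 1;
      }
      /* ... use prog.bf_insns / prog.bf_len ... */
      pcap_freecode(&prog);    /* the program, not libpcap, frees the array */
      pcap_close(p);
      return 0;
    }
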
2917
/*
2918
 * For icode_to_fcode() errors.
2919
 */
2920
static void PCAP_NORETURN
2921
conv_error(conv_state_t *conv_state, const char *fmt, ...)
2922
0
{
2923
0
  va_list ap;
2924
2925
0
  va_start(ap, fmt);
2926
0
  (void)vsnprintf(conv_state->errbuf,
2927
0
      PCAP_ERRBUF_SIZE, fmt, ap);
2928
0
  va_end(ap);
2929
0
  longjmp(conv_state->top_ctx, 1);
2930
  /* NOTREACHED */
2931
#ifdef _AIX
2932
  PCAP_UNREACHABLE
2933
#endif /* _AIX */
2934
0
}
2935
2936
/*
2937
 * Make a copy of a BPF program and put it in the "fcode" member of
2938
 * a "pcap_t".
2939
 *
2940
 * If we fail to allocate memory for the copy, fill in the "errbuf"
2941
 * member of the "pcap_t" with an error message, and return -1;
2942
 * otherwise, return 0.
2943
 */
2944
int
2945
install_bpf_program(pcap_t *p, struct bpf_program *fp)
2946
0
{
2947
0
  size_t prog_size;
2948
2949
  /*
2950
   * Validate the program.
2951
   */
2952
0
  if (!pcap_validate_filter(fp->bf_insns, fp->bf_len)) {
2953
0
    snprintf(p->errbuf, sizeof(p->errbuf),
2954
0
      "BPF program is not valid");
2955
0
    return (-1);
2956
0
  }
2957
2958
  /*
2959
   * Free up any already installed program.
2960
   */
2961
0
  pcap_freecode(&p->fcode);
2962
2963
0
  prog_size = sizeof(*fp->bf_insns) * fp->bf_len;
2964
0
  p->fcode.bf_len = fp->bf_len;
2965
0
  p->fcode.bf_insns = (struct bpf_insn *)malloc(prog_size);
2966
0
  if (p->fcode.bf_insns == NULL) {
2967
0
    pcap_fmt_errmsg_for_errno(p->errbuf, sizeof(p->errbuf),
2968
0
        errno, "malloc");
2969
0
    return (-1);
2970
0
  }
2971
0
  memcpy(p->fcode.bf_insns, fp->bf_insns, prog_size);
2972
0
  return (0);
2973
0
}
2974
2975
#ifdef BDEBUG
2976
static void
2977
dot_dump_node(struct icode *ic, struct block *block, struct bpf_program *prog,
2978
    FILE *out)
2979
{
2980
  int icount, noffset;
2981
  int i;
2982
2983
  if (block == NULL || isMarked(ic, block))
2984
    return;
2985
  Mark(ic, block);
2986
2987
  icount = slength(block->stmts) + 1 + block->longjt + block->longjf;
2988
  noffset = min(block->offset + icount, (int)prog->bf_len);
2989
2990
  fprintf(out, "\tblock%u [shape=ellipse, id=\"block-%u\" label=\"BLOCK%u\\n", block->id, block->id, block->id);
2991
  for (i = block->offset; i < noffset; i++) {
2992
    fprintf(out, "\\n%s", bpf_image(prog->bf_insns + i, i));
2993
  }
2994
  fprintf(out, "\" tooltip=\"");
2995
  for (i = 0; i < BPF_MEMWORDS; i++)
2996
    if (block->val[i] != VAL_UNKNOWN)
2997
      fprintf(out, "val[%d]=%d ", i, block->val[i]);
2998
  fprintf(out, "val[A]=%d ", block->val[A_ATOM]);
2999
  fprintf(out, "val[X]=%d", block->val[X_ATOM]);
3000
  fprintf(out, "\"");
3001
  if (JT(block) == NULL)
3002
    fprintf(out, ", peripheries=2");
3003
  fprintf(out, "];\n");
3004
3005
  dot_dump_node(ic, JT(block), prog, out);
3006
  dot_dump_node(ic, JF(block), prog, out);
3007
}
3008
3009
static void
3010
dot_dump_edge(struct icode *ic, struct block *block, FILE *out)
3011
{
3012
  if (block == NULL || isMarked(ic, block))
3013
    return;
3014
  Mark(ic, block);
3015
3016
  if (JT(block)) {
3017
    fprintf(out, "\t\"block%u\":se -> \"block%u\":n [label=\"T\"]; \n",
3018
        block->id, JT(block)->id);
3019
    fprintf(out, "\t\"block%u\":sw -> \"block%u\":n [label=\"F\"]; \n",
3020
         block->id, JF(block)->id);
3021
  }
3022
  dot_dump_edge(ic, JT(block), out);
3023
  dot_dump_edge(ic, JF(block), out);
3024
}
3025
3026
/* Output the block CFG using the graphviz/DOT language.
3027
 * In the CFG, each block's code, the value index of each register at exit,
3028
 * and the jump relationships are shown.
3029
 *
3030
 * example DOT for BPF `ip src host 1.1.1.1' is:
3031
    digraph BPF {
3032
      block0 [shape=ellipse, id="block-0" label="BLOCK0\n\n(000) ldh      [12]\n(001) jeq      #0x800           jt 2  jf 5" tooltip="val[A]=0 val[X]=0"];
3033
      block1 [shape=ellipse, id="block-1" label="BLOCK1\n\n(002) ld       [26]\n(003) jeq      #0x1010101       jt 4  jf 5" tooltip="val[A]=0 val[X]=0"];
3034
      block2 [shape=ellipse, id="block-2" label="BLOCK2\n\n(004) ret      #68" tooltip="val[A]=0 val[X]=0", peripheries=2];
3035
      block3 [shape=ellipse, id="block-3" label="BLOCK3\n\n(005) ret      #0" tooltip="val[A]=0 val[X]=0", peripheries=2];
3036
      "block0":se -> "block1":n [label="T"];
3037
      "block0":sw -> "block3":n [label="F"];
3038
      "block1":se -> "block2":n [label="T"];
3039
      "block1":sw -> "block3":n [label="F"];
3040
    }
3041
 *
3042
 *  After installing graphviz from https://www.graphviz.org/, save the output as bpf.dot
3043
 *  and run `dot -Tpng -O bpf.dot' to draw the graph.
3044
 */
3045
static int
3046
dot_dump(struct icode *ic, char *errbuf)
3047
{
3048
  struct bpf_program f;
3049
  FILE *out = stdout;
3050
3051
  memset(bids, 0, sizeof bids);
3052
  f.bf_insns = icode_to_fcode(ic, ic->root, &f.bf_len, errbuf);
3053
  if (f.bf_insns == NULL)
3054
    return -1;
3055
3056
  fprintf(out, "digraph BPF {\n");
3057
  unMarkAll(ic);
3058
  dot_dump_node(ic, ic->root, &f, out);
3059
  unMarkAll(ic);
3060
  dot_dump_edge(ic, ic->root, out);
3061
  fprintf(out, "}\n");
3062
3063
  free((char *)f.bf_insns);
3064
  return 0;
3065
}
3066
3067
static int
3068
plain_dump(struct icode *ic, char *errbuf)
3069
{
3070
  struct bpf_program f;
3071
3072
  memset(bids, 0, sizeof bids);
3073
  f.bf_insns = icode_to_fcode(ic, ic->root, &f.bf_len, errbuf);
3074
  if (f.bf_insns == NULL)
3075
    return -1;
3076
  bpf_dump(&f, 1);
3077
  putchar('\n');
3078
  free((char *)f.bf_insns);
3079
  return 0;
3080
}
3081
3082
static void
3083
opt_dump(opt_state_t *opt_state, struct icode *ic)
3084
{
3085
  int status;
3086
  char errbuf[PCAP_ERRBUF_SIZE];
3087
3088
  /*
3089
   * If the CFG, in DOT format, is requested, output it rather than
3090
   * the code that would be generated from that graph.
3091
   */
3092
  if (pcap_print_dot_graph)
3093
    status = dot_dump(ic, errbuf);
3094
  else
3095
    status = plain_dump(ic, errbuf);
3096
  if (status == -1)
3097
    opt_error(opt_state, "opt_dump: icode_to_fcode failed: %s", errbuf);
3098
}
3099
#endif