Coverage Report

Created: 2025-10-10 06:20

/src/capstonenext/arch/BPF/BPFDisassembler.c
 Line |  Count | Source
    1 |        | /* Capstone Disassembly Engine */
    2 |        | /* BPF Backend by david942j <david942j@gmail.com>, 2019 */
    3 |        | /* SPDX-FileCopyrightText: 2024 Roee Toledano <roeetoledano10@gmail.com> */
    4 |        | /* SPDX-License-Identifier: BSD-3 */
    5 |        |
    6 |        | #ifdef CAPSTONE_HAS_BPF
    7 |        |
    8 |        | #include <string.h>
    9 |        | #include <stddef.h> // offsetof macro
   10 |        |
   11 |        | #include "BPFConstants.h"
   12 |        | #include "BPFDisassembler.h"
   13 |        | #include "BPFMapping.h"
   14 |        | #include "../../Mapping.h"
   15 |        | #include "../../cs_priv.h"
   16 |        | #include "../../utils.h"
   17 |        |
   18 |        | ///< Malloc bpf_internal, also checks if code_len is large enough.
   19 |        | static bpf_internal *alloc_bpf_internal(const size_t code_len)
   20 |  13.1k | {
   21 |  13.1k |   bpf_internal *bpf;
   22 |        |
   23 |  13.1k |   if (code_len < 8)
   24 |    143 |     return NULL;
   25 |  13.0k |   bpf = cs_mem_malloc(sizeof(bpf_internal));
   26 |  13.0k |   if (bpf == NULL)
   27 |      0 |     return NULL;
   28 |        |   /* default value */
   29 |  13.0k |   bpf->insn_size = 8;
   30 |  13.0k |   return bpf;
   31 |  13.0k | }
   32 |        |
   33 |        | ///< Fetch a cBPF structure from code
   34 |        | static bpf_internal *fetch_cbpf(MCInst *instr, const uint8_t *code,
   35 |        |         const size_t code_len)
   36 |  5.51k | {
   37 |  5.51k |   bpf_internal *bpf;
   38 |        |
   39 |  5.51k |   bpf = alloc_bpf_internal(code_len);
   40 |  5.51k |   if (bpf == NULL)
   41 |     46 |     return NULL;
   42 |        |
   43 |  5.46k |   bpf->op = readBytes16(instr, code);
   44 |  5.46k |   bpf->jt = code[2];
   45 |  5.46k |   bpf->jf = code[3];
   46 |  5.46k |   bpf->k = readBytes32(instr, code + 4);
   47 |  5.46k |   return bpf;
   48 |  5.51k | }
   49 |        |
   50 |        | ///< Fetch an eBPF structure from code
   51 |        | static bpf_internal *fetch_ebpf(MCInst *instr, const uint8_t *code,
   52 |        |         const size_t code_len)
   53 |  7.66k | {
   54 |  7.66k |   bpf_internal *bpf;
   55 |        |
   56 |  7.66k |   bpf = alloc_bpf_internal(code_len);
   57 |  7.66k |   if (bpf == NULL)
   58 |     97 |     return NULL;
   59 |        |
   60 |  7.56k |   bpf->op = (uint16_t)code[0];
   61 |  7.56k |   bpf->dst = code[1] & 0xf;
   62 |  7.56k |   bpf->src = (code[1] & 0xf0) >> 4;
   63 |        |
   64 |        |   // eBPF has one 16-byte instruction: BPF_LD | BPF_DW | BPF_IMM,
   65 |        |   // in this case imm is combined with the next block's imm.
   66 |  7.56k |   if (bpf->op == (BPF_CLASS_LD | BPF_SIZE_DW | BPF_MODE_IMM)) {
   67 |    165 |     if (code_len < 16) {
   68 |      3 |       cs_mem_free(bpf);
   69 |      3 |       return NULL;
   70 |      3 |     }
   71 |    162 |     bpf->k = readBytes32(instr, code + 4) |
   72 |    162 |        (((uint64_t)readBytes32(instr, code + 12)) << 32);
   73 |    162 |     bpf->insn_size = 16;
   74 |  7.39k |   } else {
   75 |  7.39k |     bpf->offset = readBytes16(instr, code + 2);
   76 |  7.39k |     bpf->k = readBytes32(instr, code + 4);
   77 |  7.39k |   }
   78 |  7.56k |   return bpf;
   79 |  7.56k | }
   80 |        |
   81 |        | #define CHECK_READABLE_REG(ud, reg) \
   82 |  3.96k |   do { \
   83 |  3.96k |     if (!((reg) >= BPF_REG_R0 && (reg) <= BPF_REG_R10)) \
   84 |  3.96k |       return false; \
   85 |  3.96k |   } while (0)
   86 |        |
   87 |        | #define CHECK_WRITEABLE_REG(ud, reg) \
   88 |  1.80k |   do { \
   89 |  1.80k |     if (!((reg) >= BPF_REG_R0 && (reg) < BPF_REG_R10)) \
   90 |  1.80k |       return false; \
   91 |  1.80k |   } while (0)
   92 |        |
   93 |        | #define CHECK_READABLE_AND_PUSH(ud, MI, r) \
   94 |  3.96k |   do { \
   95 |  3.96k |     CHECK_READABLE_REG(ud, r + BPF_REG_R0); \
   96 |  3.96k |     MCOperand_CreateReg0(MI, r + BPF_REG_R0); \
   97 |  3.93k |   } while (0)
   98 |        |
   99 |        | #define CHECK_WRITABLE_AND_PUSH(ud, MI, r) \
  100 |  1.80k |   do { \
  101 |  1.80k |     CHECK_WRITEABLE_REG(ud, r + BPF_REG_R0); \
  102 |  1.80k |     MCOperand_CreateReg0(MI, r + BPF_REG_R0); \
  103 |  1.80k |   } while (0)
  104 |        |
  105 |        | static bool decodeLoad(MCInst *MI, bpf_internal *bpf)
  106 |  3.45k | {
  107 |  3.45k |   if (!EBPF_MODE(MI->csh->mode)) {
  108 |        |     /*
  109 |        |      *  +-----+-----------+--------------------+
  110 |        |      *  | ldb |    [k]    |       [x+k]        |
  111 |        |      *  | ldh |    [k]    |       [x+k]        |
  112 |        |      *  +-----+-----------+--------------------+
  113 |        |      */
  114 |  1.82k |     if (BPF_SIZE(bpf->op) == BPF_SIZE_DW)
  115 |      3 |       return false;
  116 |  1.82k |     if (BPF_SIZE(bpf->op) == BPF_SIZE_B ||
  117 |  1.35k |         BPF_SIZE(bpf->op) == BPF_SIZE_H) {
  118 |        |       /* no ldx */
  119 |    499 |       if (BPF_CLASS(bpf->op) != BPF_CLASS_LD)
  120 |      3 |         return false;
  121 |        |       /* can only be BPF_ABS and BPF_IND */
  122 |    496 |       if (BPF_MODE(bpf->op) == BPF_MODE_ABS) {
  123 |    351 |         MCOperand_CreateImm0(MI, bpf->k);
  124 |    351 |         return true;
  125 |    351 |       } else if (BPF_MODE(bpf->op) == BPF_MODE_IND) {
  126 |    141 |         MCOperand_CreateReg0(MI, BPF_REG_X);
  127 |    141 |         MCOperand_CreateImm0(MI, bpf->k);
  128 |    141 |         return true;
  129 |    141 |       }
  130 |      4 |       return false;
  131 |    496 |     }
  132 |        |     /*
  133 |        |      *  +-----+----+------+------+-----+-------+
  134 |        |      *  | ld  | #k | #len | M[k] | [k] | [x+k] |
  135 |        |      *  +-----+----+------+------+-----+-------+
  136 |        |      *  | ldx | #k | #len | M[k] | 4*([k]&0xf) |
  137 |        |      *  +-----+----+------+------+-------------+
  138 |        |      */
  139 |  1.32k |     switch (BPF_MODE(bpf->op)) {
  140 |    378 |     default:
  141 |    378 |       break;
  142 |    447 |     case BPF_MODE_IMM:
  143 |    447 |       MCOperand_CreateImm0(MI, bpf->k);
  144 |    447 |       return true;
  145 |    182 |     case BPF_MODE_LEN:
  146 |    182 |       return true;
  147 |    314 |     case BPF_MODE_MEM:
  148 |    314 |       MCOperand_CreateImm0(MI, bpf->k);
  149 |    314 |       return true;
  150 |  1.32k |     }
  151 |    378 |     if (BPF_CLASS(bpf->op) == BPF_CLASS_LD) {
  152 |    300 |       if (BPF_MODE(bpf->op) == BPF_MODE_ABS) {
  153 |    120 |         MCOperand_CreateImm0(MI, bpf->k);
  154 |    120 |         return true;
  155 |    180 |       } else if (BPF_MODE(bpf->op) == BPF_MODE_IND) {
  156 |    177 |         MCOperand_CreateReg0(MI, BPF_REG_X);
  157 |    177 |         MCOperand_CreateImm0(MI, bpf->k);
  158 |    177 |         return true;
  159 |    177 |       }
  160 |    300 |     } else { /* LDX */
  161 |     78 |       if (BPF_MODE(bpf->op) == BPF_MODE_MSH) {
  162 |     75 |         MCOperand_CreateImm0(MI, bpf->k);
  163 |     75 |         return true;
  164 |     75 |       }
  165 |     78 |     }
  166 |      6 |     return false;
  167 |    378 |   }
  168 |        |
  169 |        |   /* eBPF mode */
  170 |        |   /*
  171 |        |    * - IMM: lddw dst, imm64
  172 |        |    * - ABS: ld{w,h,b} [k]
  173 |        |    * - IND: ld{w,h,b} [src]
  174 |        |    * - MEM: ldx{w,h,b,dw} dst, [src+off]
  175 |        |    */
  176 |  1.63k |   if (BPF_CLASS(bpf->op) == BPF_CLASS_LD) {
  177 |  1.12k |     switch (BPF_MODE(bpf->op)) {
  178 |    167 |     case BPF_MODE_IMM:
  179 |    167 |       if (bpf->op !=
  180 |    167 |           (BPF_CLASS_LD | BPF_SIZE_DW | BPF_MODE_IMM))
  181 |      5 |         return false;
  182 |    162 |       CHECK_WRITABLE_AND_PUSH(ud, MI, bpf->dst);
  183 |    160 |       MCOperand_CreateImm0(MI, bpf->k);
  184 |    160 |       return true;
  185 |    804 |     case BPF_MODE_ABS:
  186 |    804 |       MCOperand_CreateImm0(MI, bpf->k);
  187 |    804 |       return true;
  188 |    149 |     case BPF_MODE_IND:
  189 |    149 |       CHECK_READABLE_AND_PUSH(ud, MI, bpf->src);
  190 |    147 |       return true;
  191 |  1.12k |     }
  192 |      2 |     return false;
  193 |  1.12k |   }
  194 |        |   /* LDX */
  195 |    508 |   if (BPF_MODE(bpf->op) == BPF_MODE_MEM) {
  196 |    504 |     CHECK_WRITABLE_AND_PUSH(ud, MI, bpf->dst);
  197 |    503 |     CHECK_READABLE_AND_PUSH(ud, MI, bpf->src);
  198 |    503 |     MCOperand_CreateImm0(MI, bpf->offset);
  199 |    503 |     return true;
  200 |    503 |   }
  201 |      4 |   return false;
  202 |    508 | }
  203 |        |
  204 |        | static bool decodeStore(MCInst *MI, bpf_internal *bpf)
  205 |  1.49k | {
  206 |        |   /* in cBPF, only BPF_ST* | BPF_MEM | BPF_W is valid
  207 |        |    * while in eBPF:
  208 |        |    * - BPF_STX | BPF_XADD | BPF_{W,DW}
  209 |        |    * - BPF_ST* | BPF_MEM | BPF_{W,H,B,DW}
  210 |        |    * are valid
  211 |        |    */
  212 |  1.49k |   if (!EBPF_MODE(MI->csh->mode)) {
  213 |        |     /* can only store to M[] */
  214 |    188 |     if (bpf->op != (BPF_CLASS(bpf->op) | BPF_MODE_MEM | BPF_SIZE_W))
  215 |     11 |       return false;
  216 |    177 |     MCOperand_CreateImm0(MI, bpf->k);
  217 |    177 |     return true;
  218 |    188 |   }
  219 |        |
  220 |        |   /* eBPF */
  221 |  1.31k |   if (BPF_MODE(bpf->op) == BPF_MODE_ATOMIC) {
  222 |    302 |     if (BPF_CLASS(bpf->op) != BPF_CLASS_STX)
  223 |      1 |       return false;
  224 |    301 |     if (BPF_SIZE(bpf->op) != BPF_SIZE_W &&
  225 |     87 |         BPF_SIZE(bpf->op) != BPF_SIZE_DW)
  226 |      1 |       return false;
  227 |        |     /* xadd [dst + off], src */
  228 |    300 |     CHECK_READABLE_AND_PUSH(ud, MI, bpf->dst);
  229 |    296 |     MCOperand_CreateImm0(MI, bpf->offset);
  230 |    296 |     CHECK_READABLE_AND_PUSH(ud, MI, bpf->src);
  231 |    295 |     return true;
  232 |    296 |   }
  233 |        |
  234 |  1.00k |   if (BPF_MODE(bpf->op) != BPF_MODE_MEM)
  235 |      5 |     return false;
  236 |        |
  237 |        |   /* st [dst + off], src */
  238 |  1.00k |   CHECK_READABLE_AND_PUSH(ud, MI, bpf->dst);
  239 |  1.00k |   MCOperand_CreateImm0(MI, bpf->offset);
  240 |  1.00k |   if (BPF_CLASS(bpf->op) == BPF_CLASS_ST)
  241 |    597 |     MCOperand_CreateImm0(MI, bpf->k);
  242 |    405 |   else
  243 |    405 |     CHECK_READABLE_AND_PUSH(ud, MI, bpf->src);
  244 |  1.00k |   return true;
  245 |  1.00k | }
  246 |        |
  247 |        | static bool decodeALU(MCInst *MI, bpf_internal *bpf)
  248 |  1.61k | {
  249 |        |   /* Set MI->Operands */
  250 |        |
  251 |        |   /* cBPF */
  252 |  1.61k |   if (!EBPF_MODE(MI->csh->mode)) {
  253 |    461 |     if (BPF_OP(bpf->op) > BPF_ALU_XOR)
  254 |      1 |       return false;
  255 |        |     /* cBPF's NEG has no operands */
  256 |    460 |     if (BPF_OP(bpf->op) == BPF_ALU_NEG)
  257 |     41 |       return true;
  258 |    419 |     if (BPF_SRC(bpf->op) == BPF_SRC_K)
  259 |    123 |       MCOperand_CreateImm0(MI, bpf->k);
  260 |    296 |     else /* BPF_SRC_X */
  261 |    296 |       MCOperand_CreateReg0(MI, BPF_REG_X);
  262 |    419 |     return true;
  263 |    460 |   }
  264 |        |
  265 |        |   /* eBPF */
  266 |        |
  267 |  1.15k |   if (BPF_OP(bpf->op) > BPF_ALU_END)
  268 |      1 |     return false;
  269 |        |   /* ENDian's imm must be one of 16, 32, 64 */
  270 |  1.14k |   if (BPF_OP(bpf->op) == BPF_ALU_END) {
  271 |    179 |     if (bpf->k != 16 && bpf->k != 32 && bpf->k != 64)
  272 |     11 |       return false;
  273 |    168 |     if (BPF_CLASS(bpf->op) == BPF_CLASS_ALU64 &&
  274 |     28 |         BPF_SRC(bpf->op) != BPF_SRC_LITTLE)
  275 |      0 |       return false;
  276 |    168 |   }
  277 |        |
  278 |        |   /* - op dst, imm
  279 |        |    * - op dst, src
  280 |        |    * - neg dst
  281 |        |    * - le<imm> dst
  282 |        |    */
  283 |        |   /* every ALU instructions have dst op */
  284 |  1.13k |   CHECK_WRITABLE_AND_PUSH(ud, MI, bpf->dst);
  285 |        |
  286 |        |   /* special cases */
  287 |  1.13k |   if (BPF_OP(bpf->op) == BPF_ALU_NEG)
  288 |     38 |     return true;
  289 |  1.09k |   if (BPF_OP(bpf->op) == BPF_ALU_END) {
  290 |        |     /* bpf->k must be one of 16, 32, 64 */
  291 |    168 |     bpf->op |= ((uint32_t)bpf->k << 4);
  292 |    168 |     return true;
  293 |    168 |   }
  294 |        |
  295 |        |   /* normal cases */
  296 |    931 |   if (BPF_SRC(bpf->op) == BPF_SRC_K) {
  297 |    792 |     MCOperand_CreateImm0(MI, bpf->k);
  298 |    792 |   } else { /* BPF_SRC_X */
  299 |    139 |     CHECK_READABLE_AND_PUSH(ud, MI, bpf->src);
  300 |    139 |   }
  301 |    931 |   return true;
  302 |    931 | }
  303 |        |
  304 |        | static bool decodeJump(MCInst *MI, bpf_internal *bpf)
  305 |  1.86k | {
  306 |        |   /* cBPF and eBPF are very different in class jump */
  307 |  1.86k |   if (!EBPF_MODE(MI->csh->mode)) {
  308 |    485 |     if (BPF_OP(bpf->op) > BPF_JUMP_JSET)
  309 |      2 |       return false;
  310 |        |
  311 |        |     /* ja is a special case of jumps */
  312 |    483 |     if (BPF_OP(bpf->op) == BPF_JUMP_JA) {
  313 |    129 |       MCOperand_CreateImm0(MI, bpf->k);
  314 |    129 |       return true;
  315 |    129 |     }
  316 |        |
  317 |    354 |     if (BPF_SRC(bpf->op) == BPF_SRC_K)
  318 |    164 |       MCOperand_CreateImm0(MI, bpf->k);
  319 |    190 |     else /* BPF_SRC_X */
  320 |    190 |       MCOperand_CreateReg0(MI, BPF_REG_X);
  321 |    354 |     MCOperand_CreateImm0(MI, bpf->jt);
  322 |    354 |     MCOperand_CreateImm0(MI, bpf->jf);
  323 |  1.37k |   } else {
  324 |  1.37k |     if (BPF_OP(bpf->op) > BPF_JUMP_JSLE)
  325 |      2 |       return false;
  326 |        |
  327 |        |     /* JMP32 has no CALL/EXIT instruction */
  328 |        |     /* No operands for exit */
  329 |  1.37k |     if (BPF_OP(bpf->op) == BPF_JUMP_EXIT)
  330 |      5 |       return bpf->op == (BPF_CLASS_JMP | BPF_JUMP_EXIT);
  331 |  1.37k |     if (BPF_OP(bpf->op) == BPF_JUMP_CALL) {
  332 |    111 |       if (bpf->op == (BPF_CLASS_JMP | BPF_JUMP_CALL)) {
  333 |     70 |         MCOperand_CreateImm0(MI, bpf->k);
  334 |     70 |         return true;
  335 |     70 |       }
  336 |     41 |       if (bpf->op ==
  337 |     41 |           (BPF_CLASS_JMP | BPF_JUMP_CALL | BPF_SRC_X)) {
  338 |     41 |         CHECK_READABLE_AND_PUSH(ud, MI, bpf->k);
  339 |     24 |         return true;
  340 |     41 |       }
  341 |      0 |       return false;
  342 |     41 |     }
  343 |        |
  344 |        |     /* ja is a special case of jumps */
  345 |  1.26k |     if (BPF_OP(bpf->op) == BPF_JUMP_JA) {
  346 |    157 |       if (BPF_SRC(bpf->op) != BPF_SRC_K)
  347 |      1 |         return false;
  348 |    156 |       if (BPF_CLASS(bpf->op) == BPF_CLASS_JMP)
  349 |    138 |         MCOperand_CreateImm0(MI, bpf->offset);
  350 |     18 |       else
  351 |     18 |         MCOperand_CreateImm0(MI, bpf->k);
  352 |        |
  353 |    156 |       return true;
  354 |    157 |     }
  355 |        |
  356 |        |     /* <j>  dst, src, +off */
  357 |  1.10k |     CHECK_READABLE_AND_PUSH(ud, MI, bpf->dst);
  358 |  1.10k |     if (BPF_SRC(bpf->op) == BPF_SRC_K)
  359 |  1.07k |       MCOperand_CreateImm0(MI, bpf->k);
  360 |     27 |     else
  361 |     27 |       CHECK_READABLE_AND_PUSH(ud, MI, bpf->src);
  362 |  1.10k |     MCOperand_CreateImm0(MI, bpf->offset);
  363 |  1.10k |   }
  364 |  1.45k |   return true;
  365 |  1.86k | }
  366 |        |
  367 |        | static bool decodeReturn(MCInst *MI, bpf_internal *bpf)
  368 |    552 | {
  369 |        |   /* Here only handles the BPF_RET class in cBPF */
  370 |    552 |   switch (BPF_RVAL(bpf->op)) {
  371 |    303 |   case BPF_SRC_K:
  372 |    303 |     MCOperand_CreateImm0(MI, bpf->k);
  373 |    303 |     return true;
  374 |    149 |   case BPF_SRC_X:
  375 |    149 |     MCOperand_CreateReg0(MI, BPF_REG_X);
  376 |    149 |     return true;
  377 |     98 |   case BPF_SRC_A:
  378 |     98 |     MCOperand_CreateReg0(MI, BPF_REG_A);
  379 |     98 |     return true;
  380 |    552 |   }
  381 |      2 |   return false;
  382 |    552 | }
  383 |        |
  384 |        | static bool decodeMISC(MCInst *MI, bpf_internal *bpf)
  385 |    270 | {
  386 |    270 |   uint16_t op = bpf->op ^ BPF_CLASS_MISC;
  387 |    270 |   return op == BPF_MISCOP_TAX || op == BPF_MISCOP_TXA;
  388 |    270 | }
  389 |        |
  390 |        | ///< 1. Check if the instruction is valid
  391 |        | ///< 2. Set MI->opcode
  392 |        | ///< 3. Set MI->Operands
  393 |        | static bool getInstruction(MCInst *MI, bpf_internal *bpf)
  394 |  6.44k | {
  395 |  6.44k |   cs_detail *detail;
  396 |        |
  397 |  6.44k |   detail = MI->flat_insn->detail;
  398 |        |   // initialize detail
  399 |  6.44k |   if (detail) {
  400 |  6.44k |     memset(detail, 0, offsetof(cs_detail, bpf) + sizeof(cs_bpf));
  401 |  6.44k |   }
  402 |        |
  403 |  6.44k |   MCInst_clear(MI);
  404 |        |
  405 |  6.44k |   switch (BPF_CLASS(bpf->op)) {
  406 |      0 |   default: /* should never happen */
  407 |      0 |     return false;
  408 |  1.22k |   case BPF_CLASS_LD:
  409 |  1.78k |   case BPF_CLASS_LDX:
  410 |  1.78k |     return decodeLoad(MI, bpf);
  411 |    263 |   case BPF_CLASS_ST:
  412 |    642 |   case BPF_CLASS_STX:
  413 |    642 |     return decodeStore(MI, bpf);
  414 |    897 |   case BPF_CLASS_ALU:
  415 |    897 |     return decodeALU(MI, bpf);
  416 |  1.22k |   case BPF_CLASS_JMP:
  417 |  1.22k |     return decodeJump(MI, bpf);
  418 |  1.10k |   case BPF_CLASS_RET:
  419 |        |     /* case BPF_CLASS_JMP32: */
  420 |  1.10k |     if (EBPF_MODE(MI->csh->mode))
  421 |    643 |       return decodeJump(MI, bpf);
  422 |    466 |     else
  423 |    466 |       return decodeReturn(MI, bpf);
  424 |    787 |   case BPF_CLASS_MISC:
  425 |        |     /* case BPF_CLASS_ALU64: */
  426 |    787 |     if (EBPF_MODE(MI->csh->mode))
  427 |    714 |       return decodeALU(MI, bpf);
  428 |     73 |     else
  429 |     73 |       return decodeMISC(MI, bpf);
  430 |  6.44k |   }
  431 |  6.44k | }
  432 |        |
  433 |        | // Check for regular load instructions
  434 |        | #define REG_LOAD_CASE(c) \
  435 |  1.29k |   case BPF_SIZE_##c: \
  436 |  1.29k |     if (BPF_CLASS(opcode) == BPF_CLASS_LD) \
  437 |  1.29k |       return BPF_INS_LD##c; \
  438 |  1.29k |     else \
  439 |  1.29k |       return BPF_INS_LDX##c;
  440 |        |
  441 |        | static bpf_insn op2insn_ld_cbpf(unsigned opcode)
  442 |  1.29k | {
  443 |  1.29k |   switch (BPF_SIZE(opcode)) {
  444 |    704 |     REG_LOAD_CASE(W);
  445 |    163 |     REG_LOAD_CASE(H);
  446 |    358 |     REG_LOAD_CASE(B);
  447 |     69 |     REG_LOAD_CASE(DW);
  448 |  1.29k |   }
  449 |        |
  450 |      0 |   return BPF_INS_INVALID;
  451 |  1.29k | }
  452 |        | #undef REG_LOAD_CASE
  453 |        |
  454 |        | // Check for packet load instructions
  455 |        | #define PACKET_LOAD_CASE(c) \
  456 |    478 |   case BPF_SIZE_##c: \
  457 |    478 |     if (BPF_MODE(opcode) == BPF_MODE_ABS) \
  458 |    478 |       return BPF_INS_LDABS##c; \
  459 |    478 |     else if (BPF_MODE(opcode) == BPF_MODE_IND) \
  460 |     49 |       return BPF_INS_LDIND##c; \
  461 |     49 |     else \
  462 |     49 |       return BPF_INS_INVALID;
  463 |        |
  464 |        | static bpf_insn op2insn_ld_ebpf(unsigned opcode)
  465 |    893 | {
  466 |    893 |   if (BPF_CLASS(opcode) == BPF_CLASS_LD) {
  467 |    517 |     switch (BPF_SIZE(opcode)) {
  468 |     81 |       PACKET_LOAD_CASE(W);
  469 |    234 |       PACKET_LOAD_CASE(H);
  470 |    163 |       PACKET_LOAD_CASE(B);
  471 |    517 |     }
  472 |    517 |   }
  473 |        |
  474 |        |   // If it's not a packet load instruction, it must be a regular load instruction
  475 |    415 |   return op2insn_ld_cbpf(opcode);
  476 |    893 | }
  477 |        | #undef PACKET_LOAD_CASE
  478 |        |
  479 |        | /* During parsing we already checked to make sure the size is D/DW and
  480 |        |  * mode is STX and not ST, so we don't need to check again*/
  481 |        | #define ALU_CASE_REG(c) \
  482 |     86 |   case BPF_ALU_##c: \
  483 |     86 |     if (BPF_SIZE(opcode) == BPF_SIZE_W) \
  484 |     86 |       return BPF_INS_A##c; \
  485 |     86 |     else \
  486 |     86 |       return BPF_INS_A##c##64;
  487 |        |
  488 |        | #define ALU_CASE_FETCH(c) \
  489 |     53 |   case BPF_ALU_##c | BPF_MODE_FETCH: \
  490 |     53 |     if (BPF_SIZE(opcode) == BPF_SIZE_W) \
  491 |     53 |       return BPF_INS_AF##c; \
  492 |     53 |     else \
  493 |     53 |       return BPF_INS_AF##c##64;
  494 |        |
  495 |        | #define COMPLEX_CASE(c) \
  496 |      7 |   case BPF_ATOMIC_##c | BPF_MODE_FETCH: \
  497 |      7 |     if (BPF_SIZE(opcode) == BPF_SIZE_DW) \
  498 |      7 |       return BPF_INS_A##c##64; \
  499 |      7 |     else \
  500 |      7 |       return BPF_INS_INVALID;
  501 |        |
  502 |        | #define CASE(c) \
  503 |    487 |   case BPF_SIZE_##c: \
  504 |    487 |     if (BPF_CLASS(opcode) == BPF_CLASS_ST) \
  505 |    487 |       return BPF_INS_ST##c; \
  506 |    487 |     else \
  507 |    487 |       return BPF_INS_STX##c;
  508 |        |
  509 |        | static bpf_insn op2insn_st(unsigned opcode, const uint32_t imm)
  510 |    633 | {
  511 |        |   /*
  512 |        |    * - BPF_STX | ALU atomic operations | BPF_{W,DW}
  513 |        |    * - BPF_STX | Complex atomic operations | BPF_{DW}
  514 |        |    * - BPF_ST* | BPF_MEM | BPF_{W,H,B,DW}
  515 |        |    */
  516 |        |
  517 |    633 |   if (BPF_MODE(opcode) == BPF_MODE_ATOMIC) {
  518 |    146 |     switch (imm) {
  519 |     17 |       ALU_CASE_REG(ADD);
  520 |     26 |       ALU_CASE_REG(OR);
  521 |     27 |       ALU_CASE_REG(AND);
  522 |     16 |       ALU_CASE_REG(XOR);
  523 |     15 |       ALU_CASE_FETCH(ADD);
  524 |     17 |       ALU_CASE_FETCH(OR);
  525 |     17 |       ALU_CASE_FETCH(AND);
  526 |      4 |       ALU_CASE_FETCH(XOR);
  527 |      7 |       COMPLEX_CASE(XCHG);
  528 |      0 |       COMPLEX_CASE(CMPXCHG);
  529 |      0 |     default: // Reached if complex atomic operation is used without fetch modifier
  530 |      0 |       return BPF_INS_INVALID;
  531 |    146 |     }
  532 |    146 |   }
  533 |        |
  534 |        |   /* should be BPF_MEM */
  535 |    487 |   switch (BPF_SIZE(opcode)) {
  536 |    235 |     CASE(W);
  537 |     52 |     CASE(H);
  538 |    167 |     CASE(B);
  539 |     33 |     CASE(DW);
  540 |    487 |   }
  541 |        |
  542 |      0 |   return BPF_INS_INVALID;
  543 |    487 | }
  544 |        | #undef CASE
  545 |        |
  546 |        | #define CASE(c) \
  547 |  1.14k |   case BPF_ALU_##c: \
  548 |  1.14k |     CASE_IF(c)
  549 |        |
  550 |        | #define CASE_IF(c) \
  551 |  1.41k |   do { \
  552 |  1.41k |     if (BPF_CLASS(opcode) == BPF_CLASS_ALU) \
  553 |  1.41k |       return BPF_INS_##c; \
  554 |  1.41k |     else \
  555 |  1.41k |       return BPF_INS_##c##64; \
  556 |  1.41k |   } while (0)
  557 |        |
  558 |        | static bpf_insn op2insn_alu(unsigned opcode, const uint16_t off,
  559 |        |           const bool is_ebpf)
  560 |  1.59k | {
  561 |        |   /* Endian is a special case */
  562 |  1.59k |   if (BPF_OP(opcode) == BPF_ALU_END) {
  563 |    168 |     if (BPF_CLASS(opcode) == BPF_CLASS_ALU64) {
  564 |     28 |       switch (opcode ^ BPF_CLASS_ALU64 ^ BPF_ALU_END ^
  565 |     28 |         BPF_SRC_LITTLE) {
  566 |     13 |       case (16 << 4):
  567 |     13 |         return BPF_INS_BSWAP16;
  568 |      0 |       case (32 << 4):
  569 |      0 |         return BPF_INS_BSWAP32;
  570 |     15 |       case (64 << 4):
  571 |     15 |         return BPF_INS_BSWAP64;
  572 |      0 |       default:
  573 |      0 |         return BPF_INS_INVALID;
  574 |     28 |       }
  575 |     28 |     }
  576 |        |
  577 |    140 |     switch (opcode ^ BPF_CLASS_ALU ^ BPF_ALU_END) {
  578 |      7 |     case BPF_SRC_LITTLE | (16 << 4):
  579 |      7 |       return BPF_INS_LE16;
  580 |      4 |     case BPF_SRC_LITTLE | (32 << 4):
  581 |      4 |       return BPF_INS_LE32;
  582 |     10 |     case BPF_SRC_LITTLE | (64 << 4):
  583 |     10 |       return BPF_INS_LE64;
  584 |     33 |     case BPF_SRC_BIG | (16 << 4):
  585 |     33 |       return BPF_INS_BE16;
  586 |     77 |     case BPF_SRC_BIG | (32 << 4):
  587 |     77 |       return BPF_INS_BE32;
  588 |      9 |     case BPF_SRC_BIG | (64 << 4):
  589 |      9 |       return BPF_INS_BE64;
  590 |    140 |     }
  591 |      0 |     return BPF_INS_INVALID;
  592 |    140 |   }
  593 |        |
  594 |  1.42k |   switch (BPF_OP(opcode)) {
  595 |     33 |     CASE(ADD);
  596 |     56 |     CASE(SUB);
  597 |    150 |     CASE(MUL);
  598 |     58 |     CASE(OR);
  599 |    129 |     CASE(AND);
  600 |     78 |     CASE(LSH);
  601 |     52 |     CASE(RSH);
  602 |     79 |     CASE(NEG);
  603 |    198 |     CASE(XOR);
  604 |    313 |     CASE(ARSH);
  605 |    166 |   case BPF_ALU_DIV:
  606 |    166 |     if (!is_ebpf || off == 0)
  607 |    145 |       CASE_IF(DIV);
  608 |     21 |     else if (off == 1)
  609 |     18 |       CASE_IF(SDIV);
  610 |      3 |     else
  611 |      3 |       return BPF_INS_INVALID;
  612 |     45 |   case BPF_ALU_MOD:
  613 |     45 |     if (!is_ebpf || off == 0)
  614 |     15 |       CASE_IF(MOD);
  615 |     30 |     else if (off == 1)
  616 |     27 |       CASE_IF(SMOD);
  617 |      3 |     else
  618 |      3 |       return BPF_INS_INVALID;
  619 |     72 |   case BPF_ALU_MOV:
  620 |        |     /* BPF_CLASS_ALU can have: mov, mov8s, mov16s
  621 |        |      * BPF_CLASS_ALU64 can have: mov, mov8s, mov16s, mov32s
  622 |        |      * */
  623 |     72 |     if (off == 0)
  624 |     13 |       CASE_IF(MOV);
  625 |     59 |     else if (off == 8)
  626 |     39 |       CASE_IF(MOVSB);
  627 |     20 |     else if (off == 16)
  628 |     15 |       CASE_IF(MOVSH);
  629 |      5 |     else if (off == 32 && BPF_CLASS(opcode) == BPF_CLASS_ALU64)
  630 |      3 |       return BPF_INS_MOVSW64;
  631 |      2 |     else
  632 |      2 |       return BPF_INS_INVALID;
  633 |  1.42k |   }
  634 |        |
  635 |      0 |   return BPF_INS_INVALID;
  636 |  1.42k | }
  637 |        | #undef CASE_IF
  638 |        | #undef CASE
  639 |        |
  640 |  1.83k | #define BPF_CALLX (BPF_CLASS_JMP | BPF_JUMP_CALL | BPF_SRC_X)
  641 |        |
  642 |        | #define CASE(c) \
  643 |  1.45k |   case BPF_JUMP_##c: \
  644 |  1.45k |     if (BPF_CLASS(opcode) == BPF_CLASS_JMP) \
  645 |  1.45k |       return BPF_INS_##c; \
  646 |  1.45k |     else \
  647 |  1.45k |       return BPF_INS_##c##32;
  648 |        |
  649 |        | #define SPEC_CASE(c) \
  650 |     75 |   case BPF_JUMP_##c: \
  651 |     75 |     if (BPF_CLASS(opcode) == BPF_CLASS_JMP) \
  652 |     75 |       return BPF_INS_##c; \
  653 |     75 |     else \
  654 |     75 |       return BPF_INS_INVALID;
  655 |        |
  656 |        | static bpf_insn op2insn_jmp(unsigned opcode)
  657 |  1.83k | {
  658 |  1.83k |   if (opcode == BPF_CALLX) {
  659 |     24 |     return BPF_INS_CALLX;
  660 |     24 |   }
  661 |        |
  662 |  1.81k |   switch (BPF_OP(opcode)) {
  663 |    285 |   case BPF_JUMP_JA:
  664 |    285 |     if (BPF_CLASS(opcode) == BPF_CLASS_JMP)
  665 |    267 |       return BPF_INS_JA;
  666 |     18 |     else
  667 |     18 |       return BPF_INS_JAL;
  668 |    346 |     CASE(JEQ);
  669 |     83 |     CASE(JGT);
  670 |    159 |     CASE(JGE);
  671 |    186 |     CASE(JSET);
  672 |    147 |     CASE(JNE);
  673 |    244 |     CASE(JSGT);
  674 |     56 |     CASE(JSGE);
  675 |     70 |     SPEC_CASE(CALL);
  676 |      5 |     SPEC_CASE(EXIT);
  677 |     40 |     CASE(JLT);
  678 |    104 |     CASE(JLE);
  679 |     32 |     CASE(JSLT);
  680 |     58 |     CASE(JSLE);
  681 |  1.81k |   }
  682 |        |
  683 |      0 |   return BPF_INS_INVALID;
  684 |  1.81k | }
  685 |        | #undef SPEC_CASE
  686 |        | #undef CASE
  687 |        | #undef BPF_CALLX
  688 |        |
  689 |        | #ifndef CAPSTONE_DIET
  690 |        |
  691 |        | static void update_regs_access(MCInst *MI, cs_detail *detail, bpf_insn insn_id,
  692 |        |              unsigned int opcode)
  693 |  6.36k | {
  694 |  6.36k |   if (insn_id == BPF_INS_INVALID)
  695 |      0 |     return;
  696 |        |   /*
  697 |        |    * In eBPF mode, only these instructions have implicit registers access:
  698 |        |    * - legacy ld{w,h,b,dw} * // w: r0
  699 |        |    * - exit // r: r0
  700 |        |    */
  701 |  6.36k |   if (EBPF_MODE(MI->csh->mode)) {
  702 |  3.99k |     switch (insn_id) {
  703 |  3.47k |     default:
  704 |  3.47k |       break;
  705 |  3.47k |     case BPF_INS_LDABSW:
  706 |    267 |     case BPF_INS_LDABSH:
  707 |    429 |     case BPF_INS_LDABSB:
  708 |    447 |     case BPF_INS_LDINDW:
  709 |    477 |     case BPF_INS_LDINDH:
  710 |    478 |     case BPF_INS_LDINDB:
  711 |    517 |     case BPF_INS_LDDW:
  712 |    517 |       if (BPF_MODE(opcode) == BPF_MODE_ABS ||
  713 |     64 |           BPF_MODE(opcode) == BPF_MODE_IND)
  714 |    510 |         map_add_implicit_write(MI, BPF_REG_R0);
  715 |    517 |       break;
  716 |      5 |     case BPF_INS_EXIT:
  717 |      5 |       map_add_implicit_read(MI, BPF_REG_R0);
  718 |      5 |       break;
  719 |  3.99k |     }
  720 |  3.99k |     return;
  721 |  3.99k |   }
  722 |        |
  723 |        |   /* cBPF mode */
  724 |  2.37k |   switch (BPF_CLASS(opcode)) {
  725 |    465 |   default:
  726 |    465 |     break;
  727 |    692 |   case BPF_CLASS_LD:
  728 |    692 |     map_add_implicit_write(MI, BPF_REG_A);
  729 |    692 |     break;
  730 |    187 |   case BPF_CLASS_LDX:
  731 |    187 |     map_add_implicit_write(MI, BPF_REG_X);
  732 |    187 |     break;
  733 |      1 |   case BPF_CLASS_ST:
  734 |      1 |     map_add_implicit_read(MI, BPF_REG_A);
  735 |      1 |     break;
  736 |     15 |   case BPF_CLASS_STX:
  737 |     15 |     map_add_implicit_read(MI, BPF_REG_X);
  738 |     15 |     break;
  739 |    460 |   case BPF_CLASS_ALU:
  740 |    460 |     map_add_implicit_read(MI, BPF_REG_A);
  741 |    460 |     map_add_implicit_write(MI, BPF_REG_A);
  742 |    460 |     break;
  743 |    483 |   case BPF_CLASS_JMP:
  744 |    483 |     if (insn_id != BPF_INS_JA) // except the unconditional jump
  745 |    354 |       map_add_implicit_read(MI, BPF_REG_A);
  746 |    483 |     break;
  747 |        |   /* case BPF_CLASS_RET: */
  748 |     70 |   case BPF_CLASS_MISC:
  749 |     70 |     if (insn_id == BPF_INS_TAX) {
  750 |     41 |       map_add_implicit_read(MI, BPF_REG_A);
  751 |     41 |       map_add_implicit_write(MI, BPF_REG_X);
  752 |     41 |     } else {
  753 |     29 |       map_add_implicit_read(MI, BPF_REG_X);
  754 |     29 |       map_add_implicit_write(MI, BPF_REG_A);
  755 |     29 |     }
  756 |     70 |     break;
  757 |  2.37k |   }
  758 |  2.37k | }
  759 |        | #endif
  760 |        |
  761 |        | static bool setFinalOpcode(MCInst *MI, const bpf_internal *bpf)
  762 |  6.37k | {
  763 |  6.37k |   bpf_insn id = BPF_INS_INVALID;
  764 |  6.37k | #ifndef CAPSTONE_DIET
  765 |  6.37k |   cs_detail *detail;
  766 |        |
  767 |  6.37k |   detail = get_detail(MI);
  768 |  6.37k | #endif
  769 |        |
  770 |  6.37k |   const uint16_t opcode = bpf->op;
  771 |  6.37k |   switch (BPF_CLASS(opcode)) {
  772 |      0 |   default: // will never happen
  773 |      0 |     break;
  774 |  1.20k |   case BPF_CLASS_LD:
  775 |  1.77k |   case BPF_CLASS_LDX:
  776 |  1.77k |     if (EBPF_MODE(MI->csh->mode))
  777 |    893 |       id = op2insn_ld_ebpf(opcode);
  778 |    879 |     else
  779 |    879 |       id = op2insn_ld_cbpf(opcode);
  780 |  1.77k |     add_group(MI, BPF_GRP_LOAD);
  781 |  1.77k |     break;
  782 |    260 |   case BPF_CLASS_ST:
  783 |    633 |   case BPF_CLASS_STX:
  784 |    633 |     id = op2insn_st(opcode, bpf->k);
  785 |    633 |     add_group(MI, BPF_GRP_STORE);
  786 |    633 |     break;
  787 |    890 |   case BPF_CLASS_ALU:
  788 |    890 |     id = op2insn_alu(opcode, bpf->offset, EBPF_MODE(MI->csh->mode));
  789 |    890 |     add_group(MI, BPF_GRP_ALU);
  790 |    890 |     break;
  791 |  1.19k |   case BPF_CLASS_JMP:
  792 |  1.19k |     id = op2insn_jmp(opcode);
  793 |  1.19k | #ifndef CAPSTONE_DIET
  794 |  1.19k |     if (id == BPF_INS_CALL || id == BPF_INS_CALLX)
  795 |     94 |       add_group(MI, BPF_GRP_CALL);
  796 |  1.10k |     else if (id == BPF_INS_EXIT)
  797 |      5 |       add_group(MI, BPF_GRP_RETURN);
  798 |  1.10k |     else
  799 |  1.10k |       add_group(MI, BPF_GRP_JUMP);
  800 |  1.19k | #endif
  801 |  1.19k |     break;
  802 |  1.10k |   case BPF_CLASS_RET:
  803 |        |     /* case BPF_CLASS_JMP32: */
  804 |  1.10k |     if (EBPF_MODE(MI->csh->mode)) {
  805 |    640 |       id = op2insn_jmp(opcode);
  806 |    640 |       add_group(MI, BPF_GRP_JUMP);
  807 |    640 |     } else {
  808 |    465 |       id = BPF_INS_RET;
  809 |    465 |       add_group(MI, BPF_GRP_RETURN);
  810 |    465 |     }
  811 |  1.10k |     break;
  812 |        |   // BPF_CLASS_MISC and BPF_CLASS_ALU64 have exactly same value
  813 |    777 |   case BPF_CLASS_MISC:
  814 |        |     /* case BPF_CLASS_ALU64: */
  815 |    777 |     if (EBPF_MODE(MI->csh->mode)) {
  816 |        |       // ALU64 in eBPF
  817 |    707 |       id = op2insn_alu(opcode, bpf->offset, true);
  818 |    707 |       add_group(MI, BPF_GRP_ALU);
  819 |    707 |     } else {
  820 |     70 |       if (BPF_MISCOP(opcode) == BPF_MISCOP_TXA)
  821 |     29 |         id = BPF_INS_TXA;
  822 |     41 |       else
  823 |     41 |         id = BPF_INS_TAX;
  824 |     70 |       add_group(MI, BPF_GRP_MISC);
  825 |     70 |     }
  826 |    777 |     break;
  827 |  6.37k |   }
  828 |        |
  829 |  6.37k |   if (id == BPF_INS_INVALID)
  830 |      8 |     return false;
  831 |        |
  832 |  6.36k |   MCInst_setOpcodePub(MI, id);
  833 |  6.36k | #undef PUSH_GROUP
  834 |        |
  835 |  6.36k | #ifndef CAPSTONE_DIET
  836 |  6.36k |   if (detail) {
  837 |  6.36k |     update_regs_access(MI, detail, id, opcode);
  838 |  6.36k |   }
  839 |  6.36k | #endif
  840 |  6.36k |   return true;
  841 |  6.37k | }
  842 |        |
  843 |        | bool BPF_getInstruction(csh ud, const uint8_t *code, size_t code_len,
  844 |        |       MCInst *instr, uint16_t *size, uint64_t address,
  845 |        |       void *info)
  846 |  6.54k | {
  847 |  6.54k |   bpf_internal *bpf;
  848 |        |
  849 |  6.54k |   if (EBPF_MODE(instr->csh->mode))
  850 |  4.12k |     bpf = fetch_ebpf(instr, code, code_len);
  851 |  2.41k |   else
  852 |  2.41k |     bpf = fetch_cbpf(instr, code, code_len);
  853 |  6.54k |   if (bpf == NULL)
  854 |     98 |     return false;
  855 |  6.44k |   if (!getInstruction(instr, bpf) || !setFinalOpcode(instr, bpf)) {
  856 |     76 |     cs_mem_free(bpf);
  857 |     76 |     return false;
  858 |     76 |   }
  859 |  6.36k |   MCInst_setOpcode(instr, bpf->op);
  860 |        |
  861 |  6.36k |   *size = bpf->insn_size;
  862 |  6.36k |   cs_mem_free(bpf);
  863 |        |
  864 |        |   return true;
  865 |  6.44k | }
  866 |        |
  867 |        | #endif
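
For context, the sketch below shows one way the code paths measured above are typically reached: Capstone's public API (cs_open with CS_MODE_BPF_CLASSIC or CS_MODE_BPF_EXTENDED, then cs_disasm) drives BPF_getInstruction() -> fetch_cbpf()/fetch_ebpf() -> getInstruction() -> setFinalOpcode(). This harness and its sample instruction bytes are illustrative assumptions, not part of the covered file or its test suite.

/* Minimal sketch (assumed example, not part of BPFDisassembler.c). */
#include <stdio.h>
#include <capstone/capstone.h>

int main(void)
{
	/* One 8-byte eBPF instruction, little-endian fields, as parsed by fetch_ebpf():
	 *   byte 0    : opcode          -> bpf->op  (0xb7 = BPF_CLASS_ALU64 | BPF_ALU_MOV | BPF_SRC_K)
	 *   byte 1    : dst:4 | src:4   -> bpf->dst, bpf->src
	 *   bytes 2-3 : offset          -> bpf->offset
	 *   bytes 4-7 : imm             -> bpf->k
	 * Assumed test bytes: "mov r0, 1". */
	const uint8_t code[] = { 0xb7, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00 };
	csh handle;
	cs_insn *insn;
	size_t n;

	if (cs_open(CS_ARCH_BPF, CS_MODE_BPF_EXTENDED | CS_MODE_LITTLE_ENDIAN,
		    &handle) != CS_ERR_OK)
		return 1;
	/* Detail mode exercises the cs_detail memset in getInstruction() and
	 * the implicit-register bookkeeping in update_regs_access(). */
	cs_option(handle, CS_OPT_DETAIL, CS_OPT_ON);

	n = cs_disasm(handle, code, sizeof(code), 0x0, 0, &insn);
	for (size_t i = 0; i < n; i++)
		printf("%s %s\n", insn[i].mnemonic, insn[i].op_str);

	if (n > 0)
		cs_free(insn, n);
	cs_close(&handle);
	return 0;
}

Switching the mode to CS_MODE_BPF_CLASSIC routes the same bytes through fetch_cbpf() and the cBPF decode paths instead; feeding fewer than 8 bytes (or fewer than 16 for lddw) exercises the early NULL returns counted in alloc_bpf_internal() and fetch_ebpf().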