Coverage Report

Created: 2025-11-09 07:00

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/capstonenext/arch/BPF/BPFDisassembler.c
Line
Count
Source
1
/* Capstone Disassembly Engine */
2
/* BPF Backend by david942j <david942j@gmail.com>, 2019 */
3
/* SPDX-FileCopyrightText: 2024 Roee Toledano <roeetoledano10@gmail.com> */
4
/* SPDX-License-Identifier: BSD-3 */
5
6
#ifdef CAPSTONE_HAS_BPF
7
8
#include <string.h>
9
#include <stddef.h> // offsetof macro
10
11
#include "BPFConstants.h"
12
#include "BPFDisassembler.h"
13
#include "BPFMapping.h"
14
#include "../../Mapping.h"
15
#include "../../cs_priv.h"
16
#include "../../utils.h"
17
18
/// Allocate a bpf_internal, after checking that code_len is large enough (>= 8 bytes).
19
static bpf_internal *alloc_bpf_internal(const size_t code_len)
20
16.9k
{
21
16.9k
  bpf_internal *bpf;
22
23
16.9k
  if (code_len < 8)
24
213
    return NULL;
25
16.7k
  bpf = cs_mem_malloc(sizeof(bpf_internal));
26
16.7k
  if (bpf == NULL)
27
0
    return NULL;
28
  /* default value */
29
16.7k
  bpf->insn_size = 8;
30
16.7k
  return bpf;
31
16.7k
}
32
33
/// Fetch a cBPF structure from code
34
static bpf_internal *fetch_cbpf(MCInst *instr, const uint8_t *code,
35
        const size_t code_len)
36
6.73k
{
37
6.73k
  bpf_internal *bpf;
38
39
6.73k
  bpf = alloc_bpf_internal(code_len);
40
6.73k
  if (bpf == NULL)
41
81
    return NULL;
42
43
6.65k
  bpf->op = readBytes16(instr, code);
44
6.65k
  bpf->jt = code[2];
45
6.65k
  bpf->jf = code[3];
46
6.65k
  bpf->k = readBytes32(instr, code + 4);
47
6.65k
  return bpf;
48
6.73k
}
49
50
/// Fetch an eBPF structure from code
51
static bpf_internal *fetch_ebpf(MCInst *instr, const uint8_t *code,
52
        const size_t code_len)
53
10.1k
{
54
10.1k
  bpf_internal *bpf;
55
56
10.1k
  bpf = alloc_bpf_internal(code_len);
57
10.1k
  if (bpf == NULL)
58
132
    return NULL;
59
60
10.0k
  bpf->op = (uint16_t)code[0];
61
10.0k
  bpf->dst = code[1] & 0xf;
62
10.0k
  bpf->src = (code[1] & 0xf0) >> 4;
63
64
  // eBPF has one 16-byte instruction: BPF_LD | BPF_DW | BPF_IMM,
65
  // in this case imm is combined with the next block's imm.
66
10.0k
  if (bpf->op == (BPF_CLASS_LD | BPF_SIZE_DW | BPF_MODE_IMM)) {
67
402
    if (code_len < 16) {
68
1
      cs_mem_free(bpf);
69
1
      return NULL;
70
1
    }
71
401
    bpf->k = readBytes32(instr, code + 4) |
72
401
       (((uint64_t)readBytes32(instr, code + 12)) << 32);
73
401
    bpf->insn_size = 16;
74
9.66k
  } else {
75
9.66k
    bpf->offset = readBytes16(instr, code + 2);
76
9.66k
    bpf->k = readBytes32(instr, code + 4);
77
9.66k
  }
78
10.0k
  return bpf;
79
10.0k
}
80
81
#define CHECK_READABLE_REG(ud, reg) \
82
3.97k
  do { \
83
3.97k
    if (!((reg) >= BPF_REG_R0 && (reg) <= BPF_REG_R10)) \
84
3.97k
      return false; \
85
3.97k
  } while (0)
86
87
#define CHECK_WRITEABLE_REG(ud, reg) \
88
1.52k
  do { \
89
1.52k
    if (!((reg) >= BPF_REG_R0 && (reg) < BPF_REG_R10)) \
90
1.52k
      return false; \
91
1.52k
  } while (0)
92
93
#define CHECK_READABLE_AND_PUSH(ud, MI, r) \
94
3.97k
  do { \
95
3.97k
    CHECK_READABLE_REG(ud, r + BPF_REG_R0); \
96
3.97k
    MCOperand_CreateReg0(MI, r + BPF_REG_R0); \
97
3.96k
  } while (0)
98
99
#define CHECK_WRITABLE_AND_PUSH(ud, MI, r) \
100
1.52k
  do { \
101
1.52k
    CHECK_WRITEABLE_REG(ud, r + BPF_REG_R0); \
102
1.52k
    MCOperand_CreateReg0(MI, r + BPF_REG_R0); \
103
1.51k
  } while (0)
104
105
static bool decodeLoad(MCInst *MI, bpf_internal *bpf)
106
5.11k
{
107
5.11k
  if (!EBPF_MODE(MI->csh->mode)) {
108
    /*
109
     *  +-----+-----------+--------------------+
110
     *  | ldb |    [k]    |       [x+k]        |
111
     *  | ldh |    [k]    |       [x+k]        |
112
     *  +-----+-----------+--------------------+
113
     */
114
2.66k
    if (BPF_SIZE(bpf->op) == BPF_SIZE_DW)
115
3
      return false;
116
2.66k
    if (BPF_SIZE(bpf->op) == BPF_SIZE_B ||
117
2.41k
        BPF_SIZE(bpf->op) == BPF_SIZE_H) {
118
      /* no ldx */
119
625
      if (BPF_CLASS(bpf->op) != BPF_CLASS_LD)
120
2
        return false;
121
      /* can only be BPF_ABS and BPF_IND */
122
623
      if (BPF_MODE(bpf->op) == BPF_MODE_ABS) {
123
387
        MCOperand_CreateImm0(MI, bpf->k);
124
387
        return true;
125
387
      } else if (BPF_MODE(bpf->op) == BPF_MODE_IND) {
126
233
        MCOperand_CreateReg0(MI, BPF_REG_X);
127
233
        MCOperand_CreateImm0(MI, bpf->k);
128
233
        return true;
129
233
      }
130
3
      return false;
131
623
    }
132
    /*
133
     *  +-----+----+------+------+-----+-------+
134
     *  | ld  | #k | #len | M[k] | [k] | [x+k] |
135
     *  +-----+----+------+------+-----+-------+
136
     *  | ldx | #k | #len | M[k] | 4*([k]&0xf) |
137
     *  +-----+----+------+------+-------------+
138
     */
139
2.03k
    switch (BPF_MODE(bpf->op)) {
140
944
    default:
141
944
      break;
142
944
    case BPF_MODE_IMM:
143
587
      MCOperand_CreateImm0(MI, bpf->k);
144
587
      return true;
145
277
    case BPF_MODE_LEN:
146
277
      return true;
147
227
    case BPF_MODE_MEM:
148
227
      MCOperand_CreateImm0(MI, bpf->k);
149
227
      return true;
150
2.03k
    }
151
944
    if (BPF_CLASS(bpf->op) == BPF_CLASS_LD) {
152
595
      if (BPF_MODE(bpf->op) == BPF_MODE_ABS) {
153
372
        MCOperand_CreateImm0(MI, bpf->k);
154
372
        return true;
155
372
      } else if (BPF_MODE(bpf->op) == BPF_MODE_IND) {
156
219
        MCOperand_CreateReg0(MI, BPF_REG_X);
157
219
        MCOperand_CreateImm0(MI, bpf->k);
158
219
        return true;
159
219
      }
160
595
    } else { /* LDX */
161
349
      if (BPF_MODE(bpf->op) == BPF_MODE_MSH) {
162
345
        MCOperand_CreateImm0(MI, bpf->k);
163
345
        return true;
164
345
      }
165
349
    }
166
8
    return false;
167
944
  }
168
169
  /* eBPF mode */
170
  /*
171
   * - IMM: lddw dst, imm64
172
   * - ABS: ld{w,h,b} [k]
173
   * - IND: ld{w,h,b} [src]
174
   * - MEM: ldx{w,h,b,dw} dst, [src+off]
175
   */
176
2.45k
  if (BPF_CLASS(bpf->op) == BPF_CLASS_LD) {
177
1.85k
    switch (BPF_MODE(bpf->op)) {
178
407
    case BPF_MODE_IMM:
179
407
      if (bpf->op !=
180
407
          (BPF_CLASS_LD | BPF_SIZE_DW | BPF_MODE_IMM))
181
6
        return false;
182
401
      CHECK_WRITABLE_AND_PUSH(ud, MI, bpf->dst);
183
398
      MCOperand_CreateImm0(MI, bpf->k);
184
398
      return true;
185
706
    case BPF_MODE_ABS:
186
706
      MCOperand_CreateImm0(MI, bpf->k);
187
706
      return true;
188
739
    case BPF_MODE_IND:
189
739
      CHECK_READABLE_AND_PUSH(ud, MI, bpf->src);
190
737
      return true;
191
1.85k
    }
192
3
    return false;
193
1.85k
  }
194
  /* LDX */
195
600
  if (BPF_MODE(bpf->op) == BPF_MODE_MEM) {
196
594
    CHECK_WRITABLE_AND_PUSH(ud, MI, bpf->dst);
197
593
    CHECK_READABLE_AND_PUSH(ud, MI, bpf->src);
198
592
    MCOperand_CreateImm0(MI, bpf->offset);
199
592
    return true;
200
593
  }
201
6
  return false;
202
600
}
203
204
static bool decodeStore(MCInst *MI, bpf_internal *bpf)
205
1.75k
{
206
  /* in cBPF, only BPF_ST* | BPF_MEM | BPF_W is valid
207
   * while in eBPF:
208
   * - BPF_STX | BPF_XADD | BPF_{W,DW}
209
   * - BPF_ST* | BPF_MEM | BPF_{W,H,B,DW}
210
   * are valid
211
   */
212
1.75k
  if (!EBPF_MODE(MI->csh->mode)) {
213
    /* can only store to M[] */
214
280
    if (bpf->op != (BPF_CLASS(bpf->op) | BPF_MODE_MEM | BPF_SIZE_W))
215
5
      return false;
216
275
    MCOperand_CreateImm0(MI, bpf->k);
217
275
    return true;
218
280
  }
219
220
  /* eBPF */
221
1.47k
  if (BPF_MODE(bpf->op) == BPF_MODE_ATOMIC) {
222
350
    if (BPF_CLASS(bpf->op) != BPF_CLASS_STX)
223
1
      return false;
224
349
    if (BPF_SIZE(bpf->op) != BPF_SIZE_W &&
225
87
        BPF_SIZE(bpf->op) != BPF_SIZE_DW)
226
1
      return false;
227
    /* xadd [dst + off], src */
228
348
    CHECK_READABLE_AND_PUSH(ud, MI, bpf->dst);
229
346
    MCOperand_CreateImm0(MI, bpf->offset);
230
346
    CHECK_READABLE_AND_PUSH(ud, MI, bpf->src);
231
344
    return true;
232
346
  }
233
234
1.12k
  if (BPF_MODE(bpf->op) != BPF_MODE_MEM)
235
8
    return false;
236
237
  /* st [dst + off], src */
238
1.11k
  CHECK_READABLE_AND_PUSH(ud, MI, bpf->dst);
239
1.11k
  MCOperand_CreateImm0(MI, bpf->offset);
240
1.11k
  if (BPF_CLASS(bpf->op) == BPF_CLASS_ST)
241
608
    MCOperand_CreateImm0(MI, bpf->k);
242
510
  else
243
510
    CHECK_READABLE_AND_PUSH(ud, MI, bpf->src);
244
1.11k
  return true;
245
1.11k
}
246
247
static bool decodeALU(MCInst *MI, bpf_internal *bpf)
248
736
{
249
  /* Set MI->Operands */
250
251
  /* cBPF */
252
736
  if (!EBPF_MODE(MI->csh->mode)) {
253
203
    if (BPF_OP(bpf->op) > BPF_ALU_XOR)
254
1
      return false;
255
    /* cBPF's NEG has no operands */
256
202
    if (BPF_OP(bpf->op) == BPF_ALU_NEG)
257
1
      return true;
258
201
    if (BPF_SRC(bpf->op) == BPF_SRC_K)
259
57
      MCOperand_CreateImm0(MI, bpf->k);
260
144
    else /* BPF_SRC_X */
261
144
      MCOperand_CreateReg0(MI, BPF_REG_X);
262
201
    return true;
263
202
  }
264
265
  /* eBPF */
266
267
533
  if (BPF_OP(bpf->op) > BPF_ALU_END)
268
1
    return false;
269
  /* ENDian's imm must be one of 16, 32, 64 */
270
532
  if (BPF_OP(bpf->op) == BPF_ALU_END) {
271
52
    if (bpf->k != 16 && bpf->k != 32 && bpf->k != 64)
272
6
      return false;
273
46
    if (BPF_CLASS(bpf->op) == BPF_CLASS_ALU64 &&
274
3
        BPF_SRC(bpf->op) != BPF_SRC_LITTLE)
275
0
      return false;
276
46
  }
277
278
  /* - op dst, imm
279
   * - op dst, src
280
   * - neg dst
281
   * - le<imm> dst
282
   */
283
  /* every ALU instructions have dst op */
284
526
  CHECK_WRITABLE_AND_PUSH(ud, MI, bpf->dst);
285
286
  /* special cases */
287
526
  if (BPF_OP(bpf->op) == BPF_ALU_NEG)
288
37
    return true;
289
489
  if (BPF_OP(bpf->op) == BPF_ALU_END) {
290
    /* bpf->k must be one of 16, 32, 64 */
291
46
    bpf->op |= ((uint32_t)bpf->k << 4);
292
46
    return true;
293
46
  }
294
295
  /* normal cases */
296
443
  if (BPF_SRC(bpf->op) == BPF_SRC_K) {
297
409
    MCOperand_CreateImm0(MI, bpf->k);
298
409
  } else { /* BPF_SRC_X */
299
34
    CHECK_READABLE_AND_PUSH(ud, MI, bpf->src);
300
34
  }
301
443
  return true;
302
443
}
303
304
static bool decodeJump(MCInst *MI, bpf_internal *bpf)
305
512
{
306
  /* cBPF and eBPF are very different in class jump */
307
512
  if (!EBPF_MODE(MI->csh->mode)) {
308
70
    if (BPF_OP(bpf->op) > BPF_JUMP_JSET)
309
1
      return false;
310
311
    /* ja is a special case of jumps */
312
69
    if (BPF_OP(bpf->op) == BPF_JUMP_JA) {
313
18
      MCOperand_CreateImm0(MI, bpf->k);
314
18
      return true;
315
18
    }
316
317
51
    if (BPF_SRC(bpf->op) == BPF_SRC_K)
318
20
      MCOperand_CreateImm0(MI, bpf->k);
319
31
    else /* BPF_SRC_X */
320
31
      MCOperand_CreateReg0(MI, BPF_REG_X);
321
51
    MCOperand_CreateImm0(MI, bpf->jt);
322
51
    MCOperand_CreateImm0(MI, bpf->jf);
323
442
  } else {
324
442
    if (BPF_OP(bpf->op) > BPF_JUMP_JSLE)
325
0
      return false;
326
327
    /* JMP32 has no CALL/EXIT instruction */
328
    /* No operands for exit */
329
442
    if (BPF_OP(bpf->op) == BPF_JUMP_EXIT)
330
6
      return bpf->op == (BPF_CLASS_JMP | BPF_JUMP_EXIT);
331
436
    if (BPF_OP(bpf->op) == BPF_JUMP_CALL) {
332
149
      if (bpf->op == (BPF_CLASS_JMP | BPF_JUMP_CALL)) {
333
147
        MCOperand_CreateImm0(MI, bpf->k);
334
147
        return true;
335
147
      }
336
2
      if (bpf->op ==
337
2
          (BPF_CLASS_JMP | BPF_JUMP_CALL | BPF_SRC_X)) {
338
2
        CHECK_READABLE_AND_PUSH(ud, MI, bpf->k);
339
0
        return true;
340
2
      }
341
0
      return false;
342
2
    }
343
344
    /* ja is a special case of jumps */
345
287
    if (BPF_OP(bpf->op) == BPF_JUMP_JA) {
346
6
      if (BPF_SRC(bpf->op) != BPF_SRC_K)
347
1
        return false;
348
5
      if (BPF_CLASS(bpf->op) == BPF_CLASS_JMP)
349
1
        MCOperand_CreateImm0(MI, bpf->offset);
350
4
      else
351
4
        MCOperand_CreateImm0(MI, bpf->k);
352
353
5
      return true;
354
6
    }
355
356
    /* <j>  dst, src, +off */
357
281
    CHECK_READABLE_AND_PUSH(ud, MI, bpf->dst);
358
281
    if (BPF_SRC(bpf->op) == BPF_SRC_K)
359
275
      MCOperand_CreateImm0(MI, bpf->k);
360
6
    else
361
6
      CHECK_READABLE_AND_PUSH(ud, MI, bpf->src);
362
281
    MCOperand_CreateImm0(MI, bpf->offset);
363
281
  }
364
332
  return true;
365
512
}
366
367
static bool decodeReturn(MCInst *MI, bpf_internal *bpf)
368
650
{
369
  /* Here only handles the BPF_RET class in cBPF */
370
650
  switch (BPF_RVAL(bpf->op)) {
371
217
  case BPF_SRC_K:
372
217
    MCOperand_CreateImm0(MI, bpf->k);
373
217
    return true;
374
223
  case BPF_SRC_X:
375
223
    MCOperand_CreateReg0(MI, BPF_REG_X);
376
223
    return true;
377
207
  case BPF_SRC_A:
378
207
    MCOperand_CreateReg0(MI, BPF_REG_A);
379
207
    return true;
380
650
  }
381
3
  return false;
382
650
}
383
384
static bool decodeMISC(MCInst *MI, bpf_internal *bpf)
385
322
{
386
322
  uint16_t op = bpf->op ^ BPF_CLASS_MISC;
387
322
  return op == BPF_MISCOP_TAX || op == BPF_MISCOP_TXA;
388
322
}
389
390
/// 1. Check if the instruction is valid
391
/// 2. Set MI->opcode
392
/// 3. Set MI->Operands
393
static bool getInstruction(MCInst *MI, bpf_internal *bpf)
394
2.21k
{
395
2.21k
  cs_detail *detail;
396
397
2.21k
  detail = MI->flat_insn->detail;
398
  // initialize detail
399
2.21k
  if (detail) {
400
2.21k
    memset(detail, 0, offsetof(cs_detail, bpf) + sizeof(cs_bpf));
401
2.21k
  }
402
403
2.21k
  MCInst_clear(MI);
404
405
2.21k
  switch (BPF_CLASS(bpf->op)) {
406
0
  default: /* should never happen */
407
0
    return false;
408
492
  case BPF_CLASS_LD:
409
804
  case BPF_CLASS_LDX:
410
804
    return decodeLoad(MI, bpf);
411
27
  case BPF_CLASS_ST:
412
106
  case BPF_CLASS_STX:
413
106
    return decodeStore(MI, bpf);
414
362
  case BPF_CLASS_ALU:
415
362
    return decodeALU(MI, bpf);
416
466
  case BPF_CLASS_JMP:
417
466
    return decodeJump(MI, bpf);
418
93
  case BPF_CLASS_RET:
419
    /* case BPF_CLASS_JMP32: */
420
93
    if (EBPF_MODE(MI->csh->mode))
421
46
      return decodeJump(MI, bpf);
422
47
    else
423
47
      return decodeReturn(MI, bpf);
424
387
  case BPF_CLASS_MISC:
425
    /* case BPF_CLASS_ALU64: */
426
387
    if (EBPF_MODE(MI->csh->mode))
427
374
      return decodeALU(MI, bpf);
428
13
    else
429
13
      return decodeMISC(MI, bpf);
430
2.21k
  }
431
2.21k
}
432
433
// Check for regular load instructions
434
#define REG_LOAD_CASE(c) \
435
406
  case BPF_SIZE_##c: \
436
406
    if (BPF_CLASS(opcode) == BPF_CLASS_LD) \
437
406
      return BPF_INS_LD##c; \
438
406
    else \
439
406
      return BPF_INS_LDX##c;
440
441
static bpf_insn op2insn_ld_cbpf(unsigned opcode)
442
406
{
443
406
  switch (BPF_SIZE(opcode)) {
444
237
    REG_LOAD_CASE(W);
445
152
    REG_LOAD_CASE(H);
446
7
    REG_LOAD_CASE(B);
447
10
    REG_LOAD_CASE(DW);
448
406
  }
449
450
0
  return BPF_INS_INVALID;
451
406
}
452
#undef REG_LOAD_CASE
453
454
// Check for packet load instructions
455
#define PACKET_LOAD_CASE(c) \
456
384
  case BPF_SIZE_##c: \
457
384
    if (BPF_MODE(opcode) == BPF_MODE_ABS) \
458
384
      return BPF_INS_LDABS##c; \
459
384
    else if (BPF_MODE(opcode) == BPF_MODE_IND) \
460
356
      return BPF_INS_LDIND##c; \
461
356
    else \
462
356
      return BPF_INS_INVALID;
463
464
static bpf_insn op2insn_ld_ebpf(unsigned opcode)
465
526
{
466
526
  if (BPF_CLASS(opcode) == BPF_CLASS_LD) {
467
394
    switch (BPF_SIZE(opcode)) {
468
232
      PACKET_LOAD_CASE(W);
469
136
      PACKET_LOAD_CASE(H);
470
16
      PACKET_LOAD_CASE(B);
471
394
    }
472
394
  }
473
474
  // If it's not a packet load instruction, it must be a regular load instruction
475
142
  return op2insn_ld_cbpf(opcode);
476
526
}
477
#undef PACKET_LOAD_CASE
478
479
/* During parsing we already checked that the size is W/DW and the
480
 * class is STX, not ST, so we don't need to check again. */
481
#define ALU_CASE_REG(c) \
482
33
  case BPF_ALU_##c: \
483
33
    if (BPF_SIZE(opcode) == BPF_SIZE_W) \
484
33
      return BPF_INS_A##c; \
485
33
    else \
486
33
      return BPF_INS_A##c##64;
487
488
#define ALU_CASE_FETCH(c) \
489
7
  case BPF_ALU_##c | BPF_MODE_FETCH: \
490
7
    if (BPF_SIZE(opcode) == BPF_SIZE_W) \
491
7
      return BPF_INS_AF##c; \
492
7
    else \
493
7
      return BPF_INS_AF##c##64;
494
495
#define COMPLEX_CASE(c) \
496
5
  case BPF_ATOMIC_##c | BPF_MODE_FETCH: \
497
5
    if (BPF_SIZE(opcode) == BPF_SIZE_DW) \
498
5
      return BPF_INS_A##c##64; \
499
5
    else \
500
5
      return BPF_INS_INVALID;
501
502
#define CASE(c) \
503
54
  case BPF_SIZE_##c: \
504
54
    if (BPF_CLASS(opcode) == BPF_CLASS_ST) \
505
54
      return BPF_INS_ST##c; \
506
54
    else \
507
54
      return BPF_INS_STX##c;
508
509
static bpf_insn op2insn_st(unsigned opcode, const uint32_t imm)
510
99
{
511
  /*
512
   * - BPF_STX | ALU atomic operations | BPF_{W,DW}
513
   * - BPF_STX | Complex atomic operations | BPF_{DW}
514
   * - BPF_ST* | BPF_MEM | BPF_{W,H,B,DW}
515
   */
516
517
99
  if (BPF_MODE(opcode) == BPF_MODE_ATOMIC) {
518
45
    switch (imm) {
519
4
      ALU_CASE_REG(ADD);
520
8
      ALU_CASE_REG(OR);
521
17
      ALU_CASE_REG(AND);
522
4
      ALU_CASE_REG(XOR);
523
0
      ALU_CASE_FETCH(ADD);
524
1
      ALU_CASE_FETCH(OR);
525
6
      ALU_CASE_FETCH(AND);
526
0
      ALU_CASE_FETCH(XOR);
527
0
      COMPLEX_CASE(XCHG);
528
5
      COMPLEX_CASE(CMPXCHG);
529
0
    default: // Reached if complex atomic operation is used without fetch modifier
530
0
      return BPF_INS_INVALID;
531
45
    }
532
45
  }
533
534
  /* should be BPF_MEM */
535
54
  switch (BPF_SIZE(opcode)) {
536
3
    CASE(W);
537
28
    CASE(H);
538
20
    CASE(B);
539
3
    CASE(DW);
540
54
  }
541
542
0
  return BPF_INS_INVALID;
543
54
}
544
#undef CASE
545
546
#define CASE(c) \
547
474
  case BPF_ALU_##c: \
548
474
    CASE_IF(c)
549
550
#define CASE_IF(c) \
551
676
  do { \
552
676
    if (BPF_CLASS(opcode) == BPF_CLASS_ALU) \
553
676
      return BPF_INS_##c; \
554
676
    else \
555
676
      return BPF_INS_##c##64; \
556
676
  } while (0)
557
558
static bpf_insn op2insn_alu(unsigned opcode, const uint16_t off,
559
          const bool is_ebpf)
560
728
{
561
  /* Endian is a special case */
562
728
  if (BPF_OP(opcode) == BPF_ALU_END) {
563
46
    if (BPF_CLASS(opcode) == BPF_CLASS_ALU64) {
564
3
      switch (opcode ^ BPF_CLASS_ALU64 ^ BPF_ALU_END ^
565
3
        BPF_SRC_LITTLE) {
566
0
      case (16 << 4):
567
0
        return BPF_INS_BSWAP16;
568
0
      case (32 << 4):
569
0
        return BPF_INS_BSWAP32;
570
3
      case (64 << 4):
571
3
        return BPF_INS_BSWAP64;
572
0
      default:
573
0
        return BPF_INS_INVALID;
574
3
      }
575
3
    }
576
577
43
    switch (opcode ^ BPF_CLASS_ALU ^ BPF_ALU_END) {
578
2
    case BPF_SRC_LITTLE | (16 << 4):
579
2
      return BPF_INS_LE16;
580
0
    case BPF_SRC_LITTLE | (32 << 4):
581
0
      return BPF_INS_LE32;
582
4
    case BPF_SRC_LITTLE | (64 << 4):
583
4
      return BPF_INS_LE64;
584
10
    case BPF_SRC_BIG | (16 << 4):
585
10
      return BPF_INS_BE16;
586
23
    case BPF_SRC_BIG | (32 << 4):
587
23
      return BPF_INS_BE32;
588
4
    case BPF_SRC_BIG | (64 << 4):
589
4
      return BPF_INS_BE64;
590
43
    }
591
0
    return BPF_INS_INVALID;
592
43
  }
593
594
682
  switch (BPF_OP(opcode)) {
595
59
    CASE(ADD);
596
5
    CASE(SUB);
597
2
    CASE(MUL);
598
46
    CASE(OR);
599
176
    CASE(AND);
600
67
    CASE(LSH);
601
3
    CASE(RSH);
602
38
    CASE(NEG);
603
32
    CASE(XOR);
604
46
    CASE(ARSH);
605
13
  case BPF_ALU_DIV:
606
13
    if (!is_ebpf || off == 0)
607
10
      CASE_IF(DIV);
608
3
    else if (off == 1)
609
0
      CASE_IF(SDIV);
610
3
    else
611
3
      return BPF_INS_INVALID;
612
129
  case BPF_ALU_MOD:
613
129
    if (!is_ebpf || off == 0)
614
128
      CASE_IF(MOD);
615
1
    else if (off == 1)
616
0
      CASE_IF(SMOD);
617
1
    else
618
1
      return BPF_INS_INVALID;
619
66
  case BPF_ALU_MOV:
620
    /* BPF_CLASS_ALU can have: mov, mov8s, mov16s
621
     * BPF_CLASS_ALU64 can have: mov, mov8s, mov16s, mov32s
622
     * */
623
66
    if (off == 0)
624
18
      CASE_IF(MOV);
625
48
    else if (off == 8)
626
42
      CASE_IF(MOVSB);
627
6
    else if (off == 16)
628
4
      CASE_IF(MOVSH);
629
2
    else if (off == 32 && BPF_CLASS(opcode) == BPF_CLASS_ALU64)
630
0
      return BPF_INS_MOVSW64;
631
2
    else
632
2
      return BPF_INS_INVALID;
633
682
  }
634
635
0
  return BPF_INS_INVALID;
636
682
}
637
#undef CASE_IF
638
#undef CASE
639
640
508
#define BPF_CALLX (BPF_CLASS_JMP | BPF_JUMP_CALL | BPF_SRC_X)
641
642
#define CASE(c) \
643
332
  case BPF_JUMP_##c: \
644
332
    if (BPF_CLASS(opcode) == BPF_CLASS_JMP) \
645
332
      return BPF_INS_##c; \
646
332
    else \
647
332
      return BPF_INS_##c##32;
648
649
#define SPEC_CASE(c) \
650
153
  case BPF_JUMP_##c: \
651
153
    if (BPF_CLASS(opcode) == BPF_CLASS_JMP) \
652
153
      return BPF_INS_##c; \
653
153
    else \
654
153
      return BPF_INS_INVALID;
655
656
static bpf_insn op2insn_jmp(unsigned opcode)
657
508
{
658
508
  if (opcode == BPF_CALLX) {
659
0
    return BPF_INS_CALLX;
660
0
  }
661
662
508
  switch (BPF_OP(opcode)) {
663
23
  case BPF_JUMP_JA:
664
23
    if (BPF_CLASS(opcode) == BPF_CLASS_JMP)
665
19
      return BPF_INS_JA;
666
4
    else
667
4
      return BPF_INS_JAL;
668
38
    CASE(JEQ);
669
18
    CASE(JGT);
670
16
    CASE(JGE);
671
51
    CASE(JSET);
672
5
    CASE(JNE);
673
17
    CASE(JSGT);
674
15
    CASE(JSGE);
675
147
    SPEC_CASE(CALL);
676
6
    SPEC_CASE(EXIT);
677
0
    CASE(JLT);
678
168
    CASE(JLE);
679
1
    CASE(JSLT);
680
3
    CASE(JSLE);
681
508
  }
682
683
0
  return BPF_INS_INVALID;
684
508
}
685
#undef SPEC_CASE
686
#undef CASE
687
#undef BPF_CALLX
688
689
#ifndef CAPSTONE_DIET
690
691
static void update_regs_access(MCInst *MI, cs_detail *detail, bpf_insn insn_id,
692
             unsigned int opcode)
693
2.17k
{
694
2.17k
  if (insn_id == BPF_INS_INVALID)
695
0
    return;
696
  /*
697
   * In eBPF mode, only these instructions have implicit registers access:
698
   * - legacy ld{w,h,b,dw} * // w: r0
699
   * - exit // r: r0
700
   */
701
2.17k
  if (EBPF_MODE(MI->csh->mode)) {
702
1.58k
    switch (insn_id) {
703
1.18k
    default:
704
1.18k
      break;
705
1.18k
    case BPF_INS_LDABSW:
706
21
    case BPF_INS_LDABSH:
707
28
    case BPF_INS_LDABSB:
708
242
    case BPF_INS_LDINDW:
709
375
    case BPF_INS_LDINDH:
710
384
    case BPF_INS_LDINDB:
711
394
    case BPF_INS_LDDW:
712
394
      if (BPF_MODE(opcode) == BPF_MODE_ABS ||
713
360
          BPF_MODE(opcode) == BPF_MODE_IND)
714
391
        map_add_implicit_write(MI, BPF_REG_R0);
715
394
      break;
716
6
    case BPF_INS_EXIT:
717
6
      map_add_implicit_read(MI, BPF_REG_R0);
718
6
      break;
719
1.58k
    }
720
1.58k
    return;
721
1.58k
  }
722
723
  /* cBPF mode */
724
589
  switch (BPF_CLASS(opcode)) {
725
46
  default:
726
46
    break;
727
90
  case BPF_CLASS_LD:
728
90
    map_add_implicit_write(MI, BPF_REG_A);
729
90
    break;
730
174
  case BPF_CLASS_LDX:
731
174
    map_add_implicit_write(MI, BPF_REG_X);
732
174
    break;
733
0
  case BPF_CLASS_ST:
734
0
    map_add_implicit_read(MI, BPF_REG_A);
735
0
    break;
736
0
  case BPF_CLASS_STX:
737
0
    map_add_implicit_read(MI, BPF_REG_X);
738
0
    break;
739
202
  case BPF_CLASS_ALU:
740
202
    map_add_implicit_read(MI, BPF_REG_A);
741
202
    map_add_implicit_write(MI, BPF_REG_A);
742
202
    break;
743
69
  case BPF_CLASS_JMP:
744
69
    if (insn_id != BPF_INS_JA) // except the unconditional jump
745
51
      map_add_implicit_read(MI, BPF_REG_A);
746
69
    break;
747
  /* case BPF_CLASS_RET: */
748
8
  case BPF_CLASS_MISC:
749
8
    if (insn_id == BPF_INS_TAX) {
750
0
      map_add_implicit_read(MI, BPF_REG_A);
751
0
      map_add_implicit_write(MI, BPF_REG_X);
752
8
    } else {
753
8
      map_add_implicit_read(MI, BPF_REG_X);
754
8
      map_add_implicit_write(MI, BPF_REG_A);
755
8
    }
756
8
    break;
757
589
  }
758
589
}
759
#endif
760
761
static bool setFinalOpcode(MCInst *MI, const bpf_internal *bpf)
762
2.17k
{
763
2.17k
  bpf_insn id = BPF_INS_INVALID;
764
2.17k
#ifndef CAPSTONE_DIET
765
2.17k
  cs_detail *detail;
766
767
2.17k
  detail = get_detail(MI);
768
2.17k
#endif
769
770
2.17k
  const uint16_t opcode = bpf->op;
771
2.17k
  switch (BPF_CLASS(opcode)) {
772
0
  default: // will never happen
773
0
    break;
774
484
  case BPF_CLASS_LD:
775
790
  case BPF_CLASS_LDX:
776
790
    if (EBPF_MODE(MI->csh->mode))
777
526
      id = op2insn_ld_ebpf(opcode);
778
264
    else
779
264
      id = op2insn_ld_cbpf(opcode);
780
790
    add_group(MI, BPF_GRP_LOAD);
781
790
    break;
782
24
  case BPF_CLASS_ST:
783
99
  case BPF_CLASS_STX:
784
99
    id = op2insn_st(opcode, bpf->k);
785
99
    add_group(MI, BPF_GRP_STORE);
786
99
    break;
787
357
  case BPF_CLASS_ALU:
788
357
    id = op2insn_alu(opcode, bpf->offset, EBPF_MODE(MI->csh->mode));
789
357
    add_group(MI, BPF_GRP_ALU);
790
357
    break;
791
463
  case BPF_CLASS_JMP:
792
463
    id = op2insn_jmp(opcode);
793
463
#ifndef CAPSTONE_DIET
794
463
    if (id == BPF_INS_CALL || id == BPF_INS_CALLX)
795
147
      add_group(MI, BPF_GRP_CALL);
796
316
    else if (id == BPF_INS_EXIT)
797
6
      add_group(MI, BPF_GRP_RETURN);
798
310
    else
799
310
      add_group(MI, BPF_GRP_JUMP);
800
463
#endif
801
463
    break;
802
91
  case BPF_CLASS_RET:
803
    /* case BPF_CLASS_JMP32: */
804
91
    if (EBPF_MODE(MI->csh->mode)) {
805
45
      id = op2insn_jmp(opcode);
806
45
      add_group(MI, BPF_GRP_JUMP);
807
46
    } else {
808
46
      id = BPF_INS_RET;
809
46
      add_group(MI, BPF_GRP_RETURN);
810
46
    }
811
91
    break;
812
  // BPF_CLASS_MISC and BPF_CLASS_ALU64 have exactly same value
813
379
  case BPF_CLASS_MISC:
814
    /* case BPF_CLASS_ALU64: */
815
379
    if (EBPF_MODE(MI->csh->mode)) {
816
      // ALU64 in eBPF
817
371
      id = op2insn_alu(opcode, bpf->offset, true);
818
371
      add_group(MI, BPF_GRP_ALU);
819
371
    } else {
820
8
      if (BPF_MISCOP(opcode) == BPF_MISCOP_TXA)
821
8
        id = BPF_INS_TXA;
822
0
      else
823
0
        id = BPF_INS_TAX;
824
8
      add_group(MI, BPF_GRP_MISC);
825
8
    }
826
379
    break;
827
2.17k
  }
828
829
2.17k
  if (id == BPF_INS_INVALID)
830
7
    return false;
831
832
2.17k
  MCInst_setOpcodePub(MI, id);
833
2.17k
#undef PUSH_GROUP
834
835
2.17k
#ifndef CAPSTONE_DIET
836
2.17k
  if (detail) {
837
2.17k
    update_regs_access(MI, detail, id, opcode);
838
2.17k
  }
839
2.17k
#endif
840
2.17k
  return true;
841
2.17k
}
842
843
bool BPF_getInstruction(csh ud, const uint8_t *code, size_t code_len,
844
      MCInst *instr, uint16_t *size, uint64_t address,
845
      void *info)
846
2.24k
{
847
2.24k
  bpf_internal *bpf;
848
849
2.24k
  if (EBPF_MODE(instr->csh->mode))
850
1.63k
    bpf = fetch_ebpf(instr, code, code_len);
851
616
  else
852
616
    bpf = fetch_cbpf(instr, code, code_len);
853
2.24k
  if (bpf == NULL)
854
28
    return false;
855
2.21k
  if (!getInstruction(instr, bpf) || !setFinalOpcode(instr, bpf)) {
856
46
    cs_mem_free(bpf);
857
46
    return false;
858
46
  }
859
2.17k
  MCInst_setOpcode(instr, bpf->op);
860
861
2.17k
  *size = bpf->insn_size;
862
2.17k
  cs_mem_free(bpf);
863
864
  return true;
865
2.21k
}
866
867
#endif