/src/crosvm/third_party/minijail/bpf.c
Line | Count | Source |
1 | | /* Copyright 2012 The ChromiumOS Authors |
2 | | * Use of this source code is governed by a BSD-style license that can be |
3 | | * found in the LICENSE file. |
4 | | */ |
5 | | |
6 | | #include <stdint.h> |
7 | | #include <stdio.h> |
8 | | #include <stdlib.h> |
9 | | #include <string.h> |
10 | | |
11 | | #include "bpf.h" |
12 | | #include "util.h" |
13 | | |
14 | | /* Architecture validation. */ |
15 | | size_t bpf_validate_arch(struct sock_filter *filter) |
16 | 0 | { |
17 | 0 | struct sock_filter *curr_block = filter; |
18 | 0 | set_bpf_stmt(curr_block++, BPF_LD + BPF_W + BPF_ABS, arch_nr); |
19 | 0 | set_bpf_jump(curr_block++, BPF_JMP + BPF_JEQ + BPF_K, MINIJAIL_ARCH_NR, |
20 | 0 | SKIP, NEXT); |
21 | 0 | set_bpf_ret_kill(curr_block++); |
22 | 0 | return curr_block - filter; |
23 | 0 | } |
24 | | |
25 | | /* Syscall number eval functions. */ |
26 | | size_t bpf_allow_syscall(struct sock_filter *filter, int nr) |
27 | 0 | { |
28 | 0 | struct sock_filter *curr_block = filter; |
29 | 0 | set_bpf_jump(curr_block++, BPF_JMP + BPF_JEQ + BPF_K, nr, NEXT, SKIP); |
30 | 0 | set_bpf_stmt(curr_block++, BPF_RET + BPF_K, SECCOMP_RET_ALLOW); |
31 | 0 | return curr_block - filter; |
32 | 0 | } |
33 | | |
34 | | size_t bpf_allow_syscall_args(struct sock_filter *filter, int nr, |
35 | | unsigned int id) |
36 | 0 | { |
37 | 0 | struct sock_filter *curr_block = filter; |
38 | 0 | set_bpf_jump(curr_block++, BPF_JMP + BPF_JEQ + BPF_K, nr, NEXT, SKIP); |
39 | 0 | set_bpf_jump_lbl(curr_block++, id); |
40 | 0 | return curr_block - filter; |
41 | 0 | } |
42 | | |
43 | | /* Size-aware arg loaders. */ |
44 | | #if defined(BITS32) |
45 | | size_t bpf_load_arg(struct sock_filter *filter, int argidx) |
46 | | { |
47 | | set_bpf_stmt(filter, BPF_LD + BPF_W + BPF_ABS, LO_ARG(argidx)); |
48 | | return 1U; |
49 | | } |
50 | | #elif defined(BITS64) |
51 | | size_t bpf_load_arg(struct sock_filter *filter, int argidx) |
52 | 0 | { |
53 | 0 | struct sock_filter *curr_block = filter; |
54 | 0 | set_bpf_stmt(curr_block++, BPF_LD + BPF_W + BPF_ABS, LO_ARG(argidx)); |
55 | 0 | set_bpf_stmt(curr_block++, BPF_ST, 0); /* lo -> M[0] */ |
56 | 0 | set_bpf_stmt(curr_block++, BPF_LD + BPF_W + BPF_ABS, HI_ARG(argidx)); |
57 | 0 | set_bpf_stmt(curr_block++, BPF_ST, 1); /* hi -> M[1] */ |
58 | 0 | return curr_block - filter; |
59 | 0 | } |
60 | | #endif |
61 | | |
62 | | /* Size-aware comparisons. */ |
63 | | size_t bpf_comp_jeq32(struct sock_filter *filter, unsigned long c, |
64 | | unsigned char jt, unsigned char jf) |
65 | 0 | { |
66 | 0 | unsigned int lo = (unsigned int)(c & 0xFFFFFFFF); |
67 | 0 | set_bpf_jump(filter, BPF_JMP + BPF_JEQ + BPF_K, lo, jt, jf); |
68 | 0 | return 1U; |
69 | 0 | } |
70 | | |
71 | | /* |
72 | | * On 64 bits, we have to do two 32-bit comparisons. |
73 | | * We jump true when *both* comparisons are true. |
74 | | */ |
75 | | #if defined(BITS64) |
76 | | size_t bpf_comp_jeq64(struct sock_filter *filter, uint64_t c, unsigned char jt, |
77 | | unsigned char jf) |
78 | 0 | { |
79 | 0 | unsigned int lo = (unsigned int)(c & 0xFFFFFFFF); |
80 | 0 | unsigned int hi = (unsigned int)(c >> 32); |
81 | |
82 | 0 | struct sock_filter *curr_block = filter; |
83 | | |
84 | | /* bpf_load_arg leaves |hi| in A */ |
85 | 0 | curr_block += bpf_comp_jeq32(curr_block, hi, NEXT, SKIPN(2) + jf); |
86 | 0 | set_bpf_stmt(curr_block++, BPF_LD + BPF_MEM, 0); /* swap in |lo| */ |
87 | 0 | curr_block += bpf_comp_jeq32(curr_block, lo, jt, jf); |
88 | |
89 | 0 | return curr_block - filter; |
90 | 0 | } |
91 | | #endif |
92 | | |
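The comment above bpf_comp_jeq64() captures the core trick: classic BPF only has a 32-bit accumulator, so a 64-bit equality test is lowered to two 32-bit BPF_JEQ comparisons that must both succeed. A minimal standalone sketch of the same idea, assuming a little-endian struct seccomp_data layout and using the generic BPF_STMT/BPF_JUMP macros rather than minijail's set_bpf_*() helpers (the offsets, constant, and return actions below are invented for illustration):

/* Illustrative sketch only; not part of minijail. */
#include <linux/filter.h>
#include <linux/seccomp.h>
#include <stddef.h>
#include <stdint.h>

#define EXAMPLE_ARG0_LO (offsetof(struct seccomp_data, args[0]))     /* low 32 bits (little-endian) */
#define EXAMPLE_ARG0_HI (offsetof(struct seccomp_data, args[0]) + 4) /* high 32 bits */
#define EXAMPLE_VALUE 0x100000001ULL

static const struct sock_filter jeq64_example[] = {
	/* High word first: a mismatch jumps straight to the "fail" return. */
	BPF_STMT(BPF_LD + BPF_W + BPF_ABS, EXAMPLE_ARG0_HI),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, (uint32_t)(EXAMPLE_VALUE >> 32), 0, 2),
	/* Low word second: both halves must match to reach "pass". */
	BPF_STMT(BPF_LD + BPF_W + BPF_ABS, EXAMPLE_ARG0_LO),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, (uint32_t)EXAMPLE_VALUE, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, SECCOMP_RET_KILL),  /* fail: halves differ */
	BPF_STMT(BPF_RET + BPF_K, SECCOMP_RET_ALLOW), /* pass: both halves match */
};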
93 | | size_t bpf_comp_jgt32(struct sock_filter *filter, unsigned long c, |
94 | | unsigned char jt, unsigned char jf) |
95 | 0 | { |
96 | 0 | unsigned int lo = (unsigned int)(c & 0xFFFFFFFF); |
97 | 0 | set_bpf_jump(filter, BPF_JMP + BPF_JGT + BPF_K, lo, jt, jf); |
98 | 0 | return 1U; |
99 | 0 | } |
100 | | |
101 | | size_t bpf_comp_jge32(struct sock_filter *filter, unsigned long c, |
102 | | unsigned char jt, unsigned char jf) |
103 | 0 | { |
104 | 0 | unsigned int lo = (unsigned int)(c & 0xFFFFFFFF); |
105 | 0 | set_bpf_jump(filter, BPF_JMP + BPF_JGE + BPF_K, lo, jt, jf); |
106 | 0 | return 1U; |
107 | 0 | } |
108 | | |
109 | | /* |
110 | | * On 64 bits, we have to do two/three 32-bit comparisons. |
111 | | * We jump true when the |hi| comparison is true *or* |hi| is equal and the |
112 | | * |lo| comparison is true. |
113 | | */ |
114 | | #if defined(BITS64) |
115 | | size_t bpf_comp_jgt64(struct sock_filter *filter, uint64_t c, unsigned char jt, |
116 | | unsigned char jf) |
117 | 0 | { |
118 | 0 | unsigned int lo = (unsigned int)(c & 0xFFFFFFFF); |
119 | 0 | unsigned int hi = (unsigned int)(c >> 32); |
120 | |
121 | 0 | struct sock_filter *curr_block = filter; |
122 | | |
123 | | /* bpf_load_arg leaves |hi| in A. */ |
124 | 0 | if (hi == 0) { |
125 | 0 | curr_block += |
126 | 0 | bpf_comp_jgt32(curr_block, hi, SKIPN(2) + jt, NEXT); |
127 | 0 | } else { |
128 | 0 | curr_block += |
129 | 0 | bpf_comp_jgt32(curr_block, hi, SKIPN(3) + jt, NEXT); |
130 | 0 | curr_block += |
131 | 0 | bpf_comp_jeq32(curr_block, hi, NEXT, SKIPN(2) + jf); |
132 | 0 | } |
133 | 0 | set_bpf_stmt(curr_block++, BPF_LD + BPF_MEM, 0); /* swap in |lo| */ |
134 | 0 | curr_block += bpf_comp_jgt32(curr_block, lo, jt, jf); |
135 | |
136 | 0 | return curr_block - filter; |
137 | 0 | } |
138 | | |
139 | | size_t bpf_comp_jge64(struct sock_filter *filter, uint64_t c, unsigned char jt, |
140 | | unsigned char jf) |
141 | 0 | { |
142 | 0 | unsigned int lo = (unsigned int)(c & 0xFFFFFFFF); |
143 | 0 | unsigned int hi = (unsigned int)(c >> 32); |
144 | |
145 | 0 | struct sock_filter *curr_block = filter; |
146 | | |
147 | | /* bpf_load_arg leaves |hi| in A. */ |
148 | 0 | if (hi == 0) { |
149 | 0 | curr_block += |
150 | 0 | bpf_comp_jgt32(curr_block, hi, SKIPN(2) + jt, NEXT); |
151 | 0 | } else { |
152 | 0 | curr_block += |
153 | 0 | bpf_comp_jgt32(curr_block, hi, SKIPN(3) + jt, NEXT); |
154 | 0 | curr_block += |
155 | 0 | bpf_comp_jeq32(curr_block, hi, NEXT, SKIPN(2) + jf); |
156 | 0 | } |
157 | 0 | set_bpf_stmt(curr_block++, BPF_LD + BPF_MEM, 0); /* swap in |lo| */ |
158 | 0 | curr_block += bpf_comp_jge32(curr_block, lo, jt, jf); |
159 | |
160 | 0 | return curr_block - filter; |
161 | 0 | } |
162 | | #endif |
163 | | |
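The two/three-comparison split for the ordered comparisons can be easier to follow as a plain C reference model. The sketch below (not part of minijail) evaluates the same predicate bpf_comp_jgt64() encodes; bpf_comp_jge64() differs only in the final low-word comparison. The shorter form taken when hi == 0 works because an unsigned high word that is not greater than zero must be equal to zero, so the separate equality test can be dropped.

#include <stdint.h>

/* Reference model, not BPF: "arg > c" decided one 32-bit half at a time. */
static int u64_gt_by_halves(uint64_t arg, uint64_t c)
{
	uint32_t arg_hi = (uint32_t)(arg >> 32), arg_lo = (uint32_t)arg;
	uint32_t c_hi = (uint32_t)(c >> 32), c_lo = (uint32_t)c;

	if (arg_hi > c_hi)
		return 1;        /* high words already decide it: jump true */
	if (arg_hi != c_hi)
		return 0;        /* high words decide it the other way */
	return arg_lo > c_lo;    /* tie on the high words: low words decide */
}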
164 | | /* Size-aware bitwise AND. */ |
165 | | size_t bpf_comp_jset32(struct sock_filter *filter, unsigned long mask, |
166 | | unsigned char jt, unsigned char jf) |
167 | 0 | { |
168 | 0 | unsigned int mask_lo = (unsigned int)(mask & 0xFFFFFFFF); |
169 | 0 | set_bpf_jump(filter, BPF_JMP + BPF_JSET + BPF_K, mask_lo, jt, jf); |
170 | 0 | return 1U; |
171 | 0 | } |
172 | | |
173 | | /* |
174 | | * On 64 bits, we have to do two 32-bit bitwise ANDs. |
175 | | * We jump true when *either* bitwise AND is true (non-zero). |
176 | | */ |
177 | | #if defined(BITS64) |
178 | | size_t bpf_comp_jset64(struct sock_filter *filter, uint64_t mask, |
179 | | unsigned char jt, unsigned char jf) |
180 | 0 | { |
181 | 0 | unsigned int mask_lo = (unsigned int)(mask & 0xFFFFFFFF); |
182 | 0 | unsigned int mask_hi = (unsigned int)(mask >> 32); |
183 | |
184 | 0 | struct sock_filter *curr_block = filter; |
185 | | |
186 | | /* bpf_load_arg leaves |hi| in A */ |
187 | 0 | curr_block += bpf_comp_jset32(curr_block, mask_hi, SKIPN(2) + jt, NEXT); |
188 | 0 | set_bpf_stmt(curr_block++, BPF_LD + BPF_MEM, 0); /* swap in |lo| */ |
189 | 0 | curr_block += bpf_comp_jset32(curr_block, mask_lo, jt, jf); |
190 | |
191 | 0 | return curr_block - filter; |
192 | 0 | } |
193 | | #endif |
194 | | |
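For the JSET case only one of the two halves has to intersect the mask, so the high-word test can jump straight past the low-word test on success. A hedged standalone sketch, this time showing the M[0]/M[1] scratch-slot convention that bpf_load_arg() uses above (little-endian layout assumed; the offsets, mask, and return actions are invented for this example):

/* Illustrative sketch only; not part of minijail. */
#include <linux/filter.h>
#include <linux/seccomp.h>
#include <stddef.h>
#include <stdint.h>

#define EX_ARG0_LO (offsetof(struct seccomp_data, args[0]))
#define EX_ARG0_HI (offsetof(struct seccomp_data, args[0]) + 4)
#define EX_MASK 0x8000000000000001ULL

static const struct sock_filter jset64_example[] = {
	/* Load both halves, stash them, and leave the high word in A. */
	BPF_STMT(BPF_LD + BPF_W + BPF_ABS, EX_ARG0_LO),
	BPF_STMT(BPF_ST, 0),                      /* lo -> M[0] */
	BPF_STMT(BPF_LD + BPF_W + BPF_ABS, EX_ARG0_HI),
	BPF_STMT(BPF_ST, 1),                      /* hi -> M[1] */
	/* A high-word hit jumps directly to "match"; otherwise try the low word. */
	BPF_JUMP(BPF_JMP + BPF_JSET + BPF_K, (uint32_t)(EX_MASK >> 32), 3, 0),
	BPF_STMT(BPF_LD + BPF_MEM, 0),            /* A <- lo (M[0]) */
	BPF_JUMP(BPF_JMP + BPF_JSET + BPF_K, (uint32_t)EX_MASK, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, SECCOMP_RET_KILL),  /* neither half matched */
	BPF_STMT(BPF_RET + BPF_K, SECCOMP_RET_ALLOW), /* some masked bit was set */
};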
195 | | size_t bpf_comp_jin(struct sock_filter *filter, unsigned long mask, |
196 | | unsigned char jt, unsigned char jf) |
197 | 0 | { |
198 | 0 | unsigned long negative_mask = ~mask; |
199 | | /* |
200 | | * The mask is negated, so the comparison will be true when the argument |
201 | | * includes a flag that wasn't listed in the original (non-negated) |
202 | | * mask. This would be the failure case, so we switch |jt| and |jf|. |
203 | | */ |
204 | 0 | return bpf_comp_jset(filter, negative_mask, jf, jt); |
205 | 0 | } |
206 | | |
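The negation trick in bpf_comp_jin() is compact but easy to misread. As a plain C reference model (not BPF): "arg is in mask" means arg sets no bit outside mask, i.e. (arg & ~mask) == 0, which is exactly a JSET on ~mask with the true and false branches swapped.

#include <stdint.h>

/* Reference model, not BPF: the IN test.  u64_in_mask(0x5, 0x7) is 1;
 * u64_in_mask(0x9, 0x7) is 0 because bit 3 lies outside the mask. */
static int u64_in_mask(uint64_t arg, uint64_t mask)
{
	return (arg & ~mask) == 0; /* no stray bit set -> inside the mask */
}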
207 | | static size_t bpf_arg_comp_len(int op, unsigned long c attribute_unused) |
208 | 0 | { |
209 | | /* The comparisons that use gt/ge internally may have extra opcodes. */ |
210 | 0 | switch (op) { |
211 | 0 | case LT: |
212 | 0 | case LE: |
213 | 0 | case GT: |
214 | 0 | case GE: |
215 | 0 | #if defined(BITS64) |
216 | | /* |
217 | | * |c| can only have a high 32-bit part when running on 64 bits. |
218 | | */ |
219 | 0 | if ((c >> 32) == 0) |
220 | 0 | return BPF_ARG_SHORT_GT_GE_COMP_LEN + 1; |
221 | 0 | #endif |
222 | 0 | return BPF_ARG_GT_GE_COMP_LEN + 1; |
223 | 0 | default: |
224 | 0 | return BPF_ARG_COMP_LEN + 1; |
225 | 0 | } |
226 | 0 | } |
227 | | |
228 | | size_t bpf_arg_comp(struct sock_filter **pfilter, int op, int argidx, |
229 | | unsigned long c, unsigned int label_id) |
230 | 0 | { |
231 | 0 | size_t filter_len = bpf_arg_comp_len(op, c); |
232 | 0 | struct sock_filter *filter = |
233 | 0 | calloc(filter_len, sizeof(struct sock_filter)); |
234 | 0 | struct sock_filter *curr_block = filter; |
235 | 0 | size_t (*comp_function)(struct sock_filter *filter, unsigned long k, |
236 | 0 | unsigned char jt, unsigned char jf); |
237 | 0 | int flip = 0; |
238 | |
239 | 0 | if (!filter) { |
240 | 0 | *pfilter = NULL; |
241 | 0 | return 0; |
242 | 0 | } |
243 | | |
244 | | /* Load arg */ |
245 | 0 | curr_block += bpf_load_arg(curr_block, argidx); |
246 | | |
247 | | /* Jump type */ |
248 | 0 | switch (op) { |
249 | 0 | case EQ: |
250 | 0 | comp_function = bpf_comp_jeq; |
251 | 0 | flip = 0; |
252 | 0 | break; |
253 | 0 | case NE: |
254 | 0 | comp_function = bpf_comp_jeq; |
255 | 0 | flip = 1; |
256 | 0 | break; |
257 | 0 | case LT: |
258 | 0 | comp_function = bpf_comp_jge; |
259 | 0 | flip = 1; |
260 | 0 | break; |
261 | 0 | case LE: |
262 | 0 | comp_function = bpf_comp_jgt; |
263 | 0 | flip = 1; |
264 | 0 | break; |
265 | 0 | case GT: |
266 | 0 | comp_function = bpf_comp_jgt; |
267 | 0 | flip = 0; |
268 | 0 | break; |
269 | 0 | case GE: |
270 | 0 | comp_function = bpf_comp_jge; |
271 | 0 | flip = 0; |
272 | 0 | break; |
273 | 0 | case SET: |
274 | 0 | comp_function = bpf_comp_jset; |
275 | 0 | flip = 0; |
276 | 0 | break; |
277 | 0 | case IN: |
278 | 0 | comp_function = bpf_comp_jin; |
279 | 0 | flip = 0; |
280 | 0 | break; |
281 | 0 | default: |
282 | 0 | curr_block = filter; |
283 | 0 | free(filter); |
284 | 0 | *pfilter = NULL; |
285 | 0 | return 0; |
286 | 0 | } |
287 | | |
288 | | /* |
289 | | * It's easier for the rest of the code to have the true branch |
290 | | * skip and the false branch fall through. |
291 | | */ |
292 | 0 | unsigned char jt = flip ? NEXT : SKIP; |
293 | 0 | unsigned char jf = flip ? SKIP : NEXT; |
294 | 0 | curr_block += comp_function(curr_block, c, jt, jf); |
295 | 0 | curr_block += set_bpf_jump_lbl(curr_block, label_id); |
296 | |
297 | 0 | *pfilter = filter; |
298 | 0 | return curr_block - filter; |
299 | 0 | } |
300 | | |
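The flip flag above turns the three comparisons classic BPF lacks (NE, LT, LE) into the ones it has (EQ, GE, GT) by swapping the jump targets. A compact reference model of that mapping, with the operator constants redefined locally for the sketch (the real values live in bpf.h):

#include <stdint.h>

/* Local stand-in for the operator constants declared in bpf.h. */
enum example_op { EX_EQ, EX_NE, EX_LT, EX_LE, EX_GT, EX_GE };

/* Reference model, not BPF: each missing operator is its complement's
 * comparison with the true/false outcome flipped, mirroring the switch above. */
static int compare_with_flip(enum example_op op, uint64_t arg, uint64_t c)
{
	int flip = 0, result = 0;

	switch (op) {
	case EX_EQ: result = (arg == c);           break;
	case EX_NE: result = (arg == c); flip = 1; break; /* !(arg == c) */
	case EX_LT: result = (arg >= c); flip = 1; break; /* !(arg >= c) */
	case EX_LE: result = (arg >  c); flip = 1; break; /* !(arg >  c) */
	case EX_GT: result = (arg >  c);           break;
	case EX_GE: result = (arg >= c);           break;
	}
	return flip ? !result : result;
}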
301 | | int bpf_resolve_jumps(struct bpf_labels *labels, struct sock_filter *filter, |
302 | | size_t len) |
303 | 0 | { |
304 | 0 | struct sock_filter *instr; |
305 | 0 | size_t i, offset; |
306 | |
307 | 0 | if (len > BPF_MAXINSNS) |
308 | 0 | return -1; |
309 | | |
310 | | /* |
311 | | * Walk it once, backwards, to build the label table and do fixups. |
312 | | * Since backward jumps are disallowed by BPF, this is easy. |
313 | | */ |
314 | 0 | for (i = 0; i < len; i++) { |
315 | 0 | offset = len - i - 1; |
316 | 0 | instr = &filter[offset]; |
317 | 0 | if (instr->code != (BPF_JMP + BPF_JA)) |
318 | 0 | continue; |
319 | 0 | switch ((instr->jt << 8) | instr->jf) { |
320 | 0 | case (JUMP_JT << 8) | JUMP_JF: |
321 | 0 | if (instr->k >= labels->count) { |
322 | 0 | warn("nonexistent label id: %u", instr->k); |
323 | 0 | return -1; |
324 | 0 | } |
325 | 0 | if (labels->labels[instr->k].location == 0xffffffff) { |
326 | 0 | warn("unresolved label: '%s'", |
327 | 0 | labels->labels[instr->k].label); |
328 | 0 | return -1; |
329 | 0 | } |
330 | 0 | instr->k = |
331 | 0 | labels->labels[instr->k].location - (offset + 1); |
332 | 0 | instr->jt = 0; |
333 | 0 | instr->jf = 0; |
334 | 0 | continue; |
335 | 0 | case (LABEL_JT << 8) | LABEL_JF: |
336 | 0 | if (labels->labels[instr->k].location != 0xffffffff) { |
337 | 0 | warn("duplicate label: '%s'", |
338 | 0 | labels->labels[instr->k].label); |
339 | 0 | return -1; |
340 | 0 | } |
341 | 0 | labels->labels[instr->k].location = offset; |
342 | 0 | instr->k = 0; /* Fall through. */ |
343 | 0 | instr->jt = 0; |
344 | 0 | instr->jf = 0; |
345 | 0 | continue; |
346 | 0 | } |
347 | 0 | } |
348 | 0 | return 0; |
349 | 0 | } |
350 | | |
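The fixup arithmetic in bpf_resolve_jumps() relies on BPF allowing only forward jumps: walking backwards guarantees a label's location is already known by the time a jump targeting it is visited, and the resolved constant is location - (offset + 1) because the program counter has already moved past the jump when the offset is added. A tiny worked example with made-up instruction indices:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t jump_offset = 3;    /* index of the BPF_JA placeholder     */
	uint32_t label_location = 7; /* index recorded for the label marker */
	uint32_t k = label_location - (jump_offset + 1);

	/* Executing the jump continues at (3 + 1) + k = 7, the label. */
	printf("resolved JA constant: %u\n", k); /* prints 3 */
	return 0;
}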
351 | | /* Simple lookup table for labels. */ |
352 | | int bpf_label_id(struct bpf_labels *labels, const char *label) |
353 | 0 | { |
354 | 0 | struct __bpf_label *begin = labels->labels, *end; |
355 | 0 | int id; |
356 | 0 | if (labels->count == 0) { |
357 | 0 | begin->label = strndup(label, MAX_BPF_LABEL_LEN); |
358 | 0 | if (!begin->label) { |
359 | 0 | return -1; |
360 | 0 | } |
361 | 0 | begin->location = 0xffffffff; |
362 | 0 | labels->count++; |
363 | 0 | return 0; |
364 | 0 | } |
365 | 0 | end = begin + labels->count; |
366 | 0 | for (id = 0; begin < end; ++begin, ++id) { |
367 | 0 | if (streq(label, begin->label)) { |
368 | 0 | return id; |
369 | 0 | } |
370 | 0 | } |
371 | | |
372 | | /* The label wasn't found. Insert it only if there's space. */ |
373 | 0 | if (labels->count == BPF_LABELS_MAX) { |
374 | 0 | return -1; |
375 | 0 | } |
376 | 0 | begin->label = strndup(label, MAX_BPF_LABEL_LEN); |
377 | 0 | if (!begin->label) { |
378 | 0 | return -1; |
379 | 0 | } |
380 | 0 | begin->location = 0xffffffff; |
381 | 0 | labels->count++; |
382 | 0 | return id; |
383 | 0 | } |
384 | | |
385 | | void free_label_strings(struct bpf_labels *labels) |
386 | 0 | { |
387 | 0 | if (labels->count == 0) |
388 | 0 | return; |
389 | | |
390 | 0 | struct __bpf_label *begin = labels->labels, *end; |
391 | |
392 | 0 | end = begin + labels->count; |
393 | 0 | for (; begin < end; ++begin) { |
394 | 0 | if (begin->label) |
395 | 0 | free((void *)(begin->label)); |
396 | 0 | } |
397 | |
398 | 0 | labels->count = 0; |
399 | 0 | } |