1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
12 #include <rte_common.h>
17 /* possible instruction node colour */
25 /* possible edge types */
34 struct bpf_reg_state {
38 struct bpf_eval_state {
39 struct bpf_reg_state rs[EBPF_REG_NUM];
48 uint8_t edge_type[MAX_EDGES];
49 uint32_t edge_dest[MAX_EDGES];
51 struct bpf_eval_state *evst;
55 const struct rte_bpf_prm *prm;
59 uint32_t nb_jcc_nodes;
60 uint32_t node_colour[MAX_NODE_COLOUR];
61 uint32_t edge_type[MAX_EDGE_TYPE];
62 struct bpf_eval_state *evst;
66 struct bpf_eval_state *ent;
70 struct bpf_ins_check {
83 const char * (*check)(const struct ebpf_insn *);
84 const char * (*eval)(struct bpf_verifier *, const struct ebpf_insn *);
87 #define ALL_REGS RTE_LEN2MASK(EBPF_REG_NUM, uint16_t)
88 #define WRT_REGS RTE_LEN2MASK(EBPF_REG_10, uint16_t)
89 #define ZERO_REG RTE_LEN2MASK(EBPF_REG_1, uint16_t)
92 * check and evaluate functions for particular instruction types.
/*
 * Extra check for byte-swap ALU ops (EBPF_END | TO_BE/TO_LE):
 * the immediate selects the swap width and must be 16, 32 or 64.
 * Returns an error string on failure; the success path (return NULL)
 * is elided in this listing.
 */
96 check_alu_bele(const struct ebpf_insn *ins)
98 if (ins->imm != 16 && ins->imm != 32 && ins->imm != 64)
99 return "invalid imm field";
/*
 * Validate a stack access relative to the frame pointer and track the
 * deepest stack usage seen so far in bvf->stack_sz.
 * The computation of 'ofs' is elided in this listing - presumably it is
 * derived from ins->off (TODO confirm against full source); valid offsets
 * are strictly negative and within MAX_BPF_STACK_SIZE.
 */
104 eval_stack(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
110 if (ofs >= 0 || ofs < -MAX_BPF_STACK_SIZE)
111 return "stack boundary violation";
114 bvf->stack_sz = RTE_MAX(bvf->stack_sz, ofs);
/*
 * Evaluate a store instruction: stores whose destination base register
 * is R10 (the read-only frame pointer) go through the stack bounds check.
 * The non-R10 path is elided in this listing.
 */
119 eval_store(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
121 if (ins->dst_reg == EBPF_REG_10)
122 return eval_stack(bvf, ins);
/*
 * Evaluate a load instruction: loads whose source base register is R10
 * (the frame pointer) go through the stack bounds check.
 * The non-R10 path is elided in this listing.
 */
127 eval_load(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
129 if (ins->src_reg == EBPF_REG_10)
130 return eval_stack(bvf, ins);
/*
 * Evaluate a call instruction: the callee index ('idx', derivation
 * elided in this listing - presumably taken from ins->imm, TODO confirm)
 * must reference an external symbol of type RTE_BPF_XTYPE_FUNC.
 * Calls are rejected outright on 32-bit builds.
 */
135 eval_call(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
141 if (idx >= bvf->prm->nb_xsym ||
142 bvf->prm->xsym[idx].type != RTE_BPF_XTYPE_FUNC)
143 return "invalid external function index";
145 /* for now don't support function calls on 32 bit platform */
146 if (sizeof(uint64_t) != sizeof(uintptr_t))
147 return "function calls are supported only for 64 bit apps";
152 * validate parameters for each instruction type.
/*
 * Per-opcode operand constraints, indexed by the 8-bit instruction code.
 * Zero-initialized entries mark invalid opcodes (check_syntax treats
 * mask.dreg == 0 as "invalid opcode").  For each valid opcode the entry
 * gives the permitted dst/src register bit-masks and the allowed
 * off/imm ranges; optional .check/.eval hooks add per-opcode logic.
 *
 * NOTE(review): ins->code is an 8-bit value (0..255) but the array is
 * declared with UINT8_MAX (255) elements, so opcode 0xff would index one
 * past the end - verify whether this should be UINT8_MAX + 1.
 * NOTE(review): entry-closing braces and the .eval hooks for the
 * load/store entries appear to be elided in this listing.
 */
154 static const struct bpf_ins_check ins_chk[UINT8_MAX] = {
155 /* ALU IMM 32-bit instructions */
156 [(BPF_ALU | BPF_ADD | BPF_K)] = {
157 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
158 .off = { .min = 0, .max = 0},
159 .imm = { .min = 0, .max = UINT32_MAX,},
161 [(BPF_ALU | BPF_SUB | BPF_K)] = {
162 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
163 .off = { .min = 0, .max = 0},
164 .imm = { .min = 0, .max = UINT32_MAX,},
166 [(BPF_ALU | BPF_AND | BPF_K)] = {
167 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
168 .off = { .min = 0, .max = 0},
169 .imm = { .min = 0, .max = UINT32_MAX,},
171 [(BPF_ALU | BPF_OR | BPF_K)] = {
172 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
173 .off = { .min = 0, .max = 0},
174 .imm = { .min = 0, .max = UINT32_MAX,},
176 [(BPF_ALU | BPF_LSH | BPF_K)] = {
177 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
178 .off = { .min = 0, .max = 0},
179 .imm = { .min = 0, .max = UINT32_MAX,},
181 [(BPF_ALU | BPF_RSH | BPF_K)] = {
182 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
183 .off = { .min = 0, .max = 0},
184 .imm = { .min = 0, .max = UINT32_MAX,},
186 [(BPF_ALU | BPF_XOR | BPF_K)] = {
187 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
188 .off = { .min = 0, .max = 0},
189 .imm = { .min = 0, .max = UINT32_MAX,},
191 [(BPF_ALU | BPF_MUL | BPF_K)] = {
192 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
193 .off = { .min = 0, .max = 0},
194 .imm = { .min = 0, .max = UINT32_MAX,},
196 [(BPF_ALU | EBPF_MOV | BPF_K)] = {
197 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
198 .off = { .min = 0, .max = 0},
199 .imm = { .min = 0, .max = UINT32_MAX,},
/* div/mod by immediate: imm.min = 1 statically rejects divide-by-zero */
201 [(BPF_ALU | BPF_DIV | BPF_K)] = {
202 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
203 .off = { .min = 0, .max = 0},
204 .imm = { .min = 1, .max = UINT32_MAX},
206 [(BPF_ALU | BPF_MOD | BPF_K)] = {
207 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
208 .off = { .min = 0, .max = 0},
209 .imm = { .min = 1, .max = UINT32_MAX},
211 /* ALU IMM 64-bit instructions */
212 [(EBPF_ALU64 | BPF_ADD | BPF_K)] = {
213 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
214 .off = { .min = 0, .max = 0},
215 .imm = { .min = 0, .max = UINT32_MAX,},
217 [(EBPF_ALU64 | BPF_SUB | BPF_K)] = {
218 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
219 .off = { .min = 0, .max = 0},
220 .imm = { .min = 0, .max = UINT32_MAX,},
222 [(EBPF_ALU64 | BPF_AND | BPF_K)] = {
223 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
224 .off = { .min = 0, .max = 0},
225 .imm = { .min = 0, .max = UINT32_MAX,},
227 [(EBPF_ALU64 | BPF_OR | BPF_K)] = {
228 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
229 .off = { .min = 0, .max = 0},
230 .imm = { .min = 0, .max = UINT32_MAX,},
232 [(EBPF_ALU64 | BPF_LSH | BPF_K)] = {
233 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
234 .off = { .min = 0, .max = 0},
235 .imm = { .min = 0, .max = UINT32_MAX,},
237 [(EBPF_ALU64 | BPF_RSH | BPF_K)] = {
238 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
239 .off = { .min = 0, .max = 0},
240 .imm = { .min = 0, .max = UINT32_MAX,},
242 [(EBPF_ALU64 | EBPF_ARSH | BPF_K)] = {
243 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
244 .off = { .min = 0, .max = 0},
245 .imm = { .min = 0, .max = UINT32_MAX,},
247 [(EBPF_ALU64 | BPF_XOR | BPF_K)] = {
248 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
249 .off = { .min = 0, .max = 0},
250 .imm = { .min = 0, .max = UINT32_MAX,},
252 [(EBPF_ALU64 | BPF_MUL | BPF_K)] = {
253 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
254 .off = { .min = 0, .max = 0},
255 .imm = { .min = 0, .max = UINT32_MAX,},
257 [(EBPF_ALU64 | EBPF_MOV | BPF_K)] = {
258 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
259 .off = { .min = 0, .max = 0},
260 .imm = { .min = 0, .max = UINT32_MAX,},
262 [(EBPF_ALU64 | BPF_DIV | BPF_K)] = {
263 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
264 .off = { .min = 0, .max = 0},
265 .imm = { .min = 1, .max = UINT32_MAX},
267 [(EBPF_ALU64 | BPF_MOD | BPF_K)] = {
268 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
269 .off = { .min = 0, .max = 0},
270 .imm = { .min = 1, .max = UINT32_MAX},
272 /* ALU REG 32-bit instructions */
273 [(BPF_ALU | BPF_ADD | BPF_X)] = {
274 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
275 .off = { .min = 0, .max = 0},
276 .imm = { .min = 0, .max = 0},
278 [(BPF_ALU | BPF_SUB | BPF_X)] = {
279 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
280 .off = { .min = 0, .max = 0},
281 .imm = { .min = 0, .max = 0},
283 [(BPF_ALU | BPF_AND | BPF_X)] = {
284 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
285 .off = { .min = 0, .max = 0},
286 .imm = { .min = 0, .max = 0},
288 [(BPF_ALU | BPF_OR | BPF_X)] = {
289 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
290 .off = { .min = 0, .max = 0},
291 .imm = { .min = 0, .max = 0},
293 [(BPF_ALU | BPF_LSH | BPF_X)] = {
294 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
295 .off = { .min = 0, .max = 0},
296 .imm = { .min = 0, .max = 0},
298 [(BPF_ALU | BPF_RSH | BPF_X)] = {
299 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
300 .off = { .min = 0, .max = 0},
301 .imm = { .min = 0, .max = 0},
303 [(BPF_ALU | BPF_XOR | BPF_X)] = {
304 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
305 .off = { .min = 0, .max = 0},
306 .imm = { .min = 0, .max = 0},
308 [(BPF_ALU | BPF_MUL | BPF_X)] = {
309 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
310 .off = { .min = 0, .max = 0},
311 .imm = { .min = 0, .max = 0},
313 [(BPF_ALU | BPF_DIV | BPF_X)] = {
314 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
315 .off = { .min = 0, .max = 0},
316 .imm = { .min = 0, .max = 0},
318 [(BPF_ALU | BPF_MOD | BPF_X)] = {
319 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
320 .off = { .min = 0, .max = 0},
321 .imm = { .min = 0, .max = 0},
323 [(BPF_ALU | EBPF_MOV | BPF_X)] = {
324 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
325 .off = { .min = 0, .max = 0},
326 .imm = { .min = 0, .max = 0},
328 [(BPF_ALU | BPF_NEG)] = {
329 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
330 .off = { .min = 0, .max = 0},
331 .imm = { .min = 0, .max = 0},
/* byte-swap ops: imm (16/32/64) is further constrained by check_alu_bele */
333 [(BPF_ALU | EBPF_END | EBPF_TO_BE)] = {
334 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
335 .off = { .min = 0, .max = 0},
336 .imm = { .min = 16, .max = 64},
337 .check = check_alu_bele,
339 [(BPF_ALU | EBPF_END | EBPF_TO_LE)] = {
340 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
341 .off = { .min = 0, .max = 0},
342 .imm = { .min = 16, .max = 64},
343 .check = check_alu_bele,
345 /* ALU REG 64-bit instructions */
346 [(EBPF_ALU64 | BPF_ADD | BPF_X)] = {
347 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
348 .off = { .min = 0, .max = 0},
349 .imm = { .min = 0, .max = 0},
351 [(EBPF_ALU64 | BPF_SUB | BPF_X)] = {
352 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
353 .off = { .min = 0, .max = 0},
354 .imm = { .min = 0, .max = 0},
356 [(EBPF_ALU64 | BPF_AND | BPF_X)] = {
357 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
358 .off = { .min = 0, .max = 0},
359 .imm = { .min = 0, .max = 0},
361 [(EBPF_ALU64 | BPF_OR | BPF_X)] = {
362 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
363 .off = { .min = 0, .max = 0},
364 .imm = { .min = 0, .max = 0},
366 [(EBPF_ALU64 | BPF_LSH | BPF_X)] = {
367 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
368 .off = { .min = 0, .max = 0},
369 .imm = { .min = 0, .max = 0},
371 [(EBPF_ALU64 | BPF_RSH | BPF_X)] = {
372 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
373 .off = { .min = 0, .max = 0},
374 .imm = { .min = 0, .max = 0},
376 [(EBPF_ALU64 | EBPF_ARSH | BPF_X)] = {
377 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
378 .off = { .min = 0, .max = 0},
379 .imm = { .min = 0, .max = 0},
381 [(EBPF_ALU64 | BPF_XOR | BPF_X)] = {
382 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
383 .off = { .min = 0, .max = 0},
384 .imm = { .min = 0, .max = 0},
386 [(EBPF_ALU64 | BPF_MUL | BPF_X)] = {
387 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
388 .off = { .min = 0, .max = 0},
389 .imm = { .min = 0, .max = 0},
391 [(EBPF_ALU64 | BPF_DIV | BPF_X)] = {
392 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
393 .off = { .min = 0, .max = 0},
394 .imm = { .min = 0, .max = 0},
396 [(EBPF_ALU64 | BPF_MOD | BPF_X)] = {
397 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
398 .off = { .min = 0, .max = 0},
399 .imm = { .min = 0, .max = 0},
401 [(EBPF_ALU64 | EBPF_MOV | BPF_X)] = {
402 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
403 .off = { .min = 0, .max = 0},
404 .imm = { .min = 0, .max = 0},
406 [(EBPF_ALU64 | BPF_NEG)] = {
407 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
408 .off = { .min = 0, .max = 0},
409 .imm = { .min = 0, .max = 0},
411 /* load instructions */
412 [(BPF_LDX | BPF_MEM | BPF_B)] = {
413 .mask = {. dreg = WRT_REGS, .sreg = ALL_REGS},
414 .off = { .min = 0, .max = UINT16_MAX},
415 .imm = { .min = 0, .max = 0},
418 [(BPF_LDX | BPF_MEM | BPF_H)] = {
419 .mask = {. dreg = WRT_REGS, .sreg = ALL_REGS},
420 .off = { .min = 0, .max = UINT16_MAX},
421 .imm = { .min = 0, .max = 0},
424 [(BPF_LDX | BPF_MEM | BPF_W)] = {
425 .mask = {. dreg = WRT_REGS, .sreg = ALL_REGS},
426 .off = { .min = 0, .max = UINT16_MAX},
427 .imm = { .min = 0, .max = 0},
430 [(BPF_LDX | BPF_MEM | EBPF_DW)] = {
431 .mask = {. dreg = WRT_REGS, .sreg = ALL_REGS},
432 .off = { .min = 0, .max = UINT16_MAX},
433 .imm = { .min = 0, .max = 0},
436 /* load 64 bit immediate value */
437 [(BPF_LD | BPF_IMM | EBPF_DW)] = {
438 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
439 .off = { .min = 0, .max = 0},
440 .imm = { .min = 0, .max = UINT32_MAX},
442 /* store REG instructions */
443 [(BPF_STX | BPF_MEM | BPF_B)] = {
444 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
445 .off = { .min = 0, .max = UINT16_MAX},
446 .imm = { .min = 0, .max = 0},
449 [(BPF_STX | BPF_MEM | BPF_H)] = {
450 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
451 .off = { .min = 0, .max = UINT16_MAX},
452 .imm = { .min = 0, .max = 0},
455 [(BPF_STX | BPF_MEM | BPF_W)] = {
456 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
457 .off = { .min = 0, .max = UINT16_MAX},
458 .imm = { .min = 0, .max = 0},
461 [(BPF_STX | BPF_MEM | EBPF_DW)] = {
462 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
463 .off = { .min = 0, .max = UINT16_MAX},
464 .imm = { .min = 0, .max = 0},
467 /* atomic add instructions */
468 [(BPF_STX | EBPF_XADD | BPF_W)] = {
469 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
470 .off = { .min = 0, .max = UINT16_MAX},
471 .imm = { .min = 0, .max = 0},
474 [(BPF_STX | EBPF_XADD | EBPF_DW)] = {
475 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
476 .off = { .min = 0, .max = UINT16_MAX},
477 .imm = { .min = 0, .max = 0},
480 /* store IMM instructions */
481 [(BPF_ST | BPF_MEM | BPF_B)] = {
482 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
483 .off = { .min = 0, .max = UINT16_MAX},
484 .imm = { .min = 0, .max = UINT32_MAX},
487 [(BPF_ST | BPF_MEM | BPF_H)] = {
488 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
489 .off = { .min = 0, .max = UINT16_MAX},
490 .imm = { .min = 0, .max = UINT32_MAX},
493 [(BPF_ST | BPF_MEM | BPF_W)] = {
494 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
495 .off = { .min = 0, .max = UINT16_MAX},
496 .imm = { .min = 0, .max = UINT32_MAX},
499 [(BPF_ST | BPF_MEM | EBPF_DW)] = {
500 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
501 .off = { .min = 0, .max = UINT16_MAX},
502 .imm = { .min = 0, .max = UINT32_MAX},
505 /* jump instruction */
506 [(BPF_JMP | BPF_JA)] = {
507 .mask = { .dreg = ZERO_REG, .sreg = ZERO_REG},
508 .off = { .min = 0, .max = UINT16_MAX},
509 .imm = { .min = 0, .max = 0},
511 /* jcc IMM instructions */
512 [(BPF_JMP | BPF_JEQ | BPF_K)] = {
513 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
514 .off = { .min = 0, .max = UINT16_MAX},
515 .imm = { .min = 0, .max = UINT32_MAX},
517 [(BPF_JMP | EBPF_JNE | BPF_K)] = {
518 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
519 .off = { .min = 0, .max = UINT16_MAX},
520 .imm = { .min = 0, .max = UINT32_MAX},
522 [(BPF_JMP | BPF_JGT | BPF_K)] = {
523 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
524 .off = { .min = 0, .max = UINT16_MAX},
525 .imm = { .min = 0, .max = UINT32_MAX},
527 [(BPF_JMP | EBPF_JLT | BPF_K)] = {
528 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
529 .off = { .min = 0, .max = UINT16_MAX},
530 .imm = { .min = 0, .max = UINT32_MAX},
532 [(BPF_JMP | BPF_JGE | BPF_K)] = {
533 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
534 .off = { .min = 0, .max = UINT16_MAX},
535 .imm = { .min = 0, .max = UINT32_MAX},
537 [(BPF_JMP | EBPF_JLE | BPF_K)] = {
538 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
539 .off = { .min = 0, .max = UINT16_MAX},
540 .imm = { .min = 0, .max = UINT32_MAX},
542 [(BPF_JMP | EBPF_JSGT | BPF_K)] = {
543 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
544 .off = { .min = 0, .max = UINT16_MAX},
545 .imm = { .min = 0, .max = UINT32_MAX},
547 [(BPF_JMP | EBPF_JSLT | BPF_K)] = {
548 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
549 .off = { .min = 0, .max = UINT16_MAX},
550 .imm = { .min = 0, .max = UINT32_MAX},
552 [(BPF_JMP | EBPF_JSGE | BPF_K)] = {
553 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
554 .off = { .min = 0, .max = UINT16_MAX},
555 .imm = { .min = 0, .max = UINT32_MAX},
557 [(BPF_JMP | EBPF_JSLE | BPF_K)] = {
558 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
559 .off = { .min = 0, .max = UINT16_MAX},
560 .imm = { .min = 0, .max = UINT32_MAX},
562 [(BPF_JMP | BPF_JSET | BPF_K)] = {
563 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
564 .off = { .min = 0, .max = UINT16_MAX},
565 .imm = { .min = 0, .max = UINT32_MAX},
567 /* jcc REG instructions */
568 [(BPF_JMP | BPF_JEQ | BPF_X)] = {
569 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
570 .off = { .min = 0, .max = UINT16_MAX},
571 .imm = { .min = 0, .max = 0},
573 [(BPF_JMP | EBPF_JNE | BPF_X)] = {
574 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
575 .off = { .min = 0, .max = UINT16_MAX},
576 .imm = { .min = 0, .max = 0},
578 [(BPF_JMP | BPF_JGT | BPF_X)] = {
579 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
580 .off = { .min = 0, .max = UINT16_MAX},
581 .imm = { .min = 0, .max = 0},
583 [(BPF_JMP | EBPF_JLT | BPF_X)] = {
584 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
585 .off = { .min = 0, .max = UINT16_MAX},
586 .imm = { .min = 0, .max = 0},
588 [(BPF_JMP | BPF_JGE | BPF_X)] = {
589 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
590 .off = { .min = 0, .max = UINT16_MAX},
591 .imm = { .min = 0, .max = 0},
593 [(BPF_JMP | EBPF_JLE | BPF_X)] = {
594 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
595 .off = { .min = 0, .max = UINT16_MAX},
596 .imm = { .min = 0, .max = 0},
598 [(BPF_JMP | EBPF_JSGT | BPF_X)] = {
599 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
600 .off = { .min = 0, .max = UINT16_MAX},
601 .imm = { .min = 0, .max = 0},
603 [(BPF_JMP | EBPF_JSLT | BPF_X)] = {
604 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
605 .off = { .min = 0, .max = UINT16_MAX},
606 .imm = { .min = 0, .max = 0},
608 [(BPF_JMP | EBPF_JSGE | BPF_X)] = {
609 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
610 .off = { .min = 0, .max = UINT16_MAX},
611 .imm = { .min = 0, .max = 0},
613 [(BPF_JMP | EBPF_JSLE | BPF_X)] = {
614 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
615 .off = { .min = 0, .max = UINT16_MAX},
616 .imm = { .min = 0, .max = 0},
618 [(BPF_JMP | BPF_JSET | BPF_X)] = {
619 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
620 .off = { .min = 0, .max = UINT16_MAX},
621 .imm = { .min = 0, .max = 0},
623 /* call instruction */
624 [(BPF_JMP | EBPF_CALL)] = {
625 .mask = { .dreg = ZERO_REG, .sreg = ZERO_REG},
626 .off = { .min = 0, .max = 0},
627 .imm = { .min = 0, .max = UINT32_MAX},
630 /* ret instruction */
631 [(BPF_JMP | EBPF_EXIT)] = {
632 .mask = { .dreg = ZERO_REG, .sreg = ZERO_REG},
633 .off = { .min = 0, .max = 0},
634 .imm = { .min = 0, .max = 0},
639 * make sure that instruction syntax is valid,
640 * and its fields don't violate particular instruction type restrictions.
/*
 * Syntax check for a single instruction against the ins_chk table:
 * valid opcode, permitted dst/src registers, and off/imm within range.
 * Locals ('op', 'off', 'imm') are elided in this listing - presumably
 * extracted from ins->code/off/imm (TODO confirm against full source).
 * Returns NULL on success (path elided) or an error description string.
 * NOTE(review): 'op' spans 0..255 while ins_chk is declared with
 * UINT8_MAX elements - see the note at the table definition.
 */
643 check_syntax(const struct ebpf_insn *ins)
652 if (ins_chk[op].mask.dreg == 0)
653 return "invalid opcode";
655 if ((ins_chk[op].mask.dreg & 1 << ins->dst_reg) == 0)
656 return "invalid dst-reg field";
658 if ((ins_chk[op].mask.sreg & 1 << ins->src_reg) == 0)
659 return "invalid src-reg field";
662 if (ins_chk[op].off.min > off || ins_chk[op].off.max < off)
663 return "invalid off field";
666 if (ins_chk[op].imm.min > imm || ins_chk[op].imm.max < imm)
667 return "invalid imm field";
669 if (ins_chk[op].check != NULL)
670 return ins_chk[op].check(ins);
676 * helper function, return instruction index for the given node.
/* Instruction index of 'node' = its offset from the base of the bvf->in array. */
679 get_node_idx(const struct bpf_verifier *bvf, const struct inst_node *node)
681 return node - bvf->in;
685 * helper function, used to walk through constructed CFG.
/*
 * Return the child node reached through the node's current outgoing edge.
 * The edge-cursor bookkeeping ('ce'/'ne' handling, including the
 * no-more-edges return) is elided in this listing.
 */
687 static struct inst_node *
688 get_next_node(struct bpf_verifier *bvf, struct inst_node *node)
690 uint32_t ce, ne, dst;
698 dst = node->edge_dest[ce];
699 return bvf->in + dst;
/*
 * Recolour a node for the DFS walk, keeping the per-colour population
 * counters in bvf->node_colour[] consistent.  The assignment of the new
 * colour to the node itself is elided in this listing.
 */
703 set_node_colour(struct bpf_verifier *bvf, struct inst_node *node,
711 bvf->node_colour[prev]--;
712 bvf->node_colour[new]++;
716 * helper function, add new edge between two nodes.
/*
 * Append a CFG edge from 'node' to instruction index 'nidx'.
 * Rejects destinations beyond the program end and nodes that already
 * have the maximum number of outgoing edges; the error-return and the
 * initialization of 'ne' are elided in this listing.
 */
719 add_edge(struct bpf_verifier *bvf, struct inst_node *node, uint32_t nidx)
723 if (nidx > bvf->prm->nb_ins) {
724 RTE_BPF_LOG(ERR, "%s: program boundary violation at pc: %u, "
726 __func__, get_node_idx(bvf, node), nidx);
731 if (ne >= RTE_DIM(node->edge_dest)) {
732 RTE_BPF_LOG(ERR, "%s: internal error at pc: %u\n",
733 __func__, get_node_idx(bvf, node));
737 node->edge_dest[ne] = nidx;
738 node->nb_edge = ne + 1;
743 * helper function, determine type of edge between two nodes.
/*
 * Classify the edge just traversed (node -> next) by the DFS colour of
 * the destination: WHITE -> tree edge, GREY -> back edge (loop),
 * BLACK -> forward/cross edge.  The 'clr' read and the per-colour type
 * assignments are elided in this listing; the classified type is
 * recorded on the edge and tallied in bvf->edge_type[].
 */
746 set_edge_type(struct bpf_verifier *bvf, struct inst_node *node,
747 const struct inst_node *next)
749 uint32_t ce, clr, type;
/* cur_edge was already advanced past this edge, hence the -1 */
751 ce = node->cur_edge - 1;
758 else if (clr == GREY)
760 else if (clr == BLACK)
762 * in fact it could be either direct or cross edge,
763 * but for now, we don't need to distinguish between them.
767 node->edge_type[ce] = type;
768 bvf->edge_type[type]++;
/* Return the DFS parent of 'node', as recorded in node->prev_node. */
771 static struct inst_node *
772 get_prev_node(struct bpf_verifier *bvf, struct inst_node *node)
774 return bvf->in + node->prev_node;
778 * Depth-First Search (DFS) through previously constructed
779 * Control Flow Graph (CFG).
780 * Information collected at this path would be used later
781 * to determine whether there are any loops and/or unreachable instructions.
/*
 * Iterative DFS over the CFG: WHITE = unvisited, GREY = on the current
 * path, BLACK = fully processed.  Each traversed edge is classified via
 * set_edge_type; parents are tracked through prev_node so no explicit
 * stack is needed.  The initial 'node' assignment and the loop-exit
 * condition are elided in this listing.
 */
784 dfs(struct bpf_verifier *bvf)
786 struct inst_node *next, *node;
789 while (node != NULL) {
791 if (node->colour == WHITE)
792 set_node_colour(bvf, node, GREY);
794 if (node->colour == GREY) {
796 /* find next unprocessed child node */
798 next = get_next_node(bvf, node);
801 set_edge_type(bvf, node, next);
802 } while (next->colour != WHITE);
805 /* proceed with next child */
806 next->prev_node = get_node_idx(bvf, node);
810 * finished with current node and all its kids,
811 * proceed with parent
813 set_node_colour(bvf, node, BLACK);
815 node = get_prev_node(bvf, node);
823 * report unreachable instructions.
/*
 * Log every instruction left WHITE (never visited) by the DFS.
 * The second slot of a two-slot LDDW (load 64-bit immediate) is never a
 * CFG node of its own, hence the exclusion of that opcode here -
 * presumably 'node' is set to bvf->in + i in the elided body line
 * (TODO confirm against full source).
 */
826 log_unreachable(const struct bpf_verifier *bvf)
829 struct inst_node *node;
830 const struct ebpf_insn *ins;
832 for (i = 0; i != bvf->prm->nb_ins; i++) {
835 ins = bvf->prm->ins + i;
837 if (node->colour == WHITE &&
838 ins->code != (BPF_LD | BPF_IMM | EBPF_DW))
839 RTE_BPF_LOG(ERR, "unreachable code at pc: %u;\n", i);
844 * report loops detected.
/*
 * Log every back edge found by the DFS: each one closes a loop from
 * instruction i to edge_dest[j].  Only fully processed (BLACK) nodes
 * can carry classified edges; the RTE_BPF_LOG call head and the 'node'
 * assignment are elided in this listing.
 */
847 log_loop(const struct bpf_verifier *bvf)
850 struct inst_node *node;
852 for (i = 0; i != bvf->prm->nb_ins; i++) {
855 if (node->colour != BLACK)
858 for (j = 0; j != node->nb_edge; j++) {
859 if (node->edge_type[j] == BACK_EDGE)
861 "loop at pc:%u --> pc:%u;\n",
862 i, node->edge_dest[j]);
868 * First pass goes through all instructions in the set, checks that each
869 * instruction is a valid one (correct syntax, valid field values, etc.)
870 * and constructs control flow graph (CFG).
871 * Then depth-first search is performed over the constructed graph.
872 * Programs with unreachable instructions and/or loops will be rejected.
/*
 * First verification pass: per-instruction syntax checking (check_syntax)
 * and CFG construction via add_edge, followed by the DFS and post-checks
 * for unreachable code, DFS consistency and loops (back edges).
 * Several lines (locals, error returns, switch head, dfs() invocation)
 * are elided in this listing.
 */
875 validate(struct bpf_verifier *bvf)
879 struct inst_node *node;
880 const struct ebpf_insn *ins;
884 for (i = 0; i < bvf->prm->nb_ins; i++) {
886 ins = bvf->prm->ins + i;
889 err = check_syntax(ins);
891 RTE_BPF_LOG(ERR, "%s: %s at pc: %u\n",
897 * construct CFG, jcc nodes have two outgoing edges,
898 * 'exit' nodes - none, all other nodes have exactly one
902 case (BPF_JMP | EBPF_EXIT):
/* conditional jumps: fall-through edge plus taken-branch edge */
904 case (BPF_JMP | BPF_JEQ | BPF_K):
905 case (BPF_JMP | EBPF_JNE | BPF_K):
906 case (BPF_JMP | BPF_JGT | BPF_K):
907 case (BPF_JMP | EBPF_JLT | BPF_K):
908 case (BPF_JMP | BPF_JGE | BPF_K):
909 case (BPF_JMP | EBPF_JLE | BPF_K):
910 case (BPF_JMP | EBPF_JSGT | BPF_K):
911 case (BPF_JMP | EBPF_JSLT | BPF_K):
912 case (BPF_JMP | EBPF_JSGE | BPF_K):
913 case (BPF_JMP | EBPF_JSLE | BPF_K):
914 case (BPF_JMP | BPF_JSET | BPF_K):
915 case (BPF_JMP | BPF_JEQ | BPF_X):
916 case (BPF_JMP | EBPF_JNE | BPF_X):
917 case (BPF_JMP | BPF_JGT | BPF_X):
918 case (BPF_JMP | EBPF_JLT | BPF_X):
919 case (BPF_JMP | BPF_JGE | BPF_X):
920 case (BPF_JMP | EBPF_JLE | BPF_X):
921 case (BPF_JMP | EBPF_JSGT | BPF_X):
922 case (BPF_JMP | EBPF_JSLT | BPF_X):
923 case (BPF_JMP | EBPF_JSGE | BPF_X):
924 case (BPF_JMP | EBPF_JSLE | BPF_X):
925 case (BPF_JMP | BPF_JSET | BPF_X):
926 rc |= add_edge(bvf, node, i + ins->off + 1);
927 rc |= add_edge(bvf, node, i + 1);
/* unconditional jump: single taken edge */
930 case (BPF_JMP | BPF_JA):
931 rc |= add_edge(bvf, node, i + ins->off + 1);
933 /* load 64 bit immediate value */
/* LDDW occupies two instruction slots, so it skips over the next one */
934 case (BPF_LD | BPF_IMM | EBPF_DW):
935 rc |= add_edge(bvf, node, i + 2);
939 rc |= add_edge(bvf, node, i + 1);
944 bvf->node_colour[WHITE]++;
/* NOTE(review): ",," after [GREY]=%u in the format string below looks
 * like a typo - fix together with a code change, not here. */
952 RTE_BPF_LOG(DEBUG, "%s(%p) stats:\n"
955 "node_color={[WHITE]=%u, [GREY]=%u,, [BLACK]=%u};\n"
956 "edge_type={[UNKNOWN]=%u, [TREE]=%u, [BACK]=%u, [CROSS]=%u};\n",
960 bvf->node_colour[WHITE], bvf->node_colour[GREY],
961 bvf->node_colour[BLACK],
962 bvf->edge_type[UNKNOWN_EDGE], bvf->edge_type[TREE_EDGE],
963 bvf->edge_type[BACK_EDGE], bvf->edge_type[CROSS_EDGE]);
/* every node must have been fully processed, else some code is unreachable */
965 if (bvf->node_colour[BLACK] != bvf->nb_nodes) {
966 RTE_BPF_LOG(ERR, "%s(%p) unreachable instructions;\n",
968 log_unreachable(bvf);
/* leftover GREY/WHITE nodes or unclassified edges indicate a DFS bug */
972 if (bvf->node_colour[GREY] != 0 || bvf->node_colour[WHITE] != 0 ||
973 bvf->edge_type[UNKNOWN_EDGE] != 0) {
974 RTE_BPF_LOG(ERR, "%s(%p) DFS internal error;\n",
/* any back edge means the program contains a loop - reject */
979 if (bvf->edge_type[BACK_EDGE] != 0) {
980 RTE_BPF_LOG(ERR, "%s(%p) loops detected;\n",
990 * helper functions get/free eval states.
/*
 * Take the next free eval state from the pool; the exhausted-pool
 * return (when cur == num) is elided in this listing.
 */
992 static struct bpf_eval_state *
993 pull_eval_state(struct bpf_verifier *bvf)
997 n = bvf->evst_pool.cur;
998 if (n == bvf->evst_pool.num)
1001 bvf->evst_pool.cur = n + 1;
1002 return bvf->evst_pool.ent + n;
/* Return the most recently pulled eval state to the pool (stack-style). */
1006 push_eval_state(struct bpf_verifier *bvf)
1008 bvf->evst_pool.cur--;
/* Release the eval-state pool allocation and reset its bookkeeping. */
1012 evst_pool_fini(struct bpf_verifier *bvf)
1015 free(bvf->evst_pool.ent);
1016 memset(&bvf->evst_pool, 0, sizeof(bvf->evst_pool));
/*
 * Allocate the eval-state pool: one slot per conditional-jump node plus
 * one for the initial state, which is pulled immediately into bvf->evst.
 * The allocation-failure return is elided in this listing.
 */
1020 evst_pool_init(struct bpf_verifier *bvf)
1024 n = bvf->nb_jcc_nodes + 1;
1026 bvf->evst_pool.ent = calloc(n, sizeof(bvf->evst_pool.ent[0]));
1027 if (bvf->evst_pool.ent == NULL)
1030 bvf->evst_pool.num = n;
1031 bvf->evst_pool.cur = 0;
1033 bvf->evst = pull_eval_state(bvf);
1038 * Save current eval state.
/*
 * Snapshot the current eval state before exploring a branch:
 * pull a fresh state from the pool, copy the current state into it, and
 * stash the old current state on the node.  The swap is completed by an
 * elided line - presumably bvf->evst = st (TODO confirm) - as the debug
 * log below reports both old (node->evst) and new (bvf->evst) states.
 */
1041 save_eval_state(struct bpf_verifier *bvf, struct inst_node *node)
1043 struct bpf_eval_state *st;
1045 /* get new eval_state for this node */
1046 st = pull_eval_state(bvf);
1049 "%s: internal error (out of space) at pc: %u",
1050 __func__, get_node_idx(bvf, node));
1054 /* make a copy of current state */
1055 memcpy(st, bvf->evst, sizeof(*st));
1057 /* swap current state with new one */
1058 node->evst = bvf->evst;
1061 RTE_BPF_LOG(DEBUG, "%s(bvf=%p,node=%u) old/new states: %p/%p;\n",
1062 __func__, bvf, get_node_idx(bvf, node), node->evst, bvf->evst);
1068 * Restore previous eval state and mark current eval state as free.
/*
 * Undo save_eval_state: make the state stashed on the node current again
 * and return the abandoned state to the pool.  An elided line presumably
 * clears node->evst afterwards (TODO confirm against full source).
 */
1071 restore_eval_state(struct bpf_verifier *bvf, struct inst_node *node)
1073 RTE_BPF_LOG(DEBUG, "%s(bvf=%p,node=%u) old/new states: %p/%p;\n",
1074 __func__, bvf, get_node_idx(bvf, node), bvf->evst, node->evst);
1076 bvf->evst = node->evst;
1078 push_eval_state(bvf);
1082 * Do second pass through CFG and try to evaluate instructions
1083 * via each possible path.
1084 * Right now evaluation functionality is quite limited.
1085 * Still need to add extra checks for:
1086 * - use/return uninitialized registers.
1087 * - use uninitialized data from the stack.
1088 * - memory boundaries violation.
/*
 * Second verification pass: walk the CFG (same DFS structure as dfs())
 * and run each opcode's .eval hook from ins_chk, saving/restoring eval
 * states at branch points so every path is evaluated.  Locals, the
 * initial 'node'/'op' assignments and several closing lines are elided
 * in this listing.
 */
1091 evaluate(struct bpf_verifier *bvf)
1096 const struct ebpf_insn *ins;
1097 struct inst_node *next, *node;
1100 ins = bvf->prm->ins;
1103 while (node != NULL && rc == 0) {
1105 /* current node evaluation */
1106 idx = get_node_idx(bvf, node);
1109 if (ins_chk[op].eval != NULL) {
1110 err = ins_chk[op].eval(bvf, ins + idx);
1112 RTE_BPF_LOG(ERR, "%s: %s at pc: %u\n",
1113 __func__, err, idx);
1118 /* proceed through CFG */
1119 next = get_next_node(bvf, node);
1122 /* proceed with next child */
/* still more edges to explore from this node: snapshot state first */
1123 if (node->cur_edge != node->nb_edge)
1124 rc |= save_eval_state(bvf, node);
1125 else if (node->evst != NULL)
1126 restore_eval_state(bvf, node);
1128 next->prev_node = get_node_idx(bvf, node);
1132 * finished with current node and all its kids,
1133 * proceed with parent
1136 node = get_prev_node(bvf, node);
/* back at the root node: traversal complete */
1139 if (node == bvf->in)
1148 bpf_validate(struct rte_bpf *bpf)
1151 struct bpf_verifier bvf;
1153 /* check input argument type, don't allow mbuf ptr on 32-bit */
1154 if (bpf->prm.prog_arg.type != RTE_BPF_ARG_RAW &&
1155 bpf->prm.prog_arg.type != RTE_BPF_ARG_PTR &&
1156 (sizeof(uint64_t) != sizeof(uintptr_t) ||
1157 bpf->prm.prog_arg.type != RTE_BPF_ARG_PTR_MBUF)) {
1158 RTE_BPF_LOG(ERR, "%s: unsupported argument type\n", __func__);
1162 memset(&bvf, 0, sizeof(bvf));
1163 bvf.prm = &bpf->prm;
1164 bvf.in = calloc(bpf->prm.nb_ins, sizeof(bvf.in[0]));
1168 rc = validate(&bvf);
1171 rc = evst_pool_init(&bvf);
1173 rc = evaluate(&bvf);
1174 evst_pool_fini(&bvf);
1179 /* copy collected info */
1181 bpf->stack_sz = bvf.stack_sz;