/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <stdint.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_byteorder.h>

#include "bpf_impl.h"
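
/*
 * Helper macros below implement the semantics of individual eBPF
 * opcodes over the interpreter register file (reg) and the current
 * instruction (ins).
 */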

#define BPF_JMP_UNC(ins)	((ins) += (ins)->off)

#define BPF_JMP_CND_REG(reg, ins, op, type)	\
	((ins) += \
		((type)(reg)[(ins)->dst_reg] op (type)(reg)[(ins)->src_reg]) ? \
		(ins)->off : 0)

#define BPF_JMP_CND_IMM(reg, ins, op, type)	\
	((ins) += \
		((type)(reg)[(ins)->dst_reg] op (type)(ins)->imm) ? \
		(ins)->off : 0)

#define BPF_NEG_ALU(reg, ins, type)	\
	((reg)[(ins)->dst_reg] = (type)(-(reg)[(ins)->dst_reg]))

#define EBPF_MOV_ALU_REG(reg, ins, type)	\
	((reg)[(ins)->dst_reg] = (type)(reg)[(ins)->src_reg])

#define BPF_OP_ALU_REG(reg, ins, op, type)	\
	((reg)[(ins)->dst_reg] = \
		(type)(reg)[(ins)->dst_reg] op (type)(reg)[(ins)->src_reg])

#define EBPF_MOV_ALU_IMM(reg, ins, type)	\
	((reg)[(ins)->dst_reg] = (type)(ins)->imm)

#define BPF_OP_ALU_IMM(reg, ins, op, type)	\
	((reg)[(ins)->dst_reg] = \
		(type)(reg)[(ins)->dst_reg] op (type)(ins)->imm)
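
/*
 * Run-time guard: division or modulo by zero terminates execution
 * with a return value of 0 instead of faulting.
 */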
#define BPF_DIV_ZERO_CHECK(bpf, reg, ins, type) do { \
	if ((type)(reg)[(ins)->src_reg] == 0) { \
		RTE_BPF_LOG(ERR, \
			"%s(%p): division by 0 at pc: %#zx;\n", \
			__func__, bpf, \
			(uintptr_t)(ins) - (uintptr_t)(bpf)->prm.ins); \
		return 0; \
	} \
} while (0)
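
/* load/store through a pointer held in a register, plus a 16-bit offset */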
#define BPF_LD_REG(reg, ins, type)	\
	((reg)[(ins)->dst_reg] = \
		*(type *)(uintptr_t)((reg)[(ins)->src_reg] + (ins)->off))

#define BPF_ST_IMM(reg, ins, type)	\
	(*(type *)(uintptr_t)((reg)[(ins)->dst_reg] + (ins)->off) = \
		(type)(ins)->imm)

#define BPF_ST_REG(reg, ins, type)	\
	(*(type *)(uintptr_t)((reg)[(ins)->dst_reg] + (ins)->off) = \
		(type)(reg)[(ins)->src_reg])
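
/* atomic fetch-and-add (BPF_STX | EBPF_XADD) on a 32 or 64-bit location */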
#define BPF_ST_XADD_REG(reg, ins, tp)	\
	(rte_atomic##tp##_add((rte_atomic##tp##_t *) \
		(uintptr_t)((reg)[(ins)->dst_reg] + (ins)->off), \
		reg[ins->src_reg]))

/* BPF_LD | BPF_ABS/BPF_IND */

#define	NOP(x)	(x)

#define	BPF_LD_ABS(bpf, reg, ins, type, op) do { \
	const type *p = bpf_ld_mbuf(bpf, reg, ins, (ins)->imm, sizeof(type)); \
	if (p == NULL) \
		return 0; \
	reg[EBPF_REG_0] = op(p[0]); \
} while (0)

#define	BPF_LD_IND(bpf, reg, ins, type, op) do { \
	uint32_t ofs = reg[ins->src_reg] + (ins)->imm; \
	const type *p = bpf_ld_mbuf(bpf, reg, ins, ofs, sizeof(type)); \
	if (p == NULL) \
		return 0; \
	reg[EBPF_REG_0] = op(p[0]); \
} while (0)
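
/*
 * Implementations for (BPF_ALU | EBPF_END | EBPF_TO_BE/EBPF_TO_LE):
 * ins->imm selects the operand width (16/32/64 bits).
 */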
static inline void
bpf_alu_be(uint64_t reg[EBPF_REG_NUM], const struct ebpf_insn *ins)
{
	uint64_t *v;

	v = reg + ins->dst_reg;
	switch (ins->imm) {
	case 16:
		*v = rte_cpu_to_be_16(*v);
		break;
	case 32:
		*v = rte_cpu_to_be_32(*v);
		break;
	case 64:
		*v = rte_cpu_to_be_64(*v);
		break;
	}
}

static inline void
bpf_alu_le(uint64_t reg[EBPF_REG_NUM], const struct ebpf_insn *ins)
{
	uint64_t *v;

	v = reg + ins->dst_reg;
	switch (ins->imm) {
	case 16:
		*v = rte_cpu_to_le_16(*v);
		break;
	case 32:
		*v = rte_cpu_to_le_32(*v);
		break;
	case 64:
		*v = rte_cpu_to_le_64(*v);
		break;
	}
}
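
/*
 * Read len bytes at offset off from the packet whose mbuf pointer is
 * held in EBPF_REG_6. rte_pktmbuf_read() copies from a segmented mbuf
 * into the supplied buffer (here the storage of EBPF_REG_0) when the
 * bytes are not contiguous; it returns NULL when the request goes
 * beyond the packet boundary.
 */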
static inline const void *
bpf_ld_mbuf(const struct rte_bpf *bpf, uint64_t reg[EBPF_REG_NUM],
	const struct ebpf_insn *ins, uint32_t off, uint32_t len)
{
	const struct rte_mbuf *mb;
	const void *p;

	mb = (const struct rte_mbuf *)(uintptr_t)reg[EBPF_REG_6];
	p = rte_pktmbuf_read(mb, off, len, reg + EBPF_REG_0);
	if (p == NULL)
		RTE_BPF_LOG(DEBUG, "%s(bpf=%p, mbuf=%p, ofs=%u, len=%u): "
			"load beyond packet boundary at pc: %#zx;\n",
			__func__, bpf, mb, off, len,
			(uintptr_t)(ins) - (uintptr_t)(bpf)->prm.ins);
	return p;
}
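
/*
 * Interpreter main loop: decode ins->code and dispatch until
 * EBPF_EXIT or a run-time error (invalid opcode, division by zero,
 * out-of-bounds packet load) ends execution.
 */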
static inline uint64_t
bpf_exec(const struct rte_bpf *bpf, uint64_t reg[EBPF_REG_NUM])
{
	const struct ebpf_insn *ins;

	for (ins = bpf->prm.ins; ; ins++) {
		switch (ins->code) {
		/* 32 bit ALU IMM operations */
		case (BPF_ALU | BPF_ADD | BPF_K):
			BPF_OP_ALU_IMM(reg, ins, +, uint32_t);
			break;
		case (BPF_ALU | BPF_SUB | BPF_K):
			BPF_OP_ALU_IMM(reg, ins, -, uint32_t);
			break;
		case (BPF_ALU | BPF_AND | BPF_K):
			BPF_OP_ALU_IMM(reg, ins, &, uint32_t);
			break;
		case (BPF_ALU | BPF_OR | BPF_K):
			BPF_OP_ALU_IMM(reg, ins, |, uint32_t);
			break;
		case (BPF_ALU | BPF_LSH | BPF_K):
			BPF_OP_ALU_IMM(reg, ins, <<, uint32_t);
			break;
		case (BPF_ALU | BPF_RSH | BPF_K):
			BPF_OP_ALU_IMM(reg, ins, >>, uint32_t);
			break;
		case (BPF_ALU | BPF_XOR | BPF_K):
			BPF_OP_ALU_IMM(reg, ins, ^, uint32_t);
			break;
		case (BPF_ALU | BPF_MUL | BPF_K):
			BPF_OP_ALU_IMM(reg, ins, *, uint32_t);
			break;
		case (BPF_ALU | BPF_DIV | BPF_K):
			BPF_OP_ALU_IMM(reg, ins, /, uint32_t);
			break;
		case (BPF_ALU | BPF_MOD | BPF_K):
			BPF_OP_ALU_IMM(reg, ins, %, uint32_t);
			break;
		case (BPF_ALU | EBPF_MOV | BPF_K):
			EBPF_MOV_ALU_IMM(reg, ins, uint32_t);
			break;
		/* 32 bit ALU REG operations */
		case (BPF_ALU | BPF_ADD | BPF_X):
			BPF_OP_ALU_REG(reg, ins, +, uint32_t);
			break;
		case (BPF_ALU | BPF_SUB | BPF_X):
			BPF_OP_ALU_REG(reg, ins, -, uint32_t);
			break;
		case (BPF_ALU | BPF_AND | BPF_X):
			BPF_OP_ALU_REG(reg, ins, &, uint32_t);
			break;
		case (BPF_ALU | BPF_OR | BPF_X):
			BPF_OP_ALU_REG(reg, ins, |, uint32_t);
			break;
		case (BPF_ALU | BPF_LSH | BPF_X):
			BPF_OP_ALU_REG(reg, ins, <<, uint32_t);
			break;
		case (BPF_ALU | BPF_RSH | BPF_X):
			BPF_OP_ALU_REG(reg, ins, >>, uint32_t);
			break;
		case (BPF_ALU | BPF_XOR | BPF_X):
			BPF_OP_ALU_REG(reg, ins, ^, uint32_t);
			break;
		case (BPF_ALU | BPF_MUL | BPF_X):
			BPF_OP_ALU_REG(reg, ins, *, uint32_t);
			break;
		case (BPF_ALU | BPF_DIV | BPF_X):
			BPF_DIV_ZERO_CHECK(bpf, reg, ins, uint32_t);
			BPF_OP_ALU_REG(reg, ins, /, uint32_t);
			break;
		case (BPF_ALU | BPF_MOD | BPF_X):
			BPF_DIV_ZERO_CHECK(bpf, reg, ins, uint32_t);
			BPF_OP_ALU_REG(reg, ins, %, uint32_t);
			break;
		case (BPF_ALU | EBPF_MOV | BPF_X):
			EBPF_MOV_ALU_REG(reg, ins, uint32_t);
			break;
		case (BPF_ALU | BPF_NEG):
			BPF_NEG_ALU(reg, ins, uint32_t);
			break;
		case (BPF_ALU | EBPF_END | EBPF_TO_BE):
			bpf_alu_be(reg, ins);
			break;
		case (BPF_ALU | EBPF_END | EBPF_TO_LE):
			bpf_alu_le(reg, ins);
			break;
		/* 64 bit ALU IMM operations */
		case (EBPF_ALU64 | BPF_ADD | BPF_K):
			BPF_OP_ALU_IMM(reg, ins, +, uint64_t);
			break;
		case (EBPF_ALU64 | BPF_SUB | BPF_K):
			BPF_OP_ALU_IMM(reg, ins, -, uint64_t);
			break;
		case (EBPF_ALU64 | BPF_AND | BPF_K):
			BPF_OP_ALU_IMM(reg, ins, &, uint64_t);
			break;
		case (EBPF_ALU64 | BPF_OR | BPF_K):
			BPF_OP_ALU_IMM(reg, ins, |, uint64_t);
			break;
		case (EBPF_ALU64 | BPF_LSH | BPF_K):
			BPF_OP_ALU_IMM(reg, ins, <<, uint64_t);
			break;
		case (EBPF_ALU64 | BPF_RSH | BPF_K):
			BPF_OP_ALU_IMM(reg, ins, >>, uint64_t);
			break;
		case (EBPF_ALU64 | EBPF_ARSH | BPF_K):
			BPF_OP_ALU_IMM(reg, ins, >>, int64_t);
			break;
		case (EBPF_ALU64 | BPF_XOR | BPF_K):
			BPF_OP_ALU_IMM(reg, ins, ^, uint64_t);
			break;
		case (EBPF_ALU64 | BPF_MUL | BPF_K):
			BPF_OP_ALU_IMM(reg, ins, *, uint64_t);
			break;
		case (EBPF_ALU64 | BPF_DIV | BPF_K):
			BPF_OP_ALU_IMM(reg, ins, /, uint64_t);
			break;
		case (EBPF_ALU64 | BPF_MOD | BPF_K):
			BPF_OP_ALU_IMM(reg, ins, %, uint64_t);
			break;
		case (EBPF_ALU64 | EBPF_MOV | BPF_K):
			EBPF_MOV_ALU_IMM(reg, ins, uint64_t);
			break;
		/* 64 bit ALU REG operations */
		case (EBPF_ALU64 | BPF_ADD | BPF_X):
			BPF_OP_ALU_REG(reg, ins, +, uint64_t);
			break;
		case (EBPF_ALU64 | BPF_SUB | BPF_X):
			BPF_OP_ALU_REG(reg, ins, -, uint64_t);
			break;
		case (EBPF_ALU64 | BPF_AND | BPF_X):
			BPF_OP_ALU_REG(reg, ins, &, uint64_t);
			break;
		case (EBPF_ALU64 | BPF_OR | BPF_X):
			BPF_OP_ALU_REG(reg, ins, |, uint64_t);
			break;
		case (EBPF_ALU64 | BPF_LSH | BPF_X):
			BPF_OP_ALU_REG(reg, ins, <<, uint64_t);
			break;
		case (EBPF_ALU64 | BPF_RSH | BPF_X):
			BPF_OP_ALU_REG(reg, ins, >>, uint64_t);
			break;
		case (EBPF_ALU64 | EBPF_ARSH | BPF_X):
			BPF_OP_ALU_REG(reg, ins, >>, int64_t);
			break;
		case (EBPF_ALU64 | BPF_XOR | BPF_X):
			BPF_OP_ALU_REG(reg, ins, ^, uint64_t);
			break;
		case (EBPF_ALU64 | BPF_MUL | BPF_X):
			BPF_OP_ALU_REG(reg, ins, *, uint64_t);
			break;
		case (EBPF_ALU64 | BPF_DIV | BPF_X):
			BPF_DIV_ZERO_CHECK(bpf, reg, ins, uint64_t);
			BPF_OP_ALU_REG(reg, ins, /, uint64_t);
			break;
		case (EBPF_ALU64 | BPF_MOD | BPF_X):
			BPF_DIV_ZERO_CHECK(bpf, reg, ins, uint64_t);
			BPF_OP_ALU_REG(reg, ins, %, uint64_t);
			break;
		case (EBPF_ALU64 | EBPF_MOV | BPF_X):
			EBPF_MOV_ALU_REG(reg, ins, uint64_t);
			break;
		case (EBPF_ALU64 | BPF_NEG):
			BPF_NEG_ALU(reg, ins, uint64_t);
			break;
		/* load instructions */
		case (BPF_LDX | BPF_MEM | BPF_B):
			BPF_LD_REG(reg, ins, uint8_t);
			break;
		case (BPF_LDX | BPF_MEM | BPF_H):
			BPF_LD_REG(reg, ins, uint16_t);
			break;
		case (BPF_LDX | BPF_MEM | BPF_W):
			BPF_LD_REG(reg, ins, uint32_t);
			break;
		case (BPF_LDX | BPF_MEM | EBPF_DW):
			BPF_LD_REG(reg, ins, uint64_t);
			break;
		/* load 64 bit immediate value */
		case (BPF_LD | BPF_IMM | EBPF_DW):
			reg[ins->dst_reg] = (uint32_t)ins[0].imm |
				(uint64_t)(uint32_t)ins[1].imm << 32;
			/* the 64-bit immediate occupies two instruction slots */
			ins++;
			break;
		/* load absolute instructions */
		case (BPF_LD | BPF_ABS | BPF_B):
			BPF_LD_ABS(bpf, reg, ins, uint8_t, NOP);
			break;
		case (BPF_LD | BPF_ABS | BPF_H):
			BPF_LD_ABS(bpf, reg, ins, uint16_t, rte_be_to_cpu_16);
			break;
		case (BPF_LD | BPF_ABS | BPF_W):
			BPF_LD_ABS(bpf, reg, ins, uint32_t, rte_be_to_cpu_32);
			break;
		/* load indirect instructions */
		case (BPF_LD | BPF_IND | BPF_B):
			BPF_LD_IND(bpf, reg, ins, uint8_t, NOP);
			break;
		case (BPF_LD | BPF_IND | BPF_H):
			BPF_LD_IND(bpf, reg, ins, uint16_t, rte_be_to_cpu_16);
			break;
		case (BPF_LD | BPF_IND | BPF_W):
			BPF_LD_IND(bpf, reg, ins, uint32_t, rte_be_to_cpu_32);
			break;
		/* store instructions */
		case (BPF_STX | BPF_MEM | BPF_B):
			BPF_ST_REG(reg, ins, uint8_t);
			break;
		case (BPF_STX | BPF_MEM | BPF_H):
			BPF_ST_REG(reg, ins, uint16_t);
			break;
		case (BPF_STX | BPF_MEM | BPF_W):
			BPF_ST_REG(reg, ins, uint32_t);
			break;
		case (BPF_STX | BPF_MEM | EBPF_DW):
			BPF_ST_REG(reg, ins, uint64_t);
			break;
		case (BPF_ST | BPF_MEM | BPF_B):
			BPF_ST_IMM(reg, ins, uint8_t);
			break;
		case (BPF_ST | BPF_MEM | BPF_H):
			BPF_ST_IMM(reg, ins, uint16_t);
			break;
		case (BPF_ST | BPF_MEM | BPF_W):
			BPF_ST_IMM(reg, ins, uint32_t);
			break;
		case (BPF_ST | BPF_MEM | EBPF_DW):
			BPF_ST_IMM(reg, ins, uint64_t);
			break;
		/* atomic add instructions */
		case (BPF_STX | EBPF_XADD | BPF_W):
			BPF_ST_XADD_REG(reg, ins, 32);
			break;
		case (BPF_STX | EBPF_XADD | EBPF_DW):
			BPF_ST_XADD_REG(reg, ins, 64);
			break;
		/* jump instructions */
		case (BPF_JMP | BPF_JA):
			BPF_JMP_UNC(ins);
			break;
		/* jump IMM instructions */
		case (BPF_JMP | BPF_JEQ | BPF_K):
			BPF_JMP_CND_IMM(reg, ins, ==, uint64_t);
			break;
		case (BPF_JMP | EBPF_JNE | BPF_K):
			BPF_JMP_CND_IMM(reg, ins, !=, uint64_t);
			break;
		case (BPF_JMP | BPF_JGT | BPF_K):
			BPF_JMP_CND_IMM(reg, ins, >, uint64_t);
			break;
		case (BPF_JMP | EBPF_JLT | BPF_K):
			BPF_JMP_CND_IMM(reg, ins, <, uint64_t);
			break;
		case (BPF_JMP | BPF_JGE | BPF_K):
			BPF_JMP_CND_IMM(reg, ins, >=, uint64_t);
			break;
		case (BPF_JMP | EBPF_JLE | BPF_K):
			BPF_JMP_CND_IMM(reg, ins, <=, uint64_t);
			break;
		case (BPF_JMP | EBPF_JSGT | BPF_K):
			BPF_JMP_CND_IMM(reg, ins, >, int64_t);
			break;
		case (BPF_JMP | EBPF_JSLT | BPF_K):
			BPF_JMP_CND_IMM(reg, ins, <, int64_t);
			break;
		case (BPF_JMP | EBPF_JSGE | BPF_K):
			BPF_JMP_CND_IMM(reg, ins, >=, int64_t);
			break;
		case (BPF_JMP | EBPF_JSLE | BPF_K):
			BPF_JMP_CND_IMM(reg, ins, <=, int64_t);
			break;
		case (BPF_JMP | BPF_JSET | BPF_K):
			BPF_JMP_CND_IMM(reg, ins, &, uint64_t);
			break;
		/* jump REG instructions */
		case (BPF_JMP | BPF_JEQ | BPF_X):
			BPF_JMP_CND_REG(reg, ins, ==, uint64_t);
			break;
		case (BPF_JMP | EBPF_JNE | BPF_X):
			BPF_JMP_CND_REG(reg, ins, !=, uint64_t);
			break;
		case (BPF_JMP | BPF_JGT | BPF_X):
			BPF_JMP_CND_REG(reg, ins, >, uint64_t);
			break;
		case (BPF_JMP | EBPF_JLT | BPF_X):
			BPF_JMP_CND_REG(reg, ins, <, uint64_t);
			break;
		case (BPF_JMP | BPF_JGE | BPF_X):
			BPF_JMP_CND_REG(reg, ins, >=, uint64_t);
			break;
		case (BPF_JMP | EBPF_JLE | BPF_X):
			BPF_JMP_CND_REG(reg, ins, <=, uint64_t);
			break;
		case (BPF_JMP | EBPF_JSGT | BPF_X):
			BPF_JMP_CND_REG(reg, ins, >, int64_t);
			break;
		case (BPF_JMP | EBPF_JSLT | BPF_X):
			BPF_JMP_CND_REG(reg, ins, <, int64_t);
			break;
		case (BPF_JMP | EBPF_JSGE | BPF_X):
			BPF_JMP_CND_REG(reg, ins, >=, int64_t);
			break;
		case (BPF_JMP | EBPF_JSLE | BPF_X):
			BPF_JMP_CND_REG(reg, ins, <=, int64_t);
			break;
		case (BPF_JMP | BPF_JSET | BPF_X):
			BPF_JMP_CND_REG(reg, ins, &, uint64_t);
			break;
		/* call instructions */
		case (BPF_JMP | EBPF_CALL):
			reg[EBPF_REG_0] = bpf->prm.xsym[ins->imm].func.val(
				reg[EBPF_REG_1], reg[EBPF_REG_2],
				reg[EBPF_REG_3], reg[EBPF_REG_4],
				reg[EBPF_REG_5]);
			break;
		/* return instruction */
		case (BPF_JMP | EBPF_EXIT):
			return reg[EBPF_REG_0];
472 "%s(%p): invalid opcode %#x at pc: %#zx;\n",
473 __func__, bpf, ins->code,
474 (uintptr_t)ins - (uintptr_t)bpf->prm.ins);
479 /* should never be reached */
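
/*
 * Execute the program once per ctx[i]: the eBPF calling convention
 * passes the context in R1 and the top of the (downward-growing)
 * stack in R10.
 */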
uint32_t
rte_bpf_exec_burst(const struct rte_bpf *bpf, void *ctx[], uint64_t rc[],
	uint32_t num)
{
	uint32_t i;
	uint64_t reg[EBPF_REG_NUM];
	uint64_t stack[MAX_BPF_STACK_SIZE / sizeof(uint64_t)];

	for (i = 0; i != num; i++) {

		reg[EBPF_REG_1] = (uintptr_t)ctx[i];
		reg[EBPF_REG_10] = (uintptr_t)(stack + RTE_DIM(stack));

		rc[i] = bpf_exec(bpf, reg);
	}

	return i;
}

uint64_t
rte_bpf_exec(const struct rte_bpf *bpf, void *ctx)
{
	uint64_t rc;

	rte_bpf_exec_burst(bpf, &ctx, &rc, 1);
	return rc;
}
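
/*
 * Usage sketch (illustrative only; prm setup and error handling are
 * elided, and the object file/section names below are placeholders):
 *
 *	struct rte_bpf_prm prm = { ... };
 *	struct rte_bpf *bpf = rte_bpf_elf_load(&prm, "filter.o", ".text");
 *	uint64_t rc = rte_bpf_exec(bpf, mbuf);
 *	rte_bpf_destroy(bpf);
 */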