1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
12 #include <rte_common.h>
14 #include <rte_debug.h>
15 #include <rte_memory.h>
17 #include <rte_byteorder.h>
/*
 * Helper macros for the eBPF interpreter loop.
 * "reg" is the register file (array of EBPF_REG_NUM uint64_t),
 * "ins" points at the instruction being executed.
 */

/* unconditional jump: advance pc by the instruction offset */
#define BPF_JMP_UNC(ins)	((ins) += (ins)->off)

/* conditional jump, register operand: taken iff dst <op> src */
#define BPF_JMP_CND_REG(reg, ins, op, type)	\
	((ins) += \
		((type)(reg)[(ins)->dst_reg] op (type)(reg)[(ins)->src_reg]) ? \
		(ins)->off : 0)

/* conditional jump, immediate operand: taken iff dst <op> imm */
#define BPF_JMP_CND_IMM(reg, ins, op, type)	\
	((ins) += \
		((type)(reg)[(ins)->dst_reg] op (type)(ins)->imm) ? \
		(ins)->off : 0)

/* arithmetic negation of dst at the given width */
#define BPF_NEG_ALU(reg, ins, type)	\
	((reg)[(ins)->dst_reg] = (type)(-(reg)[(ins)->dst_reg]))

/* dst = src (width given by type) */
#define EBPF_MOV_ALU_REG(reg, ins, type)	\
	((reg)[(ins)->dst_reg] = (type)(reg)[(ins)->src_reg])

/* dst = dst <op> src */
#define BPF_OP_ALU_REG(reg, ins, op, type)	\
	((reg)[(ins)->dst_reg] = \
		(type)(reg)[(ins)->dst_reg] op (type)(reg)[(ins)->src_reg])

/* dst = imm */
#define EBPF_MOV_ALU_IMM(reg, ins, type)	\
	((reg)[(ins)->dst_reg] = (type)(ins)->imm)

/* dst = dst <op> imm */
#define BPF_OP_ALU_IMM(reg, ins, op, type)	\
	((reg)[(ins)->dst_reg] = \
		(type)(reg)[(ins)->dst_reg] op (type)(ins)->imm)

/*
 * Guard used before DIV/MOD by a register: eBPF semantics terminate the
 * program with return value 0 on division by zero, so log and bail out.
 */
#define BPF_DIV_ZERO_CHECK(bpf, reg, ins, type) do { \
	if ((type)(reg)[(ins)->src_reg] == 0) { \
		RTE_BPF_LOG(ERR, \
			"%s(%p): division by 0 at pc: %#zx;\n", \
			__func__, bpf, \
			(uintptr_t)(ins) - (uintptr_t)(bpf)->prm.ins); \
		return 0; \
	} \
} while (0)

/* dst = *(type *)(src + off) */
#define BPF_LD_REG(reg, ins, type)	\
	((reg)[(ins)->dst_reg] = \
		*(type *)(uintptr_t)((reg)[(ins)->src_reg] + (ins)->off))

/* *(type *)(dst + off) = imm */
#define BPF_ST_IMM(reg, ins, type)	\
	(*(type *)(uintptr_t)((reg)[(ins)->dst_reg] + (ins)->off) = \
		(type)(ins)->imm)

/* *(type *)(dst + off) = src */
#define BPF_ST_REG(reg, ins, type)	\
	(*(type *)(uintptr_t)((reg)[(ins)->dst_reg] + (ins)->off) = \
		(type)(reg)[(ins)->src_reg])

/* atomic fetch-add: *(tp-bit *)(dst + off) += src */
#define BPF_ST_XADD_REG(reg, ins, tp)	\
	(rte_atomic##tp##_add((rte_atomic##tp##_t *) \
		(uintptr_t)((reg)[(ins)->dst_reg] + (ins)->off), \
		(reg)[(ins)->src_reg]))
78 bpf_alu_be(uint64_t reg[EBPF_REG_NUM], const struct ebpf_insn *ins)
82 v = reg + ins->dst_reg;
85 *v = rte_cpu_to_be_16(*v);
88 *v = rte_cpu_to_be_32(*v);
91 *v = rte_cpu_to_be_64(*v);
97 bpf_alu_le(uint64_t reg[EBPF_REG_NUM], const struct ebpf_insn *ins)
101 v = reg + ins->dst_reg;
104 *v = rte_cpu_to_le_16(*v);
107 *v = rte_cpu_to_le_32(*v);
110 *v = rte_cpu_to_le_64(*v);
115 static inline uint64_t
116 bpf_exec(const struct rte_bpf *bpf, uint64_t reg[EBPF_REG_NUM])
118 const struct ebpf_insn *ins;
120 for (ins = bpf->prm.ins; ; ins++) {
122 /* 32 bit ALU IMM operations */
123 case (BPF_ALU | BPF_ADD | BPF_K):
124 BPF_OP_ALU_IMM(reg, ins, +, uint32_t);
126 case (BPF_ALU | BPF_SUB | BPF_K):
127 BPF_OP_ALU_IMM(reg, ins, -, uint32_t);
129 case (BPF_ALU | BPF_AND | BPF_K):
130 BPF_OP_ALU_IMM(reg, ins, &, uint32_t);
132 case (BPF_ALU | BPF_OR | BPF_K):
133 BPF_OP_ALU_IMM(reg, ins, |, uint32_t);
135 case (BPF_ALU | BPF_LSH | BPF_K):
136 BPF_OP_ALU_IMM(reg, ins, <<, uint32_t);
138 case (BPF_ALU | BPF_RSH | BPF_K):
139 BPF_OP_ALU_IMM(reg, ins, >>, uint32_t);
141 case (BPF_ALU | BPF_XOR | BPF_K):
142 BPF_OP_ALU_IMM(reg, ins, ^, uint32_t);
144 case (BPF_ALU | BPF_MUL | BPF_K):
145 BPF_OP_ALU_IMM(reg, ins, *, uint32_t);
147 case (BPF_ALU | BPF_DIV | BPF_K):
148 BPF_OP_ALU_IMM(reg, ins, /, uint32_t);
150 case (BPF_ALU | BPF_MOD | BPF_K):
151 BPF_OP_ALU_IMM(reg, ins, %, uint32_t);
153 case (BPF_ALU | EBPF_MOV | BPF_K):
154 EBPF_MOV_ALU_IMM(reg, ins, uint32_t);
156 /* 32 bit ALU REG operations */
157 case (BPF_ALU | BPF_ADD | BPF_X):
158 BPF_OP_ALU_REG(reg, ins, +, uint32_t);
160 case (BPF_ALU | BPF_SUB | BPF_X):
161 BPF_OP_ALU_REG(reg, ins, -, uint32_t);
163 case (BPF_ALU | BPF_AND | BPF_X):
164 BPF_OP_ALU_REG(reg, ins, &, uint32_t);
166 case (BPF_ALU | BPF_OR | BPF_X):
167 BPF_OP_ALU_REG(reg, ins, |, uint32_t);
169 case (BPF_ALU | BPF_LSH | BPF_X):
170 BPF_OP_ALU_REG(reg, ins, <<, uint32_t);
172 case (BPF_ALU | BPF_RSH | BPF_X):
173 BPF_OP_ALU_REG(reg, ins, >>, uint32_t);
175 case (BPF_ALU | BPF_XOR | BPF_X):
176 BPF_OP_ALU_REG(reg, ins, ^, uint32_t);
178 case (BPF_ALU | BPF_MUL | BPF_X):
179 BPF_OP_ALU_REG(reg, ins, *, uint32_t);
181 case (BPF_ALU | BPF_DIV | BPF_X):
182 BPF_DIV_ZERO_CHECK(bpf, reg, ins, uint32_t);
183 BPF_OP_ALU_REG(reg, ins, /, uint32_t);
185 case (BPF_ALU | BPF_MOD | BPF_X):
186 BPF_DIV_ZERO_CHECK(bpf, reg, ins, uint32_t);
187 BPF_OP_ALU_REG(reg, ins, %, uint32_t);
189 case (BPF_ALU | EBPF_MOV | BPF_X):
190 EBPF_MOV_ALU_REG(reg, ins, uint32_t);
192 case (BPF_ALU | BPF_NEG):
193 BPF_NEG_ALU(reg, ins, uint32_t);
195 case (BPF_ALU | EBPF_END | EBPF_TO_BE):
196 bpf_alu_be(reg, ins);
198 case (BPF_ALU | EBPF_END | EBPF_TO_LE):
199 bpf_alu_le(reg, ins);
201 /* 64 bit ALU IMM operations */
202 case (EBPF_ALU64 | BPF_ADD | BPF_K):
203 BPF_OP_ALU_IMM(reg, ins, +, uint64_t);
205 case (EBPF_ALU64 | BPF_SUB | BPF_K):
206 BPF_OP_ALU_IMM(reg, ins, -, uint64_t);
208 case (EBPF_ALU64 | BPF_AND | BPF_K):
209 BPF_OP_ALU_IMM(reg, ins, &, uint64_t);
211 case (EBPF_ALU64 | BPF_OR | BPF_K):
212 BPF_OP_ALU_IMM(reg, ins, |, uint64_t);
214 case (EBPF_ALU64 | BPF_LSH | BPF_K):
215 BPF_OP_ALU_IMM(reg, ins, <<, uint64_t);
217 case (EBPF_ALU64 | BPF_RSH | BPF_K):
218 BPF_OP_ALU_IMM(reg, ins, >>, uint64_t);
220 case (EBPF_ALU64 | EBPF_ARSH | BPF_K):
221 BPF_OP_ALU_IMM(reg, ins, >>, int64_t);
223 case (EBPF_ALU64 | BPF_XOR | BPF_K):
224 BPF_OP_ALU_IMM(reg, ins, ^, uint64_t);
226 case (EBPF_ALU64 | BPF_MUL | BPF_K):
227 BPF_OP_ALU_IMM(reg, ins, *, uint64_t);
229 case (EBPF_ALU64 | BPF_DIV | BPF_K):
230 BPF_OP_ALU_IMM(reg, ins, /, uint64_t);
232 case (EBPF_ALU64 | BPF_MOD | BPF_K):
233 BPF_OP_ALU_IMM(reg, ins, %, uint64_t);
235 case (EBPF_ALU64 | EBPF_MOV | BPF_K):
236 EBPF_MOV_ALU_IMM(reg, ins, uint64_t);
238 /* 64 bit ALU REG operations */
239 case (EBPF_ALU64 | BPF_ADD | BPF_X):
240 BPF_OP_ALU_REG(reg, ins, +, uint64_t);
242 case (EBPF_ALU64 | BPF_SUB | BPF_X):
243 BPF_OP_ALU_REG(reg, ins, -, uint64_t);
245 case (EBPF_ALU64 | BPF_AND | BPF_X):
246 BPF_OP_ALU_REG(reg, ins, &, uint64_t);
248 case (EBPF_ALU64 | BPF_OR | BPF_X):
249 BPF_OP_ALU_REG(reg, ins, |, uint64_t);
251 case (EBPF_ALU64 | BPF_LSH | BPF_X):
252 BPF_OP_ALU_REG(reg, ins, <<, uint64_t);
254 case (EBPF_ALU64 | BPF_RSH | BPF_X):
255 BPF_OP_ALU_REG(reg, ins, >>, uint64_t);
257 case (EBPF_ALU64 | EBPF_ARSH | BPF_X):
258 BPF_OP_ALU_REG(reg, ins, >>, int64_t);
260 case (EBPF_ALU64 | BPF_XOR | BPF_X):
261 BPF_OP_ALU_REG(reg, ins, ^, uint64_t);
263 case (EBPF_ALU64 | BPF_MUL | BPF_X):
264 BPF_OP_ALU_REG(reg, ins, *, uint64_t);
266 case (EBPF_ALU64 | BPF_DIV | BPF_X):
267 BPF_DIV_ZERO_CHECK(bpf, reg, ins, uint64_t);
268 BPF_OP_ALU_REG(reg, ins, /, uint64_t);
270 case (EBPF_ALU64 | BPF_MOD | BPF_X):
271 BPF_DIV_ZERO_CHECK(bpf, reg, ins, uint64_t);
272 BPF_OP_ALU_REG(reg, ins, %, uint64_t);
274 case (EBPF_ALU64 | EBPF_MOV | BPF_X):
275 EBPF_MOV_ALU_REG(reg, ins, uint64_t);
277 case (EBPF_ALU64 | BPF_NEG):
278 BPF_NEG_ALU(reg, ins, uint64_t);
280 /* load instructions */
281 case (BPF_LDX | BPF_MEM | BPF_B):
282 BPF_LD_REG(reg, ins, uint8_t);
284 case (BPF_LDX | BPF_MEM | BPF_H):
285 BPF_LD_REG(reg, ins, uint16_t);
287 case (BPF_LDX | BPF_MEM | BPF_W):
288 BPF_LD_REG(reg, ins, uint32_t);
290 case (BPF_LDX | BPF_MEM | EBPF_DW):
291 BPF_LD_REG(reg, ins, uint64_t);
293 /* load 64 bit immediate value */
294 case (BPF_LD | BPF_IMM | EBPF_DW):
295 reg[ins->dst_reg] = (uint32_t)ins[0].imm |
296 (uint64_t)(uint32_t)ins[1].imm << 32;
299 /* store instructions */
300 case (BPF_STX | BPF_MEM | BPF_B):
301 BPF_ST_REG(reg, ins, uint8_t);
303 case (BPF_STX | BPF_MEM | BPF_H):
304 BPF_ST_REG(reg, ins, uint16_t);
306 case (BPF_STX | BPF_MEM | BPF_W):
307 BPF_ST_REG(reg, ins, uint32_t);
309 case (BPF_STX | BPF_MEM | EBPF_DW):
310 BPF_ST_REG(reg, ins, uint64_t);
312 case (BPF_ST | BPF_MEM | BPF_B):
313 BPF_ST_IMM(reg, ins, uint8_t);
315 case (BPF_ST | BPF_MEM | BPF_H):
316 BPF_ST_IMM(reg, ins, uint16_t);
318 case (BPF_ST | BPF_MEM | BPF_W):
319 BPF_ST_IMM(reg, ins, uint32_t);
321 case (BPF_ST | BPF_MEM | EBPF_DW):
322 BPF_ST_IMM(reg, ins, uint64_t);
324 /* atomic add instructions */
325 case (BPF_STX | EBPF_XADD | BPF_W):
326 BPF_ST_XADD_REG(reg, ins, 32);
328 case (BPF_STX | EBPF_XADD | EBPF_DW):
329 BPF_ST_XADD_REG(reg, ins, 64);
331 /* jump instructions */
332 case (BPF_JMP | BPF_JA):
335 /* jump IMM instructions */
336 case (BPF_JMP | BPF_JEQ | BPF_K):
337 BPF_JMP_CND_IMM(reg, ins, ==, uint64_t);
339 case (BPF_JMP | EBPF_JNE | BPF_K):
340 BPF_JMP_CND_IMM(reg, ins, !=, uint64_t);
342 case (BPF_JMP | BPF_JGT | BPF_K):
343 BPF_JMP_CND_IMM(reg, ins, >, uint64_t);
345 case (BPF_JMP | EBPF_JLT | BPF_K):
346 BPF_JMP_CND_IMM(reg, ins, <, uint64_t);
348 case (BPF_JMP | BPF_JGE | BPF_K):
349 BPF_JMP_CND_IMM(reg, ins, >=, uint64_t);
351 case (BPF_JMP | EBPF_JLE | BPF_K):
352 BPF_JMP_CND_IMM(reg, ins, <=, uint64_t);
354 case (BPF_JMP | EBPF_JSGT | BPF_K):
355 BPF_JMP_CND_IMM(reg, ins, >, int64_t);
357 case (BPF_JMP | EBPF_JSLT | BPF_K):
358 BPF_JMP_CND_IMM(reg, ins, <, int64_t);
360 case (BPF_JMP | EBPF_JSGE | BPF_K):
361 BPF_JMP_CND_IMM(reg, ins, >=, int64_t);
363 case (BPF_JMP | EBPF_JSLE | BPF_K):
364 BPF_JMP_CND_IMM(reg, ins, <=, int64_t);
366 case (BPF_JMP | BPF_JSET | BPF_K):
367 BPF_JMP_CND_IMM(reg, ins, &, uint64_t);
369 /* jump REG instructions */
370 case (BPF_JMP | BPF_JEQ | BPF_X):
371 BPF_JMP_CND_REG(reg, ins, ==, uint64_t);
373 case (BPF_JMP | EBPF_JNE | BPF_X):
374 BPF_JMP_CND_REG(reg, ins, !=, uint64_t);
376 case (BPF_JMP | BPF_JGT | BPF_X):
377 BPF_JMP_CND_REG(reg, ins, >, uint64_t);
379 case (BPF_JMP | EBPF_JLT | BPF_X):
380 BPF_JMP_CND_REG(reg, ins, <, uint64_t);
382 case (BPF_JMP | BPF_JGE | BPF_X):
383 BPF_JMP_CND_REG(reg, ins, >=, uint64_t);
385 case (BPF_JMP | EBPF_JLE | BPF_X):
386 BPF_JMP_CND_REG(reg, ins, <=, uint64_t);
388 case (BPF_JMP | EBPF_JSGT | BPF_X):
389 BPF_JMP_CND_REG(reg, ins, >, int64_t);
391 case (BPF_JMP | EBPF_JSLT | BPF_X):
392 BPF_JMP_CND_REG(reg, ins, <, int64_t);
394 case (BPF_JMP | EBPF_JSGE | BPF_X):
395 BPF_JMP_CND_REG(reg, ins, >=, int64_t);
397 case (BPF_JMP | EBPF_JSLE | BPF_X):
398 BPF_JMP_CND_REG(reg, ins, <=, int64_t);
400 case (BPF_JMP | BPF_JSET | BPF_X):
401 BPF_JMP_CND_REG(reg, ins, &, uint64_t);
403 /* call instructions */
404 case (BPF_JMP | EBPF_CALL):
405 reg[EBPF_REG_0] = bpf->prm.xsym[ins->imm].func(
406 reg[EBPF_REG_1], reg[EBPF_REG_2],
407 reg[EBPF_REG_3], reg[EBPF_REG_4],
410 /* return instruction */
411 case (BPF_JMP | EBPF_EXIT):
412 return reg[EBPF_REG_0];
415 "%s(%p): invalid opcode %#x at pc: %#zx;\n",
416 __func__, bpf, ins->code,
417 (uintptr_t)ins - (uintptr_t)bpf->prm.ins);
422 /* should never be reached */
427 __rte_experimental uint32_t
428 rte_bpf_exec_burst(const struct rte_bpf *bpf, void *ctx[], uint64_t rc[],
432 uint64_t reg[EBPF_REG_NUM];
433 uint64_t stack[MAX_BPF_STACK_SIZE / sizeof(uint64_t)];
435 for (i = 0; i != num; i++) {
437 reg[EBPF_REG_1] = (uintptr_t)ctx[i];
438 reg[EBPF_REG_10] = (uintptr_t)(stack + RTE_DIM(stack));
440 rc[i] = bpf_exec(bpf, reg);
446 __rte_experimental uint64_t
447 rte_bpf_exec(const struct rte_bpf *bpf, void *ctx)
451 rte_bpf_exec_burst(bpf, &ctx, &rc, 1);