/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/mman.h>

#include <rte_common.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_byteorder.h>

#include "bpf_impl.h"
#define GET_BPF_OP(op)	(BPF_OP(op) >> 4)
/* x86_64 integer registers */
enum {
	RAX = 0,  /* scratch, return value */
	RCX = 1,  /* scratch, 4th arg */
	RDX = 2,  /* scratch, 3rd arg */
	RBX = 3,  /* callee saved */
	RSP = 4,  /* stack pointer */
	RBP = 5,  /* frame pointer, callee saved */
	RSI = 6,  /* scratch, 2nd arg */
	RDI = 7,  /* scratch, 1st arg */
	R8 = 8,   /* scratch, 5th arg */
	R9 = 9,   /* scratch, 6th arg */
	R10 = 10, /* scratch */
	R11 = 11, /* scratch */
	R12 = 12, /* callee saved */
	R13 = 13, /* callee saved */
	R14 = 14, /* callee saved */
	R15 = 15, /* callee saved */
};
#define IS_EXT_REG(r)	((r) >= R8)
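
/*
 * The reg/rm fields in ModRM (and index/base in SIB) are only 3 bits wide,
 * so the extended registers R8-R15 need their 4th bit carried in the
 * REX.R/REX.X/REX.B prefix bits below.
 */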
/* REX prefix bits */
enum {
	REX_PREFIX = 0x40, /* fixed value 0100 */
	REX_W = 0x8,       /* 64bit operand size */
	REX_R = 0x4,       /* extension of the ModRM.reg field */
	REX_X = 0x2,       /* extension of the SIB.index field */
	REX_B = 0x1,       /* extension of the ModRM.rm field */
};

/* ModRM.mod payload (top two bits of the ModRM byte) */
enum {
	MOD_INDIRECT = 0, /* memory operand, no displacement */
	MOD_IDISP8 = 1,   /* memory operand + disp8 */
	MOD_IDISP32 = 2,  /* memory operand + disp32 */
	MOD_DIRECT = 3,   /* register operand */
};

/* SIB scale value (top two bits of the SIB byte) */
enum {
	SIB_SCALE_1 = 0,
	SIB_SCALE_2 = 1,
	SIB_SCALE_4 = 2,
	SIB_SCALE_8 = 3,
};
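
/*
 * Worked example: 'mov %rax, %rbx' encodes as 48 89 C3 --
 * REX_PREFIX|REX_W = 0x48, opcode 0x89 (mov r/m64, r64),
 * ModRM 0xC3 = MOD_DIRECT << 6 | RAX << 3 | RBX.
 */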
/*
 * eBPF to x86_64 register mappings.
 */
static const uint32_t ebpf2x86[] = {
	[EBPF_REG_0] = RAX,
	[EBPF_REG_1] = RDI,
	[EBPF_REG_2] = RSI,
	[EBPF_REG_3] = RDX,
	[EBPF_REG_4] = RCX,
	[EBPF_REG_5] = R8,
	[EBPF_REG_6] = RBX,
	[EBPF_REG_7] = R13,
	[EBPF_REG_8] = R14,
	[EBPF_REG_9] = R15,
	[EBPF_REG_10] = RBP,
};
/*
 * r10 and r11 are used as scratch temporary registers.
 */
#define REG_TMP0	R11
#define REG_TMP1	R10
/*
 * list of callee saved registers.
 * keep RBP as the last one.
 */
static const uint32_t save_regs[] = {RBX, R12, R13, R14, R15, RBP};
/* JIT generation state */
struct bpf_jit_state {
	uint32_t idx;     /* index of the BPF instruction being translated */
	size_t sz;        /* size of the generated code so far */
	struct {
		uint32_t num; /* number of EXIT instructions seen */
		int32_t off;  /* native offset of the shared epilog block */
	} exit;
	uint32_t reguse;  /* bitmask of x86 registers used so far */
	int32_t *off;     /* per-BPF-instruction native code offsets */
	uint8_t *ins;     /* buffer for the generated native code */
};
#define INUSE(v, r)	(((v) >> (r)) & 1)
#define USED(v, r)	((v) |= 1 << (r))
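
/*
 * reguse is a bitmask indexed by x86 register number: emit_rex() marks
 * every operand register it sees, and the prolog/epilog consult the mask
 * to save/restore only the callee-saved registers actually touched.
 */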
/*
 * In many cases for imm8 we can produce shorter code.
 */
static size_t
imm_size(int32_t v)
{
	if (v == (int8_t)v)
		return sizeof(int8_t);
	return sizeof(int32_t);
}
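
/*
 * e.g. imm_size(100) == 1, imm_size(300) == 4: only values in
 * [-128, 127] survive the sign-extended imm8 round trip.
 */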
/*
 * copy the given bytes into the instruction stream.
 */
static void
emit_bytes(struct bpf_jit_state *st, const uint8_t ins[], uint32_t sz)
{
	uint32_t i;

	if (st->ins != NULL) {
		for (i = 0; i != sz; i++)
			st->ins[st->sz + i] = ins[i];
	}
	st->sz += sz;
}
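
/*
 * Note that st->sz advances even when st->ins == NULL: that is what lets
 * bpf_jit_x86() run emit() as a pure sizing pass before any buffer exists.
 */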
/*
 * emit an immediate value of the given size.
 */
static void
emit_imm(struct bpf_jit_state *st, const uint32_t imm, uint32_t sz)
{
	union bpf_jit_imm {
		uint32_t u32;
		uint8_t u8[sizeof(uint32_t)];
	} v;

	v.u32 = imm;
	emit_bytes(st, v.u8, sz);
}
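
/*
 * x86 is little-endian, so emitting just the low 'sz' bytes of the union
 * yields a correctly truncated imm8/imm32 encoding.
 */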
/*
 * emit REX byte
 */
static void
emit_rex(struct bpf_jit_state *st, uint32_t op, uint32_t reg, uint32_t rm)
{
	uint8_t rex;

	/* mark operand registers as used */
	USED(st->reguse, reg);
	USED(st->reguse, rm);

	rex = 0;

	/* 64-bit operand size? */
	if (BPF_CLASS(op) == EBPF_ALU64 ||
			op == (BPF_ST | BPF_MEM | EBPF_DW) ||
			op == (BPF_STX | BPF_MEM | EBPF_DW) ||
			op == (BPF_STX | EBPF_XADD | EBPF_DW) ||
			op == (BPF_LD | BPF_IMM | EBPF_DW) ||
			(BPF_CLASS(op) == BPF_LDX &&
			BPF_MODE(op) == BPF_MEM &&
			BPF_SIZE(op) != BPF_W))
		rex |= REX_W;

	if (IS_EXT_REG(reg))
		rex |= REX_R;

	if (IS_EXT_REG(rm))
		rex |= REX_B;

	/* store using SIL, DIL */
	if (op == (BPF_STX | BPF_MEM | BPF_B) && (reg == RDI || reg == RSI))
		rex |= REX_PREFIX;

	if (rex != 0) {
		rex |= REX_PREFIX;
		emit_bytes(st, &rex, sizeof(rex));
	}
}
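
/*
 * Without any REX prefix, byte operations on register codes 4-7 address
 * the legacy AH/CH/DH/BH halves; an otherwise-empty REX (0x40) switches
 * them to SPL/BPL/SIL/DIL, hence the special case for byte stores
 * from RSI/RDI above.
 */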
/*
 * emit ModRM byte
 */
static void
emit_modregrm(struct bpf_jit_state *st, uint32_t mod, uint32_t reg, uint32_t rm)
{
	uint8_t v;

	v = mod << 6 | (reg & 7) << 3 | (rm & 7);
	emit_bytes(st, &v, sizeof(v));
}
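
/*
 * ModRM layout: [mod:2][reg:3][rm:3]; MOD_DIRECT selects a register
 * operand, the MOD_IDISP* values a memory operand with displacement.
 * The 4th bit of each register comes from REX.R/REX.B respectively.
 */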
/*
 * emit SIB byte
 */
static void
emit_sib(struct bpf_jit_state *st, uint32_t scale, uint32_t idx, uint32_t base)
{
	uint8_t v;

	v = scale << 6 | (idx & 7) << 3 | (base & 7);
	emit_bytes(st, &v, sizeof(v));
}
/*
 * emit OPCODE+REGIDX byte
 */
static void
emit_opcode(struct bpf_jit_state *st, uint8_t ops, uint32_t reg)
{
	uint8_t v;

	v = ops | (reg & 7);
	emit_bytes(st, &v, sizeof(v));
}
/*
 * emit xchg %<sreg>, %<dreg>
 */
static void
emit_xchg_reg(struct bpf_jit_state *st, uint32_t sreg, uint32_t dreg)
{
	const uint8_t ops = 0x87;

	emit_rex(st, EBPF_ALU64, sreg, dreg);
	emit_bytes(st, &ops, sizeof(ops));
	emit_modregrm(st, MOD_DIRECT, sreg, dreg);
}
/*
 * emit neg %<dreg>
 */
static void
emit_neg(struct bpf_jit_state *st, uint32_t op, uint32_t dreg)
{
	const uint8_t ops = 0xF7;
	const uint8_t mods = 3;

	emit_rex(st, op, 0, dreg);
	emit_bytes(st, &ops, sizeof(ops));
	emit_modregrm(st, MOD_DIRECT, mods, dreg);
}
/*
 * emit mov %<sreg>, %<dreg>
 */
static void
emit_mov_reg(struct bpf_jit_state *st, uint32_t op, uint32_t sreg,
	uint32_t dreg)
{
	const uint8_t ops = 0x89;

	/* if operands are 32-bit, a mov to self still clears the upper 32 bits */
	if (sreg != dreg || BPF_CLASS(op) == BPF_ALU) {
		emit_rex(st, op, sreg, dreg);
		emit_bytes(st, &ops, sizeof(ops));
		emit_modregrm(st, MOD_DIRECT, sreg, dreg);
	}
}
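
/*
 * On x86_64 any write to a 32-bit register zeroes bits 63:32, which is
 * exactly the eBPF 32-bit ALU semantics -- so emit_le2be() and friends can
 * reuse a 32-bit 'mov %r, %r' purely for its zero-extension side effect.
 */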
/*
 * emit movzwl %<sreg>, %<dreg>
 */
static void
emit_movzwl(struct bpf_jit_state *st, uint32_t sreg, uint32_t dreg)
{
	static const uint8_t ops[] = {0x0F, 0xB7};

	emit_rex(st, BPF_ALU, sreg, dreg);
	emit_bytes(st, ops, sizeof(ops));
	emit_modregrm(st, MOD_DIRECT, sreg, dreg);
}
/*
 * emit ror <imm8>, %<dreg>
 */
static void
emit_ror_imm(struct bpf_jit_state *st, uint32_t dreg, uint32_t imm)
{
	const uint8_t prfx = 0x66;
	const uint8_t ops = 0xC1;
	const uint8_t mods = 1;

	emit_bytes(st, &prfx, sizeof(prfx));
	emit_rex(st, BPF_ALU, 0, dreg);
	emit_bytes(st, &ops, sizeof(ops));
	emit_modregrm(st, MOD_DIRECT, mods, dreg);
	emit_imm(st, imm, imm_size(imm));
}
/*
 * emit bswap %<dreg> (32/64-bit byte swap)
 */
static void
emit_be2le_48(struct bpf_jit_state *st, uint32_t dreg, uint32_t imm)
{
	uint32_t rop;

	const uint8_t ops = 0x0F;
	const uint8_t mods = 1;

	rop = (imm == 64) ? EBPF_ALU64 : BPF_ALU;
	emit_rex(st, rop, 0, dreg);
	emit_bytes(st, &ops, sizeof(ops));
	emit_modregrm(st, MOD_DIRECT, mods, dreg);
}
static void
emit_be2le(struct bpf_jit_state *st, uint32_t dreg, uint32_t imm)
{
	if (imm == 16) {
		emit_ror_imm(st, dreg, 8);
		emit_movzwl(st, dreg, dreg);
	} else
		emit_be2le_48(st, dreg, imm);
}
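
/*
 * There is no 16-bit bswap: 'ror $8' on the 16-bit register swaps its two
 * bytes, then movzwl clears bits 63:16, as the BPF endianness ops require.
 */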
/*
 * emit le2be conversion.
 * In general it is a NOP for (little-endian) x86:
 * just clear the upper bits.
 */
static void
emit_le2be(struct bpf_jit_state *st, uint32_t dreg, uint32_t imm)
{
	if (imm == 16)
		emit_movzwl(st, dreg, dreg);
	else if (imm == 32)
		emit_mov_reg(st, BPF_ALU | EBPF_MOV | BPF_X, dreg, dreg);
}
/*
 * emit one of: add/and/or/sub/xor <imm>, %<dreg>
 */
static void
emit_alu_imm(struct bpf_jit_state *st, uint32_t op, uint32_t dreg, uint32_t imm)
{
	uint8_t mod, opcode;
	uint32_t bop, imsz;

	const uint8_t op8 = 0x83;
	const uint8_t op32 = 0x81;
	static const uint8_t mods[] = {
		[GET_BPF_OP(BPF_ADD)] = 0,
		[GET_BPF_OP(BPF_AND)] = 4,
		[GET_BPF_OP(BPF_OR)] = 1,
		[GET_BPF_OP(BPF_SUB)] = 5,
		[GET_BPF_OP(BPF_XOR)] = 6,
	};

	bop = GET_BPF_OP(op);
	mod = mods[bop];

	imsz = imm_size(imm);
	opcode = (imsz == 1) ? op8 : op32;

	emit_rex(st, op, 0, dreg);
	emit_bytes(st, &opcode, sizeof(opcode));
	emit_modregrm(st, MOD_DIRECT, mod, dreg);
	emit_imm(st, imm, imsz);
}
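
/*
 * 0x83 is the ALU-group opcode with a sign-extended imm8, 0x81 the imm32
 * form; ModRM.reg acts as the sub-opcode. E.g. 'add $0x7f, %rbx' encodes
 * as 48 83 C3 7F, while 'add $0x80, %rbx' needs 48 81 C3 80 00 00 00.
 */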
/*
 * emit one of:
 *   add %<sreg>, %<dreg>
 *   and %<sreg>, %<dreg>
 *   or %<sreg>, %<dreg>
 *   sub %<sreg>, %<dreg>
 *   xor %<sreg>, %<dreg>
 */
static void
emit_alu_reg(struct bpf_jit_state *st, uint32_t op, uint32_t sreg,
	uint32_t dreg)
{
	uint32_t bop;

	static const uint8_t ops[] = {
		[GET_BPF_OP(BPF_ADD)] = 0x01,
		[GET_BPF_OP(BPF_AND)] = 0x21,
		[GET_BPF_OP(BPF_OR)] = 0x09,
		[GET_BPF_OP(BPF_SUB)] = 0x29,
		[GET_BPF_OP(BPF_XOR)] = 0x31,
	};

	bop = GET_BPF_OP(op);

	emit_rex(st, op, sreg, dreg);
	emit_bytes(st, &ops[bop], sizeof(ops[bop]));
	emit_modregrm(st, MOD_DIRECT, sreg, dreg);
}
/*
 * emit the opcode and ModRM part of a shift (shl/shr/sar) instruction;
 * the immediate, if any, is emitted by the caller.
 */
static void
emit_shift(struct bpf_jit_state *st, uint32_t op, uint32_t dreg)
{
	uint8_t mod;
	uint32_t bop, opx;

	/* 0xC1 = shift by imm8, 0xD3 = shift by %cl */
	static const uint8_t ops[] = {0xC1, 0xD3};
	static const uint8_t mods[] = {
		[GET_BPF_OP(BPF_LSH)] = 4,
		[GET_BPF_OP(BPF_RSH)] = 5,
		[GET_BPF_OP(EBPF_ARSH)] = 7,
	};

	bop = GET_BPF_OP(op);
	mod = mods[bop];

	opx = (BPF_SRC(op) == BPF_X);

	emit_rex(st, op, 0, dreg);
	emit_bytes(st, &ops[opx], sizeof(ops[opx]));
	emit_modregrm(st, MOD_DIRECT, mod, dreg);
}
/*
 * emit one of: shl/shr/sar <imm>, %<dreg>
 */
static void
emit_shift_imm(struct bpf_jit_state *st, uint32_t op, uint32_t dreg,
	uint32_t imm)
{
	emit_shift(st, op, dreg);
	emit_imm(st, imm, imm_size(imm));
}
/*
 * emit one of: shl/shr/sar %cl, %<dreg>
 * note that rcx is implicitly used as a source register, so a few extra
 * instructions for register spillage might be necessary.
 */
static void
emit_shift_reg(struct bpf_jit_state *st, uint32_t op, uint32_t sreg,
	uint32_t dreg)
{
	/* move the shift count into %rcx */
	if (sreg != RCX)
		emit_xchg_reg(st, RCX, sreg);

	emit_shift(st, op, (dreg == RCX) ? sreg : dreg);

	/* restore the original %rcx value */
	if (sreg != RCX)
		emit_xchg_reg(st, RCX, sreg);
}
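
/*
 * Variable shifts on x86 only take the count in %cl, hence the xchg dance:
 * swapping (rather than moving) preserves the previous RCX contents, and
 * if the destination itself was RCX it now lives in sreg -- which is why
 * the shift above targets sreg in that case.
 */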
/*
 * emit mov <imm>, %<dreg>
 */
static void
emit_mov_imm(struct bpf_jit_state *st, uint32_t op, uint32_t dreg, uint32_t imm)
{
	const uint8_t ops = 0xC7;

	if (imm == 0) {
		/* replace 'mov 0, %<dst>' with 'xor %<dst>, %<dst>' */
		op = BPF_CLASS(op) | BPF_XOR | BPF_X;
		emit_alu_reg(st, op, dreg, dreg);
		return;
	}

	emit_rex(st, op, 0, dreg);
	emit_bytes(st, &ops, sizeof(ops));
	emit_modregrm(st, MOD_DIRECT, 0, dreg);
	emit_imm(st, imm, sizeof(imm));
}
/*
 * emit mov <imm64>, %<dreg>
 */
static void
emit_ld_imm64(struct bpf_jit_state *st, uint32_t dreg, uint32_t imm0,
	uint32_t imm1)
{
	uint32_t op;

	const uint8_t ops = 0xB8;

	op = (imm1 == 0) ? BPF_ALU : EBPF_ALU64;

	emit_rex(st, op, 0, dreg);
	emit_opcode(st, ops, dreg);

	emit_imm(st, imm0, sizeof(imm0));
	if (imm1 != 0)
		emit_imm(st, imm1, sizeof(imm1));
}
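
/*
 * REX.W + B8+r ('movabs') is the only x86_64 instruction carrying a full
 * 64-bit immediate; when the upper half is zero, the plain 32-bit B8+r
 * form suffices since it zero-extends into the high bits.
 */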
/*
 * emit mul instruction.
 * note that rax:rdx are implicitly used as source/destination registers,
 * so some reg spillage is necessary.
 */
static void
emit_mul(struct bpf_jit_state *st, uint32_t op, uint32_t sreg, uint32_t dreg,
	uint32_t imm)
{
	const uint8_t ops = 0xF7;
	const uint8_t mods = 4;

	/* save rax & rdx */
	emit_mov_reg(st, EBPF_ALU64 | EBPF_MOV | BPF_X, RAX, REG_TMP0);
	emit_mov_reg(st, EBPF_ALU64 | EBPF_MOV | BPF_X, RDX, REG_TMP1);

	/* rax = dreg */
	emit_mov_reg(st, EBPF_ALU64 | EBPF_MOV | BPF_X, dreg, RAX);

	if (BPF_SRC(op) == BPF_X)
		/* rdx = sreg */
		emit_mov_reg(st, EBPF_ALU64 | EBPF_MOV | BPF_X,
			sreg == RAX ? REG_TMP0 : sreg, RDX);
	else
		/* rdx = imm */
		emit_mov_imm(st, EBPF_ALU64 | EBPF_MOV | BPF_K, RDX, imm);

	/* mul %rdx */
	emit_rex(st, op, RAX, RDX);
	emit_bytes(st, &ops, sizeof(ops));
	emit_modregrm(st, MOD_DIRECT, mods, RDX);

	if (dreg != RDX)
		/* restore rdx */
		emit_mov_reg(st, EBPF_ALU64 | EBPF_MOV | BPF_X, REG_TMP1, RDX);

	if (dreg != RAX) {
		/* dreg = rax */
		emit_mov_reg(st, EBPF_ALU64 | EBPF_MOV | BPF_X, RAX, dreg);

		/* restore rax */
		emit_mov_reg(st, EBPF_ALU64 | EBPF_MOV | BPF_X, REG_TMP0, RAX);
	}
}
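
/*
 * 'mul %rdx' leaves the 128-bit product in rdx:rax; only the low half in
 * rax is copied back to dreg, matching BPF's mul semantics, after which
 * the spilled rax/rdx values are restored.
 */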
/*
 * emit mov <ofs>(%<sreg>), %<dreg>
 * note that for non 64-bit ops, higher bits have to be cleared.
 */
static void
emit_ld_reg(struct bpf_jit_state *st, uint32_t op, uint32_t sreg, uint32_t dreg,
	int32_t ofs)
{
	uint32_t mods, opsz;
	const uint8_t op32 = 0x8B;
	const uint8_t op16[] = {0x0F, 0xB7};
	const uint8_t op8[] = {0x0F, 0xB6};

	emit_rex(st, op, dreg, sreg);

	opsz = BPF_SIZE(op);
	if (opsz == BPF_B)
		emit_bytes(st, op8, sizeof(op8));
	else if (opsz == BPF_H)
		emit_bytes(st, op16, sizeof(op16));
	else
		emit_bytes(st, &op32, sizeof(op32));

	mods = (imm_size(ofs) == 1) ? MOD_IDISP8 : MOD_IDISP32;

	emit_modregrm(st, mods, dreg, sreg);
	if (sreg == RSP || sreg == R12)
		emit_sib(st, SIB_SCALE_1, sreg, sreg);
	emit_imm(st, ofs, imm_size(ofs));
}
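
/*
 * rm = 100b in ModRM does not mean 'base RSP' but 'SIB byte follows', so
 * addressing off RSP or R12 (same low three bits) needs an explicit SIB
 * byte; index = 100b inside the SIB in turn means 'no index', so passing
 * the register as both index and base encodes a plain (%reg) operand.
 */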
/*
 * emit one of:
 *   mov %<sreg>, <ofs>(%<dreg>)
 *   mov <imm>, <ofs>(%<dreg>)
 */
static void
emit_st_common(struct bpf_jit_state *st, uint32_t op, uint32_t sreg,
	uint32_t dreg, uint32_t imm, int32_t ofs)
{
	uint32_t mods, imsz, opsz, opx;
	const uint8_t prfx16 = 0x66;

	/* 8 bit instruction opcodes */
	static const uint8_t op8[] = {0xC6, 0x88};

	/* 16/32/64 bit instruction opcodes */
	static const uint8_t ops[] = {0xC7, 0x89};

	/* does the instruction use an immediate value or a src reg? */
	opx = (BPF_CLASS(op) == BPF_STX);

	opsz = BPF_SIZE(op);
	if (opsz == BPF_H)
		emit_bytes(st, &prfx16, sizeof(prfx16));

	emit_rex(st, op, sreg, dreg);

	if (opsz == BPF_B)
		emit_bytes(st, &op8[opx], sizeof(op8[opx]));
	else
		emit_bytes(st, &ops[opx], sizeof(ops[opx]));

	imsz = imm_size(ofs);
	mods = (imsz == 1) ? MOD_IDISP8 : MOD_IDISP32;

	emit_modregrm(st, mods, sreg, dreg);

	if (dreg == RSP || dreg == R12)
		emit_sib(st, SIB_SCALE_1, dreg, dreg);

	emit_imm(st, ofs, imsz);

	if (opx == 0) {
		imsz = RTE_MIN(bpf_size(opsz), sizeof(imm));
		emit_imm(st, imm, imsz);
	}
}
static void
emit_st_imm(struct bpf_jit_state *st, uint32_t op, uint32_t dreg, uint32_t imm,
	int32_t ofs)
{
	emit_st_common(st, op, 0, dreg, imm, ofs);
}

static void
emit_st_reg(struct bpf_jit_state *st, uint32_t op, uint32_t sreg, uint32_t dreg,
	int32_t ofs)
{
	emit_st_common(st, op, sreg, dreg, 0, ofs);
}
/*
 * emit lock add %<sreg>, <ofs>(%<dreg>)
 */
static void
emit_st_xadd(struct bpf_jit_state *st, uint32_t op, uint32_t sreg,
	uint32_t dreg, int32_t ofs)
{
	uint32_t imsz, mods;

	const uint8_t lck = 0xF0; /* lock prefix */
	const uint8_t ops = 0x01; /* add opcode */

	imsz = imm_size(ofs);
	mods = (imsz == 1) ? MOD_IDISP8 : MOD_IDISP32;

	emit_bytes(st, &lck, sizeof(lck));
	emit_rex(st, op, sreg, dreg);
	emit_bytes(st, &ops, sizeof(ops));
	emit_modregrm(st, mods, sreg, dreg);
	emit_imm(st, ofs, imsz);
}
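
/*
 * Classic BPF XADD does not deliver the fetched old value back to a
 * register, so an atomic 'lock add' is all that is needed here; a real
 * 'lock xadd' would pointlessly write the old memory value into sreg.
 */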
/*
 * emit:
 *   mov <imm64>, %rax
 *   call *%rax
 */
static void
emit_call(struct bpf_jit_state *st, uintptr_t trg)
{
	const uint8_t ops = 0xFF;
	const uint8_t mods = 2;

	emit_ld_imm64(st, RAX, trg, trg >> 32);
	emit_bytes(st, &ops, sizeof(ops));
	emit_modregrm(st, MOD_DIRECT, mods, RAX);
}
/*
 * emit jmp <ofs>
 * where 'ofs' is the target offset for the native code.
 */
static void
emit_abs_jmp(struct bpf_jit_state *st, int32_t ofs)
{
	int32_t joff;
	uint32_t imsz;

	const uint8_t op8 = 0xEB;
	const uint8_t op32 = 0xE9;

	const int32_t sz8 = sizeof(op8) + sizeof(uint8_t);
	const int32_t sz32 = sizeof(op32) + sizeof(uint32_t);

	/* max possible jmp instruction size */
	const int32_t iszm = RTE_MAX(sz8, sz32);

	joff = ofs - st->sz;
	imsz = RTE_MAX(imm_size(joff), imm_size(joff + iszm));

	if (imsz == 1) {
		emit_bytes(st, &op8, sizeof(op8));
		joff -= sz8;
	} else {
		emit_bytes(st, &op32, sizeof(op32));
		joff -= sz32;
	}

	emit_imm(st, joff, imsz);
}
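
/*
 * The branch displacement is relative to the end of the jmp, whose own
 * length depends on the displacement width chosen; sizing both joff and
 * joff + iszm keeps the choice conservative so it stays stable across
 * the sizing passes in bpf_jit_x86().
 */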
/*
 * emit jmp <ofs>
 * where 'ofs' is the target offset for the BPF bytecode.
 */
static void
emit_jmp(struct bpf_jit_state *st, int32_t ofs)
{
	emit_abs_jmp(st, st->off[st->idx + ofs]);
}
/*
 * emit one of:
 *   cmovz %<sreg>, %<dreg>
 *   cmovne %<sreg>, %<dreg>
 *   cmova %<sreg>, %<dreg>
 *   cmovb %<sreg>, %<dreg>
 *   cmovae %<sreg>, %<dreg>
 *   cmovbe %<sreg>, %<dreg>
 *   cmovg %<sreg>, %<dreg>
 *   cmovl %<sreg>, %<dreg>
 *   cmovge %<sreg>, %<dreg>
 *   cmovle %<sreg>, %<dreg>
 */
static void
emit_movcc_reg(struct bpf_jit_state *st, uint32_t op, uint32_t sreg,
	uint32_t dreg)
{
	uint32_t bop;

	static const uint8_t ops[][2] = {
		[GET_BPF_OP(BPF_JEQ)] = {0x0F, 0x44},   /* CMOVZ */
		[GET_BPF_OP(EBPF_JNE)] = {0x0F, 0x45},  /* CMOVNE */
		[GET_BPF_OP(BPF_JGT)] = {0x0F, 0x47},   /* CMOVA */
		[GET_BPF_OP(EBPF_JLT)] = {0x0F, 0x42},  /* CMOVB */
		[GET_BPF_OP(BPF_JGE)] = {0x0F, 0x43},   /* CMOVAE */
		[GET_BPF_OP(EBPF_JLE)] = {0x0F, 0x46},  /* CMOVBE */
		[GET_BPF_OP(EBPF_JSGT)] = {0x0F, 0x4F}, /* CMOVG */
		[GET_BPF_OP(EBPF_JSLT)] = {0x0F, 0x4C}, /* CMOVL */
		[GET_BPF_OP(EBPF_JSGE)] = {0x0F, 0x4D}, /* CMOVGE */
		[GET_BPF_OP(EBPF_JSLE)] = {0x0F, 0x4E}, /* CMOVLE */
		[GET_BPF_OP(BPF_JSET)] = {0x0F, 0x45},  /* CMOVNE */
	};

	bop = GET_BPF_OP(op);

	emit_rex(st, op, dreg, sreg);
	emit_bytes(st, ops[bop], sizeof(ops[bop]));
	emit_modregrm(st, MOD_DIRECT, dreg, sreg);
}
/*
 * emit one of the conditional jumps (je/jne/ja/jb/jae/jbe/jg/jl/jge/jle),
 * where 'ofs' is the target offset for the native code.
 */
static void
emit_abs_jcc(struct bpf_jit_state *st, uint32_t op, int32_t ofs)
{
	uint32_t bop, imsz;
	int32_t joff;

	static const uint8_t op8[] = {
		[GET_BPF_OP(BPF_JEQ)] = 0x74,   /* JE */
		[GET_BPF_OP(EBPF_JNE)] = 0x75,  /* JNE */
		[GET_BPF_OP(BPF_JGT)] = 0x77,   /* JA */
		[GET_BPF_OP(EBPF_JLT)] = 0x72,  /* JB */
		[GET_BPF_OP(BPF_JGE)] = 0x73,   /* JAE */
		[GET_BPF_OP(EBPF_JLE)] = 0x76,  /* JBE */
		[GET_BPF_OP(EBPF_JSGT)] = 0x7F, /* JG */
		[GET_BPF_OP(EBPF_JSLT)] = 0x7C, /* JL */
		[GET_BPF_OP(EBPF_JSGE)] = 0x7D, /* JGE */
		[GET_BPF_OP(EBPF_JSLE)] = 0x7E, /* JLE */
		[GET_BPF_OP(BPF_JSET)] = 0x75,  /* JNE */
	};

	static const uint8_t op32[][2] = {
		[GET_BPF_OP(BPF_JEQ)] = {0x0F, 0x84},   /* JE */
		[GET_BPF_OP(EBPF_JNE)] = {0x0F, 0x85},  /* JNE */
		[GET_BPF_OP(BPF_JGT)] = {0x0F, 0x87},   /* JA */
		[GET_BPF_OP(EBPF_JLT)] = {0x0F, 0x82},  /* JB */
		[GET_BPF_OP(BPF_JGE)] = {0x0F, 0x83},   /* JAE */
		[GET_BPF_OP(EBPF_JLE)] = {0x0F, 0x86},  /* JBE */
		[GET_BPF_OP(EBPF_JSGT)] = {0x0F, 0x8F}, /* JG */
		[GET_BPF_OP(EBPF_JSLT)] = {0x0F, 0x8C}, /* JL */
		[GET_BPF_OP(EBPF_JSGE)] = {0x0F, 0x8D}, /* JGE */
		[GET_BPF_OP(EBPF_JSLE)] = {0x0F, 0x8E}, /* JLE */
		[GET_BPF_OP(BPF_JSET)] = {0x0F, 0x85},  /* JNE */
	};

	const int32_t sz8 = sizeof(op8[0]) + sizeof(uint8_t);
	const int32_t sz32 = sizeof(op32[0]) + sizeof(uint32_t);

	/* max possible jcc instruction size */
	const int32_t iszm = RTE_MAX(sz8, sz32);

	joff = ofs - st->sz;
	imsz = RTE_MAX(imm_size(joff), imm_size(joff + iszm));

	bop = GET_BPF_OP(op);

	if (imsz == 1) {
		emit_bytes(st, &op8[bop], sizeof(op8[bop]));
		joff -= sz8;
	} else {
		emit_bytes(st, op32[bop], sizeof(op32[bop]));
		joff -= sz32;
	}

	emit_imm(st, joff, imsz);
}
/*
 * emit one of the conditional jumps listed above,
 * where 'ofs' is the target offset for the BPF bytecode.
 */
static void
emit_jcc(struct bpf_jit_state *st, uint32_t op, int32_t ofs)
{
	emit_abs_jcc(st, op, st->off[st->idx + ofs]);
}
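
/*
 * BPF jump offsets count instructions relative to the next one; st->off[]
 * maps instruction index to native byte offset (callers pass ins->off + 1),
 * so st->off[st->idx + ofs] is the native target. On early sizing passes
 * the table still holds INT32_MAX placeholders; repeated passes refine it.
 */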
/*
 * emit cmp <imm>, %<dreg>
 */
static void
emit_cmp_imm(struct bpf_jit_state *st, uint32_t op, uint32_t dreg, uint32_t imm)
{
	uint8_t ops;
	uint32_t imsz;

	const uint8_t op8 = 0x83;
	const uint8_t op32 = 0x81;
	const uint8_t mods = 7;

	imsz = imm_size(imm);
	ops = (imsz == 1) ? op8 : op32;

	emit_rex(st, op, 0, dreg);
	emit_bytes(st, &ops, sizeof(ops));
	emit_modregrm(st, MOD_DIRECT, mods, dreg);
	emit_imm(st, imm, imsz);
}
/*
 * emit test <imm>, %<dreg>
 */
static void
emit_tst_imm(struct bpf_jit_state *st, uint32_t op, uint32_t dreg, uint32_t imm)
{
	const uint8_t ops = 0xF7;
	const uint8_t mods = 0;

	emit_rex(st, op, 0, dreg);
	emit_bytes(st, &ops, sizeof(ops));
	emit_modregrm(st, MOD_DIRECT, mods, dreg);
	/* TEST (F7 /0) has no imm8 form, always emit a full imm32 */
	emit_imm(st, imm, sizeof(imm));
}
static void
emit_jcc_imm(struct bpf_jit_state *st, uint32_t op, uint32_t dreg,
	uint32_t imm, int32_t ofs)
{
	if (BPF_OP(op) == BPF_JSET)
		emit_tst_imm(st, EBPF_ALU64, dreg, imm);
	else
		emit_cmp_imm(st, EBPF_ALU64, dreg, imm);

	emit_jcc(st, op, ofs);
}
/*
 * emit test %<sreg>, %<dreg>
 */
static void
emit_tst_reg(struct bpf_jit_state *st, uint32_t op, uint32_t sreg,
	uint32_t dreg)
{
	const uint8_t ops = 0x85;

	emit_rex(st, op, sreg, dreg);
	emit_bytes(st, &ops, sizeof(ops));
	emit_modregrm(st, MOD_DIRECT, sreg, dreg);
}
/*
 * emit cmp %<sreg>, %<dreg>
 */
static void
emit_cmp_reg(struct bpf_jit_state *st, uint32_t op, uint32_t sreg,
	uint32_t dreg)
{
	const uint8_t ops = 0x39;

	emit_rex(st, op, sreg, dreg);
	emit_bytes(st, &ops, sizeof(ops));
	emit_modregrm(st, MOD_DIRECT, sreg, dreg);
}
static void
emit_jcc_reg(struct bpf_jit_state *st, uint32_t op, uint32_t sreg,
	uint32_t dreg, int32_t ofs)
{
	if (BPF_OP(op) == BPF_JSET)
		emit_tst_reg(st, EBPF_ALU64, sreg, dreg);
	else
		emit_cmp_reg(st, EBPF_ALU64, sreg, dreg);

	emit_jcc(st, op, ofs);
}
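
/*
 * BPF_JSET means 'jump if (dst & src) != 0', hence test + jne; all other
 * conditions are cmp + the matching jcc, with unsigned ja/jb/jae/jbe for
 * JGT/JLT/JGE/JLE and signed jg/jl/jge/jle for their JS* counterparts.
 */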
/*
 * emit div/mod instruction.
 * note that rax:rdx are implicitly used as source/destination registers,
 * so some reg spillage is necessary.
 * for divisor as immediate value:
 * a scratch register is loaded with the immediate first.
 */
static void
emit_div(struct bpf_jit_state *st, uint32_t op, uint32_t sreg, uint32_t dreg,
	uint32_t imm)
{
	uint32_t sr;

	const uint8_t ops = 0xF7;
	const uint8_t mods = 6;

	if (BPF_SRC(op) == BPF_X) {

		/* check that src divisor is not zero */
		emit_tst_reg(st, BPF_CLASS(op), sreg, sreg);

		/* exit with return value zero */
		emit_movcc_reg(st, BPF_CLASS(op) | BPF_JEQ | BPF_X, sreg, RAX);
		emit_abs_jcc(st, BPF_JMP | BPF_JEQ | BPF_K, st->exit.off);
	}

	/* save rax & rdx */
	emit_mov_reg(st, EBPF_ALU64 | EBPF_MOV | BPF_X, RAX, REG_TMP0);
	emit_mov_reg(st, EBPF_ALU64 | EBPF_MOV | BPF_X, RDX, REG_TMP1);

	/* fill rax & rdx */
	emit_mov_reg(st, EBPF_ALU64 | EBPF_MOV | BPF_X, dreg, RAX);
	emit_mov_imm(st, EBPF_ALU64 | EBPF_MOV | BPF_K, RDX, 0);

	if (BPF_SRC(op) == BPF_X) {
		sr = sreg;
		if (sr == RAX)
			sr = REG_TMP0;
		else if (sr == RDX)
			sr = REG_TMP1;
	} else {
		/* R9 is a scratch register not used for BPF values */
		sr = R9;
		emit_mov_imm(st, EBPF_ALU64 | EBPF_MOV | BPF_K, sr, imm);
	}

	/* div %<sr> */
	emit_rex(st, op, 0, sr);
	emit_bytes(st, &ops, sizeof(ops));
	emit_modregrm(st, MOD_DIRECT, mods, sr);

	/* copy the result (quotient or remainder) into dreg */
	if (BPF_OP(op) == BPF_DIV)
		emit_mov_reg(st, EBPF_ALU64 | EBPF_MOV | BPF_X, RAX, dreg);
	else
		emit_mov_reg(st, EBPF_ALU64 | EBPF_MOV | BPF_X, RDX, dreg);

	/* restore rax & rdx */
	if (dreg != RAX)
		emit_mov_reg(st, EBPF_ALU64 | EBPF_MOV | BPF_X, REG_TMP0, RAX);
	if (dreg != RDX)
		emit_mov_reg(st, EBPF_ALU64 | EBPF_MOV | BPF_X, REG_TMP1, RDX);
}
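
/*
 * For a register divisor the zero check above makes division total:
 * if sreg == 0, the cmovz copies that zero into RAX (the return register)
 * and the je routes straight to the shared epilog, so the program returns
 * 0 instead of taking a #DE fault.
 */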
/*
 * save callee-saved registers and allocate the stack frame, if needed.
 */
static void
emit_prolog(struct bpf_jit_state *st, int32_t stack_size)
{
	uint32_t i;
	int32_t spil, ofs;

	spil = 0;
	for (i = 0; i != RTE_DIM(save_regs); i++)
		spil += INUSE(st->reguse, save_regs[i]);

	/* we can avoid touching the stack at all */
	if (spil == 0)
		return;

	emit_alu_imm(st, EBPF_ALU64 | BPF_SUB | BPF_K, RSP,
		spil * sizeof(uint64_t));

	ofs = 0;
	for (i = 0; i != RTE_DIM(save_regs); i++) {
		if (INUSE(st->reguse, save_regs[i]) != 0) {
			emit_st_reg(st, BPF_STX | BPF_MEM | EBPF_DW,
				save_regs[i], RSP, ofs);
			ofs += sizeof(uint64_t);
		}
	}

	if (INUSE(st->reguse, RBP) != 0) {
		emit_mov_reg(st, EBPF_ALU64 | EBPF_MOV | BPF_X, RSP, RBP);
		emit_alu_imm(st, EBPF_ALU64 | BPF_SUB | BPF_K, RSP, stack_size);
	}
}
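
/*
 * RBP stands in for the read-only BPF frame pointer (eBPF R10) here: it is
 * set up, and the BPF stack area below it reserved, only when the program
 * actually references it (hence, presumably, the 'keep RBP as the last
 * one' note on save_regs above).
 */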
/*
 * emit ret
 */
static void
emit_ret(struct bpf_jit_state *st)
{
	const uint8_t ops = 0xC3;

	emit_bytes(st, &ops, sizeof(ops));
}
/*
 * emit the epilog: restore callee-saved registers and return.
 */
static void
emit_epilog(struct bpf_jit_state *st)
{
	uint32_t i;
	int32_t spil, ofs;

	/* if we already have an epilog, generate a jump to it */
	if (st->exit.num++ != 0) {
		emit_abs_jmp(st, st->exit.off);
		return;
	}

	/* store offset of epilog block */
	st->exit.off = st->sz;

	spil = 0;
	for (i = 0; i != RTE_DIM(save_regs); i++)
		spil += INUSE(st->reguse, save_regs[i]);

	if (spil != 0) {

		if (INUSE(st->reguse, RBP) != 0)
			emit_mov_reg(st, EBPF_ALU64 | EBPF_MOV | BPF_X,
				RBP, RSP);

		ofs = 0;
		for (i = 0; i != RTE_DIM(save_regs); i++) {
			if (INUSE(st->reguse, save_regs[i]) != 0) {
				emit_ld_reg(st, BPF_LDX | BPF_MEM | EBPF_DW,
					RSP, save_regs[i], ofs);
				ofs += sizeof(uint64_t);
			}
		}

		emit_alu_imm(st, EBPF_ALU64 | BPF_ADD | BPF_K, RSP,
			spil * sizeof(uint64_t));
	}

	emit_ret(st);
}
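
/*
 * All BPF EXIT instructions share one epilog block: the first one emits it
 * and records its offset in st->exit.off, later ones (and the divide-by-
 * zero path in emit_div) just jump there.
 */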
/*
 * walk through the BPF code and translate each instruction into
 * its x86_64 equivalent.
 */
static int
emit(struct bpf_jit_state *st, const struct rte_bpf *bpf)
{
	uint32_t i, dr, op, sr;
	const struct ebpf_insn *ins;

	/* reset state fields */
	st->sz = 0;
	st->exit.num = 0;

	emit_prolog(st, bpf->stack_sz);

	for (i = 0; i != bpf->prm.nb_ins; i++) {

		st->idx = i;
		st->off[i] = st->sz;

		ins = bpf->prm.ins + i;

		dr = ebpf2x86[ins->dst_reg];
		sr = ebpf2x86[ins->src_reg];
		op = ins->code;

		switch (op) {
		/* 32 bit ALU IMM operations */
		case (BPF_ALU | BPF_ADD | BPF_K):
		case (BPF_ALU | BPF_SUB | BPF_K):
		case (BPF_ALU | BPF_AND | BPF_K):
		case (BPF_ALU | BPF_OR | BPF_K):
		case (BPF_ALU | BPF_XOR | BPF_K):
			emit_alu_imm(st, op, dr, ins->imm);
			break;
		case (BPF_ALU | BPF_LSH | BPF_K):
		case (BPF_ALU | BPF_RSH | BPF_K):
			emit_shift_imm(st, op, dr, ins->imm);
			break;
		case (BPF_ALU | EBPF_MOV | BPF_K):
			emit_mov_imm(st, op, dr, ins->imm);
			break;
		/* 32 bit ALU REG operations */
		case (BPF_ALU | BPF_ADD | BPF_X):
		case (BPF_ALU | BPF_SUB | BPF_X):
		case (BPF_ALU | BPF_AND | BPF_X):
		case (BPF_ALU | BPF_OR | BPF_X):
		case (BPF_ALU | BPF_XOR | BPF_X):
			emit_alu_reg(st, op, sr, dr);
			break;
		case (BPF_ALU | BPF_LSH | BPF_X):
		case (BPF_ALU | BPF_RSH | BPF_X):
			emit_shift_reg(st, op, sr, dr);
			break;
		case (BPF_ALU | EBPF_MOV | BPF_X):
			emit_mov_reg(st, op, sr, dr);
			break;
		case (BPF_ALU | BPF_NEG):
			emit_neg(st, op, dr);
			break;
		case (BPF_ALU | EBPF_END | EBPF_TO_BE):
			emit_be2le(st, dr, ins->imm);
			break;
		case (BPF_ALU | EBPF_END | EBPF_TO_LE):
			emit_le2be(st, dr, ins->imm);
			break;
		/* 64 bit ALU IMM operations */
		case (EBPF_ALU64 | BPF_ADD | BPF_K):
		case (EBPF_ALU64 | BPF_SUB | BPF_K):
		case (EBPF_ALU64 | BPF_AND | BPF_K):
		case (EBPF_ALU64 | BPF_OR | BPF_K):
		case (EBPF_ALU64 | BPF_XOR | BPF_K):
			emit_alu_imm(st, op, dr, ins->imm);
			break;
		case (EBPF_ALU64 | BPF_LSH | BPF_K):
		case (EBPF_ALU64 | BPF_RSH | BPF_K):
		case (EBPF_ALU64 | EBPF_ARSH | BPF_K):
			emit_shift_imm(st, op, dr, ins->imm);
			break;
		case (EBPF_ALU64 | EBPF_MOV | BPF_K):
			emit_mov_imm(st, op, dr, ins->imm);
			break;
		/* 64 bit ALU REG operations */
		case (EBPF_ALU64 | BPF_ADD | BPF_X):
		case (EBPF_ALU64 | BPF_SUB | BPF_X):
		case (EBPF_ALU64 | BPF_AND | BPF_X):
		case (EBPF_ALU64 | BPF_OR | BPF_X):
		case (EBPF_ALU64 | BPF_XOR | BPF_X):
			emit_alu_reg(st, op, sr, dr);
			break;
		case (EBPF_ALU64 | BPF_LSH | BPF_X):
		case (EBPF_ALU64 | BPF_RSH | BPF_X):
		case (EBPF_ALU64 | EBPF_ARSH | BPF_X):
			emit_shift_reg(st, op, sr, dr);
			break;
		case (EBPF_ALU64 | EBPF_MOV | BPF_X):
			emit_mov_reg(st, op, sr, dr);
			break;
		case (EBPF_ALU64 | BPF_NEG):
			emit_neg(st, op, dr);
			break;
		/* multiply instructions */
		case (BPF_ALU | BPF_MUL | BPF_K):
		case (BPF_ALU | BPF_MUL | BPF_X):
		case (EBPF_ALU64 | BPF_MUL | BPF_K):
		case (EBPF_ALU64 | BPF_MUL | BPF_X):
			emit_mul(st, op, sr, dr, ins->imm);
			break;
		/* divide instructions */
		case (BPF_ALU | BPF_DIV | BPF_K):
		case (BPF_ALU | BPF_MOD | BPF_K):
		case (BPF_ALU | BPF_DIV | BPF_X):
		case (BPF_ALU | BPF_MOD | BPF_X):
		case (EBPF_ALU64 | BPF_DIV | BPF_K):
		case (EBPF_ALU64 | BPF_MOD | BPF_K):
		case (EBPF_ALU64 | BPF_DIV | BPF_X):
		case (EBPF_ALU64 | BPF_MOD | BPF_X):
			emit_div(st, op, sr, dr, ins->imm);
			break;
		/* load instructions */
		case (BPF_LDX | BPF_MEM | BPF_B):
		case (BPF_LDX | BPF_MEM | BPF_H):
		case (BPF_LDX | BPF_MEM | BPF_W):
		case (BPF_LDX | BPF_MEM | EBPF_DW):
			emit_ld_reg(st, op, sr, dr, ins->off);
			break;
		/* load 64 bit immediate value */
		case (BPF_LD | BPF_IMM | EBPF_DW):
			emit_ld_imm64(st, dr, ins[0].imm, ins[1].imm);
			/* the immediate spans two BPF instructions */
			i++;
			break;
		/* store instructions */
		case (BPF_STX | BPF_MEM | BPF_B):
		case (BPF_STX | BPF_MEM | BPF_H):
		case (BPF_STX | BPF_MEM | BPF_W):
		case (BPF_STX | BPF_MEM | EBPF_DW):
			emit_st_reg(st, op, sr, dr, ins->off);
			break;
		case (BPF_ST | BPF_MEM | BPF_B):
		case (BPF_ST | BPF_MEM | BPF_H):
		case (BPF_ST | BPF_MEM | BPF_W):
		case (BPF_ST | BPF_MEM | EBPF_DW):
			emit_st_imm(st, op, dr, ins->imm, ins->off);
			break;
		/* atomic add instructions */
		case (BPF_STX | EBPF_XADD | BPF_W):
		case (BPF_STX | EBPF_XADD | EBPF_DW):
			emit_st_xadd(st, op, sr, dr, ins->off);
			break;
		/* jump instructions */
		case (BPF_JMP | BPF_JA):
			emit_jmp(st, ins->off + 1);
			break;
		/* jump IMM instructions */
		case (BPF_JMP | BPF_JEQ | BPF_K):
		case (BPF_JMP | EBPF_JNE | BPF_K):
		case (BPF_JMP | BPF_JGT | BPF_K):
		case (BPF_JMP | EBPF_JLT | BPF_K):
		case (BPF_JMP | BPF_JGE | BPF_K):
		case (BPF_JMP | EBPF_JLE | BPF_K):
		case (BPF_JMP | EBPF_JSGT | BPF_K):
		case (BPF_JMP | EBPF_JSLT | BPF_K):
		case (BPF_JMP | EBPF_JSGE | BPF_K):
		case (BPF_JMP | EBPF_JSLE | BPF_K):
		case (BPF_JMP | BPF_JSET | BPF_K):
			emit_jcc_imm(st, op, dr, ins->imm, ins->off + 1);
			break;
		/* jump REG instructions */
		case (BPF_JMP | BPF_JEQ | BPF_X):
		case (BPF_JMP | EBPF_JNE | BPF_X):
		case (BPF_JMP | BPF_JGT | BPF_X):
		case (BPF_JMP | EBPF_JLT | BPF_X):
		case (BPF_JMP | BPF_JGE | BPF_X):
		case (BPF_JMP | EBPF_JLE | BPF_X):
		case (BPF_JMP | EBPF_JSGT | BPF_X):
		case (BPF_JMP | EBPF_JSLT | BPF_X):
		case (BPF_JMP | EBPF_JSGE | BPF_X):
		case (BPF_JMP | EBPF_JSLE | BPF_X):
		case (BPF_JMP | BPF_JSET | BPF_X):
			emit_jcc_reg(st, op, sr, dr, ins->off + 1);
			break;
		/* call instructions */
		case (BPF_JMP | EBPF_CALL):
			emit_call(st,
				(uintptr_t)bpf->prm.xsym[ins->imm].func.val);
			break;
		/* return instruction */
		case (BPF_JMP | EBPF_EXIT):
			emit_epilog(st);
			break;
		default:
			RTE_BPF_LOG(ERR,
				"%s(%p): invalid opcode %#x at pc: %u;\n",
				__func__, bpf, ins->code, i);
			return -EINVAL;
		}
	}

	return 0;
}
/*
 * produce a native ISA version of the given BPF code.
 */
int
bpf_jit_x86(struct rte_bpf *bpf)
{
	int32_t rc;
	uint32_t i;
	size_t sz;
	struct bpf_jit_state st;

	/* init state */
	memset(&st, 0, sizeof(st));
	st.off = malloc(bpf->prm.nb_ins * sizeof(st.off[0]));
	if (st.off == NULL)
		return -ENOMEM;

	/* fill with fake offsets */
	st.exit.off = INT32_MAX;
	for (i = 0; i != bpf->prm.nb_ins; i++)
		st.off[i] = INT32_MAX;

	/*
	 * dry runs, used to calculate total code size and valid jump offsets.
	 * stop when we get minimal possible size
	 */
	do {
		sz = st.sz;
		rc = emit(&st, bpf);
	} while (rc == 0 && sz != st.sz);
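
	/*
	 * Each pass re-records instruction offsets in st.off[], so jumps that
	 * first targeted the INT32_MAX placeholders (and hence used imm32
	 * forms) can shrink once real offsets are known; iterating until the
	 * size stops changing leaves every branch with a consistent encoding.
	 */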
	if (rc == 0) {
		/* allocate memory needed */
		st.ins = mmap(NULL, st.sz, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (st.ins == MAP_FAILED)
			rc = -ENOMEM;
		else
			/* generate code */
			rc = emit(&st, bpf);
	}

	/* make the generated code executable */
	if (rc == 0 && mprotect(st.ins, st.sz, PROT_READ | PROT_EXEC) != 0)
		rc = -ENOMEM;

	if (rc != 0)
		munmap(st.ins, st.sz);
	else {
		bpf->jit.func = (void *)st.ins;
		bpf->jit.sz = st.sz;
	}

	free(st.off);
	return rc;
}