X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_bpf%2Fbpf_jit_arm64.c;h=a5a5d46f0622a099c35404c719a538b4de813527;hb=be848992298b7a64a161487600a4070e52b9d26d;hp=5d2ce378cc9e03607f0ccefe6cadeedf66549f91;hpb=111e2a747a4fde20bfcb093936efbb09dae8d4ec;p=dpdk.git

diff --git a/lib/librte_bpf/bpf_jit_arm64.c b/lib/librte_bpf/bpf_jit_arm64.c
index 5d2ce378cc..a5a5d46f06 100644
--- a/lib/librte_bpf/bpf_jit_arm64.c
+++ b/lib/librte_bpf/bpf_jit_arm64.c
@@ -6,6 +6,7 @@
 #include 
 #include 
+#include <rte_byteorder.h>
 
 #include "bpf_impl.h"
 
@@ -43,6 +44,17 @@ struct a64_jit_ctx {
 	uint8_t foundcall; /* Found EBPF_CALL class code in eBPF pgm */
 };
 
+static int
+check_immr_imms(bool is64, uint8_t immr, uint8_t imms)
+{
+	const unsigned int width = is64 ? 64 : 32;
+
+	if (immr >= width || imms >= width)
+		return 1;
+
+	return 0;
+}
+
 static int
 check_mov_hw(bool is64, const uint8_t val)
 {
@@ -54,6 +66,15 @@ check_mov_hw(bool is64, const uint8_t val)
 	return 0;
 }
 
+static int
+check_ls_sz(uint8_t sz)
+{
+	if (sz == BPF_B || sz == BPF_H || sz == BPF_W || sz == EBPF_DW)
+		return 0;
+
+	return 1;
+}
+
 static int
 check_reg(uint8_t r)
 {
@@ -84,6 +105,112 @@ check_invalid_args(struct a64_jit_ctx *ctx, uint32_t limit)
 	return 0;
 }
 
+static int
+jump_offset_init(struct a64_jit_ctx *ctx, struct rte_bpf *bpf)
+{
+	uint32_t i;
+
+	ctx->map = malloc(bpf->prm.nb_ins * sizeof(ctx->map[0]));
+	if (ctx->map == NULL)
+		return -ENOMEM;
+
+	/* Fill with fake offsets */
+	for (i = 0; i != bpf->prm.nb_ins; i++) {
+		ctx->map[i].off = INT32_MAX;
+		ctx->map[i].off_to_b = 0;
+	}
+	return 0;
+}
+
+static void
+jump_offset_fini(struct a64_jit_ctx *ctx)
+{
+	free(ctx->map);
+}
+
+static void
+jump_offset_update(struct a64_jit_ctx *ctx, uint32_t ebpf_idx)
+{
+	if (is_first_pass(ctx))
+		ctx->map[ebpf_idx].off = ctx->idx;
+}
+
+static void
+jump_offset_to_branch_update(struct a64_jit_ctx *ctx, uint32_t ebpf_idx)
+{
+	if (is_first_pass(ctx))
+		ctx->map[ebpf_idx].off_to_b = ctx->idx - ctx->map[ebpf_idx].off;
+
+}
+
+static int32_t
+jump_offset_get(struct a64_jit_ctx *ctx, uint32_t from, int16_t offset)
+{
+	int32_t a64_from, a64_to;
+
+	a64_from = ctx->map[from].off + ctx->map[from].off_to_b;
+	a64_to = ctx->map[from + offset + 1].off;
+
+	if (a64_to == INT32_MAX)
+		return a64_to;
+
+	return a64_to - a64_from;
+}
+
+enum a64_cond_e {
+	A64_EQ = 0x0, /* == */
+	A64_NE = 0x1, /* != */
+	A64_CS = 0x2, /* Unsigned >= */
+	A64_CC = 0x3, /* Unsigned < */
+	A64_MI = 0x4, /* < 0 */
+	A64_PL = 0x5, /* >= 0 */
+	A64_VS = 0x6, /* Overflow */
+	A64_VC = 0x7, /* No overflow */
+	A64_HI = 0x8, /* Unsigned > */
+	A64_LS = 0x9, /* Unsigned <= */
+	A64_GE = 0xa, /* Signed >= */
+	A64_LT = 0xb, /* Signed < */
+	A64_GT = 0xc, /* Signed > */
+	A64_LE = 0xd, /* Signed <= */
+	A64_AL = 0xe, /* Always */
+};
+
+static int
+check_cond(uint8_t cond)
+{
+	return (cond >= A64_AL) ?
1 : 0; +} + +static uint8_t +ebpf_to_a64_cond(uint8_t op) +{ + switch (BPF_OP(op)) { + case BPF_JEQ: + return A64_EQ; + case BPF_JGT: + return A64_HI; + case EBPF_JLT: + return A64_CC; + case BPF_JGE: + return A64_CS; + case EBPF_JLE: + return A64_LS; + case BPF_JSET: + case EBPF_JNE: + return A64_NE; + case EBPF_JSGT: + return A64_GT; + case EBPF_JSLT: + return A64_LT; + case EBPF_JSGE: + return A64_GE; + case EBPF_JSLE: + return A64_LE; + default: + return UINT8_MAX; + } +} + /* Emit an instruction */ static inline void emit_insn(struct a64_jit_ctx *ctx, uint32_t insn, int error) @@ -259,6 +386,47 @@ emit_mov_imm(struct a64_jit_ctx *ctx, bool is64, uint8_t rd, uint64_t val) } } +static void +emit_ls(struct a64_jit_ctx *ctx, uint8_t sz, uint8_t rt, uint8_t rn, uint8_t rm, + bool load) +{ + uint32_t insn; + + insn = 0x1c1 << 21; + if (load) + insn |= 1 << 22; + if (sz == BPF_B) + insn |= 0 << 30; + else if (sz == BPF_H) + insn |= 1 << 30; + else if (sz == BPF_W) + insn |= 2 << 30; + else if (sz == EBPF_DW) + insn |= 3 << 30; + + insn |= rm << 16; + insn |= 0x1a << 10; /* LSL and S = 0 */ + insn |= rn << 5; + insn |= rt; + + emit_insn(ctx, insn, check_reg(rt) || check_reg(rn) || check_reg(rm) || + check_ls_sz(sz)); +} + +static void +emit_str(struct a64_jit_ctx *ctx, uint8_t sz, uint8_t rt, uint8_t rn, + uint8_t rm) +{ + emit_ls(ctx, sz, rt, rn, rm, 0); +} + +static void +emit_ldr(struct a64_jit_ctx *ctx, uint8_t sz, uint8_t rt, uint8_t rn, + uint8_t rm) +{ + emit_ls(ctx, sz, rt, rn, rm, 1); +} + #define A64_ADD 0x58 #define A64_SUB 0x258 static void @@ -288,6 +456,12 @@ emit_sub(struct a64_jit_ctx *ctx, bool is64, uint8_t rd, uint8_t rm) emit_add_sub(ctx, is64, rd, rd, rm, A64_SUB); } +static void +emit_neg(struct a64_jit_ctx *ctx, bool is64, uint8_t rd) +{ + emit_add_sub(ctx, is64, rd, A64_ZR, rd, A64_SUB); +} + static void emit_mul(struct a64_jit_ctx *ctx, bool is64, uint8_t rd, uint8_t rm) { @@ -304,6 +478,9 @@ emit_mul(struct a64_jit_ctx *ctx, bool is64, uint8_t rd, uint8_t rm) } #define A64_UDIV 0x2 +#define A64_LSLV 0x8 +#define A64_LSRV 0x9 +#define A64_ASRV 0xA static void emit_data_process_two_src(struct a64_jit_ctx *ctx, bool is64, uint8_t rd, uint8_t rn, uint8_t rm, uint16_t op) @@ -327,6 +504,107 @@ emit_div(struct a64_jit_ctx *ctx, bool is64, uint8_t rd, uint8_t rm) emit_data_process_two_src(ctx, is64, rd, rd, rm, A64_UDIV); } +static void +emit_lslv(struct a64_jit_ctx *ctx, bool is64, uint8_t rd, uint8_t rm) +{ + emit_data_process_two_src(ctx, is64, rd, rd, rm, A64_LSLV); +} + +static void +emit_lsrv(struct a64_jit_ctx *ctx, bool is64, uint8_t rd, uint8_t rm) +{ + emit_data_process_two_src(ctx, is64, rd, rd, rm, A64_LSRV); +} + +static void +emit_asrv(struct a64_jit_ctx *ctx, bool is64, uint8_t rd, uint8_t rm) +{ + emit_data_process_two_src(ctx, is64, rd, rd, rm, A64_ASRV); +} + +#define A64_UBFM 0x2 +#define A64_SBFM 0x0 +static void +emit_bitfield(struct a64_jit_ctx *ctx, bool is64, uint8_t rd, uint8_t rn, + uint8_t immr, uint8_t imms, uint16_t op) + +{ + uint32_t insn; + + insn = (!!is64) << 31; + if (insn) + insn |= 1 << 22; /* Set N bit when is64 is set */ + insn |= op << 29; + insn |= 0x26 << 23; + insn |= immr << 16; + insn |= imms << 10; + insn |= rn << 5; + insn |= rd; + + emit_insn(ctx, insn, check_reg(rd) || check_reg(rn) || + check_immr_imms(is64, immr, imms)); +} +static void +emit_lsl(struct a64_jit_ctx *ctx, bool is64, uint8_t rd, uint8_t imm) +{ + const unsigned int width = is64 ? 
64 : 32; + uint8_t imms, immr; + + immr = (width - imm) & (width - 1); + imms = width - 1 - imm; + + emit_bitfield(ctx, is64, rd, rd, immr, imms, A64_UBFM); +} + +static void +emit_lsr(struct a64_jit_ctx *ctx, bool is64, uint8_t rd, uint8_t imm) +{ + emit_bitfield(ctx, is64, rd, rd, imm, is64 ? 63 : 31, A64_UBFM); +} + +static void +emit_asr(struct a64_jit_ctx *ctx, bool is64, uint8_t rd, uint8_t imm) +{ + emit_bitfield(ctx, is64, rd, rd, imm, is64 ? 63 : 31, A64_SBFM); +} + +#define A64_AND 0 +#define A64_OR 1 +#define A64_XOR 2 +static void +emit_logical(struct a64_jit_ctx *ctx, bool is64, uint8_t rd, + uint8_t rm, uint16_t op) +{ + uint32_t insn; + + insn = (!!is64) << 31; + insn |= op << 29; + insn |= 0x50 << 21; + insn |= rm << 16; + insn |= rd << 5; + insn |= rd; + + emit_insn(ctx, insn, check_reg(rd) || check_reg(rm)); +} + +static void +emit_or(struct a64_jit_ctx *ctx, bool is64, uint8_t rd, uint8_t rm) +{ + emit_logical(ctx, is64, rd, rm, A64_OR); +} + +static void +emit_and(struct a64_jit_ctx *ctx, bool is64, uint8_t rd, uint8_t rm) +{ + emit_logical(ctx, is64, rd, rm, A64_AND); +} + +static void +emit_xor(struct a64_jit_ctx *ctx, bool is64, uint8_t rd, uint8_t rm) +{ + emit_logical(ctx, is64, rd, rm, A64_XOR); +} + static void emit_msub(struct a64_jit_ctx *ctx, bool is64, uint8_t rd, uint8_t rn, uint8_t rm, uint8_t ra) @@ -353,6 +631,95 @@ emit_mod(struct a64_jit_ctx *ctx, bool is64, uint8_t tmp, uint8_t rd, emit_msub(ctx, is64, rd, tmp, rm, rd); } +static void +emit_blr(struct a64_jit_ctx *ctx, uint8_t rn) +{ + uint32_t insn; + + insn = 0xd63f0000; + insn |= rn << 5; + + emit_insn(ctx, insn, check_reg(rn)); +} + +static void +emit_zero_extend(struct a64_jit_ctx *ctx, uint8_t rd, int32_t imm) +{ + switch (imm) { + case 16: + /* Zero-extend 16 bits into 64 bits */ + emit_bitfield(ctx, 1, rd, rd, 0, 15, A64_UBFM); + break; + case 32: + /* Zero-extend 32 bits into 64 bits */ + emit_bitfield(ctx, 1, rd, rd, 0, 31, A64_UBFM); + break; + case 64: + break; + default: + /* Generate error */ + emit_insn(ctx, 0, 1); + } +} + +static void +emit_rev(struct a64_jit_ctx *ctx, uint8_t rd, int32_t imm) +{ + uint32_t insn; + + insn = 0xdac00000; + insn |= rd << 5; + insn |= rd; + + switch (imm) { + case 16: + insn |= 1 << 10; + emit_insn(ctx, insn, check_reg(rd)); + emit_zero_extend(ctx, rd, 16); + break; + case 32: + insn |= 2 << 10; + emit_insn(ctx, insn, check_reg(rd)); + /* Upper 32 bits already cleared */ + break; + case 64: + insn |= 3 << 10; + emit_insn(ctx, insn, check_reg(rd)); + break; + default: + /* Generate error */ + emit_insn(ctx, insn, 1); + } +} + +static int +is_be(void) +{ +#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN + return 1; +#else + return 0; +#endif +} + +static void +emit_be(struct a64_jit_ctx *ctx, uint8_t rd, int32_t imm) +{ + if (is_be()) + emit_zero_extend(ctx, rd, imm); + else + emit_rev(ctx, rd, imm); +} + +static void +emit_le(struct a64_jit_ctx *ctx, uint8_t rd, int32_t imm) +{ + if (is_be()) + emit_rev(ctx, rd, imm); + else + emit_zero_extend(ctx, rd, imm); +} + static uint8_t ebpf_to_a64_reg(struct a64_jit_ctx *ctx, uint8_t reg) { @@ -549,6 +916,16 @@ emit_epilogue(struct a64_jit_ctx *ctx) emit_epilogue_no_call(ctx); } +static void +emit_call(struct a64_jit_ctx *ctx, uint8_t tmp, void *func) +{ + uint8_t r0 = ebpf_to_a64_reg(ctx, EBPF_REG_0); + + emit_mov_imm(ctx, 1, tmp, (uint64_t)func); + emit_blr(ctx, tmp); + emit_mov_64(ctx, r0, A64_R(0)); +} + static void emit_cbnz(struct a64_jit_ctx *ctx, bool is64, uint8_t rt, int32_t imm19) { @@ -587,6 +964,131 @@ 
emit_return_zero_if_src_zero(struct a64_jit_ctx *ctx, bool is64, uint8_t src) emit_b(ctx, jump_to_epilogue); } +static void +emit_stadd(struct a64_jit_ctx *ctx, bool is64, uint8_t rs, uint8_t rn) +{ + uint32_t insn; + + insn = 0xb820001f; + insn |= (!!is64) << 30; + insn |= rs << 16; + insn |= rn << 5; + + emit_insn(ctx, insn, check_reg(rs) || check_reg(rn)); +} + +static void +emit_ldxr(struct a64_jit_ctx *ctx, bool is64, uint8_t rt, uint8_t rn) +{ + uint32_t insn; + + insn = 0x885f7c00; + insn |= (!!is64) << 30; + insn |= rn << 5; + insn |= rt; + + emit_insn(ctx, insn, check_reg(rt) || check_reg(rn)); +} + +static void +emit_stxr(struct a64_jit_ctx *ctx, bool is64, uint8_t rs, uint8_t rt, + uint8_t rn) +{ + uint32_t insn; + + insn = 0x88007c00; + insn |= (!!is64) << 30; + insn |= rs << 16; + insn |= rn << 5; + insn |= rt; + + emit_insn(ctx, insn, check_reg(rs) || check_reg(rt) || check_reg(rn)); +} + +static int +has_atomics(void) +{ + int rc = 0; + +#if defined(__ARM_FEATURE_ATOMICS) || defined(RTE_ARM_FEATURE_ATOMICS) + rc = 1; +#endif + return rc; +} + +static void +emit_xadd(struct a64_jit_ctx *ctx, uint8_t op, uint8_t tmp1, uint8_t tmp2, + uint8_t tmp3, uint8_t dst, int16_t off, uint8_t src) +{ + bool is64 = (BPF_SIZE(op) == EBPF_DW); + uint8_t rn; + + if (off) { + emit_mov_imm(ctx, 1, tmp1, off); + emit_add(ctx, 1, tmp1, dst); + rn = tmp1; + } else { + rn = dst; + } + + if (has_atomics()) { + emit_stadd(ctx, is64, src, rn); + } else { + emit_ldxr(ctx, is64, tmp2, rn); + emit_add(ctx, is64, tmp2, src); + emit_stxr(ctx, is64, tmp3, tmp2, rn); + emit_cbnz(ctx, is64, tmp3, -3); + } +} + +#define A64_CMP 0x6b00000f +#define A64_TST 0x6a00000f +static void +emit_cmp_tst(struct a64_jit_ctx *ctx, bool is64, uint8_t rn, uint8_t rm, + uint32_t opc) +{ + uint32_t insn; + + insn = opc; + insn |= (!!is64) << 31; + insn |= rm << 16; + insn |= rn << 5; + + emit_insn(ctx, insn, check_reg(rn) || check_reg(rm)); +} + +static void +emit_cmp(struct a64_jit_ctx *ctx, bool is64, uint8_t rn, uint8_t rm) +{ + emit_cmp_tst(ctx, is64, rn, rm, A64_CMP); +} + +static void +emit_tst(struct a64_jit_ctx *ctx, bool is64, uint8_t rn, uint8_t rm) +{ + emit_cmp_tst(ctx, is64, rn, rm, A64_TST); +} + +static void +emit_b_cond(struct a64_jit_ctx *ctx, uint8_t cond, int32_t imm19) +{ + uint32_t insn, imm; + + imm = mask_imm(19, imm19); + insn = 0x15 << 26; + insn |= imm << 5; + insn |= cond; + + emit_insn(ctx, insn, check_cond(cond) || check_imm(19, imm19)); +} + +static void +emit_branch(struct a64_jit_ctx *ctx, uint8_t op, uint32_t i, int16_t off) +{ + jump_offset_to_branch_update(ctx, i); + emit_b_cond(ctx, ebpf_to_a64_cond(op), jump_offset_get(ctx, i, off)); +} + static void check_program_has_call(struct a64_jit_ctx *ctx, struct rte_bpf *bpf) { @@ -613,8 +1115,10 @@ check_program_has_call(struct a64_jit_ctx *ctx, struct rte_bpf *bpf) static int emit(struct a64_jit_ctx *ctx, struct rte_bpf *bpf) { - uint8_t op, dst, src, tmp1, tmp2; + uint8_t op, dst, src, tmp1, tmp2, tmp3; const struct ebpf_insn *ins; + uint64_t u64; + int16_t off; int32_t imm; uint32_t i; bool is64; @@ -626,13 +1130,16 @@ emit(struct a64_jit_ctx *ctx, struct rte_bpf *bpf) ctx->stack_sz = RTE_ALIGN_MUL_CEIL(bpf->stack_sz, 16); tmp1 = ebpf_to_a64_reg(ctx, TMP_REG_1); tmp2 = ebpf_to_a64_reg(ctx, TMP_REG_2); + tmp3 = ebpf_to_a64_reg(ctx, TMP_REG_3); emit_prologue(ctx); for (i = 0; i != bpf->prm.nb_ins; i++) { + jump_offset_update(ctx, i); ins = bpf->prm.ins + i; op = ins->code; + off = ins->off; imm = ins->imm; dst = ebpf_to_a64_reg(ctx, ins->dst_reg); 
@@ -707,6 +1214,164 @@ emit(struct a64_jit_ctx *ctx, struct rte_bpf *bpf) emit_mov_imm(ctx, is64, tmp1, imm); emit_mod(ctx, is64, tmp2, dst, tmp1); break; + /* dst |= src */ + case (BPF_ALU | BPF_OR | BPF_X): + case (EBPF_ALU64 | BPF_OR | BPF_X): + emit_or(ctx, is64, dst, src); + break; + /* dst |= imm */ + case (BPF_ALU | BPF_OR | BPF_K): + case (EBPF_ALU64 | BPF_OR | BPF_K): + emit_mov_imm(ctx, is64, tmp1, imm); + emit_or(ctx, is64, dst, tmp1); + break; + /* dst &= src */ + case (BPF_ALU | BPF_AND | BPF_X): + case (EBPF_ALU64 | BPF_AND | BPF_X): + emit_and(ctx, is64, dst, src); + break; + /* dst &= imm */ + case (BPF_ALU | BPF_AND | BPF_K): + case (EBPF_ALU64 | BPF_AND | BPF_K): + emit_mov_imm(ctx, is64, tmp1, imm); + emit_and(ctx, is64, dst, tmp1); + break; + /* dst ^= src */ + case (BPF_ALU | BPF_XOR | BPF_X): + case (EBPF_ALU64 | BPF_XOR | BPF_X): + emit_xor(ctx, is64, dst, src); + break; + /* dst ^= imm */ + case (BPF_ALU | BPF_XOR | BPF_K): + case (EBPF_ALU64 | BPF_XOR | BPF_K): + emit_mov_imm(ctx, is64, tmp1, imm); + emit_xor(ctx, is64, dst, tmp1); + break; + /* dst = -dst */ + case (BPF_ALU | BPF_NEG): + case (EBPF_ALU64 | BPF_NEG): + emit_neg(ctx, is64, dst); + break; + /* dst <<= src */ + case BPF_ALU | BPF_LSH | BPF_X: + case EBPF_ALU64 | BPF_LSH | BPF_X: + emit_lslv(ctx, is64, dst, src); + break; + /* dst <<= imm */ + case BPF_ALU | BPF_LSH | BPF_K: + case EBPF_ALU64 | BPF_LSH | BPF_K: + emit_lsl(ctx, is64, dst, imm); + break; + /* dst >>= src */ + case BPF_ALU | BPF_RSH | BPF_X: + case EBPF_ALU64 | BPF_RSH | BPF_X: + emit_lsrv(ctx, is64, dst, src); + break; + /* dst >>= imm */ + case BPF_ALU | BPF_RSH | BPF_K: + case EBPF_ALU64 | BPF_RSH | BPF_K: + emit_lsr(ctx, is64, dst, imm); + break; + /* dst >>= src (arithmetic) */ + case BPF_ALU | EBPF_ARSH | BPF_X: + case EBPF_ALU64 | EBPF_ARSH | BPF_X: + emit_asrv(ctx, is64, dst, src); + break; + /* dst >>= imm (arithmetic) */ + case BPF_ALU | EBPF_ARSH | BPF_K: + case EBPF_ALU64 | EBPF_ARSH | BPF_K: + emit_asr(ctx, is64, dst, imm); + break; + /* dst = be##imm(dst) */ + case (BPF_ALU | EBPF_END | EBPF_TO_BE): + emit_be(ctx, dst, imm); + break; + /* dst = le##imm(dst) */ + case (BPF_ALU | EBPF_END | EBPF_TO_LE): + emit_le(ctx, dst, imm); + break; + /* dst = *(size *) (src + off) */ + case (BPF_LDX | BPF_MEM | BPF_B): + case (BPF_LDX | BPF_MEM | BPF_H): + case (BPF_LDX | BPF_MEM | BPF_W): + case (BPF_LDX | BPF_MEM | EBPF_DW): + emit_mov_imm(ctx, 1, tmp1, off); + emit_ldr(ctx, BPF_SIZE(op), dst, src, tmp1); + break; + /* dst = imm64 */ + case (BPF_LD | BPF_IMM | EBPF_DW): + u64 = ((uint64_t)ins[1].imm << 32) | (uint32_t)imm; + emit_mov_imm(ctx, 1, dst, u64); + i++; + break; + /* *(size *)(dst + off) = src */ + case (BPF_STX | BPF_MEM | BPF_B): + case (BPF_STX | BPF_MEM | BPF_H): + case (BPF_STX | BPF_MEM | BPF_W): + case (BPF_STX | BPF_MEM | EBPF_DW): + emit_mov_imm(ctx, 1, tmp1, off); + emit_str(ctx, BPF_SIZE(op), src, dst, tmp1); + break; + /* *(size *)(dst + off) = imm */ + case (BPF_ST | BPF_MEM | BPF_B): + case (BPF_ST | BPF_MEM | BPF_H): + case (BPF_ST | BPF_MEM | BPF_W): + case (BPF_ST | BPF_MEM | EBPF_DW): + emit_mov_imm(ctx, 1, tmp1, imm); + emit_mov_imm(ctx, 1, tmp2, off); + emit_str(ctx, BPF_SIZE(op), tmp1, dst, tmp2); + break; + /* STX XADD: lock *(size *)(dst + off) += src */ + case (BPF_STX | EBPF_XADD | BPF_W): + case (BPF_STX | EBPF_XADD | EBPF_DW): + emit_xadd(ctx, op, tmp1, tmp2, tmp3, dst, off, src); + break; + /* PC += off */ + case (BPF_JMP | BPF_JA): + emit_b(ctx, jump_offset_get(ctx, i, off)); + break; + /* PC += 
off if dst COND imm */ + case (BPF_JMP | BPF_JEQ | BPF_K): + case (BPF_JMP | EBPF_JNE | BPF_K): + case (BPF_JMP | BPF_JGT | BPF_K): + case (BPF_JMP | EBPF_JLT | BPF_K): + case (BPF_JMP | BPF_JGE | BPF_K): + case (BPF_JMP | EBPF_JLE | BPF_K): + case (BPF_JMP | EBPF_JSGT | BPF_K): + case (BPF_JMP | EBPF_JSLT | BPF_K): + case (BPF_JMP | EBPF_JSGE | BPF_K): + case (BPF_JMP | EBPF_JSLE | BPF_K): + emit_mov_imm(ctx, 1, tmp1, imm); + emit_cmp(ctx, 1, dst, tmp1); + emit_branch(ctx, op, i, off); + break; + case (BPF_JMP | BPF_JSET | BPF_K): + emit_mov_imm(ctx, 1, tmp1, imm); + emit_tst(ctx, 1, dst, tmp1); + emit_branch(ctx, op, i, off); + break; + /* PC += off if dst COND src */ + case (BPF_JMP | BPF_JEQ | BPF_X): + case (BPF_JMP | EBPF_JNE | BPF_X): + case (BPF_JMP | BPF_JGT | BPF_X): + case (BPF_JMP | EBPF_JLT | BPF_X): + case (BPF_JMP | BPF_JGE | BPF_X): + case (BPF_JMP | EBPF_JLE | BPF_X): + case (BPF_JMP | EBPF_JSGT | BPF_X): + case (BPF_JMP | EBPF_JSLT | BPF_X): + case (BPF_JMP | EBPF_JSGE | BPF_X): + case (BPF_JMP | EBPF_JSLE | BPF_X): + emit_cmp(ctx, 1, dst, src); + emit_branch(ctx, op, i, off); + break; + case (BPF_JMP | BPF_JSET | BPF_X): + emit_tst(ctx, 1, dst, src); + emit_branch(ctx, op, i, off); + break; + /* Call imm */ + case (BPF_JMP | EBPF_CALL): + emit_call(ctx, tmp1, bpf->prm.xsym[ins->imm].func.val); + break; /* Return r0 */ case (BPF_JMP | EBPF_EXIT): emit_epilogue(ctx); @@ -736,6 +1401,11 @@ bpf_jit_arm64(struct rte_bpf *bpf) /* Init JIT context */ memset(&ctx, 0, sizeof(ctx)); + /* Initialize the memory for eBPF to a64 insn offset map for jump */ + rc = jump_offset_init(&ctx, bpf); + if (rc) + goto error; + /* Find eBPF program has call class or not */ check_program_has_call(&ctx, bpf); @@ -765,7 +1435,7 @@ bpf_jit_arm64(struct rte_bpf *bpf) } /* Flush the icache */ - __builtin___clear_cache(ctx.ins, ctx.ins + ctx.idx); + __builtin___clear_cache((char *)ctx.ins, (char *)(ctx.ins + ctx.idx)); bpf->jit.func = (void *)ctx.ins; bpf->jit.sz = size; @@ -775,5 +1445,7 @@ bpf_jit_arm64(struct rte_bpf *bpf) munmap: munmap(ctx.ins, size); finish: + jump_offset_fini(&ctx); +error: return rc; }
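
Usage note (not part of the diff above): librte_bpf invokes this arm64 JIT internally when a program is loaded; callers reach the generated code only through the public API. Below is a minimal sketch, assuming the documented rte_bpf calls (rte_bpf_load(), rte_bpf_get_jit(), rte_bpf_exec(), rte_bpf_destroy()); the run_bpf() wrapper and its 'prm'/'pkt' arguments are illustrative and not part of the patch.

#include <rte_bpf.h>

/* Load an eBPF program and prefer its JIT-compiled image when one exists.
 * 'prm' is a caller-filled rte_bpf_prm describing the program; 'pkt' is the
 * argument handed to the program (illustrative only).
 */
static uint64_t
run_bpf(const struct rte_bpf_prm *prm, void *pkt)
{
	struct rte_bpf *bpf;
	struct rte_bpf_jit jit;
	uint64_t rc;

	bpf = rte_bpf_load(prm);	/* verifies and, on arm64, runs bpf_jit_arm64() */
	if (bpf == NULL)
		return 0;	/* load/verify failed; rte_errno holds the cause */

	if (rte_bpf_get_jit(bpf, &jit) == 0 && jit.func != NULL)
		rc = jit.func(pkt);		/* execute the generated native code */
	else
		rc = rte_bpf_exec(bpf, pkt);	/* interpreter fallback */

	rte_bpf_destroy(bpf);
	return rc;
}

The JIT is best-effort by design: rte_bpf_load() still succeeds when bpf_jit_arm64() cannot translate an opcode, so checking jit.func before calling it keeps the interpreter path available.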