X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_pipeline%2Frte_swx_pipeline.c;h=eaaed7a0a92520abd482f71511df327d509edbfa;hb=3a1aca384d3ac628dec8830714826e323f509dec;hp=aaf2aafa5ddf065cef5355349f312c7677cddcc4;hpb=c6b752cdf21525659499a3bae5c8f0f391fa2185;p=dpdk.git diff --git a/lib/librte_pipeline/rte_swx_pipeline.c b/lib/librte_pipeline/rte_swx_pipeline.c index aaf2aafa5d..eaaed7a0a9 100644 --- a/lib/librte_pipeline/rte_swx_pipeline.c +++ b/lib/librte_pipeline/rte_swx_pipeline.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include @@ -22,7 +23,17 @@ do { \ } while (0) #define CHECK_NAME(name, err_code) \ - CHECK((name) && (name)[0], err_code) + CHECK((name) && \ + (name)[0] && \ + (strnlen((name), RTE_SWX_NAME_SIZE) < RTE_SWX_NAME_SIZE), \ + err_code) + +#define CHECK_INSTRUCTION(instr, err_code) \ + CHECK((instr) && \ + (instr)[0] && \ + (strnlen((instr), RTE_SWX_INSTRUCTION_SIZE) < \ + RTE_SWX_INSTRUCTION_SIZE), \ + err_code) #ifndef TRACE_LEVEL #define TRACE_LEVEL 0 @@ -358,6 +369,84 @@ enum instruction_type { /* extern f.func */ INSTR_EXTERN_FUNC, + + /* jmp LABEL + * Unconditional jump + */ + INSTR_JMP, + + /* jmpv LABEL h.header + * Jump if header is valid + */ + INSTR_JMP_VALID, + + /* jmpnv LABEL h.header + * Jump if header is invalid + */ + INSTR_JMP_INVALID, + + /* jmph LABEL + * Jump if table lookup hit + */ + INSTR_JMP_HIT, + + /* jmpnh LABEL + * Jump if table lookup miss + */ + INSTR_JMP_MISS, + + /* jmpa LABEL ACTION + * Jump if action run + */ + INSTR_JMP_ACTION_HIT, + + /* jmpna LABEL ACTION + * Jump if action not run + */ + INSTR_JMP_ACTION_MISS, + + /* jmpeq LABEL a b + * Jump is a is equal to b + * a = HMEFT, b = HMEFTI + */ + INSTR_JMP_EQ, /* (a, b) = (MEFT, MEFT) or (a, b) = (H, H) */ + INSTR_JMP_EQ_S, /* (a, b) = (MEFT, H) or (a, b) = (H, MEFT) */ + INSTR_JMP_EQ_I, /* (a, b) = (MEFT, I) or (a, b) = (H, I) */ + + /* jmpneq LABEL a b + * Jump is a is not equal to b + * a = HMEFT, b = HMEFTI + */ + INSTR_JMP_NEQ, /* (a, b) = (MEFT, MEFT) or (a, b) = (H, H) */ + INSTR_JMP_NEQ_S, /* (a, b) = (MEFT, H) or (a, b) = (H, MEFT) */ + INSTR_JMP_NEQ_I, /* (a, b) = (MEFT, I) or (a, b) = (H, I) */ + + /* jmplt LABEL a b + * Jump if a is less than b + * a = HMEFT, b = HMEFTI + */ + INSTR_JMP_LT, /* a = MEF, b = MEF */ + INSTR_JMP_LT_MH, /* a = MEF, b = H */ + INSTR_JMP_LT_HM, /* a = H, b = MEF */ + INSTR_JMP_LT_HH, /* a = H, b = H */ + INSTR_JMP_LT_MI, /* a = MEF, b = I */ + INSTR_JMP_LT_HI, /* a = H, b = I */ + + /* jmpgt LABEL a b + * Jump if a is greater than b + * a = HMEFT, b = HMEFTI + */ + INSTR_JMP_GT, /* a = MEF, b = MEF */ + INSTR_JMP_GT_MH, /* a = MEF, b = H */ + INSTR_JMP_GT_HM, /* a = H, b = MEF */ + INSTR_JMP_GT_HH, /* a = H, b = H */ + INSTR_JMP_GT_MI, /* a = MEF, b = I */ + INSTR_JMP_GT_HI, /* a = H, b = I */ + + /* return + * Return from action + */ + INSTR_RETURN, }; struct instr_operand { @@ -402,7 +491,7 @@ struct instr_dst_src { struct instr_operand dst; union { struct instr_operand src; - uint32_t src_val; + uint64_t src_val; }; }; @@ -419,6 +508,21 @@ struct instr_dma { uint16_t n_bytes[8]; }; +struct instr_jmp { + struct instruction *ip; + + union { + struct instr_operand a; + uint8_t header_id; + uint8_t action_id; + }; + + union { + struct instr_operand b; + uint64_t b_val; + }; +}; + struct instruction { enum instruction_type type; union { @@ -430,6 +534,7 @@ struct instruction { struct instr_table table; struct instr_extern_obj ext_obj; struct instr_extern_func ext_func; + struct instr_jmp jmp; }; }; @@ -544,6 
+649,9 @@ struct thread { #define MASK64_BIT_SET(mask, pos) ((mask) | (1LLU << (pos))) #define MASK64_BIT_CLR(mask, pos) ((mask) & ~(1LLU << (pos))) +#define HEADER_VALID(thread, header_id) \ + MASK64_BIT_GET((thread)->valid_headers, header_id) + #define ALU(thread, ip, operator) \ { \ uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \ @@ -725,6 +833,118 @@ struct thread { *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \ } +#define JMP_CMP(thread, ip, operator) \ +{ \ + uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \ + uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \ + uint64_t a64 = *a64_ptr; \ + uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \ + uint64_t a = a64 & a64_mask; \ + \ + uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \ + uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \ + uint64_t b64 = *b64_ptr; \ + uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits); \ + uint64_t b = b64 & b64_mask; \ + \ + (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \ +} + +#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN + +#define JMP_CMP_S(thread, ip, operator) \ +{ \ + uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \ + uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \ + uint64_t a64 = *a64_ptr; \ + uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \ + uint64_t a = a64 & a64_mask; \ + \ + uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \ + uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \ + uint64_t b64 = *b64_ptr; \ + uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits); \ + \ + (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \ +} + +#define JMP_CMP_MH JMP_CMP_S + +#define JMP_CMP_HM(thread, ip, operator) \ +{ \ + uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \ + uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \ + uint64_t a64 = *a64_ptr; \ + uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \ + \ + uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \ + uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \ + uint64_t b64 = *b64_ptr; \ + uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits); \ + uint64_t b = b64 & b64_mask; \ + \ + (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \ +} + +#define JMP_CMP_HH(thread, ip, operator) \ +{ \ + uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \ + uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \ + uint64_t a64 = *a64_ptr; \ + uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \ + \ + uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \ + uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \ + uint64_t b64 = *b64_ptr; \ + uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits); \ + \ + (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \ +} + +#else + +#define JMP_CMP_S JMP_CMP +#define JMP_CMP_MH JMP_CMP +#define JMP_CMP_HM JMP_CMP +#define JMP_CMP_HH JMP_CMP + +#endif + +#define JMP_CMP_I(thread, ip, operator) \ +{ \ + uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \ + uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \ + uint64_t a64 = *a64_ptr; \ + uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \ + uint64_t a = a64 & a64_mask; \ + \ + uint64_t b = (ip)->jmp.b_val; \ + \ + (thread)->ip = (a operator b) ? 
(ip)->jmp.ip : ((thread)->ip + 1); \ +} + +#define JMP_CMP_MI JMP_CMP_I + +#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN + +#define JMP_CMP_HI(thread, ip, operator) \ +{ \ + uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \ + uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \ + uint64_t a64 = *a64_ptr; \ + uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \ + \ + uint64_t b = (ip)->jmp.b_val; \ + \ + (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \ +} + +#else + +#define JMP_CMP_HI JMP_CMP_I + +#endif + #define METADATA_READ(thread, offset, n_bits) \ ({ \ uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \ @@ -1426,12 +1646,12 @@ rte_swx_pipeline_extern_type_member_func_register(struct rte_swx_pipeline *p, CHECK(p, EINVAL); - CHECK(extern_type_name, EINVAL); + CHECK_NAME(extern_type_name, EINVAL); type = extern_type_find(p, extern_type_name); CHECK(type, EINVAL); CHECK(type->n_funcs < RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX, ENOSPC); - CHECK(name, EINVAL); + CHECK_NAME(name, EINVAL); CHECK(!extern_type_member_func_find(type, name), EEXIST); CHECK(member_func, EINVAL); @@ -2048,6 +2268,42 @@ metadata_free(struct rte_swx_pipeline *p) /* * Instruction. */ +static int +instruction_is_jmp(struct instruction *instr) +{ + switch (instr->type) { + case INSTR_JMP: + case INSTR_JMP_VALID: + case INSTR_JMP_INVALID: + case INSTR_JMP_HIT: + case INSTR_JMP_MISS: + case INSTR_JMP_ACTION_HIT: + case INSTR_JMP_ACTION_MISS: + case INSTR_JMP_EQ: + case INSTR_JMP_EQ_S: + case INSTR_JMP_EQ_I: + case INSTR_JMP_NEQ: + case INSTR_JMP_NEQ_S: + case INSTR_JMP_NEQ_I: + case INSTR_JMP_LT: + case INSTR_JMP_LT_MH: + case INSTR_JMP_LT_HM: + case INSTR_JMP_LT_HH: + case INSTR_JMP_LT_MI: + case INSTR_JMP_LT_HI: + case INSTR_JMP_GT: + case INSTR_JMP_GT_MH: + case INSTR_JMP_GT_HM: + case INSTR_JMP_GT_HH: + case INSTR_JMP_GT_MI: + case INSTR_JMP_GT_HI: + return 1; + + default: + return 0; + } +} + static struct field * action_field_parse(struct action *action, const char *name); @@ -2136,6 +2392,12 @@ thread_ip_reset(struct rte_swx_pipeline *p, struct thread *t) t->ip = p->instructions; } +static inline void +thread_ip_set(struct thread *t, struct instruction *ip) +{ + t->ip = ip; +} + static inline void thread_ip_action_call(struct rte_swx_pipeline *p, struct thread *t, @@ -2938,7 +3200,8 @@ instr_mov_translate(struct rte_swx_pipeline *p, { char *dst = tokens[1], *src = tokens[2]; struct field *fdst, *fsrc; - uint32_t dst_struct_id, src_struct_id, src_val; + uint64_t src_val; + uint32_t dst_struct_id, src_struct_id; CHECK(n_tokens == 3, EINVAL); @@ -2963,17 +3226,17 @@ instr_mov_translate(struct rte_swx_pipeline *p, } /* MOV_I. 
*/ - src_val = strtoul(src, &src, 0); + src_val = strtoull(src, &src, 0); CHECK(!src[0], EINVAL); if (dst[0] == 'h') - src_val = htonl(src_val); + src_val = hton64(src_val) >> (64 - fdst->n_bits); instr->type = INSTR_MOV_I; instr->mov.dst.struct_id = (uint8_t)dst_struct_id; instr->mov.dst.n_bits = fdst->n_bits; instr->mov.dst.offset = fdst->offset / 8; - instr->mov.src_val = (uint32_t)src_val; + instr->mov.src_val = src_val; return 0; } @@ -3013,7 +3276,7 @@ instr_mov_i_exec(struct rte_swx_pipeline *p) struct thread *t = &p->threads[p->thread_id]; struct instruction *ip = t->ip; - TRACE("[Thread %2u] mov m.f %x\n", + TRACE("[Thread %2u] mov m.f %" PRIx64 "\n", p->thread_id, ip->mov.src_val); @@ -3200,7 +3463,8 @@ instr_alu_add_translate(struct rte_swx_pipeline *p, { char *dst = tokens[1], *src = tokens[2]; struct field *fdst, *fsrc; - uint32_t dst_struct_id, src_struct_id, src_val; + uint64_t src_val; + uint32_t dst_struct_id, src_struct_id; CHECK(n_tokens == 3, EINVAL); @@ -3228,7 +3492,7 @@ instr_alu_add_translate(struct rte_swx_pipeline *p, } /* ADD_MI, ADD_HI. */ - src_val = strtoul(src, &src, 0); + src_val = strtoull(src, &src, 0); CHECK(!src[0], EINVAL); instr->type = INSTR_ALU_ADD_MI; @@ -3238,7 +3502,7 @@ instr_alu_add_translate(struct rte_swx_pipeline *p, instr->alu.dst.struct_id = (uint8_t)dst_struct_id; instr->alu.dst.n_bits = fdst->n_bits; instr->alu.dst.offset = fdst->offset / 8; - instr->alu.src_val = (uint32_t)src_val; + instr->alu.src_val = src_val; return 0; } @@ -3252,7 +3516,8 @@ instr_alu_sub_translate(struct rte_swx_pipeline *p, { char *dst = tokens[1], *src = tokens[2]; struct field *fdst, *fsrc; - uint32_t dst_struct_id, src_struct_id, src_val; + uint64_t src_val; + uint32_t dst_struct_id, src_struct_id; CHECK(n_tokens == 3, EINVAL); @@ -3280,7 +3545,7 @@ instr_alu_sub_translate(struct rte_swx_pipeline *p, } /* SUB_MI, SUB_HI. */ - src_val = strtoul(src, &src, 0); + src_val = strtoull(src, &src, 0); CHECK(!src[0], EINVAL); instr->type = INSTR_ALU_SUB_MI; @@ -3290,7 +3555,7 @@ instr_alu_sub_translate(struct rte_swx_pipeline *p, instr->alu.dst.struct_id = (uint8_t)dst_struct_id; instr->alu.dst.n_bits = fdst->n_bits; instr->alu.dst.offset = fdst->offset / 8; - instr->alu.src_val = (uint32_t)src_val; + instr->alu.src_val = src_val; return 0; } @@ -3381,7 +3646,8 @@ instr_alu_shl_translate(struct rte_swx_pipeline *p, { char *dst = tokens[1], *src = tokens[2]; struct field *fdst, *fsrc; - uint32_t dst_struct_id, src_struct_id, src_val; + uint64_t src_val; + uint32_t dst_struct_id, src_struct_id; CHECK(n_tokens == 3, EINVAL); @@ -3409,7 +3675,7 @@ instr_alu_shl_translate(struct rte_swx_pipeline *p, } /* SHL_MI, SHL_HI. */ - src_val = strtoul(src, &src, 0); + src_val = strtoull(src, &src, 0); CHECK(!src[0], EINVAL); instr->type = INSTR_ALU_SHL_MI; @@ -3419,7 +3685,7 @@ instr_alu_shl_translate(struct rte_swx_pipeline *p, instr->alu.dst.struct_id = (uint8_t)dst_struct_id; instr->alu.dst.n_bits = fdst->n_bits; instr->alu.dst.offset = fdst->offset / 8; - instr->alu.src_val = (uint32_t)src_val; + instr->alu.src_val = src_val; return 0; } @@ -3433,7 +3699,8 @@ instr_alu_shr_translate(struct rte_swx_pipeline *p, { char *dst = tokens[1], *src = tokens[2]; struct field *fdst, *fsrc; - uint32_t dst_struct_id, src_struct_id, src_val; + uint64_t src_val; + uint32_t dst_struct_id, src_struct_id; CHECK(n_tokens == 3, EINVAL); @@ -3461,7 +3728,7 @@ instr_alu_shr_translate(struct rte_swx_pipeline *p, } /* SHR_MI, SHR_HI. 
*/ - src_val = strtoul(src, &src, 0); + src_val = strtoull(src, &src, 0); CHECK(!src[0], EINVAL); instr->type = INSTR_ALU_SHR_MI; @@ -3471,7 +3738,7 @@ instr_alu_shr_translate(struct rte_swx_pipeline *p, instr->alu.dst.struct_id = (uint8_t)dst_struct_id; instr->alu.dst.n_bits = fdst->n_bits; instr->alu.dst.offset = fdst->offset / 8; - instr->alu.src_val = (uint32_t)src_val; + instr->alu.src_val = src_val; return 0; } @@ -3485,7 +3752,8 @@ instr_alu_and_translate(struct rte_swx_pipeline *p, { char *dst = tokens[1], *src = tokens[2]; struct field *fdst, *fsrc; - uint32_t dst_struct_id, src_struct_id, src_val; + uint64_t src_val; + uint32_t dst_struct_id, src_struct_id; CHECK(n_tokens == 3, EINVAL); @@ -3510,17 +3778,17 @@ instr_alu_and_translate(struct rte_swx_pipeline *p, } /* AND_I. */ - src_val = strtoul(src, &src, 0); + src_val = strtoull(src, &src, 0); CHECK(!src[0], EINVAL); if (dst[0] == 'h') - src_val = htonl(src_val); + src_val = hton64(src_val) >> (64 - fdst->n_bits); instr->type = INSTR_ALU_AND_I; instr->alu.dst.struct_id = (uint8_t)dst_struct_id; instr->alu.dst.n_bits = fdst->n_bits; instr->alu.dst.offset = fdst->offset / 8; - instr->alu.src_val = (uint32_t)src_val; + instr->alu.src_val = src_val; return 0; } @@ -3534,7 +3802,8 @@ instr_alu_or_translate(struct rte_swx_pipeline *p, { char *dst = tokens[1], *src = tokens[2]; struct field *fdst, *fsrc; - uint32_t dst_struct_id, src_struct_id, src_val; + uint64_t src_val; + uint32_t dst_struct_id, src_struct_id; CHECK(n_tokens == 3, EINVAL); @@ -3559,17 +3828,17 @@ instr_alu_or_translate(struct rte_swx_pipeline *p, } /* OR_I. */ - src_val = strtoul(src, &src, 0); + src_val = strtoull(src, &src, 0); CHECK(!src[0], EINVAL); if (dst[0] == 'h') - src_val = htonl(src_val); + src_val = hton64(src_val) >> (64 - fdst->n_bits); instr->type = INSTR_ALU_OR_I; instr->alu.dst.struct_id = (uint8_t)dst_struct_id; instr->alu.dst.n_bits = fdst->n_bits; instr->alu.dst.offset = fdst->offset / 8; - instr->alu.src_val = (uint32_t)src_val; + instr->alu.src_val = src_val; return 0; } @@ -3583,7 +3852,8 @@ instr_alu_xor_translate(struct rte_swx_pipeline *p, { char *dst = tokens[1], *src = tokens[2]; struct field *fdst, *fsrc; - uint32_t dst_struct_id, src_struct_id, src_val; + uint64_t src_val; + uint32_t dst_struct_id, src_struct_id; CHECK(n_tokens == 3, EINVAL); @@ -3608,17 +3878,17 @@ instr_alu_xor_translate(struct rte_swx_pipeline *p, } /* XOR_I. */ - src_val = strtoul(src, &src, 0); + src_val = strtoull(src, &src, 0); CHECK(!src[0], EINVAL); if (dst[0] == 'h') - src_val = htonl(src_val); + src_val = hton64(src_val) >> (64 - fdst->n_bits); instr->type = INSTR_ALU_XOR_I; instr->alu.dst.struct_id = (uint8_t)dst_struct_id; instr->alu.dst.n_bits = fdst->n_bits; instr->alu.dst.offset = fdst->offset / 8; - instr->alu.src_val = (uint32_t)src_val; + instr->alu.src_val = src_val; return 0; } @@ -4351,140 +4621,821 @@ instr_alu_ckadd_struct_exec(struct rte_swx_pipeline *p) thread_ip_inc(p); } -#define RTE_SWX_INSTRUCTION_TOKENS_MAX 16 +/* + * jmp. 
+ */ +static struct action * +action_find(struct rte_swx_pipeline *p, const char *name); static int -instr_translate(struct rte_swx_pipeline *p, - struct action *action, - char *string, - struct instruction *instr, - struct instruction_data *data) +instr_jmp_translate(struct rte_swx_pipeline *p __rte_unused, + struct action *action __rte_unused, + char **tokens, + int n_tokens, + struct instruction *instr, + struct instruction_data *data) { - char *tokens[RTE_SWX_INSTRUCTION_TOKENS_MAX]; - int n_tokens = 0, tpos = 0; + CHECK(n_tokens == 2, EINVAL); - /* Parse the instruction string into tokens. */ - for ( ; ; ) { - char *token; + strcpy(data->jmp_label, tokens[1]); - token = strtok_r(string, " \t\v", &string); - if (!token) - break; + instr->type = INSTR_JMP; + instr->jmp.ip = NULL; /* Resolved later. */ + return 0; +} - CHECK(n_tokens < RTE_SWX_INSTRUCTION_TOKENS_MAX, EINVAL); +static int +instr_jmp_valid_translate(struct rte_swx_pipeline *p, + struct action *action __rte_unused, + char **tokens, + int n_tokens, + struct instruction *instr, + struct instruction_data *data) +{ + struct header *h; - tokens[n_tokens] = token; - n_tokens++; - } + CHECK(n_tokens == 3, EINVAL); - CHECK(n_tokens, EINVAL); + strcpy(data->jmp_label, tokens[1]); - /* Handle the optional instruction label. */ - if ((n_tokens >= 2) && !strcmp(tokens[1], ":")) { - strcpy(data->label, tokens[0]); + h = header_parse(p, tokens[2]); + CHECK(h, EINVAL); - tpos += 2; - CHECK(n_tokens - tpos, EINVAL); - } + instr->type = INSTR_JMP_VALID; + instr->jmp.ip = NULL; /* Resolved later. */ + instr->jmp.header_id = h->id; + return 0; +} - /* Identify the instruction type. */ - if (!strcmp(tokens[tpos], "rx")) - return instr_rx_translate(p, - action, - &tokens[tpos], - n_tokens - tpos, - instr, - data); +static int +instr_jmp_invalid_translate(struct rte_swx_pipeline *p, + struct action *action __rte_unused, + char **tokens, + int n_tokens, + struct instruction *instr, + struct instruction_data *data) +{ + struct header *h; - if (!strcmp(tokens[tpos], "tx")) - return instr_tx_translate(p, - action, - &tokens[tpos], - n_tokens - tpos, - instr, - data); + CHECK(n_tokens == 3, EINVAL); - if (!strcmp(tokens[tpos], "extract")) - return instr_hdr_extract_translate(p, - action, - &tokens[tpos], - n_tokens - tpos, - instr, - data); + strcpy(data->jmp_label, tokens[1]); - if (!strcmp(tokens[tpos], "emit")) - return instr_hdr_emit_translate(p, - action, - &tokens[tpos], - n_tokens - tpos, - instr, - data); + h = header_parse(p, tokens[2]); + CHECK(h, EINVAL); - if (!strcmp(tokens[tpos], "validate")) - return instr_hdr_validate_translate(p, - action, - &tokens[tpos], - n_tokens - tpos, - instr, - data); + instr->type = INSTR_JMP_INVALID; + instr->jmp.ip = NULL; /* Resolved later. 
*/ + instr->jmp.header_id = h->id; + return 0; +} - if (!strcmp(tokens[tpos], "invalidate")) - return instr_hdr_invalidate_translate(p, - action, - &tokens[tpos], - n_tokens - tpos, - instr, - data); +static int +instr_jmp_hit_translate(struct rte_swx_pipeline *p __rte_unused, + struct action *action, + char **tokens, + int n_tokens, + struct instruction *instr, + struct instruction_data *data) +{ + CHECK(!action, EINVAL); + CHECK(n_tokens == 2, EINVAL); - if (!strcmp(tokens[tpos], "mov")) - return instr_mov_translate(p, - action, - &tokens[tpos], - n_tokens - tpos, - instr, - data); + strcpy(data->jmp_label, tokens[1]); - if (!strcmp(tokens[tpos], "dma")) - return instr_dma_translate(p, - action, - &tokens[tpos], - n_tokens - tpos, - instr, - data); + instr->type = INSTR_JMP_HIT; + instr->jmp.ip = NULL; /* Resolved later. */ + return 0; +} - if (!strcmp(tokens[tpos], "add")) - return instr_alu_add_translate(p, - action, - &tokens[tpos], - n_tokens - tpos, - instr, - data); +static int +instr_jmp_miss_translate(struct rte_swx_pipeline *p __rte_unused, + struct action *action, + char **tokens, + int n_tokens, + struct instruction *instr, + struct instruction_data *data) +{ + CHECK(!action, EINVAL); + CHECK(n_tokens == 2, EINVAL); - if (!strcmp(tokens[tpos], "sub")) - return instr_alu_sub_translate(p, - action, - &tokens[tpos], - n_tokens - tpos, - instr, - data); + strcpy(data->jmp_label, tokens[1]); - if (!strcmp(tokens[tpos], "ckadd")) - return instr_alu_ckadd_translate(p, - action, - &tokens[tpos], - n_tokens - tpos, - instr, - data); + instr->type = INSTR_JMP_MISS; + instr->jmp.ip = NULL; /* Resolved later. */ + return 0; +} - if (!strcmp(tokens[tpos], "cksub")) - return instr_alu_cksub_translate(p, - action, - &tokens[tpos], - n_tokens - tpos, - instr, - data); +static int +instr_jmp_action_hit_translate(struct rte_swx_pipeline *p, + struct action *action, + char **tokens, + int n_tokens, + struct instruction *instr, + struct instruction_data *data) +{ + struct action *a; - if (!strcmp(tokens[tpos], "and")) + CHECK(!action, EINVAL); + CHECK(n_tokens == 3, EINVAL); + + strcpy(data->jmp_label, tokens[1]); + + a = action_find(p, tokens[2]); + CHECK(a, EINVAL); + + instr->type = INSTR_JMP_ACTION_HIT; + instr->jmp.ip = NULL; /* Resolved later. */ + instr->jmp.action_id = a->id; + return 0; +} + +static int +instr_jmp_action_miss_translate(struct rte_swx_pipeline *p, + struct action *action, + char **tokens, + int n_tokens, + struct instruction *instr, + struct instruction_data *data) +{ + struct action *a; + + CHECK(!action, EINVAL); + CHECK(n_tokens == 3, EINVAL); + + strcpy(data->jmp_label, tokens[1]); + + a = action_find(p, tokens[2]); + CHECK(a, EINVAL); + + instr->type = INSTR_JMP_ACTION_MISS; + instr->jmp.ip = NULL; /* Resolved later. */ + instr->jmp.action_id = a->id; + return 0; +} + +static int +instr_jmp_eq_translate(struct rte_swx_pipeline *p, + struct action *action, + char **tokens, + int n_tokens, + struct instruction *instr, + struct instruction_data *data) +{ + char *a = tokens[2], *b = tokens[3]; + struct field *fa, *fb; + uint64_t b_val; + uint32_t a_struct_id, b_struct_id; + + CHECK(n_tokens == 4, EINVAL); + + strcpy(data->jmp_label, tokens[1]); + + fa = struct_field_parse(p, action, a, &a_struct_id); + CHECK(fa, EINVAL); + + /* JMP_EQ or JMP_EQ_S. 
*/ + fb = struct_field_parse(p, action, b, &b_struct_id); + if (fb) { + instr->type = INSTR_JMP_EQ; + if ((a[0] == 'h' && b[0] != 'h') || + (a[0] != 'h' && b[0] == 'h')) + instr->type = INSTR_JMP_EQ_S; + instr->jmp.ip = NULL; /* Resolved later. */ + + instr->jmp.a.struct_id = (uint8_t)a_struct_id; + instr->jmp.a.n_bits = fa->n_bits; + instr->jmp.a.offset = fa->offset / 8; + instr->jmp.b.struct_id = (uint8_t)b_struct_id; + instr->jmp.b.n_bits = fb->n_bits; + instr->jmp.b.offset = fb->offset / 8; + return 0; + } + + /* JMP_EQ_I. */ + b_val = strtoull(b, &b, 0); + CHECK(!b[0], EINVAL); + + if (a[0] == 'h') + b_val = hton64(b_val) >> (64 - fa->n_bits); + + instr->type = INSTR_JMP_EQ_I; + instr->jmp.ip = NULL; /* Resolved later. */ + instr->jmp.a.struct_id = (uint8_t)a_struct_id; + instr->jmp.a.n_bits = fa->n_bits; + instr->jmp.a.offset = fa->offset / 8; + instr->jmp.b_val = b_val; + return 0; +} + +static int +instr_jmp_neq_translate(struct rte_swx_pipeline *p, + struct action *action, + char **tokens, + int n_tokens, + struct instruction *instr, + struct instruction_data *data) +{ + char *a = tokens[2], *b = tokens[3]; + struct field *fa, *fb; + uint64_t b_val; + uint32_t a_struct_id, b_struct_id; + + CHECK(n_tokens == 4, EINVAL); + + strcpy(data->jmp_label, tokens[1]); + + fa = struct_field_parse(p, action, a, &a_struct_id); + CHECK(fa, EINVAL); + + /* JMP_NEQ or JMP_NEQ_S. */ + fb = struct_field_parse(p, action, b, &b_struct_id); + if (fb) { + instr->type = INSTR_JMP_NEQ; + if ((a[0] == 'h' && b[0] != 'h') || + (a[0] != 'h' && b[0] == 'h')) + instr->type = INSTR_JMP_NEQ_S; + instr->jmp.ip = NULL; /* Resolved later. */ + + instr->jmp.a.struct_id = (uint8_t)a_struct_id; + instr->jmp.a.n_bits = fa->n_bits; + instr->jmp.a.offset = fa->offset / 8; + instr->jmp.b.struct_id = (uint8_t)b_struct_id; + instr->jmp.b.n_bits = fb->n_bits; + instr->jmp.b.offset = fb->offset / 8; + return 0; + } + + /* JMP_NEQ_I. */ + b_val = strtoull(b, &b, 0); + CHECK(!b[0], EINVAL); + + if (a[0] == 'h') + b_val = hton64(b_val) >> (64 - fa->n_bits); + + instr->type = INSTR_JMP_NEQ_I; + instr->jmp.ip = NULL; /* Resolved later. */ + instr->jmp.a.struct_id = (uint8_t)a_struct_id; + instr->jmp.a.n_bits = fa->n_bits; + instr->jmp.a.offset = fa->offset / 8; + instr->jmp.b_val = b_val; + return 0; +} + +static int +instr_jmp_lt_translate(struct rte_swx_pipeline *p, + struct action *action, + char **tokens, + int n_tokens, + struct instruction *instr, + struct instruction_data *data) +{ + char *a = tokens[2], *b = tokens[3]; + struct field *fa, *fb; + uint64_t b_val; + uint32_t a_struct_id, b_struct_id; + + CHECK(n_tokens == 4, EINVAL); + + strcpy(data->jmp_label, tokens[1]); + + fa = struct_field_parse(p, action, a, &a_struct_id); + CHECK(fa, EINVAL); + + /* JMP_LT, JMP_LT_MH, JMP_LT_HM, JMP_LT_HH. */ + fb = struct_field_parse(p, action, b, &b_struct_id); + if (fb) { + instr->type = INSTR_JMP_LT; + if (a[0] == 'h' && b[0] == 'm') + instr->type = INSTR_JMP_LT_HM; + if (a[0] == 'm' && b[0] == 'h') + instr->type = INSTR_JMP_LT_MH; + if (a[0] == 'h' && b[0] == 'h') + instr->type = INSTR_JMP_LT_HH; + instr->jmp.ip = NULL; /* Resolved later. */ + + instr->jmp.a.struct_id = (uint8_t)a_struct_id; + instr->jmp.a.n_bits = fa->n_bits; + instr->jmp.a.offset = fa->offset / 8; + instr->jmp.b.struct_id = (uint8_t)b_struct_id; + instr->jmp.b.n_bits = fb->n_bits; + instr->jmp.b.offset = fb->offset / 8; + return 0; + } + + /* JMP_LT_MI, JMP_LT_HI. 
*/ + b_val = strtoull(b, &b, 0); + CHECK(!b[0], EINVAL); + + instr->type = INSTR_JMP_LT_MI; + if (a[0] == 'h') + instr->type = INSTR_JMP_LT_HI; + instr->jmp.ip = NULL; /* Resolved later. */ + + instr->jmp.a.struct_id = (uint8_t)a_struct_id; + instr->jmp.a.n_bits = fa->n_bits; + instr->jmp.a.offset = fa->offset / 8; + instr->jmp.b_val = b_val; + return 0; +} + +static int +instr_jmp_gt_translate(struct rte_swx_pipeline *p, + struct action *action, + char **tokens, + int n_tokens, + struct instruction *instr, + struct instruction_data *data) +{ + char *a = tokens[2], *b = tokens[3]; + struct field *fa, *fb; + uint64_t b_val; + uint32_t a_struct_id, b_struct_id; + + CHECK(n_tokens == 4, EINVAL); + + strcpy(data->jmp_label, tokens[1]); + + fa = struct_field_parse(p, action, a, &a_struct_id); + CHECK(fa, EINVAL); + + /* JMP_GT, JMP_GT_MH, JMP_GT_HM, JMP_GT_HH. */ + fb = struct_field_parse(p, action, b, &b_struct_id); + if (fb) { + instr->type = INSTR_JMP_GT; + if (a[0] == 'h' && b[0] == 'm') + instr->type = INSTR_JMP_GT_HM; + if (a[0] == 'm' && b[0] == 'h') + instr->type = INSTR_JMP_GT_MH; + if (a[0] == 'h' && b[0] == 'h') + instr->type = INSTR_JMP_GT_HH; + instr->jmp.ip = NULL; /* Resolved later. */ + + instr->jmp.a.struct_id = (uint8_t)a_struct_id; + instr->jmp.a.n_bits = fa->n_bits; + instr->jmp.a.offset = fa->offset / 8; + instr->jmp.b.struct_id = (uint8_t)b_struct_id; + instr->jmp.b.n_bits = fb->n_bits; + instr->jmp.b.offset = fb->offset / 8; + return 0; + } + + /* JMP_GT_MI, JMP_GT_HI. */ + b_val = strtoull(b, &b, 0); + CHECK(!b[0], EINVAL); + + instr->type = INSTR_JMP_GT_MI; + if (a[0] == 'h') + instr->type = INSTR_JMP_GT_HI; + instr->jmp.ip = NULL; /* Resolved later. */ + + instr->jmp.a.struct_id = (uint8_t)a_struct_id; + instr->jmp.a.n_bits = fa->n_bits; + instr->jmp.a.offset = fa->offset / 8; + instr->jmp.b_val = b_val; + return 0; +} + +static inline void +instr_jmp_exec(struct rte_swx_pipeline *p) +{ + struct thread *t = &p->threads[p->thread_id]; + struct instruction *ip = t->ip; + + TRACE("[Thread %2u] jmp\n", p->thread_id); + + thread_ip_set(t, ip->jmp.ip); +} + +static inline void +instr_jmp_valid_exec(struct rte_swx_pipeline *p) +{ + struct thread *t = &p->threads[p->thread_id]; + struct instruction *ip = t->ip; + uint32_t header_id = ip->jmp.header_id; + + TRACE("[Thread %2u] jmpv\n", p->thread_id); + + t->ip = HEADER_VALID(t, header_id) ? ip->jmp.ip : (t->ip + 1); +} + +static inline void +instr_jmp_invalid_exec(struct rte_swx_pipeline *p) +{ + struct thread *t = &p->threads[p->thread_id]; + struct instruction *ip = t->ip; + uint32_t header_id = ip->jmp.header_id; + + TRACE("[Thread %2u] jmpnv\n", p->thread_id); + + t->ip = HEADER_VALID(t, header_id) ? 
(t->ip + 1) : ip->jmp.ip; +} + +static inline void +instr_jmp_hit_exec(struct rte_swx_pipeline *p) +{ + struct thread *t = &p->threads[p->thread_id]; + struct instruction *ip = t->ip; + struct instruction *ip_next[] = {t->ip + 1, ip->jmp.ip}; + + TRACE("[Thread %2u] jmph\n", p->thread_id); + + t->ip = ip_next[t->hit]; +} + +static inline void +instr_jmp_miss_exec(struct rte_swx_pipeline *p) +{ + struct thread *t = &p->threads[p->thread_id]; + struct instruction *ip = t->ip; + struct instruction *ip_next[] = {ip->jmp.ip, t->ip + 1}; + + TRACE("[Thread %2u] jmpnh\n", p->thread_id); + + t->ip = ip_next[t->hit]; +} + +static inline void +instr_jmp_action_hit_exec(struct rte_swx_pipeline *p) +{ + struct thread *t = &p->threads[p->thread_id]; + struct instruction *ip = t->ip; + + TRACE("[Thread %2u] jmpa\n", p->thread_id); + + t->ip = (ip->jmp.action_id == t->action_id) ? ip->jmp.ip : (t->ip + 1); +} + +static inline void +instr_jmp_action_miss_exec(struct rte_swx_pipeline *p) +{ + struct thread *t = &p->threads[p->thread_id]; + struct instruction *ip = t->ip; + + TRACE("[Thread %2u] jmpna\n", p->thread_id); + + t->ip = (ip->jmp.action_id == t->action_id) ? (t->ip + 1) : ip->jmp.ip; +} + +static inline void +instr_jmp_eq_exec(struct rte_swx_pipeline *p) +{ + struct thread *t = &p->threads[p->thread_id]; + struct instruction *ip = t->ip; + + TRACE("[Thread %2u] jmpeq\n", p->thread_id); + + JMP_CMP(t, ip, ==); +} + +static inline void +instr_jmp_eq_s_exec(struct rte_swx_pipeline *p) +{ + struct thread *t = &p->threads[p->thread_id]; + struct instruction *ip = t->ip; + + TRACE("[Thread %2u] jmpeq (s)\n", p->thread_id); + + JMP_CMP_S(t, ip, ==); +} + +static inline void +instr_jmp_eq_i_exec(struct rte_swx_pipeline *p) +{ + struct thread *t = &p->threads[p->thread_id]; + struct instruction *ip = t->ip; + + TRACE("[Thread %2u] jmpeq (i)\n", p->thread_id); + + JMP_CMP_I(t, ip, ==); +} + +static inline void +instr_jmp_neq_exec(struct rte_swx_pipeline *p) +{ + struct thread *t = &p->threads[p->thread_id]; + struct instruction *ip = t->ip; + + TRACE("[Thread %2u] jmpneq\n", p->thread_id); + + JMP_CMP(t, ip, !=); +} + +static inline void +instr_jmp_neq_s_exec(struct rte_swx_pipeline *p) +{ + struct thread *t = &p->threads[p->thread_id]; + struct instruction *ip = t->ip; + + TRACE("[Thread %2u] jmpneq (s)\n", p->thread_id); + + JMP_CMP_S(t, ip, !=); +} + +static inline void +instr_jmp_neq_i_exec(struct rte_swx_pipeline *p) +{ + struct thread *t = &p->threads[p->thread_id]; + struct instruction *ip = t->ip; + + TRACE("[Thread %2u] jmpneq (i)\n", p->thread_id); + + JMP_CMP_I(t, ip, !=); +} + +static inline void +instr_jmp_lt_exec(struct rte_swx_pipeline *p) +{ + struct thread *t = &p->threads[p->thread_id]; + struct instruction *ip = t->ip; + + TRACE("[Thread %2u] jmplt\n", p->thread_id); + + JMP_CMP(t, ip, <); +} + +static inline void +instr_jmp_lt_mh_exec(struct rte_swx_pipeline *p) +{ + struct thread *t = &p->threads[p->thread_id]; + struct instruction *ip = t->ip; + + TRACE("[Thread %2u] jmplt (mh)\n", p->thread_id); + + JMP_CMP_MH(t, ip, <); +} + +static inline void +instr_jmp_lt_hm_exec(struct rte_swx_pipeline *p) +{ + struct thread *t = &p->threads[p->thread_id]; + struct instruction *ip = t->ip; + + TRACE("[Thread %2u] jmplt (hm)\n", p->thread_id); + + JMP_CMP_HM(t, ip, <); +} + +static inline void +instr_jmp_lt_hh_exec(struct rte_swx_pipeline *p) +{ + struct thread *t = &p->threads[p->thread_id]; + struct instruction *ip = t->ip; + + TRACE("[Thread %2u] jmplt (hh)\n", p->thread_id); + + JMP_CMP_HH(t, 
ip, <); +} + +static inline void +instr_jmp_lt_mi_exec(struct rte_swx_pipeline *p) +{ + struct thread *t = &p->threads[p->thread_id]; + struct instruction *ip = t->ip; + + TRACE("[Thread %2u] jmplt (mi)\n", p->thread_id); + + JMP_CMP_MI(t, ip, <); +} + +static inline void +instr_jmp_lt_hi_exec(struct rte_swx_pipeline *p) +{ + struct thread *t = &p->threads[p->thread_id]; + struct instruction *ip = t->ip; + + TRACE("[Thread %2u] jmplt (hi)\n", p->thread_id); + + JMP_CMP_HI(t, ip, <); +} + +static inline void +instr_jmp_gt_exec(struct rte_swx_pipeline *p) +{ + struct thread *t = &p->threads[p->thread_id]; + struct instruction *ip = t->ip; + + TRACE("[Thread %2u] jmpgt\n", p->thread_id); + + JMP_CMP(t, ip, >); +} + +static inline void +instr_jmp_gt_mh_exec(struct rte_swx_pipeline *p) +{ + struct thread *t = &p->threads[p->thread_id]; + struct instruction *ip = t->ip; + + TRACE("[Thread %2u] jmpgt (mh)\n", p->thread_id); + + JMP_CMP_MH(t, ip, >); +} + +static inline void +instr_jmp_gt_hm_exec(struct rte_swx_pipeline *p) +{ + struct thread *t = &p->threads[p->thread_id]; + struct instruction *ip = t->ip; + + TRACE("[Thread %2u] jmpgt (hm)\n", p->thread_id); + + JMP_CMP_HM(t, ip, >); +} + +static inline void +instr_jmp_gt_hh_exec(struct rte_swx_pipeline *p) +{ + struct thread *t = &p->threads[p->thread_id]; + struct instruction *ip = t->ip; + + TRACE("[Thread %2u] jmpgt (hh)\n", p->thread_id); + + JMP_CMP_HH(t, ip, >); +} + +static inline void +instr_jmp_gt_mi_exec(struct rte_swx_pipeline *p) +{ + struct thread *t = &p->threads[p->thread_id]; + struct instruction *ip = t->ip; + + TRACE("[Thread %2u] jmpgt (mi)\n", p->thread_id); + + JMP_CMP_MI(t, ip, >); +} + +static inline void +instr_jmp_gt_hi_exec(struct rte_swx_pipeline *p) +{ + struct thread *t = &p->threads[p->thread_id]; + struct instruction *ip = t->ip; + + TRACE("[Thread %2u] jmpgt (hi)\n", p->thread_id); + + JMP_CMP_HI(t, ip, >); +} + +/* + * return. + */ +static int +instr_return_translate(struct rte_swx_pipeline *p __rte_unused, + struct action *action, + char **tokens __rte_unused, + int n_tokens, + struct instruction *instr, + struct instruction_data *data __rte_unused) +{ + CHECK(action, EINVAL); + CHECK(n_tokens == 1, EINVAL); + + instr->type = INSTR_RETURN; + return 0; +} + +static inline void +instr_return_exec(struct rte_swx_pipeline *p) +{ + struct thread *t = &p->threads[p->thread_id]; + + TRACE("[Thread %2u] return\n", p->thread_id); + + t->ip = t->ret; +} + +static int +instr_translate(struct rte_swx_pipeline *p, + struct action *action, + char *string, + struct instruction *instr, + struct instruction_data *data) +{ + char *tokens[RTE_SWX_INSTRUCTION_TOKENS_MAX]; + int n_tokens = 0, tpos = 0; + + /* Parse the instruction string into tokens. */ + for ( ; ; ) { + char *token; + + token = strtok_r(string, " \t\v", &string); + if (!token) + break; + + CHECK(n_tokens < RTE_SWX_INSTRUCTION_TOKENS_MAX, EINVAL); + CHECK_NAME(token, EINVAL); + + tokens[n_tokens] = token; + n_tokens++; + } + + CHECK(n_tokens, EINVAL); + + /* Handle the optional instruction label. */ + if ((n_tokens >= 2) && !strcmp(tokens[1], ":")) { + strcpy(data->label, tokens[0]); + + tpos += 2; + CHECK(n_tokens - tpos, EINVAL); + } + + /* Identify the instruction type. 
*/ + if (!strcmp(tokens[tpos], "rx")) + return instr_rx_translate(p, + action, + &tokens[tpos], + n_tokens - tpos, + instr, + data); + + if (!strcmp(tokens[tpos], "tx")) + return instr_tx_translate(p, + action, + &tokens[tpos], + n_tokens - tpos, + instr, + data); + + if (!strcmp(tokens[tpos], "extract")) + return instr_hdr_extract_translate(p, + action, + &tokens[tpos], + n_tokens - tpos, + instr, + data); + + if (!strcmp(tokens[tpos], "emit")) + return instr_hdr_emit_translate(p, + action, + &tokens[tpos], + n_tokens - tpos, + instr, + data); + + if (!strcmp(tokens[tpos], "validate")) + return instr_hdr_validate_translate(p, + action, + &tokens[tpos], + n_tokens - tpos, + instr, + data); + + if (!strcmp(tokens[tpos], "invalidate")) + return instr_hdr_invalidate_translate(p, + action, + &tokens[tpos], + n_tokens - tpos, + instr, + data); + + if (!strcmp(tokens[tpos], "mov")) + return instr_mov_translate(p, + action, + &tokens[tpos], + n_tokens - tpos, + instr, + data); + + if (!strcmp(tokens[tpos], "dma")) + return instr_dma_translate(p, + action, + &tokens[tpos], + n_tokens - tpos, + instr, + data); + + if (!strcmp(tokens[tpos], "add")) + return instr_alu_add_translate(p, + action, + &tokens[tpos], + n_tokens - tpos, + instr, + data); + + if (!strcmp(tokens[tpos], "sub")) + return instr_alu_sub_translate(p, + action, + &tokens[tpos], + n_tokens - tpos, + instr, + data); + + if (!strcmp(tokens[tpos], "ckadd")) + return instr_alu_ckadd_translate(p, + action, + &tokens[tpos], + n_tokens - tpos, + instr, + data); + + if (!strcmp(tokens[tpos], "cksub")) + return instr_alu_cksub_translate(p, + action, + &tokens[tpos], + n_tokens - tpos, + instr, + data); + + if (!strcmp(tokens[tpos], "and")) return instr_alu_and_translate(p, action, &tokens[tpos], @@ -4492,102 +5443,510 @@ instr_translate(struct rte_swx_pipeline *p, instr, data); - if (!strcmp(tokens[tpos], "or")) - return instr_alu_or_translate(p, - action, - &tokens[tpos], - n_tokens - tpos, - instr, - data); + if (!strcmp(tokens[tpos], "or")) + return instr_alu_or_translate(p, + action, + &tokens[tpos], + n_tokens - tpos, + instr, + data); + + if (!strcmp(tokens[tpos], "xor")) + return instr_alu_xor_translate(p, + action, + &tokens[tpos], + n_tokens - tpos, + instr, + data); + + if (!strcmp(tokens[tpos], "shl")) + return instr_alu_shl_translate(p, + action, + &tokens[tpos], + n_tokens - tpos, + instr, + data); + + if (!strcmp(tokens[tpos], "shr")) + return instr_alu_shr_translate(p, + action, + &tokens[tpos], + n_tokens - tpos, + instr, + data); + + if (!strcmp(tokens[tpos], "table")) + return instr_table_translate(p, + action, + &tokens[tpos], + n_tokens - tpos, + instr, + data); + + if (!strcmp(tokens[tpos], "extern")) + return instr_extern_translate(p, + action, + &tokens[tpos], + n_tokens - tpos, + instr, + data); + + if (!strcmp(tokens[tpos], "jmp")) + return instr_jmp_translate(p, + action, + &tokens[tpos], + n_tokens - tpos, + instr, + data); + + if (!strcmp(tokens[tpos], "jmpv")) + return instr_jmp_valid_translate(p, + action, + &tokens[tpos], + n_tokens - tpos, + instr, + data); + + if (!strcmp(tokens[tpos], "jmpnv")) + return instr_jmp_invalid_translate(p, + action, + &tokens[tpos], + n_tokens - tpos, + instr, + data); + + if (!strcmp(tokens[tpos], "jmph")) + return instr_jmp_hit_translate(p, + action, + &tokens[tpos], + n_tokens - tpos, + instr, + data); + + if (!strcmp(tokens[tpos], "jmpnh")) + return instr_jmp_miss_translate(p, + action, + &tokens[tpos], + n_tokens - tpos, + instr, + data); + + if (!strcmp(tokens[tpos], 
"jmpa")) + return instr_jmp_action_hit_translate(p, + action, + &tokens[tpos], + n_tokens - tpos, + instr, + data); + + if (!strcmp(tokens[tpos], "jmpna")) + return instr_jmp_action_miss_translate(p, + action, + &tokens[tpos], + n_tokens - tpos, + instr, + data); + + if (!strcmp(tokens[tpos], "jmpeq")) + return instr_jmp_eq_translate(p, + action, + &tokens[tpos], + n_tokens - tpos, + instr, + data); + + if (!strcmp(tokens[tpos], "jmpneq")) + return instr_jmp_neq_translate(p, + action, + &tokens[tpos], + n_tokens - tpos, + instr, + data); + + if (!strcmp(tokens[tpos], "jmplt")) + return instr_jmp_lt_translate(p, + action, + &tokens[tpos], + n_tokens - tpos, + instr, + data); + + if (!strcmp(tokens[tpos], "jmpgt")) + return instr_jmp_gt_translate(p, + action, + &tokens[tpos], + n_tokens - tpos, + instr, + data); + + if (!strcmp(tokens[tpos], "return")) + return instr_return_translate(p, + action, + &tokens[tpos], + n_tokens - tpos, + instr, + data); + + CHECK(0, EINVAL); +} + +static struct instruction_data * +label_find(struct instruction_data *data, uint32_t n, const char *label) +{ + uint32_t i; + + for (i = 0; i < n; i++) + if (!strcmp(label, data[i].label)) + return &data[i]; + + return NULL; +} + +static uint32_t +label_is_used(struct instruction_data *data, uint32_t n, const char *label) +{ + uint32_t count = 0, i; + + if (!label[0]) + return 0; + + for (i = 0; i < n; i++) + if (!strcmp(label, data[i].jmp_label)) + count++; + + return count; +} + +static int +instr_label_check(struct instruction_data *instruction_data, + uint32_t n_instructions) +{ + uint32_t i; + + /* Check that all instruction labels are unique. */ + for (i = 0; i < n_instructions; i++) { + struct instruction_data *data = &instruction_data[i]; + char *label = data->label; + uint32_t j; + + if (!label[0]) + continue; + + for (j = i + 1; j < n_instructions; j++) + CHECK(strcmp(label, data[j].label), EINVAL); + } + + /* Get users for each instruction label. */ + for (i = 0; i < n_instructions; i++) { + struct instruction_data *data = &instruction_data[i]; + char *label = data->label; + + data->n_users = label_is_used(instruction_data, + n_instructions, + label); + } + + return 0; +} + +static int +instr_jmp_resolve(struct instruction *instructions, + struct instruction_data *instruction_data, + uint32_t n_instructions) +{ + uint32_t i; + + for (i = 0; i < n_instructions; i++) { + struct instruction *instr = &instructions[i]; + struct instruction_data *data = &instruction_data[i]; + struct instruction_data *found; + + if (!instruction_is_jmp(instr)) + continue; + + found = label_find(instruction_data, + n_instructions, + data->jmp_label); + CHECK(found, EINVAL); + + instr->jmp.ip = &instructions[found - instruction_data]; + } + + return 0; +} + +static int +instr_verify(struct rte_swx_pipeline *p __rte_unused, + struct action *a, + struct instruction *instr, + struct instruction_data *data __rte_unused, + uint32_t n_instructions) +{ + if (!a) { + enum instruction_type type; + uint32_t i; + + /* Check that the first instruction is rx. */ + CHECK(instr[0].type == INSTR_RX, EINVAL); + + /* Check that there is at least one tx instruction. */ + for (i = 0; i < n_instructions; i++) { + type = instr[i].type; + + if (type == INSTR_TX) + break; + } + CHECK(i < n_instructions, EINVAL); + + /* Check that the last instruction is either tx or unconditional + * jump. 
+ */ + type = instr[n_instructions - 1].type; + CHECK((type == INSTR_TX) || (type == INSTR_JMP), EINVAL); + } + + if (a) { + enum instruction_type type; + uint32_t i; + + /* Check that there is at least one return or tx instruction. */ + for (i = 0; i < n_instructions; i++) { + type = instr[i].type; + + if ((type == INSTR_RETURN) || (type == INSTR_TX)) + break; + } + CHECK(i < n_instructions, EINVAL); + } + + return 0; +} + +static int +instr_pattern_extract_many_detect(struct instruction *instr, + struct instruction_data *data, + uint32_t n_instr, + uint32_t *n_pattern_instr) +{ + uint32_t i; + + for (i = 0; i < n_instr; i++) { + if (data[i].invalid) + break; + + if (instr[i].type != INSTR_HDR_EXTRACT) + break; + + if (i == RTE_DIM(instr->io.hdr.header_id)) + break; + + if (i && data[i].n_users) + break; + } + + if (i < 2) + return 0; + + *n_pattern_instr = i; + return 1; +} + +static void +instr_pattern_extract_many_optimize(struct instruction *instr, + struct instruction_data *data, + uint32_t n_instr) +{ + uint32_t i; + + for (i = 1; i < n_instr; i++) { + instr[0].type++; + instr[0].io.hdr.header_id[i] = instr[i].io.hdr.header_id[0]; + instr[0].io.hdr.struct_id[i] = instr[i].io.hdr.struct_id[0]; + instr[0].io.hdr.n_bytes[i] = instr[i].io.hdr.n_bytes[0]; + + data[i].invalid = 1; + } +} + +static int +instr_pattern_emit_many_tx_detect(struct instruction *instr, + struct instruction_data *data, + uint32_t n_instr, + uint32_t *n_pattern_instr) +{ + uint32_t i; + + for (i = 0; i < n_instr; i++) { + if (data[i].invalid) + break; - if (!strcmp(tokens[tpos], "xor")) - return instr_alu_xor_translate(p, - action, - &tokens[tpos], - n_tokens - tpos, - instr, - data); + if (instr[i].type != INSTR_HDR_EMIT) + break; - if (!strcmp(tokens[tpos], "shl")) - return instr_alu_shl_translate(p, - action, - &tokens[tpos], - n_tokens - tpos, - instr, - data); + if (i == RTE_DIM(instr->io.hdr.header_id)) + break; - if (!strcmp(tokens[tpos], "shr")) - return instr_alu_shr_translate(p, - action, - &tokens[tpos], - n_tokens - tpos, - instr, - data); + if (i && data[i].n_users) + break; + } - if (!strcmp(tokens[tpos], "table")) - return instr_table_translate(p, - action, - &tokens[tpos], - n_tokens - tpos, - instr, - data); + if (!i) + return 0; - if (!strcmp(tokens[tpos], "extern")) - return instr_extern_translate(p, - action, - &tokens[tpos], - n_tokens - tpos, - instr, - data); + if (instr[i].type != INSTR_TX) + return 0; - CHECK(0, EINVAL); + if (data[i].n_users) + return 0; + + i++; + + *n_pattern_instr = i; + return 1; } -static uint32_t -label_is_used(struct instruction_data *data, uint32_t n, const char *label) +static void +instr_pattern_emit_many_tx_optimize(struct instruction *instr, + struct instruction_data *data, + uint32_t n_instr) { - uint32_t count = 0, i; + uint32_t i; - if (!label[0]) - return 0; + /* Any emit instruction in addition to the first one. */ + for (i = 1; i < n_instr - 1; i++) { + instr[0].type++; + instr[0].io.hdr.header_id[i] = instr[i].io.hdr.header_id[0]; + instr[0].io.hdr.struct_id[i] = instr[i].io.hdr.struct_id[0]; + instr[0].io.hdr.n_bytes[i] = instr[i].io.hdr.n_bytes[0]; - for (i = 0; i < n; i++) - if (!strcmp(label, data[i].jmp_label)) - count++; + data[i].invalid = 1; + } - return count; + /* The TX instruction is the last one in the pattern. 
*/ + instr[0].type++; + instr[0].io.io.offset = instr[i].io.io.offset; + instr[0].io.io.n_bits = instr[i].io.io.n_bits; + data[i].invalid = 1; } static int -instr_label_check(struct instruction_data *instruction_data, - uint32_t n_instructions) +instr_pattern_dma_many_detect(struct instruction *instr, + struct instruction_data *data, + uint32_t n_instr, + uint32_t *n_pattern_instr) { uint32_t i; - /* Check that all instruction labels are unique. */ - for (i = 0; i < n_instructions; i++) { + for (i = 0; i < n_instr; i++) { + if (data[i].invalid) + break; + + if (instr[i].type != INSTR_DMA_HT) + break; + + if (i == RTE_DIM(instr->dma.dst.header_id)) + break; + + if (i && data[i].n_users) + break; + } + + if (i < 2) + return 0; + + *n_pattern_instr = i; + return 1; +} + +static void +instr_pattern_dma_many_optimize(struct instruction *instr, + struct instruction_data *data, + uint32_t n_instr) +{ + uint32_t i; + + for (i = 1; i < n_instr; i++) { + instr[0].type++; + instr[0].dma.dst.header_id[i] = instr[i].dma.dst.header_id[0]; + instr[0].dma.dst.struct_id[i] = instr[i].dma.dst.struct_id[0]; + instr[0].dma.src.offset[i] = instr[i].dma.src.offset[0]; + instr[0].dma.n_bytes[i] = instr[i].dma.n_bytes[0]; + + data[i].invalid = 1; + } +} + +static uint32_t +instr_optimize(struct instruction *instructions, + struct instruction_data *instruction_data, + uint32_t n_instructions) +{ + uint32_t i, pos = 0; + + for (i = 0; i < n_instructions; ) { + struct instruction *instr = &instructions[i]; struct instruction_data *data = &instruction_data[i]; - char *label = data->label; - uint32_t j; + uint32_t n_instr = 0; + int detected; + + /* Extract many. */ + detected = instr_pattern_extract_many_detect(instr, + data, + n_instructions - i, + &n_instr); + if (detected) { + instr_pattern_extract_many_optimize(instr, + data, + n_instr); + i += n_instr; + continue; + } - if (!label[0]) + /* Emit many + TX. */ + detected = instr_pattern_emit_many_tx_detect(instr, + data, + n_instructions - i, + &n_instr); + if (detected) { + instr_pattern_emit_many_tx_optimize(instr, + data, + n_instr); + i += n_instr; continue; + } - for (j = i + 1; j < n_instructions; j++) - CHECK(strcmp(label, data[j].label), EINVAL); + /* DMA many. */ + detected = instr_pattern_dma_many_detect(instr, + data, + n_instructions - i, + &n_instr); + if (detected) { + instr_pattern_dma_many_optimize(instr, data, n_instr); + i += n_instr; + continue; + } + + /* No pattern starting at the current instruction. */ + i++; } - /* Get users for each instruction label. */ + /* Eliminate the invalid instructions that have been optimized out. */ for (i = 0; i < n_instructions; i++) { + struct instruction *instr = &instructions[i]; struct instruction_data *data = &instruction_data[i]; - char *label = data->label; - data->n_users = label_is_used(instruction_data, - n_instructions, - label); + if (data->invalid) + continue; + + if (i != pos) { + memcpy(&instructions[pos], instr, sizeof(*instr)); + memcpy(&instruction_data[pos], data, sizeof(*data)); + } + + pos++; } - return 0; + return pos; } static int @@ -4598,14 +5957,13 @@ instruction_config(struct rte_swx_pipeline *p, { struct instruction *instr = NULL; struct instruction_data *data = NULL; - char *string = NULL; int err = 0; uint32_t i; CHECK(n_instructions, EINVAL); CHECK(instructions, EINVAL); for (i = 0; i < n_instructions; i++) - CHECK(instructions[i], EINVAL); + CHECK_INSTRUCTION(instructions[i], EINVAL); /* Memory allocation. 
*/ instr = calloc(n_instructions, sizeof(struct instruction)); @@ -4621,15 +5979,17 @@ instruction_config(struct rte_swx_pipeline *p, } for (i = 0; i < n_instructions; i++) { - string = strdup(instructions[i]); + char *string = strdup(instructions[i]); if (!string) { err = ENOMEM; goto error; } err = instr_translate(p, a, string, &instr[i], &data[i]); - if (err) + if (err) { + free(string); goto error; + } free(string); } @@ -4638,7 +5998,15 @@ instruction_config(struct rte_swx_pipeline *p, if (err) goto error; - free(data); + err = instr_verify(p, a, instr, data, n_instructions); + if (err) + goto error; + + n_instructions = instr_optimize(instr, data, n_instructions); + + err = instr_jmp_resolve(instr, data, n_instructions); + if (err) + goto error; if (a) { a->instructions = instr; @@ -4648,10 +6016,10 @@ instruction_config(struct rte_swx_pipeline *p, p->n_instructions = n_instructions; } + free(data); return 0; error: - free(string); free(data); free(instr); return err; @@ -4746,6 +6114,38 @@ static instr_exec_t instruction_table[] = { [INSTR_TABLE] = instr_table_exec, [INSTR_EXTERN_OBJ] = instr_extern_obj_exec, [INSTR_EXTERN_FUNC] = instr_extern_func_exec, + + [INSTR_JMP] = instr_jmp_exec, + [INSTR_JMP_VALID] = instr_jmp_valid_exec, + [INSTR_JMP_INVALID] = instr_jmp_invalid_exec, + [INSTR_JMP_HIT] = instr_jmp_hit_exec, + [INSTR_JMP_MISS] = instr_jmp_miss_exec, + [INSTR_JMP_ACTION_HIT] = instr_jmp_action_hit_exec, + [INSTR_JMP_ACTION_MISS] = instr_jmp_action_miss_exec, + + [INSTR_JMP_EQ] = instr_jmp_eq_exec, + [INSTR_JMP_EQ_S] = instr_jmp_eq_s_exec, + [INSTR_JMP_EQ_I] = instr_jmp_eq_i_exec, + + [INSTR_JMP_NEQ] = instr_jmp_neq_exec, + [INSTR_JMP_NEQ_S] = instr_jmp_neq_s_exec, + [INSTR_JMP_NEQ_I] = instr_jmp_neq_i_exec, + + [INSTR_JMP_LT] = instr_jmp_lt_exec, + [INSTR_JMP_LT_MH] = instr_jmp_lt_mh_exec, + [INSTR_JMP_LT_HM] = instr_jmp_lt_hm_exec, + [INSTR_JMP_LT_HH] = instr_jmp_lt_hh_exec, + [INSTR_JMP_LT_MI] = instr_jmp_lt_mi_exec, + [INSTR_JMP_LT_HI] = instr_jmp_lt_hi_exec, + + [INSTR_JMP_GT] = instr_jmp_gt_exec, + [INSTR_JMP_GT_MH] = instr_jmp_gt_mh_exec, + [INSTR_JMP_GT_HM] = instr_jmp_gt_hm_exec, + [INSTR_JMP_GT_HH] = instr_jmp_gt_hh_exec, + [INSTR_JMP_GT_MI] = instr_jmp_gt_mi_exec, + [INSTR_JMP_GT_HI] = instr_jmp_gt_hi_exec, + + [INSTR_RETURN] = instr_return_exec, }; static inline void @@ -4776,6 +6176,18 @@ action_find(struct rte_swx_pipeline *p, const char *name) return NULL; } +static struct action * +action_find_by_id(struct rte_swx_pipeline *p, uint32_t id) +{ + struct action *action = NULL; + + TAILQ_FOREACH(action, &p->actions, node) + if (action->id == id) + return action; + + return NULL; +} + static struct field * action_field_find(struct action *a, const char *name) { @@ -5055,7 +6467,7 @@ rte_swx_pipeline_table_config(struct rte_swx_pipeline *p, struct action *a; uint32_t action_data_size; - CHECK(action_name, EINVAL); + CHECK_NAME(action_name, EINVAL); a = action_find(p, action_name); CHECK(a, EINVAL); @@ -5065,7 +6477,7 @@ rte_swx_pipeline_table_config(struct rte_swx_pipeline *p, action_data_size_max = action_data_size; } - CHECK(params->default_action_name, EINVAL); + CHECK_NAME(params->default_action_name, EINVAL); for (i = 0; i < p->n_actions; i++) if (!strcmp(params->action_names[i], params->default_action_name)) @@ -5076,6 +6488,9 @@ rte_swx_pipeline_table_config(struct rte_swx_pipeline *p, !params->default_action_data, EINVAL); /* Table type checks. 
*/ + if (recommended_table_type_name) + CHECK_NAME(recommended_table_type_name, EINVAL); + if (params->n_fields) { enum rte_swx_table_match_type match_type; @@ -5563,9 +6978,193 @@ rte_swx_pipeline_run(struct rte_swx_pipeline *p, uint32_t n_instructions) instr_exec(p); } +void +rte_swx_pipeline_flush(struct rte_swx_pipeline *p) +{ + uint32_t i; + + for (i = 0; i < p->n_ports_out; i++) { + struct port_out_runtime *port = &p->out[i]; + + if (port->flush) + port->flush(port->obj); + } +} + /* * Control. */ +int +rte_swx_ctl_pipeline_info_get(struct rte_swx_pipeline *p, + struct rte_swx_ctl_pipeline_info *pipeline) +{ + struct action *action; + struct table *table; + uint32_t n_actions = 0, n_tables = 0; + + if (!p || !pipeline) + return -EINVAL; + + TAILQ_FOREACH(action, &p->actions, node) + n_actions++; + + TAILQ_FOREACH(table, &p->tables, node) + n_tables++; + + pipeline->n_ports_in = p->n_ports_in; + pipeline->n_ports_out = p->n_ports_out; + pipeline->n_actions = n_actions; + pipeline->n_tables = n_tables; + + return 0; +} + +int +rte_swx_ctl_pipeline_numa_node_get(struct rte_swx_pipeline *p, int *numa_node) +{ + if (!p || !numa_node) + return -EINVAL; + + *numa_node = p->numa_node; + return 0; +} + +int +rte_swx_ctl_action_info_get(struct rte_swx_pipeline *p, + uint32_t action_id, + struct rte_swx_ctl_action_info *action) +{ + struct action *a = NULL; + + if (!p || (action_id >= p->n_actions) || !action) + return -EINVAL; + + a = action_find_by_id(p, action_id); + if (!a) + return -EINVAL; + + strcpy(action->name, a->name); + action->n_args = a->st ? a->st->n_fields : 0; + return 0; +} + +int +rte_swx_ctl_action_arg_info_get(struct rte_swx_pipeline *p, + uint32_t action_id, + uint32_t action_arg_id, + struct rte_swx_ctl_action_arg_info *action_arg) +{ + struct action *a = NULL; + struct field *arg = NULL; + + if (!p || (action_id >= p->n_actions) || !action_arg) + return -EINVAL; + + a = action_find_by_id(p, action_id); + if (!a || !a->st || (action_arg_id >= a->st->n_fields)) + return -EINVAL; + + arg = &a->st->fields[action_arg_id]; + strcpy(action_arg->name, arg->name); + action_arg->n_bits = arg->n_bits; + + return 0; +} + +int +rte_swx_ctl_table_info_get(struct rte_swx_pipeline *p, + uint32_t table_id, + struct rte_swx_ctl_table_info *table) +{ + struct table *t = NULL; + + if (!p || !table) + return -EINVAL; + + t = table_find_by_id(p, table_id); + if (!t) + return -EINVAL; + + strcpy(table->name, t->name); + strcpy(table->args, t->args); + table->n_match_fields = t->n_fields; + table->n_actions = t->n_actions; + table->default_action_is_const = t->default_action_is_const; + table->size = t->size; + return 0; +} + +int +rte_swx_ctl_table_match_field_info_get(struct rte_swx_pipeline *p, + uint32_t table_id, + uint32_t match_field_id, + struct rte_swx_ctl_table_match_field_info *match_field) +{ + struct table *t; + struct match_field *f; + + if (!p || (table_id >= p->n_tables) || !match_field) + return -EINVAL; + + t = table_find_by_id(p, table_id); + if (!t || (match_field_id >= t->n_fields)) + return -EINVAL; + + f = &t->fields[match_field_id]; + match_field->match_type = f->match_type; + match_field->is_header = t->is_header; + match_field->n_bits = f->field->n_bits; + match_field->offset = f->field->offset; + + return 0; +} + +int +rte_swx_ctl_table_action_info_get(struct rte_swx_pipeline *p, + uint32_t table_id, + uint32_t table_action_id, + struct rte_swx_ctl_table_action_info *table_action) +{ + struct table *t; + + if (!p || (table_id >= p->n_tables) || !table_action) + return 
-EINVAL; + + t = table_find_by_id(p, table_id); + if (!t || (table_action_id >= t->n_actions)) + return -EINVAL; + + table_action->action_id = t->actions[table_action_id]->id; + + return 0; +} + +int +rte_swx_ctl_table_ops_get(struct rte_swx_pipeline *p, + uint32_t table_id, + struct rte_swx_table_ops *table_ops, + int *is_stub) +{ + struct table *t; + + if (!p || (table_id >= p->n_tables)) + return -EINVAL; + + t = table_find_by_id(p, table_id); + if (!t) + return -EINVAL; + + if (t->type) { + if (table_ops) + memcpy(table_ops, &t->type->ops, sizeof(*table_ops)); + *is_stub = 0; + } else { + *is_stub = 1; + } + + return 0; +} + int rte_swx_pipeline_table_state_get(struct rte_swx_pipeline *p, struct rte_swx_table_state **table_state) @@ -5587,3 +7186,39 @@ rte_swx_pipeline_table_state_set(struct rte_swx_pipeline *p, p->table_state = table_state; return 0; } + +int +rte_swx_ctl_pipeline_port_in_stats_read(struct rte_swx_pipeline *p, + uint32_t port_id, + struct rte_swx_port_in_stats *stats) +{ + struct port_in *port; + + if (!p || !stats) + return -EINVAL; + + port = port_in_find(p, port_id); + if (!port) + return -EINVAL; + + port->type->ops.stats_read(port->obj, stats); + return 0; +} + +int +rte_swx_ctl_pipeline_port_out_stats_read(struct rte_swx_pipeline *p, + uint32_t port_id, + struct rte_swx_port_out_stats *stats) +{ + struct port_out *port; + + if (!p || !stats) + return -EINVAL; + + port = port_out_find(p, port_id); + if (!port) + return -EINVAL; + + port->type->ops.stats_read(port->obj, stats); + return 0; +}
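
For context, a minimal sketch of the instruction-string syntax that the translator above accepts once the new opcodes are wired in. The header, metadata field, and label names below are hypothetical, the port/table/struct configuration that normally precedes instruction setup is omitted, and return (valid only inside an action) is not shown.

/* Hypothetical pipeline program exercising several of the new jump
 * opcodes. Labels are declared as "<NAME> :" and referenced by name
 * from the jump instructions; instr_jmp_resolve() patches the branch
 * targets after translation.
 */
static const char *example_program[] = {
	"rx m.port_in",
	"extract h.ethernet",
	"jmpnv DROP h.ethernet",                  /* jump if header invalid */
	"jmpeq IPV6 h.ethernet.ethertype 0x86dd", /* field == immediate */
	"mov m.port_out m.port_in",
	"jmp EMIT",                               /* unconditional jump */
	"IPV6 : mov m.port_out 1",
	"EMIT : emit h.ethernet",
	"tx m.port_out",
	"DROP : tx m.port_drop",
};

These strings satisfy the checks in instr_verify() (first instruction is rx, at least one tx, last instruction is tx or an unconditional jmp) and would be handed to instruction_config(), which translates, verifies, optimizes and finally resolves the jump labels.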
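
The JMP_CMP_S, JMP_CMP_HM, JMP_CMP_HH and JMP_CMP_HI variants differ from the base JMP_CMP macro only in how a big-endian header ('H') operand is brought into host byte order before the comparison. A standalone sketch of that normalization on a little-endian host, with be64toh() standing in for the ntoh64() helper used by the macros (an assumption made for portability of the example):

#include <endian.h>
#include <stdint.h>
#include <string.h>

/* Return the n_bits-wide big-endian field that starts at byte "offset"
 * inside "hdr" as a host-order value, the way the JMP_CMP_* macros
 * prepare an 'H' operand: load a full 8 bytes starting at the field
 * offset, byte-swap, then shift the field down into the least
 * significant bits. n_bits is assumed to be in the 1..64 range.
 */
static uint64_t
header_field_host_order(const uint8_t *hdr, uint32_t offset, uint32_t n_bits)
{
	uint64_t v;

	memcpy(&v, &hdr[offset], sizeof(v));
	return be64toh(v) >> (64 - n_bits);
}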
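
The query functions added at the end of the patch combine into a simple introspection walk. A minimal sketch, assuming an already built pipeline and that the declarations come from rte_swx_ctl.h, and using only the struct fields visible in this patch:

#include <stdio.h>
#include <rte_swx_ctl.h>

/* Dump overall pipeline counts, then per-action and per-table details,
 * through the control API introduced above.
 */
static int
pipeline_dump(struct rte_swx_pipeline *p)
{
	struct rte_swx_ctl_pipeline_info info;
	uint32_t i;
	int status;

	status = rte_swx_ctl_pipeline_info_get(p, &info);
	if (status)
		return status;

	printf("ports: %u in, %u out\n", info.n_ports_in, info.n_ports_out);

	for (i = 0; i < info.n_actions; i++) {
		struct rte_swx_ctl_action_info action;

		status = rte_swx_ctl_action_info_get(p, i, &action);
		if (status)
			return status;

		printf("action %s: %u args\n", action.name, action.n_args);
	}

	for (i = 0; i < info.n_tables; i++) {
		struct rte_swx_ctl_table_info table;

		status = rte_swx_ctl_table_info_get(p, i, &table);
		if (status)
			return status;

		printf("table %s: %u match fields, %u actions\n",
		       table.name, table.n_match_fields, table.n_actions);
	}

	return 0;
}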