+ MOV(t, ip);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+static inline void
+instr_mov_s_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] mov (s)\n",
+ p->thread_id);
+
+ MOV_S(t, ip);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+static inline void
+instr_mov_i_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] mov m.f %" PRIx64 "\n",
+ p->thread_id,
+ ip->mov.src_val);
+
+ MOV_I(t, ip);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/*
+ * dma.
+ */
+static int
+instr_dma_translate(struct rte_swx_pipeline *p,
+ struct action *action,
+ char **tokens,
+ int n_tokens,
+ struct instruction *instr,
+ struct instruction_data *data __rte_unused)
+{
+ char *dst = tokens[1];
+ char *src = tokens[2];
+ struct header *h;
+ struct field *tf;
+
+ CHECK(action, EINVAL);
+ CHECK(n_tokens == 3, EINVAL);
+
+ h = header_parse(p, dst);
+ CHECK(h, EINVAL);
+
+ tf = action_field_parse(action, src);
+ CHECK(tf, EINVAL);
+
+ instr->type = INSTR_DMA_HT;
+ instr->dma.dst.header_id[0] = h->id;
+ instr->dma.dst.struct_id[0] = h->struct_id;
+ instr->dma.n_bytes[0] = h->st->n_bits / 8;
+ instr->dma.src.offset[0] = tf->offset / 8;
+
+ return 0;
+}
+
+static inline void
+__instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma);
+
+static inline void
+__instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ uint8_t *action_data = t->structs[0];
+ uint64_t valid_headers = t->valid_headers;
+ uint32_t i;
+
+ for (i = 0; i < n_dma; i++) {
+ uint32_t header_id = ip->dma.dst.header_id[i];
+ uint32_t struct_id = ip->dma.dst.struct_id[i];
+ uint32_t offset = ip->dma.src.offset[i];
+ uint32_t n_bytes = ip->dma.n_bytes[i];
+
+ struct header_runtime *h = &t->headers[header_id];
+ uint8_t *h_ptr0 = h->ptr0;
+ uint8_t *h_ptr = t->structs[struct_id];
+
+ void *dst = MASK64_BIT_GET(valid_headers, header_id) ?
+ h_ptr : h_ptr0;
+ void *src = &action_data[offset];
+
+ TRACE("[Thread %2u] dma h.s t.f\n", p->thread_id);
+
+ /* Headers. */
+ memcpy(dst, src, n_bytes);
+ t->structs[struct_id] = dst;
+ valid_headers = MASK64_BIT_SET(valid_headers, header_id);
+ }
+
+ t->valid_headers = valid_headers;
+}
+
/* Execute a single (non-fused) header DMA transfer. */
static inline void
instr_dma_ht_exec(struct rte_swx_pipeline *p)
{
	__instr_dma_ht_exec(p, 1);

	/* Advance to the next instruction. */
	thread_ip_inc(p);
}
+
+static inline void
+instr_dma_ht2_exec(struct rte_swx_pipeline *p)
+{
+ TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
+ p->thread_id);
+
+ __instr_dma_ht_exec(p, 2);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+static inline void
+instr_dma_ht3_exec(struct rte_swx_pipeline *p)
+{
+ TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
+ p->thread_id);
+
+ __instr_dma_ht_exec(p, 3);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+static inline void
+instr_dma_ht4_exec(struct rte_swx_pipeline *p)
+{
+ TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
+ p->thread_id);
+
+ __instr_dma_ht_exec(p, 4);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+static inline void
+instr_dma_ht5_exec(struct rte_swx_pipeline *p)
+{
+ TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
+ p->thread_id);
+
+ __instr_dma_ht_exec(p, 5);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+static inline void
+instr_dma_ht6_exec(struct rte_swx_pipeline *p)
+{
+ TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
+ p->thread_id);
+
+ __instr_dma_ht_exec(p, 6);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+static inline void
+instr_dma_ht7_exec(struct rte_swx_pipeline *p)
+{
+ TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
+ p->thread_id);
+
+ __instr_dma_ht_exec(p, 7);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+static inline void
+instr_dma_ht8_exec(struct rte_swx_pipeline *p)
+{
+ TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
+ p->thread_id);
+
+ __instr_dma_ht_exec(p, 8);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/*
+ * alu.
+ */
+static int
+instr_alu_add_translate(struct rte_swx_pipeline *p,
+ struct action *action,
+ char **tokens,
+ int n_tokens,
+ struct instruction *instr,
+ struct instruction_data *data __rte_unused)
+{
+ char *dst = tokens[1], *src = tokens[2];
+ struct field *fdst, *fsrc;
+ uint64_t src_val;
+ uint32_t dst_struct_id, src_struct_id;
+
+ CHECK(n_tokens == 3, EINVAL);
+
+ fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
+ CHECK(fdst, EINVAL);
+
+ /* ADD, ADD_HM, ADD_MH, ADD_HH. */
+ fsrc = struct_field_parse(p, action, src, &src_struct_id);
+ if (fsrc) {
+ instr->type = INSTR_ALU_ADD;
+ if (dst[0] == 'h' && src[0] != 'h')
+ instr->type = INSTR_ALU_ADD_HM;
+ if (dst[0] != 'h' && src[0] == 'h')
+ instr->type = INSTR_ALU_ADD_MH;
+ if (dst[0] == 'h' && src[0] == 'h')
+ instr->type = INSTR_ALU_ADD_HH;
+
+ instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
+ instr->alu.dst.n_bits = fdst->n_bits;
+ instr->alu.dst.offset = fdst->offset / 8;
+ instr->alu.src.struct_id = (uint8_t)src_struct_id;
+ instr->alu.src.n_bits = fsrc->n_bits;
+ instr->alu.src.offset = fsrc->offset / 8;
+ return 0;
+ }
+
+ /* ADD_MI, ADD_HI. */
+ src_val = strtoull(src, &src, 0);
+ CHECK(!src[0], EINVAL);
+
+ instr->type = INSTR_ALU_ADD_MI;
+ if (dst[0] == 'h')
+ instr->type = INSTR_ALU_ADD_HI;
+
+ instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
+ instr->alu.dst.n_bits = fdst->n_bits;
+ instr->alu.dst.offset = fdst->offset / 8;
+ instr->alu.src_val = src_val;
+ return 0;
+}
+
+static int
+instr_alu_sub_translate(struct rte_swx_pipeline *p,
+ struct action *action,
+ char **tokens,
+ int n_tokens,
+ struct instruction *instr,
+ struct instruction_data *data __rte_unused)
+{
+ char *dst = tokens[1], *src = tokens[2];
+ struct field *fdst, *fsrc;
+ uint64_t src_val;
+ uint32_t dst_struct_id, src_struct_id;
+
+ CHECK(n_tokens == 3, EINVAL);
+
+ fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
+ CHECK(fdst, EINVAL);
+
+ /* SUB, SUB_HM, SUB_MH, SUB_HH. */
+ fsrc = struct_field_parse(p, action, src, &src_struct_id);
+ if (fsrc) {
+ instr->type = INSTR_ALU_SUB;
+ if (dst[0] == 'h' && src[0] != 'h')
+ instr->type = INSTR_ALU_SUB_HM;
+ if (dst[0] != 'h' && src[0] == 'h')
+ instr->type = INSTR_ALU_SUB_MH;
+ if (dst[0] == 'h' && src[0] == 'h')
+ instr->type = INSTR_ALU_SUB_HH;
+
+ instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
+ instr->alu.dst.n_bits = fdst->n_bits;
+ instr->alu.dst.offset = fdst->offset / 8;
+ instr->alu.src.struct_id = (uint8_t)src_struct_id;
+ instr->alu.src.n_bits = fsrc->n_bits;
+ instr->alu.src.offset = fsrc->offset / 8;
+ return 0;
+ }
+
+ /* SUB_MI, SUB_HI. */
+ src_val = strtoull(src, &src, 0);
+ CHECK(!src[0], EINVAL);
+
+ instr->type = INSTR_ALU_SUB_MI;
+ if (dst[0] == 'h')
+ instr->type = INSTR_ALU_SUB_HI;
+
+ instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
+ instr->alu.dst.n_bits = fdst->n_bits;
+ instr->alu.dst.offset = fdst->offset / 8;
+ instr->alu.src_val = src_val;
+ return 0;
+}
+
+static int
+instr_alu_ckadd_translate(struct rte_swx_pipeline *p,
+ struct action *action __rte_unused,
+ char **tokens,
+ int n_tokens,
+ struct instruction *instr,
+ struct instruction_data *data __rte_unused)
+{
+ char *dst = tokens[1], *src = tokens[2];
+ struct header *hdst, *hsrc;
+ struct field *fdst, *fsrc;
+
+ CHECK(n_tokens == 3, EINVAL);
+
+ fdst = header_field_parse(p, dst, &hdst);
+ CHECK(fdst && (fdst->n_bits == 16), EINVAL);
+
+ /* CKADD_FIELD. */
+ fsrc = header_field_parse(p, src, &hsrc);
+ if (fsrc) {
+ instr->type = INSTR_ALU_CKADD_FIELD;
+ instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
+ instr->alu.dst.n_bits = fdst->n_bits;
+ instr->alu.dst.offset = fdst->offset / 8;
+ instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
+ instr->alu.src.n_bits = fsrc->n_bits;
+ instr->alu.src.offset = fsrc->offset / 8;
+ return 0;
+ }
+
+ /* CKADD_STRUCT, CKADD_STRUCT20. */
+ hsrc = header_parse(p, src);
+ CHECK(hsrc, EINVAL);
+
+ instr->type = INSTR_ALU_CKADD_STRUCT;
+ if ((hsrc->st->n_bits / 8) == 20)
+ instr->type = INSTR_ALU_CKADD_STRUCT20;
+
+ instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
+ instr->alu.dst.n_bits = fdst->n_bits;
+ instr->alu.dst.offset = fdst->offset / 8;
+ instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
+ instr->alu.src.n_bits = hsrc->st->n_bits;
+ instr->alu.src.offset = 0; /* Unused. */
+ return 0;
+}
+
+static int
+instr_alu_cksub_translate(struct rte_swx_pipeline *p,
+ struct action *action __rte_unused,
+ char **tokens,
+ int n_tokens,
+ struct instruction *instr,
+ struct instruction_data *data __rte_unused)
+{
+ char *dst = tokens[1], *src = tokens[2];
+ struct header *hdst, *hsrc;
+ struct field *fdst, *fsrc;
+
+ CHECK(n_tokens == 3, EINVAL);
+
+ fdst = header_field_parse(p, dst, &hdst);
+ CHECK(fdst && (fdst->n_bits == 16), EINVAL);
+
+ fsrc = header_field_parse(p, src, &hsrc);
+ CHECK(fsrc, EINVAL);
+
+ instr->type = INSTR_ALU_CKSUB_FIELD;
+ instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
+ instr->alu.dst.n_bits = fdst->n_bits;
+ instr->alu.dst.offset = fdst->offset / 8;
+ instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
+ instr->alu.src.n_bits = fsrc->n_bits;
+ instr->alu.src.offset = fsrc->offset / 8;
+ return 0;
+}
+
+static int
+instr_alu_shl_translate(struct rte_swx_pipeline *p,
+ struct action *action,
+ char **tokens,
+ int n_tokens,
+ struct instruction *instr,
+ struct instruction_data *data __rte_unused)
+{
+ char *dst = tokens[1], *src = tokens[2];
+ struct field *fdst, *fsrc;
+ uint64_t src_val;
+ uint32_t dst_struct_id, src_struct_id;
+
+ CHECK(n_tokens == 3, EINVAL);
+
+ fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
+ CHECK(fdst, EINVAL);
+
+ /* SHL, SHL_HM, SHL_MH, SHL_HH. */
+ fsrc = struct_field_parse(p, action, src, &src_struct_id);
+ if (fsrc) {
+ instr->type = INSTR_ALU_SHL;
+ if (dst[0] == 'h' && src[0] != 'h')
+ instr->type = INSTR_ALU_SHL_HM;
+ if (dst[0] != 'h' && src[0] == 'h')
+ instr->type = INSTR_ALU_SHL_MH;
+ if (dst[0] == 'h' && src[0] == 'h')
+ instr->type = INSTR_ALU_SHL_HH;
+
+ instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
+ instr->alu.dst.n_bits = fdst->n_bits;
+ instr->alu.dst.offset = fdst->offset / 8;
+ instr->alu.src.struct_id = (uint8_t)src_struct_id;
+ instr->alu.src.n_bits = fsrc->n_bits;
+ instr->alu.src.offset = fsrc->offset / 8;
+ return 0;
+ }
+
+ /* SHL_MI, SHL_HI. */
+ src_val = strtoull(src, &src, 0);
+ CHECK(!src[0], EINVAL);
+
+ instr->type = INSTR_ALU_SHL_MI;
+ if (dst[0] == 'h')
+ instr->type = INSTR_ALU_SHL_HI;
+
+ instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
+ instr->alu.dst.n_bits = fdst->n_bits;
+ instr->alu.dst.offset = fdst->offset / 8;
+ instr->alu.src_val = src_val;
+ return 0;
+}
+
+static int
+instr_alu_shr_translate(struct rte_swx_pipeline *p,
+ struct action *action,
+ char **tokens,
+ int n_tokens,
+ struct instruction *instr,
+ struct instruction_data *data __rte_unused)
+{
+ char *dst = tokens[1], *src = tokens[2];
+ struct field *fdst, *fsrc;
+ uint64_t src_val;
+ uint32_t dst_struct_id, src_struct_id;
+
+ CHECK(n_tokens == 3, EINVAL);
+
+ fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
+ CHECK(fdst, EINVAL);
+
+ /* SHR, SHR_HM, SHR_MH, SHR_HH. */
+ fsrc = struct_field_parse(p, action, src, &src_struct_id);
+ if (fsrc) {
+ instr->type = INSTR_ALU_SHR;
+ if (dst[0] == 'h' && src[0] != 'h')
+ instr->type = INSTR_ALU_SHR_HM;
+ if (dst[0] != 'h' && src[0] == 'h')
+ instr->type = INSTR_ALU_SHR_MH;
+ if (dst[0] == 'h' && src[0] == 'h')
+ instr->type = INSTR_ALU_SHR_HH;
+
+ instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
+ instr->alu.dst.n_bits = fdst->n_bits;
+ instr->alu.dst.offset = fdst->offset / 8;
+ instr->alu.src.struct_id = (uint8_t)src_struct_id;
+ instr->alu.src.n_bits = fsrc->n_bits;
+ instr->alu.src.offset = fsrc->offset / 8;
+ return 0;
+ }
+
+ /* SHR_MI, SHR_HI. */
+ src_val = strtoull(src, &src, 0);
+ CHECK(!src[0], EINVAL);
+
+ instr->type = INSTR_ALU_SHR_MI;
+ if (dst[0] == 'h')
+ instr->type = INSTR_ALU_SHR_HI;
+
+ instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
+ instr->alu.dst.n_bits = fdst->n_bits;
+ instr->alu.dst.offset = fdst->offset / 8;
+ instr->alu.src_val = src_val;
+ return 0;
+}
+
+static int
+instr_alu_and_translate(struct rte_swx_pipeline *p,
+ struct action *action,
+ char **tokens,
+ int n_tokens,
+ struct instruction *instr,
+ struct instruction_data *data __rte_unused)
+{
+ char *dst = tokens[1], *src = tokens[2];
+ struct field *fdst, *fsrc;
+ uint64_t src_val;
+ uint32_t dst_struct_id, src_struct_id;
+
+ CHECK(n_tokens == 3, EINVAL);
+
+ fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
+ CHECK(fdst, EINVAL);
+
+ /* AND or AND_S. */
+ fsrc = struct_field_parse(p, action, src, &src_struct_id);
+ if (fsrc) {
+ instr->type = INSTR_ALU_AND;
+ if ((dst[0] == 'h' && src[0] != 'h') ||
+ (dst[0] != 'h' && src[0] == 'h'))
+ instr->type = INSTR_ALU_AND_S;
+
+ instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
+ instr->alu.dst.n_bits = fdst->n_bits;
+ instr->alu.dst.offset = fdst->offset / 8;
+ instr->alu.src.struct_id = (uint8_t)src_struct_id;
+ instr->alu.src.n_bits = fsrc->n_bits;
+ instr->alu.src.offset = fsrc->offset / 8;
+ return 0;
+ }
+
+ /* AND_I. */
+ src_val = strtoull(src, &src, 0);
+ CHECK(!src[0], EINVAL);
+
+ if (dst[0] == 'h')
+ src_val = hton64(src_val) >> (64 - fdst->n_bits);
+
+ instr->type = INSTR_ALU_AND_I;
+ instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
+ instr->alu.dst.n_bits = fdst->n_bits;
+ instr->alu.dst.offset = fdst->offset / 8;
+ instr->alu.src_val = src_val;
+ return 0;
+}
+
+static int
+instr_alu_or_translate(struct rte_swx_pipeline *p,
+ struct action *action,
+ char **tokens,
+ int n_tokens,
+ struct instruction *instr,
+ struct instruction_data *data __rte_unused)
+{
+ char *dst = tokens[1], *src = tokens[2];
+ struct field *fdst, *fsrc;
+ uint64_t src_val;
+ uint32_t dst_struct_id, src_struct_id;
+
+ CHECK(n_tokens == 3, EINVAL);
+
+ fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
+ CHECK(fdst, EINVAL);
+
+ /* OR or OR_S. */
+ fsrc = struct_field_parse(p, action, src, &src_struct_id);
+ if (fsrc) {
+ instr->type = INSTR_ALU_OR;
+ if ((dst[0] == 'h' && src[0] != 'h') ||
+ (dst[0] != 'h' && src[0] == 'h'))
+ instr->type = INSTR_ALU_OR_S;
+
+ instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
+ instr->alu.dst.n_bits = fdst->n_bits;
+ instr->alu.dst.offset = fdst->offset / 8;
+ instr->alu.src.struct_id = (uint8_t)src_struct_id;
+ instr->alu.src.n_bits = fsrc->n_bits;
+ instr->alu.src.offset = fsrc->offset / 8;
+ return 0;
+ }
+
+ /* OR_I. */
+ src_val = strtoull(src, &src, 0);
+ CHECK(!src[0], EINVAL);
+
+ if (dst[0] == 'h')
+ src_val = hton64(src_val) >> (64 - fdst->n_bits);
+
+ instr->type = INSTR_ALU_OR_I;
+ instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
+ instr->alu.dst.n_bits = fdst->n_bits;
+ instr->alu.dst.offset = fdst->offset / 8;
+ instr->alu.src_val = src_val;
+ return 0;
+}
+
+static int
+instr_alu_xor_translate(struct rte_swx_pipeline *p,
+ struct action *action,
+ char **tokens,
+ int n_tokens,
+ struct instruction *instr,
+ struct instruction_data *data __rte_unused)
+{
+ char *dst = tokens[1], *src = tokens[2];
+ struct field *fdst, *fsrc;
+ uint64_t src_val;
+ uint32_t dst_struct_id, src_struct_id;
+
+ CHECK(n_tokens == 3, EINVAL);
+
+ fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
+ CHECK(fdst, EINVAL);
+
+ /* XOR or XOR_S. */
+ fsrc = struct_field_parse(p, action, src, &src_struct_id);
+ if (fsrc) {
+ instr->type = INSTR_ALU_XOR;
+ if ((dst[0] == 'h' && src[0] != 'h') ||
+ (dst[0] != 'h' && src[0] == 'h'))
+ instr->type = INSTR_ALU_XOR_S;
+
+ instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
+ instr->alu.dst.n_bits = fdst->n_bits;
+ instr->alu.dst.offset = fdst->offset / 8;
+ instr->alu.src.struct_id = (uint8_t)src_struct_id;
+ instr->alu.src.n_bits = fsrc->n_bits;
+ instr->alu.src.offset = fsrc->offset / 8;
+ return 0;
+ }
+
+ /* XOR_I. */
+ src_val = strtoull(src, &src, 0);
+ CHECK(!src[0], EINVAL);
+
+ if (dst[0] == 'h')
+ src_val = hton64(src_val) >> (64 - fdst->n_bits);
+
+ instr->type = INSTR_ALU_XOR_I;
+ instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
+ instr->alu.dst.n_bits = fdst->n_bits;
+ instr->alu.dst.offset = fdst->offset / 8;
+ instr->alu.src_val = src_val;
+ return 0;
+}
+
+static inline void
+instr_alu_add_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] add\n", p->thread_id);
+
+ /* Structs. */
+ ALU(t, ip, +);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_add_mh_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] add (mh)\n", p->thread_id);
+
+ /* Structs. */
+ ALU_MH(t, ip, +);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_add_hm_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] add (hm)\n", p->thread_id);
+
+ /* Structs. */
+ ALU_HM(t, ip, +);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_add_hh_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] add (hh)\n", p->thread_id);
+
+ /* Structs. */
+ ALU_HH(t, ip, +);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_add_mi_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] add (mi)\n", p->thread_id);
+
+ /* Structs. */
+ ALU_MI(t, ip, +);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_add_hi_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] add (hi)\n", p->thread_id);
+
+ /* Structs. */
+ ALU_HI(t, ip, +);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_sub_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] sub\n", p->thread_id);
+
+ /* Structs. */
+ ALU(t, ip, -);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_sub_mh_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] sub (mh)\n", p->thread_id);
+
+ /* Structs. */
+ ALU_MH(t, ip, -);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_sub_hm_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] sub (hm)\n", p->thread_id);
+
+ /* Structs. */
+ ALU_HM(t, ip, -);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_sub_hh_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] sub (hh)\n", p->thread_id);
+
+ /* Structs. */
+ ALU_HH(t, ip, -);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_sub_mi_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] sub (mi)\n", p->thread_id);
+
+ /* Structs. */
+ ALU_MI(t, ip, -);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_sub_hi_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] sub (hi)\n", p->thread_id);
+
+ /* Structs. */
+ ALU_HI(t, ip, -);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_shl_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] shl\n", p->thread_id);
+
+ /* Structs. */
+ ALU(t, ip, <<);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_shl_mh_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] shl (mh)\n", p->thread_id);
+
+ /* Structs. */
+ ALU_MH(t, ip, <<);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_shl_hm_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] shl (hm)\n", p->thread_id);
+
+ /* Structs. */
+ ALU_HM(t, ip, <<);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_shl_hh_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] shl (hh)\n", p->thread_id);
+
+ /* Structs. */
+ ALU_HH(t, ip, <<);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_shl_mi_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] shl (mi)\n", p->thread_id);
+
+ /* Structs. */
+ ALU_MI(t, ip, <<);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_shl_hi_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] shl (hi)\n", p->thread_id);
+
+ /* Structs. */
+ ALU_HI(t, ip, <<);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_shr_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] shr\n", p->thread_id);
+
+ /* Structs. */
+ ALU(t, ip, >>);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_shr_mh_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] shr (mh)\n", p->thread_id);
+
+ /* Structs. */
+ ALU_MH(t, ip, >>);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_shr_hm_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] shr (hm)\n", p->thread_id);
+
+ /* Structs. */
+ ALU_HM(t, ip, >>);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_shr_hh_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] shr (hh)\n", p->thread_id);
+
+ /* Structs. */
+ ALU_HH(t, ip, >>);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_shr_mi_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] shr (mi)\n", p->thread_id);
+
+ /* Structs. */
+ ALU_MI(t, ip, >>);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_shr_hi_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] shr (hi)\n", p->thread_id);
+
+ /* Structs. */
+ ALU_HI(t, ip, >>);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_and_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] and\n", p->thread_id);
+
+ /* Structs. */
+ ALU(t, ip, &);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_and_s_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] and (s)\n", p->thread_id);
+
+ /* Structs. */
+ ALU_S(t, ip, &);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_and_i_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] and (i)\n", p->thread_id);
+
+ /* Structs. */
+ ALU_I(t, ip, &);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_or_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] or\n", p->thread_id);
+
+ /* Structs. */
+ ALU(t, ip, |);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_or_s_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] or (s)\n", p->thread_id);
+
+ /* Structs. */
+ ALU_S(t, ip, |);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_or_i_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] or (i)\n", p->thread_id);
+
+ /* Structs. */
+ ALU_I(t, ip, |);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_xor_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] xor\n", p->thread_id);
+
+ /* Structs. */
+ ALU(t, ip, ^);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_xor_s_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] xor (s)\n", p->thread_id);
+
+ /* Structs. */
+ ALU_S(t, ip, ^);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_xor_i_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] xor (i)\n", p->thread_id);
+
+ /* Structs. */
+ ALU_I(t, ip, ^);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
/* ckadd (field): fold a source header field into the 16-bit ones'-complement
 * checksum stored in the destination header field. The destination is
 * guaranteed to be a 16-bit field by instr_alu_ckadd_translate(). The source
 * is read as up to 64 bits and masked to its declared width.
 */
static inline void
instr_alu_ckadd_field_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint8_t *dst_struct, *src_struct;
	uint16_t *dst16_ptr, dst;
	uint64_t *src64_ptr, src64, src64_mask, src;
	uint64_t r;

	TRACE("[Thread %2u] ckadd (field)\n", p->thread_id);

	/* Structs: locate the 16-bit checksum (dst) and the source field. */
	dst_struct = t->structs[ip->alu.dst.struct_id];
	dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
	dst = *dst16_ptr;

	src_struct = t->structs[ip->alu.src.struct_id];
	src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
	src64 = *src64_ptr;
	/* Keep only the field's declared n_bits out of the 64-bit read. */
	src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
	src = src64 & src64_mask;

	/* Work on the complement of the stored checksum (the running sum). */
	r = dst;
	r = ~r & 0xFFFF;

	/* The first input (r) is a 16-bit number. The second and the third
	 * inputs are 32-bit numbers. In the worst case scenario, the sum of the
	 * three numbers (output r) is a 34-bit number.
	 */
	r += (src >> 32) + (src & 0xFFFFFFFF);

	/* The first input is a 16-bit number. The second input is an 18-bit
	 * number. In the worst case scenario, the sum of the two numbers is a
	 * 19-bit number.
	 */
	r = (r & 0xFFFF) + (r >> 16);

	/* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
	 * a 3-bit number (0 .. 7). Their sum is a 17-bit number (0 .. 0x10006).
	 */
	r = (r & 0xFFFF) + (r >> 16);

	/* When the input r is (0 .. 0xFFFF), the output r is equal to the input
	 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
	 * 0x10006), the output r is (0 .. 7). So no carry bit can be generated,
	 * therefore the output r is always a 16-bit number.
	 */
	r = (r & 0xFFFF) + (r >> 16);

	/* Complement back; map 0 to 0xFFFF (the two are equivalent in
	 * ones'-complement arithmetic).
	 */
	r = ~r & 0xFFFF;
	r = r ? r : 0xFFFF;

	*dst16_ptr = (uint16_t)r;

	/* Thread. */
	thread_ip_inc(p);
}
+
/* cksub (field): subtract a source header field from the 16-bit
 * ones'-complement checksum stored in the destination header field (the
 * inverse of ckadd, e.g. when a covered field is about to change). The
 * destination is guaranteed to be a 16-bit field by
 * instr_alu_cksub_translate().
 */
static inline void
instr_alu_cksub_field_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint8_t *dst_struct, *src_struct;
	uint16_t *dst16_ptr, dst;
	uint64_t *src64_ptr, src64, src64_mask, src;
	uint64_t r;

	TRACE("[Thread %2u] cksub (field)\n", p->thread_id);

	/* Structs: locate the 16-bit checksum (dst) and the source field. */
	dst_struct = t->structs[ip->alu.dst.struct_id];
	dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
	dst = *dst16_ptr;

	src_struct = t->structs[ip->alu.src.struct_id];
	src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
	src64 = *src64_ptr;
	/* Keep only the field's declared n_bits out of the 64-bit read. */
	src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
	src = src64 & src64_mask;

	/* Work on the complement of the stored checksum (the running sum). */
	r = dst;
	r = ~r & 0xFFFF;

	/* Subtraction in 1's complement arithmetic (i.e. a '- b) is the same as
	 * the following sequence of operations in 2's complement arithmetic:
	 * a '- b = (a - b) % 0xFFFF.
	 *
	 * In order to prevent an underflow for the below subtraction, in which
	 * a 33-bit number (the subtrahend) is taken out of a 16-bit number (the
	 * minuend), we first add a multiple of the 0xFFFF modulus to the
	 * minuend. The number we add to the minuend needs to be a 34-bit number
	 * or higher, so for readability reasons we picked the 36-bit multiple.
	 * We are effectively turning the 16-bit minuend into a 36-bit number:
	 * (a - b) % 0xFFFF = (a + 0xFFFF00000 - b) % 0xFFFF.
	 */
	r += 0xFFFF00000ULL; /* The output r is a 36-bit number. */

	/* A 33-bit number is subtracted from a 36-bit number (the input r). The
	 * result (the output r) is a 36-bit number.
	 */
	r -= (src >> 32) + (src & 0xFFFFFFFF);

	/* The first input is a 16-bit number. The second input is a 20-bit
	 * number. Their sum is a 21-bit number.
	 */
	r = (r & 0xFFFF) + (r >> 16);

	/* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
	 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1001E).
	 */
	r = (r & 0xFFFF) + (r >> 16);

	/* When the input r is (0 .. 0xFFFF), the output r is equal to the input
	 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
	 * 0x1001E), the output r is (0 .. 31). So no carry bit can be
	 * generated, therefore the output r is always a 16-bit number.
	 */
	r = (r & 0xFFFF) + (r >> 16);

	/* Complement back; map 0 to 0xFFFF (the two are equivalent in
	 * ones'-complement arithmetic).
	 */
	r = ~r & 0xFFFF;
	r = r ? r : 0xFFFF;

	*dst16_ptr = (uint16_t)r;

	/* Thread. */
	thread_ip_inc(p);
}
+
/* ckadd (struct of 20 bytes): compute the 16-bit ones'-complement checksum
 * over a 20-byte header (five 32-bit words, e.g. an IPv4 header without
 * options) and store it into the 16-bit destination field. Unrolled fast
 * path selected at translate time when the source header is exactly 20
 * bytes.
 */
static inline void
instr_alu_ckadd_struct20_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint8_t *dst_struct, *src_struct;
	uint16_t *dst16_ptr;
	uint32_t *src32_ptr;
	uint64_t r0, r1;

	TRACE("[Thread %2u] ckadd (struct of 20 bytes)\n", p->thread_id);

	/* Structs: locate the 16-bit checksum (dst) and the source header. */
	dst_struct = t->structs[ip->alu.dst.struct_id];
	dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];

	src_struct = t->structs[ip->alu.src.struct_id];
	src32_ptr = (uint32_t *)&src_struct[0];

	/* Sum the five 32-bit words in two parallel accumulators. */
	r0 = src32_ptr[0]; /* r0 is a 32-bit number. */
	r1 = src32_ptr[1]; /* r1 is a 32-bit number. */
	r0 += src32_ptr[2]; /* The output r0 is a 33-bit number. */
	r1 += src32_ptr[3]; /* The output r1 is a 33-bit number. */
	r0 += r1 + src32_ptr[4]; /* The output r0 is a 35-bit number. */

	/* The first input is a 16-bit number. The second input is a 19-bit
	 * number. Their sum is a 20-bit number.
	 */
	r0 = (r0 & 0xFFFF) + (r0 >> 16);

	/* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
	 * a 4-bit number (0 .. 15). The sum is a 17-bit number (0 .. 0x1000E).
	 */
	r0 = (r0 & 0xFFFF) + (r0 >> 16);

	/* When the input r is (0 .. 0xFFFF), the output r is equal to the input
	 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
	 * 0x1000E), the output r is (0 .. 15). So no carry bit can be
	 * generated, therefore the output r is always a 16-bit number.
	 */
	r0 = (r0 & 0xFFFF) + (r0 >> 16);

	/* Complement; map 0 to 0xFFFF (equivalent in ones'-complement). */
	r0 = ~r0 & 0xFFFF;
	r0 = r0 ? r0 : 0xFFFF;

	*dst16_ptr = (uint16_t)r0;

	/* Thread. */
	thread_ip_inc(p);
}
+
/* ckadd (struct): compute the 16-bit ones'-complement checksum over an
 * arbitrary-size source header (n_bits / 32 words of 32 bits each) and store
 * it into the 16-bit destination field.
 */
static inline void
instr_alu_ckadd_struct_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint8_t *dst_struct, *src_struct;
	uint16_t *dst16_ptr;
	uint32_t *src32_ptr;
	uint64_t r = 0;
	uint32_t i;

	TRACE("[Thread %2u] ckadd (struct)\n", p->thread_id);

	/* Structs: locate the 16-bit checksum (dst) and the source header. */
	dst_struct = t->structs[ip->alu.dst.struct_id];
	dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];

	src_struct = t->structs[ip->alu.src.struct_id];
	src32_ptr = (uint32_t *)&src_struct[0];

	/* NOTE(review): a 256-byte header holds 64 = 2^6 (not 8 = 2^3) 32-bit
	 * words, so the worst-case sum below is a 38-bit number, not 36-bit as
	 * originally stated — confirm the actual maximum header size. The
	 * three folding steps that follow still reduce any such sum to 16 bits.
	 */
	for (i = 0; i < ip->alu.src.n_bits / 32; i++, src32_ptr++)
		r += *src32_ptr;

	/* Fold the high half of the accumulator into the low 16 bits. */
	r = (r & 0xFFFF) + (r >> 16);

	/* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
	 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1001E).
	 */
	r = (r & 0xFFFF) + (r >> 16);

	/* When the input r is (0 .. 0xFFFF), the output r is equal to the input
	 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
	 * 0x1001E), the output r is (0 .. 31). So no carry bit can be
	 * generated, therefore the output r is always a 16-bit number.
	 */
	r = (r & 0xFFFF) + (r >> 16);

	/* Complement; map 0 to 0xFFFF (equivalent in ones'-complement). */
	r = ~r & 0xFFFF;
	r = r ? r : 0xFFFF;

	*dst16_ptr = (uint16_t)r;

	/* Thread. */
	thread_ip_inc(p);
}
+
+/*
+ * Register array.
+ */
+static struct regarray *
+regarray_find(struct rte_swx_pipeline *p, const char *name);
+
+/* Translate "regprefetch REGARRAY INDEX" into one of the REGPREFETCH_RH /
+ * REGPREFETCH_RM / REGPREFETCH_RI opcodes, depending on whether INDEX is a
+ * header field ('h' prefix), a metadata/other struct field, or an immediate.
+ * Returns 0 on success, -EINVAL (via CHECK) on malformed input.
+ */
+static int
+instr_regprefetch_translate(struct rte_swx_pipeline *p,
+ struct action *action,
+ char **tokens,
+ int n_tokens,
+ struct instruction *instr,
+ struct instruction_data *data __rte_unused)
+{
+ char *regarray = tokens[1], *idx = tokens[2];
+ struct regarray *r;
+ struct field *fidx;
+ uint32_t idx_struct_id, idx_val;
+
+ CHECK(n_tokens == 3, EINVAL);
+
+ r = regarray_find(p, regarray);
+ CHECK(r, EINVAL);
+
+ /* REGPREFETCH_RH, REGPREFETCH_RM. */
+ fidx = struct_field_parse(p, action, idx, &idx_struct_id);
+ if (fidx) {
+ instr->type = INSTR_REGPREFETCH_RM;
+ if (idx[0] == 'h')
+ instr->type = INSTR_REGPREFETCH_RH;
+
+ instr->regarray.regarray_id = r->id;
+ instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
+ instr->regarray.idx.n_bits = fidx->n_bits;
+ instr->regarray.idx.offset = fidx->offset / 8;
+ instr->regarray.dstsrc_val = 0; /* Unused. */
+ return 0;
+ }
+
+ /* REGPREFETCH_RI: INDEX must parse fully as an integer. */
+ idx_val = strtoul(idx, &idx, 0);
+ CHECK(!idx[0], EINVAL);
+
+ instr->type = INSTR_REGPREFETCH_RI;
+ instr->regarray.regarray_id = r->id;
+ instr->regarray.idx_val = idx_val;
+ instr->regarray.dstsrc_val = 0; /* Unused. */
+ return 0;
+}
+
+/* Translate "regrd DST REGARRAY INDEX" into one of the REGRD_{H,M}R{H,M,I}
+ * opcodes. The opcode letters encode (destination kind, index kind):
+ * H = header field, M = metadata/other struct field, I = immediate index.
+ * Returns 0 on success, -EINVAL (via CHECK) on malformed input.
+ */
+static int
+instr_regrd_translate(struct rte_swx_pipeline *p,
+ struct action *action,
+ char **tokens,
+ int n_tokens,
+ struct instruction *instr,
+ struct instruction_data *data __rte_unused)
+{
+ char *dst = tokens[1], *regarray = tokens[2], *idx = tokens[3];
+ struct regarray *r;
+ struct field *fdst, *fidx;
+ uint32_t dst_struct_id, idx_struct_id, idx_val;
+
+ CHECK(n_tokens == 4, EINVAL);
+
+ r = regarray_find(p, regarray);
+ CHECK(r, EINVAL);
+
+ /* DST parsed with NULL action: must be a header/metadata field, never
+ * action data.
+ */
+ fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
+ CHECK(fdst, EINVAL);
+
+ /* REGRD_HRH, REGRD_HRM, REGRD_MRH, REGRD_MRM. */
+ fidx = struct_field_parse(p, action, idx, &idx_struct_id);
+ if (fidx) {
+ instr->type = INSTR_REGRD_MRM;
+ if (dst[0] == 'h' && idx[0] != 'h')
+ instr->type = INSTR_REGRD_HRM;
+ if (dst[0] != 'h' && idx[0] == 'h')
+ instr->type = INSTR_REGRD_MRH;
+ if (dst[0] == 'h' && idx[0] == 'h')
+ instr->type = INSTR_REGRD_HRH;
+
+ instr->regarray.regarray_id = r->id;
+ instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
+ instr->regarray.idx.n_bits = fidx->n_bits;
+ instr->regarray.idx.offset = fidx->offset / 8;
+ instr->regarray.dstsrc.struct_id = (uint8_t)dst_struct_id;
+ instr->regarray.dstsrc.n_bits = fdst->n_bits;
+ instr->regarray.dstsrc.offset = fdst->offset / 8;
+ return 0;
+ }
+
+ /* REGRD_MRI, REGRD_HRI: INDEX must parse fully as an integer. */
+ idx_val = strtoul(idx, &idx, 0);
+ CHECK(!idx[0], EINVAL);
+
+ instr->type = INSTR_REGRD_MRI;
+ if (dst[0] == 'h')
+ instr->type = INSTR_REGRD_HRI;
+
+ instr->regarray.regarray_id = r->id;
+ instr->regarray.idx_val = idx_val;
+ instr->regarray.dstsrc.struct_id = (uint8_t)dst_struct_id;
+ instr->regarray.dstsrc.n_bits = fdst->n_bits;
+ instr->regarray.dstsrc.offset = fdst->offset / 8;
+ return 0;
+}
+
+/* Translate "regwr REGARRAY INDEX SRC" into one of the REGWR_R{H,M,I}{H,M,I}
+ * opcodes. The two opcode letters encode (index kind, source kind):
+ * H = header field, M = metadata/other struct field, I = immediate.
+ * Returns 0 on success, -EINVAL (via CHECK) on malformed input.
+ */
+static int
+instr_regwr_translate(struct rte_swx_pipeline *p,
+ struct action *action,
+ char **tokens,
+ int n_tokens,
+ struct instruction *instr,
+ struct instruction_data *data __rte_unused)
+{
+ char *regarray = tokens[1], *idx = tokens[2], *src = tokens[3];
+ struct regarray *r;
+ struct field *fidx, *fsrc;
+ uint64_t src_val;
+ uint32_t idx_struct_id, idx_val, src_struct_id;
+
+ CHECK(n_tokens == 4, EINVAL);
+
+ r = regarray_find(p, regarray);
+ CHECK(r, EINVAL);
+
+ /* REGWR_RHH, REGWR_RHM, REGWR_RMH, REGWR_RMM. */
+ fidx = struct_field_parse(p, action, idx, &idx_struct_id);
+ fsrc = struct_field_parse(p, action, src, &src_struct_id);
+ if (fidx && fsrc) {
+ instr->type = INSTR_REGWR_RMM;
+ if (idx[0] == 'h' && src[0] != 'h')
+ instr->type = INSTR_REGWR_RHM;
+ if (idx[0] != 'h' && src[0] == 'h')
+ instr->type = INSTR_REGWR_RMH;
+ if (idx[0] == 'h' && src[0] == 'h')
+ instr->type = INSTR_REGWR_RHH;
+
+ instr->regarray.regarray_id = r->id;
+ instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
+ instr->regarray.idx.n_bits = fidx->n_bits;
+ instr->regarray.idx.offset = fidx->offset / 8;
+ instr->regarray.dstsrc.struct_id = (uint8_t)src_struct_id;
+ instr->regarray.dstsrc.n_bits = fsrc->n_bits;
+ instr->regarray.dstsrc.offset = fsrc->offset / 8;
+ return 0;
+ }
+
+ /* REGWR_RHI, REGWR_RMI: field index, immediate source. */
+ if (fidx && !fsrc) {
+ src_val = strtoull(src, &src, 0);
+ CHECK(!src[0], EINVAL);
+
+ instr->type = INSTR_REGWR_RMI;
+ if (idx[0] == 'h')
+ instr->type = INSTR_REGWR_RHI;
+
+ instr->regarray.regarray_id = r->id;
+ instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
+ instr->regarray.idx.n_bits = fidx->n_bits;
+ instr->regarray.idx.offset = fidx->offset / 8;
+ instr->regarray.dstsrc_val = src_val;
+ return 0;
+ }
+
+ /* REGWR_RIH, REGWR_RIM: immediate index, field source. */
+ if (!fidx && fsrc) {
+ idx_val = strtoul(idx, &idx, 0);
+ CHECK(!idx[0], EINVAL);
+
+ instr->type = INSTR_REGWR_RIM;
+ if (src[0] == 'h')
+ instr->type = INSTR_REGWR_RIH;
+
+ instr->regarray.regarray_id = r->id;
+ instr->regarray.idx_val = idx_val;
+ instr->regarray.dstsrc.struct_id = (uint8_t)src_struct_id;
+ instr->regarray.dstsrc.n_bits = fsrc->n_bits;
+ instr->regarray.dstsrc.offset = fsrc->offset / 8;
+ return 0;
+ }
+
+ /* REGWR_RII: both index and source are immediates. */
+ src_val = strtoull(src, &src, 0);
+ CHECK(!src[0], EINVAL);
+
+ idx_val = strtoul(idx, &idx, 0);
+ CHECK(!idx[0], EINVAL);
+
+ instr->type = INSTR_REGWR_RII;
+ instr->regarray.idx_val = idx_val;
+ instr->regarray.dstsrc_val = src_val;
+
+ return 0;
+}
+
+/* Translate "regadd REGARRAY INDEX SRC" into one of the REGADD_R{H,M,I}{H,M,I}
+ * opcodes; structure mirrors instr_regwr_translate (index kind, source kind).
+ * Returns 0 on success, -EINVAL (via CHECK) on malformed input.
+ */
+static int
+instr_regadd_translate(struct rte_swx_pipeline *p,
+ struct action *action,
+ char **tokens,
+ int n_tokens,
+ struct instruction *instr,
+ struct instruction_data *data __rte_unused)
+{
+ char *regarray = tokens[1], *idx = tokens[2], *src = tokens[3];
+ struct regarray *r;
+ struct field *fidx, *fsrc;
+ uint64_t src_val;
+ uint32_t idx_struct_id, idx_val, src_struct_id;
+
+ CHECK(n_tokens == 4, EINVAL);
+
+ r = regarray_find(p, regarray);
+ CHECK(r, EINVAL);
+
+ /* REGADD_RHH, REGADD_RHM, REGADD_RMH, REGADD_RMM. */
+ fidx = struct_field_parse(p, action, idx, &idx_struct_id);
+ fsrc = struct_field_parse(p, action, src, &src_struct_id);
+ if (fidx && fsrc) {
+ instr->type = INSTR_REGADD_RMM;
+ if (idx[0] == 'h' && src[0] != 'h')
+ instr->type = INSTR_REGADD_RHM;
+ if (idx[0] != 'h' && src[0] == 'h')
+ instr->type = INSTR_REGADD_RMH;
+ if (idx[0] == 'h' && src[0] == 'h')
+ instr->type = INSTR_REGADD_RHH;
+
+ instr->regarray.regarray_id = r->id;
+ instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
+ instr->regarray.idx.n_bits = fidx->n_bits;
+ instr->regarray.idx.offset = fidx->offset / 8;
+ instr->regarray.dstsrc.struct_id = (uint8_t)src_struct_id;
+ instr->regarray.dstsrc.n_bits = fsrc->n_bits;
+ instr->regarray.dstsrc.offset = fsrc->offset / 8;
+ return 0;
+ }
+
+ /* REGADD_RHI, REGADD_RMI: field index, immediate source. */
+ if (fidx && !fsrc) {
+ src_val = strtoull(src, &src, 0);
+ CHECK(!src[0], EINVAL);
+
+ instr->type = INSTR_REGADD_RMI;
+ if (idx[0] == 'h')
+ instr->type = INSTR_REGADD_RHI;
+
+ instr->regarray.regarray_id = r->id;
+ instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
+ instr->regarray.idx.n_bits = fidx->n_bits;
+ instr->regarray.idx.offset = fidx->offset / 8;
+ instr->regarray.dstsrc_val = src_val;
+ return 0;
+ }
+
+ /* REGADD_RIH, REGADD_RIM: immediate index, field source. */
+ if (!fidx && fsrc) {
+ idx_val = strtoul(idx, &idx, 0);
+ CHECK(!idx[0], EINVAL);
+
+ instr->type = INSTR_REGADD_RIM;
+ if (src[0] == 'h')
+ instr->type = INSTR_REGADD_RIH;
+
+ instr->regarray.regarray_id = r->id;
+ instr->regarray.idx_val = idx_val;
+ instr->regarray.dstsrc.struct_id = (uint8_t)src_struct_id;
+ instr->regarray.dstsrc.n_bits = fsrc->n_bits;
+ instr->regarray.dstsrc.offset = fsrc->offset / 8;
+ return 0;
+ }
+
+ /* REGADD_RII: both index and source are immediates. */
+ src_val = strtoull(src, &src, 0);
+ CHECK(!src[0], EINVAL);
+
+ idx_val = strtoul(idx, &idx, 0);
+ CHECK(!idx[0], EINVAL);
+
+ instr->type = INSTR_REGADD_RII;
+ instr->regarray.idx_val = idx_val;
+ instr->regarray.dstsrc_val = src_val;
+ return 0;
+}
+
+/* Resolve the register array that this instruction operates on. */
+static inline uint64_t *
+instr_regarray_regarray(struct rte_swx_pipeline *p, struct instruction *ip)
+{
+ return p->regarray_runtime[ip->regarray.regarray_id].regarray;
+}
+
+/* Register array index read from a host-byte-order struct field: load 64 bits
+ * at the field offset, mask to the field width, then mask with the array size
+ * mask (array sizes are powers of two, so this bounds the index).
+ */
+static inline uint64_t
+instr_regarray_idx_hbo(struct rte_swx_pipeline *p, struct thread *t, struct instruction *ip)
+{
+ struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
+
+ uint8_t *idx_struct = t->structs[ip->regarray.idx.struct_id];
+ uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->regarray.idx.offset];
+ uint64_t idx64 = *idx64_ptr;
+ /* Assumes 1 <= n_bits <= 64; a shift by 64 would be UB — guaranteed by
+ * the translate-time field checks, presumably. TODO confirm.
+ */
+ uint64_t idx64_mask = UINT64_MAX >> (64 - ip->regarray.idx.n_bits);
+ uint64_t idx = idx64 & idx64_mask & r->size_mask;
+
+ return idx;
+}
+
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+
+/* Register array index read from a network-byte-order (header) field: the
+ * byte-swapped 64-bit load is right-shifted so the field's most significant
+ * bits land in the low bits, then masked with the array size mask.
+ */
+static inline uint64_t
+instr_regarray_idx_nbo(struct rte_swx_pipeline *p, struct thread *t, struct instruction *ip)
+{
+ struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
+
+ uint8_t *idx_struct = t->structs[ip->regarray.idx.struct_id];
+ uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->regarray.idx.offset];
+ uint64_t idx64 = *idx64_ptr;
+ uint64_t idx = (ntoh64(idx64) >> (64 - ip->regarray.idx.n_bits)) & r->size_mask;
+
+ return idx;
+}
+
+#else
+
+/* On big-endian hosts NBO == HBO, so reuse the HBO reader. */
+#define instr_regarray_idx_nbo instr_regarray_idx_hbo
+
+#endif
+
+/* Immediate register array index, bounded by masking with the array size
+ * mask (array sizes are powers of two).
+ */
+static inline uint64_t
+instr_regarray_idx_imm(struct rte_swx_pipeline *p, struct instruction *ip)
+{
+ struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
+
+ return ip->regarray.idx_val & r->size_mask;
+}
+
+/* Source operand read from a host-byte-order struct field: 64-bit load at the
+ * field offset, masked down to the field width.
+ */
+static inline uint64_t
+instr_regarray_src_hbo(struct thread *t, struct instruction *ip)
+{
+ uint8_t *src_struct = t->structs[ip->regarray.dstsrc.struct_id];
+ uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->regarray.dstsrc.offset];
+ uint64_t src64 = *src64_ptr;
+ uint64_t src64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
+ uint64_t src = src64 & src64_mask;
+
+ return src;
+}
+
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+
+/* Source operand read from a network-byte-order (header) field: byte-swap the
+ * 64-bit load, then shift the field's bits down to the low end.
+ */
+static inline uint64_t
+instr_regarray_src_nbo(struct thread *t, struct instruction *ip)
+{
+ uint8_t *src_struct = t->structs[ip->regarray.dstsrc.struct_id];
+ uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->regarray.dstsrc.offset];
+ uint64_t src64 = *src64_ptr;
+ uint64_t src = ntoh64(src64) >> (64 - ip->regarray.dstsrc.n_bits);
+
+ return src;
+}
+
+#else
+
+/* On big-endian hosts NBO == HBO, so reuse the HBO reader. */
+#define instr_regarray_src_nbo instr_regarray_src_hbo
+
+#endif
+
+/* Store src into the n_bits-wide host-byte-order destination field,
+ * preserving the neighboring bits of the destination 64-bit word.
+ */
+static inline void
+instr_regarray_dst_hbo_src_hbo_set(struct thread *t, struct instruction *ip, uint64_t src)
+{
+ uint8_t *dst = t->structs[ip->regarray.dstsrc.struct_id];
+ uint64_t *word_ptr = (uint64_t *)&dst[ip->regarray.dstsrc.offset];
+ uint64_t field_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
+
+ *word_ptr = (*word_ptr & ~field_mask) | (src & field_mask);
+}
+
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+
+/* Store an HBO src value into an n_bits-wide network-byte-order (header)
+ * destination field: byte-swap src and align it to the field position, then
+ * merge, preserving the neighboring bits of the destination 64-bit word.
+ */
+static inline void
+instr_regarray_dst_nbo_src_hbo_set(struct thread *t, struct instruction *ip, uint64_t src)
+{
+ uint8_t *dst_struct = t->structs[ip->regarray.dstsrc.struct_id];
+ uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->regarray.dstsrc.offset];
+ uint64_t dst64 = *dst64_ptr;
+ uint64_t dst64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
+
+ src = hton64(src) >> (64 - ip->regarray.dstsrc.n_bits);
+ *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
+}
+
+#else
+
+/* On big-endian hosts NBO == HBO, so reuse the HBO writer. */
+#define instr_regarray_dst_nbo_src_hbo_set instr_regarray_dst_hbo_src_hbo_set
+
+#endif
+
+/* regprefetch r[h]: prefetch the register slot indexed by a header (NBO)
+ * field, hiding the memory latency of a subsequent regrd/regwr/regadd.
+ */
+static inline void
+instr_regprefetch_rh_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ uint64_t *regarray, idx;
+
+ TRACE("[Thread %2u] regprefetch (r[h])\n", p->thread_id);
+
+ /* Structs. Fixed mojibake: "®array" was an encoding corruption of
+ * "&regarray".
+ */
+ regarray = instr_regarray_regarray(p, ip);
+ idx = instr_regarray_idx_nbo(p, t, ip);
+ rte_prefetch0(&regarray[idx]);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/* regprefetch r[m]: prefetch the register slot indexed by a metadata (HBO)
+ * field.
+ */
+static inline void
+instr_regprefetch_rm_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ uint64_t *regarray, idx;
+
+ TRACE("[Thread %2u] regprefetch (r[m])\n", p->thread_id);
+
+ /* Structs. Fixed mojibake: "®array" was an encoding corruption of
+ * "&regarray".
+ */
+ regarray = instr_regarray_regarray(p, ip);
+ idx = instr_regarray_idx_hbo(p, t, ip);
+ rte_prefetch0(&regarray[idx]);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/* regprefetch r[i]: prefetch the register slot at an immediate index. */
+static inline void
+instr_regprefetch_ri_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ uint64_t *regarray, idx;
+
+ TRACE("[Thread %2u] regprefetch (r[i])\n", p->thread_id);
+
+ /* Structs. Fixed mojibake: "®array" was an encoding corruption of
+ * "&regarray".
+ */
+ regarray = instr_regarray_regarray(p, ip);
+ idx = instr_regarray_idx_imm(p, ip);
+ rte_prefetch0(&regarray[idx]);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/* regrd (h = r[h]): read a register into a header (NBO) field; NBO index. */
+static inline void
+instr_regrd_hrh_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ uint64_t *regarray, idx;
+
+ TRACE("[Thread %2u] regrd (h = r[h])\n", p->thread_id);
+
+ /* Structs. */
+ regarray = instr_regarray_regarray(p, ip);
+ idx = instr_regarray_idx_nbo(p, t, ip);
+ instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/* regrd (h = r[m]): read a register into a header (NBO) field; HBO index. */
+static inline void
+instr_regrd_hrm_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ uint64_t *regarray, idx;
+
+ TRACE("[Thread %2u] regrd (h = r[m])\n", p->thread_id);
+
+ /* Structs. */
+ regarray = instr_regarray_regarray(p, ip);
+ idx = instr_regarray_idx_hbo(p, t, ip);
+ instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/* regrd (m = r[h]): read a register into a metadata (HBO) field; NBO index. */
+static inline void
+instr_regrd_mrh_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ uint64_t *regarray, idx;
+
+ TRACE("[Thread %2u] regrd (m = r[h])\n", p->thread_id);
+
+ /* Structs. */
+ regarray = instr_regarray_regarray(p, ip);
+ idx = instr_regarray_idx_nbo(p, t, ip);
+ instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/* regrd (m = r[m]): read a register into a metadata (HBO) field; HBO index. */
+static inline void
+instr_regrd_mrm_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ uint64_t *regarray, idx;
+
+ /* TRACE added for consistency: every sibling exec handler logs its
+ * opcode; this one was missing it.
+ */
+ TRACE("[Thread %2u] regrd (m = r[m])\n", p->thread_id);
+
+ /* Structs. */
+ regarray = instr_regarray_regarray(p, ip);
+ idx = instr_regarray_idx_hbo(p, t, ip);
+ instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/* regrd (h = r[i]): read a register into a header (NBO) field; immediate index. */
+static inline void
+instr_regrd_hri_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ uint64_t *regarray, idx;
+
+ TRACE("[Thread %2u] regrd (h = r[i])\n", p->thread_id);
+
+ /* Structs. */
+ regarray = instr_regarray_regarray(p, ip);
+ idx = instr_regarray_idx_imm(p, ip);
+ instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/* regrd (m = r[i]): read a register into a metadata (HBO) field; immediate index. */
+static inline void
+instr_regrd_mri_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ uint64_t *regarray, idx;
+
+ TRACE("[Thread %2u] regrd (m = r[i])\n", p->thread_id);
+
+ /* Structs. */
+ regarray = instr_regarray_regarray(p, ip);
+ idx = instr_regarray_idx_imm(p, ip);
+ instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/* regwr (r[h] = h): write a header (NBO) field into a register; NBO index. */
+static inline void
+instr_regwr_rhh_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ uint64_t *regarray, idx, src;
+
+ TRACE("[Thread %2u] regwr (r[h] = h)\n", p->thread_id);
+
+ /* Structs. */
+ regarray = instr_regarray_regarray(p, ip);
+ idx = instr_regarray_idx_nbo(p, t, ip);
+ src = instr_regarray_src_nbo(t, ip);
+ regarray[idx] = src;
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/* regwr (r[h] = m): write a metadata (HBO) field into a register; NBO index. */
+static inline void
+instr_regwr_rhm_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ uint64_t *regarray, idx, src;
+
+ TRACE("[Thread %2u] regwr (r[h] = m)\n", p->thread_id);
+
+ /* Structs. */
+ regarray = instr_regarray_regarray(p, ip);
+ idx = instr_regarray_idx_nbo(p, t, ip);
+ src = instr_regarray_src_hbo(t, ip);
+ regarray[idx] = src;
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/* regwr (r[m] = h): write a header (NBO) field into a register; HBO index. */
+static inline void
+instr_regwr_rmh_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ uint64_t *regarray, idx, src;
+
+ TRACE("[Thread %2u] regwr (r[m] = h)\n", p->thread_id);
+
+ /* Structs. */
+ regarray = instr_regarray_regarray(p, ip);
+ idx = instr_regarray_idx_hbo(p, t, ip);
+ src = instr_regarray_src_nbo(t, ip);
+ regarray[idx] = src;
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/* regwr (r[m] = m): write a metadata (HBO) field into a register; HBO index. */
+static inline void
+instr_regwr_rmm_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ uint64_t *regarray, idx, src;
+
+ TRACE("[Thread %2u] regwr (r[m] = m)\n", p->thread_id);
+
+ /* Structs. */
+ regarray = instr_regarray_regarray(p, ip);
+ idx = instr_regarray_idx_hbo(p, t, ip);
+ src = instr_regarray_src_hbo(t, ip);
+ regarray[idx] = src;
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/* regwr (r[h] = i): write an immediate into a register; NBO index. */
+static inline void
+instr_regwr_rhi_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ uint64_t *regarray, idx, src;
+
+ TRACE("[Thread %2u] regwr (r[h] = i)\n", p->thread_id);
+
+ /* Structs. */
+ regarray = instr_regarray_regarray(p, ip);
+ idx = instr_regarray_idx_nbo(p, t, ip);
+ src = ip->regarray.dstsrc_val;
+ regarray[idx] = src;
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/* regwr (r[m] = i): write an immediate into a register; HBO index. */
+static inline void
+instr_regwr_rmi_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ uint64_t *regarray, idx, src;
+
+ TRACE("[Thread %2u] regwr (r[m] = i)\n", p->thread_id);
+
+ /* Structs. */
+ regarray = instr_regarray_regarray(p, ip);
+ idx = instr_regarray_idx_hbo(p, t, ip);
+ src = ip->regarray.dstsrc_val;
+ regarray[idx] = src;
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/* regwr (r[i] = h): write a header (NBO) field into a register; immediate index. */
+static inline void
+instr_regwr_rih_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ uint64_t *regarray, idx, src;
+
+ TRACE("[Thread %2u] regwr (r[i] = h)\n", p->thread_id);
+
+ /* Structs. */
+ regarray = instr_regarray_regarray(p, ip);
+ idx = instr_regarray_idx_imm(p, ip);
+ src = instr_regarray_src_nbo(t, ip);
+ regarray[idx] = src;
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/* regwr (r[i] = m): write a metadata (HBO) field into a register; immediate index. */
+static inline void
+instr_regwr_rim_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ uint64_t *regarray, idx, src;
+
+ TRACE("[Thread %2u] regwr (r[i] = m)\n", p->thread_id);
+
+ /* Structs. */
+ regarray = instr_regarray_regarray(p, ip);
+ idx = instr_regarray_idx_imm(p, ip);
+ src = instr_regarray_src_hbo(t, ip);
+ regarray[idx] = src;
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/* regwr (r[i] = i): write an immediate into a register; immediate index. */
+static inline void
+instr_regwr_rii_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ uint64_t *regarray, idx, src;
+
+ TRACE("[Thread %2u] regwr (r[i] = i)\n", p->thread_id);
+
+ /* Structs. */
+ regarray = instr_regarray_regarray(p, ip);
+ idx = instr_regarray_idx_imm(p, ip);
+ src = ip->regarray.dstsrc_val;
+ regarray[idx] = src;
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/* regadd (r[h] += h): add a header (NBO) field to a register; NBO index. */
+static inline void
+instr_regadd_rhh_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ uint64_t *regarray, idx, src;
+
+ TRACE("[Thread %2u] regadd (r[h] += h)\n", p->thread_id);
+
+ /* Structs. */
+ regarray = instr_regarray_regarray(p, ip);
+ idx = instr_regarray_idx_nbo(p, t, ip);
+ src = instr_regarray_src_nbo(t, ip);
+ regarray[idx] += src;
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/* regadd (r[h] += m): add a metadata (HBO) field to a register; NBO index. */
+static inline void
+instr_regadd_rhm_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ uint64_t *regarray, idx, src;
+
+ TRACE("[Thread %2u] regadd (r[h] += m)\n", p->thread_id);
+
+ /* Structs. */
+ regarray = instr_regarray_regarray(p, ip);
+ idx = instr_regarray_idx_nbo(p, t, ip);
+ src = instr_regarray_src_hbo(t, ip);
+ regarray[idx] += src;
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/* regadd (r[m] += h): add a header (NBO) field to a register; HBO index. */
+static inline void
+instr_regadd_rmh_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ uint64_t *regarray, idx, src;
+
+ TRACE("[Thread %2u] regadd (r[m] += h)\n", p->thread_id);
+
+ /* Structs. */
+ regarray = instr_regarray_regarray(p, ip);
+ idx = instr_regarray_idx_hbo(p, t, ip);
+ src = instr_regarray_src_nbo(t, ip);
+ regarray[idx] += src;
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/* regadd (r[m] += m): add a metadata (HBO) field to a register; HBO index. */
+static inline void
+instr_regadd_rmm_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ uint64_t *regarray, idx, src;
+
+ TRACE("[Thread %2u] regadd (r[m] += m)\n", p->thread_id);
+
+ /* Structs. */
+ regarray = instr_regarray_regarray(p, ip);
+ idx = instr_regarray_idx_hbo(p, t, ip);
+ src = instr_regarray_src_hbo(t, ip);
+ regarray[idx] += src;
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/* regadd (r[h] += i): add an immediate to a register; NBO index. */
+static inline void
+instr_regadd_rhi_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ uint64_t *regarray, idx, src;
+
+ TRACE("[Thread %2u] regadd (r[h] += i)\n", p->thread_id);
+
+ /* Structs. */
+ regarray = instr_regarray_regarray(p, ip);
+ idx = instr_regarray_idx_nbo(p, t, ip);
+ src = ip->regarray.dstsrc_val;
+ regarray[idx] += src;
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/* regadd (r[m] += i): add an immediate to a register; HBO index. */
+static inline void
+instr_regadd_rmi_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ uint64_t *regarray, idx, src;
+
+ TRACE("[Thread %2u] regadd (r[m] += i)\n", p->thread_id);
+
+ /* Structs. */
+ regarray = instr_regarray_regarray(p, ip);
+ idx = instr_regarray_idx_hbo(p, t, ip);
+ src = ip->regarray.dstsrc_val;
+ regarray[idx] += src;
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/* regadd (r[i] += h): add a header (NBO) field to a register; immediate index. */
+static inline void
+instr_regadd_rih_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ uint64_t *regarray, idx, src;
+
+ TRACE("[Thread %2u] regadd (r[i] += h)\n", p->thread_id);
+
+ /* Structs. */
+ regarray = instr_regarray_regarray(p, ip);
+ idx = instr_regarray_idx_imm(p, ip);
+ src = instr_regarray_src_nbo(t, ip);
+ regarray[idx] += src;
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/* regadd (r[i] += m): add a metadata (HBO) field to a register; immediate index. */
+static inline void
+instr_regadd_rim_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ uint64_t *regarray, idx, src;
+
+ TRACE("[Thread %2u] regadd (r[i] += m)\n", p->thread_id);
+
+ /* Structs. */
+ regarray = instr_regarray_regarray(p, ip);
+ idx = instr_regarray_idx_imm(p, ip);
+ src = instr_regarray_src_hbo(t, ip);
+ regarray[idx] += src;
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/* regadd (r[i] += i): add an immediate to a register; immediate index. */
+static inline void
+instr_regadd_rii_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ uint64_t *regarray, idx, src;
+
+ TRACE("[Thread %2u] regadd (r[i] += i)\n", p->thread_id);
+
+ /* Structs. */
+ regarray = instr_regarray_regarray(p, ip);
+ idx = instr_regarray_idx_imm(p, ip);
+ src = ip->regarray.dstsrc_val;
+ regarray[idx] += src;
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/*
+ * metarray.
+ */
+static struct metarray *
+metarray_find(struct rte_swx_pipeline *p, const char *name);
+
+/* Translate "metprefetch METARRAY INDEX" into METPREFETCH_H / METPREFETCH_M /
+ * METPREFETCH_I, depending on whether INDEX is a header field, another struct
+ * field, or an immediate. Returns 0 on success, -EINVAL (via CHECK) otherwise.
+ */
+static int
+instr_metprefetch_translate(struct rte_swx_pipeline *p,
+ struct action *action,
+ char **tokens,
+ int n_tokens,
+ struct instruction *instr,
+ struct instruction_data *data __rte_unused)
+{
+ char *metarray = tokens[1], *idx = tokens[2];
+ struct metarray *m;
+ struct field *fidx;
+ uint32_t idx_struct_id, idx_val;
+
+ CHECK(n_tokens == 3, EINVAL);
+
+ m = metarray_find(p, metarray);
+ CHECK(m, EINVAL);
+
+ /* METPREFETCH_H, METPREFETCH_M. */
+ fidx = struct_field_parse(p, action, idx, &idx_struct_id);
+ if (fidx) {
+ instr->type = INSTR_METPREFETCH_M;
+ if (idx[0] == 'h')
+ instr->type = INSTR_METPREFETCH_H;
+
+ instr->meter.metarray_id = m->id;
+ instr->meter.idx.struct_id = (uint8_t)idx_struct_id;
+ instr->meter.idx.n_bits = fidx->n_bits;
+ instr->meter.idx.offset = fidx->offset / 8;
+ return 0;
+ }
+
+ /* METPREFETCH_I: INDEX must parse fully as an integer. */
+ idx_val = strtoul(idx, &idx, 0);
+ CHECK(!idx[0], EINVAL);
+
+ instr->type = INSTR_METPREFETCH_I;
+ instr->meter.metarray_id = m->id;
+ instr->meter.idx_val = idx_val;
+ return 0;
+}
+
+/* Translate "meter METARRAY INDEX LENGTH COLOR_IN COLOR_OUT" into one of the
+ * METER_{H,M,I}{H,M}{M,I} opcodes. The opcode letters encode (index kind,
+ * length kind, color_in kind); color_out must always be a writable field.
+ * LENGTH and COLOR_OUT are mandatory fields; INDEX and COLOR_IN may each be
+ * either a field or an immediate, giving the four branches below.
+ * Returns 0 on success, -EINVAL (via CHECK) on malformed input.
+ */
+static int
+instr_meter_translate(struct rte_swx_pipeline *p,
+ struct action *action,
+ char **tokens,
+ int n_tokens,
+ struct instruction *instr,
+ struct instruction_data *data __rte_unused)
+{
+ char *metarray = tokens[1], *idx = tokens[2], *length = tokens[3];
+ char *color_in = tokens[4], *color_out = tokens[5];
+ struct metarray *m;
+ struct field *fidx, *flength, *fcin, *fcout;
+ uint32_t idx_struct_id, length_struct_id;
+ uint32_t color_in_struct_id, color_out_struct_id;
+
+ CHECK(n_tokens == 6, EINVAL);
+
+ m = metarray_find(p, metarray);
+ CHECK(m, EINVAL);
+
+ fidx = struct_field_parse(p, action, idx, &idx_struct_id);
+
+ flength = struct_field_parse(p, action, length, &length_struct_id);
+ CHECK(flength, EINVAL);
+
+ fcin = struct_field_parse(p, action, color_in, &color_in_struct_id);
+
+ /* COLOR_OUT parsed with NULL action: must be a writable header/metadata
+ * field, never action data.
+ */
+ fcout = struct_field_parse(p, NULL, color_out, &color_out_struct_id);
+ CHECK(fcout, EINVAL);
+
+ /* index = HMEFT, length = HMEFT, color_in = MEFT, color_out = MEF. */
+ if (fidx && fcin) {
+ instr->type = INSTR_METER_MMM;
+ if (idx[0] == 'h' && length[0] == 'h')
+ instr->type = INSTR_METER_HHM;
+ if (idx[0] == 'h' && length[0] != 'h')
+ instr->type = INSTR_METER_HMM;
+ if (idx[0] != 'h' && length[0] == 'h')
+ instr->type = INSTR_METER_MHM;
+
+ instr->meter.metarray_id = m->id;
+
+ instr->meter.idx.struct_id = (uint8_t)idx_struct_id;
+ instr->meter.idx.n_bits = fidx->n_bits;
+ instr->meter.idx.offset = fidx->offset / 8;
+
+ instr->meter.length.struct_id = (uint8_t)length_struct_id;
+ instr->meter.length.n_bits = flength->n_bits;
+ instr->meter.length.offset = flength->offset / 8;
+
+ instr->meter.color_in.struct_id = (uint8_t)color_in_struct_id;
+ instr->meter.color_in.n_bits = fcin->n_bits;
+ instr->meter.color_in.offset = fcin->offset / 8;
+
+ instr->meter.color_out.struct_id = (uint8_t)color_out_struct_id;
+ instr->meter.color_out.n_bits = fcout->n_bits;
+ instr->meter.color_out.offset = fcout->offset / 8;
+
+ return 0;
+ }
+
+ /* index = HMEFT, length = HMEFT, color_in = I, color_out = MEF. */
+ if (fidx && !fcin) {
+ uint32_t color_in_val = strtoul(color_in, &color_in, 0);
+ CHECK(!color_in[0], EINVAL);
+
+ instr->type = INSTR_METER_MMI;
+ if (idx[0] == 'h' && length[0] == 'h')
+ instr->type = INSTR_METER_HHI;
+ if (idx[0] == 'h' && length[0] != 'h')
+ instr->type = INSTR_METER_HMI;
+ if (idx[0] != 'h' && length[0] == 'h')
+ instr->type = INSTR_METER_MHI;
+
+ instr->meter.metarray_id = m->id;
+
+ instr->meter.idx.struct_id = (uint8_t)idx_struct_id;
+ instr->meter.idx.n_bits = fidx->n_bits;
+ instr->meter.idx.offset = fidx->offset / 8;
+
+ instr->meter.length.struct_id = (uint8_t)length_struct_id;
+ instr->meter.length.n_bits = flength->n_bits;
+ instr->meter.length.offset = flength->offset / 8;
+
+ instr->meter.color_in_val = color_in_val;
+
+ instr->meter.color_out.struct_id = (uint8_t)color_out_struct_id;
+ instr->meter.color_out.n_bits = fcout->n_bits;
+ instr->meter.color_out.offset = fcout->offset / 8;
+
+ return 0;
+ }
+
+ /* index = I, length = HMEFT, color_in = MEFT, color_out = MEF. */
+ if (!fidx && fcin) {
+ uint32_t idx_val;
+
+ idx_val = strtoul(idx, &idx, 0);
+ CHECK(!idx[0], EINVAL);
+
+ instr->type = INSTR_METER_IMM;
+ if (length[0] == 'h')
+ instr->type = INSTR_METER_IHM;
+
+ instr->meter.metarray_id = m->id;
+
+ instr->meter.idx_val = idx_val;
+
+ instr->meter.length.struct_id = (uint8_t)length_struct_id;
+ instr->meter.length.n_bits = flength->n_bits;
+ instr->meter.length.offset = flength->offset / 8;
+
+ instr->meter.color_in.struct_id = (uint8_t)color_in_struct_id;
+ instr->meter.color_in.n_bits = fcin->n_bits;
+ instr->meter.color_in.offset = fcin->offset / 8;
+
+ instr->meter.color_out.struct_id = (uint8_t)color_out_struct_id;
+ instr->meter.color_out.n_bits = fcout->n_bits;
+ instr->meter.color_out.offset = fcout->offset / 8;
+
+ return 0;
+ }
+
+ /* index = I, length = HMEFT, color_in = I, color_out = MEF. */
+ if (!fidx && !fcin) {
+ uint32_t idx_val, color_in_val;
+
+ idx_val = strtoul(idx, &idx, 0);
+ CHECK(!idx[0], EINVAL);
+
+ color_in_val = strtoul(color_in, &color_in, 0);
+ CHECK(!color_in[0], EINVAL);
+
+ instr->type = INSTR_METER_IMI;
+ if (length[0] == 'h')
+ instr->type = INSTR_METER_IHI;
+
+ instr->meter.metarray_id = m->id;
+
+ instr->meter.idx_val = idx_val;
+
+ instr->meter.length.struct_id = (uint8_t)length_struct_id;
+ instr->meter.length.n_bits = flength->n_bits;
+ instr->meter.length.offset = flength->offset / 8;
+
+ instr->meter.color_in_val = color_in_val;
+
+ instr->meter.color_out.struct_id = (uint8_t)color_out_struct_id;
+ instr->meter.color_out.n_bits = fcout->n_bits;
+ instr->meter.color_out.offset = fcout->offset / 8;
+
+ return 0;
+ }
+
+ /* Unreachable: the four branches above cover all fidx/fcin cases. */
+ CHECK(0, EINVAL);
+}
+
+/* Meter selected by a host-byte-order struct-field index: mask the 64-bit
+ * load to the field width, then bound it with the array size mask.
+ */
+static inline struct meter *
+instr_meter_idx_hbo(struct rte_swx_pipeline *p, struct thread *t, struct instruction *ip)
+{
+ struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
+
+ uint8_t *idx_struct = t->structs[ip->meter.idx.struct_id];
+ uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->meter.idx.offset];
+ uint64_t idx64 = *idx64_ptr;
+ uint64_t idx64_mask = UINT64_MAX >> (64 - (ip)->meter.idx.n_bits);
+ uint64_t idx = idx64 & idx64_mask & r->size_mask;
+
+ return &r->metarray[idx];
+}
+
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+
+/* Meter selected by a network-byte-order (header) field index: byte-swap the
+ * 64-bit load, shift the field's bits down, then bound with the size mask.
+ */
+static inline struct meter *
+instr_meter_idx_nbo(struct rte_swx_pipeline *p, struct thread *t, struct instruction *ip)
+{
+ struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
+
+ uint8_t *idx_struct = t->structs[ip->meter.idx.struct_id];
+ uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->meter.idx.offset];
+ uint64_t idx64 = *idx64_ptr;
+ uint64_t idx = (ntoh64(idx64) >> (64 - ip->meter.idx.n_bits)) & r->size_mask;
+
+ return &r->metarray[idx];
+}
+
+#else
+
+/* On big-endian hosts NBO == HBO, so reuse the HBO reader. */
+#define instr_meter_idx_nbo instr_meter_idx_hbo
+
+#endif
+
+/* Meter selected by an immediate index, bounded with the array size mask. */
+static inline struct meter *
+instr_meter_idx_imm(struct rte_swx_pipeline *p, struct instruction *ip)
+{
+ struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
+
+ return &r->metarray[ip->meter.idx_val & r->size_mask];
+}
+
+/*
+ * Read the packet length operand from a host-byte-order (meta-data) field:
+ * mask the 64-bit read down to the field's bit width.
+ */
+static inline uint32_t
+instr_meter_length_hbo(struct thread *t, struct instruction *ip)
+{
+ uint8_t *src_struct = t->structs[ip->meter.length.struct_id];
+ uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.length.offset];
+ uint64_t src64 = *src64_ptr;
+ uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->meter.length.n_bits);
+ uint64_t src = src64 & src64_mask;
+
+ return (uint32_t)src;
+}
+
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+
+/*
+ * Read the packet length operand from a network-byte-order (header) field:
+ * byte-swap the 64-bit read and right-align it to the field's bit width.
+ */
+static inline uint32_t
+instr_meter_length_nbo(struct thread *t, struct instruction *ip)
+{
+ uint8_t *src_struct = t->structs[ip->meter.length.struct_id];
+ uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.length.offset];
+ uint64_t src64 = *src64_ptr;
+ uint64_t src = ntoh64(src64) >> (64 - ip->meter.length.n_bits);
+
+ return (uint32_t)src;
+}
+
+#else
+
+/* On big-endian CPUs, network byte order is host byte order. */
+#define instr_meter_length_nbo instr_meter_length_hbo
+
+#endif
+
+/*
+ * Read the input color operand from a host-byte-order (meta-data) field;
+ * only the low n_bits of the 64-bit read are significant.
+ */
+static inline enum rte_color
+instr_meter_color_in_hbo(struct thread *t, struct instruction *ip)
+{
+ uint8_t *src_struct = t->structs[ip->meter.color_in.struct_id];
+ uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.color_in.offset];
+ uint64_t src64 = *src64_ptr;
+ uint64_t src64_mask = UINT64_MAX >> (64 - ip->meter.color_in.n_bits);
+ uint64_t src = src64 & src64_mask;
+
+ return (enum rte_color)src;
+}
+
+/*
+ * Write the output color into a host-byte-order (meta-data) field using a
+ * 64-bit read-modify-write so the bits adjacent to the field are preserved.
+ */
+static inline void
+instr_meter_color_out_hbo_set(struct thread *t, struct instruction *ip, enum rte_color color_out)
+{
+ uint8_t *dst_struct = t->structs[ip->meter.color_out.struct_id];
+ uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->meter.color_out.offset];
+ uint64_t dst64 = *dst64_ptr;
+ uint64_t dst64_mask = UINT64_MAX >> (64 - ip->meter.color_out.n_bits);
+
+ uint64_t src = (uint64_t)color_out;
+
+ *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
+}
+
+/*
+ * metprefetch (h): prefetch the meter state into cache ahead of the meter
+ * instruction; index taken from a header (network-byte-order) field.
+ */
+static inline void
+instr_metprefetch_h_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ struct meter *m;
+
+ TRACE("[Thread %2u] metprefetch (h)\n", p->thread_id);
+
+ /* Structs. */
+ m = instr_meter_idx_nbo(p, t, ip);
+ rte_prefetch0(m);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/*
+ * metprefetch (m): prefetch the meter state into cache ahead of the meter
+ * instruction; index taken from a meta-data (host-byte-order) field.
+ */
+static inline void
+instr_metprefetch_m_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ struct meter *m;
+
+ TRACE("[Thread %2u] metprefetch (m)\n", p->thread_id);
+
+ /* Structs. */
+ m = instr_meter_idx_hbo(p, t, ip);
+ rte_prefetch0(m);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/*
+ * metprefetch (i): prefetch the meter state into cache ahead of the meter
+ * instruction; index is an instruction immediate.
+ */
+static inline void
+instr_metprefetch_i_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ struct meter *m;
+
+ TRACE("[Thread %2u] metprefetch (i)\n", p->thread_id);
+
+ /* Structs. */
+ m = instr_meter_idx_imm(p, ip);
+ rte_prefetch0(m);
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/*
+ * meter (hhm): index from header field (NBO), length from header field (NBO),
+ * color_in from meta-data field (HBO). Runs the color-aware trTCM check,
+ * masks the result with the meter's color_mask, writes color_out back to
+ * meta-data and updates the per-color packet/byte statistics. All other
+ * meter (xyz) variants below follow this same pattern and differ only in
+ * where the index/length/color_in operands come from.
+ */
+static inline void
+instr_meter_hhm_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ struct meter *m;
+ uint64_t time, n_pkts, n_bytes;
+ uint32_t length;
+ enum rte_color color_in, color_out;
+
+ TRACE("[Thread %2u] meter (hhm)\n", p->thread_id);
+
+ /* Structs. */
+ m = instr_meter_idx_nbo(p, t, ip);
+ rte_prefetch0(m->n_pkts);
+ time = rte_get_tsc_cycles();
+ length = instr_meter_length_nbo(t, ip);
+ color_in = instr_meter_color_in_hbo(t, ip);
+
+ color_out = rte_meter_trtcm_color_aware_check(&m->m,
+ &m->profile->profile,
+ time,
+ length,
+ color_in);
+
+ color_out &= m->color_mask;
+
+ n_pkts = m->n_pkts[color_out];
+ n_bytes = m->n_bytes[color_out];
+
+ instr_meter_color_out_hbo_set(t, ip, color_out);
+
+ m->n_pkts[color_out] = n_pkts + 1;
+ m->n_bytes[color_out] = n_bytes + length;
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/* meter (hhi): like meter (hhm), but color_in is an instruction immediate. */
+static inline void
+instr_meter_hhi_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ struct meter *m;
+ uint64_t time, n_pkts, n_bytes;
+ uint32_t length;
+ enum rte_color color_in, color_out;
+
+ TRACE("[Thread %2u] meter (hhi)\n", p->thread_id);
+
+ /* Structs. */
+ m = instr_meter_idx_nbo(p, t, ip);
+ rte_prefetch0(m->n_pkts);
+ time = rte_get_tsc_cycles();
+ length = instr_meter_length_nbo(t, ip);
+ color_in = (enum rte_color)ip->meter.color_in_val;
+
+ color_out = rte_meter_trtcm_color_aware_check(&m->m,
+ &m->profile->profile,
+ time,
+ length,
+ color_in);
+
+ color_out &= m->color_mask;
+
+ n_pkts = m->n_pkts[color_out];
+ n_bytes = m->n_bytes[color_out];
+
+ instr_meter_color_out_hbo_set(t, ip, color_out);
+
+ m->n_pkts[color_out] = n_pkts + 1;
+ m->n_bytes[color_out] = n_bytes + length;
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/* meter (hmm): like meter (hhm), but length comes from a meta-data (HBO) field. */
+static inline void
+instr_meter_hmm_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ struct meter *m;
+ uint64_t time, n_pkts, n_bytes;
+ uint32_t length;
+ enum rte_color color_in, color_out;
+
+ TRACE("[Thread %2u] meter (hmm)\n", p->thread_id);
+
+ /* Structs. */
+ m = instr_meter_idx_nbo(p, t, ip);
+ rte_prefetch0(m->n_pkts);
+ time = rte_get_tsc_cycles();
+ length = instr_meter_length_hbo(t, ip);
+ color_in = instr_meter_color_in_hbo(t, ip);
+
+ color_out = rte_meter_trtcm_color_aware_check(&m->m,
+ &m->profile->profile,
+ time,
+ length,
+ color_in);
+
+ color_out &= m->color_mask;
+
+ n_pkts = m->n_pkts[color_out];
+ n_bytes = m->n_bytes[color_out];
+
+ instr_meter_color_out_hbo_set(t, ip, color_out);
+
+ m->n_pkts[color_out] = n_pkts + 1;
+ m->n_bytes[color_out] = n_bytes + length;
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+/* meter (hmi): header (NBO) index, meta-data (HBO) length, immediate color_in. */
+static inline void
+instr_meter_hmi_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ struct meter *m;
+ uint64_t time, n_pkts, n_bytes;
+ uint32_t length;
+ enum rte_color color_in, color_out;
+
+ TRACE("[Thread %2u] meter (hmi)\n", p->thread_id);
+
+ /* Structs. */
+ m = instr_meter_idx_nbo(p, t, ip);
+ rte_prefetch0(m->n_pkts);
+ time = rte_get_tsc_cycles();
+ length = instr_meter_length_hbo(t, ip);
+ color_in = (enum rte_color)ip->meter.color_in_val;
+
+ color_out = rte_meter_trtcm_color_aware_check(&m->m,
+ &m->profile->profile,
+ time,
+ length,
+ color_in);
+
+ color_out &= m->color_mask;
+
+ n_pkts = m->n_pkts[color_out];
+ n_bytes = m->n_bytes[color_out];
+
+ instr_meter_color_out_hbo_set(t, ip, color_out);
+
+ m->n_pkts[color_out] = n_pkts + 1;
+ m->n_bytes[color_out] = n_bytes + length;
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/* meter (mhm): meta-data (HBO) index, header (NBO) length, meta-data color_in. */
+static inline void
+instr_meter_mhm_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ struct meter *m;
+ uint64_t time, n_pkts, n_bytes;
+ uint32_t length;
+ enum rte_color color_in, color_out;
+
+ TRACE("[Thread %2u] meter (mhm)\n", p->thread_id);
+
+ /* Structs. */
+ m = instr_meter_idx_hbo(p, t, ip);
+ rte_prefetch0(m->n_pkts);
+ time = rte_get_tsc_cycles();
+ length = instr_meter_length_nbo(t, ip);
+ color_in = instr_meter_color_in_hbo(t, ip);
+
+ color_out = rte_meter_trtcm_color_aware_check(&m->m,
+ &m->profile->profile,
+ time,
+ length,
+ color_in);
+
+ color_out &= m->color_mask;
+
+ n_pkts = m->n_pkts[color_out];
+ n_bytes = m->n_bytes[color_out];
+
+ instr_meter_color_out_hbo_set(t, ip, color_out);
+
+ m->n_pkts[color_out] = n_pkts + 1;
+ m->n_bytes[color_out] = n_bytes + length;
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/* meter (mhi): meta-data (HBO) index, header (NBO) length, immediate color_in. */
+static inline void
+instr_meter_mhi_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ struct meter *m;
+ uint64_t time, n_pkts, n_bytes;
+ uint32_t length;
+ enum rte_color color_in, color_out;
+
+ TRACE("[Thread %2u] meter (mhi)\n", p->thread_id);
+
+ /* Structs. */
+ m = instr_meter_idx_hbo(p, t, ip);
+ rte_prefetch0(m->n_pkts);
+ time = rte_get_tsc_cycles();
+ length = instr_meter_length_nbo(t, ip);
+ color_in = (enum rte_color)ip->meter.color_in_val;
+
+ color_out = rte_meter_trtcm_color_aware_check(&m->m,
+ &m->profile->profile,
+ time,
+ length,
+ color_in);
+
+ color_out &= m->color_mask;
+
+ n_pkts = m->n_pkts[color_out];
+ n_bytes = m->n_bytes[color_out];
+
+ instr_meter_color_out_hbo_set(t, ip, color_out);
+
+ m->n_pkts[color_out] = n_pkts + 1;
+ m->n_bytes[color_out] = n_bytes + length;
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/* meter (mmm): meta-data (HBO) index, length and color_in. */
+static inline void
+instr_meter_mmm_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ struct meter *m;
+ uint64_t time, n_pkts, n_bytes;
+ uint32_t length;
+ enum rte_color color_in, color_out;
+
+ TRACE("[Thread %2u] meter (mmm)\n", p->thread_id);
+
+ /* Structs. */
+ m = instr_meter_idx_hbo(p, t, ip);
+ rte_prefetch0(m->n_pkts);
+ time = rte_get_tsc_cycles();
+ length = instr_meter_length_hbo(t, ip);
+ color_in = instr_meter_color_in_hbo(t, ip);
+
+ color_out = rte_meter_trtcm_color_aware_check(&m->m,
+ &m->profile->profile,
+ time,
+ length,
+ color_in);
+
+ color_out &= m->color_mask;
+
+ n_pkts = m->n_pkts[color_out];
+ n_bytes = m->n_bytes[color_out];
+
+ instr_meter_color_out_hbo_set(t, ip, color_out);
+
+ m->n_pkts[color_out] = n_pkts + 1;
+ m->n_bytes[color_out] = n_bytes + length;
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/* meter (mmi): meta-data (HBO) index and length, immediate color_in. */
+static inline void
+instr_meter_mmi_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ struct meter *m;
+ uint64_t time, n_pkts, n_bytes;
+ uint32_t length;
+ enum rte_color color_in, color_out;
+
+ TRACE("[Thread %2u] meter (mmi)\n", p->thread_id);
+
+ /* Structs. */
+ m = instr_meter_idx_hbo(p, t, ip);
+ rte_prefetch0(m->n_pkts);
+ time = rte_get_tsc_cycles();
+ length = instr_meter_length_hbo(t, ip);
+ color_in = (enum rte_color)ip->meter.color_in_val;
+
+ color_out = rte_meter_trtcm_color_aware_check(&m->m,
+ &m->profile->profile,
+ time,
+ length,
+ color_in);
+
+ color_out &= m->color_mask;
+
+ n_pkts = m->n_pkts[color_out];
+ n_bytes = m->n_bytes[color_out];
+
+ instr_meter_color_out_hbo_set(t, ip, color_out);
+
+ m->n_pkts[color_out] = n_pkts + 1;
+ m->n_bytes[color_out] = n_bytes + length;
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/* meter (ihm): immediate index, header (NBO) length, meta-data color_in. */
+static inline void
+instr_meter_ihm_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ struct meter *m;
+ uint64_t time, n_pkts, n_bytes;
+ uint32_t length;
+ enum rte_color color_in, color_out;
+
+ TRACE("[Thread %2u] meter (ihm)\n", p->thread_id);
+
+ /* Structs. */
+ m = instr_meter_idx_imm(p, ip);
+ rte_prefetch0(m->n_pkts);
+ time = rte_get_tsc_cycles();
+ length = instr_meter_length_nbo(t, ip);
+ color_in = instr_meter_color_in_hbo(t, ip);
+
+ color_out = rte_meter_trtcm_color_aware_check(&m->m,
+ &m->profile->profile,
+ time,
+ length,
+ color_in);
+
+ color_out &= m->color_mask;
+
+ n_pkts = m->n_pkts[color_out];
+ n_bytes = m->n_bytes[color_out];
+
+ instr_meter_color_out_hbo_set(t, ip, color_out);
+
+ m->n_pkts[color_out] = n_pkts + 1;
+ m->n_bytes[color_out] = n_bytes + length;
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/* meter (ihi): immediate index, header (NBO) length, immediate color_in. */
+static inline void
+instr_meter_ihi_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ struct meter *m;
+ uint64_t time, n_pkts, n_bytes;
+ uint32_t length;
+ enum rte_color color_in, color_out;
+
+ TRACE("[Thread %2u] meter (ihi)\n", p->thread_id);
+
+ /* Structs. */
+ m = instr_meter_idx_imm(p, ip);
+ rte_prefetch0(m->n_pkts);
+ time = rte_get_tsc_cycles();
+ length = instr_meter_length_nbo(t, ip);
+ color_in = (enum rte_color)ip->meter.color_in_val;
+
+ color_out = rte_meter_trtcm_color_aware_check(&m->m,
+ &m->profile->profile,
+ time,
+ length,
+ color_in);
+
+ color_out &= m->color_mask;
+
+ n_pkts = m->n_pkts[color_out];
+ n_bytes = m->n_bytes[color_out];
+
+ instr_meter_color_out_hbo_set(t, ip, color_out);
+
+ m->n_pkts[color_out] = n_pkts + 1;
+ m->n_bytes[color_out] = n_bytes + length;
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/* meter (imm): immediate index, meta-data (HBO) length and color_in. */
+static inline void
+instr_meter_imm_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ struct meter *m;
+ uint64_t time, n_pkts, n_bytes;
+ uint32_t length;
+ enum rte_color color_in, color_out;
+
+ TRACE("[Thread %2u] meter (imm)\n", p->thread_id);
+
+ /* Structs. */
+ m = instr_meter_idx_imm(p, ip);
+ rte_prefetch0(m->n_pkts);
+ time = rte_get_tsc_cycles();
+ length = instr_meter_length_hbo(t, ip);
+ color_in = instr_meter_color_in_hbo(t, ip);
+
+ color_out = rte_meter_trtcm_color_aware_check(&m->m,
+ &m->profile->profile,
+ time,
+ length,
+ color_in);
+
+ color_out &= m->color_mask;
+
+ n_pkts = m->n_pkts[color_out];
+ n_bytes = m->n_bytes[color_out];
+
+ instr_meter_color_out_hbo_set(t, ip, color_out);
+
+ m->n_pkts[color_out] = n_pkts + 1;
+ m->n_bytes[color_out] = n_bytes + length;
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+/* meter (imi): immediate index, meta-data (HBO) length, immediate color_in. */
+static inline void
+instr_meter_imi_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ struct meter *m;
+ uint64_t time, n_pkts, n_bytes;
+ uint32_t length;
+ enum rte_color color_in, color_out;
+
+ TRACE("[Thread %2u] meter (imi)\n", p->thread_id);
+
+ /* Structs. */
+ m = instr_meter_idx_imm(p, ip);
+ rte_prefetch0(m->n_pkts);
+ time = rte_get_tsc_cycles();
+ length = instr_meter_length_hbo(t, ip);
+ color_in = (enum rte_color)ip->meter.color_in_val;
+
+ color_out = rte_meter_trtcm_color_aware_check(&m->m,
+ &m->profile->profile,
+ time,
+ length,
+ color_in);
+
+ color_out &= m->color_mask;
+
+ n_pkts = m->n_pkts[color_out];
+ n_bytes = m->n_bytes[color_out];
+
+ instr_meter_color_out_hbo_set(t, ip, color_out);
+
+ m->n_pkts[color_out] = n_pkts + 1;
+ m->n_bytes[color_out] = n_bytes + length;
+
+ /* Thread. */
+ thread_ip_inc(p);
+}
+
+/*
+ * jmp.
+ */
+static struct action *
+action_find(struct rte_swx_pipeline *p, const char *name);
+
+/*
+ * jmp LABEL: unconditional jump. The label is recorded and the target ip is
+ * resolved in a later label-resolution pass.
+ * NOTE(review): strcpy() assumes jmp_label can hold any token accepted by
+ * CHECK_NAME in instr_translate() — confirm the two size limits agree.
+ */
+static int
+instr_jmp_translate(struct rte_swx_pipeline *p __rte_unused,
+ struct action *action __rte_unused,
+ char **tokens,
+ int n_tokens,
+ struct instruction *instr,
+ struct instruction_data *data)
+{
+ CHECK(n_tokens == 2, EINVAL);
+
+ strcpy(data->jmp_label, tokens[1]);
+
+ instr->type = INSTR_JMP;
+ instr->jmp.ip = NULL; /* Resolved later. */
+ return 0;
+}
+
+/* jmpv LABEL h.header: jump when the given header is valid. */
+static int
+instr_jmp_valid_translate(struct rte_swx_pipeline *p,
+ struct action *action __rte_unused,
+ char **tokens,
+ int n_tokens,
+ struct instruction *instr,
+ struct instruction_data *data)
+{
+ struct header *h;
+
+ CHECK(n_tokens == 3, EINVAL);
+
+ strcpy(data->jmp_label, tokens[1]);
+
+ h = header_parse(p, tokens[2]);
+ CHECK(h, EINVAL);
+
+ instr->type = INSTR_JMP_VALID;
+ instr->jmp.ip = NULL; /* Resolved later. */
+ instr->jmp.header_id = h->id;
+ return 0;
+}
+
+/* jmpnv LABEL h.header: jump when the given header is invalid. */
+static int
+instr_jmp_invalid_translate(struct rte_swx_pipeline *p,
+ struct action *action __rte_unused,
+ char **tokens,
+ int n_tokens,
+ struct instruction *instr,
+ struct instruction_data *data)
+{
+ struct header *h;
+
+ CHECK(n_tokens == 3, EINVAL);
+
+ strcpy(data->jmp_label, tokens[1]);
+
+ h = header_parse(p, tokens[2]);
+ CHECK(h, EINVAL);
+
+ instr->type = INSTR_JMP_INVALID;
+ instr->jmp.ip = NULL; /* Resolved later. */
+ instr->jmp.header_id = h->id;
+ return 0;
+}
+
+/*
+ * jmph LABEL: jump on table lookup hit. Only valid outside actions
+ * (the !action check), since the hit flag is set by table lookup.
+ */
+static int
+instr_jmp_hit_translate(struct rte_swx_pipeline *p __rte_unused,
+ struct action *action,
+ char **tokens,
+ int n_tokens,
+ struct instruction *instr,
+ struct instruction_data *data)
+{
+ CHECK(!action, EINVAL);
+ CHECK(n_tokens == 2, EINVAL);
+
+ strcpy(data->jmp_label, tokens[1]);
+
+ instr->type = INSTR_JMP_HIT;
+ instr->jmp.ip = NULL; /* Resolved later. */
+ return 0;
+}
+
+/* jmpnh LABEL: jump on table lookup miss; only valid outside actions. */
+static int
+instr_jmp_miss_translate(struct rte_swx_pipeline *p __rte_unused,
+ struct action *action,
+ char **tokens,
+ int n_tokens,
+ struct instruction *instr,
+ struct instruction_data *data)
+{
+ CHECK(!action, EINVAL);
+ CHECK(n_tokens == 2, EINVAL);
+
+ strcpy(data->jmp_label, tokens[1]);
+
+ instr->type = INSTR_JMP_MISS;
+ instr->jmp.ip = NULL; /* Resolved later. */
+ return 0;
+}
+
+/* jmpa LABEL ACTION: jump when the last table lookup selected ACTION. */
+static int
+instr_jmp_action_hit_translate(struct rte_swx_pipeline *p,
+ struct action *action,
+ char **tokens,
+ int n_tokens,
+ struct instruction *instr,
+ struct instruction_data *data)
+{
+ struct action *a;
+
+ CHECK(!action, EINVAL);
+ CHECK(n_tokens == 3, EINVAL);
+
+ strcpy(data->jmp_label, tokens[1]);
+
+ a = action_find(p, tokens[2]);
+ CHECK(a, EINVAL);
+
+ instr->type = INSTR_JMP_ACTION_HIT;
+ instr->jmp.ip = NULL; /* Resolved later. */
+ instr->jmp.action_id = a->id;
+ return 0;
+}
+
+/* jmpna LABEL ACTION: jump when the last table lookup did NOT select ACTION. */
+static int
+instr_jmp_action_miss_translate(struct rte_swx_pipeline *p,
+ struct action *action,
+ char **tokens,
+ int n_tokens,
+ struct instruction *instr,
+ struct instruction_data *data)
+{
+ struct action *a;
+
+ CHECK(!action, EINVAL);
+ CHECK(n_tokens == 3, EINVAL);
+
+ strcpy(data->jmp_label, tokens[1]);
+
+ a = action_find(p, tokens[2]);
+ CHECK(a, EINVAL);
+
+ instr->type = INSTR_JMP_ACTION_MISS;
+ instr->jmp.ip = NULL; /* Resolved later. */
+ instr->jmp.action_id = a->id;
+ return 0;
+}
+
+/*
+ * jmpeq LABEL a b: jump when a == b. Variant selection:
+ * - both operands are fields of the same byte order -> JMP_EQ;
+ * - one header (NBO) and one meta-data (HBO) field -> JMP_EQ_S (swap);
+ * - second operand is an immediate -> JMP_EQ_I.
+ */
+static int
+instr_jmp_eq_translate(struct rte_swx_pipeline *p,
+ struct action *action,
+ char **tokens,
+ int n_tokens,
+ struct instruction *instr,
+ struct instruction_data *data)
+{
+ char *a = tokens[2], *b = tokens[3];
+ struct field *fa, *fb;
+ uint64_t b_val;
+ uint32_t a_struct_id, b_struct_id;
+
+ CHECK(n_tokens == 4, EINVAL);
+
+ strcpy(data->jmp_label, tokens[1]);
+
+ fa = struct_field_parse(p, action, a, &a_struct_id);
+ CHECK(fa, EINVAL);
+
+ /* JMP_EQ or JMP_EQ_S. */
+ fb = struct_field_parse(p, action, b, &b_struct_id);
+ if (fb) {
+ instr->type = INSTR_JMP_EQ;
+ if ((a[0] == 'h' && b[0] != 'h') ||
+ (a[0] != 'h' && b[0] == 'h'))
+ instr->type = INSTR_JMP_EQ_S;
+ instr->jmp.ip = NULL; /* Resolved later. */
+
+ instr->jmp.a.struct_id = (uint8_t)a_struct_id;
+ instr->jmp.a.n_bits = fa->n_bits;
+ instr->jmp.a.offset = fa->offset / 8;
+ instr->jmp.b.struct_id = (uint8_t)b_struct_id;
+ instr->jmp.b.n_bits = fb->n_bits;
+ instr->jmp.b.offset = fb->offset / 8;
+ return 0;
+ }
+
+ /* JMP_EQ_I. */
+ b_val = strtoull(b, &b, 0);
+ CHECK(!b[0], EINVAL);
+
+ /* Pre-encode the immediate with the header field's network byte order
+ * so the exec path can compare the raw field bits directly.
+ */
+ if (a[0] == 'h')
+ b_val = hton64(b_val) >> (64 - fa->n_bits);
+
+ instr->type = INSTR_JMP_EQ_I;
+ instr->jmp.ip = NULL; /* Resolved later. */
+ instr->jmp.a.struct_id = (uint8_t)a_struct_id;
+ instr->jmp.a.n_bits = fa->n_bits;
+ instr->jmp.a.offset = fa->offset / 8;
+ instr->jmp.b_val = b_val;
+ return 0;
+}
+
+/*
+ * jmpneq LABEL a b: jump when a != b. Variant selection mirrors
+ * instr_jmp_eq_translate(): JMP_NEQ, JMP_NEQ_S (mixed byte order), or
+ * JMP_NEQ_I (immediate second operand).
+ */
+static int
+instr_jmp_neq_translate(struct rte_swx_pipeline *p,
+ struct action *action,
+ char **tokens,
+ int n_tokens,
+ struct instruction *instr,
+ struct instruction_data *data)
+{
+ char *a = tokens[2], *b = tokens[3];
+ struct field *fa, *fb;
+ uint64_t b_val;
+ uint32_t a_struct_id, b_struct_id;
+
+ CHECK(n_tokens == 4, EINVAL);
+
+ strcpy(data->jmp_label, tokens[1]);
+
+ fa = struct_field_parse(p, action, a, &a_struct_id);
+ CHECK(fa, EINVAL);
+
+ /* JMP_NEQ or JMP_NEQ_S. */
+ fb = struct_field_parse(p, action, b, &b_struct_id);
+ if (fb) {
+ instr->type = INSTR_JMP_NEQ;
+ if ((a[0] == 'h' && b[0] != 'h') ||
+ (a[0] != 'h' && b[0] == 'h'))
+ instr->type = INSTR_JMP_NEQ_S;
+ instr->jmp.ip = NULL; /* Resolved later. */
+
+ instr->jmp.a.struct_id = (uint8_t)a_struct_id;
+ instr->jmp.a.n_bits = fa->n_bits;
+ instr->jmp.a.offset = fa->offset / 8;
+ instr->jmp.b.struct_id = (uint8_t)b_struct_id;
+ instr->jmp.b.n_bits = fb->n_bits;
+ instr->jmp.b.offset = fb->offset / 8;
+ return 0;
+ }
+
+ /* JMP_NEQ_I. */
+ b_val = strtoull(b, &b, 0);
+ CHECK(!b[0], EINVAL);
+
+ /* Pre-encode the immediate in network byte order for header operands. */
+ if (a[0] == 'h')
+ b_val = hton64(b_val) >> (64 - fa->n_bits);
+
+ instr->type = INSTR_JMP_NEQ_I;
+ instr->jmp.ip = NULL; /* Resolved later. */
+ instr->jmp.a.struct_id = (uint8_t)a_struct_id;
+ instr->jmp.a.n_bits = fa->n_bits;
+ instr->jmp.a.offset = fa->offset / 8;
+ instr->jmp.b_val = b_val;
+ return 0;
+}
+
+/*
+ * jmplt LABEL a b: jump when a < b. The ordered comparison needs both
+ * operands in host order, so a distinct variant is emitted per operand
+ * byte-order combination: MM, HM, MH, HH, MI, HI.
+ * NOTE(review): unlike eq/neq, the immediate is kept in host order here —
+ * presumably the *_HI exec variant converts the header field instead; confirm
+ * against the JMP_CMP_HI macro.
+ */
+static int
+instr_jmp_lt_translate(struct rte_swx_pipeline *p,
+ struct action *action,
+ char **tokens,
+ int n_tokens,
+ struct instruction *instr,
+ struct instruction_data *data)
+{
+ char *a = tokens[2], *b = tokens[3];
+ struct field *fa, *fb;
+ uint64_t b_val;
+ uint32_t a_struct_id, b_struct_id;
+
+ CHECK(n_tokens == 4, EINVAL);
+
+ strcpy(data->jmp_label, tokens[1]);
+
+ fa = struct_field_parse(p, action, a, &a_struct_id);
+ CHECK(fa, EINVAL);
+
+ /* JMP_LT, JMP_LT_MH, JMP_LT_HM, JMP_LT_HH. */
+ fb = struct_field_parse(p, action, b, &b_struct_id);
+ if (fb) {
+ instr->type = INSTR_JMP_LT;
+ if (a[0] == 'h' && b[0] != 'h')
+ instr->type = INSTR_JMP_LT_HM;
+ if (a[0] != 'h' && b[0] == 'h')
+ instr->type = INSTR_JMP_LT_MH;
+ if (a[0] == 'h' && b[0] == 'h')
+ instr->type = INSTR_JMP_LT_HH;
+ instr->jmp.ip = NULL; /* Resolved later. */
+
+ instr->jmp.a.struct_id = (uint8_t)a_struct_id;
+ instr->jmp.a.n_bits = fa->n_bits;
+ instr->jmp.a.offset = fa->offset / 8;
+ instr->jmp.b.struct_id = (uint8_t)b_struct_id;
+ instr->jmp.b.n_bits = fb->n_bits;
+ instr->jmp.b.offset = fb->offset / 8;
+ return 0;
+ }
+
+ /* JMP_LT_MI, JMP_LT_HI. */
+ b_val = strtoull(b, &b, 0);
+ CHECK(!b[0], EINVAL);
+
+ instr->type = INSTR_JMP_LT_MI;
+ if (a[0] == 'h')
+ instr->type = INSTR_JMP_LT_HI;
+ instr->jmp.ip = NULL; /* Resolved later. */
+
+ instr->jmp.a.struct_id = (uint8_t)a_struct_id;
+ instr->jmp.a.n_bits = fa->n_bits;
+ instr->jmp.a.offset = fa->offset / 8;
+ instr->jmp.b_val = b_val;
+ return 0;
+}
+
+/*
+ * jmpgt LABEL a b: jump when a > b. Variant selection mirrors
+ * instr_jmp_lt_translate(): MM/HM/MH/HH for field-field, MI/HI for
+ * field-immediate (immediate kept in host order).
+ */
+static int
+instr_jmp_gt_translate(struct rte_swx_pipeline *p,
+ struct action *action,
+ char **tokens,
+ int n_tokens,
+ struct instruction *instr,
+ struct instruction_data *data)
+{
+ char *a = tokens[2], *b = tokens[3];
+ struct field *fa, *fb;
+ uint64_t b_val;
+ uint32_t a_struct_id, b_struct_id;
+
+ CHECK(n_tokens == 4, EINVAL);
+
+ strcpy(data->jmp_label, tokens[1]);
+
+ fa = struct_field_parse(p, action, a, &a_struct_id);
+ CHECK(fa, EINVAL);
+
+ /* JMP_GT, JMP_GT_MH, JMP_GT_HM, JMP_GT_HH. */
+ fb = struct_field_parse(p, action, b, &b_struct_id);
+ if (fb) {
+ instr->type = INSTR_JMP_GT;
+ if (a[0] == 'h' && b[0] != 'h')
+ instr->type = INSTR_JMP_GT_HM;
+ if (a[0] != 'h' && b[0] == 'h')
+ instr->type = INSTR_JMP_GT_MH;
+ if (a[0] == 'h' && b[0] == 'h')
+ instr->type = INSTR_JMP_GT_HH;
+ instr->jmp.ip = NULL; /* Resolved later. */
+
+ instr->jmp.a.struct_id = (uint8_t)a_struct_id;
+ instr->jmp.a.n_bits = fa->n_bits;
+ instr->jmp.a.offset = fa->offset / 8;
+ instr->jmp.b.struct_id = (uint8_t)b_struct_id;
+ instr->jmp.b.n_bits = fb->n_bits;
+ instr->jmp.b.offset = fb->offset / 8;
+ return 0;
+ }
+
+ /* JMP_GT_MI, JMP_GT_HI. */
+ b_val = strtoull(b, &b, 0);
+ CHECK(!b[0], EINVAL);
+
+ instr->type = INSTR_JMP_GT_MI;
+ if (a[0] == 'h')
+ instr->type = INSTR_JMP_GT_HI;
+ instr->jmp.ip = NULL; /* Resolved later. */
+
+ instr->jmp.a.struct_id = (uint8_t)a_struct_id;
+ instr->jmp.a.n_bits = fa->n_bits;
+ instr->jmp.a.offset = fa->offset / 8;
+ instr->jmp.b_val = b_val;
+ return 0;
+}
+
+/* jmp: unconditional jump to the pre-resolved target instruction. */
+static inline void
+instr_jmp_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] jmp\n", p->thread_id);
+
+ thread_ip_set(t, ip->jmp.ip);
+}
+
+/* jmpv: branch to target if the header is valid, else fall through. */
+static inline void
+instr_jmp_valid_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ uint32_t header_id = ip->jmp.header_id;
+
+ TRACE("[Thread %2u] jmpv\n", p->thread_id);
+
+ t->ip = HEADER_VALID(t, header_id) ? ip->jmp.ip : (t->ip + 1);
+}
+
+/* jmpnv: branch to target if the header is invalid, else fall through. */
+static inline void
+instr_jmp_invalid_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ uint32_t header_id = ip->jmp.header_id;
+
+ TRACE("[Thread %2u] jmpnv\n", p->thread_id);
+
+ t->ip = HEADER_VALID(t, header_id) ? (t->ip + 1) : ip->jmp.ip;
+}
+
+/*
+ * jmph: branch on table lookup hit. Branchless: t->hit (0 = miss, 1 = hit)
+ * indexes a two-entry array of {fall-through, target}.
+ */
+static inline void
+instr_jmp_hit_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ struct instruction *ip_next[] = {t->ip + 1, ip->jmp.ip};
+
+ TRACE("[Thread %2u] jmph\n", p->thread_id);
+
+ t->ip = ip_next[t->hit];
+}
+
+/* jmpnh: branch on table lookup miss; same branchless selection as jmph
+ * with the {target, fall-through} entries swapped.
+ */
+static inline void
+instr_jmp_miss_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+ struct instruction *ip_next[] = {ip->jmp.ip, t->ip + 1};
+
+ TRACE("[Thread %2u] jmpnh\n", p->thread_id);
+
+ t->ip = ip_next[t->hit];
+}
+
+/* jmpa: branch if the action selected by the last lookup matches. */
+static inline void
+instr_jmp_action_hit_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] jmpa\n", p->thread_id);
+
+ t->ip = (ip->jmp.action_id == t->action_id) ? ip->jmp.ip : (t->ip + 1);
+}
+
+/* jmpna: branch if the action selected by the last lookup does NOT match. */
+static inline void
+instr_jmp_action_miss_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] jmpna\n", p->thread_id);
+
+ t->ip = (ip->jmp.action_id == t->action_id) ? (t->ip + 1) : ip->jmp.ip;
+}
+
+/* jmpeq: field == field, same byte order (JMP_CMP). */
+static inline void
+instr_jmp_eq_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] jmpeq\n", p->thread_id);
+
+ JMP_CMP(t, ip, ==);
+}
+
+/* jmpeq (s): field == field with mixed byte order (JMP_CMP_S). */
+static inline void
+instr_jmp_eq_s_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] jmpeq (s)\n", p->thread_id);
+
+ JMP_CMP_S(t, ip, ==);
+}
+
+/* jmpeq (i): field == immediate (pre-encoded at translation; JMP_CMP_I). */
+static inline void
+instr_jmp_eq_i_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] jmpeq (i)\n", p->thread_id);
+
+ JMP_CMP_I(t, ip, ==);
+}
+
+/* jmpneq: field != field, same byte order. */
+static inline void
+instr_jmp_neq_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] jmpneq\n", p->thread_id);
+
+ JMP_CMP(t, ip, !=);
+}
+
+/* jmpneq (s): field != field with mixed byte order. */
+static inline void
+instr_jmp_neq_s_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] jmpneq (s)\n", p->thread_id);
+
+ JMP_CMP_S(t, ip, !=);
+}
+
+/* jmpneq (i): field != immediate. */
+static inline void
+instr_jmp_neq_i_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] jmpneq (i)\n", p->thread_id);
+
+ JMP_CMP_I(t, ip, !=);
+}
+
+/* jmplt: meta-data < meta-data. */
+static inline void
+instr_jmp_lt_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] jmplt\n", p->thread_id);
+
+ JMP_CMP(t, ip, <);
+}
+
+/* jmplt (mh): meta-data < header field. */
+static inline void
+instr_jmp_lt_mh_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] jmplt (mh)\n", p->thread_id);
+
+ JMP_CMP_MH(t, ip, <);
+}
+
+/* jmplt (hm): header field < meta-data. */
+static inline void
+instr_jmp_lt_hm_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] jmplt (hm)\n", p->thread_id);
+
+ JMP_CMP_HM(t, ip, <);
+}
+
+/* jmplt (hh): header field < header field. */
+static inline void
+instr_jmp_lt_hh_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] jmplt (hh)\n", p->thread_id);
+
+ JMP_CMP_HH(t, ip, <);
+}
+
+/* jmplt (mi): meta-data < immediate. */
+static inline void
+instr_jmp_lt_mi_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] jmplt (mi)\n", p->thread_id);
+
+ JMP_CMP_MI(t, ip, <);
+}
+
+/* jmplt (hi): header field < immediate. */
+static inline void
+instr_jmp_lt_hi_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] jmplt (hi)\n", p->thread_id);
+
+ JMP_CMP_HI(t, ip, <);
+}
+
+/* jmpgt: meta-data > meta-data. */
+static inline void
+instr_jmp_gt_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] jmpgt\n", p->thread_id);
+
+ JMP_CMP(t, ip, >);
+}
+
+/* jmpgt (mh): meta-data > header field. */
+static inline void
+instr_jmp_gt_mh_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] jmpgt (mh)\n", p->thread_id);
+
+ JMP_CMP_MH(t, ip, >);
+}
+
+/* jmpgt (hm): header field > meta-data. */
+static inline void
+instr_jmp_gt_hm_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] jmpgt (hm)\n", p->thread_id);
+
+ JMP_CMP_HM(t, ip, >);
+}
+
+/* jmpgt (hh): header field > header field. */
+static inline void
+instr_jmp_gt_hh_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] jmpgt (hh)\n", p->thread_id);
+
+ JMP_CMP_HH(t, ip, >);
+}
+
+/* jmpgt (mi): meta-data > immediate. */
+static inline void
+instr_jmp_gt_mi_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] jmpgt (mi)\n", p->thread_id);
+
+ JMP_CMP_MI(t, ip, >);
+}
+
+/* jmpgt (hi): header field > immediate. */
+static inline void
+instr_jmp_gt_hi_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+ struct instruction *ip = t->ip;
+
+ TRACE("[Thread %2u] jmpgt (hi)\n", p->thread_id);
+
+ JMP_CMP_HI(t, ip, >);
+}
+
+/*
+ * return.
+ */
+/* return: end the current action; only valid inside an action (CHECK(action)). */
+static int
+instr_return_translate(struct rte_swx_pipeline *p __rte_unused,
+ struct action *action,
+ char **tokens __rte_unused,
+ int n_tokens,
+ struct instruction *instr,
+ struct instruction_data *data __rte_unused)
+{
+ CHECK(action, EINVAL);
+ CHECK(n_tokens == 1, EINVAL);
+
+ instr->type = INSTR_RETURN;
+ return 0;
+}
+
+/* return: resume execution at the saved return address (t->ret). */
+static inline void
+instr_return_exec(struct rte_swx_pipeline *p)
+{
+ struct thread *t = &p->threads[p->thread_id];
+
+ TRACE("[Thread %2u] return\n", p->thread_id);
+
+ t->ip = t->ret;
+}
+
+static int
+instr_translate(struct rte_swx_pipeline *p,
+ struct action *action,
+ char *string,
+ struct instruction *instr,
+ struct instruction_data *data)
+{
+ char *tokens[RTE_SWX_INSTRUCTION_TOKENS_MAX];
+ int n_tokens = 0, tpos = 0;
+
+ /* Parse the instruction string into tokens. */
+ for ( ; ; ) {
+ char *token;
+
+ token = strtok_r(string, " \t\v", &string);
+ if (!token)
+ break;
+
+ CHECK(n_tokens < RTE_SWX_INSTRUCTION_TOKENS_MAX, EINVAL);
+ CHECK_NAME(token, EINVAL);
+
+ tokens[n_tokens] = token;
+ n_tokens++;
+ }
+
+ CHECK(n_tokens, EINVAL);
+
+ /* Handle the optional instruction label. */
+ if ((n_tokens >= 2) && !strcmp(tokens[1], ":")) {
+ strcpy(data->label, tokens[0]);
+
+ tpos += 2;
+ CHECK(n_tokens - tpos, EINVAL);
+ }
+
+ /* Identify the instruction type. */
+ if (!strcmp(tokens[tpos], "rx"))
+ return instr_rx_translate(p,
+ action,
+ &tokens[tpos],
+ n_tokens - tpos,
+ instr,
+ data);
+
+ if (!strcmp(tokens[tpos], "tx"))
+ return instr_tx_translate(p,
+ action,
+ &tokens[tpos],
+ n_tokens - tpos,
+ instr,
+ data);
+
+ if (!strcmp(tokens[tpos], "drop"))
+ return instr_drop_translate(p,
+ action,
+ &tokens[tpos],
+ n_tokens - tpos,
+ instr,
+ data);
+
+ if (!strcmp(tokens[tpos], "extract"))
+ return instr_hdr_extract_translate(p,
+ action,
+ &tokens[tpos],
+ n_tokens - tpos,
+ instr,
+ data);
+
+ if (!strcmp(tokens[tpos], "emit"))
+ return instr_hdr_emit_translate(p,
+ action,
+ &tokens[tpos],
+ n_tokens - tpos,
+ instr,
+ data);
+
+ if (!strcmp(tokens[tpos], "validate"))
+ return instr_hdr_validate_translate(p,
+ action,
+ &tokens[tpos],
+ n_tokens - tpos,
+ instr,
+ data);
+
+ if (!strcmp(tokens[tpos], "invalidate"))
+ return instr_hdr_invalidate_translate(p,
+ action,
+ &tokens[tpos],
+ n_tokens - tpos,
+ instr,
+ data);
+
+ if (!strcmp(tokens[tpos], "mov"))
+ return instr_mov_translate(p,
+ action,
+ &tokens[tpos],
+ n_tokens - tpos,
+ instr,
+ data);
+
+ if (!strcmp(tokens[tpos], "dma"))
+ return instr_dma_translate(p,
+ action,
+ &tokens[tpos],
+ n_tokens - tpos,
+ instr,
+ data);
+
+ if (!strcmp(tokens[tpos], "add"))
+ return instr_alu_add_translate(p,
+ action,
+ &tokens[tpos],
+ n_tokens - tpos,
+ instr,
+ data);
+
+ if (!strcmp(tokens[tpos], "sub"))
+ return instr_alu_sub_translate(p,
+ action,
+ &tokens[tpos],
+ n_tokens - tpos,
+ instr,
+ data);
+
+ if (!strcmp(tokens[tpos], "ckadd"))
+ return instr_alu_ckadd_translate(p,
+ action,
+ &tokens[tpos],
+ n_tokens - tpos,
+ instr,
+ data);
+
+ if (!strcmp(tokens[tpos], "cksub"))
+ return instr_alu_cksub_translate(p,
+ action,
+ &tokens[tpos],
+ n_tokens - tpos,
+ instr,
+ data);
+
+ if (!strcmp(tokens[tpos], "and"))
+ return instr_alu_and_translate(p,
+ action,
+ &tokens[tpos],
+ n_tokens - tpos,
+ instr,
+ data);
+
+ if (!strcmp(tokens[tpos], "or"))
+ return instr_alu_or_translate(p,
+ action,
+ &tokens[tpos],
+ n_tokens - tpos,
+ instr,
+ data);
+
+ if (!strcmp(tokens[tpos], "xor"))
+ return instr_alu_xor_translate(p,
+ action,
+ &tokens[tpos],
+ n_tokens - tpos,
+ instr,
+ data);
+
+ if (!strcmp(tokens[tpos], "shl"))
+ return instr_alu_shl_translate(p,
+ action,
+ &tokens[tpos],
+ n_tokens - tpos,
+ instr,
+ data);
+
+ if (!strcmp(tokens[tpos], "shr"))
+ return instr_alu_shr_translate(p,
+ action,
+ &tokens[tpos],
+ n_tokens - tpos,
+ instr,
+ data);
+
+ if (!strcmp(tokens[tpos], "regprefetch"))
+ return instr_regprefetch_translate(p,
+ action,
+ &tokens[tpos],
+ n_tokens - tpos,
+ instr,
+ data);
+
+ if (!strcmp(tokens[tpos], "regrd"))
+ return instr_regrd_translate(p,
+ action,
+ &tokens[tpos],
+ n_tokens - tpos,
+ instr,
+ data);
+
+ if (!strcmp(tokens[tpos], "regwr"))
+ return instr_regwr_translate(p,
+ action,
+ &tokens[tpos],
+ n_tokens - tpos,
+ instr,
+ data);
+
+ if (!strcmp(tokens[tpos], "regadd"))
+ return instr_regadd_translate(p,
+ action,
+ &tokens[tpos],
+ n_tokens - tpos,
+ instr,
+ data);
+
+ if (!strcmp(tokens[tpos], "metprefetch"))
+ return instr_metprefetch_translate(p,
+ action,
+ &tokens[tpos],
+ n_tokens - tpos,
+ instr,
+ data);
+
+ if (!strcmp(tokens[tpos], "meter"))
+ return instr_meter_translate(p,
+ action,
+ &tokens[tpos],
+ n_tokens - tpos,
+ instr,
+ data);
+
+ if (!strcmp(tokens[tpos], "table"))
+ return instr_table_translate(p,
+ action,
+ &tokens[tpos],
+ n_tokens - tpos,
+ instr,
+ data);
+
+ if (!strcmp(tokens[tpos], "extern"))
+ return instr_extern_translate(p,
+ action,
+ &tokens[tpos],
+ n_tokens - tpos,
+ instr,
+ data);
+
+ if (!strcmp(tokens[tpos], "jmp"))
+ return instr_jmp_translate(p,
+ action,
+ &tokens[tpos],
+ n_tokens - tpos,
+ instr,
+ data);
+
+ if (!strcmp(tokens[tpos], "jmpv"))
+ return instr_jmp_valid_translate(p,
+ action,
+ &tokens[tpos],
+ n_tokens - tpos,
+ instr,
+ data);
+
+ if (!strcmp(tokens[tpos], "jmpnv"))
+ return instr_jmp_invalid_translate(p,
+ action,
+ &tokens[tpos],
+ n_tokens - tpos,
+ instr,
+ data);
+
+ if (!strcmp(tokens[tpos], "jmph"))
+ return instr_jmp_hit_translate(p,
+ action,
+ &tokens[tpos],
+ n_tokens - tpos,
+ instr,
+ data);
+
+ if (!strcmp(tokens[tpos], "jmpnh"))
+ return instr_jmp_miss_translate(p,
+ action,
+ &tokens[tpos],
+ n_tokens - tpos,
+ instr,
+ data);
+
+ if (!strcmp(tokens[tpos], "jmpa"))
+ return instr_jmp_action_hit_translate(p,
+ action,
+ &tokens[tpos],
+ n_tokens - tpos,
+ instr,
+ data);
+
+ if (!strcmp(tokens[tpos], "jmpna"))
+ return instr_jmp_action_miss_translate(p,
+ action,
+ &tokens[tpos],
+ n_tokens - tpos,
+ instr,
+ data);
+
+ if (!strcmp(tokens[tpos], "jmpeq"))
+ return instr_jmp_eq_translate(p,
+ action,
+ &tokens[tpos],
+ n_tokens - tpos,
+ instr,
+ data);
+
+ if (!strcmp(tokens[tpos], "jmpneq"))
+ return instr_jmp_neq_translate(p,
+ action,
+ &tokens[tpos],
+ n_tokens - tpos,
+ instr,
+ data);
+
+ if (!strcmp(tokens[tpos], "jmplt"))
+ return instr_jmp_lt_translate(p,
+ action,
+ &tokens[tpos],
+ n_tokens - tpos,
+ instr,
+ data);
+
+ if (!strcmp(tokens[tpos], "jmpgt"))
+ return instr_jmp_gt_translate(p,
+ action,
+ &tokens[tpos],
+ n_tokens - tpos,
+ instr,
+ data);
+
+ if (!strcmp(tokens[tpos], "return"))
+ return instr_return_translate(p,
+ action,
+ &tokens[tpos],
+ n_tokens - tpos,
+ instr,
+ data);
+
+ CHECK(0, EINVAL);
+}
+
+static struct instruction_data *
+label_find(struct instruction_data *data, uint32_t n, const char *label)
+{
+ uint32_t i;
+
+ for (i = 0; i < n; i++)
+ if (!strcmp(label, data[i].label))
+ return &data[i];
+
+ return NULL;
+}
+
+static uint32_t
+label_is_used(struct instruction_data *data, uint32_t n, const char *label)
+{
+ uint32_t count = 0, i;
+
+ if (!label[0])
+ return 0;
+
+ for (i = 0; i < n; i++)
+ if (!strcmp(label, data[i].jmp_label))
+ count++;
+
+ return count;
+}
+
+static int
+instr_label_check(struct instruction_data *instruction_data,
+ uint32_t n_instructions)
+{
+ uint32_t i;
+
+ /* Check that all instruction labels are unique. */
+ for (i = 0; i < n_instructions; i++) {
+ struct instruction_data *data = &instruction_data[i];
+ char *label = data->label;
+ uint32_t j;
+
+ if (!label[0])
+ continue;
+
+ for (j = i + 1; j < n_instructions; j++)
+ CHECK(strcmp(label, data[j].label), EINVAL);
+ }
+
+ /* Get users for each instruction label. */
+ for (i = 0; i < n_instructions; i++) {
+ struct instruction_data *data = &instruction_data[i];
+ char *label = data->label;
+
+ data->n_users = label_is_used(instruction_data,
+ n_instructions,
+ label);
+ }
+
+ return 0;
+}
+
+static int
+instr_jmp_resolve(struct instruction *instructions,
+ struct instruction_data *instruction_data,
+ uint32_t n_instructions)
+{
+ uint32_t i;
+
+ for (i = 0; i < n_instructions; i++) {
+ struct instruction *instr = &instructions[i];
+ struct instruction_data *data = &instruction_data[i];
+ struct instruction_data *found;
+
+ if (!instruction_is_jmp(instr))
+ continue;
+
+ found = label_find(instruction_data,
+ n_instructions,
+ data->jmp_label);
+ CHECK(found, EINVAL);
+
+ instr->jmp.ip = &instructions[found - instruction_data];
+ }
+
+ return 0;
+}
+
+static int
+instr_verify(struct rte_swx_pipeline *p __rte_unused,
+ struct action *a,
+ struct instruction *instr,
+ struct instruction_data *data __rte_unused,
+ uint32_t n_instructions)
+{
+ if (!a) {
+ enum instruction_type type;
+ uint32_t i;
+
+ /* Check that the first instruction is rx. */
+ CHECK(instr[0].type == INSTR_RX, EINVAL);
+
+ /* Check that there is at least one tx instruction. */
+ for (i = 0; i < n_instructions; i++) {
+ type = instr[i].type;
+
+ if (instruction_is_tx(type))
+ break;
+ }
+ CHECK(i < n_instructions, EINVAL);
+
+ /* Check that the last instruction is either tx or unconditional
+ * jump.
+ */
+ type = instr[n_instructions - 1].type;
+ CHECK(instruction_is_tx(type) || (type == INSTR_JMP), EINVAL);
+ }
+
+ if (a) {
+ enum instruction_type type;
+ uint32_t i;
+
+ /* Check that there is at least one return or tx instruction. */
+ for (i = 0; i < n_instructions; i++) {
+ type = instr[i].type;
+
+ if ((type == INSTR_RETURN) || instruction_is_tx(type))
+ break;
+ }
+ CHECK(i < n_instructions, EINVAL);
+ }
+
+ return 0;
+}
+
+static int
+instr_pattern_extract_many_detect(struct instruction *instr,
+ struct instruction_data *data,
+ uint32_t n_instr,
+ uint32_t *n_pattern_instr)
+{
+ uint32_t i;
+
+ for (i = 0; i < n_instr; i++) {
+ if (data[i].invalid)
+ break;
+
+ if (instr[i].type != INSTR_HDR_EXTRACT)
+ break;
+
+ if (i == RTE_DIM(instr->io.hdr.header_id))
+ break;
+
+ if (i && data[i].n_users)
+ break;
+ }
+
+ if (i < 2)
+ return 0;
+
+ *n_pattern_instr = i;
+ return 1;
+}
+
+static void
+instr_pattern_extract_many_optimize(struct instruction *instr,
+ struct instruction_data *data,
+ uint32_t n_instr)
+{
+ uint32_t i;
+
+ for (i = 1; i < n_instr; i++) {
+ instr[0].type++;
+ instr[0].io.hdr.header_id[i] = instr[i].io.hdr.header_id[0];
+ instr[0].io.hdr.struct_id[i] = instr[i].io.hdr.struct_id[0];
+ instr[0].io.hdr.n_bytes[i] = instr[i].io.hdr.n_bytes[0];
+
+ data[i].invalid = 1;
+ }
+}
+
+static int
+instr_pattern_emit_many_tx_detect(struct instruction *instr,
+ struct instruction_data *data,
+ uint32_t n_instr,
+ uint32_t *n_pattern_instr)
+{
+ uint32_t i;
+
+ for (i = 0; i < n_instr; i++) {
+ if (data[i].invalid)
+ break;
+
+ if (instr[i].type != INSTR_HDR_EMIT)
+ break;
+
+ if (i == RTE_DIM(instr->io.hdr.header_id))
+ break;
+
+ if (i && data[i].n_users)
+ break;
+ }
+
+ if (!i)
+ return 0;
+
+ if (!instruction_is_tx(instr[i].type))
+ return 0;
+
+ if (data[i].n_users)
+ return 0;
+
+ i++;
+
+ *n_pattern_instr = i;
+ return 1;
+}
+
+static void
+instr_pattern_emit_many_tx_optimize(struct instruction *instr,
+ struct instruction_data *data,
+ uint32_t n_instr)
+{
+ uint32_t i;
+
+ /* Any emit instruction in addition to the first one. */
+ for (i = 1; i < n_instr - 1; i++) {
+ instr[0].type++;
+ instr[0].io.hdr.header_id[i] = instr[i].io.hdr.header_id[0];
+ instr[0].io.hdr.struct_id[i] = instr[i].io.hdr.struct_id[0];
+ instr[0].io.hdr.n_bytes[i] = instr[i].io.hdr.n_bytes[0];
+
+ data[i].invalid = 1;
+ }
+
+ /* The TX instruction is the last one in the pattern. */
+ instr[0].type++;
+ instr[0].io.io.offset = instr[i].io.io.offset;
+ instr[0].io.io.n_bits = instr[i].io.io.n_bits;
+ data[i].invalid = 1;
+}
+
+static int
+instr_pattern_dma_many_detect(struct instruction *instr,
+ struct instruction_data *data,
+ uint32_t n_instr,
+ uint32_t *n_pattern_instr)
+{
+ uint32_t i;
+
+ for (i = 0; i < n_instr; i++) {
+ if (data[i].invalid)
+ break;
+
+ if (instr[i].type != INSTR_DMA_HT)
+ break;
+
+ if (i == RTE_DIM(instr->dma.dst.header_id))
+ break;
+
+ if (i && data[i].n_users)
+ break;
+ }
+
+ if (i < 2)
+ return 0;
+
+ *n_pattern_instr = i;
+ return 1;
+}
+
+static void
+instr_pattern_dma_many_optimize(struct instruction *instr,
+ struct instruction_data *data,
+ uint32_t n_instr)
+{
+ uint32_t i;
+
+ for (i = 1; i < n_instr; i++) {
+ instr[0].type++;
+ instr[0].dma.dst.header_id[i] = instr[i].dma.dst.header_id[0];
+ instr[0].dma.dst.struct_id[i] = instr[i].dma.dst.struct_id[0];
+ instr[0].dma.src.offset[i] = instr[i].dma.src.offset[0];
+ instr[0].dma.n_bytes[i] = instr[i].dma.n_bytes[0];
+
+ data[i].invalid = 1;
+ }
+}
+
+static uint32_t
+instr_optimize(struct instruction *instructions,
+ struct instruction_data *instruction_data,
+ uint32_t n_instructions)
+{
+ uint32_t i, pos = 0;
+
+ for (i = 0; i < n_instructions; ) {
+ struct instruction *instr = &instructions[i];
+ struct instruction_data *data = &instruction_data[i];
+ uint32_t n_instr = 0;
+ int detected;
+
+ /* Extract many. */
+ detected = instr_pattern_extract_many_detect(instr,
+ data,
+ n_instructions - i,
+ &n_instr);
+ if (detected) {
+ instr_pattern_extract_many_optimize(instr,
+ data,
+ n_instr);
+ i += n_instr;
+ continue;
+ }
+
+ /* Emit many + TX. */
+ detected = instr_pattern_emit_many_tx_detect(instr,
+ data,
+ n_instructions - i,
+ &n_instr);
+ if (detected) {
+ instr_pattern_emit_many_tx_optimize(instr,
+ data,
+ n_instr);
+ i += n_instr;
+ continue;
+ }
+
+ /* DMA many. */
+ detected = instr_pattern_dma_many_detect(instr,
+ data,
+ n_instructions - i,
+ &n_instr);
+ if (detected) {
+ instr_pattern_dma_many_optimize(instr, data, n_instr);
+ i += n_instr;
+ continue;
+ }
+
+ /* No pattern starting at the current instruction. */
+ i++;
+ }
+
+ /* Eliminate the invalid instructions that have been optimized out. */
+ for (i = 0; i < n_instructions; i++) {
+ struct instruction *instr = &instructions[i];
+ struct instruction_data *data = &instruction_data[i];
+
+ if (data->invalid)
+ continue;
+
+ if (i != pos) {
+ memcpy(&instructions[pos], instr, sizeof(*instr));
+ memcpy(&instruction_data[pos], data, sizeof(*data));
+ }
+
+ pos++;
+ }
+
+ return pos;
+}
+
+static int
+instruction_config(struct rte_swx_pipeline *p,
+ struct action *a,
+ const char **instructions,
+ uint32_t n_instructions)
+{
+ struct instruction *instr = NULL;
+ struct instruction_data *data = NULL;
+ int err = 0;
+ uint32_t i;
+
+ CHECK(n_instructions, EINVAL);
+ CHECK(instructions, EINVAL);
+ for (i = 0; i < n_instructions; i++)
+ CHECK_INSTRUCTION(instructions[i], EINVAL);
+
+ /* Memory allocation. */