pipeline: add SWX instruction optimizer
[dpdk.git] / lib / librte_pipeline / rte_swx_pipeline.c
index 2ae6229..77eae19 100644 (file)
@@ -6,8 +6,11 @@
 #include <stdio.h>
 #include <errno.h>
 #include <sys/queue.h>
+#include <arpa/inet.h>
 
 #include <rte_common.h>
+#include <rte_prefetch.h>
+#include <rte_byteorder.h>
 
 #include "rte_swx_pipeline.h"
 #include "rte_swx_ctl.h"
@@ -21,6 +24,19 @@ do {                                                                           \
 #define CHECK_NAME(name, err_code)                                             \
        CHECK((name) && (name)[0], err_code)
 
+#ifndef TRACE_LEVEL
+#define TRACE_LEVEL 0
+#endif
+
+#if TRACE_LEVEL
+#define TRACE(...) printf(__VA_ARGS__)
+#else
+#define TRACE(...)
+#endif
+
+#define ntoh64(x) rte_be_to_cpu_64(x)
+#define hton64(x) rte_cpu_to_be_64(x)
+
 /*
  * Struct.
  */
@@ -181,7 +197,341 @@ struct header_out_runtime {
 /*
  * Instruction.
  */
+
+/* Packet headers are always in Network Byte Order (NBO), i.e. big endian.
+ * Packet meta-data fields are always assumed to be in Host Byte Order (HBO).
+ * Table entry fields can be in either NBO or HBO; they are assumed to be in HBO
+ * when transferred to packet meta-data and in NBO when transferred to packet
+ * headers.
+ */
+
+/* Notation conventions:
+ *    -Header field: H = h.header.field (dst/src)
+ *    -Meta-data field: M = m.field (dst/src)
+ *    -Extern object mailbox field: E = e.field (dst/src)
+ *    -Extern function mailbox field: F = f.field (dst/src)
+ *    -Table action data field: T = t.field (src only)
+ *    -Immediate value: I = 32-bit unsigned value (src only)
+ */
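+
+/* Example (illustrative only, field names are hypothetical): the instruction
+ * "mov m.field h.hdr.field" copies a header field (H, in NBO) into a
+ * meta-data field (M, in HBO), so it maps to the INSTR_MOV_S variant below,
+ * which also performs the byte order conversion, while "mov m.field_a
+ * m.field_b" maps to plain INSTR_MOV, which needs no conversion.
+ */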
+
+enum instruction_type {
+       /* rx m.port_in */
+       INSTR_RX,
+
+       /* tx m.port_out */
+       INSTR_TX,
+
+       /* extract h.header */
+       INSTR_HDR_EXTRACT,
+       INSTR_HDR_EXTRACT2,
+       INSTR_HDR_EXTRACT3,
+       INSTR_HDR_EXTRACT4,
+       INSTR_HDR_EXTRACT5,
+       INSTR_HDR_EXTRACT6,
+       INSTR_HDR_EXTRACT7,
+       INSTR_HDR_EXTRACT8,
+
+       /* emit h.header */
+       INSTR_HDR_EMIT,
+       INSTR_HDR_EMIT_TX,
+       INSTR_HDR_EMIT2_TX,
+       INSTR_HDR_EMIT3_TX,
+       INSTR_HDR_EMIT4_TX,
+       INSTR_HDR_EMIT5_TX,
+       INSTR_HDR_EMIT6_TX,
+       INSTR_HDR_EMIT7_TX,
+       INSTR_HDR_EMIT8_TX,
+
+       /* validate h.header */
+       INSTR_HDR_VALIDATE,
+
+       /* invalidate h.header */
+       INSTR_HDR_INVALIDATE,
+
+       /* mov dst src
+        * dst = src
+        * dst = HMEF, src = HMEFTI
+        */
+       INSTR_MOV,   /* dst = MEF, src = MEFT */
+       INSTR_MOV_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
+       INSTR_MOV_I, /* dst = HMEF, src = I */
+
+       /* dma h.header t.field
+        * memcpy(h.header, t.field, sizeof(h.header))
+        */
+       INSTR_DMA_HT,
+       INSTR_DMA_HT2,
+       INSTR_DMA_HT3,
+       INSTR_DMA_HT4,
+       INSTR_DMA_HT5,
+       INSTR_DMA_HT6,
+       INSTR_DMA_HT7,
+       INSTR_DMA_HT8,
+
+       /* add dst src
+        * dst += src
+        * dst = HMEF, src = HMEFTI
+        */
+       INSTR_ALU_ADD,    /* dst = MEF, src = MEF */
+       INSTR_ALU_ADD_MH, /* dst = MEF, src = H */
+       INSTR_ALU_ADD_HM, /* dst = H, src = MEF */
+       INSTR_ALU_ADD_HH, /* dst = H, src = H */
+       INSTR_ALU_ADD_MI, /* dst = MEF, src = I */
+       INSTR_ALU_ADD_HI, /* dst = H, src = I */
+
+       /* sub dst src
+        * dst -= src
+        * dst = HMEF, src = HMEFTI
+        */
+       INSTR_ALU_SUB,    /* dst = MEF, src = MEF */
+       INSTR_ALU_SUB_MH, /* dst = MEF, src = H */
+       INSTR_ALU_SUB_HM, /* dst = H, src = MEF */
+       INSTR_ALU_SUB_HH, /* dst = H, src = H */
+       INSTR_ALU_SUB_MI, /* dst = MEF, src = I */
+       INSTR_ALU_SUB_HI, /* dst = H, src = I */
+
+       /* ckadd dst src
+        * dst = dst '+ src[0:1] '+ src[2:3] + ...
+        * dst = H, src = {H, h.header}
+        */
+       INSTR_ALU_CKADD_FIELD,    /* src = H */
+       INSTR_ALU_CKADD_STRUCT20, /* src = h.header, with sizeof(header) = 20 */
+       INSTR_ALU_CKADD_STRUCT,   /* src = h.header, with any sizeof(header) */
+
+       /* cksub dst src
+        * dst = dst '- src
+        * dst = H, src = H
+        */
+       INSTR_ALU_CKSUB_FIELD,
+
+       /* and dst src
+        * dst &= src
+        * dst = HMEF, src = HMEFTI
+        */
+       INSTR_ALU_AND,   /* dst = MEF, src = MEFT */
+       INSTR_ALU_AND_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
+       INSTR_ALU_AND_I, /* dst = HMEF, src = I */
+
+       /* or dst src
+        * dst |= src
+        * dst = HMEF, src = HMEFTI
+        */
+       INSTR_ALU_OR,   /* dst = MEF, src = MEFT */
+       INSTR_ALU_OR_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
+       INSTR_ALU_OR_I, /* dst = HMEF, src = I */
+
+       /* xor dst src
+        * dst ^= src
+        * dst = HMEF, src = HMEFTI
+        */
+       INSTR_ALU_XOR,   /* dst = MEF, src = MEFT */
+       INSTR_ALU_XOR_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
+       INSTR_ALU_XOR_I, /* dst = HMEF, src = I */
+
+       /* shl dst src
+        * dst <<= src
+        * dst = HMEF, src = HMEFTI
+        */
+       INSTR_ALU_SHL,    /* dst = MEF, src = MEF */
+       INSTR_ALU_SHL_MH, /* dst = MEF, src = H */
+       INSTR_ALU_SHL_HM, /* dst = H, src = MEF */
+       INSTR_ALU_SHL_HH, /* dst = H, src = H */
+       INSTR_ALU_SHL_MI, /* dst = MEF, src = I */
+       INSTR_ALU_SHL_HI, /* dst = H, src = I */
+
+       /* shr dst src
+        * dst >>= src
+        * dst = HMEF, src = HMEFTI
+        */
+       INSTR_ALU_SHR,    /* dst = MEF, src = MEF */
+       INSTR_ALU_SHR_MH, /* dst = MEF, src = H */
+       INSTR_ALU_SHR_HM, /* dst = H, src = MEF */
+       INSTR_ALU_SHR_HH, /* dst = H, src = H */
+       INSTR_ALU_SHR_MI, /* dst = MEF, src = I */
+       INSTR_ALU_SHR_HI, /* dst = H, src = I */
+
+       /* table TABLE */
+       INSTR_TABLE,
+
+       /* extern e.obj.func */
+       INSTR_EXTERN_OBJ,
+
+       /* extern f.func */
+       INSTR_EXTERN_FUNC,
+
+       /* jmp LABEL
+        * Unconditional jump
+        */
+       INSTR_JMP,
+
+       /* jmpv LABEL h.header
+        * Jump if header is valid
+        */
+       INSTR_JMP_VALID,
+
+       /* jmpnv LABEL h.header
+        * Jump if header is invalid
+        */
+       INSTR_JMP_INVALID,
+
+       /* jmph LABEL
+        * Jump if table lookup hit
+        */
+       INSTR_JMP_HIT,
+
+       /* jmpnh LABEL
+        * Jump if table lookup miss
+        */
+       INSTR_JMP_MISS,
+
+       /* jmpa LABEL ACTION
+        * Jump if action was run
+        */
+       INSTR_JMP_ACTION_HIT,
+
+       /* jmpna LABEL ACTION
+        * Jump if action was not run
+        */
+       INSTR_JMP_ACTION_MISS,
+
+       /* jmpeq LABEL a b
+        * Jump if a is equal to b
+        * a = HMEFT, b = HMEFTI
+        */
+       INSTR_JMP_EQ,   /* (a, b) = (MEFT, MEFT) or (a, b) = (H, H) */
+       INSTR_JMP_EQ_S, /* (a, b) = (MEFT, H) or (a, b) = (H, MEFT) */
+       INSTR_JMP_EQ_I, /* (a, b) = (MEFT, I) or (a, b) = (H, I) */
+
+       /* jmpneq LABEL a b
+        * Jump if a is not equal to b
+        * a = HMEFT, b = HMEFTI
+        */
+       INSTR_JMP_NEQ,   /* (a, b) = (MEFT, MEFT) or (a, b) = (H, H) */
+       INSTR_JMP_NEQ_S, /* (a, b) = (MEFT, H) or (a, b) = (H, MEFT) */
+       INSTR_JMP_NEQ_I, /* (a, b) = (MEFT, I) or (a, b) = (H, I) */
+
+       /* jmplt LABEL a b
+        * Jump if a is less than b
+        * a = HMEFT, b = HMEFTI
+        */
+       INSTR_JMP_LT,    /* a = MEF, b = MEF */
+       INSTR_JMP_LT_MH, /* a = MEF, b = H */
+       INSTR_JMP_LT_HM, /* a = H, b = MEF */
+       INSTR_JMP_LT_HH, /* a = H, b = H */
+       INSTR_JMP_LT_MI, /* a = MEF, b = I */
+       INSTR_JMP_LT_HI, /* a = H, b = I */
+
+       /* jmpgt LABEL a b
+        * Jump if a is greater than b
+        * a = HMEFT, b = HMEFTI
+        */
+       INSTR_JMP_GT,    /* a = MEF, b = MEF */
+       INSTR_JMP_GT_MH, /* a = MEF, b = H */
+       INSTR_JMP_GT_HM, /* a = H, b = MEF */
+       INSTR_JMP_GT_HH, /* a = H, b = H */
+       INSTR_JMP_GT_MI, /* a = MEF, b = I */
+       INSTR_JMP_GT_HI, /* a = H, b = I */
+
+       /* return
+        * Return from action
+        */
+       INSTR_RETURN,
+};
+
+struct instr_operand {
+       uint8_t struct_id;
+       uint8_t n_bits;
+       uint8_t offset;
+       uint8_t pad;
+};
+
+struct instr_io {
+       struct {
+               uint8_t offset;
+               uint8_t n_bits;
+               uint8_t pad[2];
+       } io;
+
+       struct {
+               uint8_t header_id[8];
+               uint8_t struct_id[8];
+               uint8_t n_bytes[8];
+       } hdr;
+};
+
+struct instr_hdr_validity {
+       uint8_t header_id;
+};
+
+struct instr_table {
+       uint8_t table_id;
+};
+
+struct instr_extern_obj {
+       uint8_t ext_obj_id;
+       uint8_t func_id;
+};
+
+struct instr_extern_func {
+       uint8_t ext_func_id;
+};
+
+struct instr_dst_src {
+       struct instr_operand dst;
+       union {
+               struct instr_operand src;
+               uint32_t src_val;
+       };
+};
+
+struct instr_dma {
+       struct {
+               uint8_t header_id[8];
+               uint8_t struct_id[8];
+       } dst;
+
+       struct {
+               uint8_t offset[8];
+       } src;
+
+       uint16_t n_bytes[8];
+};
+
+struct instr_jmp {
+       struct instruction *ip;
+
+       union {
+               struct instr_operand a;
+               uint8_t header_id;
+               uint8_t action_id;
+       };
+
+       union {
+               struct instr_operand b;
+               uint32_t b_val;
+       };
+};
+
 struct instruction {
+       enum instruction_type type;
+       union {
+               struct instr_io io;
+               struct instr_hdr_validity valid;
+               struct instr_dst_src mov;
+               struct instr_dma dma;
+               struct instr_dst_src alu;
+               struct instr_table table;
+               struct instr_extern_obj ext_obj;
+               struct instr_extern_func ext_func;
+               struct instr_jmp jmp;
+       };
+};
+
+struct instruction_data {
+       char label[RTE_SWX_NAME_SIZE];
+       char jmp_label[RTE_SWX_NAME_SIZE];
+       uint32_t n_users; /* Number of jmp instructions pointing to this instruction. */
+       int invalid;
 };
 
 /*
@@ -251,6 +601,10 @@ struct table_runtime {
  * Pipeline.
  */
 struct thread {
+       /* Packet. */
+       struct rte_swx_pkt pkt;
+       uint8_t *ptr;
+
        /* Structures. */
        uint8_t **structs;
 
@@ -280,6 +634,325 @@ struct thread {
        struct instruction *ret;
 };
 
+#define MASK64_BIT_GET(mask, pos) ((mask) & (1LLU << (pos)))
+#define MASK64_BIT_SET(mask, pos) ((mask) | (1LLU << (pos)))
+#define MASK64_BIT_CLR(mask, pos) ((mask) & ~(1LLU << (pos)))
+
+#define HEADER_VALID(thread, header_id) \
+       MASK64_BIT_GET((thread)->valid_headers, header_id)
+
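+/* The ALU macros below implement a read-modify-write on a sub-64-bit field:
+ * a full 64-bit word is loaded from the field offset, each operand is
+ * isolated with an n_bits-wide mask (HBO fields live in the least significant
+ * bits), the operator is applied, and only the bits covered by the
+ * destination mask are written back, leaving the surrounding bits untouched.
+ */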
+#define ALU(thread, ip, operator)  \
+{                                                                              \
+       uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id];      \
+       uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset];   \
+       uint64_t dst64 = *dst64_ptr;                                           \
+       uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits);       \
+       uint64_t dst = dst64 & dst64_mask;                                     \
+                                                                              \
+       uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id];      \
+       uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset];   \
+       uint64_t src64 = *src64_ptr;                                           \
+       uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits);       \
+       uint64_t src = src64 & src64_mask;                                     \
+                                                                              \
+       uint64_t result = dst operator src;                                    \
+                                                                              \
+       *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask);            \
+}
+
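+/* On little endian CPUs, header (NBO) operands need a byte swap: after
+ * ntoh64(), an n_bits-wide header field sits in the most significant bits of
+ * the 64-bit word, so the shift by (64 - n_bits) right-aligns its value
+ * before the operation; when the destination is a header field, the result is
+ * shifted back and converted with hton64(). The _MH/_HM/_HH suffixes encode
+ * which of the two operands are header fields. On big endian CPUs, no
+ * conversion is needed and the generic macros are reused.
+ */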
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+
+#define ALU_S(thread, ip, operator)  \
+{                                                                              \
+       uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id];      \
+       uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset];   \
+       uint64_t dst64 = *dst64_ptr;                                           \
+       uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits);       \
+       uint64_t dst = dst64 & dst64_mask;                                     \
+                                                                              \
+       uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id];      \
+       uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset];   \
+       uint64_t src64 = *src64_ptr;                                           \
+       uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits);           \
+                                                                              \
+       uint64_t result = dst operator src;                                    \
+                                                                              \
+       *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask);            \
+}
+
+#define ALU_MH ALU_S
+
+#define ALU_HM(thread, ip, operator)  \
+{                                                                              \
+       uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id];      \
+       uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset];   \
+       uint64_t dst64 = *dst64_ptr;                                           \
+       uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits);       \
+       uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits);           \
+                                                                              \
+       uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id];      \
+       uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset];   \
+       uint64_t src64 = *src64_ptr;                                           \
+       uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits);       \
+       uint64_t src = src64 & src64_mask;                                     \
+                                                                              \
+       uint64_t result = dst operator src;                                    \
+       result = hton64(result << (64 - (ip)->alu.dst.n_bits));                \
+                                                                              \
+       *dst64_ptr = (dst64 & ~dst64_mask) | result;                           \
+}
+
+#define ALU_HH(thread, ip, operator)  \
+{                                                                              \
+       uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id];      \
+       uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset];   \
+       uint64_t dst64 = *dst64_ptr;                                           \
+       uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits);       \
+       uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits);           \
+                                                                              \
+       uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id];      \
+       uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset];   \
+       uint64_t src64 = *src64_ptr;                                           \
+       uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits);           \
+                                                                              \
+       uint64_t result = dst operator src;                                    \
+       result = hton64(result << (64 - (ip)->alu.dst.n_bits));                \
+                                                                              \
+       *dst64_ptr = (dst64 & ~dst64_mask) | result;                           \
+}
+
+#else
+
+#define ALU_S ALU
+#define ALU_MH ALU
+#define ALU_HM ALU
+#define ALU_HH ALU
+
+#endif
+
+#define ALU_I(thread, ip, operator)  \
+{                                                                              \
+       uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id];      \
+       uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset];   \
+       uint64_t dst64 = *dst64_ptr;                                           \
+       uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits);       \
+       uint64_t dst = dst64 & dst64_mask;                                     \
+                                                                              \
+       uint64_t src = (ip)->alu.src_val;                                      \
+                                                                              \
+       uint64_t result = dst operator src;                                    \
+                                                                              \
+       *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask);            \
+}
+
+#define ALU_MI ALU_I
+
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+
+#define ALU_HI(thread, ip, operator)  \
+{                                                                              \
+       uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id];      \
+       uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset];   \
+       uint64_t dst64 = *dst64_ptr;                                           \
+       uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits);       \
+       uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits);           \
+                                                                              \
+       uint64_t src = (ip)->alu.src_val;                                      \
+                                                                              \
+       uint64_t result = dst operator src;                                    \
+       result = hton64(result << (64 - (ip)->alu.dst.n_bits));                \
+                                                                              \
+       *dst64_ptr = (dst64 & ~dst64_mask) | result;                           \
+}
+
+#else
+
+#define ALU_HI ALU_I
+
+#endif
+
+#define MOV(thread, ip)  \
+{                                                                              \
+       uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id];      \
+       uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset];   \
+       uint64_t dst64 = *dst64_ptr;                                           \
+       uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits);       \
+                                                                              \
+       uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id];      \
+       uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset];   \
+       uint64_t src64 = *src64_ptr;                                           \
+       uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits);       \
+       uint64_t src = src64 & src64_mask;                                     \
+                                                                              \
+       *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);               \
+}
+
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+
+#define MOV_S(thread, ip)  \
+{                                                                              \
+       uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id];      \
+       uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset];   \
+       uint64_t dst64 = *dst64_ptr;                                           \
+       uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits);       \
+                                                                              \
+       uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id];      \
+       uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset];   \
+       uint64_t src64 = *src64_ptr;                                           \
+       uint64_t src = ntoh64(src64) >> (64 - (ip)->mov.src.n_bits);           \
+                                                                              \
+       *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);               \
+}
+
+#else
+
+#define MOV_S MOV
+
+#endif
+
+#define MOV_I(thread, ip)  \
+{                                                                              \
+       uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id];      \
+       uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset];   \
+       uint64_t dst64 = *dst64_ptr;                                           \
+       uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits);       \
+                                                                              \
+       uint64_t src = (ip)->mov.src_val;                                      \
+                                                                              \
+       *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);               \
+}
+
+#define JMP_CMP(thread, ip, operator)  \
+{                                                                              \
+       uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id];          \
+       uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset];         \
+       uint64_t a64 = *a64_ptr;                                               \
+       uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits);           \
+       uint64_t a = a64 & a64_mask;                                           \
+                                                                              \
+       uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id];          \
+       uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset];         \
+       uint64_t b64 = *b64_ptr;                                               \
+       uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits);           \
+       uint64_t b = b64 & b64_mask;                                           \
+                                                                              \
+       (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1);     \
+}
+
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+
+#define JMP_CMP_S(thread, ip, operator)  \
+{                                                                              \
+       uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id];          \
+       uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset];         \
+       uint64_t a64 = *a64_ptr;                                               \
+       uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits);           \
+       uint64_t a = a64 & a64_mask;                                           \
+                                                                              \
+       uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id];          \
+       uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset];         \
+       uint64_t b64 = *b64_ptr;                                               \
+       uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits);                 \
+                                                                              \
+       (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1);     \
+}
+
+#define JMP_CMP_MH JMP_CMP_S
+
+#define JMP_CMP_HM(thread, ip, operator)  \
+{                                                                              \
+       uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id];          \
+       uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset];         \
+       uint64_t a64 = *a64_ptr;                                               \
+       uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits);                 \
+                                                                              \
+       uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id];          \
+       uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset];         \
+       uint64_t b64 = *b64_ptr;                                               \
+       uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits);           \
+       uint64_t b = b64 & b64_mask;                                           \
+                                                                              \
+       (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1);     \
+}
+
+#define JMP_CMP_HH(thread, ip, operator)  \
+{                                                                              \
+       uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id];          \
+       uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset];         \
+       uint64_t a64 = *a64_ptr;                                               \
+       uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits);                 \
+                                                                              \
+       uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id];          \
+       uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset];         \
+       uint64_t b64 = *b64_ptr;                                               \
+       uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits);                 \
+                                                                              \
+       (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1);     \
+}
+
+#else
+
+#define JMP_CMP_S JMP_CMP
+#define JMP_CMP_MH JMP_CMP
+#define JMP_CMP_HM JMP_CMP
+#define JMP_CMP_HH JMP_CMP
+
+#endif
+
+#define JMP_CMP_I(thread, ip, operator)  \
+{                                                                              \
+       uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id];          \
+       uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset];         \
+       uint64_t a64 = *a64_ptr;                                               \
+       uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits);           \
+       uint64_t a = a64 & a64_mask;                                           \
+                                                                              \
+       uint64_t b = (ip)->jmp.b_val;                                          \
+                                                                              \
+       (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1);     \
+}
+
+#define JMP_CMP_MI JMP_CMP_I
+
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+
+#define JMP_CMP_HI(thread, ip, operator)  \
+{                                                                              \
+       uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id];          \
+       uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset];         \
+       uint64_t a64 = *a64_ptr;                                               \
+       uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits);                 \
+                                                                              \
+       uint64_t b = (ip)->jmp.b_val;                                          \
+                                                                              \
+       (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1);     \
+}
+
+#else
+
+#define JMP_CMP_HI JMP_CMP_I
+
+#endif
+
+#define METADATA_READ(thread, offset, n_bits)                                  \
+({                                                                             \
+       uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset];           \
+       uint64_t m64 = *m64_ptr;                                               \
+       uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits));                     \
+       (m64 & m64_mask);                                                      \
+})
+
+#define METADATA_WRITE(thread, offset, n_bits, value)                          \
+{                                                                              \
+       uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset];           \
+       uint64_t m64 = *m64_ptr;                                               \
+       uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits));                     \
+                                                                              \
+       uint64_t m_new = value;                                                \
+                                                                              \
+       *m64_ptr = (m64 & ~m64_mask) | (m_new & m64_mask);                     \
+}
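+
+/* Usage note: the instruction handlers below access the meta-data fields
+ * named by the instruction through these macros, e.g. instr_rx_exec() writes
+ * the input port id with METADATA_WRITE() and instr_tx_exec() reads the
+ * output port id with METADATA_READ().
+ */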
+
 #ifndef RTE_SWX_PIPELINE_THREADS_MAX
 #define RTE_SWX_PIPELINE_THREADS_MAX 16
 #endif
@@ -315,6 +988,8 @@ struct rte_swx_pipeline {
        uint32_t n_actions;
        uint32_t n_tables;
        uint32_t n_headers;
+       uint32_t thread_id;
+       uint32_t port_id;
        uint32_t n_instructions;
        int build_done;
        int numa_node;
@@ -822,6 +1497,94 @@ extern_obj_find(struct rte_swx_pipeline *p, const char *name)
        return NULL;
 }
 
+static struct extern_type_member_func *
+extern_obj_member_func_parse(struct rte_swx_pipeline *p,
+                            const char *name,
+                            struct extern_obj **obj)
+{
+       struct extern_obj *object;
+       struct extern_type_member_func *func;
+       char *object_name, *func_name;
+
+       if (name[0] != 'e' || name[1] != '.')
+               return NULL;
+
+       object_name = strdup(&name[2]);
+       if (!object_name)
+               return NULL;
+
+       func_name = strchr(object_name, '.');
+       if (!func_name) {
+               free(object_name);
+               return NULL;
+       }
+
+       *func_name = 0;
+       func_name++;
+
+       object = extern_obj_find(p, object_name);
+       if (!object) {
+               free(object_name);
+               return NULL;
+       }
+
+       func = extern_type_member_func_find(object->type, func_name);
+       if (!func) {
+               free(object_name);
+               return NULL;
+       }
+
+       if (obj)
+               *obj = object;
+
+       free(object_name);
+       return func;
+}
+
+static struct field *
+extern_obj_mailbox_field_parse(struct rte_swx_pipeline *p,
+                              const char *name,
+                              struct extern_obj **object)
+{
+       struct extern_obj *obj;
+       struct field *f;
+       char *obj_name, *field_name;
+
+       if ((name[0] != 'e') || (name[1] != '.'))
+               return NULL;
+
+       obj_name = strdup(&name[2]);
+       if (!obj_name)
+               return NULL;
+
+       field_name = strchr(obj_name, '.');
+       if (!field_name) {
+               free(obj_name);
+               return NULL;
+       }
+
+       *field_name = 0;
+       field_name++;
+
+       obj = extern_obj_find(p, obj_name);
+       if (!obj) {
+               free(obj_name);
+               return NULL;
+       }
+
+       f = struct_type_field_find(obj->type->mailbox_struct_type, field_name);
+       if (!f) {
+               free(obj_name);
+               return NULL;
+       }
+
+       if (object)
+               *object = obj;
+
+       free(obj_name);
+       return f;
+}
+
 int
 rte_swx_pipeline_extern_type_register(struct rte_swx_pipeline *p,
        const char *name,
@@ -1060,6 +1823,60 @@ extern_func_find(struct rte_swx_pipeline *p, const char *name)
        return NULL;
 }
 
+static struct extern_func *
+extern_func_parse(struct rte_swx_pipeline *p,
+                 const char *name)
+{
+       if (name[0] != 'f' || name[1] != '.')
+               return NULL;
+
+       return extern_func_find(p, &name[2]);
+}
+
+static struct field *
+extern_func_mailbox_field_parse(struct rte_swx_pipeline *p,
+                               const char *name,
+                               struct extern_func **function)
+{
+       struct extern_func *func;
+       struct field *f;
+       char *func_name, *field_name;
+
+       if ((name[0] != 'f') || (name[1] != '.'))
+               return NULL;
+
+       func_name = strdup(&name[2]);
+       if (!func_name)
+               return NULL;
+
+       field_name = strchr(func_name, '.');
+       if (!field_name) {
+               free(func_name);
+               return NULL;
+       }
+
+       *field_name = 0;
+       field_name++;
+
+       func = extern_func_find(p, func_name);
+       if (!func) {
+               free(func_name);
+               return NULL;
+       }
+
+       f = struct_type_field_find(func->mailbox_struct_type, field_name);
+       if (!f) {
+               free(func_name);
+               return NULL;
+       }
+
+       if (function)
+               *function = func;
+
+       free(func_name);
+       return f;
+}
+
 int
 rte_swx_pipeline_extern_func_register(struct rte_swx_pipeline *p,
                                      const char *name,
@@ -1187,6 +2004,16 @@ header_find(struct rte_swx_pipeline *p, const char *name)
        return NULL;
 }
 
+static struct header *
+header_parse(struct rte_swx_pipeline *p,
+            const char *name)
+{
+       if (name[0] != 'h' || name[1] != '.')
+               return NULL;
+
+       return header_find(p, &name[2]);
+}
+
 static struct field *
 header_field_parse(struct rte_swx_pipeline *p,
                   const char *name,
@@ -1430,47 +2257,3924 @@ metadata_free(struct rte_swx_pipeline *p)
 /*
  * Instruction.
  */
-static inline void
-thread_ip_reset(struct rte_swx_pipeline *p, struct thread *t)
-{
-       t->ip = p->instructions;
-}
-
 static int
-instruction_config(struct rte_swx_pipeline *p __rte_unused,
-                  struct action *a __rte_unused,
-                  const char **instructions __rte_unused,
-                  uint32_t n_instructions __rte_unused)
+instruction_is_jmp(struct instruction *instr)
 {
-       return 0;
+       switch (instr->type) {
+       case INSTR_JMP:
+       case INSTR_JMP_VALID:
+       case INSTR_JMP_INVALID:
+       case INSTR_JMP_HIT:
+       case INSTR_JMP_MISS:
+       case INSTR_JMP_ACTION_HIT:
+       case INSTR_JMP_ACTION_MISS:
+       case INSTR_JMP_EQ:
+       case INSTR_JMP_EQ_S:
+       case INSTR_JMP_EQ_I:
+       case INSTR_JMP_NEQ:
+       case INSTR_JMP_NEQ_S:
+       case INSTR_JMP_NEQ_I:
+       case INSTR_JMP_LT:
+       case INSTR_JMP_LT_MH:
+       case INSTR_JMP_LT_HM:
+       case INSTR_JMP_LT_HH:
+       case INSTR_JMP_LT_MI:
+       case INSTR_JMP_LT_HI:
+       case INSTR_JMP_GT:
+       case INSTR_JMP_GT_MH:
+       case INSTR_JMP_GT_HM:
+       case INSTR_JMP_GT_HH:
+       case INSTR_JMP_GT_MI:
+       case INSTR_JMP_GT_HI:
+               return 1;
+
+       default:
+               return 0;
+       }
 }
 
-/*
- * Action.
- */
-static struct action *
-action_find(struct rte_swx_pipeline *p, const char *name)
+static struct field *
+action_field_parse(struct action *action, const char *name);
+
+static struct field *
+struct_field_parse(struct rte_swx_pipeline *p,
+                  struct action *action,
+                  const char *name,
+                  uint32_t *struct_id)
 {
-       struct action *elem;
+       struct field *f;
 
-       if (!name)
-               return NULL;
+       switch (name[0]) {
+       case 'h':
+       {
+               struct header *header;
 
-       TAILQ_FOREACH(elem, &p->actions, node)
-               if (strcmp(elem->name, name) == 0)
-                       return elem;
+               f = header_field_parse(p, name, &header);
+               if (!f)
+                       return NULL;
 
-       return NULL;
-}
+               *struct_id = header->struct_id;
+               return f;
+       }
 
-int
-rte_swx_pipeline_action_config(struct rte_swx_pipeline *p,
-                              const char *name,
-                              const char *args_struct_type_name,
-                              const char **instructions,
-                              uint32_t n_instructions)
-{
-       struct struct_type *args_struct_type;
+       case 'm':
+       {
+               f = metadata_field_parse(p, name);
+               if (!f)
+                       return NULL;
+
+               *struct_id = p->metadata_struct_id;
+               return f;
+       }
+
+       case 't':
+       {
+               if (!action)
+                       return NULL;
+
+               f = action_field_parse(action, name);
+               if (!f)
+                       return NULL;
+
+               *struct_id = 0;
+               return f;
+       }
+
+       case 'e':
+       {
+               struct extern_obj *obj;
+
+               f = extern_obj_mailbox_field_parse(p, name, &obj);
+               if (!f)
+                       return NULL;
+
+               *struct_id = obj->struct_id;
+               return f;
+       }
+
+       case 'f':
+       {
+               struct extern_func *func;
+
+               f = extern_func_mailbox_field_parse(p, name, &func);
+               if (!f)
+                       return NULL;
+
+               *struct_id = func->struct_id;
+               return f;
+       }
+
+       default:
+               return NULL;
+       }
+}
+
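+/* Select the next input port in round-robin order. The mask-based wrap-around
+ * assumes that the number of input ports is a power of two.
+ */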
+static inline void
+pipeline_port_inc(struct rte_swx_pipeline *p)
+{
+       p->port_id = (p->port_id + 1) & (p->n_ports_in - 1);
+}
+
+static inline void
+thread_ip_reset(struct rte_swx_pipeline *p, struct thread *t)
+{
+       t->ip = p->instructions;
+}
+
+static inline void
+thread_ip_set(struct thread *t, struct instruction *ip)
+{
+       t->ip = ip;
+}
+
+static inline void
+thread_ip_action_call(struct rte_swx_pipeline *p,
+                     struct thread *t,
+                     uint32_t action_id)
+{
+       t->ret = t->ip + 1;
+       t->ip = p->action_instructions[action_id];
+}
+
+static inline void
+thread_ip_inc(struct rte_swx_pipeline *p);
+
+static inline void
+thread_ip_inc(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+
+       t->ip++;
+}
+
+static inline void
+thread_ip_inc_cond(struct thread *t, int cond)
+{
+       t->ip += cond;
+}
+
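+/* Cooperative multi-threading: a thread gives up the CPU explicitly, i.e.
+ * after issuing a packet rx, when a table lookup is not yet finalized or when
+ * an extern object/function call is not yet done, and the next thread is
+ * resumed in round-robin order.
+ */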
+static inline void
+thread_yield(struct rte_swx_pipeline *p)
+{
+       p->thread_id = (p->thread_id + 1) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
+}
+
+static inline void
+thread_yield_cond(struct rte_swx_pipeline *p, int cond)
+{
+       p->thread_id = (p->thread_id + cond) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
+}
+
+/*
+ * rx.
+ */
+static int
+instr_rx_translate(struct rte_swx_pipeline *p,
+                  struct action *action,
+                  char **tokens,
+                  int n_tokens,
+                  struct instruction *instr,
+                  struct instruction_data *data __rte_unused)
+{
+       struct field *f;
+
+       CHECK(!action, EINVAL);
+       CHECK(n_tokens == 2, EINVAL);
+
+       f = metadata_field_parse(p, tokens[1]);
+       CHECK(f, EINVAL);
+
+       instr->type = INSTR_RX;
+       instr->io.io.offset = f->offset / 8;
+       instr->io.io.n_bits = f->n_bits;
+       return 0;
+}
+
+static inline void
+instr_rx_exec(struct rte_swx_pipeline *p);
+
+static inline void
+instr_rx_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+       struct port_in_runtime *port = &p->in[p->port_id];
+       struct rte_swx_pkt *pkt = &t->pkt;
+       int pkt_received;
+
+       /* Packet. */
+       pkt_received = port->pkt_rx(port->obj, pkt);
+       t->ptr = &pkt->pkt[pkt->offset];
+       rte_prefetch0(t->ptr);
+
+       TRACE("[Thread %2u] rx %s from port %u\n",
+             p->thread_id,
+             pkt_received ? "1 pkt" : "0 pkts",
+             p->port_id);
+
+       /* Headers. */
+       t->valid_headers = 0;
+       t->n_headers_out = 0;
+
+       /* Meta-data. */
+       METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, p->port_id);
+
+       /* Tables. */
+       t->table_state = p->table_state;
+
+       /* Thread. */
+       pipeline_port_inc(p);
+       thread_ip_inc_cond(t, pkt_received);
+       thread_yield(p);
+}
+
+/*
+ * tx.
+ */
+static int
+instr_tx_translate(struct rte_swx_pipeline *p,
+                  struct action *action __rte_unused,
+                  char **tokens,
+                  int n_tokens,
+                  struct instruction *instr,
+                  struct instruction_data *data __rte_unused)
+{
+       struct field *f;
+
+       CHECK(n_tokens == 2, EINVAL);
+
+       f = metadata_field_parse(p, tokens[1]);
+       CHECK(f, EINVAL);
+
+       instr->type = INSTR_TX;
+       instr->io.io.offset = f->offset / 8;
+       instr->io.io.n_bits = f->n_bits;
+       return 0;
+}
+
+static inline void
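+/* Flush the headers accumulated by the emit instructions in front of the
+ * packet payload. The common cases (no header change or header decapsulation,
+ * and header encapsulation) are handled cheaply; any other case assembles the
+ * emitted headers in header_out_storage and copies them back in one shot.
+ */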
+emit_handler(struct thread *t)
+{
+       struct header_out_runtime *h0 = &t->headers_out[0];
+       struct header_out_runtime *h1 = &t->headers_out[1];
+       uint32_t offset = 0, i;
+
+       /* No header change or header decapsulation. */
+       if ((t->n_headers_out == 1) &&
+           (h0->ptr + h0->n_bytes == t->ptr)) {
+               TRACE("Emit handler: no header change or header decap.\n");
+
+               t->pkt.offset -= h0->n_bytes;
+               t->pkt.length += h0->n_bytes;
+
+               return;
+       }
+
+       /* Header encapsulation (optionally, with prior header decapsulation). */
+       if ((t->n_headers_out == 2) &&
+           (h1->ptr + h1->n_bytes == t->ptr) &&
+           (h0->ptr == h0->ptr0)) {
+               uint32_t offset;
+
+               TRACE("Emit handler: header encapsulation.\n");
+
+               offset = h0->n_bytes + h1->n_bytes;
+               memcpy(t->ptr - offset, h0->ptr, h0->n_bytes);
+               t->pkt.offset -= offset;
+               t->pkt.length += offset;
+
+               return;
+       }
+
+       /* Header insertion. */
+       /* TBD */
+
+       /* Header extraction. */
+       /* TBD */
+
+       /* For any other case. */
+       TRACE("Emit handler: complex case.\n");
+
+       for (i = 0; i < t->n_headers_out; i++) {
+               struct header_out_runtime *h = &t->headers_out[i];
+
+               memcpy(&t->header_out_storage[offset], h->ptr, h->n_bytes);
+               offset += h->n_bytes;
+       }
+
+       if (offset) {
+               memcpy(t->ptr - offset, t->header_out_storage, offset);
+               t->pkt.offset -= offset;
+               t->pkt.length += offset;
+       }
+}
+
+static inline void
+instr_tx_exec(struct rte_swx_pipeline *p);
+
+static inline void
+instr_tx_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+       uint64_t port_id = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
+       struct port_out_runtime *port = &p->out[port_id];
+       struct rte_swx_pkt *pkt = &t->pkt;
+
+       TRACE("[Thread %2u]: tx 1 pkt to port %u\n",
+             p->thread_id,
+             (uint32_t)port_id);
+
+       /* Headers. */
+       emit_handler(t);
+
+       /* Packet. */
+       port->pkt_tx(port->obj, pkt);
+
+       /* Thread. */
+       thread_ip_reset(p, t);
+       instr_rx_exec(p);
+}
+
+/*
+ * extract.
+ */
+static int
+instr_hdr_extract_translate(struct rte_swx_pipeline *p,
+                           struct action *action,
+                           char **tokens,
+                           int n_tokens,
+                           struct instruction *instr,
+                           struct instruction_data *data __rte_unused)
+{
+       struct header *h;
+
+       CHECK(!action, EINVAL);
+       CHECK(n_tokens == 2, EINVAL);
+
+       h = header_parse(p, tokens[1]);
+       CHECK(h, EINVAL);
+
+       instr->type = INSTR_HDR_EXTRACT;
+       instr->io.hdr.header_id[0] = h->id;
+       instr->io.hdr.struct_id[0] = h->struct_id;
+       instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
+       return 0;
+}
+
+static inline void
+__instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract);
+
+static inline void
+__instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+       uint64_t valid_headers = t->valid_headers;
+       uint8_t *ptr = t->ptr;
+       uint32_t offset = t->pkt.offset;
+       uint32_t length = t->pkt.length;
+       uint32_t i;
+
+       for (i = 0; i < n_extract; i++) {
+               uint32_t header_id = ip->io.hdr.header_id[i];
+               uint32_t struct_id = ip->io.hdr.struct_id[i];
+               uint32_t n_bytes = ip->io.hdr.n_bytes[i];
+
+               TRACE("[Thread %2u]: extract header %u (%u bytes)\n",
+                     p->thread_id,
+                     header_id,
+                     n_bytes);
+
+               /* Headers. */
+               t->structs[struct_id] = ptr;
+               valid_headers = MASK64_BIT_SET(valid_headers, header_id);
+
+               /* Packet. */
+               offset += n_bytes;
+               length -= n_bytes;
+               ptr += n_bytes;
+       }
+
+       /* Headers. */
+       t->valid_headers = valid_headers;
+
+       /* Packet. */
+       t->pkt.offset = offset;
+       t->pkt.length = length;
+       t->ptr = ptr;
+}
+
+static inline void
+instr_hdr_extract_exec(struct rte_swx_pipeline *p)
+{
+       __instr_hdr_extract_exec(p, 1);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
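+/* The extract2 .. extract8 handlers below service the fused instruction
+ * variants: runs of consecutive extract instructions are merged into a single
+ * instruction (as reported by the TRACE messages below), which cuts the
+ * per-instruction dispatch overhead on the fast path.
+ */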
+instr_hdr_extract2_exec(struct rte_swx_pipeline *p)
+{
+       TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
+             p->thread_id);
+
+       __instr_hdr_extract_exec(p, 2);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_hdr_extract3_exec(struct rte_swx_pipeline *p)
+{
+       TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
+             p->thread_id);
+
+       __instr_hdr_extract_exec(p, 3);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_hdr_extract4_exec(struct rte_swx_pipeline *p)
+{
+       TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
+             p->thread_id);
+
+       __instr_hdr_extract_exec(p, 4);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_hdr_extract5_exec(struct rte_swx_pipeline *p)
+{
+       TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
+             p->thread_id);
+
+       __instr_hdr_extract_exec(p, 5);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_hdr_extract6_exec(struct rte_swx_pipeline *p)
+{
+       TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
+             p->thread_id);
+
+       __instr_hdr_extract_exec(p, 6);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_hdr_extract7_exec(struct rte_swx_pipeline *p)
+{
+       TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
+             p->thread_id);
+
+       __instr_hdr_extract_exec(p, 7);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_hdr_extract8_exec(struct rte_swx_pipeline *p)
+{
+       TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
+             p->thread_id);
+
+       __instr_hdr_extract_exec(p, 8);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+/*
+ * emit.
+ */
+static int
+instr_hdr_emit_translate(struct rte_swx_pipeline *p,
+                        struct action *action __rte_unused,
+                        char **tokens,
+                        int n_tokens,
+                        struct instruction *instr,
+                        struct instruction_data *data __rte_unused)
+{
+       struct header *h;
+
+       CHECK(n_tokens == 2, EINVAL);
+
+       h = header_parse(p, tokens[1]);
+       CHECK(h, EINVAL);
+
+       instr->type = INSTR_HDR_EMIT;
+       instr->io.hdr.header_id[0] = h->id;
+       instr->io.hdr.struct_id[0] = h->struct_id;
+       instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
+       return 0;
+}
+
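+/* Emitted headers that are contiguous in memory are coalesced into a single
+ * headers_out[] entry, so that emit_handler() can later flush them with a
+ * single copy (or with none at all on the fast path).
+ */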
+static inline void
+__instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit);
+
+static inline void
+__instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+       uint32_t n_headers_out = t->n_headers_out;
+       struct header_out_runtime *ho = &t->headers_out[n_headers_out - 1];
+       uint8_t *ho_ptr = NULL;
+       uint32_t ho_nbytes = 0, i;
+
+       for (i = 0; i < n_emit; i++) {
+               uint32_t header_id = ip->io.hdr.header_id[i];
+               uint32_t struct_id = ip->io.hdr.struct_id[i];
+               uint32_t n_bytes = ip->io.hdr.n_bytes[i];
+
+               struct header_runtime *hi = &t->headers[header_id];
+               uint8_t *hi_ptr = t->structs[struct_id];
+
+               TRACE("[Thread %2u]: emit header %u\n",
+                     p->thread_id,
+                     header_id);
+
+               /* Headers. */
+               if (!i) {
+                       if (!t->n_headers_out) {
+                               ho = &t->headers_out[0];
+
+                               ho->ptr0 = hi->ptr0;
+                               ho->ptr = hi_ptr;
+
+                               ho_ptr = hi_ptr;
+                               ho_nbytes = n_bytes;
+
+                               n_headers_out = 1;
+
+                               continue;
+                       } else {
+                               ho_ptr = ho->ptr;
+                               ho_nbytes = ho->n_bytes;
+                       }
+               }
+
+               if (ho_ptr + ho_nbytes == hi_ptr) {
+                       ho_nbytes += n_bytes;
+               } else {
+                       ho->n_bytes = ho_nbytes;
+
+                       ho++;
+                       ho->ptr0 = hi->ptr0;
+                       ho->ptr = hi_ptr;
+
+                       ho_ptr = hi_ptr;
+                       ho_nbytes = n_bytes;
+
+                       n_headers_out++;
+               }
+       }
+
+       ho->n_bytes = ho_nbytes;
+       t->n_headers_out = n_headers_out;
+}
+
+static inline void
+instr_hdr_emit_exec(struct rte_swx_pipeline *p)
+{
+       __instr_hdr_emit_exec(p, 1);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_hdr_emit_tx_exec(struct rte_swx_pipeline *p)
+{
+       TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
+             p->thread_id);
+
+       __instr_hdr_emit_exec(p, 1);
+       instr_tx_exec(p);
+}
+
+static inline void
+instr_hdr_emit2_tx_exec(struct rte_swx_pipeline *p)
+{
+       TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
+             p->thread_id);
+
+       __instr_hdr_emit_exec(p, 2);
+       instr_tx_exec(p);
+}
+
+static inline void
+instr_hdr_emit3_tx_exec(struct rte_swx_pipeline *p)
+{
+       TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
+             p->thread_id);
+
+       __instr_hdr_emit_exec(p, 3);
+       instr_tx_exec(p);
+}
+
+static inline void
+instr_hdr_emit4_tx_exec(struct rte_swx_pipeline *p)
+{
+       TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
+             p->thread_id);
+
+       __instr_hdr_emit_exec(p, 4);
+       instr_tx_exec(p);
+}
+
+static inline void
+instr_hdr_emit5_tx_exec(struct rte_swx_pipeline *p)
+{
+       TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
+             p->thread_id);
+
+       __instr_hdr_emit_exec(p, 5);
+       instr_tx_exec(p);
+}
+
+static inline void
+instr_hdr_emit6_tx_exec(struct rte_swx_pipeline *p)
+{
+       TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
+             p->thread_id);
+
+       __instr_hdr_emit_exec(p, 6);
+       instr_tx_exec(p);
+}
+
+static inline void
+instr_hdr_emit7_tx_exec(struct rte_swx_pipeline *p)
+{
+       TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
+             p->thread_id);
+
+       __instr_hdr_emit_exec(p, 7);
+       instr_tx_exec(p);
+}
+
+static inline void
+instr_hdr_emit8_tx_exec(struct rte_swx_pipeline *p)
+{
+       TRACE("[Thread %2u] *** The next 9 instructions are fused. ***\n",
+             p->thread_id);
+
+       __instr_hdr_emit_exec(p, 8);
+       instr_tx_exec(p);
+}
+
+/*
+ * validate.
+ */
+static int
+instr_hdr_validate_translate(struct rte_swx_pipeline *p,
+                            struct action *action __rte_unused,
+                            char **tokens,
+                            int n_tokens,
+                            struct instruction *instr,
+                            struct instruction_data *data __rte_unused)
+{
+       struct header *h;
+
+       CHECK(n_tokens == 2, EINVAL);
+
+       h = header_parse(p, tokens[1]);
+       CHECK(h, EINVAL);
+
+       instr->type = INSTR_HDR_VALIDATE;
+       instr->valid.header_id = h->id;
+       return 0;
+}
+
+static inline void
+instr_hdr_validate_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+       uint32_t header_id = ip->valid.header_id;
+
+       TRACE("[Thread %2u] validate header %u\n", p->thread_id, header_id);
+
+       /* Headers. */
+       t->valid_headers = MASK64_BIT_SET(t->valid_headers, header_id);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+/*
+ * invalidate.
+ */
+static int
+instr_hdr_invalidate_translate(struct rte_swx_pipeline *p,
+                              struct action *action __rte_unused,
+                              char **tokens,
+                              int n_tokens,
+                              struct instruction *instr,
+                              struct instruction_data *data __rte_unused)
+{
+       struct header *h;
+
+       CHECK(n_tokens == 2, EINVAL);
+
+       h = header_parse(p, tokens[1]);
+       CHECK(h, EINVAL);
+
+       instr->type = INSTR_HDR_INVALIDATE;
+       instr->valid.header_id = h->id;
+       return 0;
+}
+
+static inline void
+instr_hdr_invalidate_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+       uint32_t header_id = ip->valid.header_id;
+
+       TRACE("[Thread %2u] invalidate header %u\n", p->thread_id, header_id);
+
+       /* Headers. */
+       t->valid_headers = MASK64_BIT_CLR(t->valid_headers, header_id);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+/*
+ * table.
+ */
+static struct table *
+table_find(struct rte_swx_pipeline *p, const char *name);
+
+static int
+instr_table_translate(struct rte_swx_pipeline *p,
+                     struct action *action,
+                     char **tokens,
+                     int n_tokens,
+                     struct instruction *instr,
+                     struct instruction_data *data __rte_unused)
+{
+       struct table *t;
+
+       CHECK(!action, EINVAL);
+       CHECK(n_tokens == 2, EINVAL);
+
+       t = table_find(p, tokens[1]);
+       CHECK(t, EINVAL);
+
+       instr->type = INSTR_TABLE;
+       instr->table.table_id = t->id;
+       return 0;
+}
+
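+/* Table lookup. The table driver may need several invocations to complete
+ * (done == 0), in which case the thread yields and retries the same
+ * instruction later. On completion, the hit action (or the table default
+ * action on miss) is invoked as a subroutine: thread_ip_action_call() saves
+ * the return address and jumps to the action's first instruction.
+ */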
+static inline void
+instr_table_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+       uint32_t table_id = ip->table.table_id;
+       struct rte_swx_table_state *ts = &t->table_state[table_id];
+       struct table_runtime *table = &t->tables[table_id];
+       uint64_t action_id;
+       uint8_t *action_data;
+       int done, hit;
+
+       /* Table. */
+       done = table->func(ts->obj,
+                          table->mailbox,
+                          table->key,
+                          &action_id,
+                          &action_data,
+                          &hit);
+       if (!done) {
+               /* Thread. */
+               TRACE("[Thread %2u] table %u (not finalized)\n",
+                     p->thread_id,
+                     table_id);
+
+               thread_yield(p);
+               return;
+       }
+
+       action_id = hit ? action_id : ts->default_action_id;
+       action_data = hit ? action_data : ts->default_action_data;
+
+       TRACE("[Thread %2u] table %u (%s, action %u)\n",
+             p->thread_id,
+             table_id,
+             hit ? "hit" : "miss",
+             (uint32_t)action_id);
+
+       t->action_id = action_id;
+       t->structs[0] = action_data;
+       t->hit = hit;
+
+       /* Thread. */
+       thread_ip_action_call(p, t, action_id);
+}
+
+/*
+ * extern.
+ */
+static int
+instr_extern_translate(struct rte_swx_pipeline *p,
+                      struct action *action __rte_unused,
+                      char **tokens,
+                      int n_tokens,
+                      struct instruction *instr,
+                      struct instruction_data *data __rte_unused)
+{
+       char *token = tokens[1];
+
+       CHECK(n_tokens == 2, EINVAL);
+
+       if (token[0] == 'e') {
+               struct extern_obj *obj;
+               struct extern_type_member_func *func;
+
+               func = extern_obj_member_func_parse(p, token, &obj);
+               CHECK(func, EINVAL);
+
+               instr->type = INSTR_EXTERN_OBJ;
+               instr->ext_obj.ext_obj_id = obj->id;
+               instr->ext_obj.func_id = func->id;
+
+               return 0;
+       }
+
+       if (token[0] == 'f') {
+               struct extern_func *func;
+
+               func = extern_func_parse(p, token);
+               CHECK(func, EINVAL);
+
+               instr->type = INSTR_EXTERN_FUNC;
+               instr->ext_func.ext_func_id = func->id;
+
+               return 0;
+       }
+
+       CHECK(0, EINVAL);
+}
+
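+/* Example (informative, names are illustrative): the single operand selects
+ * the extern flavor by its first character:
+ *
+ *    extern e.crypto.encrypt -> INSTR_EXTERN_OBJ (member function of object e.crypto)
+ *    extern f.hash           -> INSTR_EXTERN_FUNC (stand-alone extern function f.hash)
+ *
+ * Input and output arguments are exchanged through the associated mailbox
+ * (e.field / f.field), typically populated with mov instructions before the
+ * extern call.
+ */
+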
+static inline void
+instr_extern_obj_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+       uint32_t obj_id = ip->ext_obj.ext_obj_id;
+       uint32_t func_id = ip->ext_obj.func_id;
+       struct extern_obj_runtime *obj = &t->extern_objs[obj_id];
+       rte_swx_extern_type_member_func_t func = obj->funcs[func_id];
+
+       TRACE("[Thread %2u] extern obj %u member func %u\n",
+             p->thread_id,
+             obj_id,
+             func_id);
+
+       /* Extern object member function execute. */
+       uint32_t done = func(obj->obj, obj->mailbox);
+
+       /* Thread. */
+       thread_ip_inc_cond(t, done);
+       thread_yield_cond(p, done ^ 1);
+}
+
+static inline void
+instr_extern_func_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+       uint32_t ext_func_id = ip->ext_func.ext_func_id;
+       struct extern_func_runtime *ext_func = &t->extern_funcs[ext_func_id];
+       rte_swx_extern_func_t func = ext_func->func;
+
+       TRACE("[Thread %2u] extern func %u\n",
+             p->thread_id,
+             ext_func_id);
+
+       /* Extern function execute. */
+       uint32_t done = func(ext_func->mailbox);
+
+       /* Thread. */
+       thread_ip_inc_cond(t, done);
+       thread_yield_cond(p, done ^ 1);
+}
+
+/*
+ * mov.
+ */
+static int
+instr_mov_translate(struct rte_swx_pipeline *p,
+                   struct action *action,
+                   char **tokens,
+                   int n_tokens,
+                   struct instruction *instr,
+                   struct instruction_data *data __rte_unused)
+{
+       char *dst = tokens[1], *src = tokens[2];
+       struct field *fdst, *fsrc;
+       uint32_t dst_struct_id, src_struct_id, src_val;
+
+       CHECK(n_tokens == 3, EINVAL);
+
+       fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
+       CHECK(fdst, EINVAL);
+
+       /* MOV or MOV_S. */
+       fsrc = struct_field_parse(p, action, src, &src_struct_id);
+       if (fsrc) {
+               instr->type = INSTR_MOV;
+               if ((dst[0] == 'h' && src[0] != 'h') ||
+                   (dst[0] != 'h' && src[0] == 'h'))
+                       instr->type = INSTR_MOV_S;
+
+               instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
+               instr->mov.dst.n_bits = fdst->n_bits;
+               instr->mov.dst.offset = fdst->offset / 8;
+               instr->mov.src.struct_id = (uint8_t)src_struct_id;
+               instr->mov.src.n_bits = fsrc->n_bits;
+               instr->mov.src.offset = fsrc->offset / 8;
+               return 0;
+       }
+
+       /* MOV_I. */
+       src_val = strtoul(src, &src, 0);
+       CHECK(!src[0], EINVAL);
+
+       if (dst[0] == 'h')
+               src_val = htonl(src_val);
+
+       instr->type = INSTR_MOV_I;
+       instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
+       instr->mov.dst.n_bits = fdst->n_bits;
+       instr->mov.dst.offset = fdst->offset / 8;
+       instr->mov.src_val = (uint32_t)src_val;
+       return 0;
+}
+
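+/* Examples (informative, field names are illustrative): the _S variant is
+ * selected when exactly one of the two operands is a header field, as that
+ * operand is in network byte order while the other one is in host byte order:
+ *
+ *    mov m.next_hop m.fib_entry         -> INSTR_MOV
+ *    mov m.dst_addr h.ethernet.dst_addr -> INSTR_MOV_S
+ *    mov m.port_out 4                   -> INSTR_MOV_I
+ */
+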
+static inline void
+instr_mov_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] mov\n",
+             p->thread_id);
+
+       MOV(t, ip);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_mov_s_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] mov (s)\n",
+             p->thread_id);
+
+       MOV_S(t, ip);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_mov_i_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] mov (i) %x\n",
+             p->thread_id,
+             ip->mov.src_val);
+
+       MOV_I(t, ip);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+/*
+ * dma.
+ */
+static int
+instr_dma_translate(struct rte_swx_pipeline *p,
+                   struct action *action,
+                   char **tokens,
+                   int n_tokens,
+                   struct instruction *instr,
+                   struct instruction_data *data __rte_unused)
+{
+       char *dst = tokens[1];
+       char *src = tokens[2];
+       struct header *h;
+       struct field *tf;
+
+       CHECK(action, EINVAL);
+       CHECK(n_tokens == 3, EINVAL);
+
+       h = header_parse(p, dst);
+       CHECK(h, EINVAL);
+
+       tf = action_field_parse(action, src);
+       CHECK(tf, EINVAL);
+
+       instr->type = INSTR_DMA_HT;
+       instr->dma.dst.header_id[0] = h->id;
+       instr->dma.dst.struct_id[0] = h->struct_id;
+       instr->dma.n_bytes[0] = h->st->n_bits / 8;
+       instr->dma.src.offset[0] = tf->offset / 8;
+
+       return 0;
+}
+
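+/* Example (informative, names are illustrative): "dma h.vlan t.vlan_hdr"
+ * copies sizeof(h.vlan) bytes of the current action data, starting at field
+ * t.vlan_hdr, directly into the h.vlan header and marks that header valid.
+ * Runs of consecutive dma instructions can be fused into the
+ * INSTR_DMA_HT2 .. INSTR_DMA_HT8 opcodes executed below.
+ */
+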
+static inline void
+__instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+       uint8_t *action_data = t->structs[0];
+       uint64_t valid_headers = t->valid_headers;
+       uint32_t i;
+
+       for (i = 0; i < n_dma; i++) {
+               uint32_t header_id = ip->dma.dst.header_id[i];
+               uint32_t struct_id = ip->dma.dst.struct_id[i];
+               uint32_t offset = ip->dma.src.offset[i];
+               uint32_t n_bytes = ip->dma.n_bytes[i];
+
+               struct header_runtime *h = &t->headers[header_id];
+               uint8_t *h_ptr0 = h->ptr0;
+               uint8_t *h_ptr = t->structs[struct_id];
+
+               void *dst = MASK64_BIT_GET(valid_headers, header_id) ?
+                       h_ptr : h_ptr0;
+               void *src = &action_data[offset];
+
+               TRACE("[Thread %2u] dma h.s t.f\n", p->thread_id);
+
+               /* Headers. */
+               memcpy(dst, src, n_bytes);
+               t->structs[struct_id] = dst;
+               valid_headers = MASK64_BIT_SET(valid_headers, header_id);
+       }
+
+       t->valid_headers = valid_headers;
+}
+
+static inline void
+instr_dma_ht_exec(struct rte_swx_pipeline *p)
+{
+       __instr_dma_ht_exec(p, 1);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_dma_ht2_exec(struct rte_swx_pipeline *p)
+{
+       TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
+             p->thread_id);
+
+       __instr_dma_ht_exec(p, 2);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_dma_ht3_exec(struct rte_swx_pipeline *p)
+{
+       TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
+             p->thread_id);
+
+       __instr_dma_ht_exec(p, 3);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_dma_ht4_exec(struct rte_swx_pipeline *p)
+{
+       TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
+             p->thread_id);
+
+       __instr_dma_ht_exec(p, 4);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_dma_ht5_exec(struct rte_swx_pipeline *p)
+{
+       TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
+             p->thread_id);
+
+       __instr_dma_ht_exec(p, 5);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_dma_ht6_exec(struct rte_swx_pipeline *p)
+{
+       TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
+             p->thread_id);
+
+       __instr_dma_ht_exec(p, 6);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_dma_ht7_exec(struct rte_swx_pipeline *p)
+{
+       TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
+             p->thread_id);
+
+       __instr_dma_ht_exec(p, 7);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_dma_ht8_exec(struct rte_swx_pipeline *p)
+{
+       TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
+             p->thread_id);
+
+       __instr_dma_ht_exec(p, 8);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+/*
+ * alu.
+ */
+static int
+instr_alu_add_translate(struct rte_swx_pipeline *p,
+                       struct action *action,
+                       char **tokens,
+                       int n_tokens,
+                       struct instruction *instr,
+                       struct instruction_data *data __rte_unused)
+{
+       char *dst = tokens[1], *src = tokens[2];
+       struct field *fdst, *fsrc;
+       uint32_t dst_struct_id, src_struct_id, src_val;
+
+       CHECK(n_tokens == 3, EINVAL);
+
+       fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
+       CHECK(fdst, EINVAL);
+
+       /* ADD, ADD_HM, ADD_MH, ADD_HH. */
+       fsrc = struct_field_parse(p, action, src, &src_struct_id);
+       if (fsrc) {
+               instr->type = INSTR_ALU_ADD;
+               if (dst[0] == 'h' && src[0] == 'm')
+                       instr->type = INSTR_ALU_ADD_HM;
+               if (dst[0] == 'm' && src[0] == 'h')
+                       instr->type = INSTR_ALU_ADD_MH;
+               if (dst[0] == 'h' && src[0] == 'h')
+                       instr->type = INSTR_ALU_ADD_HH;
+
+               instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
+               instr->alu.dst.n_bits = fdst->n_bits;
+               instr->alu.dst.offset = fdst->offset / 8;
+               instr->alu.src.struct_id = (uint8_t)src_struct_id;
+               instr->alu.src.n_bits = fsrc->n_bits;
+               instr->alu.src.offset = fsrc->offset / 8;
+               return 0;
+       }
+
+       /* ADD_MI, ADD_HI. */
+       src_val = strtoul(src, &src, 0);
+       CHECK(!src[0], EINVAL);
+
+       instr->type = INSTR_ALU_ADD_MI;
+       if (dst[0] == 'h')
+               instr->type = INSTR_ALU_ADD_HI;
+
+       instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
+       instr->alu.dst.n_bits = fdst->n_bits;
+       instr->alu.dst.offset = fdst->offset / 8;
+       instr->alu.src_val = (uint32_t)src_val;
+       return 0;
+}
+
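+/* Examples (informative, field names are illustrative): the opcode variant
+ * records which operands are header fields (network byte order) and which are
+ * not (host byte order):
+ *
+ *    add m.bytes m.pkt_len        -> INSTR_ALU_ADD
+ *    add m.bytes h.ipv4.total_len -> INSTR_ALU_ADD_MH
+ *    add m.bytes 14               -> INSTR_ALU_ADD_MI
+ */
+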
+static int
+instr_alu_sub_translate(struct rte_swx_pipeline *p,
+                       struct action *action,
+                       char **tokens,
+                       int n_tokens,
+                       struct instruction *instr,
+                       struct instruction_data *data __rte_unused)
+{
+       char *dst = tokens[1], *src = tokens[2];
+       struct field *fdst, *fsrc;
+       uint32_t dst_struct_id, src_struct_id, src_val;
+
+       CHECK(n_tokens == 3, EINVAL);
+
+       fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
+       CHECK(fdst, EINVAL);
+
+       /* SUB, SUB_HM, SUB_MH, SUB_HH. */
+       fsrc = struct_field_parse(p, action, src, &src_struct_id);
+       if (fsrc) {
+               instr->type = INSTR_ALU_SUB;
+               if (dst[0] == 'h' && src[0] == 'm')
+                       instr->type = INSTR_ALU_SUB_HM;
+               if (dst[0] == 'm' && src[0] == 'h')
+                       instr->type = INSTR_ALU_SUB_MH;
+               if (dst[0] == 'h' && src[0] == 'h')
+                       instr->type = INSTR_ALU_SUB_HH;
+
+               instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
+               instr->alu.dst.n_bits = fdst->n_bits;
+               instr->alu.dst.offset = fdst->offset / 8;
+               instr->alu.src.struct_id = (uint8_t)src_struct_id;
+               instr->alu.src.n_bits = fsrc->n_bits;
+               instr->alu.src.offset = fsrc->offset / 8;
+               return 0;
+       }
+
+       /* SUB_MI, SUB_HI. */
+       src_val = strtoul(src, &src, 0);
+       CHECK(!src[0], EINVAL);
+
+       instr->type = INSTR_ALU_SUB_MI;
+       if (dst[0] == 'h')
+               instr->type = INSTR_ALU_SUB_HI;
+
+       instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
+       instr->alu.dst.n_bits = fdst->n_bits;
+       instr->alu.dst.offset = fdst->offset / 8;
+       instr->alu.src_val = (uint32_t)src_val;
+       return 0;
+}
+
+static int
+instr_alu_ckadd_translate(struct rte_swx_pipeline *p,
+                         struct action *action __rte_unused,
+                         char **tokens,
+                         int n_tokens,
+                         struct instruction *instr,
+                         struct instruction_data *data __rte_unused)
+{
+       char *dst = tokens[1], *src = tokens[2];
+       struct header *hdst, *hsrc;
+       struct field *fdst, *fsrc;
+
+       CHECK(n_tokens == 3, EINVAL);
+
+       fdst = header_field_parse(p, dst, &hdst);
+       CHECK(fdst && (fdst->n_bits == 16), EINVAL);
+
+       /* CKADD_FIELD. */
+       fsrc = header_field_parse(p, src, &hsrc);
+       if (fsrc) {
+               instr->type = INSTR_ALU_CKADD_FIELD;
+               instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
+               instr->alu.dst.n_bits = fdst->n_bits;
+               instr->alu.dst.offset = fdst->offset / 8;
+               instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
+               instr->alu.src.n_bits = fsrc->n_bits;
+               instr->alu.src.offset = fsrc->offset / 8;
+               return 0;
+       }
+
+       /* CKADD_STRUCT, CKADD_STRUCT20. */
+       hsrc = header_parse(p, src);
+       CHECK(hsrc, EINVAL);
+
+       instr->type = INSTR_ALU_CKADD_STRUCT;
+       if ((hsrc->st->n_bits / 8) == 20)
+               instr->type = INSTR_ALU_CKADD_STRUCT20;
+
+       instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
+       instr->alu.dst.n_bits = fdst->n_bits;
+       instr->alu.dst.offset = fdst->offset / 8;
+       instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
+       instr->alu.src.n_bits = hsrc->st->n_bits;
+       instr->alu.src.offset = 0; /* Unused. */
+       return 0;
+}
+
+static int
+instr_alu_cksub_translate(struct rte_swx_pipeline *p,
+                         struct action *action __rte_unused,
+                         char **tokens,
+                         int n_tokens,
+                         struct instruction *instr,
+                         struct instruction_data *data __rte_unused)
+{
+       char *dst = tokens[1], *src = tokens[2];
+       struct header *hdst, *hsrc;
+       struct field *fdst, *fsrc;
+
+       CHECK(n_tokens == 3, EINVAL);
+
+       fdst = header_field_parse(p, dst, &hdst);
+       CHECK(fdst && (fdst->n_bits == 16), EINVAL);
+
+       fsrc = header_field_parse(p, src, &hsrc);
+       CHECK(fsrc, EINVAL);
+
+       instr->type = INSTR_ALU_CKSUB_FIELD;
+       instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
+       instr->alu.dst.n_bits = fdst->n_bits;
+       instr->alu.dst.offset = fdst->offset / 8;
+       instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
+       instr->alu.src.n_bits = fsrc->n_bits;
+       instr->alu.src.offset = fsrc->offset / 8;
+       return 0;
+}
+
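+/* Usage notes (informative, names are illustrative): the destination of both
+ * ckadd and cksub must be a 16-bit header field holding a one's complement
+ * checksum. With another header field as the source, the checksum is adjusted
+ * incrementally (cksub removes a contribution, ckadd adds one), which is the
+ * typical way to patch a checksum after rewriting part of a header. With a
+ * whole header as the source, e.g. "ckadd h.ipv4.hdr_checksum h.ipv4", the
+ * checksum is computed over all the 32-bit words of that header, with the
+ * 20-byte case dispatched to a specialized opcode.
+ */
+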
+static int
+instr_alu_shl_translate(struct rte_swx_pipeline *p,
+                       struct action *action,
+                       char **tokens,
+                       int n_tokens,
+                       struct instruction *instr,
+                       struct instruction_data *data __rte_unused)
+{
+       char *dst = tokens[1], *src = tokens[2];
+       struct field *fdst, *fsrc;
+       uint32_t dst_struct_id, src_struct_id, src_val;
+
+       CHECK(n_tokens == 3, EINVAL);
+
+       fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
+       CHECK(fdst, EINVAL);
+
+       /* SHL, SHL_HM, SHL_MH, SHL_HH. */
+       fsrc = struct_field_parse(p, action, src, &src_struct_id);
+       if (fsrc) {
+               instr->type = INSTR_ALU_SHL;
+               if (dst[0] == 'h' && src[0] == 'm')
+                       instr->type = INSTR_ALU_SHL_HM;
+               if (dst[0] == 'm' && src[0] == 'h')
+                       instr->type = INSTR_ALU_SHL_MH;
+               if (dst[0] == 'h' && src[0] == 'h')
+                       instr->type = INSTR_ALU_SHL_HH;
+
+               instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
+               instr->alu.dst.n_bits = fdst->n_bits;
+               instr->alu.dst.offset = fdst->offset / 8;
+               instr->alu.src.struct_id = (uint8_t)src_struct_id;
+               instr->alu.src.n_bits = fsrc->n_bits;
+               instr->alu.src.offset = fsrc->offset / 8;
+               return 0;
+       }
+
+       /* SHL_MI, SHL_HI. */
+       src_val = strtoul(src, &src, 0);
+       CHECK(!src[0], EINVAL);
+
+       instr->type = INSTR_ALU_SHL_MI;
+       if (dst[0] == 'h')
+               instr->type = INSTR_ALU_SHL_HI;
+
+       instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
+       instr->alu.dst.n_bits = fdst->n_bits;
+       instr->alu.dst.offset = fdst->offset / 8;
+       instr->alu.src_val = (uint32_t)src_val;
+       return 0;
+}
+
+static int
+instr_alu_shr_translate(struct rte_swx_pipeline *p,
+                       struct action *action,
+                       char **tokens,
+                       int n_tokens,
+                       struct instruction *instr,
+                       struct instruction_data *data __rte_unused)
+{
+       char *dst = tokens[1], *src = tokens[2];
+       struct field *fdst, *fsrc;
+       uint32_t dst_struct_id, src_struct_id, src_val;
+
+       CHECK(n_tokens == 3, EINVAL);
+
+       fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
+       CHECK(fdst, EINVAL);
+
+       /* SHR, SHR_HM, SHR_MH, SHR_HH. */
+       fsrc = struct_field_parse(p, action, src, &src_struct_id);
+       if (fsrc) {
+               instr->type = INSTR_ALU_SHR;
+               if (dst[0] == 'h' && src[0] == 'm')
+                       instr->type = INSTR_ALU_SHR_HM;
+               if (dst[0] == 'm' && src[0] == 'h')
+                       instr->type = INSTR_ALU_SHR_MH;
+               if (dst[0] == 'h' && src[0] == 'h')
+                       instr->type = INSTR_ALU_SHR_HH;
+
+               instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
+               instr->alu.dst.n_bits = fdst->n_bits;
+               instr->alu.dst.offset = fdst->offset / 8;
+               instr->alu.src.struct_id = (uint8_t)src_struct_id;
+               instr->alu.src.n_bits = fsrc->n_bits;
+               instr->alu.src.offset = fsrc->offset / 8;
+               return 0;
+       }
+
+       /* SHR_MI, SHR_HI. */
+       src_val = strtoul(src, &src, 0);
+       CHECK(!src[0], EINVAL);
+
+       instr->type = INSTR_ALU_SHR_MI;
+       if (dst[0] == 'h')
+               instr->type = INSTR_ALU_SHR_HI;
+
+       instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
+       instr->alu.dst.n_bits = fdst->n_bits;
+       instr->alu.dst.offset = fdst->offset / 8;
+       instr->alu.src_val = (uint32_t)src_val;
+       return 0;
+}
+
+static int
+instr_alu_and_translate(struct rte_swx_pipeline *p,
+                       struct action *action,
+                       char **tokens,
+                       int n_tokens,
+                       struct instruction *instr,
+                       struct instruction_data *data __rte_unused)
+{
+       char *dst = tokens[1], *src = tokens[2];
+       struct field *fdst, *fsrc;
+       uint32_t dst_struct_id, src_struct_id, src_val;
+
+       CHECK(n_tokens == 3, EINVAL);
+
+       fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
+       CHECK(fdst, EINVAL);
+
+       /* AND or AND_S. */
+       fsrc = struct_field_parse(p, action, src, &src_struct_id);
+       if (fsrc) {
+               instr->type = INSTR_ALU_AND;
+               if ((dst[0] == 'h' && src[0] != 'h') ||
+                   (dst[0] != 'h' && src[0] == 'h'))
+                       instr->type = INSTR_ALU_AND_S;
+
+               instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
+               instr->alu.dst.n_bits = fdst->n_bits;
+               instr->alu.dst.offset = fdst->offset / 8;
+               instr->alu.src.struct_id = (uint8_t)src_struct_id;
+               instr->alu.src.n_bits = fsrc->n_bits;
+               instr->alu.src.offset = fsrc->offset / 8;
+               return 0;
+       }
+
+       /* AND_I. */
+       src_val = strtoul(src, &src, 0);
+       CHECK(!src[0], EINVAL);
+
+       if (dst[0] == 'h')
+               src_val = htonl(src_val);
+
+       instr->type = INSTR_ALU_AND_I;
+       instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
+       instr->alu.dst.n_bits = fdst->n_bits;
+       instr->alu.dst.offset = fdst->offset / 8;
+       instr->alu.src_val = (uint32_t)src_val;
+       return 0;
+}
+
+static int
+instr_alu_or_translate(struct rte_swx_pipeline *p,
+                      struct action *action,
+                      char **tokens,
+                      int n_tokens,
+                      struct instruction *instr,
+                      struct instruction_data *data __rte_unused)
+{
+       char *dst = tokens[1], *src = tokens[2];
+       struct field *fdst, *fsrc;
+       uint32_t dst_struct_id, src_struct_id, src_val;
+
+       CHECK(n_tokens == 3, EINVAL);
+
+       fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
+       CHECK(fdst, EINVAL);
+
+       /* OR or OR_S. */
+       fsrc = struct_field_parse(p, action, src, &src_struct_id);
+       if (fsrc) {
+               instr->type = INSTR_ALU_OR;
+               if ((dst[0] == 'h' && src[0] != 'h') ||
+                   (dst[0] != 'h' && src[0] == 'h'))
+                       instr->type = INSTR_ALU_OR_S;
+
+               instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
+               instr->alu.dst.n_bits = fdst->n_bits;
+               instr->alu.dst.offset = fdst->offset / 8;
+               instr->alu.src.struct_id = (uint8_t)src_struct_id;
+               instr->alu.src.n_bits = fsrc->n_bits;
+               instr->alu.src.offset = fsrc->offset / 8;
+               return 0;
+       }
+
+       /* OR_I. */
+       src_val = strtoul(src, &src, 0);
+       CHECK(!src[0], EINVAL);
+
+       if (dst[0] == 'h')
+               src_val = htonl(src_val);
+
+       instr->type = INSTR_ALU_OR_I;
+       instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
+       instr->alu.dst.n_bits = fdst->n_bits;
+       instr->alu.dst.offset = fdst->offset / 8;
+       instr->alu.src_val = (uint32_t)src_val;
+       return 0;
+}
+
+static int
+instr_alu_xor_translate(struct rte_swx_pipeline *p,
+                       struct action *action,
+                       char **tokens,
+                       int n_tokens,
+                       struct instruction *instr,
+                       struct instruction_data *data __rte_unused)
+{
+       char *dst = tokens[1], *src = tokens[2];
+       struct field *fdst, *fsrc;
+       uint32_t dst_struct_id, src_struct_id, src_val;
+
+       CHECK(n_tokens == 3, EINVAL);
+
+       fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
+       CHECK(fdst, EINVAL);
+
+       /* XOR or XOR_S. */
+       fsrc = struct_field_parse(p, action, src, &src_struct_id);
+       if (fsrc) {
+               instr->type = INSTR_ALU_XOR;
+               if ((dst[0] == 'h' && src[0] != 'h') ||
+                   (dst[0] != 'h' && src[0] == 'h'))
+                       instr->type = INSTR_ALU_XOR_S;
+
+               instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
+               instr->alu.dst.n_bits = fdst->n_bits;
+               instr->alu.dst.offset = fdst->offset / 8;
+               instr->alu.src.struct_id = (uint8_t)src_struct_id;
+               instr->alu.src.n_bits = fsrc->n_bits;
+               instr->alu.src.offset = fsrc->offset / 8;
+               return 0;
+       }
+
+       /* XOR_I. */
+       src_val = strtoul(src, &src, 0);
+       CHECK(!src[0], EINVAL);
+
+       if (dst[0] == 'h')
+               src_val = htonl(src_val);
+
+       instr->type = INSTR_ALU_XOR_I;
+       instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
+       instr->alu.dst.n_bits = fdst->n_bits;
+       instr->alu.dst.offset = fdst->offset / 8;
+       instr->alu.src_val = (uint32_t)src_val;
+       return 0;
+}
+
+static inline void
+instr_alu_add_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] add\n", p->thread_id);
+
+       /* Structs. */
+       ALU(t, ip, +);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_add_mh_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] add (mh)\n", p->thread_id);
+
+       /* Structs. */
+       ALU_MH(t, ip, +);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_add_hm_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] add (hm)\n", p->thread_id);
+
+       /* Structs. */
+       ALU_HM(t, ip, +);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_add_hh_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] add (hh)\n", p->thread_id);
+
+       /* Structs. */
+       ALU_HH(t, ip, +);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_add_mi_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] add (mi)\n", p->thread_id);
+
+       /* Structs. */
+       ALU_MI(t, ip, +);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_add_hi_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] add (hi)\n", p->thread_id);
+
+       /* Structs. */
+       ALU_HI(t, ip, +);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_sub_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] sub\n", p->thread_id);
+
+       /* Structs. */
+       ALU(t, ip, -);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_sub_mh_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] sub (mh)\n", p->thread_id);
+
+       /* Structs. */
+       ALU_MH(t, ip, -);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_sub_hm_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] sub (hm)\n", p->thread_id);
+
+       /* Structs. */
+       ALU_HM(t, ip, -);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_sub_hh_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] sub (hh)\n", p->thread_id);
+
+       /* Structs. */
+       ALU_HH(t, ip, -);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_sub_mi_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] sub (mi)\n", p->thread_id);
+
+       /* Structs. */
+       ALU_MI(t, ip, -);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_sub_hi_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] sub (hi)\n", p->thread_id);
+
+       /* Structs. */
+       ALU_HI(t, ip, -);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_shl_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] shl\n", p->thread_id);
+
+       /* Structs. */
+       ALU(t, ip, <<);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_shl_mh_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] shl (mh)\n", p->thread_id);
+
+       /* Structs. */
+       ALU_MH(t, ip, <<);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_shl_hm_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] shl (hm)\n", p->thread_id);
+
+       /* Structs. */
+       ALU_HM(t, ip, <<);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_shl_hh_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] shl (hh)\n", p->thread_id);
+
+       /* Structs. */
+       ALU_HH(t, ip, <<);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_shl_mi_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] shl (mi)\n", p->thread_id);
+
+       /* Structs. */
+       ALU_MI(t, ip, <<);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_shl_hi_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] shl (hi)\n", p->thread_id);
+
+       /* Structs. */
+       ALU_HI(t, ip, <<);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_shr_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] shr\n", p->thread_id);
+
+       /* Structs. */
+       ALU(t, ip, >>);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_shr_mh_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] shr (mh)\n", p->thread_id);
+
+       /* Structs. */
+       ALU_MH(t, ip, >>);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_shr_hm_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] shr (hm)\n", p->thread_id);
+
+       /* Structs. */
+       ALU_HM(t, ip, >>);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_shr_hh_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] shr (hh)\n", p->thread_id);
+
+       /* Structs. */
+       ALU_HH(t, ip, >>);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_shr_mi_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] shr (mi)\n", p->thread_id);
+
+       /* Structs. */
+       ALU_MI(t, ip, >>);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_shr_hi_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] shr (hi)\n", p->thread_id);
+
+       /* Structs. */
+       ALU_HI(t, ip, >>);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_and_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] and\n", p->thread_id);
+
+       /* Structs. */
+       ALU(t, ip, &);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_and_s_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] and (s)\n", p->thread_id);
+
+       /* Structs. */
+       ALU_S(t, ip, &);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_and_i_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] and (i)\n", p->thread_id);
+
+       /* Structs. */
+       ALU_I(t, ip, &);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_or_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] or\n", p->thread_id);
+
+       /* Structs. */
+       ALU(t, ip, |);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_or_s_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] or (s)\n", p->thread_id);
+
+       /* Structs. */
+       ALU_S(t, ip, |);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_or_i_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] or (i)\n", p->thread_id);
+
+       /* Structs. */
+       ALU_I(t, ip, |);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_xor_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] xor\n", p->thread_id);
+
+       /* Structs. */
+       ALU(t, ip, ^);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_xor_s_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] xor (s)\n", p->thread_id);
+
+       /* Structs. */
+       ALU_S(t, ip, ^);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_xor_i_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] xor (i)\n", p->thread_id);
+
+       /* Structs. */
+       ALU_I(t, ip, ^);
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_ckadd_field_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+       uint8_t *dst_struct, *src_struct;
+       uint16_t *dst16_ptr, dst;
+       uint64_t *src64_ptr, src64, src64_mask, src;
+       uint64_t r;
+
+       TRACE("[Thread %2u] ckadd (field)\n", p->thread_id);
+
+       /* Structs. */
+       dst_struct = t->structs[ip->alu.dst.struct_id];
+       dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
+       dst = *dst16_ptr;
+
+       src_struct = t->structs[ip->alu.src.struct_id];
+       src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
+       src64 = *src64_ptr;
+       src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
+       src = src64 & src64_mask;
+
+       r = dst;
+       r = ~r & 0xFFFF;
+
+       /* The first input (r) is a 16-bit number. The second and the third
+        * inputs are 32-bit numbers. In the worst case scenario, the sum of the
+        * three numbers (output r) is a 34-bit number.
+        */
+       r += (src >> 32) + (src & 0xFFFFFFFF);
+
+       /* The first input is a 16-bit number. The second input is an 18-bit
+        * number. In the worst case scenario, the sum of the two numbers is a
+        * 19-bit number.
+        */
+       r = (r & 0xFFFF) + (r >> 16);
+
+       /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
+        * a 3-bit number (0 .. 7). Their sum is a 17-bit number (0 .. 0x10006).
+        */
+       r = (r & 0xFFFF) + (r >> 16);
+
+       /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
+        * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
+        * 0x10006), the output r is (0 .. 7). So no carry bit can be generated,
+        * therefore the output r is always a 16-bit number.
+        */
+       r = (r & 0xFFFF) + (r >> 16);
+
+       r = ~r & 0xFFFF;
+       r = r ? r : 0xFFFF;
+
+       *dst16_ptr = (uint16_t)r;
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
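+/* Informative sketch: the fixed sequence of folding steps used by the
+ * checksum handlers above and below is an unrolled form of the generic 16-bit
+ * one's complement sum reduction, with the bit-width comments proving that the
+ * fixed number of folds is sufficient. A loop-based equivalent (assuming
+ * <stdint.h>; shown for illustration only, not used by the pipeline) is:
+ *
+ *    static inline uint16_t
+ *    ones_complement_fold(uint64_t sum)
+ *    {
+ *            while (sum >> 16)
+ *                    sum = (sum & 0xFFFF) + (sum >> 16);
+ *
+ *            return (uint16_t)sum;
+ *    }
+ *
+ * The handlers then take the one's complement of the folded sum and map a
+ * 0x0000 result to 0xFFFF before writing it back to the checksum field.
+ */
+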
+static inline void
+instr_alu_cksub_field_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+       uint8_t *dst_struct, *src_struct;
+       uint16_t *dst16_ptr, dst;
+       uint64_t *src64_ptr, src64, src64_mask, src;
+       uint64_t r;
+
+       TRACE("[Thread %2u] cksub (field)\n", p->thread_id);
+
+       /* Structs. */
+       dst_struct = t->structs[ip->alu.dst.struct_id];
+       dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
+       dst = *dst16_ptr;
+
+       src_struct = t->structs[ip->alu.src.struct_id];
+       src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
+       src64 = *src64_ptr;
+       src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
+       src = src64 & src64_mask;
+
+       r = dst;
+       r = ~r & 0xFFFF;
+
+       /* Subtraction in 1's complement arithmetic (i.e. a '- b) is the same as
+        * the following sequence of operations in 2's complement arithmetic:
+        *    a '- b = (a - b) % 0xFFFF.
+        *
+        * In order to prevent an underflow for the below subtraction, in which
+        * a 33-bit number (the subtrahend) is taken out of a 16-bit number (the
+        * minuend), we first add a multiple of the 0xFFFF modulus to the
+        * minuend. The number we add to the minuend needs to be a 34-bit number
+        * or higher, so for readability reasons we picked the 36-bit multiple.
+        * We are effectively turning the 16-bit minuend into a 36-bit number:
+        *    (a - b) % 0xFFFF = (a + 0xFFFF00000 - b) % 0xFFFF.
+        */
+       r += 0xFFFF00000ULL; /* The output r is a 36-bit number. */
+
+       /* A 33-bit number is subtracted from a 36-bit number (the input r). The
+        * result (the output r) is a 36-bit number.
+        */
+       r -= (src >> 32) + (src & 0xFFFFFFFF);
+
+       /* The first input is a 16-bit number. The second input is a 20-bit
+        * number. Their sum is a 21-bit number.
+        */
+       r = (r & 0xFFFF) + (r >> 16);
+
+       /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
+        * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1001E).
+        */
+       r = (r & 0xFFFF) + (r >> 16);
+
+       /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
+        * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
+        * 0x1001E), the output r is (0 .. 31). So no carry bit can be
+        * generated, therefore the output r is always a 16-bit number.
+        */
+       r = (r & 0xFFFF) + (r >> 16);
+
+       r = ~r & 0xFFFF;
+       r = r ? r : 0xFFFF;
+
+       *dst16_ptr = (uint16_t)r;
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_ckadd_struct20_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+       uint8_t *dst_struct, *src_struct;
+       uint16_t *dst16_ptr;
+       uint32_t *src32_ptr;
+       uint64_t r0, r1;
+
+       TRACE("[Thread %2u] ckadd (struct of 20 bytes)\n", p->thread_id);
+
+       /* Structs. */
+       dst_struct = t->structs[ip->alu.dst.struct_id];
+       dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
+
+       src_struct = t->structs[ip->alu.src.struct_id];
+       src32_ptr = (uint32_t *)&src_struct[0];
+
+       r0 = src32_ptr[0]; /* r0 is a 32-bit number. */
+       r1 = src32_ptr[1]; /* r1 is a 32-bit number. */
+       r0 += src32_ptr[2]; /* The output r0 is a 33-bit number. */
+       r1 += src32_ptr[3]; /* The output r1 is a 33-bit number. */
+       r0 += r1 + src32_ptr[4]; /* The output r0 is a 35-bit number. */
+
+       /* The first input is a 16-bit number. The second input is a 19-bit
+        * number. Their sum is a 20-bit number.
+        */
+       r0 = (r0 & 0xFFFF) + (r0 >> 16);
+
+       /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
+        * a 4-bit number (0 .. 15). The sum is a 17-bit number (0 .. 0x1000E).
+        */
+       r0 = (r0 & 0xFFFF) + (r0 >> 16);
+
+       /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
+        * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
+        * 0x1000E), the output r is (0 .. 15). So no carry bit can be
+        * generated, therefore the output r is always a 16-bit number.
+        */
+       r0 = (r0 & 0xFFFF) + (r0 >> 16);
+
+       r0 = ~r0 & 0xFFFF;
+       r0 = r0 ? r0 : 0xFFFF;
+
+       *dst16_ptr = (uint16_t)r0;
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+static inline void
+instr_alu_ckadd_struct_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+       uint8_t *dst_struct, *src_struct;
+       uint16_t *dst16_ptr;
+       uint32_t *src32_ptr;
+       uint64_t r = 0;
+       uint32_t i;
+
+       TRACE("[Thread %2u] ckadd (struct)\n", p->thread_id);
+
+       /* Structs. */
+       dst_struct = t->structs[ip->alu.dst.struct_id];
+       dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
+
+       src_struct = t->structs[ip->alu.src.struct_id];
+       src32_ptr = (uint32_t *)&src_struct[0];
+
+       /* The max number of 32-bit words in a 256-byte header is 64 = 2^6.
+        * Therefore, in the worst case scenario, the sum of all these words
+        * (the output r, which starts from zero) is a 38-bit number.
+        */
+       for (i = 0; i < ip->alu.src.n_bits / 32; i++, src32_ptr++)
+               r += *src32_ptr;
+
+       /* The first input is a 16-bit number. The second input is a 22-bit
+        * number. Their sum is a 23-bit number.
+        */
+       r = (r & 0xFFFF) + (r >> 16);
+
+       /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
+        * a 7-bit number (0 .. 127). The sum is a 17-bit number (0 .. 0x1007E).
+        */
+       r = (r & 0xFFFF) + (r >> 16);
+
+       /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
+        * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
+        * 0x1007E), the output r is (1 .. 0x7F). So no carry bit can be
+        * generated, therefore the output r is always a 16-bit number.
+        */
+       r = (r & 0xFFFF) + (r >> 16);
+
+       r = ~r & 0xFFFF;
+       r = r ? r : 0xFFFF;
+
+       *dst16_ptr = (uint16_t)r;
+
+       /* Thread. */
+       thread_ip_inc(p);
+}
+
+/*
+ * jmp.
+ */
+static struct action *
+action_find(struct rte_swx_pipeline *p, const char *name);
+
+static int
+instr_jmp_translate(struct rte_swx_pipeline *p __rte_unused,
+                   struct action *action __rte_unused,
+                   char **tokens,
+                   int n_tokens,
+                   struct instruction *instr,
+                   struct instruction_data *data)
+{
+       CHECK(n_tokens == 2, EINVAL);
+
+       strcpy(data->jmp_label, tokens[1]);
+
+       instr->type = INSTR_JMP;
+       instr->jmp.ip = NULL; /* Resolved later. */
+       return 0;
+}
+
+static int
+instr_jmp_valid_translate(struct rte_swx_pipeline *p,
+                         struct action *action __rte_unused,
+                         char **tokens,
+                         int n_tokens,
+                         struct instruction *instr,
+                         struct instruction_data *data)
+{
+       struct header *h;
+
+       CHECK(n_tokens == 3, EINVAL);
+
+       strcpy(data->jmp_label, tokens[1]);
+
+       h = header_parse(p, tokens[2]);
+       CHECK(h, EINVAL);
+
+       instr->type = INSTR_JMP_VALID;
+       instr->jmp.ip = NULL; /* Resolved later. */
+       instr->jmp.header_id = h->id;
+       return 0;
+}
+
+static int
+instr_jmp_invalid_translate(struct rte_swx_pipeline *p,
+                           struct action *action __rte_unused,
+                           char **tokens,
+                           int n_tokens,
+                           struct instruction *instr,
+                           struct instruction_data *data)
+{
+       struct header *h;
+
+       CHECK(n_tokens == 3, EINVAL);
+
+       strcpy(data->jmp_label, tokens[1]);
+
+       h = header_parse(p, tokens[2]);
+       CHECK(h, EINVAL);
+
+       instr->type = INSTR_JMP_INVALID;
+       instr->jmp.ip = NULL; /* Resolved later. */
+       instr->jmp.header_id = h->id;
+       return 0;
+}
+
+static int
+instr_jmp_hit_translate(struct rte_swx_pipeline *p __rte_unused,
+                       struct action *action,
+                       char **tokens,
+                       int n_tokens,
+                       struct instruction *instr,
+                       struct instruction_data *data)
+{
+       CHECK(!action, EINVAL);
+       CHECK(n_tokens == 2, EINVAL);
+
+       strcpy(data->jmp_label, tokens[1]);
+
+       instr->type = INSTR_JMP_HIT;
+       instr->jmp.ip = NULL; /* Resolved later. */
+       return 0;
+}
+
+static int
+instr_jmp_miss_translate(struct rte_swx_pipeline *p __rte_unused,
+                        struct action *action,
+                        char **tokens,
+                        int n_tokens,
+                        struct instruction *instr,
+                        struct instruction_data *data)
+{
+       CHECK(!action, EINVAL);
+       CHECK(n_tokens == 2, EINVAL);
+
+       strcpy(data->jmp_label, tokens[1]);
+
+       instr->type = INSTR_JMP_MISS;
+       instr->jmp.ip = NULL; /* Resolved later. */
+       return 0;
+}
+
+static int
+instr_jmp_action_hit_translate(struct rte_swx_pipeline *p,
+                              struct action *action,
+                              char **tokens,
+                              int n_tokens,
+                              struct instruction *instr,
+                              struct instruction_data *data)
+{
+       struct action *a;
+
+       CHECK(!action, EINVAL);
+       CHECK(n_tokens == 3, EINVAL);
+
+       strcpy(data->jmp_label, tokens[1]);
+
+       a = action_find(p, tokens[2]);
+       CHECK(a, EINVAL);
+
+       instr->type = INSTR_JMP_ACTION_HIT;
+       instr->jmp.ip = NULL; /* Resolved later. */
+       instr->jmp.action_id = a->id;
+       return 0;
+}
+
+static int
+instr_jmp_action_miss_translate(struct rte_swx_pipeline *p,
+                               struct action *action,
+                               char **tokens,
+                               int n_tokens,
+                               struct instruction *instr,
+                               struct instruction_data *data)
+{
+       struct action *a;
+
+       CHECK(!action, EINVAL);
+       CHECK(n_tokens == 3, EINVAL);
+
+       strcpy(data->jmp_label, tokens[1]);
+
+       a = action_find(p, tokens[2]);
+       CHECK(a, EINVAL);
+
+       instr->type = INSTR_JMP_ACTION_MISS;
+       instr->jmp.ip = NULL; /* Resolved later. */
+       instr->jmp.action_id = a->id;
+       return 0;
+}
+
+static int
+instr_jmp_eq_translate(struct rte_swx_pipeline *p,
+                      struct action *action,
+                      char **tokens,
+                      int n_tokens,
+                      struct instruction *instr,
+                      struct instruction_data *data)
+{
+       char *a = tokens[2], *b = tokens[3];
+       struct field *fa, *fb;
+       uint32_t a_struct_id, b_struct_id, b_val;
+
+       CHECK(n_tokens == 4, EINVAL);
+
+       strcpy(data->jmp_label, tokens[1]);
+
+       fa = struct_field_parse(p, action, a, &a_struct_id);
+       CHECK(fa, EINVAL);
+
+       /* JMP_EQ or JMP_EQ_S. */
+       fb = struct_field_parse(p, action, b, &b_struct_id);
+       if (fb) {
+               instr->type = INSTR_JMP_EQ;
+               if ((a[0] == 'h' && b[0] != 'h') ||
+                   (a[0] != 'h' && b[0] == 'h'))
+                       instr->type = INSTR_JMP_EQ_S;
+               instr->jmp.ip = NULL; /* Resolved later. */
+
+               instr->jmp.a.struct_id = (uint8_t)a_struct_id;
+               instr->jmp.a.n_bits = fa->n_bits;
+               instr->jmp.a.offset = fa->offset / 8;
+               instr->jmp.b.struct_id = (uint8_t)b_struct_id;
+               instr->jmp.b.n_bits = fb->n_bits;
+               instr->jmp.b.offset = fb->offset / 8;
+               return 0;
+       }
+
+       /* JMP_EQ_I. */
+       b_val = strtoul(b, &b, 0);
+       CHECK(!b[0], EINVAL);
+
+       if (a[0] == 'h')
+               b_val = htonl(b_val);
+
+       instr->type = INSTR_JMP_EQ_I;
+       instr->jmp.ip = NULL; /* Resolved later. */
+       instr->jmp.a.struct_id = (uint8_t)a_struct_id;
+       instr->jmp.a.n_bits = fa->n_bits;
+       instr->jmp.a.offset = fa->offset / 8;
+       instr->jmp.b_val = (uint32_t)b_val;
+       return 0;
+}
+
+static int
+instr_jmp_neq_translate(struct rte_swx_pipeline *p,
+                       struct action *action,
+                       char **tokens,
+                       int n_tokens,
+                       struct instruction *instr,
+                       struct instruction_data *data)
+{
+       char *a = tokens[2], *b = tokens[3];
+       struct field *fa, *fb;
+       uint32_t a_struct_id, b_struct_id, b_val;
+
+       CHECK(n_tokens == 4, EINVAL);
+
+       strcpy(data->jmp_label, tokens[1]);
+
+       fa = struct_field_parse(p, action, a, &a_struct_id);
+       CHECK(fa, EINVAL);
+
+       /* JMP_NEQ or JMP_NEQ_S. */
+       fb = struct_field_parse(p, action, b, &b_struct_id);
+       if (fb) {
+               instr->type = INSTR_JMP_NEQ;
+               if ((a[0] == 'h' && b[0] != 'h') ||
+                   (a[0] != 'h' && b[0] == 'h'))
+                       instr->type = INSTR_JMP_NEQ_S;
+               instr->jmp.ip = NULL; /* Resolved later. */
+
+               instr->jmp.a.struct_id = (uint8_t)a_struct_id;
+               instr->jmp.a.n_bits = fa->n_bits;
+               instr->jmp.a.offset = fa->offset / 8;
+               instr->jmp.b.struct_id = (uint8_t)b_struct_id;
+               instr->jmp.b.n_bits = fb->n_bits;
+               instr->jmp.b.offset = fb->offset / 8;
+               return 0;
+       }
+
+       /* JMP_NEQ_I. */
+       b_val = strtoul(b, &b, 0);
+       CHECK(!b[0], EINVAL);
+
+       if (a[0] == 'h')
+               b_val = htonl(b_val);
+
+       instr->type = INSTR_JMP_NEQ_I;
+       instr->jmp.ip = NULL; /* Resolved later. */
+       instr->jmp.a.struct_id = (uint8_t)a_struct_id;
+       instr->jmp.a.n_bits = fa->n_bits;
+       instr->jmp.a.offset = fa->offset / 8;
+       instr->jmp.b_val = (uint32_t)b_val;
+       return 0;
+}
+
+static int
+instr_jmp_lt_translate(struct rte_swx_pipeline *p,
+                      struct action *action,
+                      char **tokens,
+                      int n_tokens,
+                      struct instruction *instr,
+                      struct instruction_data *data)
+{
+       char *a = tokens[2], *b = tokens[3];
+       struct field *fa, *fb;
+       uint32_t a_struct_id, b_struct_id, b_val;
+
+       CHECK(n_tokens == 4, EINVAL);
+
+       strcpy(data->jmp_label, tokens[1]);
+
+       fa = struct_field_parse(p, action, a, &a_struct_id);
+       CHECK(fa, EINVAL);
+
+       /* JMP_LT, JMP_LT_MH, JMP_LT_HM, JMP_LT_HH. */
+       fb = struct_field_parse(p, action, b, &b_struct_id);
+       if (fb) {
+               instr->type = INSTR_JMP_LT;
+               if (a[0] == 'h' && b[0] == 'm')
+                       instr->type = INSTR_JMP_LT_HM;
+               if (a[0] == 'm' && b[0] == 'h')
+                       instr->type = INSTR_JMP_LT_MH;
+               if (a[0] == 'h' && b[0] == 'h')
+                       instr->type = INSTR_JMP_LT_HH;
+               instr->jmp.ip = NULL; /* Resolved later. */
+
+               instr->jmp.a.struct_id = (uint8_t)a_struct_id;
+               instr->jmp.a.n_bits = fa->n_bits;
+               instr->jmp.a.offset = fa->offset / 8;
+               instr->jmp.b.struct_id = (uint8_t)b_struct_id;
+               instr->jmp.b.n_bits = fb->n_bits;
+               instr->jmp.b.offset = fb->offset / 8;
+               return 0;
+       }
+
+       /* JMP_LT_MI, JMP_LT_HI. */
+       b_val = strtoul(b, &b, 0);
+       CHECK(!b[0], EINVAL);
+
+       instr->type = INSTR_JMP_LT_MI;
+       if (a[0] == 'h')
+               instr->type = INSTR_JMP_LT_HI;
+       instr->jmp.ip = NULL; /* Resolved later. */
+
+       instr->jmp.a.struct_id = (uint8_t)a_struct_id;
+       instr->jmp.a.n_bits = fa->n_bits;
+       instr->jmp.a.offset = fa->offset / 8;
+       instr->jmp.b_val = (uint32_t)b_val;
+       return 0;
+}
+
+static int
+instr_jmp_gt_translate(struct rte_swx_pipeline *p,
+                      struct action *action,
+                      char **tokens,
+                      int n_tokens,
+                      struct instruction *instr,
+                      struct instruction_data *data)
+{
+       char *a = tokens[2], *b = tokens[3];
+       struct field *fa, *fb;
+       uint32_t a_struct_id, b_struct_id, b_val;
+
+       CHECK(n_tokens == 4, EINVAL);
+
+       strcpy(data->jmp_label, tokens[1]);
+
+       fa = struct_field_parse(p, action, a, &a_struct_id);
+       CHECK(fa, EINVAL);
+
+       /* JMP_GT, JMP_GT_MH, JMP_GT_HM, JMP_GT_HH. */
+       fb = struct_field_parse(p, action, b, &b_struct_id);
+       if (fb) {
+               instr->type = INSTR_JMP_GT;
+               if (a[0] == 'h' && b[0] == 'm')
+                       instr->type = INSTR_JMP_GT_HM;
+               if (a[0] == 'm' && b[0] == 'h')
+                       instr->type = INSTR_JMP_GT_MH;
+               if (a[0] == 'h' && b[0] == 'h')
+                       instr->type = INSTR_JMP_GT_HH;
+               instr->jmp.ip = NULL; /* Resolved later. */
+
+               instr->jmp.a.struct_id = (uint8_t)a_struct_id;
+               instr->jmp.a.n_bits = fa->n_bits;
+               instr->jmp.a.offset = fa->offset / 8;
+               instr->jmp.b.struct_id = (uint8_t)b_struct_id;
+               instr->jmp.b.n_bits = fb->n_bits;
+               instr->jmp.b.offset = fb->offset / 8;
+               return 0;
+       }
+
+       /* JMP_GT_MI, JMP_GT_HI. */
+       b_val = strtoul(b, &b, 0);
+       CHECK(!b[0], EINVAL);
+
+       instr->type = INSTR_JMP_GT_MI;
+       if (a[0] == 'h')
+               instr->type = INSTR_JMP_GT_HI;
+       instr->jmp.ip = NULL; /* Resolved later. */
+
+       instr->jmp.a.struct_id = (uint8_t)a_struct_id;
+       instr->jmp.a.n_bits = fa->n_bits;
+       instr->jmp.a.offset = fa->offset / 8;
+       instr->jmp.b_val = (uint32_t)b_val;
+       return 0;
+}
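+
+/* Translation examples (label and field names are illustrative): the statement
+ * "jmplt LABEL_DROP m.ttl 2" compares a meta-data field against an immediate
+ * value, so it maps to INSTR_JMP_LT_MI, while "jmpgt LABEL_FRAG h.ipv4.length
+ * m.mtu" compares a header field against a meta-data field, so it maps to
+ * INSTR_JMP_GT_HM.
+ */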
+
+static inline void
+instr_jmp_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] jmp\n", p->thread_id);
+
+       thread_ip_set(t, ip->jmp.ip);
+}
+
+static inline void
+instr_jmp_valid_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+       uint32_t header_id = ip->jmp.header_id;
+
+       TRACE("[Thread %2u] jmpv\n", p->thread_id);
+
+       t->ip = HEADER_VALID(t, header_id) ? ip->jmp.ip : (t->ip + 1);
+}
+
+static inline void
+instr_jmp_invalid_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+       uint32_t header_id = ip->jmp.header_id;
+
+       TRACE("[Thread %2u] jmpnv\n", p->thread_id);
+
+       t->ip = HEADER_VALID(t, header_id) ? (t->ip + 1) : ip->jmp.ip;
+}
+
+static inline void
+instr_jmp_hit_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+       struct instruction *ip_next[] = {t->ip + 1, ip->jmp.ip};
+
+       TRACE("[Thread %2u] jmph\n", p->thread_id);
+
+       t->ip = ip_next[t->hit];
+}
+
+static inline void
+instr_jmp_miss_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+       struct instruction *ip_next[] = {ip->jmp.ip, t->ip + 1};
+
+       TRACE("[Thread %2u] jmpnh\n", p->thread_id);
+
+       t->ip = ip_next[t->hit];
+}
+
+static inline void
+instr_jmp_action_hit_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] jmpa\n", p->thread_id);
+
+       t->ip = (ip->jmp.action_id == t->action_id) ? ip->jmp.ip : (t->ip + 1);
+}
+
+static inline void
+instr_jmp_action_miss_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] jmpna\n", p->thread_id);
+
+       t->ip = (ip->jmp.action_id == t->action_id) ? (t->ip + 1) : ip->jmp.ip;
+}
+
+static inline void
+instr_jmp_eq_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] jmpeq\n", p->thread_id);
+
+       JMP_CMP(t, ip, ==);
+}
+
+static inline void
+instr_jmp_eq_s_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] jmpeq (s)\n", p->thread_id);
+
+       JMP_CMP_S(t, ip, ==);
+}
+
+static inline void
+instr_jmp_eq_i_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] jmpeq (i)\n", p->thread_id);
+
+       JMP_CMP_I(t, ip, ==);
+}
+
+static inline void
+instr_jmp_neq_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] jmpneq\n", p->thread_id);
+
+       JMP_CMP(t, ip, !=);
+}
+
+static inline void
+instr_jmp_neq_s_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] jmpneq (s)\n", p->thread_id);
+
+       JMP_CMP_S(t, ip, !=);
+}
+
+static inline void
+instr_jmp_neq_i_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] jmpneq (i)\n", p->thread_id);
+
+       JMP_CMP_I(t, ip, !=);
+}
+
+static inline void
+instr_jmp_lt_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] jmplt\n", p->thread_id);
+
+       JMP_CMP(t, ip, <);
+}
+
+static inline void
+instr_jmp_lt_mh_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] jmplt (mh)\n", p->thread_id);
+
+       JMP_CMP_MH(t, ip, <);
+}
+
+static inline void
+instr_jmp_lt_hm_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] jmplt (hm)\n", p->thread_id);
+
+       JMP_CMP_HM(t, ip, <);
+}
+
+static inline void
+instr_jmp_lt_hh_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] jmplt (hh)\n", p->thread_id);
+
+       JMP_CMP_HH(t, ip, <);
+}
+
+static inline void
+instr_jmp_lt_mi_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] jmplt (mi)\n", p->thread_id);
+
+       JMP_CMP_MI(t, ip, <);
+}
+
+static inline void
+instr_jmp_lt_hi_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] jmplt (hi)\n", p->thread_id);
+
+       JMP_CMP_HI(t, ip, <);
+}
+
+static inline void
+instr_jmp_gt_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] jmpgt\n", p->thread_id);
+
+       JMP_CMP(t, ip, >);
+}
+
+static inline void
+instr_jmp_gt_mh_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] jmpgt (mh)\n", p->thread_id);
+
+       JMP_CMP_MH(t, ip, >);
+}
+
+static inline void
+instr_jmp_gt_hm_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] jmpgt (hm)\n", p->thread_id);
+
+       JMP_CMP_HM(t, ip, >);
+}
+
+static inline void
+instr_jmp_gt_hh_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] jmpgt (hh)\n", p->thread_id);
+
+       JMP_CMP_HH(t, ip, >);
+}
+
+static inline void
+instr_jmp_gt_mi_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] jmpgt (mi)\n", p->thread_id);
+
+       JMP_CMP_MI(t, ip, >);
+}
+
+static inline void
+instr_jmp_gt_hi_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+
+       TRACE("[Thread %2u] jmpgt (hi)\n", p->thread_id);
+
+       JMP_CMP_HI(t, ip, >);
+}
+
+/*
+ * return.
+ */
+static int
+instr_return_translate(struct rte_swx_pipeline *p __rte_unused,
+                      struct action *action,
+                      char **tokens __rte_unused,
+                      int n_tokens,
+                      struct instruction *instr,
+                      struct instruction_data *data __rte_unused)
+{
+       CHECK(action, EINVAL);
+       CHECK(n_tokens == 1, EINVAL);
+
+       instr->type = INSTR_RETURN;
+       return 0;
+}
+
+static inline void
+instr_return_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+
+       TRACE("[Thread %2u] return\n", p->thread_id);
+
+       t->ip = t->ret;
+}
+
+#define RTE_SWX_INSTRUCTION_TOKENS_MAX 16
+
+static int
+instr_translate(struct rte_swx_pipeline *p,
+               struct action *action,
+               char *string,
+               struct instruction *instr,
+               struct instruction_data *data)
+{
+       char *tokens[RTE_SWX_INSTRUCTION_TOKENS_MAX];
+       int n_tokens = 0, tpos = 0;
+
+       /* Parse the instruction string into tokens. */
+       for ( ; ; ) {
+               char *token;
+
+               token = strtok_r(string, " \t\v", &string);
+               if (!token)
+                       break;
+
+               CHECK(n_tokens < RTE_SWX_INSTRUCTION_TOKENS_MAX, EINVAL);
+
+               tokens[n_tokens] = token;
+               n_tokens++;
+       }
+
+       CHECK(n_tokens, EINVAL);
+
+       /* Handle the optional instruction label. */
+       if ((n_tokens >= 2) && !strcmp(tokens[1], ":")) {
+               strcpy(data->label, tokens[0]);
+
+               tpos += 2;
+               CHECK(n_tokens - tpos, EINVAL);
+       }
+
+       /* Identify the instruction type. */
+       if (!strcmp(tokens[tpos], "rx"))
+               return instr_rx_translate(p,
+                                         action,
+                                         &tokens[tpos],
+                                         n_tokens - tpos,
+                                         instr,
+                                         data);
+
+       if (!strcmp(tokens[tpos], "tx"))
+               return instr_tx_translate(p,
+                                         action,
+                                         &tokens[tpos],
+                                         n_tokens - tpos,
+                                         instr,
+                                         data);
+
+       if (!strcmp(tokens[tpos], "extract"))
+               return instr_hdr_extract_translate(p,
+                                                  action,
+                                                  &tokens[tpos],
+                                                  n_tokens - tpos,
+                                                  instr,
+                                                  data);
+
+       if (!strcmp(tokens[tpos], "emit"))
+               return instr_hdr_emit_translate(p,
+                                               action,
+                                               &tokens[tpos],
+                                               n_tokens - tpos,
+                                               instr,
+                                               data);
+
+       if (!strcmp(tokens[tpos], "validate"))
+               return instr_hdr_validate_translate(p,
+                                                   action,
+                                                   &tokens[tpos],
+                                                   n_tokens - tpos,
+                                                   instr,
+                                                   data);
+
+       if (!strcmp(tokens[tpos], "invalidate"))
+               return instr_hdr_invalidate_translate(p,
+                                                     action,
+                                                     &tokens[tpos],
+                                                     n_tokens - tpos,
+                                                     instr,
+                                                     data);
+
+       if (!strcmp(tokens[tpos], "mov"))
+               return instr_mov_translate(p,
+                                          action,
+                                          &tokens[tpos],
+                                          n_tokens - tpos,
+                                          instr,
+                                          data);
+
+       if (!strcmp(tokens[tpos], "dma"))
+               return instr_dma_translate(p,
+                                          action,
+                                          &tokens[tpos],
+                                          n_tokens - tpos,
+                                          instr,
+                                          data);
+
+       if (!strcmp(tokens[tpos], "add"))
+               return instr_alu_add_translate(p,
+                                              action,
+                                              &tokens[tpos],
+                                              n_tokens - tpos,
+                                              instr,
+                                              data);
+
+       if (!strcmp(tokens[tpos], "sub"))
+               return instr_alu_sub_translate(p,
+                                              action,
+                                              &tokens[tpos],
+                                              n_tokens - tpos,
+                                              instr,
+                                              data);
+
+       if (!strcmp(tokens[tpos], "ckadd"))
+               return instr_alu_ckadd_translate(p,
+                                                action,
+                                                &tokens[tpos],
+                                                n_tokens - tpos,
+                                                instr,
+                                                data);
+
+       if (!strcmp(tokens[tpos], "cksub"))
+               return instr_alu_cksub_translate(p,
+                                                action,
+                                                &tokens[tpos],
+                                                n_tokens - tpos,
+                                                instr,
+                                                data);
+
+       if (!strcmp(tokens[tpos], "and"))
+               return instr_alu_and_translate(p,
+                                              action,
+                                              &tokens[tpos],
+                                              n_tokens - tpos,
+                                              instr,
+                                              data);
+
+       if (!strcmp(tokens[tpos], "or"))
+               return instr_alu_or_translate(p,
+                                             action,
+                                             &tokens[tpos],
+                                             n_tokens - tpos,
+                                             instr,
+                                             data);
+
+       if (!strcmp(tokens[tpos], "xor"))
+               return instr_alu_xor_translate(p,
+                                              action,
+                                              &tokens[tpos],
+                                              n_tokens - tpos,
+                                              instr,
+                                              data);
+
+       if (!strcmp(tokens[tpos], "shl"))
+               return instr_alu_shl_translate(p,
+                                              action,
+                                              &tokens[tpos],
+                                              n_tokens - tpos,
+                                              instr,
+                                              data);
+
+       if (!strcmp(tokens[tpos], "shr"))
+               return instr_alu_shr_translate(p,
+                                              action,
+                                              &tokens[tpos],
+                                              n_tokens - tpos,
+                                              instr,
+                                              data);
+
+       if (!strcmp(tokens[tpos], "table"))
+               return instr_table_translate(p,
+                                            action,
+                                            &tokens[tpos],
+                                            n_tokens - tpos,
+                                            instr,
+                                            data);
+
+       if (!strcmp(tokens[tpos], "extern"))
+               return instr_extern_translate(p,
+                                             action,
+                                             &tokens[tpos],
+                                             n_tokens - tpos,
+                                             instr,
+                                             data);
+
+       if (!strcmp(tokens[tpos], "jmp"))
+               return instr_jmp_translate(p,
+                                          action,
+                                          &tokens[tpos],
+                                          n_tokens - tpos,
+                                          instr,
+                                          data);
+
+       if (!strcmp(tokens[tpos], "jmpv"))
+               return instr_jmp_valid_translate(p,
+                                                action,
+                                                &tokens[tpos],
+                                                n_tokens - tpos,
+                                                instr,
+                                                data);
+
+       if (!strcmp(tokens[tpos], "jmpnv"))
+               return instr_jmp_invalid_translate(p,
+                                                  action,
+                                                  &tokens[tpos],
+                                                  n_tokens - tpos,
+                                                  instr,
+                                                  data);
+
+       if (!strcmp(tokens[tpos], "jmph"))
+               return instr_jmp_hit_translate(p,
+                                              action,
+                                              &tokens[tpos],
+                                              n_tokens - tpos,
+                                              instr,
+                                              data);
+
+       if (!strcmp(tokens[tpos], "jmpnh"))
+               return instr_jmp_miss_translate(p,
+                                               action,
+                                               &tokens[tpos],
+                                               n_tokens - tpos,
+                                               instr,
+                                               data);
+
+       if (!strcmp(tokens[tpos], "jmpa"))
+               return instr_jmp_action_hit_translate(p,
+                                                     action,
+                                                     &tokens[tpos],
+                                                     n_tokens - tpos,
+                                                     instr,
+                                                     data);
+
+       if (!strcmp(tokens[tpos], "jmpna"))
+               return instr_jmp_action_miss_translate(p,
+                                                      action,
+                                                      &tokens[tpos],
+                                                      n_tokens - tpos,
+                                                      instr,
+                                                      data);
+
+       if (!strcmp(tokens[tpos], "jmpeq"))
+               return instr_jmp_eq_translate(p,
+                                             action,
+                                             &tokens[tpos],
+                                             n_tokens - tpos,
+                                             instr,
+                                             data);
+
+       if (!strcmp(tokens[tpos], "jmpneq"))
+               return instr_jmp_neq_translate(p,
+                                              action,
+                                              &tokens[tpos],
+                                              n_tokens - tpos,
+                                              instr,
+                                              data);
+
+       if (!strcmp(tokens[tpos], "jmplt"))
+               return instr_jmp_lt_translate(p,
+                                             action,
+                                             &tokens[tpos],
+                                             n_tokens - tpos,
+                                             instr,
+                                             data);
+
+       if (!strcmp(tokens[tpos], "jmpgt"))
+               return instr_jmp_gt_translate(p,
+                                             action,
+                                             &tokens[tpos],
+                                             n_tokens - tpos,
+                                             instr,
+                                             data);
+
+       if (!strcmp(tokens[tpos], "return"))
+               return instr_return_translate(p,
+                                             action,
+                                             &tokens[tpos],
+                                             n_tokens - tpos,
+                                             instr,
+                                             data);
+
+       CHECK(0, EINVAL);
+}
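+
+/* Tokenization example (instruction text is illustrative): the line
+ * "LABEL_0 : mov m.color t.color" is split into the tokens "LABEL_0", ":",
+ * "mov", "m.color" and "t.color"; the first two tokens record "LABEL_0" as
+ * this instruction's label, while the remaining tokens are dispatched to
+ * instr_mov_translate().
+ */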
+
+static struct instruction_data *
+label_find(struct instruction_data *data, uint32_t n, const char *label)
+{
+       uint32_t i;
+
+       for (i = 0; i < n; i++)
+               if (!strcmp(label, data[i].label))
+                       return &data[i];
+
+       return NULL;
+}
+
+static uint32_t
+label_is_used(struct instruction_data *data, uint32_t n, const char *label)
+{
+       uint32_t count = 0, i;
+
+       if (!label[0])
+               return 0;
+
+       for (i = 0; i < n; i++)
+               if (!strcmp(label, data[i].jmp_label))
+                       count++;
+
+       return count;
+}
+
+static int
+instr_label_check(struct instruction_data *instruction_data,
+                 uint32_t n_instructions)
+{
+       uint32_t i;
+
+       /* Check that all instruction labels are unique. */
+       for (i = 0; i < n_instructions; i++) {
+               struct instruction_data *data = &instruction_data[i];
+               char *label = data->label;
+               uint32_t j;
+
+               if (!label[0])
+                       continue;
+
+               for (j = i + 1; j < n_instructions; j++)
+                       CHECK(strcmp(label, instruction_data[j].label), EINVAL);
+       }
+
+       /* Get users for each instruction label. */
+       for (i = 0; i < n_instructions; i++) {
+               struct instruction_data *data = &instruction_data[i];
+               char *label = data->label;
+
+               data->n_users = label_is_used(instruction_data,
+                                             n_instructions,
+                                             label);
+       }
+
+       return 0;
+}
+
+static int
+instr_jmp_resolve(struct instruction *instructions,
+                 struct instruction_data *instruction_data,
+                 uint32_t n_instructions)
+{
+       uint32_t i;
+
+       for (i = 0; i < n_instructions; i++) {
+               struct instruction *instr = &instructions[i];
+               struct instruction_data *data = &instruction_data[i];
+               struct instruction_data *found;
+
+               if (!instruction_is_jmp(instr))
+                       continue;
+
+               found = label_find(instruction_data,
+                                  n_instructions,
+                                  data->jmp_label);
+               CHECK(found, EINVAL);
+
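+               /* The jump target is the instruction located at the same
+                * index as the instruction that defines the label.
+                */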
+               instr->jmp.ip = &instructions[found - instruction_data];
+       }
+
+       return 0;
+}
+
+static int
+instr_verify(struct rte_swx_pipeline *p __rte_unused,
+            struct action *a,
+            struct instruction *instr,
+            struct instruction_data *data __rte_unused,
+            uint32_t n_instructions)
+{
+       if (!a) {
+               enum instruction_type type;
+               uint32_t i;
+
+               /* Check that the first instruction is rx. */
+               CHECK(instr[0].type == INSTR_RX, EINVAL);
+
+               /* Check that there is at least one tx instruction. */
+               for (i = 0; i < n_instructions; i++) {
+                       type = instr[i].type;
+
+                       if (type == INSTR_TX)
+                               break;
+               }
+               CHECK(i < n_instructions, EINVAL);
+
+               /* Check that the last instruction is either tx or unconditional
+                * jump.
+                */
+               type = instr[n_instructions - 1].type;
+               CHECK((type == INSTR_TX) || (type == INSTR_JMP), EINVAL);
+       }
+
+       if (a) {
+               enum instruction_type type;
+               uint32_t i;
+
+               /* Check that there is at least one return or tx instruction. */
+               for (i = 0; i < n_instructions; i++) {
+                       type = instr[i].type;
+
+                       if ((type == INSTR_RETURN) || (type == INSTR_TX))
+                               break;
+               }
+               CHECK(i < n_instructions, EINVAL);
+       }
+
+       return 0;
+}
+
+static int
+instr_pattern_extract_many_detect(struct instruction *instr,
+                                 struct instruction_data *data,
+                                 uint32_t n_instr,
+                                 uint32_t *n_pattern_instr)
+{
+       uint32_t i;
+
+       for (i = 0; i < n_instr; i++) {
+               if (data[i].invalid)
+                       break;
+
+               if (instr[i].type != INSTR_HDR_EXTRACT)
+                       break;
+
+               if (i == RTE_DIM(instr->io.hdr.header_id))
+                       break;
+
+               if (i && data[i].n_users)
+                       break;
+       }
+
+       if (i < 2)
+               return 0;
+
+       *n_pattern_instr = i;
+       return 1;
+}
+
+static void
+instr_pattern_extract_many_optimize(struct instruction *instr,
+                                   struct instruction_data *data,
+                                   uint32_t n_instr)
+{
+       uint32_t i;
+
+       for (i = 1; i < n_instr; i++) {
+               instr[0].type++;
+               instr[0].io.hdr.header_id[i] = instr[i].io.hdr.header_id[0];
+               instr[0].io.hdr.struct_id[i] = instr[i].io.hdr.struct_id[0];
+               instr[0].io.hdr.n_bytes[i] = instr[i].io.hdr.n_bytes[0];
+
+               data[i].invalid = 1;
+       }
+}
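+
+/* For example (header names are illustrative), the sequence "extract
+ * h.ethernet; extract h.ipv4; extract h.tcp" is detected as a 3-instruction
+ * pattern and fused into a single INSTR_HDR_EXTRACT3 instruction; the second
+ * and third instructions are marked invalid and removed later on.
+ */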
+
+static int
+instr_pattern_emit_many_tx_detect(struct instruction *instr,
+                                 struct instruction_data *data,
+                                 uint32_t n_instr,
+                                 uint32_t *n_pattern_instr)
+{
+       uint32_t i;
+
+       for (i = 0; i < n_instr; i++) {
+               if (data[i].invalid)
+                       break;
+
+               if (instr[i].type != INSTR_HDR_EMIT)
+                       break;
+
+               if (i == RTE_DIM(instr->io.hdr.header_id))
+                       break;
+
+               if (i && data[i].n_users)
+                       break;
+       }
+
+       if (!i)
+               return 0;
+
+       if (i == n_instr || instr[i].type != INSTR_TX)
+               return 0;
+
+       i++;
+
+       *n_pattern_instr = i;
+       return 1;
+}
+
+static void
+instr_pattern_emit_many_tx_optimize(struct instruction *instr,
+                                   struct instruction_data *data,
+                                   uint32_t n_instr)
+{
+       uint32_t i;
+
+       /* Any emit instruction in addition to the first one. */
+       for (i = 1; i < n_instr - 1; i++) {
+               instr[0].type++;
+               instr[0].io.hdr.header_id[i] = instr[i].io.hdr.header_id[0];
+               instr[0].io.hdr.struct_id[i] = instr[i].io.hdr.struct_id[0];
+               instr[0].io.hdr.n_bytes[i] = instr[i].io.hdr.n_bytes[0];
+
+               data[i].invalid = 1;
+       }
+
+       /* The TX instruction is the last one in the pattern. */
+       instr[0].type++;
+       instr[0].io.io.offset = instr[i].io.io.offset;
+       instr[0].io.io.n_bits = instr[i].io.io.n_bits;
+       data[i].invalid = 1;
+}
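+
+/* For example (header names are illustrative), the sequence "emit h.ethernet;
+ * emit h.ipv4; tx m.port_out" is fused into a single INSTR_HDR_EMIT2_TX
+ * instruction: every emit past the first one and the final tx each advance
+ * the opcode by one step (INSTR_HDR_EMIT -> INSTR_HDR_EMIT_TX ->
+ * INSTR_HDR_EMIT2_TX).
+ */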
+
+static int
+instr_pattern_dma_many_detect(struct instruction *instr,
+                             struct instruction_data *data,
+                             uint32_t n_instr,
+                             uint32_t *n_pattern_instr)
+{
+       uint32_t i;
+
+       for (i = 0; i < n_instr; i++) {
+               if (data[i].invalid)
+                       break;
+
+               if (instr[i].type != INSTR_DMA_HT)
+                       break;
+
+               if (i == RTE_DIM(instr->dma.dst.header_id))
+                       break;
+
+               if (i && data[i].n_users)
+                       break;
+       }
+
+       if (i < 2)
+               return 0;
+
+       *n_pattern_instr = i;
+       return 1;
+}
+
+static void
+instr_pattern_dma_many_optimize(struct instruction *instr,
+                               struct instruction_data *data,
+                               uint32_t n_instr)
+{
+       uint32_t i;
+
+       for (i = 1; i < n_instr; i++) {
+               instr[0].type++;
+               instr[0].dma.dst.header_id[i] = instr[i].dma.dst.header_id[0];
+               instr[0].dma.dst.struct_id[i] = instr[i].dma.dst.struct_id[0];
+               instr[0].dma.src.offset[i] = instr[i].dma.src.offset[0];
+               instr[0].dma.n_bytes[i] = instr[i].dma.n_bytes[0];
+
+               data[i].invalid = 1;
+       }
+}
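+
+/* For example (header and field names are illustrative), the sequence
+ * "dma h.outer_ipv4 t.outer_ipv4; dma h.outer_udp t.outer_udp" is fused into
+ * a single INSTR_DMA_HT2 instruction that copies both headers from the table
+ * action data in one pass.
+ */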
+
+static uint32_t
+instr_optimize(struct instruction *instructions,
+              struct instruction_data *instruction_data,
+              uint32_t n_instructions)
+{
+       uint32_t i, pos = 0;
+
+       for (i = 0; i < n_instructions; ) {
+               struct instruction *instr = &instructions[i];
+               struct instruction_data *data = &instruction_data[i];
+               uint32_t n_instr = 0;
+               int detected;
+
+               /* Extract many. */
+               detected = instr_pattern_extract_many_detect(instr,
+                                                            data,
+                                                            n_instructions - i,
+                                                            &n_instr);
+               if (detected) {
+                       instr_pattern_extract_many_optimize(instr,
+                                                           data,
+                                                           n_instr);
+                       i += n_instr;
+                       continue;
+               }
+
+               /* Emit many + TX. */
+               detected = instr_pattern_emit_many_tx_detect(instr,
+                                                            data,
+                                                            n_instructions - i,
+                                                            &n_instr);
+               if (detected) {
+                       instr_pattern_emit_many_tx_optimize(instr,
+                                                           data,
+                                                           n_instr);
+                       i += n_instr;
+                       continue;
+               }
+
+               /* DMA many. */
+               detected = instr_pattern_dma_many_detect(instr,
+                                                        data,
+                                                        n_instructions - i,
+                                                        &n_instr);
+               if (detected) {
+                       instr_pattern_dma_many_optimize(instr, data, n_instr);
+                       i += n_instr;
+                       continue;
+               }
+
+               /* No pattern starting at the current instruction. */
+               i++;
+       }
+
+       /* Eliminate the invalid instructions that have been optimized out. */
+       for (i = 0; i < n_instructions; i++) {
+               struct instruction *instr = &instructions[i];
+               struct instruction_data *data = &instruction_data[i];
+
+               if (data->invalid)
+                       continue;
+
+               if (i != pos) {
+                       memcpy(&instructions[pos], instr, sizeof(*instr));
+                       memcpy(&instruction_data[pos], data, sizeof(*data));
+               }
+
+               pos++;
+       }
+
+       return pos;
+}
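+
+/* Putting the patterns together (names are illustrative), the 6-instruction
+ * program "rx m.port_in; extract h.ethernet; extract h.ipv4; emit h.ethernet;
+ * emit h.ipv4; tx m.port_out" is reduced to 3 instructions: INSTR_RX,
+ * INSTR_HDR_EXTRACT2 and INSTR_HDR_EMIT2_TX.
+ */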
+
+static int
+instruction_config(struct rte_swx_pipeline *p,
+                  struct action *a,
+                  const char **instructions,
+                  uint32_t n_instructions)
+{
+       struct instruction *instr = NULL;
+       struct instruction_data *data = NULL;
+       char *string = NULL;
+       int err = 0;
+       uint32_t i;
+
+       CHECK(n_instructions, EINVAL);
+       CHECK(instructions, EINVAL);
+       for (i = 0; i < n_instructions; i++)
+               CHECK(instructions[i], EINVAL);
+
+       /* Memory allocation. */
+       instr = calloc(n_instructions, sizeof(struct instruction));
+       if (!instr) {
+               err = ENOMEM;
+               goto error;
+       }
+
+       data = calloc(n_instructions, sizeof(struct instruction_data));
+       if (!data) {
+               err = ENOMEM;
+               goto error;
+       }
+
+       for (i = 0; i < n_instructions; i++) {
+               string = strdup(instructions[i]);
+               if (!string) {
+                       err = ENOMEM;
+                       goto error;
+               }
+
+               err = instr_translate(p, a, string, &instr[i], &data[i]);
+               if (err)
+                       goto error;
+
+               free(string);
+               string = NULL; /* Avoid double free on the error path below. */
+       }
+
+       err = instr_label_check(data, n_instructions);
+       if (err)
+               goto error;
+
+       err = instr_verify(p, a, instr, data, n_instructions);
+       if (err)
+               goto error;
+
+       n_instructions = instr_optimize(instr, data, n_instructions);
+
+       err = instr_jmp_resolve(instr, data, n_instructions);
+       if (err)
+               goto error;
+
+       free(data);
+
+       if (a) {
+               a->instructions = instr;
+               a->n_instructions = n_instructions;
+       } else {
+               p->instructions = instr;
+               p->n_instructions = n_instructions;
+       }
+
+       return 0;
+
+error:
+       free(string);
+       free(data);
+       free(instr);
+       return err;
+}
+
+typedef void (*instr_exec_t)(struct rte_swx_pipeline *);
+
+static instr_exec_t instruction_table[] = {
+       [INSTR_RX] = instr_rx_exec,
+       [INSTR_TX] = instr_tx_exec,
+
+       [INSTR_HDR_EXTRACT] = instr_hdr_extract_exec,
+       [INSTR_HDR_EXTRACT2] = instr_hdr_extract2_exec,
+       [INSTR_HDR_EXTRACT3] = instr_hdr_extract3_exec,
+       [INSTR_HDR_EXTRACT4] = instr_hdr_extract4_exec,
+       [INSTR_HDR_EXTRACT5] = instr_hdr_extract5_exec,
+       [INSTR_HDR_EXTRACT6] = instr_hdr_extract6_exec,
+       [INSTR_HDR_EXTRACT7] = instr_hdr_extract7_exec,
+       [INSTR_HDR_EXTRACT8] = instr_hdr_extract8_exec,
+
+       [INSTR_HDR_EMIT] = instr_hdr_emit_exec,
+       [INSTR_HDR_EMIT_TX] = instr_hdr_emit_tx_exec,
+       [INSTR_HDR_EMIT2_TX] = instr_hdr_emit2_tx_exec,
+       [INSTR_HDR_EMIT3_TX] = instr_hdr_emit3_tx_exec,
+       [INSTR_HDR_EMIT4_TX] = instr_hdr_emit4_tx_exec,
+       [INSTR_HDR_EMIT5_TX] = instr_hdr_emit5_tx_exec,
+       [INSTR_HDR_EMIT6_TX] = instr_hdr_emit6_tx_exec,
+       [INSTR_HDR_EMIT7_TX] = instr_hdr_emit7_tx_exec,
+       [INSTR_HDR_EMIT8_TX] = instr_hdr_emit8_tx_exec,
+
+       [INSTR_HDR_VALIDATE] = instr_hdr_validate_exec,
+       [INSTR_HDR_INVALIDATE] = instr_hdr_invalidate_exec,
+
+       [INSTR_MOV] = instr_mov_exec,
+       [INSTR_MOV_S] = instr_mov_s_exec,
+       [INSTR_MOV_I] = instr_mov_i_exec,
+
+       [INSTR_DMA_HT] = instr_dma_ht_exec,
+       [INSTR_DMA_HT2] = instr_dma_ht2_exec,
+       [INSTR_DMA_HT3] = instr_dma_ht3_exec,
+       [INSTR_DMA_HT4] = instr_dma_ht4_exec,
+       [INSTR_DMA_HT5] = instr_dma_ht5_exec,
+       [INSTR_DMA_HT6] = instr_dma_ht6_exec,
+       [INSTR_DMA_HT7] = instr_dma_ht7_exec,
+       [INSTR_DMA_HT8] = instr_dma_ht8_exec,
+
+       [INSTR_ALU_ADD] = instr_alu_add_exec,
+       [INSTR_ALU_ADD_MH] = instr_alu_add_mh_exec,
+       [INSTR_ALU_ADD_HM] = instr_alu_add_hm_exec,
+       [INSTR_ALU_ADD_HH] = instr_alu_add_hh_exec,
+       [INSTR_ALU_ADD_MI] = instr_alu_add_mi_exec,
+       [INSTR_ALU_ADD_HI] = instr_alu_add_hi_exec,
+
+       [INSTR_ALU_SUB] = instr_alu_sub_exec,
+       [INSTR_ALU_SUB_MH] = instr_alu_sub_mh_exec,
+       [INSTR_ALU_SUB_HM] = instr_alu_sub_hm_exec,
+       [INSTR_ALU_SUB_HH] = instr_alu_sub_hh_exec,
+       [INSTR_ALU_SUB_MI] = instr_alu_sub_mi_exec,
+       [INSTR_ALU_SUB_HI] = instr_alu_sub_hi_exec,
+
+       [INSTR_ALU_CKADD_FIELD] = instr_alu_ckadd_field_exec,
+       [INSTR_ALU_CKADD_STRUCT] = instr_alu_ckadd_struct_exec,
+       [INSTR_ALU_CKADD_STRUCT20] = instr_alu_ckadd_struct20_exec,
+       [INSTR_ALU_CKSUB_FIELD] = instr_alu_cksub_field_exec,
+
+       [INSTR_ALU_AND] = instr_alu_and_exec,
+       [INSTR_ALU_AND_S] = instr_alu_and_s_exec,
+       [INSTR_ALU_AND_I] = instr_alu_and_i_exec,
+
+       [INSTR_ALU_OR] = instr_alu_or_exec,
+       [INSTR_ALU_OR_S] = instr_alu_or_s_exec,
+       [INSTR_ALU_OR_I] = instr_alu_or_i_exec,
+
+       [INSTR_ALU_XOR] = instr_alu_xor_exec,
+       [INSTR_ALU_XOR_S] = instr_alu_xor_s_exec,
+       [INSTR_ALU_XOR_I] = instr_alu_xor_i_exec,
+
+       [INSTR_ALU_SHL] = instr_alu_shl_exec,
+       [INSTR_ALU_SHL_MH] = instr_alu_shl_mh_exec,
+       [INSTR_ALU_SHL_HM] = instr_alu_shl_hm_exec,
+       [INSTR_ALU_SHL_HH] = instr_alu_shl_hh_exec,
+       [INSTR_ALU_SHL_MI] = instr_alu_shl_mi_exec,
+       [INSTR_ALU_SHL_HI] = instr_alu_shl_hi_exec,
+
+       [INSTR_ALU_SHR] = instr_alu_shr_exec,
+       [INSTR_ALU_SHR_MH] = instr_alu_shr_mh_exec,
+       [INSTR_ALU_SHR_HM] = instr_alu_shr_hm_exec,
+       [INSTR_ALU_SHR_HH] = instr_alu_shr_hh_exec,
+       [INSTR_ALU_SHR_MI] = instr_alu_shr_mi_exec,
+       [INSTR_ALU_SHR_HI] = instr_alu_shr_hi_exec,
+
+       [INSTR_TABLE] = instr_table_exec,
+       [INSTR_EXTERN_OBJ] = instr_extern_obj_exec,
+       [INSTR_EXTERN_FUNC] = instr_extern_func_exec,
+
+       [INSTR_JMP] = instr_jmp_exec,
+       [INSTR_JMP_VALID] = instr_jmp_valid_exec,
+       [INSTR_JMP_INVALID] = instr_jmp_invalid_exec,
+       [INSTR_JMP_HIT] = instr_jmp_hit_exec,
+       [INSTR_JMP_MISS] = instr_jmp_miss_exec,
+       [INSTR_JMP_ACTION_HIT] = instr_jmp_action_hit_exec,
+       [INSTR_JMP_ACTION_MISS] = instr_jmp_action_miss_exec,
+
+       [INSTR_JMP_EQ] = instr_jmp_eq_exec,
+       [INSTR_JMP_EQ_S] = instr_jmp_eq_s_exec,
+       [INSTR_JMP_EQ_I] = instr_jmp_eq_i_exec,
+
+       [INSTR_JMP_NEQ] = instr_jmp_neq_exec,
+       [INSTR_JMP_NEQ_S] = instr_jmp_neq_s_exec,
+       [INSTR_JMP_NEQ_I] = instr_jmp_neq_i_exec,
+
+       [INSTR_JMP_LT] = instr_jmp_lt_exec,
+       [INSTR_JMP_LT_MH] = instr_jmp_lt_mh_exec,
+       [INSTR_JMP_LT_HM] = instr_jmp_lt_hm_exec,
+       [INSTR_JMP_LT_HH] = instr_jmp_lt_hh_exec,
+       [INSTR_JMP_LT_MI] = instr_jmp_lt_mi_exec,
+       [INSTR_JMP_LT_HI] = instr_jmp_lt_hi_exec,
+
+       [INSTR_JMP_GT] = instr_jmp_gt_exec,
+       [INSTR_JMP_GT_MH] = instr_jmp_gt_mh_exec,
+       [INSTR_JMP_GT_HM] = instr_jmp_gt_hm_exec,
+       [INSTR_JMP_GT_HH] = instr_jmp_gt_hh_exec,
+       [INSTR_JMP_GT_MI] = instr_jmp_gt_mi_exec,
+       [INSTR_JMP_GT_HI] = instr_jmp_gt_hi_exec,
+
+       [INSTR_RETURN] = instr_return_exec,
+};
+
+static inline void
+instr_exec(struct rte_swx_pipeline *p)
+{
+       struct thread *t = &p->threads[p->thread_id];
+       struct instruction *ip = t->ip;
+       instr_exec_t instr = instruction_table[ip->type];
+
+       instr(p);
+}
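+
+/* Note: instruction dispatch is a plain jump table indexed by the opcode;
+ * each call to instr_exec() runs exactly one instruction on behalf of the
+ * currently active thread.
+ */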
+
+/*
+ * Action.
+ */
+static struct action *
+action_find(struct rte_swx_pipeline *p, const char *name)
+{
+       struct action *elem;
+
+       if (!name)
+               return NULL;
+
+       TAILQ_FOREACH(elem, &p->actions, node)
+               if (strcmp(elem->name, name) == 0)
+                       return elem;
+
+       return NULL;
+}
+
+static struct field *
+action_field_find(struct action *a, const char *name)
+{
+       return a->st ? struct_type_field_find(a->st, name) : NULL;
+}
+
+static struct field *
+action_field_parse(struct action *action, const char *name)
+{
+       if (name[0] != 't' || name[1] != '.')
+               return NULL;
+
+       return action_field_find(action, &name[2]);
+}
+
+int
+rte_swx_pipeline_action_config(struct rte_swx_pipeline *p,
+                              const char *name,
+                              const char *args_struct_type_name,
+                              const char **instructions,
+                              uint32_t n_instructions)
+{
+       struct struct_type *args_struct_type;
        struct action *a;
        int err;
 
@@ -2226,6 +6930,15 @@ error:
        return status;
 }
 
+void
+rte_swx_pipeline_run(struct rte_swx_pipeline *p, uint32_t n_instructions)
+{
+       uint32_t i;
+
+       for (i = 0; i < n_instructions; i++)
+               instr_exec(p);
+}
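+
+/* Illustrative usage: rte_swx_pipeline_run(p, 1000) executes the next 1000
+ * instructions of the pipeline program, one instr_exec() call per
+ * instruction, for whichever thread is currently active.
+ */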
+
 /*
  * Control.
  */