1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Intel Corporation
11 #include <rte_common.h>
12 #include <rte_prefetch.h>
13 #include <rte_byteorder.h>
15 #include "rte_swx_pipeline.h"
16 #include "rte_swx_ctl.h"
/* Argument-validation helper macros. Each expands to a guard that, on
 * failure, returns the negative error code from the enclosing function.
 * NOTE(review): macro bodies are elided in this listing -- confirm against
 * the full file. The name/instruction variants additionally require the
 * string to be NUL-terminated within its size bound (strnlen < size).
 */
18 #define CHECK(condition, err_code) \
24 #define CHECK_NAME(name, err_code) \
27 (strnlen((name), RTE_SWX_NAME_SIZE) < RTE_SWX_NAME_SIZE), \
30 #define CHECK_INSTRUCTION(instr, err_code) \
33 (strnlen((instr), RTE_SWX_INSTRUCTION_SIZE) < \
34 RTE_SWX_INSTRUCTION_SIZE), \
42 #define TRACE(...) printf(__VA_ARGS__)
/* 64-bit network (big-endian) <-> host byte order conversion shorthands,
 * mapped onto the DPDK rte_byteorder primitives. */
47 #define ntoh64(x) rte_be_to_cpu_64(x)
48 #define hton64(x) rte_cpu_to_be_64(x)
/* Field of a struct type: name (offset/n_bits members elided in this
 * listing -- see struct_type_register, which assigns bit offsets). */
54 char name[RTE_SWX_NAME_SIZE];
/* Struct type: named, linked into the pipeline-wide struct_types tailq. */
60 TAILQ_ENTRY(struct_type) node;
61 char name[RTE_SWX_NAME_SIZE];
67 TAILQ_HEAD(struct_type_tailq, struct_type);
/* Input port type: name plus the full ops vtable (create/free/pkt_rx/...). */
73 TAILQ_ENTRY(port_in_type) node;
74 char name[RTE_SWX_NAME_SIZE];
75 struct rte_swx_port_in_ops ops;
78 TAILQ_HEAD(port_in_type_tailq, port_in_type);
/* Input port instance: back-pointer to its type (id/obj members elided). */
81 TAILQ_ENTRY(port_in) node;
82 struct port_in_type *type;
87 TAILQ_HEAD(port_in_tailq, port_in);
/* Flattened per-port RX state used on the fast path (see port_in_build). */
89 struct port_in_runtime {
90 rte_swx_port_in_pkt_rx_t pkt_rx;
/* Output port type: mirror of port_in_type for the TX side. */
97 struct port_out_type {
98 TAILQ_ENTRY(port_out_type) node;
99 char name[RTE_SWX_NAME_SIZE];
100 struct rte_swx_port_out_ops ops;
103 TAILQ_HEAD(port_out_type_tailq, port_out_type);
/* Output port instance. */
106 TAILQ_ENTRY(port_out) node;
107 struct port_out_type *type;
112 TAILQ_HEAD(port_out_tailq, port_out);
/* Flattened per-port TX state used on the fast path (see port_out_build). */
114 struct port_out_runtime {
115 rte_swx_port_out_pkt_tx_t pkt_tx;
116 rte_swx_port_out_flush_t flush;
/* Member function of an extern type: name + function pointer; the id
 * (elided here) indexes the per-object runtime funcs[] array. */
123 struct extern_type_member_func {
124 TAILQ_ENTRY(extern_type_member_func) node;
125 char name[RTE_SWX_NAME_SIZE];
126 rte_swx_extern_type_member_func_t func;
130 TAILQ_HEAD(extern_type_member_func_tailq, extern_type_member_func);
/* Extern type: ctor/dtor pair, its mailbox struct type (the argument
 * area shared with the pipeline), and the list of member functions. */
133 TAILQ_ENTRY(extern_type) node;
134 char name[RTE_SWX_NAME_SIZE];
135 struct struct_type *mailbox_struct_type;
136 rte_swx_extern_type_constructor_t constructor;
137 rte_swx_extern_type_destructor_t destructor;
138 struct extern_type_member_func_tailq funcs;
142 TAILQ_HEAD(extern_type_tailq, extern_type);
/* Extern object instance of an extern type. */
145 TAILQ_ENTRY(extern_obj) node;
146 char name[RTE_SWX_NAME_SIZE];
147 struct extern_type *type;
153 TAILQ_HEAD(extern_obj_tailq, extern_obj);
/* Per-type member function cap; overridable at build time. */
155 #ifndef RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX
156 #define RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX 8
/* Per-thread runtime state of an extern object: member functions flattened
 * into an array indexed by function id (mailbox pointer elided here). */
159 struct extern_obj_runtime {
162 rte_swx_extern_type_member_func_t funcs[RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX];
/* Free-standing extern function with its own mailbox struct type. */
169 TAILQ_ENTRY(extern_func) node;
170 char name[RTE_SWX_NAME_SIZE];
171 struct struct_type *mailbox_struct_type;
172 rte_swx_extern_func_t func;
177 TAILQ_HEAD(extern_func_tailq, extern_func);
179 struct extern_func_runtime {
181 rte_swx_extern_func_t func;
/* Packet header declaration: name + its struct type. */
188 TAILQ_ENTRY(header) node;
189 char name[RTE_SWX_NAME_SIZE];
190 struct struct_type *st;
195 TAILQ_HEAD(header_tailq, header);
/* Per-thread runtime views of headers (members elided in this listing). */
197 struct header_runtime {
201 struct header_out_runtime {
211 /* Packet headers are always in Network Byte Order (NBO), i.e. big endian.
212 * Packet meta-data fields are always assumed to be in Host Byte Order (HBO).
213 * Table entry fields can be in either NBO or HBO; they are assumed to be in HBO
214 * when transferred to packet meta-data and in NBO when transferred to packet
218 /* Notation conventions:
219 * -Header field: H = h.header.field (dst/src)
220 * -Meta-data field: M = m.field (dst/src)
221 * -Extern object mailbox field: E = e.field (dst/src)
222 * -Extern function mailbox field: F = f.field (dst/src)
223 * -Table action data field: T = t.field (src only)
224 * -Immediate value: I = 32-bit unsigned value (src only)
/* Instruction opcodes. Suffix convention (per the operand comments below):
 * _I = immediate source, _S = byte-swapped operand variant, and the
 * _MH/_HM/_HH/_MI/_HI suffixes encode the (dst, src) operand kinds,
 * with H = header field (big endian) and M/E/F/T per the notation above. */
227 enum instruction_type {
234 /* extract h.header */
255 /* validate h.header */
258 /* invalidate h.header */
259 INSTR_HDR_INVALIDATE,
263 * dst = HMEF, src = HMEFTI
265 INSTR_MOV, /* dst = MEF, src = MEFT */
266 INSTR_MOV_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
267 INSTR_MOV_I, /* dst = HMEF, src = I */
269 /* dma h.header t.field
270 * memcpy(h.header, t.field, sizeof(h.header))
283 * dst = HMEF, src = HMEFTI
285 INSTR_ALU_ADD, /* dst = MEF, src = MEF */
286 INSTR_ALU_ADD_MH, /* dst = MEF, src = H */
287 INSTR_ALU_ADD_HM, /* dst = H, src = MEF */
288 INSTR_ALU_ADD_HH, /* dst = H, src = H */
289 INSTR_ALU_ADD_MI, /* dst = MEF, src = I */
290 INSTR_ALU_ADD_HI, /* dst = H, src = I */
294 * dst = HMEF, src = HMEFTI
296 INSTR_ALU_SUB, /* dst = MEF, src = MEF */
297 INSTR_ALU_SUB_MH, /* dst = MEF, src = H */
298 INSTR_ALU_SUB_HM, /* dst = H, src = MEF */
299 INSTR_ALU_SUB_HH, /* dst = H, src = H */
300 INSTR_ALU_SUB_MI, /* dst = MEF, src = I */
301 INSTR_ALU_SUB_HI, /* dst = H, src = I */
304 * dst = dst '+ src[0:1] '+ src[2:3] + ...
305 * dst = H, src = {H, h.header}
307 INSTR_ALU_CKADD_FIELD, /* src = H */
308 INSTR_ALU_CKADD_STRUCT20, /* src = h.header, with sizeof(header) = 20 */
309 INSTR_ALU_CKADD_STRUCT, /* src = h.header, with any sizeof(header) */
315 INSTR_ALU_CKSUB_FIELD,
319 * dst = HMEF, src = HMEFTI
321 INSTR_ALU_AND, /* dst = MEF, src = MEFT */
322 INSTR_ALU_AND_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
323 INSTR_ALU_AND_I, /* dst = HMEF, src = I */
327 * dst = HMEF, src = HMEFTI
329 INSTR_ALU_OR, /* dst = MEF, src = MEFT */
330 INSTR_ALU_OR_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
331 INSTR_ALU_OR_I, /* dst = HMEF, src = I */
335 * dst = HMEF, src = HMEFTI
337 INSTR_ALU_XOR, /* dst = MEF, src = MEFT */
338 INSTR_ALU_XOR_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
339 INSTR_ALU_XOR_I, /* dst = HMEF, src = I */
343 * dst = HMEF, src = HMEFTI
345 INSTR_ALU_SHL, /* dst = MEF, src = MEF */
346 INSTR_ALU_SHL_MH, /* dst = MEF, src = H */
347 INSTR_ALU_SHL_HM, /* dst = H, src = MEF */
348 INSTR_ALU_SHL_HH, /* dst = H, src = H */
349 INSTR_ALU_SHL_MI, /* dst = MEF, src = I */
350 INSTR_ALU_SHL_HI, /* dst = H, src = I */
354 * dst = HMEF, src = HMEFTI
356 INSTR_ALU_SHR, /* dst = MEF, src = MEF */
357 INSTR_ALU_SHR_MH, /* dst = MEF, src = H */
358 INSTR_ALU_SHR_HM, /* dst = H, src = MEF */
359 INSTR_ALU_SHR_HH, /* dst = H, src = H */
360 INSTR_ALU_SHR_MI, /* dst = MEF, src = I */
361 INSTR_ALU_SHR_HI, /* dst = H, src = I */
366 /* extern e.obj.func */
377 /* jmpv LABEL h.header
378 * Jump if header is valid
382 /* jmpnv LABEL h.header
383 * Jump if header is invalid
388 * Jump if table lookup hit
393 * Jump if table lookup miss
400 INSTR_JMP_ACTION_HIT,
402 /* jmpna LABEL ACTION
403 * Jump if action not run
405 INSTR_JMP_ACTION_MISS,
408 * Jump if a is equal to b
409 * a = HMEFT, b = HMEFTI
411 INSTR_JMP_EQ, /* (a, b) = (MEFT, MEFT) or (a, b) = (H, H) */
412 INSTR_JMP_EQ_S, /* (a, b) = (MEFT, H) or (a, b) = (H, MEFT) */
413 INSTR_JMP_EQ_I, /* (a, b) = (MEFT, I) or (a, b) = (H, I) */
416 * Jump if a is not equal to b
417 * a = HMEFT, b = HMEFTI
419 INSTR_JMP_NEQ, /* (a, b) = (MEFT, MEFT) or (a, b) = (H, H) */
420 INSTR_JMP_NEQ_S, /* (a, b) = (MEFT, H) or (a, b) = (H, MEFT) */
421 INSTR_JMP_NEQ_I, /* (a, b) = (MEFT, I) or (a, b) = (H, I) */
424 * Jump if a is less than b
425 * a = HMEFT, b = HMEFTI
427 INSTR_JMP_LT, /* a = MEF, b = MEF */
428 INSTR_JMP_LT_MH, /* a = MEF, b = H */
429 INSTR_JMP_LT_HM, /* a = H, b = MEF */
430 INSTR_JMP_LT_HH, /* a = H, b = H */
431 INSTR_JMP_LT_MI, /* a = MEF, b = I */
432 INSTR_JMP_LT_HI, /* a = H, b = I */
435 * Jump if a is greater than b
436 * a = HMEFT, b = HMEFTI
438 INSTR_JMP_GT, /* a = MEF, b = MEF */
439 INSTR_JMP_GT_MH, /* a = MEF, b = H */
440 INSTR_JMP_GT_HM, /* a = H, b = MEF */
441 INSTR_JMP_GT_HH, /* a = H, b = H */
442 INSTR_JMP_GT_MI, /* a = MEF, b = I */
443 INSTR_JMP_GT_HI, /* a = H, b = I */
/* Generic operand descriptor (struct_id/offset/n_bits members elided in
 * this listing; the ALU/MOV/JMP macros below read those three fields). */
451 struct instr_operand {
/* Per-slot header/struct ids for multi-header instruction forms. */
466 uint8_t header_id[8];
467 uint8_t struct_id[8];
/* Operand block for header validate/invalidate instructions. */
472 struct instr_hdr_validity {
/* Operand blocks for extern object member calls and extern functions. */
480 struct instr_extern_obj {
485 struct instr_extern_func {
/* Two-operand (dst, src) block shared by mov and all ALU instructions;
 * the immediate source variant (src_val, elided here) aliases src. */
489 struct instr_dst_src {
490 struct instr_operand dst;
492 struct instr_operand src;
/* DMA instruction operand block: parallel header/struct id arrays. */
499 uint8_t header_id[8];
500 uint8_t struct_id[8];
/* Jump operand block: target instruction pointer plus comparands a, b. */
511 struct instruction *ip;
514 struct instr_operand a;
520 struct instr_operand b;
/* One pipeline instruction: opcode tag + per-opcode operand union. */
526 enum instruction_type type;
529 struct instr_hdr_validity valid;
530 struct instr_dst_src mov;
531 struct instr_dma dma;
532 struct instr_dst_src alu;
533 struct instr_table table;
534 struct instr_extern_obj ext_obj;
535 struct instr_extern_func ext_func;
536 struct instr_jmp jmp;
/* Per-instruction translation-time metadata (labels, jump resolution). */
540 struct instruction_data {
541 char label[RTE_SWX_NAME_SIZE];
542 char jmp_label[RTE_SWX_NAME_SIZE];
543 uint32_t n_users; /* user = jmp instruction to this instruction. */
/* Action: named instruction sequence with its data struct type (st). */
551 TAILQ_ENTRY(action) node;
552 char name[RTE_SWX_NAME_SIZE];
553 struct struct_type *st;
554 struct instruction *instructions;
555 uint32_t n_instructions;
559 TAILQ_HEAD(action_tailq, action);
/* Table type: match kind + table implementation ops vtable. */
565 TAILQ_ENTRY(table_type) node;
566 char name[RTE_SWX_NAME_SIZE];
567 enum rte_swx_table_match_type match_type;
568 struct rte_swx_table_ops ops;
571 TAILQ_HEAD(table_type_tailq, table_type);
/* Match field descriptor (field pointer elided in this listing). */
574 enum rte_swx_table_match_type match_type;
/* Table: match fields, allowed actions, and default action setup. */
579 TAILQ_ENTRY(table) node;
580 char name[RTE_SWX_NAME_SIZE];
581 char args[RTE_SWX_NAME_SIZE];
582 struct table_type *type; /* NULL when n_fields == 0. */
585 struct match_field *fields;
587 int is_header; /* Only valid when n_fields > 0. */
588 struct header *header; /* Only valid when n_fields > 0. */
591 struct action **actions;
592 struct action *default_action;
593 uint8_t *default_action_data;
595 int default_action_is_const;
596 uint32_t action_data_size_max;
602 TAILQ_HEAD(table_tailq, table);
/* Per-thread table fast-path state: resolved lookup function (+ elided
 * mailbox/key material in the full file). */
604 struct table_runtime {
605 rte_swx_table_lookup_t func;
/* Per-thread packet processing context ("struct thread" in the full file). */
615 struct rte_swx_pkt pkt;
621 /* Packet headers. */
622 struct header_runtime *headers; /* Extracted or generated headers. */
623 struct header_out_runtime *headers_out; /* Emitted headers. */
624 uint8_t *header_storage;
625 uint8_t *header_out_storage;
626 uint64_t valid_headers; /* Bit mask, one bit per header id. */
627 uint32_t n_headers_out;
629 /* Packet meta-data. */
633 struct table_runtime *tables;
634 struct rte_swx_table_state *table_state;
636 int hit; /* 0 = Miss, 1 = Hit. */
638 /* Extern objects and functions. */
639 struct extern_obj_runtime *extern_objs;
640 struct extern_func_runtime *extern_funcs;
/* Instruction pointer and return address (for action call/return). */
643 struct instruction *ip;
644 struct instruction *ret;
/* Single-bit helpers over a 64-bit mask; used for header validity bits. */
647 #define MASK64_BIT_GET(mask, pos) ((mask) & (1LLU << (pos)))
648 #define MASK64_BIT_SET(mask, pos) ((mask) | (1LLU << (pos)))
649 #define MASK64_BIT_CLR(mask, pos) ((mask) & ~(1LLU << (pos)))
/* Non-zero when header_id's bit is set in the thread's validity mask. */
651 #define HEADER_VALID(thread, header_id) \
652 MASK64_BIT_GET((thread)->valid_headers, header_id)
/* Generic ALU: read dst and src as unaligned 64-bit words, mask each down
 * to its n_bits, apply the operator, and merge the result back into the
 * dst word without disturbing neighboring bits. Both operands in host
 * byte order (HBO). Assumes n_bits in 1..64 (shift by 64 would be UB --
 * presumably guaranteed by instruction translation; confirm in full file). */
654 #define ALU(thread, ip, operator) \
656 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
657 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
658 uint64_t dst64 = *dst64_ptr; \
659 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
660 uint64_t dst = dst64 & dst64_mask; \
662 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
663 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
664 uint64_t src64 = *src64_ptr; \
665 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
666 uint64_t src = src64 & src64_mask; \
668 uint64_t result = dst operator src; \
670 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
/* Little-endian ALU variants: operands tagged H live in headers and are in
 * network byte order, so they are byte-swapped (ntoh64) and right-aligned
 * before the operation; results destined for an H operand are left-aligned
 * and swapped back (hton64). _S: src is H. _HM: dst is H. _HH: both are H.
 * On big-endian targets these variants collapse to the generic forms
 * (the #else branch is elided from this listing). */
673 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
675 #define ALU_S(thread, ip, operator) \
677 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
678 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
679 uint64_t dst64 = *dst64_ptr; \
680 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
681 uint64_t dst = dst64 & dst64_mask; \
683 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
684 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
685 uint64_t src64 = *src64_ptr; \
686 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
688 uint64_t result = dst operator src; \
690 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
695 #define ALU_HM(thread, ip, operator) \
697 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
698 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
699 uint64_t dst64 = *dst64_ptr; \
700 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
701 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
703 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
704 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
705 uint64_t src64 = *src64_ptr; \
706 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
707 uint64_t src = src64 & src64_mask; \
709 uint64_t result = dst operator src; \
710 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
712 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
715 #define ALU_HH(thread, ip, operator) \
717 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
718 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
719 uint64_t dst64 = *dst64_ptr; \
720 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
721 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
723 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
724 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
725 uint64_t src64 = *src64_ptr; \
726 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
728 uint64_t result = dst operator src; \
729 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
731 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
743 #define ALU_I(thread, ip, operator) \
745 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
746 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
747 uint64_t dst64 = *dst64_ptr; \
748 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
749 uint64_t dst = dst64 & dst64_mask; \
751 uint64_t src = (ip)->alu.src_val; \
753 uint64_t result = dst operator src; \
755 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
760 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
762 #define ALU_HI(thread, ip, operator) \
764 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
765 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
766 uint64_t dst64 = *dst64_ptr; \
767 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
768 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
770 uint64_t src = (ip)->alu.src_val; \
772 uint64_t result = dst operator src; \
773 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
775 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
/* MOV: copy src field into dst field, masking to dst's n_bits; same word
 * read/modify/write technique as the ALU macros. MOV_S handles the case
 * where exactly one operand is a (network byte order) header field; MOV_I
 * stores a precomputed immediate. */
784 #define MOV(thread, ip) \
786 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
787 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
788 uint64_t dst64 = *dst64_ptr; \
789 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
791 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
792 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
793 uint64_t src64 = *src64_ptr; \
794 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \
795 uint64_t src = src64 & src64_mask; \
797 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
800 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
802 #define MOV_S(thread, ip) \
804 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
805 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
806 uint64_t dst64 = *dst64_ptr; \
807 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
809 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
810 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
811 uint64_t src64 = *src64_ptr; \
812 uint64_t src = ntoh64(src64) >> (64 - (ip)->mov.src.n_bits); \
814 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
823 #define MOV_I(thread, ip) \
825 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
826 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
827 uint64_t dst64 = *dst64_ptr; \
828 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
830 uint64_t src = (ip)->mov.src_val; \
832 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
/* Conditional jump: extract operands a and b (same masking/byte-swap rules
 * as the ALU macros), compare with the operator, and set the thread's
 * instruction pointer either to the jump target or to the next instruction.
 * The byte-swapped and immediate variants mirror the ALU naming scheme;
 * on big-endian targets they all alias the plain forms (see the #else
 * branches). */
835 #define JMP_CMP(thread, ip, operator) \
837 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
838 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
839 uint64_t a64 = *a64_ptr; \
840 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
841 uint64_t a = a64 & a64_mask; \
843 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
844 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
845 uint64_t b64 = *b64_ptr; \
846 uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits); \
847 uint64_t b = b64 & b64_mask; \
849 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
852 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
854 #define JMP_CMP_S(thread, ip, operator) \
856 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
857 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
858 uint64_t a64 = *a64_ptr; \
859 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
860 uint64_t a = a64 & a64_mask; \
862 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
863 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
864 uint64_t b64 = *b64_ptr; \
865 uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits); \
867 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
870 #define JMP_CMP_MH JMP_CMP_S
872 #define JMP_CMP_HM(thread, ip, operator) \
874 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
875 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
876 uint64_t a64 = *a64_ptr; \
877 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
879 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
880 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
881 uint64_t b64 = *b64_ptr; \
882 uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits); \
883 uint64_t b = b64 & b64_mask; \
885 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
888 #define JMP_CMP_HH(thread, ip, operator) \
890 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
891 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
892 uint64_t a64 = *a64_ptr; \
893 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
895 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
896 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
897 uint64_t b64 = *b64_ptr; \
898 uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits); \
900 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
905 #define JMP_CMP_S JMP_CMP
906 #define JMP_CMP_MH JMP_CMP
907 #define JMP_CMP_HM JMP_CMP
908 #define JMP_CMP_HH JMP_CMP
912 #define JMP_CMP_I(thread, ip, operator) \
914 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
915 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
916 uint64_t a64 = *a64_ptr; \
917 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
918 uint64_t a = a64 & a64_mask; \
920 uint64_t b = (ip)->jmp.b_val; \
922 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
925 #define JMP_CMP_MI JMP_CMP_I
927 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
929 #define JMP_CMP_HI(thread, ip, operator) \
931 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
932 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
933 uint64_t a64 = *a64_ptr; \
934 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
936 uint64_t b = (ip)->jmp.b_val; \
938 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
943 #define JMP_CMP_HI JMP_CMP_I
/* Read/write an n_bits-wide field (HBO) at a byte offset in the thread's
 * metadata area; the write merges only the masked bits into the 64-bit
 * word, leaving neighbors untouched. */
947 #define METADATA_READ(thread, offset, n_bits) \
949 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
950 uint64_t m64 = *m64_ptr; \
951 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
955 #define METADATA_WRITE(thread, offset, n_bits, value) \
957 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
958 uint64_t m64 = *m64_ptr; \
959 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
961 uint64_t m_new = value; \
963 *m64_ptr = (m64 & ~m64_mask) | (m_new & m64_mask); \
/* Number of packet-processing threads; overridable at build time. */
966 #ifndef RTE_SWX_PIPELINE_THREADS_MAX
967 #define RTE_SWX_PIPELINE_THREADS_MAX 16
/* Top-level pipeline object. The tailqs hold the user-registered
 * specification (types, ports, externs, headers, actions, tables); the
 * pointer/array members below them are the flattened runtime state
 * produced by the *_build() step. */
970 struct rte_swx_pipeline {
971 struct struct_type_tailq struct_types;
972 struct port_in_type_tailq port_in_types;
973 struct port_in_tailq ports_in;
974 struct port_out_type_tailq port_out_types;
975 struct port_out_tailq ports_out;
976 struct extern_type_tailq extern_types;
977 struct extern_obj_tailq extern_objs;
978 struct extern_func_tailq extern_funcs;
979 struct header_tailq headers;
980 struct struct_type *metadata_st;
981 uint32_t metadata_struct_id;
982 struct action_tailq actions;
983 struct table_type_tailq table_types;
984 struct table_tailq tables;
/* Runtime state (built from the spec above). */
986 struct port_in_runtime *in;
987 struct port_out_runtime *out;
988 struct instruction **action_instructions;
989 struct rte_swx_table_state *table_state;
990 struct instruction *instructions;
991 struct thread threads[RTE_SWX_PIPELINE_THREADS_MAX];
/* Element counts (n_ports_in / n_structs etc. elided in this listing). */
995 uint32_t n_ports_out;
996 uint32_t n_extern_objs;
997 uint32_t n_extern_funcs;
1003 uint32_t n_instructions;
/* Linear lookup of a struct type by name; NULL when not found. */
1011 static struct struct_type *
1012 struct_type_find(struct rte_swx_pipeline *p, const char *name)
1014 struct struct_type *elem;
1016 TAILQ_FOREACH(elem, &p->struct_types, node)
1017 if (strcmp(elem->name, name) == 0)
/* Linear lookup of a field by name inside a struct type; NULL if absent. */
1023 static struct field *
1024 struct_type_field_find(struct struct_type *st, const char *name)
1028 for (i = 0; i < st->n_fields; i++) {
1029 struct field *f = &st->fields[i];
1031 if (strcmp(f->name, name) == 0)
/* Public API: register a new struct type. Validates every field (valid
 * name, 1..64 bits, byte-multiple width, names unique within the struct),
 * rejects duplicate type names, then copies the field list, assigning each
 * field a cumulative bit offset. */
1039 rte_swx_pipeline_struct_type_register(struct rte_swx_pipeline *p,
1041 struct rte_swx_field_params *fields,
1044 struct struct_type *st;
1048 CHECK_NAME(name, EINVAL);
1049 CHECK(fields, EINVAL);
1050 CHECK(n_fields, EINVAL);
1052 for (i = 0; i < n_fields; i++) {
1053 struct rte_swx_field_params *f = &fields[i];
1056 CHECK_NAME(f->name, EINVAL);
1057 CHECK(f->n_bits, EINVAL);
1058 CHECK(f->n_bits <= 64, EINVAL);
1059 CHECK((f->n_bits & 7) == 0, EINVAL); /* Whole bytes only. */
1061 for (j = 0; j < i; j++) {
1062 struct rte_swx_field_params *f_prev = &fields[j];
1064 CHECK(strcmp(f->name, f_prev->name), EINVAL);
1068 CHECK(!struct_type_find(p, name), EEXIST);
1070 /* Node allocation. */
1071 st = calloc(1, sizeof(struct struct_type));
1074 st->fields = calloc(n_fields, sizeof(struct field));
1080 /* Node initialization. */
1081 strcpy(st->name, name); /* Length already bounded by CHECK_NAME. */
1082 for (i = 0; i < n_fields; i++) {
1083 struct field *dst = &st->fields[i];
1084 struct rte_swx_field_params *src = &fields[i];
1086 strcpy(dst->name, src->name);
1087 dst->n_bits = src->n_bits;
1088 dst->offset = st->n_bits; /* Offset = running bit total. */
1090 st->n_bits += src->n_bits;
1092 st->n_fields = n_fields;
1094 /* Node add to tailq. */
1095 TAILQ_INSERT_TAIL(&p->struct_types, st, node);
/* Build step: give every thread a structs[] pointer table, one slot per
 * registered struct id (slots are filled by the other *_build steps). */
1101 struct_build(struct rte_swx_pipeline *p)
1105 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1106 struct thread *t = &p->threads[i];
1108 t->structs = calloc(p->n_structs, sizeof(uint8_t *));
1109 CHECK(t->structs, ENOMEM);
/* Undo struct_build: release each thread's pointer table. */
1116 struct_build_free(struct rte_swx_pipeline *p)
1120 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1121 struct thread *t = &p->threads[i];
/* Full teardown: build state first, then every registered struct type. */
1129 struct_free(struct rte_swx_pipeline *p)
1131 struct_build_free(p);
1135 struct struct_type *elem;
1137 elem = TAILQ_FIRST(&p->struct_types);
1141 TAILQ_REMOVE(&p->struct_types, elem, node);
/* Linear lookup of an input port type by name; NULL when not found. */
1150 static struct port_in_type *
1151 port_in_type_find(struct rte_swx_pipeline *p, const char *name)
1153 struct port_in_type *elem;
1158 TAILQ_FOREACH(elem, &p->port_in_types, node)
1159 if (strcmp(elem->name, name) == 0)
/* Public API: register an input port type. All four ops are mandatory;
 * duplicate names are rejected; the ops vtable is copied by value. */
1166 rte_swx_pipeline_port_in_type_register(struct rte_swx_pipeline *p,
1168 struct rte_swx_port_in_ops *ops)
1170 struct port_in_type *elem;
1173 CHECK_NAME(name, EINVAL);
1175 CHECK(ops->create, EINVAL);
1176 CHECK(ops->free, EINVAL);
1177 CHECK(ops->pkt_rx, EINVAL);
1178 CHECK(ops->stats_read, EINVAL);
1180 CHECK(!port_in_type_find(p, name), EEXIST);
1182 /* Node allocation. */
1183 elem = calloc(1, sizeof(struct port_in_type));
1184 CHECK(elem, ENOMEM);
1186 /* Node initialization. */
1187 strcpy(elem->name, name);
1188 memcpy(&elem->ops, ops, sizeof(*ops));
1190 /* Node add to tailq. */
1191 TAILQ_INSERT_TAIL(&p->port_in_types, elem, node);
/* Linear lookup of an input port by numeric id; NULL when not found. */
1196 static struct port_in *
1197 port_in_find(struct rte_swx_pipeline *p, uint32_t port_id)
1199 struct port_in *port;
1201 TAILQ_FOREACH(port, &p->ports_in, node)
1202 if (port->id == port_id)
/* Public API: instantiate an input port of the named type, driving the
 * type's create() with the opaque args string. Rejects duplicate ids and
 * grows the port count so ids may be configured in any order. */
1209 rte_swx_pipeline_port_in_config(struct rte_swx_pipeline *p,
1211 const char *port_type_name,
1214 struct port_in_type *type = NULL;
1215 struct port_in *port = NULL;
1220 CHECK(!port_in_find(p, port_id), EINVAL);
1222 CHECK_NAME(port_type_name, EINVAL);
1223 type = port_in_type_find(p, port_type_name);
1224 CHECK(type, EINVAL);
1226 obj = type->ops.create(args);
1229 /* Node allocation. */
1230 port = calloc(1, sizeof(struct port_in));
1231 CHECK(port, ENOMEM);
1233 /* Node initialization. */
1238 /* Node add to tailq. */
1239 TAILQ_INSERT_TAIL(&p->ports_in, port, node);
1240 if (p->n_ports_in < port_id + 1)
1241 p->n_ports_in = port_id + 1;
/* Build step: require at least one input port, a power-of-2 port count
 * (presumably so the RX port index can wrap with a mask -- confirm in the
 * full file), and a dense id space 0..n-1; then flatten each port's
 * pkt_rx callback and object handle into the p->in[] runtime array. */
1247 port_in_build(struct rte_swx_pipeline *p)
1249 struct port_in *port;
1252 CHECK(p->n_ports_in, EINVAL);
1253 CHECK(rte_is_power_of_2(p->n_ports_in), EINVAL);
1255 for (i = 0; i < p->n_ports_in; i++)
1256 CHECK(port_in_find(p, i), EINVAL);
1258 p->in = calloc(p->n_ports_in, sizeof(struct port_in_runtime));
1259 CHECK(p->in, ENOMEM);
1261 TAILQ_FOREACH(port, &p->ports_in, node) {
1262 struct port_in_runtime *in = &p->in[port->id];
1264 in->pkt_rx = port->type->ops.pkt_rx;
1265 in->obj = port->obj;
/* Undo port_in_build (body elided in this listing). */
1272 port_in_build_free(struct rte_swx_pipeline *p)
/* Full teardown: build state, then each port (via its type's free op),
 * then the registered input port types. */
1279 port_in_free(struct rte_swx_pipeline *p)
1281 port_in_build_free(p);
1285 struct port_in *port;
1287 port = TAILQ_FIRST(&p->ports_in);
1291 TAILQ_REMOVE(&p->ports_in, port, node);
1292 port->type->ops.free(port->obj);
1296 /* Input port types. */
1298 struct port_in_type *elem;
1300 elem = TAILQ_FIRST(&p->port_in_types);
1304 TAILQ_REMOVE(&p->port_in_types, elem, node);
/* Linear lookup of an output port type by name; NULL when not found. */
1312 static struct port_out_type *
1313 port_out_type_find(struct rte_swx_pipeline *p, const char *name)
1315 struct port_out_type *elem;
1320 TAILQ_FOREACH(elem, &p->port_out_types, node)
1321 if (!strcmp(elem->name, name))
/* Public API: register an output port type. create/free/pkt_tx/stats_read
 * are mandatory (flush is not CHECKed here -- presumably optional; confirm
 * in the full file); duplicate names rejected; ops copied by value. */
1328 rte_swx_pipeline_port_out_type_register(struct rte_swx_pipeline *p,
1330 struct rte_swx_port_out_ops *ops)
1332 struct port_out_type *elem;
1335 CHECK_NAME(name, EINVAL);
1337 CHECK(ops->create, EINVAL);
1338 CHECK(ops->free, EINVAL);
1339 CHECK(ops->pkt_tx, EINVAL);
1340 CHECK(ops->stats_read, EINVAL);
1342 CHECK(!port_out_type_find(p, name), EEXIST);
1344 /* Node allocation. */
1345 elem = calloc(1, sizeof(struct port_out_type));
1346 CHECK(elem, ENOMEM);
1348 /* Node initialization. */
1349 strcpy(elem->name, name);
1350 memcpy(&elem->ops, ops, sizeof(*ops));
1352 /* Node add to tailq. */
1353 TAILQ_INSERT_TAIL(&p->port_out_types, elem, node);
/* Linear lookup of an output port by numeric id; NULL when not found. */
1358 static struct port_out *
1359 port_out_find(struct rte_swx_pipeline *p, uint32_t port_id)
1361 struct port_out *port;
1363 TAILQ_FOREACH(port, &p->ports_out, node)
1364 if (port->id == port_id)
/* Public API: instantiate an output port of the named type; mirrors
 * rte_swx_pipeline_port_in_config for the TX side. */
1371 rte_swx_pipeline_port_out_config(struct rte_swx_pipeline *p,
1373 const char *port_type_name,
1376 struct port_out_type *type = NULL;
1377 struct port_out *port = NULL;
1382 CHECK(!port_out_find(p, port_id), EINVAL);
1384 CHECK_NAME(port_type_name, EINVAL);
1385 type = port_out_type_find(p, port_type_name);
1386 CHECK(type, EINVAL);
1388 obj = type->ops.create(args);
1391 /* Node allocation. */
1392 port = calloc(1, sizeof(struct port_out));
1393 CHECK(port, ENOMEM);
1395 /* Node initialization. */
1400 /* Node add to tailq. */
1401 TAILQ_INSERT_TAIL(&p->ports_out, port, node);
1402 if (p->n_ports_out < port_id + 1)
1403 p->n_ports_out = port_id + 1;
/* Build step: require at least one output port and a dense id space
 * (no power-of-2 constraint on the TX side); flatten pkt_tx/flush/obj
 * into the p->out[] runtime array. */
1409 port_out_build(struct rte_swx_pipeline *p)
1411 struct port_out *port;
1414 CHECK(p->n_ports_out, EINVAL);
1416 for (i = 0; i < p->n_ports_out; i++)
1417 CHECK(port_out_find(p, i), EINVAL);
1419 p->out = calloc(p->n_ports_out, sizeof(struct port_out_runtime));
1420 CHECK(p->out, ENOMEM);
1422 TAILQ_FOREACH(port, &p->ports_out, node) {
1423 struct port_out_runtime *out = &p->out[port->id];
1425 out->pkt_tx = port->type->ops.pkt_tx;
1426 out->flush = port->type->ops.flush;
1427 out->obj = port->obj;
/* Undo port_out_build (body elided in this listing). */
1434 port_out_build_free(struct rte_swx_pipeline *p)
/* Full teardown: build state, then each port, then the port types. */
1441 port_out_free(struct rte_swx_pipeline *p)
1443 port_out_build_free(p);
1447 struct port_out *port;
1449 port = TAILQ_FIRST(&p->ports_out);
1453 TAILQ_REMOVE(&p->ports_out, port, node);
1454 port->type->ops.free(port->obj);
1458 /* Output port types. */
1460 struct port_out_type *elem;
1462 elem = TAILQ_FIRST(&p->port_out_types);
1466 TAILQ_REMOVE(&p->port_out_types, elem, node);
1474 static struct extern_type *
1475 extern_type_find(struct rte_swx_pipeline *p, const char *name)
1477 struct extern_type *elem;
1479 TAILQ_FOREACH(elem, &p->extern_types, node)
1480 if (strcmp(elem->name, name) == 0)
1486 static struct extern_type_member_func *
1487 extern_type_member_func_find(struct extern_type *type, const char *name)
1489 struct extern_type_member_func *elem;
1491 TAILQ_FOREACH(elem, &type->funcs, node)
1492 if (strcmp(elem->name, name) == 0)
1498 static struct extern_obj *
1499 extern_obj_find(struct rte_swx_pipeline *p, const char *name)
1501 struct extern_obj *elem;
1503 TAILQ_FOREACH(elem, &p->extern_objs, node)
1504 if (strcmp(elem->name, name) == 0)
1510 static struct extern_type_member_func *
1511 extern_obj_member_func_parse(struct rte_swx_pipeline *p,
1513 struct extern_obj **obj)
1515 struct extern_obj *object;
1516 struct extern_type_member_func *func;
1517 char *object_name, *func_name;
1519 if (name[0] != 'e' || name[1] != '.')
1522 object_name = strdup(&name[2]);
1526 func_name = strchr(object_name, '.');
1535 object = extern_obj_find(p, object_name);
1541 func = extern_type_member_func_find(object->type, func_name);
1554 static struct field *
1555 extern_obj_mailbox_field_parse(struct rte_swx_pipeline *p,
1557 struct extern_obj **object)
1559 struct extern_obj *obj;
1561 char *obj_name, *field_name;
1563 if ((name[0] != 'e') || (name[1] != '.'))
1566 obj_name = strdup(&name[2]);
1570 field_name = strchr(obj_name, '.');
1579 obj = extern_obj_find(p, obj_name);
1585 f = struct_type_field_find(obj->type->mailbox_struct_type, field_name);
1599 rte_swx_pipeline_extern_type_register(struct rte_swx_pipeline *p,
1601 const char *mailbox_struct_type_name,
1602 rte_swx_extern_type_constructor_t constructor,
1603 rte_swx_extern_type_destructor_t destructor)
1605 struct extern_type *elem;
1606 struct struct_type *mailbox_struct_type;
1610 CHECK_NAME(name, EINVAL);
1611 CHECK(!extern_type_find(p, name), EEXIST);
1613 CHECK_NAME(mailbox_struct_type_name, EINVAL);
1614 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
1615 CHECK(mailbox_struct_type, EINVAL);
1617 CHECK(constructor, EINVAL);
1618 CHECK(destructor, EINVAL);
1620 /* Node allocation. */
1621 elem = calloc(1, sizeof(struct extern_type));
1622 CHECK(elem, ENOMEM);
1624 /* Node initialization. */
1625 strcpy(elem->name, name);
1626 elem->mailbox_struct_type = mailbox_struct_type;
1627 elem->constructor = constructor;
1628 elem->destructor = destructor;
1629 TAILQ_INIT(&elem->funcs);
1631 /* Node add to tailq. */
1632 TAILQ_INSERT_TAIL(&p->extern_types, elem, node);
/*
 * Public API: register a member function under a previously registered extern
 * type. The function count is capped at RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX;
 * the new member gets the next sequential id (type->n_funcs at call time).
 */
1638 rte_swx_pipeline_extern_type_member_func_register(struct rte_swx_pipeline *p,
1639 const char *extern_type_name,
1641 rte_swx_extern_type_member_func_t member_func)
1643 struct extern_type *type;
1644 struct extern_type_member_func *type_member;
1648 CHECK_NAME(extern_type_name, EINVAL);
1649 type = extern_type_find(p, extern_type_name);
1650 CHECK(type, EINVAL);
/* Per-type function table is fixed-size at runtime (see extern_obj_build). */
1651 CHECK(type->n_funcs < RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX, ENOSPC);
1653 CHECK_NAME(name, EINVAL);
1654 CHECK(!extern_type_member_func_find(type, name), EEXIST);
1656 CHECK(member_func, EINVAL);
1658 /* Node allocation. */
1659 type_member = calloc(1, sizeof(struct extern_type_member_func));
1660 CHECK(type_member, ENOMEM);
1662 /* Node initialization. */
1663 strcpy(type_member->name, name);
1664 type_member->func = member_func;
/* Id doubles as the index into extern_obj_runtime::funcs[]. */
1665 type_member->id = type->n_funcs;
1667 /* Node add to tailq. */
1668 TAILQ_INSERT_TAIL(&type->funcs, type_member, node);
/*
 * Public API: instantiate an extern object of a registered extern type.
 * The type's constructor is invoked with the caller-supplied args string;
 * the object is assigned the next struct slot (p->n_structs) and object id
 * (p->n_extern_objs), then appended to the pipeline's extern object list.
 */
1675 rte_swx_pipeline_extern_object_config(struct rte_swx_pipeline *p,
1676 const char *extern_type_name,
1680 struct extern_type *type;
1681 struct extern_obj *obj;
1686 CHECK_NAME(extern_type_name, EINVAL);
1687 type = extern_type_find(p, extern_type_name);
1688 CHECK(type, EINVAL);
1690 CHECK_NAME(name, EINVAL);
1691 CHECK(!extern_obj_find(p, name), EEXIST);
1693 /* Node allocation. */
1694 obj = calloc(1, sizeof(struct extern_obj));
1697 /* Object construction. */
/* NOTE(review): constructor failure handling is in elided lines — verify
 * the calloc'd node is freed on that path in the full file. */
1698 obj_handle = type->constructor(args);
1704 /* Node initialization. */
1705 strcpy(obj->name, name);
1707 obj->obj = obj_handle;
/* struct_id indexes the per-thread t->structs[] array (mailbox slot). */
1708 obj->struct_id = p->n_structs;
1709 obj->id = p->n_extern_objs;
1711 /* Node add to tailq. */
1712 TAILQ_INSERT_TAIL(&p->extern_objs, obj, node);
/*
 * Build step: for every thread, allocate the extern object runtime array,
 * give each object a zeroed mailbox sized from its mailbox struct type,
 * copy the type's member function pointers into the per-object func table,
 * and publish the mailbox pointer into the thread's structs[] slot.
 */
1720 extern_obj_build(struct rte_swx_pipeline *p)
1724 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1725 struct thread *t = &p->threads[i];
1726 struct extern_obj *obj;
1728 t->extern_objs = calloc(p->n_extern_objs,
1729 sizeof(struct extern_obj_runtime));
1730 CHECK(t->extern_objs, ENOMEM);
1732 TAILQ_FOREACH(obj, &p->extern_objs, node) {
1733 struct extern_obj_runtime *r =
1734 &t->extern_objs[obj->id];
1735 struct extern_type_member_func *func;
/* Mailbox size in bytes, derived from the struct type's bit width. */
1736 uint32_t mailbox_size =
1737 obj->type->mailbox_struct_type->n_bits / 8;
1741 r->mailbox = calloc(1, mailbox_size);
1742 CHECK(r->mailbox, ENOMEM);
/* func->id was assigned sequentially at registration time. */
1744 TAILQ_FOREACH(func, &obj->type->funcs, node)
1745 r->funcs[func->id] = func->func;
/* Instructions address the mailbox through t->structs[struct_id]. */
1747 t->structs[obj->struct_id] = r->mailbox;
/*
 * Undo extern_obj_build: for each thread, release per-object runtime state
 * (mailbox free is in the elided loop body) and the runtime array itself.
 * Safe to call when nothing was built (t->extern_objs == NULL).
 */
1755 extern_obj_build_free(struct rte_swx_pipeline *p)
1759 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1760 struct thread *t = &p->threads[i];
1763 if (!t->extern_objs)
1766 for (j = 0; j < p->n_extern_objs; j++) {
1767 struct extern_obj_runtime *r = &t->extern_objs[j];
1772 free(t->extern_objs);
/* Reset so a repeated free/build cycle starts clean. */
1773 t->extern_objs = NULL;
/*
 * Full teardown of the extern subsystem: first drop the per-thread runtime
 * state, then destroy every extern object (invoking its type's destructor
 * on the object handle), then free every extern type together with its
 * registered member function nodes.
 */
1778 extern_obj_free(struct rte_swx_pipeline *p)
1780 extern_obj_build_free(p);
1782 /* Extern objects. */
1784 struct extern_obj *elem;
1786 elem = TAILQ_FIRST(&p->extern_objs);
1790 TAILQ_REMOVE(&p->extern_objs, elem, node);
/* Destructor releases the handle returned by the constructor. */
1792 elem->type->destructor(elem->obj);
1798 struct extern_type *elem;
1800 elem = TAILQ_FIRST(&p->extern_types);
1804 TAILQ_REMOVE(&p->extern_types, elem, node);
1807 struct extern_type_member_func *func;
1809 func = TAILQ_FIRST(&elem->funcs);
1813 TAILQ_REMOVE(&elem->funcs, func, node);
/* Linear lookup of an extern function by exact name; NULL if not found
 * (return statements are in elided lines). */
1824 static struct extern_func *
1825 extern_func_find(struct rte_swx_pipeline *p, const char *name)
1827 struct extern_func *elem;
1829 TAILQ_FOREACH(elem, &p->extern_funcs, node)
1830 if (strcmp(elem->name, name) == 0)
/* Parse an "f.<name>" reference by stripping the "f." prefix and doing a
 * plain name lookup. */
1836 static struct extern_func *
1837 extern_func_parse(struct rte_swx_pipeline *p,
1840 if (name[0] != 'f' || name[1] != '.')
1843 return extern_func_find(p, &name[2]);
/*
 * Parse an "f.<func>.<field>" reference: resolve the extern function by name
 * and the field inside its mailbox struct type; *function receives the match.
 * Mirrors extern_obj_mailbox_field_parse() for the "e." namespace.
 */
1846 static struct field *
1847 extern_func_mailbox_field_parse(struct rte_swx_pipeline *p,
1849 struct extern_func **function)
1851 struct extern_func *func;
1853 char *func_name, *field_name;
1855 if ((name[0] != 'f') || (name[1] != '.'))
/* Duplicate so the string can be split at the '.' separator. */
1858 func_name = strdup(&name[2]);
1862 field_name = strchr(func_name, '.');
1871 func = extern_func_find(p, func_name);
1877 f = struct_type_field_find(func->mailbox_struct_type, field_name);
/*
 * Public API: register an extern function with the pipeline.
 * Validates the function name is new, the mailbox struct type exists and the
 * callback is non-NULL; allocates the node, assigns it the next struct slot
 * and function id, and appends it to the pipeline's extern function list.
 */
1891 rte_swx_pipeline_extern_func_register(struct rte_swx_pipeline *p,
1893 const char *mailbox_struct_type_name,
1894 rte_swx_extern_func_t func)
1896 struct extern_func *f;
1897 struct struct_type *mailbox_struct_type;
1901 CHECK_NAME(name, EINVAL);
1902 CHECK(!extern_func_find(p, name), EEXIST);
1904 CHECK_NAME(mailbox_struct_type_name, EINVAL);
1905 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
1906 CHECK(mailbox_struct_type, EINVAL);
1908 CHECK(func, EINVAL);
1910 /* Node allocation. */
1911 f = calloc(1, sizeof(struct extern_func));
/* BUGFIX: check the calloc result (f), not the callback argument (func),
 * which was already validated above. Checking func meant an allocation
 * failure went undetected and strcpy() below dereferenced NULL. */
1912 CHECK(f, ENOMEM);
1914 /* Node initialization. */
1915 strcpy(f->name, name);
1916 f->mailbox_struct_type = mailbox_struct_type;
/* struct_id indexes the per-thread t->structs[] mailbox slot. */
1918 f->struct_id = p->n_structs;
1919 f->id = p->n_extern_funcs;
1921 /* Node add to tailq. */
1922 TAILQ_INSERT_TAIL(&p->extern_funcs, f, node);
1923 p->n_extern_funcs++;
/*
 * Build step: for every thread, allocate the extern function runtime array,
 * cache each function's callback pointer, allocate its zeroed mailbox sized
 * from the mailbox struct type, and publish the mailbox pointer into the
 * thread's structs[] slot.
 */
1930 extern_func_build(struct rte_swx_pipeline *p)
1934 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1935 struct thread *t = &p->threads[i];
1936 struct extern_func *func;
1938 /* Memory allocation. */
1939 t->extern_funcs = calloc(p->n_extern_funcs,
1940 sizeof(struct extern_func_runtime));
1941 CHECK(t->extern_funcs, ENOMEM);
1943 /* Extern function. */
1944 TAILQ_FOREACH(func, &p->extern_funcs, node) {
1945 struct extern_func_runtime *r =
1946 &t->extern_funcs[func->id];
/* Mailbox size in bytes, from the struct type's bit width. */
1947 uint32_t mailbox_size =
1948 func->mailbox_struct_type->n_bits / 8;
1950 r->func = func->func;
1952 r->mailbox = calloc(1, mailbox_size);
1953 CHECK(r->mailbox, ENOMEM);
1955 t->structs[func->struct_id] = r->mailbox;
/*
 * Undo extern_func_build: per thread, release each runtime entry's state
 * (mailbox free is in the elided loop body) and the array; idempotent when
 * nothing was built.
 */
1963 extern_func_build_free(struct rte_swx_pipeline *p)
1967 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1968 struct thread *t = &p->threads[i];
1971 if (!t->extern_funcs)
1974 for (j = 0; j < p->n_extern_funcs; j++) {
1975 struct extern_func_runtime *r = &t->extern_funcs[j];
1980 free(t->extern_funcs);
1981 t->extern_funcs = NULL;
/* Full teardown: drop runtime state, then free every registered extern
 * function node from the pipeline list. */
1986 extern_func_free(struct rte_swx_pipeline *p)
1988 extern_func_build_free(p);
1991 struct extern_func *elem;
1993 elem = TAILQ_FIRST(&p->extern_funcs);
1997 TAILQ_REMOVE(&p->extern_funcs, elem, node);
/* Linear lookup of a header by exact name; NULL if not found. */
2005 static struct header *
2006 header_find(struct rte_swx_pipeline *p, const char *name)
2008 struct header *elem;
2010 TAILQ_FOREACH(elem, &p->headers, node)
2011 if (strcmp(elem->name, name) == 0)
/* Parse an "h.<name>" header reference by stripping the "h." prefix. */
2017 static struct header *
2018 header_parse(struct rte_swx_pipeline *p,
2021 if (name[0] != 'h' || name[1] != '.')
2024 return header_find(p, &name[2]);
/*
 * Parse an "h.<header>.<field>" reference: resolve the header by name and the
 * field inside its struct type; *header receives the match. Same split-at-'.'
 * technique as the extern obj/func field parsers.
 */
2027 static struct field *
2028 header_field_parse(struct rte_swx_pipeline *p,
2030 struct header **header)
2034 char *header_name, *field_name;
2036 if ((name[0] != 'h') || (name[1] != '.'))
2039 header_name = strdup(&name[2]);
2043 field_name = strchr(header_name, '.');
2052 h = header_find(p, header_name);
2058 f = struct_type_field_find(h->st, field_name);
/*
 * Public API: register a packet header of a given struct type.
 * The number of headers is capped by the bit width of the per-thread
 * valid_headers mask (each header occupies one bit). The header takes the
 * next struct slot and header id.
 */
2072 rte_swx_pipeline_packet_header_register(struct rte_swx_pipeline *p,
2074 const char *struct_type_name)
2076 struct struct_type *st;
2078 size_t n_headers_max;
2081 CHECK_NAME(name, EINVAL);
2082 CHECK_NAME(struct_type_name, EINVAL);
2084 CHECK(!header_find(p, name), EEXIST);
2086 st = struct_type_find(p, struct_type_name);
/* One bit per header in thread::valid_headers limits the header count. */
2089 n_headers_max = RTE_SIZEOF_FIELD(struct thread, valid_headers) * 8;
2090 CHECK(p->n_headers < n_headers_max, ENOSPC);
2092 /* Node allocation. */
2093 h = calloc(1, sizeof(struct header));
2096 /* Node initialization. */
2097 strcpy(h->name, name);
2099 h->struct_id = p->n_structs;
2100 h->id = p->n_headers;
2102 /* Node add to tailq. */
2103 TAILQ_INSERT_TAIL(&p->headers, h, node);
/*
 * Build step: compute total header storage (sum of all header byte sizes),
 * then per thread allocate the header runtime arrays plus contiguous in/out
 * storage and carve per-header slices out of the storage in registration
 * order, publishing each slice into the thread's structs[] slot.
 */
2111 header_build(struct rte_swx_pipeline *p)
2114 uint32_t n_bytes = 0, i;
2116 TAILQ_FOREACH(h, &p->headers, node) {
2117 n_bytes += h->st->n_bits / 8;
2120 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2121 struct thread *t = &p->threads[i];
2122 uint32_t offset = 0;
2124 t->headers = calloc(p->n_headers,
2125 sizeof(struct header_runtime));
2126 CHECK(t->headers, ENOMEM);
2128 t->headers_out = calloc(p->n_headers,
2129 sizeof(struct header_out_runtime));
2130 CHECK(t->headers_out, ENOMEM);
/* Single contiguous buffer backing all headers of this thread. */
2132 t->header_storage = calloc(1, n_bytes);
2133 CHECK(t->header_storage, ENOMEM);
2135 t->header_out_storage = calloc(1, n_bytes);
2136 CHECK(t->header_out_storage, ENOMEM);
2138 TAILQ_FOREACH(h, &p->headers, node) {
2139 uint8_t *header_storage;
2141 header_storage = &t->header_storage[offset];
2142 offset += h->st->n_bits / 8;
/* ptr0 is the header's home slice; structs[] may later be repointed
 * into the packet by extract (see __instr_hdr_extract_exec). */
2144 t->headers[h->id].ptr0 = header_storage;
2145 t->structs[h->struct_id] = header_storage;
/*
 * Undo header_build: per thread, free the runtime arrays and the backing
 * storage buffers, resetting the pointers for a clean rebuild.
 */
2153 header_build_free(struct rte_swx_pipeline *p)
2157 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2158 struct thread *t = &p->threads[i];
2160 free(t->headers_out);
2161 t->headers_out = NULL;
2166 free(t->header_out_storage);
2167 t->header_out_storage = NULL;
2169 free(t->header_storage);
2170 t->header_storage = NULL;
/* Full teardown: drop runtime state, then free every header node. */
2175 header_free(struct rte_swx_pipeline *p)
2177 header_build_free(p);
2180 struct header *elem;
2182 elem = TAILQ_FIRST(&p->headers);
2186 TAILQ_REMOVE(&p->headers, elem, node);
/* Parse an "m.<field>" reference against the registered metadata struct
 * type; NULL when no metadata type is set or the prefix does not match. */
2194 static struct field *
2195 metadata_field_parse(struct rte_swx_pipeline *p, const char *name)
2197 if (!p->metadata_st)
2200 if (name[0] != 'm' || name[1] != '.')
2203 return struct_type_field_find(p->metadata_st, &name[2]);
/*
 * Public API: bind the packet metadata to an existing struct type. May be
 * done only once (metadata_st must still be NULL); metadata takes the next
 * struct slot.
 */
2207 rte_swx_pipeline_packet_metadata_register(struct rte_swx_pipeline *p,
2208 const char *struct_type_name)
2210 struct struct_type *st = NULL;
2214 CHECK_NAME(struct_type_name, EINVAL);
2215 st = struct_type_find(p, struct_type_name);
2217 CHECK(!p->metadata_st, EINVAL);
2219 p->metadata_st = st;
2220 p->metadata_struct_id = p->n_structs;
/*
 * Build step: allocate one zeroed metadata buffer per thread, sized from the
 * metadata struct type, and publish it into the thread's structs[] slot.
 */
2228 metadata_build(struct rte_swx_pipeline *p)
2230 uint32_t n_bytes = p->metadata_st->n_bits / 8;
2233 /* Thread-level initialization. */
2234 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2235 struct thread *t = &p->threads[i];
2238 metadata = calloc(1, n_bytes);
2239 CHECK(metadata, ENOMEM);
2241 t->metadata = metadata;
2242 t->structs[p->metadata_struct_id] = metadata;
/* Undo metadata_build: free each thread's metadata buffer (elided body). */
2249 metadata_build_free(struct rte_swx_pipeline *p)
2253 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2254 struct thread *t = &p->threads[i];
/* Full metadata teardown is just the build-free step. */
2262 metadata_free(struct rte_swx_pipeline *p)
2264 metadata_build_free(p);
/*
 * Predicate: true for every jump-family instruction type (unconditional and
 * all conditional variants). Used by the translator/optimizer to identify
 * branch targets; the return statements live in elided lines.
 */
2271 instruction_is_jmp(struct instruction *instr)
2273 switch (instr->type) {
2275 case INSTR_JMP_VALID:
2276 case INSTR_JMP_INVALID:
2278 case INSTR_JMP_MISS:
2279 case INSTR_JMP_ACTION_HIT:
2280 case INSTR_JMP_ACTION_MISS:
2282 case INSTR_JMP_EQ_S:
2283 case INSTR_JMP_EQ_I:
2285 case INSTR_JMP_NEQ_S:
2286 case INSTR_JMP_NEQ_I:
2288 case INSTR_JMP_LT_MH:
2289 case INSTR_JMP_LT_HM:
2290 case INSTR_JMP_LT_HH:
2291 case INSTR_JMP_LT_MI:
2292 case INSTR_JMP_LT_HI:
2294 case INSTR_JMP_GT_MH:
2295 case INSTR_JMP_GT_HM:
2296 case INSTR_JMP_GT_HH:
2297 case INSTR_JMP_GT_MI:
2298 case INSTR_JMP_GT_HI:
2306 static struct field *
2307 action_field_parse(struct action *action, const char *name);
/*
 * Generic field reference parser: dispatch on the name prefix to the proper
 * namespace — "h." header field, "m." metadata field, action data field,
 * "e." extern object mailbox field, "f." extern function mailbox field —
 * and report the owning struct id via *struct_id. The dispatch/return logic
 * between the branches is in elided lines.
 */
2309 static struct field *
2310 struct_field_parse(struct rte_swx_pipeline *p,
2311 struct action *action,
2313 uint32_t *struct_id)
2320 struct header *header;
2322 f = header_field_parse(p, name, &header);
2326 *struct_id = header->struct_id;
2332 f = metadata_field_parse(p, name);
2336 *struct_id = p->metadata_struct_id;
/* Action data fields live in structs[0] (table action data). */
2345 f = action_field_parse(action, name);
2355 struct extern_obj *obj;
2357 f = extern_obj_mailbox_field_parse(p, name, &obj);
2361 *struct_id = obj->struct_id;
2367 struct extern_func *func;
2369 f = extern_func_mailbox_field_parse(p, name, &func);
2373 *struct_id = func->struct_id;
/* Round-robin input port advance; relies on n_ports_in being a power of
 * two so the mask works as a modulo. */
2383 pipeline_port_inc(struct rte_swx_pipeline *p)
2385 p->port_id = (p->port_id + 1) & (p->n_ports_in - 1);
/* Reset the thread's instruction pointer to the start of the program. */
2389 thread_ip_reset(struct rte_swx_pipeline *p, struct thread *t)
2391 t->ip = p->instructions;
/* Set the thread's instruction pointer to an arbitrary instruction. */
2395 thread_ip_set(struct thread *t, struct instruction *ip)
/* Jump into the first instruction of the given action's program. */
2401 thread_ip_action_call(struct rte_swx_pipeline *p,
2406 t->ip = p->action_instructions[action_id];
2410 thread_ip_inc(struct rte_swx_pipeline *p);
/* Advance the current thread's instruction pointer by one. */
2413 thread_ip_inc(struct rte_swx_pipeline *p)
2415 struct thread *t = &p->threads[p->thread_id];
/* Branchless conditional ip advance: cond is 0 or 1. */
2421 thread_ip_inc_cond(struct thread *t, int cond)
/* Round-robin switch to the next thread; THREADS_MAX is a power of two. */
2427 thread_yield(struct rte_swx_pipeline *p)
2429 p->thread_id = (p->thread_id + 1) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
/* Branchless conditional yield: advances thread_id by cond (0 or 1). */
2433 thread_yield_cond(struct rte_swx_pipeline *p, int cond)
2435 p->thread_id = (p->thread_id + cond) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
/*
 * Translate "rx m.<field>": the rx instruction stores the input port id into
 * the given metadata field. Only valid outside actions (CHECK(!action)).
 */
2442 instr_rx_translate(struct rte_swx_pipeline *p,
2443 struct action *action,
2446 struct instruction *instr,
2447 struct instruction_data *data __rte_unused)
2451 CHECK(!action, EINVAL);
2452 CHECK(n_tokens == 2, EINVAL);
2454 f = metadata_field_parse(p, tokens[1]);
2457 instr->type = INSTR_RX;
/* Byte offset/width of the destination metadata field. */
2458 instr->io.io.offset = f->offset / 8;
2459 instr->io.io.n_bits = f->n_bits;
2464 instr_rx_exec(struct rte_swx_pipeline *p);
/*
 * Execute rx: poll the current input port, point the thread at the packet
 * headers, reset per-packet state (valid_headers, headers_out), record the
 * port id into metadata and snapshot the table state. On an empty poll the
 * ip is not advanced (thread retries after yielding).
 */
2467 instr_rx_exec(struct rte_swx_pipeline *p)
2469 struct thread *t = &p->threads[p->thread_id];
2470 struct instruction *ip = t->ip;
2471 struct port_in_runtime *port = &p->in[p->port_id];
2472 struct rte_swx_pkt *pkt = &t->pkt;
2476 pkt_received = port->pkt_rx(port->obj, pkt);
2477 t->ptr = &pkt->pkt[pkt->offset];
2478 rte_prefetch0(t->ptr);
2480 TRACE("[Thread %2u] rx %s from port %u\n",
2482 pkt_received ? "1 pkt" : "0 pkts",
2486 t->valid_headers = 0;
2487 t->n_headers_out = 0;
2490 METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, p->port_id);
2493 t->table_state = p->table_state;
2496 pipeline_port_inc(p);
/* Only move past rx when a packet was actually received. */
2497 thread_ip_inc_cond(t, pkt_received);
/*
 * Translate "tx m.<field>": the tx instruction reads the output port id from
 * the given metadata field at execution time.
 */
2505 instr_tx_translate(struct rte_swx_pipeline *p,
2506 struct action *action __rte_unused,
2509 struct instruction *instr,
2510 struct instruction_data *data __rte_unused)
2514 CHECK(n_tokens == 2, EINVAL);
2516 f = metadata_field_parse(p, tokens[1]);
2519 instr->type = INSTR_TX;
2520 instr->io.io.offset = f->offset / 8;
2521 instr->io.io.n_bits = f->n_bits;
/*
 * Finalize the emitted headers into the packet buffer before tx.
 * Fast paths: (1) a single emitted header already contiguous with the packet
 * payload (no change / pure decap) — just widen the packet window; (2) two
 * emitted headers where the second abuts the payload and the first sits in
 * its home storage (encap, possibly after decap) — one memcpy in front of
 * the payload. Everything else falls through to the generic path that gathers
 * all emitted headers into header_out_storage and copies them as one block.
 */
2526 emit_handler(struct thread *t)
2528 struct header_out_runtime *h0 = &t->headers_out[0];
2529 struct header_out_runtime *h1 = &t->headers_out[1];
2530 uint32_t offset = 0, i;
2532 /* No header change or header decapsulation. */
2533 if ((t->n_headers_out == 1) &&
2534 (h0->ptr + h0->n_bytes == t->ptr)) {
2535 TRACE("Emit handler: no header change or header decap.\n");
/* Headers are prepended: grow the packet window backwards. */
2537 t->pkt.offset -= h0->n_bytes;
2538 t->pkt.length += h0->n_bytes;
2543 /* Header encapsulation (optionally, with prior header decasulation). */
2544 if ((t->n_headers_out == 2) &&
2545 (h1->ptr + h1->n_bytes == t->ptr) &&
2546 (h0->ptr == h0->ptr0)) {
2549 TRACE("Emit handler: header encapsulation.\n");
2551 offset = h0->n_bytes + h1->n_bytes;
/* Copy only h0; h1 is already in place right before the payload. */
2552 memcpy(t->ptr - offset, h0->ptr, h0->n_bytes);
2553 t->pkt.offset -= offset;
2554 t->pkt.length += offset;
2559 /* Header insertion. */
2562 /* Header extraction. */
2565 /* For any other case. */
2566 TRACE("Emit handler: complex case.\n");
/* Gather all emitted headers contiguously into the scratch buffer. */
2568 for (i = 0; i < t->n_headers_out; i++) {
2569 struct header_out_runtime *h = &t->headers_out[i];
2571 memcpy(&t->header_out_storage[offset], h->ptr, h->n_bytes);
2572 offset += h->n_bytes;
2576 memcpy(t->ptr - offset, t->header_out_storage, offset);
2577 t->pkt.offset -= offset;
2578 t->pkt.length += offset;
2583 instr_tx_exec(struct rte_swx_pipeline *p);
/*
 * Execute tx: read the output port id from metadata, finalize the emitted
 * headers into the packet (emit_handler, called in elided lines around the
 * pkt_tx), transmit, and restart the thread's program for the next packet.
 */
2586 instr_tx_exec(struct rte_swx_pipeline *p)
2588 struct thread *t = &p->threads[p->thread_id];
2589 struct instruction *ip = t->ip;
2590 uint64_t port_id = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
2591 struct port_out_runtime *port = &p->out[port_id];
2592 struct rte_swx_pkt *pkt = &t->pkt;
2594 TRACE("[Thread %2u]: tx 1 pkt to port %u\n",
2602 port->pkt_tx(port->obj, pkt);
/* Packet done: restart the program (back to rx). */
2605 thread_ip_reset(p, t);
/*
 * Translate "extract h.<header>": record the header id, struct slot and byte
 * size in slot 0 of the instruction's header vector (fused extracts fill
 * further slots). Only valid outside actions.
 */
2613 instr_hdr_extract_translate(struct rte_swx_pipeline *p,
2614 struct action *action,
2617 struct instruction *instr,
2618 struct instruction_data *data __rte_unused)
2622 CHECK(!action, EINVAL);
2623 CHECK(n_tokens == 2, EINVAL);
2625 h = header_parse(p, tokens[1]);
2628 instr->type = INSTR_HDR_EXTRACT;
2629 instr->io.hdr.header_id[0] = h->id;
2630 instr->io.hdr.struct_id[0] = h->struct_id;
2631 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
2636 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract);
/*
 * Common body for the (possibly fused) extract instruction: for each of the
 * n_extract headers, point the header's struct slot directly into the packet
 * buffer (zero-copy), mark the header valid, and advance the packet cursor
 * (ptr/offset/length updates partially in elided lines).
 */
2639 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract)
2641 struct thread *t = &p->threads[p->thread_id];
2642 struct instruction *ip = t->ip;
2643 uint64_t valid_headers = t->valid_headers;
2644 uint8_t *ptr = t->ptr;
2645 uint32_t offset = t->pkt.offset;
2646 uint32_t length = t->pkt.length;
2649 for (i = 0; i < n_extract; i++) {
2650 uint32_t header_id = ip->io.hdr.header_id[i];
2651 uint32_t struct_id = ip->io.hdr.struct_id[i];
2652 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
2654 TRACE("[Thread %2u]: extract header %u (%u bytes)\n",
/* Zero-copy: the header struct aliases the packet bytes. */
2660 t->structs[struct_id] = ptr;
2661 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
2670 t->valid_headers = valid_headers;
2673 t->pkt.offset = offset;
2674 t->pkt.length = length;
/*
 * Thin wrappers over __instr_hdr_extract_exec() for 1..8 fused extract
 * instructions; the only difference is the fused count and the trace line.
 */
2679 instr_hdr_extract_exec(struct rte_swx_pipeline *p)
2681 __instr_hdr_extract_exec(p, 1);
2688 instr_hdr_extract2_exec(struct rte_swx_pipeline *p)
2690 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
2693 __instr_hdr_extract_exec(p, 2);
2700 instr_hdr_extract3_exec(struct rte_swx_pipeline *p)
2702 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
2705 __instr_hdr_extract_exec(p, 3);
2712 instr_hdr_extract4_exec(struct rte_swx_pipeline *p)
2714 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
2717 __instr_hdr_extract_exec(p, 4);
2724 instr_hdr_extract5_exec(struct rte_swx_pipeline *p)
2726 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
2729 __instr_hdr_extract_exec(p, 5);
2736 instr_hdr_extract6_exec(struct rte_swx_pipeline *p)
2738 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
2741 __instr_hdr_extract_exec(p, 6);
2748 instr_hdr_extract7_exec(struct rte_swx_pipeline *p)
2750 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
2753 __instr_hdr_extract_exec(p, 7);
2760 instr_hdr_extract8_exec(struct rte_swx_pipeline *p)
2762 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
2765 __instr_hdr_extract_exec(p, 8);
/*
 * Translate "emit h.<header>": record the header id, struct slot and byte
 * size in slot 0 of the instruction's header vector (fused emits fill
 * further slots).
 */
2775 instr_hdr_emit_translate(struct rte_swx_pipeline *p,
2776 struct action *action __rte_unused,
2779 struct instruction *instr,
2780 struct instruction_data *data __rte_unused)
2784 CHECK(n_tokens == 2, EINVAL);
2786 h = header_parse(p, tokens[1]);
2789 instr->type = INSTR_HDR_EMIT;
2790 instr->io.hdr.header_id[0] = h->id;
2791 instr->io.hdr.struct_id[0] = h->struct_id;
2792 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
2797 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit);
/*
 * Common body for the (possibly fused) emit instruction: append each emitted
 * header to the headers_out list, coalescing a header that is contiguous in
 * memory with the previous out-entry into a single entry (so emit_handler can
 * use its memcpy fast paths). The branch structure between the contiguous and
 * non-contiguous cases is partially elided in this extract.
 */
2800 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit)
2802 struct thread *t = &p->threads[p->thread_id];
2803 struct instruction *ip = t->ip;
2804 uint32_t n_headers_out = t->n_headers_out;
/* Current (last) out-entry; only valid when n_headers_out > 0. */
2805 struct header_out_runtime *ho = &t->headers_out[n_headers_out - 1];
2806 uint8_t *ho_ptr = NULL;
2807 uint32_t ho_nbytes = 0, i;
2809 for (i = 0; i < n_emit; i++) {
2810 uint32_t header_id = ip->io.hdr.header_id[i];
2811 uint32_t struct_id = ip->io.hdr.struct_id[i];
2812 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
2814 struct header_runtime *hi = &t->headers[header_id];
2815 uint8_t *hi_ptr = t->structs[struct_id];
2817 TRACE("[Thread %2u]: emit header %u\n",
2823 if (!t->n_headers_out) {
/* First emitted header: start out-entry 0. */
2824 ho = &t->headers_out[0];
2826 ho->ptr0 = hi->ptr0;
2830 ho_nbytes = n_bytes;
2837 ho_nbytes = ho->n_bytes;
/* Contiguous with the previous out-entry: extend it in place. */
2841 if (ho_ptr + ho_nbytes == hi_ptr) {
2842 ho_nbytes += n_bytes;
2844 ho->n_bytes = ho_nbytes;
/* Not contiguous: open a new out-entry for this header. */
2847 ho->ptr0 = hi->ptr0;
2851 ho_nbytes = n_bytes;
2857 ho->n_bytes = ho_nbytes;
2858 t->n_headers_out = n_headers_out;
/*
 * Emit wrappers. instr_hdr_emit_exec handles a single stand-alone emit;
 * the *_tx_exec variants are fused emit(s)+tx super-instructions: N emits
 * followed by the tx sequence (the tx portion of each body is elided in
 * this extract), hence the "next N+1 instructions are fused" traces.
 */
2862 instr_hdr_emit_exec(struct rte_swx_pipeline *p)
2864 __instr_hdr_emit_exec(p, 1);
2871 instr_hdr_emit_tx_exec(struct rte_swx_pipeline *p)
2873 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
2876 __instr_hdr_emit_exec(p, 1);
2881 instr_hdr_emit2_tx_exec(struct rte_swx_pipeline *p)
2883 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
2886 __instr_hdr_emit_exec(p, 2);
2891 instr_hdr_emit3_tx_exec(struct rte_swx_pipeline *p)
2893 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
2896 __instr_hdr_emit_exec(p, 3);
2901 instr_hdr_emit4_tx_exec(struct rte_swx_pipeline *p)
2903 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
2906 __instr_hdr_emit_exec(p, 4);
2911 instr_hdr_emit5_tx_exec(struct rte_swx_pipeline *p)
2913 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
2916 __instr_hdr_emit_exec(p, 5);
2921 instr_hdr_emit6_tx_exec(struct rte_swx_pipeline *p)
2923 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
2926 __instr_hdr_emit_exec(p, 6);
2931 instr_hdr_emit7_tx_exec(struct rte_swx_pipeline *p)
2933 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
2936 __instr_hdr_emit_exec(p, 7);
2941 instr_hdr_emit8_tx_exec(struct rte_swx_pipeline *p)
2943 TRACE("[Thread %2u] *** The next 9 instructions are fused. ***\n",
2946 __instr_hdr_emit_exec(p, 8);
/* Translate "validate h.<header>": store the header id to set valid. */
2954 instr_hdr_validate_translate(struct rte_swx_pipeline *p,
2955 struct action *action __rte_unused,
2958 struct instruction *instr,
2959 struct instruction_data *data __rte_unused)
2963 CHECK(n_tokens == 2, EINVAL);
2965 h = header_parse(p, tokens[1]);
2968 instr->type = INSTR_HDR_VALIDATE;
2969 instr->valid.header_id = h->id;
/* Execute validate: set the header's bit in the valid_headers mask. */
2974 instr_hdr_validate_exec(struct rte_swx_pipeline *p)
2976 struct thread *t = &p->threads[p->thread_id];
2977 struct instruction *ip = t->ip;
2978 uint32_t header_id = ip->valid.header_id;
2980 TRACE("[Thread %2u] validate header %u\n", p->thread_id, header_id);
2983 t->valid_headers = MASK64_BIT_SET(t->valid_headers, header_id);
/* Translate "invalidate h.<header>": store the header id to clear valid. */
2993 instr_hdr_invalidate_translate(struct rte_swx_pipeline *p,
2994 struct action *action __rte_unused,
2997 struct instruction *instr,
2998 struct instruction_data *data __rte_unused)
3002 CHECK(n_tokens == 2, EINVAL);
3004 h = header_parse(p, tokens[1])
3007 instr->type = INSTR_HDR_INVALIDATE;
3008 instr->valid.header_id = h->id;
/* Execute invalidate: clear the header's bit in the valid_headers mask. */
3013 instr_hdr_invalidate_exec(struct rte_swx_pipeline *p)
3015 struct thread *t = &p->threads[p->thread_id];
3016 struct instruction *ip = t->ip;
3017 uint32_t header_id = ip->valid.header_id;
3019 TRACE("[Thread %2u] invalidate header %u\n", p->thread_id, header_id);
3022 t->valid_headers = MASK64_BIT_CLR(t->valid_headers, header_id);
3031 static struct table *
3032 table_find(struct rte_swx_pipeline *p, const char *name);
/*
 * Translate "table <name>": resolve the table and store its id. Only valid
 * outside actions (tables cannot be applied from within an action).
 */
3035 instr_table_translate(struct rte_swx_pipeline *p,
3036 struct action *action,
3039 struct instruction *instr,
3040 struct instruction_data *data __rte_unused)
3044 CHECK(!action, EINVAL);
3045 CHECK(n_tokens == 2, EINVAL);
3047 t = table_find(p, tokens[1]);
3050 instr->type = INSTR_TABLE;
3051 instr->table.table_id = t->id;
/*
 * Execute table lookup: call the table's lookup function; if the lookup is
 * not finalized (multi-step lookups), the thread yields and retries (elided
 * branch after the "not finalized" trace). On completion, select the matched
 * action or the table's default action, publish the action data as structs[0]
 * and jump into the action's instruction program.
 */
3056 instr_table_exec(struct rte_swx_pipeline *p)
3058 struct thread *t = &p->threads[p->thread_id];
3059 struct instruction *ip = t->ip;
3060 uint32_t table_id = ip->table.table_id;
3061 struct rte_swx_table_state *ts = &t->table_state[table_id];
3062 struct table_runtime *table = &t->tables[table_id];
3064 uint8_t *action_data;
3068 done = table->func(ts->obj,
3076 TRACE("[Thread %2u] table %u (not finalized)\n",
/* Miss falls back to the table's default action/data. */
3084 action_id = hit ? action_id : ts->default_action_id;
3085 action_data = hit ? action_data : ts->default_action_data;
3087 TRACE("[Thread %2u] table %u (%s, action %u)\n",
3090 hit ? "hit" : "miss",
3091 (uint32_t)action_id);
3093 t->action_id = action_id;
/* structs[0] is the well-known action-data slot read by DMA/mov. */
3094 t->structs[0] = action_data;
3098 thread_ip_action_call(p, t, action_id);
/*
 * Translate "extern e.<obj>.<func>" or "extern f.<func>": dispatch on the
 * namespace prefix to either an extern object member function call or an
 * extern function call, recording the resolved ids in the instruction.
 */
3105 instr_extern_translate(struct rte_swx_pipeline *p,
3106 struct action *action __rte_unused,
3109 struct instruction *instr,
3110 struct instruction_data *data __rte_unused)
3112 char *token = tokens[1];
3114 CHECK(n_tokens == 2, EINVAL);
3116 if (token[0] == 'e') {
3117 struct extern_obj *obj;
3118 struct extern_type_member_func *func;
3120 func = extern_obj_member_func_parse(p, token, &obj);
3121 CHECK(func, EINVAL);
3123 instr->type = INSTR_EXTERN_OBJ;
3124 instr->ext_obj.ext_obj_id = obj->id;
3125 instr->ext_obj.func_id = func->id;
3130 if (token[0] == 'f') {
3131 struct extern_func *func;
3133 func = extern_func_parse(p, token);
3134 CHECK(func, EINVAL);
3136 instr->type = INSTR_EXTERN_FUNC;
3137 instr->ext_func.ext_func_id = func->id;
/*
 * Execute an extern object member function call: invoke the callback with
 * the object handle and its per-thread mailbox. A zero return means "not
 * done": the ip is not advanced and the thread yields so the call can be
 * resumed later (done ^ 1 turns done==0 into a yield).
 */
3146 instr_extern_obj_exec(struct rte_swx_pipeline *p)
3148 struct thread *t = &p->threads[p->thread_id];
3149 struct instruction *ip = t->ip;
3150 uint32_t obj_id = ip->ext_obj.ext_obj_id;
3151 uint32_t func_id = ip->ext_obj.func_id;
3152 struct extern_obj_runtime *obj = &t->extern_objs[obj_id];
3153 rte_swx_extern_type_member_func_t func = obj->funcs[func_id];
3155 TRACE("[Thread %2u] extern obj %u member func %u\n",
3160 /* Extern object member function execute. */
3161 uint32_t done = func(obj->obj, obj->mailbox);
3164 thread_ip_inc_cond(t, done);
3165 thread_yield_cond(p, done ^ 1);
/* Same resume semantics for a stand-alone extern function: the callback
 * gets only its mailbox; done==0 keeps the ip in place and yields. */
3169 instr_extern_func_exec(struct rte_swx_pipeline *p)
3171 struct thread *t = &p->threads[p->thread_id];
3172 struct instruction *ip = t->ip;
3173 uint32_t ext_func_id = ip->ext_func.ext_func_id;
3174 struct extern_func_runtime *ext_func = &t->extern_funcs[ext_func_id];
3175 rte_swx_extern_func_t func = ext_func->func;
3177 TRACE("[Thread %2u] extern func %u\n",
3181 /* Extern function execute. */
3182 uint32_t done = func(ext_func->mailbox);
3185 thread_ip_inc_cond(t, done);
3186 thread_yield_cond(p, done ^ 1);
/*
 * Translate "mov <dst> <src>": three encodings are selected here —
 * INSTR_MOV (both operands same endianness domain), INSTR_MOV_S (exactly one
 * operand is a header field, requiring a byte swap at run time), and
 * INSTR_MOV_I (src is an immediate integer literal). The branch between the
 * field/field and field/immediate paths is in elided lines.
 */
3193 instr_mov_translate(struct rte_swx_pipeline *p,
3194 struct action *action,
3197 struct instruction *instr,
3198 struct instruction_data *data __rte_unused)
3200 char *dst = tokens[1], *src = tokens[2];
3201 struct field *fdst, *fsrc;
3202 uint32_t dst_struct_id, src_struct_id, src_val;
3204 CHECK(n_tokens == 3, EINVAL);
/* dst may not be an action-data field (action passed as NULL). */
3206 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3207 CHECK(fdst, EINVAL);
3210 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3212 instr->type = INSTR_MOV;
/* Header fields are big-endian, metadata host-endian: mixed operands
 * need the swapping variant. */
3213 if ((dst[0] == 'h' && src[0] != 'h') ||
3214 (dst[0] != 'h' && src[0] == 'h'))
3215 instr->type = INSTR_MOV_S;
3217 instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
3218 instr->mov.dst.n_bits = fdst->n_bits;
3219 instr->mov.dst.offset = fdst->offset / 8;
3220 instr->mov.src.struct_id = (uint8_t)src_struct_id;
3221 instr->mov.src.n_bits = fsrc->n_bits;
3222 instr->mov.src.offset = fsrc->offset / 8;
/* Immediate path: src must parse fully as an integer literal. */
3227 src_val = strtoul(src, &src, 0);
3228 CHECK(!src[0], EINVAL);
/* Pre-swap the immediate when the destination is a header field. */
3231 src_val = htonl(src_val);
3233 instr->type = INSTR_MOV_I;
3234 instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
3235 instr->mov.dst.n_bits = fdst->n_bits;
3236 instr->mov.dst.offset = fdst->offset / 8;
3237 instr->mov.src_val = (uint32_t)src_val;
/* Execute mov (no endianness swap); the copy itself is in elided lines. */
3242 instr_mov_exec(struct rte_swx_pipeline *p)
3244 struct thread *t = &p->threads[p->thread_id];
3245 struct instruction *ip = t->ip;
3247 TRACE("[Thread %2u] mov\n",
/* Execute mov with byte swap (header <-> metadata operands). */
3257 instr_mov_s_exec(struct rte_swx_pipeline *p)
3259 struct thread *t = &p->threads[p->thread_id];
3260 struct instruction *ip = t->ip;
3262 TRACE("[Thread %2u] mov (s)\n",
/* Execute mov of an immediate value into a field. */
3272 instr_mov_i_exec(struct rte_swx_pipeline *p)
3274 struct thread *t = &p->threads[p->thread_id];
3275 struct instruction *ip = t->ip;
3277 TRACE("[Thread %2u] mov m.f %x\n",
/*
 * Translate "dma h.<header> t.<field>": bulk-copy a whole header's worth of
 * action data (table entry field) into the header. Only valid inside actions
 * (CHECK(action)). Slot 0 of the per-instruction vectors is filled; fused
 * DMA instructions fill further slots.
 */
3291 instr_dma_translate(struct rte_swx_pipeline *p,
3292 struct action *action,
3295 struct instruction *instr,
3296 struct instruction_data *data __rte_unused)
3298 char *dst = tokens[1];
3299 char *src = tokens[2];
3303 CHECK(action, EINVAL);
3304 CHECK(n_tokens == 3, EINVAL);
3306 h = header_parse(p, dst);
3309 tf = action_field_parse(action, src);
3312 instr->type = INSTR_DMA_HT;
3313 instr->dma.dst.header_id[0] = h->id;
3314 instr->dma.dst.struct_id[0] = h->struct_id;
3315 instr->dma.n_bytes[0] = h->st->n_bits / 8;
3316 instr->dma.src.offset[0] = tf->offset / 8;
3322 __instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma);
/*
 * Common body for the (possibly fused) DMA instruction: for each of the
 * n_dma transfers, copy n_bytes from the action data (structs[0]) into the
 * header. If the header is already valid the copy goes to its current packet
 * location; otherwise it goes to the header's home storage (ptr0), after
 * which the header is made valid and structs[] is repointed accordingly.
 */
3325 __instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma)
3327 struct thread *t = &p->threads[p->thread_id];
3328 struct instruction *ip = t->ip;
/* structs[0] is the action data set up by instr_table_exec. */
3329 uint8_t *action_data = t->structs[0];
3330 uint64_t valid_headers = t->valid_headers;
3333 for (i = 0; i < n_dma; i++) {
3334 uint32_t header_id = ip->dma.dst.header_id[i];
3335 uint32_t struct_id = ip->dma.dst.struct_id[i];
3336 uint32_t offset = ip->dma.src.offset[i];
3337 uint32_t n_bytes = ip->dma.n_bytes[i];
3339 struct header_runtime *h = &t->headers[header_id];
3340 uint8_t *h_ptr0 = h->ptr0;
3341 uint8_t *h_ptr = t->structs[struct_id];
/* Valid header: write in place; invalid: write to home storage. */
3343 void *dst = MASK64_BIT_GET(valid_headers, header_id) ?
3345 void *src = &action_data[offset];
3347 TRACE("[Thread %2u] dma h.s t.f\n", p->thread_id);
3350 memcpy(dst, src, n_bytes);
3351 t->structs[struct_id] = dst;
3352 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
3355 t->valid_headers = valid_headers;
/*
 * Thin wrappers over __instr_dma_ht_exec() for 1..8 fused DMA instructions;
 * only the fused count and the trace line differ.
 */
3359 instr_dma_ht_exec(struct rte_swx_pipeline *p)
3361 __instr_dma_ht_exec(p, 1);
3368 instr_dma_ht2_exec(struct rte_swx_pipeline *p)
3370 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
3373 __instr_dma_ht_exec(p, 2);
3380 instr_dma_ht3_exec(struct rte_swx_pipeline *p)
3382 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
3385 __instr_dma_ht_exec(p, 3);
3392 instr_dma_ht4_exec(struct rte_swx_pipeline *p)
3394 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
3397 __instr_dma_ht_exec(p, 4);
3404 instr_dma_ht5_exec(struct rte_swx_pipeline *p)
3406 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
3409 __instr_dma_ht_exec(p, 5);
3416 instr_dma_ht6_exec(struct rte_swx_pipeline *p)
3418 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
3421 __instr_dma_ht_exec(p, 6);
3428 instr_dma_ht7_exec(struct rte_swx_pipeline *p)
3430 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
3433 __instr_dma_ht_exec(p, 7);
3440 instr_dma_ht8_exec(struct rte_swx_pipeline *p)
3442 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
3445 __instr_dma_ht_exec(p, 8);
/*
 * Translate "add <dst> <src>": select the ADD variant from the operand
 * endianness domains — plain ADD for same-domain operands, HM/MH/HH for
 * mixed header/metadata pairs, and MI/HI when src is an immediate integer
 * (immediate branch partially elided).
 */
3455 instr_alu_add_translate(struct rte_swx_pipeline *p,
3456 struct action *action,
3459 struct instruction *instr,
3460 struct instruction_data *data __rte_unused)
3462 char *dst = tokens[1], *src = tokens[2];
3463 struct field *fdst, *fsrc;
3464 uint32_t dst_struct_id, src_struct_id, src_val;
3466 CHECK(n_tokens == 3, EINVAL);
/* dst may not be an action-data field (action passed as NULL). */
3468 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3469 CHECK(fdst, EINVAL);
3471 /* ADD, ADD_HM, ADD_MH, ADD_HH. */
3472 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3474 instr->type = INSTR_ALU_ADD;
3475 if (dst[0] == 'h' && src[0] == 'm')
3476 instr->type = INSTR_ALU_ADD_HM;
3477 if (dst[0] == 'm' && src[0] == 'h')
3478 instr->type = INSTR_ALU_ADD_MH;
3479 if (dst[0] == 'h' && src[0] == 'h')
3480 instr->type = INSTR_ALU_ADD_HH;
3482 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3483 instr->alu.dst.n_bits = fdst->n_bits;
3484 instr->alu.dst.offset = fdst->offset / 8;
3485 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3486 instr->alu.src.n_bits = fsrc->n_bits;
3487 instr->alu.src.offset = fsrc->offset / 8;
3491 /* ADD_MI, ADD_HI. */
3492 src_val = strtoul(src, &src, 0);
3493 CHECK(!src[0], EINVAL);
3495 instr->type = INSTR_ALU_ADD_MI;
3497 instr->type = INSTR_ALU_ADD_HI;
3499 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3500 instr->alu.dst.n_bits = fdst->n_bits;
3501 instr->alu.dst.offset = fdst->offset / 8;
3502 instr->alu.src_val = (uint32_t)src_val;
/*
 * Translate "sub <dst> <src>": mirrors instr_alu_add_translate() with the
 * SUB family — plain SUB, HM/MH/HH for mixed header/metadata operands,
 * MI/HI for an immediate src (immediate branch partially elided).
 */
3507 instr_alu_sub_translate(struct rte_swx_pipeline *p,
3508 struct action *action,
3511 struct instruction *instr,
3512 struct instruction_data *data __rte_unused)
3514 char *dst = tokens[1], *src = tokens[2];
3515 struct field *fdst, *fsrc;
3516 uint32_t dst_struct_id, src_struct_id, src_val;
3518 CHECK(n_tokens == 3, EINVAL);
3520 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3521 CHECK(fdst, EINVAL);
3523 /* SUB, SUB_HM, SUB_MH, SUB_HH. */
3524 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3526 instr->type = INSTR_ALU_SUB;
3527 if (dst[0] == 'h' && src[0] == 'm')
3528 instr->type = INSTR_ALU_SUB_HM;
3529 if (dst[0] == 'm' && src[0] == 'h')
3530 instr->type = INSTR_ALU_SUB_MH;
3531 if (dst[0] == 'h' && src[0] == 'h')
3532 instr->type = INSTR_ALU_SUB_HH;
3534 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3535 instr->alu.dst.n_bits = fdst->n_bits;
3536 instr->alu.dst.offset = fdst->offset / 8;
3537 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3538 instr->alu.src.n_bits = fsrc->n_bits;
3539 instr->alu.src.offset = fsrc->offset / 8;
3543 /* SUB_MI, SUB_HI. */
3544 src_val = strtoul(src, &src, 0);
3545 CHECK(!src[0], EINVAL);
3547 instr->type = INSTR_ALU_SUB_MI;
3549 instr->type = INSTR_ALU_SUB_HI;
3551 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3552 instr->alu.dst.n_bits = fdst->n_bits;
3553 instr->alu.dst.offset = fdst->offset / 8;
3554 instr->alu.src_val = (uint32_t)src_val;
/*
 * Translate "ckadd dst src": incremental Internet-checksum add into a
 * 16-bit header field. When src is a single header field, CKADD_FIELD is
 * selected; when src names a whole header, CKADD_STRUCT is used, with the
 * specialized CKADD_STRUCT20 for 20-byte headers (the IPv4 header without
 * options). The dst field must be exactly 16 bits wide.
 */
3559 instr_alu_ckadd_translate(struct rte_swx_pipeline *p,
3560 struct action *action __rte_unused,
3563 struct instruction *instr,
3564 struct instruction_data *data __rte_unused)
3566 char *dst = tokens[1], *src = tokens[2];
3567 struct header *hdst, *hsrc;
3568 struct field *fdst, *fsrc;
3570 CHECK(n_tokens == 3, EINVAL);
3572 fdst = header_field_parse(p, dst, &hdst);
3573 CHECK(fdst && (fdst->n_bits == 16), EINVAL); /* Checksum dst is 16-bit. */
3576 fsrc = header_field_parse(p, src, &hsrc);
3578 instr->type = INSTR_ALU_CKADD_FIELD;
3579 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
3580 instr->alu.dst.n_bits = fdst->n_bits;
3581 instr->alu.dst.offset = fdst->offset / 8;
3582 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
3583 instr->alu.src.n_bits = fsrc->n_bits;
3584 instr->alu.src.offset = fsrc->offset / 8;
3588 /* CKADD_STRUCT, CKADD_STRUCT20. */
3589 hsrc = header_parse(p, src);
3590 CHECK(hsrc, EINVAL);
3592 instr->type = INSTR_ALU_CKADD_STRUCT;
3593 if ((hsrc->st->n_bits / 8) == 20)
3594 instr->type = INSTR_ALU_CKADD_STRUCT20;
3596 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
3597 instr->alu.dst.n_bits = fdst->n_bits;
3598 instr->alu.dst.offset = fdst->offset / 8;
3599 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
3600 instr->alu.src.n_bits = hsrc->st->n_bits; /* Whole header size in bits. */
3601 instr->alu.src.offset = 0; /* Unused. */
/*
 * Translate "cksub dst src": incremental Internet-checksum subtract of a
 * single header field from a 16-bit checksum header field (used when a
 * covered field is about to be rewritten). Both operands must be header
 * fields; dst must be exactly 16 bits wide.
 */
3606 instr_alu_cksub_translate(struct rte_swx_pipeline *p,
3607 struct action *action __rte_unused,
3610 struct instruction *instr,
3611 struct instruction_data *data __rte_unused)
3613 char *dst = tokens[1], *src = tokens[2];
3614 struct header *hdst, *hsrc;
3615 struct field *fdst, *fsrc;
3617 CHECK(n_tokens == 3, EINVAL);
3619 fdst = header_field_parse(p, dst, &hdst);
3620 CHECK(fdst && (fdst->n_bits == 16), EINVAL); /* Checksum dst is 16-bit. */
3622 fsrc = header_field_parse(p, src, &hsrc);
3623 CHECK(fsrc, EINVAL);
3625 instr->type = INSTR_ALU_CKSUB_FIELD;
3626 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
3627 instr->alu.dst.n_bits = fdst->n_bits;
3628 instr->alu.dst.offset = fdst->offset / 8;
3629 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
3630 instr->alu.src.n_bits = fsrc->n_bits;
3631 instr->alu.src.offset = fsrc->offset / 8;
/*
 * Translate "shl dst src": dst <<= src. Operand prefixes (h = header,
 * m = metadata/action) select the byte-order-aware SHL_HM/MH/HH variants;
 * a numeric src selects the immediate variants SHL_MI/SHL_HI.
 * NOTE(review): sub-sampled listing — the branch between the field and
 * immediate paths is not visible here.
 */
3636 instr_alu_shl_translate(struct rte_swx_pipeline *p,
3637 struct action *action,
3640 struct instruction *instr,
3641 struct instruction_data *data __rte_unused)
3643 char *dst = tokens[1], *src = tokens[2];
3644 struct field *fdst, *fsrc;
3645 uint32_t dst_struct_id, src_struct_id, src_val;
3647 CHECK(n_tokens == 3, EINVAL);
3649 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3650 CHECK(fdst, EINVAL);
3652 /* SHL, SHL_HM, SHL_MH, SHL_HH. */
3653 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3655 instr->type = INSTR_ALU_SHL;
3656 if (dst[0] == 'h' && src[0] == 'm')
3657 instr->type = INSTR_ALU_SHL_HM;
3658 if (dst[0] == 'm' && src[0] == 'h')
3659 instr->type = INSTR_ALU_SHL_MH;
3660 if (dst[0] == 'h' && src[0] == 'h')
3661 instr->type = INSTR_ALU_SHL_HH;
3663 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3664 instr->alu.dst.n_bits = fdst->n_bits;
3665 instr->alu.dst.offset = fdst->offset / 8;
3666 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3667 instr->alu.src.n_bits = fsrc->n_bits;
3668 instr->alu.src.offset = fsrc->offset / 8;
3672 /* SHL_MI, SHL_HI. */
3673 src_val = strtoul(src, &src, 0);
3674 CHECK(!src[0], EINVAL); /* Reject trailing garbage after the number. */
3676 instr->type = INSTR_ALU_SHL_MI;
3678 instr->type = INSTR_ALU_SHL_HI;
3680 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3681 instr->alu.dst.n_bits = fdst->n_bits;
3682 instr->alu.dst.offset = fdst->offset / 8;
3683 instr->alu.src_val = (uint32_t)src_val;
/*
 * Translate "shr dst src": dst >>= src. Mirror image of the shl
 * translator: h/m prefixes pick SHR_HM/MH/HH, a numeric src picks the
 * immediate variants SHR_MI/SHR_HI.
 */
3688 instr_alu_shr_translate(struct rte_swx_pipeline *p,
3689 struct action *action,
3692 struct instruction *instr,
3693 struct instruction_data *data __rte_unused)
3695 char *dst = tokens[1], *src = tokens[2];
3696 struct field *fdst, *fsrc;
3697 uint32_t dst_struct_id, src_struct_id, src_val;
3699 CHECK(n_tokens == 3, EINVAL);
3701 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3702 CHECK(fdst, EINVAL);
3704 /* SHR, SHR_HM, SHR_MH, SHR_HH. */
3705 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3707 instr->type = INSTR_ALU_SHR;
3708 if (dst[0] == 'h' && src[0] == 'm')
3709 instr->type = INSTR_ALU_SHR_HM;
3710 if (dst[0] == 'm' && src[0] == 'h')
3711 instr->type = INSTR_ALU_SHR_MH;
3712 if (dst[0] == 'h' && src[0] == 'h')
3713 instr->type = INSTR_ALU_SHR_HH;
3715 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3716 instr->alu.dst.n_bits = fdst->n_bits;
3717 instr->alu.dst.offset = fdst->offset / 8;
3718 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3719 instr->alu.src.n_bits = fsrc->n_bits;
3720 instr->alu.src.offset = fsrc->offset / 8;
3724 /* SHR_MI, SHR_HI. */
3725 src_val = strtoul(src, &src, 0);
3726 CHECK(!src[0], EINVAL); /* Reject trailing garbage after the number. */
3728 instr->type = INSTR_ALU_SHR_MI;
3730 instr->type = INSTR_ALU_SHR_HI;
3732 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3733 instr->alu.dst.n_bits = fdst->n_bits;
3734 instr->alu.dst.offset = fdst->offset / 8;
3735 instr->alu.src_val = (uint32_t)src_val;
/*
 * Translate "and dst src": dst &= src. Bitwise ops need no HM/MH split;
 * a single byte-swapping variant (AND_S) is used whenever exactly one
 * operand is a header field. For the immediate form, the constant is
 * pre-converted to network byte order with htonl() so the interpreter can
 * apply it directly to header data.
 */
3740 instr_alu_and_translate(struct rte_swx_pipeline *p,
3741 struct action *action,
3744 struct instruction *instr,
3745 struct instruction_data *data __rte_unused)
3747 char *dst = tokens[1], *src = tokens[2];
3748 struct field *fdst, *fsrc;
3749 uint32_t dst_struct_id, src_struct_id, src_val;
3751 CHECK(n_tokens == 3, EINVAL);
3753 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3754 CHECK(fdst, EINVAL);
3757 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3759 instr->type = INSTR_ALU_AND;
3760 if ((dst[0] == 'h' && src[0] != 'h') ||
3761 (dst[0] != 'h' && src[0] == 'h'))
3762 instr->type = INSTR_ALU_AND_S; /* Exactly one header operand. */
3764 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3765 instr->alu.dst.n_bits = fdst->n_bits;
3766 instr->alu.dst.offset = fdst->offset / 8;
3767 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3768 instr->alu.src.n_bits = fsrc->n_bits;
3769 instr->alu.src.offset = fsrc->offset / 8;
3774 src_val = strtoul(src, &src, 0);
3775 CHECK(!src[0], EINVAL); /* Reject trailing garbage after the number. */
3778 src_val = htonl(src_val);
3780 instr->type = INSTR_ALU_AND_I;
3781 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3782 instr->alu.dst.n_bits = fdst->n_bits;
3783 instr->alu.dst.offset = fdst->offset / 8;
3784 instr->alu.src_val = (uint32_t)src_val;
/*
 * Translate "or dst src": dst |= src. Same structure as the and
 * translator: OR_S when exactly one operand is a header field, OR_I for
 * an immediate src (stored in network byte order via htonl()).
 */
3789 instr_alu_or_translate(struct rte_swx_pipeline *p,
3790 struct action *action,
3793 struct instruction *instr,
3794 struct instruction_data *data __rte_unused)
3796 char *dst = tokens[1], *src = tokens[2];
3797 struct field *fdst, *fsrc;
3798 uint32_t dst_struct_id, src_struct_id, src_val;
3800 CHECK(n_tokens == 3, EINVAL);
3802 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3803 CHECK(fdst, EINVAL);
3806 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3808 instr->type = INSTR_ALU_OR;
3809 if ((dst[0] == 'h' && src[0] != 'h') ||
3810 (dst[0] != 'h' && src[0] == 'h'))
3811 instr->type = INSTR_ALU_OR_S; /* Exactly one header operand. */
3813 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3814 instr->alu.dst.n_bits = fdst->n_bits;
3815 instr->alu.dst.offset = fdst->offset / 8;
3816 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3817 instr->alu.src.n_bits = fsrc->n_bits;
3818 instr->alu.src.offset = fsrc->offset / 8;
3823 src_val = strtoul(src, &src, 0);
3824 CHECK(!src[0], EINVAL); /* Reject trailing garbage after the number. */
3827 src_val = htonl(src_val);
3829 instr->type = INSTR_ALU_OR_I;
3830 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3831 instr->alu.dst.n_bits = fdst->n_bits;
3832 instr->alu.dst.offset = fdst->offset / 8;
3833 instr->alu.src_val = (uint32_t)src_val;
/*
 * Translate "xor dst src": dst ^= src. Same structure as the and/or
 * translators: XOR_S when exactly one operand is a header field, XOR_I
 * for an immediate src (stored in network byte order via htonl()).
 */
3838 instr_alu_xor_translate(struct rte_swx_pipeline *p,
3839 struct action *action,
3842 struct instruction *instr,
3843 struct instruction_data *data __rte_unused)
3845 char *dst = tokens[1], *src = tokens[2];
3846 struct field *fdst, *fsrc;
3847 uint32_t dst_struct_id, src_struct_id, src_val;
3849 CHECK(n_tokens == 3, EINVAL);
3851 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3852 CHECK(fdst, EINVAL);
3855 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3857 instr->type = INSTR_ALU_XOR;
3858 if ((dst[0] == 'h' && src[0] != 'h') ||
3859 (dst[0] != 'h' && src[0] == 'h'))
3860 instr->type = INSTR_ALU_XOR_S; /* Exactly one header operand. */
3862 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3863 instr->alu.dst.n_bits = fdst->n_bits;
3864 instr->alu.dst.offset = fdst->offset / 8;
3865 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3866 instr->alu.src.n_bits = fsrc->n_bits;
3867 instr->alu.src.offset = fsrc->offset / 8;
3872 src_val = strtoul(src, &src, 0);
3873 CHECK(!src[0], EINVAL); /* Reject trailing garbage after the number. */
3876 src_val = htonl(src_val);
3878 instr->type = INSTR_ALU_XOR_I;
3879 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3880 instr->alu.dst.n_bits = fdst->n_bits;
3881 instr->alu.dst.offset = fdst->offset / 8;
3882 instr->alu.src_val = (uint32_t)src_val;
/*
 * Interpreter handlers for the "add" instruction family. One handler per
 * translated variant (plain, mh, hm, hh, and the mi/hi immediate forms);
 * each operates on the current thread's instruction pointer.
 * NOTE(review): the ALU macro invocations and instruction-pointer advance
 * are not visible in this sub-sampled listing — confirm against upstream.
 */
3887 instr_alu_add_exec(struct rte_swx_pipeline *p)
3889 struct thread *t = &p->threads[p->thread_id];
3890 struct instruction *ip = t->ip;
3892 TRACE("[Thread %2u] add\n", p->thread_id);
3902 instr_alu_add_mh_exec(struct rte_swx_pipeline *p)
3904 struct thread *t = &p->threads[p->thread_id];
3905 struct instruction *ip = t->ip;
3907 TRACE("[Thread %2u] add (mh)\n", p->thread_id);
3917 instr_alu_add_hm_exec(struct rte_swx_pipeline *p)
3919 struct thread *t = &p->threads[p->thread_id];
3920 struct instruction *ip = t->ip;
3922 TRACE("[Thread %2u] add (hm)\n", p->thread_id);
3932 instr_alu_add_hh_exec(struct rte_swx_pipeline *p)
3934 struct thread *t = &p->threads[p->thread_id];
3935 struct instruction *ip = t->ip;
3937 TRACE("[Thread %2u] add (hh)\n", p->thread_id);
3947 instr_alu_add_mi_exec(struct rte_swx_pipeline *p)
3949 struct thread *t = &p->threads[p->thread_id];
3950 struct instruction *ip = t->ip;
3952 TRACE("[Thread %2u] add (mi)\n", p->thread_id);
3962 instr_alu_add_hi_exec(struct rte_swx_pipeline *p)
3964 struct thread *t = &p->threads[p->thread_id];
3965 struct instruction *ip = t->ip;
3967 TRACE("[Thread %2u] add (hi)\n", p->thread_id);
/*
 * Interpreter handlers for the "sub" instruction family (plain, mh, hm,
 * hh, mi, hi variants); structure mirrors the add handlers above.
 */
3977 instr_alu_sub_exec(struct rte_swx_pipeline *p)
3979 struct thread *t = &p->threads[p->thread_id];
3980 struct instruction *ip = t->ip;
3982 TRACE("[Thread %2u] sub\n", p->thread_id);
3992 instr_alu_sub_mh_exec(struct rte_swx_pipeline *p)
3994 struct thread *t = &p->threads[p->thread_id];
3995 struct instruction *ip = t->ip;
3997 TRACE("[Thread %2u] sub (mh)\n", p->thread_id);
4007 instr_alu_sub_hm_exec(struct rte_swx_pipeline *p)
4009 struct thread *t = &p->threads[p->thread_id];
4010 struct instruction *ip = t->ip;
4012 TRACE("[Thread %2u] sub (hm)\n", p->thread_id);
4022 instr_alu_sub_hh_exec(struct rte_swx_pipeline *p)
4024 struct thread *t = &p->threads[p->thread_id];
4025 struct instruction *ip = t->ip;
4027 TRACE("[Thread %2u] sub (hh)\n", p->thread_id);
4037 instr_alu_sub_mi_exec(struct rte_swx_pipeline *p)
4039 struct thread *t = &p->threads[p->thread_id];
4040 struct instruction *ip = t->ip;
4042 TRACE("[Thread %2u] sub (mi)\n", p->thread_id);
4052 instr_alu_sub_hi_exec(struct rte_swx_pipeline *p)
4054 struct thread *t = &p->threads[p->thread_id];
4055 struct instruction *ip = t->ip;
4057 TRACE("[Thread %2u] sub (hi)\n", p->thread_id);
/*
 * Interpreter handlers for the "shl" (shift left) instruction family
 * (plain, mh, hm, hh, mi, hi variants).
 */
4067 instr_alu_shl_exec(struct rte_swx_pipeline *p)
4069 struct thread *t = &p->threads[p->thread_id];
4070 struct instruction *ip = t->ip;
4072 TRACE("[Thread %2u] shl\n", p->thread_id);
4082 instr_alu_shl_mh_exec(struct rte_swx_pipeline *p)
4084 struct thread *t = &p->threads[p->thread_id];
4085 struct instruction *ip = t->ip;
4087 TRACE("[Thread %2u] shl (mh)\n", p->thread_id);
4097 instr_alu_shl_hm_exec(struct rte_swx_pipeline *p)
4099 struct thread *t = &p->threads[p->thread_id];
4100 struct instruction *ip = t->ip;
4102 TRACE("[Thread %2u] shl (hm)\n", p->thread_id);
4112 instr_alu_shl_hh_exec(struct rte_swx_pipeline *p)
4114 struct thread *t = &p->threads[p->thread_id];
4115 struct instruction *ip = t->ip;
4117 TRACE("[Thread %2u] shl (hh)\n", p->thread_id);
4127 instr_alu_shl_mi_exec(struct rte_swx_pipeline *p)
4129 struct thread *t = &p->threads[p->thread_id];
4130 struct instruction *ip = t->ip;
4132 TRACE("[Thread %2u] shl (mi)\n", p->thread_id);
4142 instr_alu_shl_hi_exec(struct rte_swx_pipeline *p)
4144 struct thread *t = &p->threads[p->thread_id];
4145 struct instruction *ip = t->ip;
4147 TRACE("[Thread %2u] shl (hi)\n", p->thread_id);
/*
 * Interpreter handlers for the "shr" (shift right) instruction family
 * (plain, mh, hm, hh, mi, hi variants).
 */
4157 instr_alu_shr_exec(struct rte_swx_pipeline *p)
4159 struct thread *t = &p->threads[p->thread_id];
4160 struct instruction *ip = t->ip;
4162 TRACE("[Thread %2u] shr\n", p->thread_id);
4172 instr_alu_shr_mh_exec(struct rte_swx_pipeline *p)
4174 struct thread *t = &p->threads[p->thread_id];
4175 struct instruction *ip = t->ip;
4177 TRACE("[Thread %2u] shr (mh)\n", p->thread_id);
4187 instr_alu_shr_hm_exec(struct rte_swx_pipeline *p)
4189 struct thread *t = &p->threads[p->thread_id];
4190 struct instruction *ip = t->ip;
4192 TRACE("[Thread %2u] shr (hm)\n", p->thread_id);
4202 instr_alu_shr_hh_exec(struct rte_swx_pipeline *p)
4204 struct thread *t = &p->threads[p->thread_id];
4205 struct instruction *ip = t->ip;
4207 TRACE("[Thread %2u] shr (hh)\n", p->thread_id);
4217 instr_alu_shr_mi_exec(struct rte_swx_pipeline *p)
4219 struct thread *t = &p->threads[p->thread_id];
4220 struct instruction *ip = t->ip;
4222 TRACE("[Thread %2u] shr (mi)\n", p->thread_id);
4232 instr_alu_shr_hi_exec(struct rte_swx_pipeline *p)
4234 struct thread *t = &p->threads[p->thread_id];
4235 struct instruction *ip = t->ip;
4237 TRACE("[Thread %2u] shr (hi)\n", p->thread_id);
/*
 * Interpreter handlers for the "and" instruction family: plain, s (one
 * header operand, byte-swapping) and i (immediate) variants.
 */
4247 instr_alu_and_exec(struct rte_swx_pipeline *p)
4249 struct thread *t = &p->threads[p->thread_id];
4250 struct instruction *ip = t->ip;
4252 TRACE("[Thread %2u] and\n", p->thread_id);
4262 instr_alu_and_s_exec(struct rte_swx_pipeline *p)
4264 struct thread *t = &p->threads[p->thread_id];
4265 struct instruction *ip = t->ip;
4267 TRACE("[Thread %2u] and (s)\n", p->thread_id);
4277 instr_alu_and_i_exec(struct rte_swx_pipeline *p)
4279 struct thread *t = &p->threads[p->thread_id];
4280 struct instruction *ip = t->ip;
4282 TRACE("[Thread %2u] and (i)\n", p->thread_id);
/*
 * Interpreter handlers for the "or" instruction family: plain, s and i
 * variants; structure mirrors the and handlers above.
 */
4292 instr_alu_or_exec(struct rte_swx_pipeline *p)
4294 struct thread *t = &p->threads[p->thread_id];
4295 struct instruction *ip = t->ip;
4297 TRACE("[Thread %2u] or\n", p->thread_id);
4307 instr_alu_or_s_exec(struct rte_swx_pipeline *p)
4309 struct thread *t = &p->threads[p->thread_id];
4310 struct instruction *ip = t->ip;
4312 TRACE("[Thread %2u] or (s)\n", p->thread_id);
4322 instr_alu_or_i_exec(struct rte_swx_pipeline *p)
4324 struct thread *t = &p->threads[p->thread_id];
4325 struct instruction *ip = t->ip;
4327 TRACE("[Thread %2u] or (i)\n", p->thread_id);
/*
 * Interpreter handlers for the "xor" instruction family: plain, s and i
 * variants; structure mirrors the and/or handlers above.
 */
4337 instr_alu_xor_exec(struct rte_swx_pipeline *p)
4339 struct thread *t = &p->threads[p->thread_id];
4340 struct instruction *ip = t->ip;
4342 TRACE("[Thread %2u] xor\n", p->thread_id);
4352 instr_alu_xor_s_exec(struct rte_swx_pipeline *p)
4354 struct thread *t = &p->threads[p->thread_id];
4355 struct instruction *ip = t->ip;
4357 TRACE("[Thread %2u] xor (s)\n", p->thread_id);
4367 instr_alu_xor_i_exec(struct rte_swx_pipeline *p)
4369 struct thread *t = &p->threads[p->thread_id];
4370 struct instruction *ip = t->ip;
4372 TRACE("[Thread %2u] xor (i)\n", p->thread_id);
/*
 * Execute CKADD_FIELD: incrementally fold a header field (up to 64 bits,
 * masked to alu.src.n_bits) into a 16-bit one's-complement Internet
 * checksum (RFC 1071 / RFC 1624 style). The 64-bit source is reduced to
 * 16 bits by repeated end-around-carry folding, as proven step by step in
 * the inline comments below.
 */
4382 instr_alu_ckadd_field_exec(struct rte_swx_pipeline *p)
4384 struct thread *t = &p->threads[p->thread_id];
4385 struct instruction *ip = t->ip;
4386 uint8_t *dst_struct, *src_struct;
4387 uint16_t *dst16_ptr, dst;
4388 uint64_t *src64_ptr, src64, src64_mask, src;
4391 TRACE("[Thread %2u] ckadd (field)\n", p->thread_id);
4394 dst_struct = t->structs[ip->alu.dst.struct_id];
4395 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
4398 src_struct = t->structs[ip->alu.src.struct_id];
4399 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
4401 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
4402 src = src64 & src64_mask; /* Keep only the field's own bits. */
4407 /* The first input (r) is a 16-bit number. The second and the third
4408 * inputs are 32-bit numbers. In the worst case scenario, the sum of the
4409 * three numbers (output r) is a 34-bit number.
4411 r += (src >> 32) + (src & 0xFFFFFFFF);
4413 /* The first input is a 16-bit number. The second input is an 18-bit
4414 * number. In the worst case scenario, the sum of the two numbers is a
4417 r = (r & 0xFFFF) + (r >> 16);
4419 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
4420 * a 3-bit number (0 .. 7). Their sum is a 17-bit number (0 .. 0x10006).
4422 r = (r & 0xFFFF) + (r >> 16);
4424 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
4425 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
4426 * 0x10006), the output r is (0 .. 7). So no carry bit can be generated,
4427 * therefore the output r is always a 16-bit number.
4429 r = (r & 0xFFFF) + (r >> 16);
4434 *dst16_ptr = (uint16_t)r;
/*
 * Execute CKSUB_FIELD: incrementally remove a header field's contribution
 * from a 16-bit one's-complement Internet checksum (RFC 1624 style). A
 * large multiple of the 0xFFFF modulus is first added to the minuend so
 * the 2's-complement subtraction cannot underflow; the result is then
 * folded back to 16 bits, as proven step by step in the comments below.
 */
4441 instr_alu_cksub_field_exec(struct rte_swx_pipeline *p)
4443 struct thread *t = &p->threads[p->thread_id];
4444 struct instruction *ip = t->ip;
4445 uint8_t *dst_struct, *src_struct;
4446 uint16_t *dst16_ptr, dst;
4447 uint64_t *src64_ptr, src64, src64_mask, src;
4450 TRACE("[Thread %2u] cksub (field)\n", p->thread_id);
4453 dst_struct = t->structs[ip->alu.dst.struct_id];
4454 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
4457 src_struct = t->structs[ip->alu.src.struct_id];
4458 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
4460 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
4461 src = src64 & src64_mask; /* Keep only the field's own bits. */
4466 /* Subtraction in 1's complement arithmetic (i.e. a '- b) is the same as
4467 * the following sequence of operations in 2's complement arithmetic:
4468 * a '- b = (a - b) % 0xFFFF.
4470 * In order to prevent an underflow for the below subtraction, in which
4471 * a 33-bit number (the subtrahend) is taken out of a 16-bit number (the
4472 * minuend), we first add a multiple of the 0xFFFF modulus to the
4473 * minuend. The number we add to the minuend needs to be a 34-bit number
4474 * or higher, so for readability reasons we picked the 36-bit multiple.
4475 * We are effectively turning the 16-bit minuend into a 36-bit number:
4476 * (a - b) % 0xFFFF = (a + 0xFFFF00000 - b) % 0xFFFF.
4478 r += 0xFFFF00000ULL; /* The output r is a 36-bit number. */
4480 /* A 33-bit number is subtracted from a 36-bit number (the input r). The
4481 * result (the output r) is a 36-bit number.
4483 r -= (src >> 32) + (src & 0xFFFFFFFF);
4485 /* The first input is a 16-bit number. The second input is a 20-bit
4486 * number. Their sum is a 21-bit number.
4488 r = (r & 0xFFFF) + (r >> 16);
4490 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
4491 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1001E).
4493 r = (r & 0xFFFF) + (r >> 16);
4495 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
4496 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
4497 * 0x1001E), the output r is (0 .. 31). So no carry bit can be
4498 * generated, therefore the output r is always a 16-bit number.
4500 r = (r & 0xFFFF) + (r >> 16);
4505 *dst16_ptr = (uint16_t)r;
/*
 * Execute CKADD_STRUCT20: compute the Internet checksum over a 20-byte
 * header (the option-less IPv4 header) with a fully unrolled sum of its
 * five 32-bit words, then fold to 16 bits. The 0xFFFF substitution at the
 * end maps the all-zeros result to the all-ones encoding, per the
 * one's-complement checksum convention.
 */
4512 instr_alu_ckadd_struct20_exec(struct rte_swx_pipeline *p)
4514 struct thread *t = &p->threads[p->thread_id];
4515 struct instruction *ip = t->ip;
4516 uint8_t *dst_struct, *src_struct;
4517 uint16_t *dst16_ptr;
4518 uint32_t *src32_ptr;
4521 TRACE("[Thread %2u] ckadd (struct of 20 bytes)\n", p->thread_id);
4524 dst_struct = t->structs[ip->alu.dst.struct_id];
4525 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
4527 src_struct = t->structs[ip->alu.src.struct_id];
4528 src32_ptr = (uint32_t *)&src_struct[0];
4530 r0 = src32_ptr[0]; /* r0 is a 32-bit number. */
4531 r1 = src32_ptr[1]; /* r1 is a 32-bit number. */
4532 r0 += src32_ptr[2]; /* The output r0 is a 33-bit number. */
4533 r1 += src32_ptr[3]; /* The output r1 is a 33-bit number. */
4534 r0 += r1 + src32_ptr[4]; /* The output r0 is a 35-bit number. */
4536 /* The first input is a 16-bit number. The second input is a 19-bit
4537 * number. Their sum is a 20-bit number.
4539 r0 = (r0 & 0xFFFF) + (r0 >> 16);
4541 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
4542 * a 4-bit number (0 .. 15). The sum is a 17-bit number (0 .. 0x1000E).
4544 r0 = (r0 & 0xFFFF) + (r0 >> 16);
4546 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
4547 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
4548 * 0x1000E), the output r is (0 .. 15). So no carry bit can be
4549 * generated, therefore the output r is always a 16-bit number.
4551 r0 = (r0 & 0xFFFF) + (r0 >> 16);
4554 r0 = r0 ? r0 : 0xFFFF; /* Zero result is encoded as 0xFFFF. */
4556 *dst16_ptr = (uint16_t)r0;
/*
 * Execute CKADD_STRUCT: generic Internet checksum over an arbitrary-size
 * header, summing it as 32-bit words (alu.src.n_bits / 32 iterations)
 * and then folding the accumulator to 16 bits.
 * NOTE(review): fixed the first fold comment below — the worst-case sum
 * of 0xFFFF and a 5-bit value is 0x1001E (matching the final comment),
 * not 0x1000E as previously stated.
 */
4563 instr_alu_ckadd_struct_exec(struct rte_swx_pipeline *p)
4565 struct thread *t = &p->threads[p->thread_id];
4566 struct instruction *ip = t->ip;
4567 uint8_t *dst_struct, *src_struct;
4568 uint16_t *dst16_ptr;
4569 uint32_t *src32_ptr;
4573 TRACE("[Thread %2u] ckadd (struct)\n", p->thread_id);
4576 dst_struct = t->structs[ip->alu.dst.struct_id];
4577 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
4579 src_struct = t->structs[ip->alu.src.struct_id];
4580 src32_ptr = (uint32_t *)&src_struct[0];
4582 /* The max number of 32-bit words in a 256-byte header is 8 = 2^3.
4583 * Therefore, in the worst case scenario, a 35-bit number is added to a
4584 * 16-bit number (the input r), so the output r is 36-bit number.
4586 for (i = 0; i < ip->alu.src.n_bits / 32; i++, src32_ptr++)
4589 /* The first input is a 16-bit number. The second input is a 20-bit
4590 * number. Their sum is a 21-bit number.
4592 r = (r & 0xFFFF) + (r >> 16);
4594 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
4595 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1001E).
4597 r = (r & 0xFFFF) + (r >> 16);
4599 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
4600 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
4601 * 0x1001E), the output r is (0 .. 31). So no carry bit can be
4602 * generated, therefore the output r is always a 16-bit number.
4604 r = (r & 0xFFFF) + (r >> 16);
4609 *dst16_ptr = (uint16_t)r;
/* Forward declaration: action lookup by name, needed by the jmpa/jmpna
 * translators below before the definition appears in this file.
 */
4618 static struct action *
4619 action_find(struct rte_swx_pipeline *p, const char *name);
/*
 * Translate unconditional "jmp LABEL". The label text is saved in the
 * per-instruction data; the target instruction pointer is patched in a
 * later label-resolution pass.
 */
4622 instr_jmp_translate(struct rte_swx_pipeline *p __rte_unused,
4623 struct action *action __rte_unused,
4626 struct instruction *instr,
4627 struct instruction_data *data)
4629 CHECK(n_tokens == 2, EINVAL);
4631 strcpy(data->jmp_label, tokens[1]);
4633 instr->type = INSTR_JMP;
4634 instr->jmp.ip = NULL; /* Resolved later. */
/*
 * Translate "jmpv LABEL h.header" / "jmpnv LABEL h.header": jump when the
 * named header is valid (resp. invalid). The header is resolved now; the
 * label is resolved in a later pass.
 */
4639 instr_jmp_valid_translate(struct rte_swx_pipeline *p,
4640 struct action *action __rte_unused,
4643 struct instruction *instr,
4644 struct instruction_data *data)
4648 CHECK(n_tokens == 3, EINVAL);
4650 strcpy(data->jmp_label, tokens[1]);
4652 h = header_parse(p, tokens[2]);
4655 instr->type = INSTR_JMP_VALID;
4656 instr->jmp.ip = NULL; /* Resolved later. */
4657 instr->jmp.header_id = h->id;
4662 instr_jmp_invalid_translate(struct rte_swx_pipeline *p,
4663 struct action *action __rte_unused,
4666 struct instruction *instr,
4667 struct instruction_data *data)
4671 CHECK(n_tokens == 3, EINVAL);
4673 strcpy(data->jmp_label, tokens[1]);
4675 h = header_parse(p, tokens[2]);
4678 instr->type = INSTR_JMP_INVALID;
4679 instr->jmp.ip = NULL; /* Resolved later. */
4680 instr->jmp.header_id = h->id;
/*
 * Translate "jmph LABEL" / "jmpnh LABEL": jump on table lookup hit
 * (resp. miss). Only legal outside actions (CHECK(!action, ...)), since
 * the hit/miss flag belongs to the enclosing table-apply context.
 */
4685 instr_jmp_hit_translate(struct rte_swx_pipeline *p __rte_unused,
4686 struct action *action,
4689 struct instruction *instr,
4690 struct instruction_data *data)
4692 CHECK(!action, EINVAL); /* Not allowed inside an action. */
4693 CHECK(n_tokens == 2, EINVAL);
4695 strcpy(data->jmp_label, tokens[1]);
4697 instr->type = INSTR_JMP_HIT;
4698 instr->jmp.ip = NULL; /* Resolved later. */
4703 instr_jmp_miss_translate(struct rte_swx_pipeline *p __rte_unused,
4704 struct action *action,
4707 struct instruction *instr,
4708 struct instruction_data *data)
4710 CHECK(!action, EINVAL); /* Not allowed inside an action. */
4711 CHECK(n_tokens == 2, EINVAL);
4713 strcpy(data->jmp_label, tokens[1]);
4715 instr->type = INSTR_JMP_MISS;
4716 instr->jmp.ip = NULL; /* Resolved later. */
/*
 * Translate "jmpa LABEL ACTION" / "jmpna LABEL ACTION": jump when the
 * last table lookup selected (resp. did not select) the named action.
 * Only legal outside actions; the action name is resolved to its id now.
 */
4721 instr_jmp_action_hit_translate(struct rte_swx_pipeline *p,
4722 struct action *action,
4725 struct instruction *instr,
4726 struct instruction_data *data)
4730 CHECK(!action, EINVAL); /* Not allowed inside an action. */
4731 CHECK(n_tokens == 3, EINVAL);
4733 strcpy(data->jmp_label, tokens[1]);
4735 a = action_find(p, tokens[2]);
4738 instr->type = INSTR_JMP_ACTION_HIT;
4739 instr->jmp.ip = NULL; /* Resolved later. */
4740 instr->jmp.action_id = a->id;
4745 instr_jmp_action_miss_translate(struct rte_swx_pipeline *p,
4746 struct action *action,
4749 struct instruction *instr,
4750 struct instruction_data *data)
4754 CHECK(!action, EINVAL); /* Not allowed inside an action. */
4755 CHECK(n_tokens == 3, EINVAL);
4757 strcpy(data->jmp_label, tokens[1]);
4759 a = action_find(p, tokens[2]);
4762 instr->type = INSTR_JMP_ACTION_MISS;
4763 instr->jmp.ip = NULL; /* Resolved later. */
4764 instr->jmp.action_id = a->id;
/*
 * Translate "jmpeq LABEL a b": jump when a == b. JMP_EQ_S is selected
 * when exactly one operand is a header field (byte-order mismatch);
 * JMP_EQ_I when b is an immediate, pre-swapped with htonl() so it can be
 * compared against header data directly.
 */
4769 instr_jmp_eq_translate(struct rte_swx_pipeline *p,
4770 struct action *action,
4773 struct instruction *instr,
4774 struct instruction_data *data)
4776 char *a = tokens[2], *b = tokens[3];
4777 struct field *fa, *fb;
4778 uint32_t a_struct_id, b_struct_id, b_val;
4780 CHECK(n_tokens == 4, EINVAL);
4782 strcpy(data->jmp_label, tokens[1]);
4784 fa = struct_field_parse(p, action, a, &a_struct_id);
4787 /* JMP_EQ or JMP_EQ_S. */
4788 fb = struct_field_parse(p, action, b, &b_struct_id);
4790 instr->type = INSTR_JMP_EQ;
4791 if ((a[0] == 'h' && b[0] != 'h') ||
4792 (a[0] != 'h' && b[0] == 'h'))
4793 instr->type = INSTR_JMP_EQ_S; /* Exactly one header operand. */
4794 instr->jmp.ip = NULL; /* Resolved later. */
4796 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
4797 instr->jmp.a.n_bits = fa->n_bits;
4798 instr->jmp.a.offset = fa->offset / 8;
4799 instr->jmp.b.struct_id = (uint8_t)b_struct_id;
4800 instr->jmp.b.n_bits = fb->n_bits;
4801 instr->jmp.b.offset = fb->offset / 8;
4806 b_val = strtoul(b, &b, 0);
4807 CHECK(!b[0], EINVAL); /* Reject trailing garbage after the number. */
4810 b_val = htonl(b_val);
4812 instr->type = INSTR_JMP_EQ_I;
4813 instr->jmp.ip = NULL; /* Resolved later. */
4814 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
4815 instr->jmp.a.n_bits = fa->n_bits;
4816 instr->jmp.a.offset = fa->offset / 8;
4817 instr->jmp.b_val = (uint32_t)b_val;
/*
 * Translate "jmpneq LABEL a b": jump when a != b. Mirror image of the
 * jmpeq translator (NEQ / NEQ_S / NEQ_I variant selection).
 */
4822 instr_jmp_neq_translate(struct rte_swx_pipeline *p,
4823 struct action *action,
4826 struct instruction *instr,
4827 struct instruction_data *data)
4829 char *a = tokens[2], *b = tokens[3];
4830 struct field *fa, *fb;
4831 uint32_t a_struct_id, b_struct_id, b_val;
4833 CHECK(n_tokens == 4, EINVAL);
4835 strcpy(data->jmp_label, tokens[1]);
4837 fa = struct_field_parse(p, action, a, &a_struct_id);
4840 /* JMP_NEQ or JMP_NEQ_S. */
4841 fb = struct_field_parse(p, action, b, &b_struct_id);
4843 instr->type = INSTR_JMP_NEQ;
4844 if ((a[0] == 'h' && b[0] != 'h') ||
4845 (a[0] != 'h' && b[0] == 'h'))
4846 instr->type = INSTR_JMP_NEQ_S; /* Exactly one header operand. */
4847 instr->jmp.ip = NULL; /* Resolved later. */
4849 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
4850 instr->jmp.a.n_bits = fa->n_bits;
4851 instr->jmp.a.offset = fa->offset / 8;
4852 instr->jmp.b.struct_id = (uint8_t)b_struct_id;
4853 instr->jmp.b.n_bits = fb->n_bits;
4854 instr->jmp.b.offset = fb->offset / 8;
4859 b_val = strtoul(b, &b, 0);
4860 CHECK(!b[0], EINVAL); /* Reject trailing garbage after the number. */
4863 b_val = htonl(b_val);
4865 instr->type = INSTR_JMP_NEQ_I;
4866 instr->jmp.ip = NULL; /* Resolved later. */
4867 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
4868 instr->jmp.a.n_bits = fa->n_bits;
4869 instr->jmp.a.offset = fa->offset / 8;
4870 instr->jmp.b_val = (uint32_t)b_val;
/*
 * Translate "jmplt LABEL a b": jump when a < b. Ordered comparison is
 * byte-order sensitive, so the h/m prefixes select LT_HM/MH/HH like the
 * arithmetic ALU instructions; a numeric b selects LT_MI/LT_HI.
 */
4875 instr_jmp_lt_translate(struct rte_swx_pipeline *p,
4876 struct action *action,
4879 struct instruction *instr,
4880 struct instruction_data *data)
4882 char *a = tokens[2], *b = tokens[3];
4883 struct field *fa, *fb;
4884 uint32_t a_struct_id, b_struct_id, b_val;
4886 CHECK(n_tokens == 4, EINVAL);
4888 strcpy(data->jmp_label, tokens[1]);
4890 fa = struct_field_parse(p, action, a, &a_struct_id);
4893 /* JMP_LT, JMP_LT_MH, JMP_LT_HM, JMP_LT_HH. */
4894 fb = struct_field_parse(p, action, b, &b_struct_id);
4896 instr->type = INSTR_JMP_LT;
4897 if (a[0] == 'h' && b[0] == 'm')
4898 instr->type = INSTR_JMP_LT_HM;
4899 if (a[0] == 'm' && b[0] == 'h')
4900 instr->type = INSTR_JMP_LT_MH;
4901 if (a[0] == 'h' && b[0] == 'h')
4902 instr->type = INSTR_JMP_LT_HH;
4903 instr->jmp.ip = NULL; /* Resolved later. */
4905 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
4906 instr->jmp.a.n_bits = fa->n_bits;
4907 instr->jmp.a.offset = fa->offset / 8;
4908 instr->jmp.b.struct_id = (uint8_t)b_struct_id;
4909 instr->jmp.b.n_bits = fb->n_bits;
4910 instr->jmp.b.offset = fb->offset / 8;
4914 /* JMP_LT_MI, JMP_LT_HI. */
4915 b_val = strtoul(b, &b, 0);
4916 CHECK(!b[0], EINVAL); /* Reject trailing garbage after the number. */
4918 instr->type = INSTR_JMP_LT_MI;
4920 instr->type = INSTR_JMP_LT_HI;
4921 instr->jmp.ip = NULL; /* Resolved later. */
4923 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
4924 instr->jmp.a.n_bits = fa->n_bits;
4925 instr->jmp.a.offset = fa->offset / 8;
4926 instr->jmp.b_val = (uint32_t)b_val;
/*
 * Translate "jmpgt LABEL a b": jump when a > b. Mirror image of the
 * jmplt translator (GT / GT_HM / GT_MH / GT_HH / GT_MI / GT_HI).
 */
4931 instr_jmp_gt_translate(struct rte_swx_pipeline *p,
4932 struct action *action,
4935 struct instruction *instr,
4936 struct instruction_data *data)
4938 char *a = tokens[2], *b = tokens[3];
4939 struct field *fa, *fb;
4940 uint32_t a_struct_id, b_struct_id, b_val;
4942 CHECK(n_tokens == 4, EINVAL);
4944 strcpy(data->jmp_label, tokens[1]);
4946 fa = struct_field_parse(p, action, a, &a_struct_id);
4949 /* JMP_GT, JMP_GT_MH, JMP_GT_HM, JMP_GT_HH. */
4950 fb = struct_field_parse(p, action, b, &b_struct_id);
4952 instr->type = INSTR_JMP_GT;
4953 if (a[0] == 'h' && b[0] == 'm')
4954 instr->type = INSTR_JMP_GT_HM;
4955 if (a[0] == 'm' && b[0] == 'h')
4956 instr->type = INSTR_JMP_GT_MH;
4957 if (a[0] == 'h' && b[0] == 'h')
4958 instr->type = INSTR_JMP_GT_HH;
4959 instr->jmp.ip = NULL; /* Resolved later. */
4961 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
4962 instr->jmp.a.n_bits = fa->n_bits;
4963 instr->jmp.a.offset = fa->offset / 8;
4964 instr->jmp.b.struct_id = (uint8_t)b_struct_id;
4965 instr->jmp.b.n_bits = fb->n_bits;
4966 instr->jmp.b.offset = fb->offset / 8;
4970 /* JMP_GT_MI, JMP_GT_HI. */
4971 b_val = strtoul(b, &b, 0);
4972 CHECK(!b[0], EINVAL); /* Reject trailing garbage after the number. */
4974 instr->type = INSTR_JMP_GT_MI;
4976 instr->type = INSTR_JMP_GT_HI;
4977 instr->jmp.ip = NULL; /* Resolved later. */
4979 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
4980 instr->jmp.a.n_bits = fa->n_bits;
4981 instr->jmp.a.offset = fa->offset / 8;
4982 instr->jmp.b_val = (uint32_t)b_val;
/*
 * Interpreter handlers for the simple jump instructions. Branchless where
 * possible: the hit/miss variants index a two-element next-ip array with
 * the thread's hit flag instead of branching; the valid/action variants
 * use a ternary on the tested condition.
 */
4987 instr_jmp_exec(struct rte_swx_pipeline *p)
4989 struct thread *t = &p->threads[p->thread_id];
4990 struct instruction *ip = t->ip;
4992 TRACE("[Thread %2u] jmp\n", p->thread_id);
4994 thread_ip_set(t, ip->jmp.ip); /* Unconditional: always take the target. */
4998 instr_jmp_valid_exec(struct rte_swx_pipeline *p)
5000 struct thread *t = &p->threads[p->thread_id];
5001 struct instruction *ip = t->ip;
5002 uint32_t header_id = ip->jmp.header_id;
5004 TRACE("[Thread %2u] jmpv\n", p->thread_id);
5006 t->ip = HEADER_VALID(t, header_id) ? ip->jmp.ip : (t->ip + 1);
5010 instr_jmp_invalid_exec(struct rte_swx_pipeline *p)
5012 struct thread *t = &p->threads[p->thread_id];
5013 struct instruction *ip = t->ip;
5014 uint32_t header_id = ip->jmp.header_id;
5016 TRACE("[Thread %2u] jmpnv\n", p->thread_id);
5018 t->ip = HEADER_VALID(t, header_id) ? (t->ip + 1) : ip->jmp.ip;
5022 instr_jmp_hit_exec(struct rte_swx_pipeline *p)
5024 struct thread *t = &p->threads[p->thread_id];
5025 struct instruction *ip = t->ip;
5026 struct instruction *ip_next[] = {t->ip + 1, ip->jmp.ip}; /* [miss, hit] */
5028 TRACE("[Thread %2u] jmph\n", p->thread_id);
5030 t->ip = ip_next[t->hit];
5034 instr_jmp_miss_exec(struct rte_swx_pipeline *p)
5036 struct thread *t = &p->threads[p->thread_id];
5037 struct instruction *ip = t->ip;
5038 struct instruction *ip_next[] = {ip->jmp.ip, t->ip + 1}; /* [miss, hit] */
5040 TRACE("[Thread %2u] jmpnh\n", p->thread_id);
5042 t->ip = ip_next[t->hit];
5046 instr_jmp_action_hit_exec(struct rte_swx_pipeline *p)
5048 struct thread *t = &p->threads[p->thread_id];
5049 struct instruction *ip = t->ip;
5051 TRACE("[Thread %2u] jmpa\n", p->thread_id);
5053 t->ip = (ip->jmp.action_id == t->action_id) ? ip->jmp.ip : (t->ip + 1);
5057 instr_jmp_action_miss_exec(struct rte_swx_pipeline *p)
5059 struct thread *t = &p->threads[p->thread_id];
5060 struct instruction *ip = t->ip;
5062 TRACE("[Thread %2u] jmpna\n", p->thread_id);
5064 t->ip = (ip->jmp.action_id == t->action_id) ? (t->ip + 1) : ip->jmp.ip;
/* jmpeq: jump when the two operands compare equal (generic operand form). */
5068 instr_jmp_eq_exec(struct rte_swx_pipeline *p)
5070 struct thread *t = &p->threads[p->thread_id];
5071 struct instruction *ip = t->ip;
5073 TRACE("[Thread %2u] jmpeq\n", p->thread_id);
/* jmpeq (s): operand variant handled by JMP_CMP_S (byte-order-sensitive
 * form -- exact semantics defined by the JMP_CMP_S macro, not visible here). */
5079 instr_jmp_eq_s_exec(struct rte_swx_pipeline *p)
5081 struct thread *t = &p->threads[p->thread_id];
5082 struct instruction *ip = t->ip;
5084 TRACE("[Thread %2u] jmpeq (s)\n", p->thread_id);
5086 JMP_CMP_S(t, ip, ==);
/* jmpeq (i): second operand is an immediate (ip->jmp.b_val). */
5090 instr_jmp_eq_i_exec(struct rte_swx_pipeline *p)
5092 struct thread *t = &p->threads[p->thread_id];
5093 struct instruction *ip = t->ip;
5095 TRACE("[Thread %2u] jmpeq (i)\n", p->thread_id);
5097 JMP_CMP_I(t, ip, ==);
/* jmpneq family: jump when the operands differ; same operand variants. */
5101 instr_jmp_neq_exec(struct rte_swx_pipeline *p)
5103 struct thread *t = &p->threads[p->thread_id];
5104 struct instruction *ip = t->ip;
5106 TRACE("[Thread %2u] jmpneq\n", p->thread_id);
5112 instr_jmp_neq_s_exec(struct rte_swx_pipeline *p)
5114 struct thread *t = &p->threads[p->thread_id];
5115 struct instruction *ip = t->ip;
5117 TRACE("[Thread %2u] jmpneq (s)\n", p->thread_id);
5119 JMP_CMP_S(t, ip, !=);
5123 instr_jmp_neq_i_exec(struct rte_swx_pipeline *p)
5125 struct thread *t = &p->threads[p->thread_id];
5126 struct instruction *ip = t->ip;
5128 TRACE("[Thread %2u] jmpneq (i)\n", p->thread_id);
5130 JMP_CMP_I(t, ip, !=);
/*
 * jmplt family: jump when a < b. The suffix encodes the operand kinds
 * (m = metadata/action-data, h = header field, i = immediate); the byte-order
 * handling for each combination lives in the matching JMP_CMP_* macro.
 */
5134 instr_jmp_lt_exec(struct rte_swx_pipeline *p)
5136 struct thread *t = &p->threads[p->thread_id];
5137 struct instruction *ip = t->ip;
5139 TRACE("[Thread %2u] jmplt\n", p->thread_id);
5145 instr_jmp_lt_mh_exec(struct rte_swx_pipeline *p)
5147 struct thread *t = &p->threads[p->thread_id];
5148 struct instruction *ip = t->ip;
5150 TRACE("[Thread %2u] jmplt (mh)\n", p->thread_id);
5152 JMP_CMP_MH(t, ip, <);
5156 instr_jmp_lt_hm_exec(struct rte_swx_pipeline *p)
5158 struct thread *t = &p->threads[p->thread_id];
5159 struct instruction *ip = t->ip;
5161 TRACE("[Thread %2u] jmplt (hm)\n", p->thread_id);
5163 JMP_CMP_HM(t, ip, <);
5167 instr_jmp_lt_hh_exec(struct rte_swx_pipeline *p)
5169 struct thread *t = &p->threads[p->thread_id];
5170 struct instruction *ip = t->ip;
5172 TRACE("[Thread %2u] jmplt (hh)\n", p->thread_id);
5174 JMP_CMP_HH(t, ip, <);
5178 instr_jmp_lt_mi_exec(struct rte_swx_pipeline *p)
5180 struct thread *t = &p->threads[p->thread_id];
5181 struct instruction *ip = t->ip;
5183 TRACE("[Thread %2u] jmplt (mi)\n", p->thread_id);
5185 JMP_CMP_MI(t, ip, <);
5189 instr_jmp_lt_hi_exec(struct rte_swx_pipeline *p)
5191 struct thread *t = &p->threads[p->thread_id];
5192 struct instruction *ip = t->ip;
5194 TRACE("[Thread %2u] jmplt (hi)\n", p->thread_id);
5196 JMP_CMP_HI(t, ip, <);
/*
 * jmpgt family: jump when a > b. Mirrors the jmplt family above; only the
 * comparison operator passed to the JMP_CMP_* macros differs.
 */
5200 instr_jmp_gt_exec(struct rte_swx_pipeline *p)
5202 struct thread *t = &p->threads[p->thread_id];
5203 struct instruction *ip = t->ip;
5205 TRACE("[Thread %2u] jmpgt\n", p->thread_id);
5211 instr_jmp_gt_mh_exec(struct rte_swx_pipeline *p)
5213 struct thread *t = &p->threads[p->thread_id];
5214 struct instruction *ip = t->ip;
5216 TRACE("[Thread %2u] jmpgt (mh)\n", p->thread_id);
5218 JMP_CMP_MH(t, ip, >);
5222 instr_jmp_gt_hm_exec(struct rte_swx_pipeline *p)
5224 struct thread *t = &p->threads[p->thread_id];
5225 struct instruction *ip = t->ip;
5227 TRACE("[Thread %2u] jmpgt (hm)\n", p->thread_id);
5229 JMP_CMP_HM(t, ip, >);
5233 instr_jmp_gt_hh_exec(struct rte_swx_pipeline *p)
5235 struct thread *t = &p->threads[p->thread_id];
5236 struct instruction *ip = t->ip;
5238 TRACE("[Thread %2u] jmpgt (hh)\n", p->thread_id);
5240 JMP_CMP_HH(t, ip, >);
5244 instr_jmp_gt_mi_exec(struct rte_swx_pipeline *p)
5246 struct thread *t = &p->threads[p->thread_id];
5247 struct instruction *ip = t->ip;
5249 TRACE("[Thread %2u] jmpgt (mi)\n", p->thread_id);
5251 JMP_CMP_MI(t, ip, >);
5255 instr_jmp_gt_hi_exec(struct rte_swx_pipeline *p)
5257 struct thread *t = &p->threads[p->thread_id];
5258 struct instruction *ip = t->ip;
5260 TRACE("[Thread %2u] jmpgt (hi)\n", p->thread_id);
5262 JMP_CMP_HI(t, ip, >);
/*
 * Translate the "return" instruction. Only legal inside an action (the
 * action argument must be non-NULL) and takes no operands (n_tokens == 1).
 */
5269 instr_return_translate(struct rte_swx_pipeline *p __rte_unused,
5270 struct action *action,
5271 char **tokens __rte_unused,
5273 struct instruction *instr,
5274 struct instruction_data *data __rte_unused)
5276 CHECK(action, EINVAL);
5277 CHECK(n_tokens == 1, EINVAL);
5279 instr->type = INSTR_RETURN;
/* Execute "return": end the current action and resume the caller's flow. */
5284 instr_return_exec(struct rte_swx_pipeline *p)
5286 struct thread *t = &p->threads[p->thread_id];
5288 TRACE("[Thread %2u] return\n", p->thread_id);
/*
 * Translate a single instruction string into (instr, data): tokenize the
 * string in place with strtok_r, peel off an optional "label :" prefix into
 * data->label, then dispatch on the mnemonic token to the per-instruction
 * translate routine. Returns 0 on success or a negative errno via CHECK.
 */
5294 instr_translate(struct rte_swx_pipeline *p,
5295 struct action *action,
5297 struct instruction *instr,
5298 struct instruction_data *data)
5300 char *tokens[RTE_SWX_INSTRUCTION_TOKENS_MAX];
5301 int n_tokens = 0, tpos = 0;
5303 /* Parse the instruction string into tokens. */
5307 token = strtok_r(string, " \t\v", &string);
5311 CHECK(n_tokens < RTE_SWX_INSTRUCTION_TOKENS_MAX, EINVAL);
5312 CHECK_NAME(token, EINVAL);
5314 tokens[n_tokens] = token;
5318 CHECK(n_tokens, EINVAL);
5320 /* Handle the optional instruction label. */
5321 if ((n_tokens >= 2) && !strcmp(tokens[1], ":")) {
/* Safe: CHECK_NAME above bounds every token below RTE_SWX_NAME_SIZE. */
5322 strcpy(data->label, tokens[0]);
5325 CHECK(n_tokens - tpos, EINVAL);
5328 /* Identify the instruction type. */
5329 if (!strcmp(tokens[tpos], "rx"))
5330 return instr_rx_translate(p,
5337 if (!strcmp(tokens[tpos], "tx"))
5338 return instr_tx_translate(p,
5345 if (!strcmp(tokens[tpos], "extract"))
5346 return instr_hdr_extract_translate(p,
5353 if (!strcmp(tokens[tpos], "emit"))
5354 return instr_hdr_emit_translate(p,
5361 if (!strcmp(tokens[tpos], "validate"))
5362 return instr_hdr_validate_translate(p,
5369 if (!strcmp(tokens[tpos], "invalidate"))
5370 return instr_hdr_invalidate_translate(p,
5377 if (!strcmp(tokens[tpos], "mov"))
5378 return instr_mov_translate(p,
5385 if (!strcmp(tokens[tpos], "dma"))
5386 return instr_dma_translate(p,
5393 if (!strcmp(tokens[tpos], "add"))
5394 return instr_alu_add_translate(p,
5401 if (!strcmp(tokens[tpos], "sub"))
5402 return instr_alu_sub_translate(p,
5409 if (!strcmp(tokens[tpos], "ckadd"))
5410 return instr_alu_ckadd_translate(p,
5417 if (!strcmp(tokens[tpos], "cksub"))
5418 return instr_alu_cksub_translate(p,
5425 if (!strcmp(tokens[tpos], "and"))
5426 return instr_alu_and_translate(p,
5433 if (!strcmp(tokens[tpos], "or"))
5434 return instr_alu_or_translate(p,
5441 if (!strcmp(tokens[tpos], "xor"))
5442 return instr_alu_xor_translate(p,
5449 if (!strcmp(tokens[tpos], "shl"))
5450 return instr_alu_shl_translate(p,
5457 if (!strcmp(tokens[tpos], "shr"))
5458 return instr_alu_shr_translate(p,
5465 if (!strcmp(tokens[tpos], "table"))
5466 return instr_table_translate(p,
5473 if (!strcmp(tokens[tpos], "extern"))
5474 return instr_extern_translate(p,
5481 if (!strcmp(tokens[tpos], "jmp"))
5482 return instr_jmp_translate(p,
5489 if (!strcmp(tokens[tpos], "jmpv"))
5490 return instr_jmp_valid_translate(p,
5497 if (!strcmp(tokens[tpos], "jmpnv"))
5498 return instr_jmp_invalid_translate(p,
5505 if (!strcmp(tokens[tpos], "jmph"))
5506 return instr_jmp_hit_translate(p,
5513 if (!strcmp(tokens[tpos], "jmpnh"))
5514 return instr_jmp_miss_translate(p,
5521 if (!strcmp(tokens[tpos], "jmpa"))
5522 return instr_jmp_action_hit_translate(p,
5529 if (!strcmp(tokens[tpos], "jmpna"))
5530 return instr_jmp_action_miss_translate(p,
5537 if (!strcmp(tokens[tpos], "jmpeq"))
5538 return instr_jmp_eq_translate(p,
5545 if (!strcmp(tokens[tpos], "jmpneq"))
5546 return instr_jmp_neq_translate(p,
5553 if (!strcmp(tokens[tpos], "jmplt"))
5554 return instr_jmp_lt_translate(p,
5561 if (!strcmp(tokens[tpos], "jmpgt"))
5562 return instr_jmp_gt_translate(p,
5569 if (!strcmp(tokens[tpos], "return"))
5570 return instr_return_translate(p,
/* Linear search for the instruction_data entry carrying the given label. */
static struct instruction_data *
5581 label_find(struct instruction_data *data, uint32_t n, const char *label)
5585 for (i = 0; i < n; i++)
5586 if (!strcmp(label, data[i].label))
/* Count how many instructions jump to the given label (its "users"). */
5593 label_is_used(struct instruction_data *data, uint32_t n, const char *label)
5595 uint32_t count = 0, i;
5600 for (i = 0; i < n; i++)
5601 if (!strcmp(label, data[i].jmp_label))
/*
 * Validate labels across the program: every label must be unique, and each
 * instruction records how many jumps target it (n_users), which the pattern
 * optimizer later uses to avoid folding across jump targets.
 */
instr_label_check(struct instruction_data *instruction_data,
5609 uint32_t n_instructions)
5613 /* Check that all instruction labels are unique. */
5614 for (i = 0; i < n_instructions; i++) {
5615 struct instruction_data *data = &instruction_data[i];
5616 char *label = data->label;
5622 for (j = i + 1; j < n_instructions; j++)
5623 CHECK(strcmp(label, data[j].label), EINVAL);
5626 /* Get users for each instruction label. */
5627 for (i = 0; i < n_instructions; i++) {
5628 struct instruction_data *data = &instruction_data[i];
5629 char *label = data->label;
5631 data->n_users = label_is_used(instruction_data,
/*
 * Resolve every jump instruction's target: look up its label among the
 * instruction labels and patch jmp.ip to the address of the target
 * instruction. Fails with EINVAL if a jump references an unknown label.
 */
instr_jmp_resolve(struct instruction *instructions,
5641 struct instruction_data *instruction_data,
5642 uint32_t n_instructions)
5646 for (i = 0; i < n_instructions; i++) {
5647 struct instruction *instr = &instructions[i];
5648 struct instruction_data *data = &instruction_data[i];
5649 struct instruction_data *found;
5651 if (!instruction_is_jmp(instr))
5654 found = label_find(instruction_data,
5657 CHECK(found, EINVAL);
/* Pointer arithmetic converts the found entry back to an index. */
5659 instr->jmp.ip = &instructions[found - instruction_data];
/*
 * Structural sanity checks on a translated program. For the main pipeline
 * program: must start with rx, contain at least one tx, and end with tx or
 * an unconditional jmp. For an action program (second section below):
 * must contain at least one return or tx.
 */
instr_verify(struct rte_swx_pipeline *p __rte_unused,
5668 struct instruction *instr,
5669 struct instruction_data *data __rte_unused,
5670 uint32_t n_instructions)
5673 enum instruction_type type;
5676 /* Check that the first instruction is rx. */
5677 CHECK(instr[0].type == INSTR_RX, EINVAL);
5679 /* Check that there is at least one tx instruction. */
5680 for (i = 0; i < n_instructions; i++) {
5681 type = instr[i].type;
5683 if (type == INSTR_TX)
5686 CHECK(i < n_instructions, EINVAL);
5688 /* Check that the last instruction is either tx or unconditional
5691 type = instr[n_instructions - 1].type;
5692 CHECK((type == INSTR_TX) || (type == INSTR_JMP), EINVAL);
/* Action-program checks below (branch selection elided in this view). */
5696 enum instruction_type type;
5699 /* Check that there is at least one return or tx instruction. */
5700 for (i = 0; i < n_instructions; i++) {
5701 type = instr[i].type;
5703 if ((type == INSTR_RETURN) || (type == INSTR_TX))
5706 CHECK(i < n_instructions, EINVAL);
/*
 * Detect a run of consecutive "extract" instructions that can be fused into
 * a single multi-header extract. The run is capped at the per-instruction
 * header_id array size and must not cross a jump target (n_users != 0),
 * since fused instructions can no longer be jumped into individually.
 */
instr_pattern_extract_many_detect(struct instruction *instr,
5714 struct instruction_data *data,
5716 uint32_t *n_pattern_instr)
5720 for (i = 0; i < n_instr; i++) {
5721 if (data[i].invalid)
5724 if (instr[i].type != INSTR_HDR_EXTRACT)
5727 if (i == RTE_DIM(instr->io.hdr.header_id))
5730 if (i && data[i].n_users)
5737 *n_pattern_instr = i;
/* Fold instructions 1..n-1 into slot i of instruction 0, then mark them
 * invalid so the compaction pass removes them. */
instr_pattern_extract_many_optimize(struct instruction *instr,
5743 struct instruction_data *data,
5748 for (i = 1; i < n_instr; i++) {
5750 instr[0].io.hdr.header_id[i] = instr[i].io.hdr.header_id[0];
5751 instr[0].io.hdr.struct_id[i] = instr[i].io.hdr.struct_id[0];
5752 instr[0].io.hdr.n_bytes[i] = instr[i].io.hdr.n_bytes[0];
5754 data[i].invalid = 1;
/*
 * Detect a run of "emit" instructions terminated by "tx" that can be fused
 * into a single emit-many-and-tx instruction. Same constraints as the
 * extract-many pattern: bounded by the header_id array and no interior
 * jump targets.
 */
instr_pattern_emit_many_tx_detect(struct instruction *instr,
5760 struct instruction_data *data,
5762 uint32_t *n_pattern_instr)
5766 for (i = 0; i < n_instr; i++) {
5767 if (data[i].invalid)
5770 if (instr[i].type != INSTR_HDR_EMIT)
5773 if (i == RTE_DIM(instr->io.hdr.header_id))
5776 if (i && data[i].n_users)
5783 if (instr[i].type != INSTR_TX)
5788 *n_pattern_instr = i;
/* Fuse the detected pattern into instruction 0 and invalidate the rest. */
instr_pattern_emit_many_tx_optimize(struct instruction *instr,
5794 struct instruction_data *data,
5799 /* Any emit instruction in addition to the first one. */
5800 for (i = 1; i < n_instr - 1; i++) {
5802 instr[0].io.hdr.header_id[i] = instr[i].io.hdr.header_id[0];
5803 instr[0].io.hdr.struct_id[i] = instr[i].io.hdr.struct_id[0];
5804 instr[0].io.hdr.n_bytes[i] = instr[i].io.hdr.n_bytes[0];
5806 data[i].invalid = 1;
5809 /* The TX instruction is the last one in the pattern. */
5811 instr[0].io.io.offset = instr[i].io.io.offset;
5812 instr[0].io.io.n_bits = instr[i].io.io.n_bits;
5813 data[i].invalid = 1;
/*
 * Detect a run of consecutive header<-table DMA instructions that can be
 * fused into one multi-DMA instruction, bounded by the dst.header_id array
 * size and blocked by interior jump targets.
 */
instr_pattern_dma_many_detect(struct instruction *instr,
5818 struct instruction_data *data,
5820 uint32_t *n_pattern_instr)
5824 for (i = 0; i < n_instr; i++) {
5825 if (data[i].invalid)
5828 if (instr[i].type != INSTR_DMA_HT)
5831 if (i == RTE_DIM(instr->dma.dst.header_id))
5834 if (i && data[i].n_users)
5841 *n_pattern_instr = i;
/* Fold DMA instructions 1..n-1 into instruction 0 and invalidate them. */
instr_pattern_dma_many_optimize(struct instruction *instr,
5847 struct instruction_data *data,
5852 for (i = 1; i < n_instr; i++) {
5854 instr[0].dma.dst.header_id[i] = instr[i].dma.dst.header_id[0];
5855 instr[0].dma.dst.struct_id[i] = instr[i].dma.dst.struct_id[0];
5856 instr[0].dma.src.offset[i] = instr[i].dma.src.offset[0];
5857 instr[0].dma.n_bytes[i] = instr[i].dma.n_bytes[0];
5859 data[i].invalid = 1;
/*
 * Peephole-optimize the program: at each position try the fusion patterns
 * (extract-many, emit-many+tx, dma-many), then compact the arrays by
 * removing the instructions invalidated by the optimizers. Returns the new
 * (possibly smaller) instruction count.
 */
instr_optimize(struct instruction *instructions,
5865 struct instruction_data *instruction_data,
5866 uint32_t n_instructions)
5868 uint32_t i, pos = 0;
5870 for (i = 0; i < n_instructions; ) {
5871 struct instruction *instr = &instructions[i];
5872 struct instruction_data *data = &instruction_data[i];
5873 uint32_t n_instr = 0;
5877 detected = instr_pattern_extract_many_detect(instr,
5882 instr_pattern_extract_many_optimize(instr,
5889 /* Emit many + TX. */
5890 detected = instr_pattern_emit_many_tx_detect(instr,
5895 instr_pattern_emit_many_tx_optimize(instr,
5903 detected = instr_pattern_dma_many_detect(instr,
5908 instr_pattern_dma_many_optimize(instr, data, n_instr);
5913 /* No pattern starting at the current instruction. */
5917 /* Eliminate the invalid instructions that have been optimized out. */
5918 for (i = 0; i < n_instructions; i++) {
5919 struct instruction *instr = &instructions[i];
5920 struct instruction_data *data = &instruction_data[i];
/* Compaction: copy each surviving entry down to the next free slot. */
5926 memcpy(&instructions[pos], instr, sizeof(*instr));
5927 memcpy(&instruction_data[pos], data, sizeof(*data));
/*
 * Compile a program (array of instruction strings) for either an action
 * (a != NULL) or the main pipeline (a == NULL): allocate instruction and
 * metadata arrays, translate each string (strtok_r needs a writable copy,
 * hence the strdup), check labels, verify structure, optimize, and resolve
 * jump targets. On success the arrays are handed to the action/pipeline.
 */
instruction_config(struct rte_swx_pipeline *p,
5939 const char **instructions,
5940 uint32_t n_instructions)
5942 struct instruction *instr = NULL;
5943 struct instruction_data *data = NULL;
5947 CHECK(n_instructions, EINVAL);
5948 CHECK(instructions, EINVAL);
5949 for (i = 0; i < n_instructions; i++)
5950 CHECK_INSTRUCTION(instructions[i], EINVAL);
5952 /* Memory allocation. */
5953 instr = calloc(n_instructions, sizeof(struct instruction));
5959 data = calloc(n_instructions, sizeof(struct instruction_data));
5965 for (i = 0; i < n_instructions; i++) {
5966 char *string = strdup(instructions[i]);
5972 err = instr_translate(p, a, string, &instr[i], &data[i]);
5981 err = instr_label_check(data, n_instructions);
5985 err = instr_verify(p, a, instr, data, n_instructions);
/* Optimization may shrink the program; use the returned count from here on. */
5989 n_instructions = instr_optimize(instr, data, n_instructions);
5991 err = instr_jmp_resolve(instr, data, n_instructions);
5996 a->instructions = instr;
5997 a->n_instructions = n_instructions;
5999 p->instructions = instr;
6000 p->n_instructions = n_instructions;
/* Dispatch table: maps each instruction_type enum value to its handler.
 * Designated initializers keep the mapping robust to enum reordering. */
typedef void (*instr_exec_t)(struct rte_swx_pipeline *);
static instr_exec_t instruction_table[] = {
6015 [INSTR_RX] = instr_rx_exec,
6016 [INSTR_TX] = instr_tx_exec,
6018 [INSTR_HDR_EXTRACT] = instr_hdr_extract_exec,
6019 [INSTR_HDR_EXTRACT2] = instr_hdr_extract2_exec,
6020 [INSTR_HDR_EXTRACT3] = instr_hdr_extract3_exec,
6021 [INSTR_HDR_EXTRACT4] = instr_hdr_extract4_exec,
6022 [INSTR_HDR_EXTRACT5] = instr_hdr_extract5_exec,
6023 [INSTR_HDR_EXTRACT6] = instr_hdr_extract6_exec,
6024 [INSTR_HDR_EXTRACT7] = instr_hdr_extract7_exec,
6025 [INSTR_HDR_EXTRACT8] = instr_hdr_extract8_exec,
6027 [INSTR_HDR_EMIT] = instr_hdr_emit_exec,
6028 [INSTR_HDR_EMIT_TX] = instr_hdr_emit_tx_exec,
6029 [INSTR_HDR_EMIT2_TX] = instr_hdr_emit2_tx_exec,
6030 [INSTR_HDR_EMIT3_TX] = instr_hdr_emit3_tx_exec,
6031 [INSTR_HDR_EMIT4_TX] = instr_hdr_emit4_tx_exec,
6032 [INSTR_HDR_EMIT5_TX] = instr_hdr_emit5_tx_exec,
6033 [INSTR_HDR_EMIT6_TX] = instr_hdr_emit6_tx_exec,
6034 [INSTR_HDR_EMIT7_TX] = instr_hdr_emit7_tx_exec,
6035 [INSTR_HDR_EMIT8_TX] = instr_hdr_emit8_tx_exec,
6037 [INSTR_HDR_VALIDATE] = instr_hdr_validate_exec,
6038 [INSTR_HDR_INVALIDATE] = instr_hdr_invalidate_exec,
6040 [INSTR_MOV] = instr_mov_exec,
6041 [INSTR_MOV_S] = instr_mov_s_exec,
6042 [INSTR_MOV_I] = instr_mov_i_exec,
6044 [INSTR_DMA_HT] = instr_dma_ht_exec,
6045 [INSTR_DMA_HT2] = instr_dma_ht2_exec,
6046 [INSTR_DMA_HT3] = instr_dma_ht3_exec,
6047 [INSTR_DMA_HT4] = instr_dma_ht4_exec,
6048 [INSTR_DMA_HT5] = instr_dma_ht5_exec,
6049 [INSTR_DMA_HT6] = instr_dma_ht6_exec,
6050 [INSTR_DMA_HT7] = instr_dma_ht7_exec,
6051 [INSTR_DMA_HT8] = instr_dma_ht8_exec,
6053 [INSTR_ALU_ADD] = instr_alu_add_exec,
6054 [INSTR_ALU_ADD_MH] = instr_alu_add_mh_exec,
6055 [INSTR_ALU_ADD_HM] = instr_alu_add_hm_exec,
6056 [INSTR_ALU_ADD_HH] = instr_alu_add_hh_exec,
6057 [INSTR_ALU_ADD_MI] = instr_alu_add_mi_exec,
6058 [INSTR_ALU_ADD_HI] = instr_alu_add_hi_exec,
6060 [INSTR_ALU_SUB] = instr_alu_sub_exec,
6061 [INSTR_ALU_SUB_MH] = instr_alu_sub_mh_exec,
6062 [INSTR_ALU_SUB_HM] = instr_alu_sub_hm_exec,
6063 [INSTR_ALU_SUB_HH] = instr_alu_sub_hh_exec,
6064 [INSTR_ALU_SUB_MI] = instr_alu_sub_mi_exec,
6065 [INSTR_ALU_SUB_HI] = instr_alu_sub_hi_exec,
6067 [INSTR_ALU_CKADD_FIELD] = instr_alu_ckadd_field_exec,
6068 [INSTR_ALU_CKADD_STRUCT] = instr_alu_ckadd_struct_exec,
6069 [INSTR_ALU_CKADD_STRUCT20] = instr_alu_ckadd_struct20_exec,
6070 [INSTR_ALU_CKSUB_FIELD] = instr_alu_cksub_field_exec,
6072 [INSTR_ALU_AND] = instr_alu_and_exec,
6073 [INSTR_ALU_AND_S] = instr_alu_and_s_exec,
6074 [INSTR_ALU_AND_I] = instr_alu_and_i_exec,
6076 [INSTR_ALU_OR] = instr_alu_or_exec,
6077 [INSTR_ALU_OR_S] = instr_alu_or_s_exec,
6078 [INSTR_ALU_OR_I] = instr_alu_or_i_exec,
6080 [INSTR_ALU_XOR] = instr_alu_xor_exec,
6081 [INSTR_ALU_XOR_S] = instr_alu_xor_s_exec,
6082 [INSTR_ALU_XOR_I] = instr_alu_xor_i_exec,
6084 [INSTR_ALU_SHL] = instr_alu_shl_exec,
6085 [INSTR_ALU_SHL_MH] = instr_alu_shl_mh_exec,
6086 [INSTR_ALU_SHL_HM] = instr_alu_shl_hm_exec,
6087 [INSTR_ALU_SHL_HH] = instr_alu_shl_hh_exec,
6088 [INSTR_ALU_SHL_MI] = instr_alu_shl_mi_exec,
6089 [INSTR_ALU_SHL_HI] = instr_alu_shl_hi_exec,
6091 [INSTR_ALU_SHR] = instr_alu_shr_exec,
6092 [INSTR_ALU_SHR_MH] = instr_alu_shr_mh_exec,
6093 [INSTR_ALU_SHR_HM] = instr_alu_shr_hm_exec,
6094 [INSTR_ALU_SHR_HH] = instr_alu_shr_hh_exec,
6095 [INSTR_ALU_SHR_MI] = instr_alu_shr_mi_exec,
6096 [INSTR_ALU_SHR_HI] = instr_alu_shr_hi_exec,
6098 [INSTR_TABLE] = instr_table_exec,
6099 [INSTR_EXTERN_OBJ] = instr_extern_obj_exec,
6100 [INSTR_EXTERN_FUNC] = instr_extern_func_exec,
6102 [INSTR_JMP] = instr_jmp_exec,
6103 [INSTR_JMP_VALID] = instr_jmp_valid_exec,
6104 [INSTR_JMP_INVALID] = instr_jmp_invalid_exec,
6105 [INSTR_JMP_HIT] = instr_jmp_hit_exec,
6106 [INSTR_JMP_MISS] = instr_jmp_miss_exec,
6107 [INSTR_JMP_ACTION_HIT] = instr_jmp_action_hit_exec,
6108 [INSTR_JMP_ACTION_MISS] = instr_jmp_action_miss_exec,
6110 [INSTR_JMP_EQ] = instr_jmp_eq_exec,
6111 [INSTR_JMP_EQ_S] = instr_jmp_eq_s_exec,
6112 [INSTR_JMP_EQ_I] = instr_jmp_eq_i_exec,
6114 [INSTR_JMP_NEQ] = instr_jmp_neq_exec,
6115 [INSTR_JMP_NEQ_S] = instr_jmp_neq_s_exec,
6116 [INSTR_JMP_NEQ_I] = instr_jmp_neq_i_exec,
6118 [INSTR_JMP_LT] = instr_jmp_lt_exec,
6119 [INSTR_JMP_LT_MH] = instr_jmp_lt_mh_exec,
6120 [INSTR_JMP_LT_HM] = instr_jmp_lt_hm_exec,
6121 [INSTR_JMP_LT_HH] = instr_jmp_lt_hh_exec,
6122 [INSTR_JMP_LT_MI] = instr_jmp_lt_mi_exec,
6123 [INSTR_JMP_LT_HI] = instr_jmp_lt_hi_exec,
6125 [INSTR_JMP_GT] = instr_jmp_gt_exec,
6126 [INSTR_JMP_GT_MH] = instr_jmp_gt_mh_exec,
6127 [INSTR_JMP_GT_HM] = instr_jmp_gt_hm_exec,
6128 [INSTR_JMP_GT_HH] = instr_jmp_gt_hh_exec,
6129 [INSTR_JMP_GT_MI] = instr_jmp_gt_mi_exec,
6130 [INSTR_JMP_GT_HI] = instr_jmp_gt_hi_exec,
6132 [INSTR_RETURN] = instr_return_exec,
/* Fetch the handler for the current instruction from the dispatch table. */
instr_exec(struct rte_swx_pipeline *p)
6138 struct thread *t = &p->threads[p->thread_id];
6139 struct instruction *ip = t->ip;
6140 instr_exec_t instr = instruction_table[ip->type];
/* Look up an action by name (linear scan of the actions tailq). */
static struct action *
6149 action_find(struct rte_swx_pipeline *p, const char *name)
6151 struct action *elem;
6156 TAILQ_FOREACH(elem, &p->actions, node)
6157 if (strcmp(elem->name, name) == 0)
/* Look up an action by numeric ID. */
static struct action *
6164 action_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
6166 struct action *action = NULL;
6168 TAILQ_FOREACH(action, &p->actions, node)
6169 if (action->id == id)
/* Find a field in the action's argument struct; NULL if the action has none. */
static struct field *
6176 action_field_find(struct action *a, const char *name)
6178 return a->st ? struct_type_field_find(a->st, name) : NULL;
/* Parse a "t.<field>" reference into the action-argument field it names. */
static struct field *
6182 action_field_parse(struct action *action, const char *name)
6184 if (name[0] != 't' || name[1] != '.')
6187 return action_field_find(action, &name[2]);
/*
 * Public API: register an action with the pipeline. Validates the unique
 * name and optional argument struct type, allocates the action node,
 * compiles its instruction program, and appends it to the actions tailq.
 * The action ID is its registration order (p->n_actions at entry).
 */
rte_swx_pipeline_action_config(struct rte_swx_pipeline *p,
6193 const char *args_struct_type_name,
6194 const char **instructions,
6195 uint32_t n_instructions)
6197 struct struct_type *args_struct_type;
6203 CHECK_NAME(name, EINVAL);
6204 CHECK(!action_find(p, name), EEXIST);
6206 if (args_struct_type_name) {
6207 CHECK_NAME(args_struct_type_name, EINVAL);
6208 args_struct_type = struct_type_find(p, args_struct_type_name);
6209 CHECK(args_struct_type, EINVAL);
6211 args_struct_type = NULL;
6214 /* Node allocation. */
6215 a = calloc(1, sizeof(struct action));
6218 /* Node initialization. */
6219 strcpy(a->name, name);
6220 a->st = args_struct_type;
6221 a->id = p->n_actions;
6223 /* Instruction translation. */
6224 err = instruction_config(p, a, instructions, n_instructions);
6230 /* Node add to tailq. */
6231 TAILQ_INSERT_TAIL(&p->actions, a, node);
/* Build step: create the per-ID action instruction pointer array used at
 * run time for O(1) action dispatch. */
action_build(struct rte_swx_pipeline *p)
6240 struct action *action;
6242 p->action_instructions = calloc(p->n_actions,
6243 sizeof(struct instruction *));
6244 CHECK(p->action_instructions, ENOMEM);
6246 TAILQ_FOREACH(action, &p->actions, node)
6247 p->action_instructions[action->id] = action->instructions;
/* Undo action_build: release the dispatch array. */
action_build_free(struct rte_swx_pipeline *p)
6255 free(p->action_instructions);
6256 p->action_instructions = NULL;
/* Free all actions: build artifacts first, then each registered node. */
action_free(struct rte_swx_pipeline *p)
6262 action_build_free(p);
6265 struct action *action;
6267 action = TAILQ_FIRST(&p->actions);
6271 TAILQ_REMOVE(&p->actions, action, node);
6272 free(action->instructions);
/* Look up a registered table type by name. */
static struct table_type *
6281 table_type_find(struct rte_swx_pipeline *p, const char *name)
6283 struct table_type *elem;
6285 TAILQ_FOREACH(elem, &p->table_types, node)
6286 if (strcmp(elem->name, name) == 0)
/* Pick a table type: prefer the recommended name when its match type fits,
 * otherwise fall back to the first type with the requested match type. */
static struct table_type *
6293 table_type_resolve(struct rte_swx_pipeline *p,
6294 const char *recommended_type_name,
6295 enum rte_swx_table_match_type match_type)
6297 struct table_type *elem;
6299 /* Only consider the recommended type if the match type is correct. */
6300 if (recommended_type_name)
6301 TAILQ_FOREACH(elem, &p->table_types, node)
6302 if (!strcmp(elem->name, recommended_type_name) &&
6303 (elem->match_type == match_type))
6306 /* Ignore the recommended type and get the first element with this match
6309 TAILQ_FOREACH(elem, &p->table_types, node)
6310 if (elem->match_type == match_type)
/* Look up a table by name. */
static struct table *
6317 table_find(struct rte_swx_pipeline *p, const char *name)
6321 TAILQ_FOREACH(elem, &p->tables, node)
6322 if (strcmp(elem->name, name) == 0)
/* Look up a table by numeric ID. */
static struct table *
6329 table_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
6331 struct table *table = NULL;
6333 TAILQ_FOREACH(table, &p->tables, node)
6334 if (table->id == id)
/*
 * Public API: register a table implementation (ops vtable) under a unique
 * name for a given match type. create/lkp/free are mandatory callbacks.
 */
rte_swx_pipeline_table_type_register(struct rte_swx_pipeline *p,
6343 enum rte_swx_table_match_type match_type,
6344 struct rte_swx_table_ops *ops)
6346 struct table_type *elem;
6350 CHECK_NAME(name, EINVAL);
6351 CHECK(!table_type_find(p, name), EEXIST);
6354 CHECK(ops->create, EINVAL);
6355 CHECK(ops->lkp, EINVAL);
6356 CHECK(ops->free, EINVAL);
6358 /* Node allocation. */
6359 elem = calloc(1, sizeof(struct table_type));
6360 CHECK(elem, ENOMEM);
6362 /* Node initialization. */
6363 strcpy(elem->name, name);
6364 elem->match_type = match_type;
6365 memcpy(&elem->ops, ops, sizeof(*ops));
6367 /* Node add to tailq. */
6368 TAILQ_INSERT_TAIL(&p->table_types, elem, node);
/*
 * Derive the overall table match type from the per-field match types:
 * EXACT when every field is exact; LPM only when the sole non-exact field
 * is the last one and is LPM; WILDCARD otherwise.
 */
static enum rte_swx_table_match_type
6374 table_match_type_resolve(struct rte_swx_match_field_params *fields,
6379 for (i = 0; i < n_fields; i++)
6380 if (fields[i].match_type != RTE_SWX_TABLE_MATCH_EXACT)
6384 return RTE_SWX_TABLE_MATCH_EXACT;
6386 if ((i == n_fields - 1) &&
6387 (fields[i].match_type == RTE_SWX_TABLE_MATCH_LPM))
6388 return RTE_SWX_TABLE_MATCH_LPM;
6390 return RTE_SWX_TABLE_MATCH_WILDCARD;
/*
 * Public API: configure a table. Validates the match fields (all from the
 * same header, or all from metadata, with strictly increasing offsets),
 * validates the action list and default action, resolves the table type,
 * then allocates and initializes the table node and appends it to the
 * tables tailq.
 */
rte_swx_pipeline_table_config(struct rte_swx_pipeline *p,
6396 struct rte_swx_pipeline_table_params *params,
6397 const char *recommended_table_type_name,
6401 struct table_type *type;
6403 struct action *default_action;
6404 struct header *header = NULL;
6406 uint32_t offset_prev = 0, action_data_size_max = 0, i;
6410 CHECK_NAME(name, EINVAL);
6411 CHECK(!table_find(p, name), EEXIST);
6413 CHECK(params, EINVAL);
6416 CHECK(!params->n_fields || params->fields, EINVAL);
6417 for (i = 0; i < params->n_fields; i++) {
/* NOTE(review): "¶ms" below looks like a mis-encoded "&params" --
 * fix the file encoding. */
6418 struct rte_swx_match_field_params *field = ¶ms->fields[i];
6420 struct field *hf, *mf;
6423 CHECK_NAME(field->name, EINVAL);
6425 hf = header_field_parse(p, field->name, &h);
6426 mf = metadata_field_parse(p, field->name);
6427 CHECK(hf || mf, EINVAL);
6429 offset = hf ? hf->offset : mf->offset;
6432 is_header = hf ? 1 : 0;
6433 header = hf ? h : NULL;
6434 offset_prev = offset;
/* All fields must come from the same header, or all from metadata. */
6439 CHECK((is_header && hf && (h->id == header->id)) ||
6440 (!is_header && mf), EINVAL);
6442 CHECK(offset > offset_prev, EINVAL);
6443 offset_prev = offset;
6446 /* Action checks. */
6447 CHECK(params->n_actions, EINVAL);
6448 CHECK(params->action_names, EINVAL);
6449 for (i = 0; i < params->n_actions; i++) {
6450 const char *action_name = params->action_names[i];
6452 uint32_t action_data_size;
6454 CHECK_NAME(action_name, EINVAL);
6456 a = action_find(p, action_name);
6459 action_data_size = a->st ? a->st->n_bits / 8 : 0;
6460 if (action_data_size > action_data_size_max)
6461 action_data_size_max = action_data_size;
6464 CHECK_NAME(params->default_action_name, EINVAL);
/*
 * NOTE(review): this loop bound is p->n_actions but it indexes
 * params->action_names[i]; if the pipeline has more actions than this
 * table (p->n_actions > params->n_actions) it reads past the end of
 * the caller's array. The CHECK below uses params->n_actions, so the
 * bound here should likely be params->n_actions -- TODO confirm.
 */
6465 for (i = 0; i < p->n_actions; i++)
6466 if (!strcmp(params->action_names[i],
6467 params->default_action_name))
6469 CHECK(i < params->n_actions, EINVAL);
6470 default_action = action_find(p, params->default_action_name);
6471 CHECK((default_action->st && params->default_action_data) ||
6472 !params->default_action_data, EINVAL);
6474 /* Table type checks. */
6475 if (recommended_table_type_name)
6476 CHECK_NAME(recommended_table_type_name, EINVAL);
6478 if (params->n_fields) {
6479 enum rte_swx_table_match_type match_type;
6481 match_type = table_match_type_resolve(params->fields,
6483 type = table_type_resolve(p,
6484 recommended_table_type_name,
6486 CHECK(type, EINVAL);
6491 /* Memory allocation. */
6492 t = calloc(1, sizeof(struct table));
6495 t->fields = calloc(params->n_fields, sizeof(struct match_field));
6501 t->actions = calloc(params->n_actions, sizeof(struct action *));
6508 if (action_data_size_max) {
6509 t->default_action_data = calloc(1, action_data_size_max);
6510 if (!t->default_action_data) {
6518 /* Node initialization. */
6519 strcpy(t->name, name);
6520 if (args && args[0])
6521 strcpy(t->args, args);
6524 for (i = 0; i < params->n_fields; i++) {
/* NOTE(review): same "¶ms" -> "&params" encoding issue as above. */
6525 struct rte_swx_match_field_params *field = ¶ms->fields[i];
6526 struct match_field *f = &t->fields[i];
6528 f->match_type = field->match_type;
6529 f->field = is_header ?
6530 header_field_parse(p, field->name, NULL) :
6531 metadata_field_parse(p, field->name);
6533 t->n_fields = params->n_fields;
6534 t->is_header = is_header;
6537 for (i = 0; i < params->n_actions; i++)
6538 t->actions[i] = action_find(p, params->action_names[i]);
6539 t->default_action = default_action;
6540 if (default_action->st)
6541 memcpy(t->default_action_data,
6542 params->default_action_data,
6543 default_action->st->n_bits / 8);
6544 t->n_actions = params->n_actions;
6545 t->default_action_is_const = params->default_action_is_const;
6546 t->action_data_size_max = action_data_size_max;
6549 t->id = p->n_tables;
6551 /* Node add to tailq. */
6552 TAILQ_INSERT_TAIL(&p->tables, t, node);
/*
 * Build the rte_swx_table_params handed to the table implementation's
 * create() callback. The key spans from the first to the last match field
 * (fields are offset-ordered by table config); key_mask0 marks the bytes of
 * the actual fields within that span; action_data_size is the max over all
 * actions' argument struct sizes.
 */
static struct rte_swx_table_params *
6559 table_params_get(struct table *table)
6561 struct rte_swx_table_params *params;
6562 struct field *first, *last;
6564 uint32_t key_size, key_offset, action_data_size, i;
6566 /* Memory allocation. */
6567 params = calloc(1, sizeof(struct rte_swx_table_params));
6571 /* Key offset and size. */
6572 first = table->fields[0].field;
6573 last = table->fields[table->n_fields - 1].field;
6574 key_offset = first->offset / 8;
6575 key_size = (last->offset + last->n_bits - first->offset) / 8;
6577 /* Memory allocation. */
6578 key_mask = calloc(1, key_size);
6585 for (i = 0; i < table->n_fields; i++) {
6586 struct field *f = table->fields[i].field;
6587 uint32_t start = (f->offset - first->offset) / 8;
6588 size_t size = f->n_bits / 8;
6590 memset(&key_mask[start], 0xFF, size);
6593 /* Action data size. */
6594 action_data_size = 0;
6595 for (i = 0; i < table->n_actions; i++) {
6596 struct action *action = table->actions[i];
6597 uint32_t ads = action->st ? action->st->n_bits / 8 : 0;
6599 if (ads > action_data_size)
6600 action_data_size = ads;
6604 params->match_type = table->type->match_type;
6605 params->key_size = key_size;
6606 params->key_offset = key_offset;
6607 params->key_mask0 = key_mask;
6608 params->action_data_size = action_data_size;
6609 params->n_keys_max = table->size;
/* Release a params object built by table_params_get (mask then struct). */
table_params_free(struct rte_swx_table_params *params)
6620 free(params->key_mask0);
/*
 * Build per-table runtime state: for each table with a real implementation,
 * call its create() callback with generated params, and snapshot the
 * default action data and ID.
 * NOTE(review): the CHECK(ts->obj, ENODEV) path returns after
 * table_params_free() but the error paths for earlier CHECKs are elided in
 * this view -- verify no params/state leak on failure.
 */
table_state_build(struct rte_swx_pipeline *p)
6627 struct table *table;
6629 p->table_state = calloc(p->n_tables,
6630 sizeof(struct rte_swx_table_state));
6631 CHECK(p->table_state, ENOMEM);
6633 TAILQ_FOREACH(table, &p->tables, node) {
6634 struct rte_swx_table_state *ts = &p->table_state[table->id];
6637 struct rte_swx_table_params *params;
6640 params = table_params_get(table);
6641 CHECK(params, ENOMEM);
6643 ts->obj = table->type->ops.create(params,
6648 table_params_free(params);
6649 CHECK(ts->obj, ENODEV);
6652 /* ts->default_action_data. */
6653 if (table->action_data_size_max) {
6654 ts->default_action_data =
6655 malloc(table->action_data_size_max);
6656 CHECK(ts->default_action_data, ENOMEM);
6658 memcpy(ts->default_action_data,
6659 table->default_action_data,
6660 table->action_data_size_max);
6663 /* ts->default_action_id. */
6664 ts->default_action_id = table->default_action->id;
/* Tear down per-table runtime state created by table_state_build. */
table_state_build_free(struct rte_swx_pipeline *p)
6675 if (!p->table_state)
6678 for (i = 0; i < p->n_tables; i++) {
6679 struct rte_swx_table_state *ts = &p->table_state[i];
6680 struct table *table = table_find_by_id(p, i);
6683 if (table->type && ts->obj)
6684 table->type->ops.free(ts->obj);
6686 /* ts->default_action_data. */
6687 free(ts->default_action_data);
6690 free(p->table_state);
6691 p->table_state = NULL;
/* Free all table state (currently just the build artifacts). */
table_state_free(struct rte_swx_pipeline *p)
6697 table_state_build_free(p);
/* Lookup stub for keyless tables (no fields, no real implementation):
 * always completes immediately with no match data. */
table_stub_lkp(void *table __rte_unused,
6702 void *mailbox __rte_unused,
6703 uint8_t **key __rte_unused,
6704 uint64_t *action_id __rte_unused,
6705 uint8_t **action_data __rte_unused,
6709 return 1; /* DONE. */
/* Build the per-thread table runtime: for every pipeline thread, allocate
 * one table_runtime entry per table, then for each table install the
 * lookup function, a private zero-initialized mailbox and the pointer to
 * the lookup key source. CHECK() bails out on allocation failure (ENOMEM).
 * Cleanup of partial allocations is the caller's job (table_build_free()
 * on the rte_swx_pipeline_build() error path). */
6713 table_build(struct rte_swx_pipeline *p)
6717 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
6718 struct thread *t = &p->threads[i];
6719 struct table *table;
/* One table_runtime slot per table, indexed by table->id below. */
6721 t->tables = calloc(p->n_tables, sizeof(struct table_runtime));
6722 CHECK(t->tables, ENOMEM);
6724 TAILQ_FOREACH(table, &p->tables, node) {
6725 struct table_runtime *r = &t->tables[table->id];
/* Real table type: use its lookup op with a per-thread mailbox
 * sized by the type itself. */
6730 size = table->type->ops.mailbox_size_get();
6733 r->func = table->type->ops.lkp;
6737 r->mailbox = calloc(1, size);
6738 CHECK(r->mailbox, ENOMEM);
/* Key source: a header struct when the table matches on header
 * fields, otherwise the thread's meta-data struct. */
6742 r->key = table->is_header ?
6743 &t->structs[table->header->struct_id] :
6744 &t->structs[p->metadata_struct_id];
/* No table type: install the no-op stub lookup. */
6746 r->func = table_stub_lkp;
/* Undo table_build(): for every thread, walk its table_runtime array and
 * release the per-table resources (mailboxes), then the array itself.
 * Tolerates a partially built state from a failed build. */
6755 table_build_free(struct rte_swx_pipeline *p)
6759 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
6760 struct thread *t = &p->threads[i];
6766 for (j = 0; j < p->n_tables; j++) {
6767 struct table_runtime *r = &t->tables[j];
/* Full table teardown: first release the per-thread runtime
 * (table_build_free()), then drain and free the table list (each entry's
 * actions array and default action data included), and finally drain and
 * free the registered table types. Both lists are consumed with the usual
 * TAILQ_FIRST / TAILQ_REMOVE pop loop. */
6778 table_free(struct rte_swx_pipeline *p)
6780 table_build_free(p);
/* Pop and free each table element. */
6786 elem = TAILQ_FIRST(&p->tables);
6790 TAILQ_REMOVE(&p->tables, elem, node);
6792 free(elem->actions);
6793 free(elem->default_action_data);
/* Pop and free each registered table type. */
6799 struct table_type *elem;
6801 elem = TAILQ_FIRST(&p->table_types);
6805 TAILQ_REMOVE(&p->table_types, elem, node);
/* Allocate and initialize an empty pipeline object.
 *
 * p         Output: on success, set to the newly allocated pipeline.
 * numa_node NUMA node hint stored on the pipeline for later allocations.
 *
 * All internal registries (struct types, port in/out types and instances,
 * extern types/objects/functions, headers, actions, table types, tables)
 * start empty. Returns -ENOMEM when the allocation fails. */
6814 rte_swx_pipeline_config(struct rte_swx_pipeline **p, int numa_node)
6816 struct rte_swx_pipeline *pipeline;
6818 /* Check input parameters. */
6821 /* Memory allocation. */
6822 pipeline = calloc(1, sizeof(struct rte_swx_pipeline));
6823 CHECK(pipeline, ENOMEM);
6825 /* Initialization. */
6826 TAILQ_INIT(&pipeline->struct_types);
6827 TAILQ_INIT(&pipeline->port_in_types);
6828 TAILQ_INIT(&pipeline->ports_in);
6829 TAILQ_INIT(&pipeline->port_out_types);
6830 TAILQ_INIT(&pipeline->ports_out);
6831 TAILQ_INIT(&pipeline->extern_types);
6832 TAILQ_INIT(&pipeline->extern_objs);
6833 TAILQ_INIT(&pipeline->extern_funcs);
6834 TAILQ_INIT(&pipeline->headers);
6835 TAILQ_INIT(&pipeline->actions);
6836 TAILQ_INIT(&pipeline->table_types);
6837 TAILQ_INIT(&pipeline->tables);
/* Struct ID 0 is reserved for action data, hence the count starts at 1. */
6839 pipeline->n_structs = 1; /* Struct 0 is reserved for action_data. */
6840 pipeline->numa_node = numa_node;
/* Destroy a pipeline: release the instruction array, the table runtime
 * state and the per-subsystem registries (extern functions shown here;
 * the remaining subsystems are freed alongside). */
6847 rte_swx_pipeline_free(struct rte_swx_pipeline *p)
6852 free(p->instructions);
6854 table_state_free(p);
6859 extern_func_free(p);
/* Translate and install the pipeline-level instruction program
 * (instruction_config() with a NULL action context), then reset every
 * thread's instruction pointer so execution starts from the beginning of
 * the new program. Returns the instruction_config() error on failure. */
6869 rte_swx_pipeline_instructions_config(struct rte_swx_pipeline *p,
6870 const char **instructions,
6871 uint32_t n_instructions)
6876 err = instruction_config(p, NULL, instructions, n_instructions);
6880 /* Thread instruction pointer reset. */
6881 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
6882 struct thread *t = &p->threads[i];
6884 thread_ip_reset(p, t);
/* One-shot pipeline build: run every subsystem build stage in dependency
 * order (ports in/out, structs, externs, headers, meta-data, actions,
 * tables, table state). Fails with EEXIST when the pipeline is already
 * built. On any stage failure, control reaches the error path below,
 * which unwinds all stages in reverse order so no partial state leaks. */
6891 rte_swx_pipeline_build(struct rte_swx_pipeline *p)
6896 CHECK(p->build_done == 0, EEXIST);
6898 status = port_in_build(p);
6902 status = port_out_build(p);
6906 status = struct_build(p);
6910 status = extern_obj_build(p);
6914 status = extern_func_build(p);
6918 status = header_build(p);
6922 status = metadata_build(p);
6926 status = action_build(p);
6930 status = table_build(p);
6934 status = table_state_build(p);
/* Error path: free in strict reverse order of the build calls above. */
6942 table_state_build_free(p);
6943 table_build_free(p);
6944 action_build_free(p);
6945 metadata_build_free(p);
6946 header_build_free(p);
6947 extern_func_build_free(p);
6948 extern_obj_build_free(p);
6949 port_out_build_free(p);
6950 port_in_build_free(p);
6951 struct_build_free(p);
/* Data plane entry point: execute n_instructions steps of the pipeline
 * program on the pipeline's threads. */
6957 rte_swx_pipeline_run(struct rte_swx_pipeline *p, uint32_t n_instructions)
6961 for (i = 0; i < n_instructions; i++)
/* Flush any packets buffered inside the output ports by invoking each
 * output port's flush callback. */
6966 rte_swx_pipeline_flush(struct rte_swx_pipeline *p)
6970 for (i = 0; i < p->n_ports_out; i++) {
6971 struct port_out_runtime *port = &p->out[i];
6974 port->flush(port->obj);
/* Control-path query: fill the caller-supplied pipeline info structure
 * with the port counts and the number of actions and tables. Action and
 * table counts are computed by walking the respective TAILQ lists rather
 * than read from cached counters. Rejects NULL arguments. */
6982 rte_swx_ctl_pipeline_info_get(struct rte_swx_pipeline *p,
6983 struct rte_swx_ctl_pipeline_info *pipeline)
6985 struct action *action;
6986 struct table *table;
6987 uint32_t n_actions = 0, n_tables = 0;
6989 if (!p || !pipeline)
6992 TAILQ_FOREACH(action, &p->actions, node)
6995 TAILQ_FOREACH(table, &p->tables, node)
6998 pipeline->n_ports_in = p->n_ports_in;
6999 pipeline->n_ports_out = p->n_ports_out;
7000 pipeline->n_actions = n_actions;
7001 pipeline->n_tables = n_tables;
/* Control-path query: report the NUMA node the pipeline was configured
 * with (see rte_swx_pipeline_config()). Rejects NULL arguments. */
7007 rte_swx_ctl_pipeline_numa_node_get(struct rte_swx_pipeline *p, int *numa_node)
7009 if (!p || !numa_node)
7012 *numa_node = p->numa_node;
/* Control-path query: copy the name and argument count of the action with
 * the given ID into the caller's info structure. The unchecked strcpy is
 * safe because both names are RTE_SWX_NAME_SIZE buffers and registration
 * enforces the bound (see CHECK_NAME()). n_args is 0 for actions with no
 * argument struct. */
7017 rte_swx_ctl_action_info_get(struct rte_swx_pipeline *p,
7019 struct rte_swx_ctl_action_info *action)
7021 struct action *a = NULL;
7023 if (!p || (action_id >= p->n_actions) || !action)
7026 a = action_find_by_id(p, action_id);
7030 strcpy(action->name, a->name);
7031 action->n_args = a->st ? a->st->n_fields : 0;
/* Control-path query: describe one argument (name and bit width) of the
 * given action. Fails when the action has no argument struct or the
 * argument index is out of range. */
7036 rte_swx_ctl_action_arg_info_get(struct rte_swx_pipeline *p,
7038 uint32_t action_arg_id,
7039 struct rte_swx_ctl_action_arg_info *action_arg)
7041 struct action *a = NULL;
7042 struct field *arg = NULL;
7044 if (!p || (action_id >= p->n_actions) || !action_arg)
7047 a = action_find_by_id(p, action_id);
7048 if (!a || !a->st || (action_arg_id >= a->st->n_fields))
7051 arg = &a->st->fields[action_arg_id];
7052 strcpy(action_arg->name, arg->name);
7053 action_arg->n_bits = arg->n_bits;
/* Control-path query: fill the caller's table info structure (name, args
 * string, match field count, action count, default-action constness and
 * size) for the table with the given ID. */
7059 rte_swx_ctl_table_info_get(struct rte_swx_pipeline *p,
7061 struct rte_swx_ctl_table_info *table)
7063 struct table *t = NULL;
7068 t = table_find_by_id(p, table_id);
7072 strcpy(table->name, t->name);
7073 strcpy(table->args, t->args);
7074 table->n_match_fields = t->n_fields;
7075 table->n_actions = t->n_actions;
7076 table->default_action_is_const = t->default_action_is_const;
7077 table->size = t->size;
/* Control-path query: describe one match field of a table -- match type,
 * whether the table keys off a header (vs meta-data), and the field's bit
 * width and offset. Fails on invalid table or field index. */
7082 rte_swx_ctl_table_match_field_info_get(struct rte_swx_pipeline *p,
7084 uint32_t match_field_id,
7085 struct rte_swx_ctl_table_match_field_info *match_field)
7088 struct match_field *f;
7090 if (!p || (table_id >= p->n_tables) || !match_field)
7093 t = table_find_by_id(p, table_id);
7094 if (!t || (match_field_id >= t->n_fields))
7097 f = &t->fields[match_field_id];
7098 match_field->match_type = f->match_type;
/* is_header is a table-level property: all match fields share it. */
7099 match_field->is_header = t->is_header;
7100 match_field->n_bits = f->field->n_bits;
7101 match_field->offset = f->field->offset;
/* Control-path query: map a table-local action index to the pipeline-wide
 * action ID. Fails on invalid table or action index. */
7107 rte_swx_ctl_table_action_info_get(struct rte_swx_pipeline *p,
7109 uint32_t table_action_id,
7110 struct rte_swx_ctl_table_action_info *table_action)
7114 if (!p || (table_id >= p->n_tables) || !table_action)
7117 t = table_find_by_id(p, table_id);
7118 if (!t || (table_action_id >= t->n_actions))
7121 table_action->action_id = t->actions[table_action_id]->id;
/* Control-path query: copy the ops vector of the table's type into the
 * caller-supplied structure, giving the control plane direct access to
 * the table implementation's entry points. */
7127 rte_swx_ctl_table_ops_get(struct rte_swx_pipeline *p,
7129 struct rte_swx_table_ops *table_ops,
7134 if (!p || (table_id >= p->n_tables))
7137 t = table_find_by_id(p, table_id);
7143 memcpy(table_ops, &t->type->ops, sizeof(*table_ops));
/* Expose the pipeline's table state array to the control plane. Only
 * valid after a successful rte_swx_pipeline_build() (build_done check). */
7153 rte_swx_pipeline_table_state_get(struct rte_swx_pipeline *p,
7154 struct rte_swx_table_state **table_state)
7156 if (!p || !table_state || !p->build_done)
7159 *table_state = p->table_state;
/* Install a new table state array on the pipeline (counterpart of
 * rte_swx_pipeline_table_state_get()); ownership of the previous array is
 * the caller's concern. Only valid after a successful build. */
7164 rte_swx_pipeline_table_state_set(struct rte_swx_pipeline *p,
7165 struct rte_swx_table_state *table_state)
7167 if (!p || !table_state || !p->build_done)
7170 p->table_state = table_state;
/* Read the statistics counters of one input port by delegating to the
 * port type's stats_read op on the port's object. */
7175 rte_swx_ctl_pipeline_port_in_stats_read(struct rte_swx_pipeline *p,
7177 struct rte_swx_port_in_stats *stats)
7179 struct port_in *port;
7184 port = port_in_find(p, port_id);
7188 port->type->ops.stats_read(port->obj, stats);
7193 rte_swx_ctl_pipeline_port_out_stats_read(struct rte_swx_pipeline *p,
7195 struct rte_swx_port_out_stats *stats)
7197 struct port_out *port;
7202 port = port_out_find(p, port_id);
7206 port->type->ops.stats_read(port->obj, stats);