1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Intel Corporation
10 #include <arpa/inet.h>
12 #include <rte_common.h>
13 #include <rte_prefetch.h>
14 #include <rte_byteorder.h>
16 #include "rte_swx_pipeline.h"
17 #include "rte_swx_ctl.h"
19 #define CHECK(condition, err_code) \
25 #define CHECK_NAME(name, err_code) \
28 (strnlen((name), RTE_SWX_NAME_SIZE) < RTE_SWX_NAME_SIZE), \
31 #define CHECK_INSTRUCTION(instr, err_code) \
34 (strnlen((instr), RTE_SWX_INSTRUCTION_SIZE) < \
35 RTE_SWX_INSTRUCTION_SIZE), \
43 #define TRACE(...) printf(__VA_ARGS__)
48 #define ntoh64(x) rte_be_to_cpu_64(x)
49 #define hton64(x) rte_cpu_to_be_64(x)
55 char name[RTE_SWX_NAME_SIZE];
61 TAILQ_ENTRY(struct_type) node;
62 char name[RTE_SWX_NAME_SIZE];
68 TAILQ_HEAD(struct_type_tailq, struct_type);
74 TAILQ_ENTRY(port_in_type) node;
75 char name[RTE_SWX_NAME_SIZE];
76 struct rte_swx_port_in_ops ops;
79 TAILQ_HEAD(port_in_type_tailq, port_in_type);
82 TAILQ_ENTRY(port_in) node;
83 struct port_in_type *type;
88 TAILQ_HEAD(port_in_tailq, port_in);
90 struct port_in_runtime {
91 rte_swx_port_in_pkt_rx_t pkt_rx;
98 struct port_out_type {
99 TAILQ_ENTRY(port_out_type) node;
100 char name[RTE_SWX_NAME_SIZE];
101 struct rte_swx_port_out_ops ops;
104 TAILQ_HEAD(port_out_type_tailq, port_out_type);
107 TAILQ_ENTRY(port_out) node;
108 struct port_out_type *type;
113 TAILQ_HEAD(port_out_tailq, port_out);
115 struct port_out_runtime {
116 rte_swx_port_out_pkt_tx_t pkt_tx;
117 rte_swx_port_out_flush_t flush;
124 struct extern_type_member_func {
125 TAILQ_ENTRY(extern_type_member_func) node;
126 char name[RTE_SWX_NAME_SIZE];
127 rte_swx_extern_type_member_func_t func;
131 TAILQ_HEAD(extern_type_member_func_tailq, extern_type_member_func);
134 TAILQ_ENTRY(extern_type) node;
135 char name[RTE_SWX_NAME_SIZE];
136 struct struct_type *mailbox_struct_type;
137 rte_swx_extern_type_constructor_t constructor;
138 rte_swx_extern_type_destructor_t destructor;
139 struct extern_type_member_func_tailq funcs;
143 TAILQ_HEAD(extern_type_tailq, extern_type);
146 TAILQ_ENTRY(extern_obj) node;
147 char name[RTE_SWX_NAME_SIZE];
148 struct extern_type *type;
154 TAILQ_HEAD(extern_obj_tailq, extern_obj);
156 #ifndef RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX
157 #define RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX 8
160 struct extern_obj_runtime {
163 rte_swx_extern_type_member_func_t funcs[RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX];
170 TAILQ_ENTRY(extern_func) node;
171 char name[RTE_SWX_NAME_SIZE];
172 struct struct_type *mailbox_struct_type;
173 rte_swx_extern_func_t func;
178 TAILQ_HEAD(extern_func_tailq, extern_func);
180 struct extern_func_runtime {
182 rte_swx_extern_func_t func;
189 TAILQ_ENTRY(header) node;
190 char name[RTE_SWX_NAME_SIZE];
191 struct struct_type *st;
196 TAILQ_HEAD(header_tailq, header);
198 struct header_runtime {
202 struct header_out_runtime {
212 /* Packet headers are always in Network Byte Order (NBO), i.e. big endian.
213 * Packet meta-data fields are always assumed to be in Host Byte Order (HBO).
214 * Table entry fields can be in either NBO or HBO; they are assumed to be in HBO
215 * when transferred to packet meta-data and in NBO when transferred to packet
219 /* Notation conventions:
220 * -Header field: H = h.header.field (dst/src)
221 * -Meta-data field: M = m.field (dst/src)
222 * -Extern object mailbox field: E = e.field (dst/src)
223 * -Extern function mailbox field: F = f.field (dst/src)
224 * -Table action data field: T = t.field (src only)
225 * -Immediate value: I = 32-bit unsigned value (src only)
228 enum instruction_type {
235 /* extract h.header */
256 /* validate h.header */
259 /* invalidate h.header */
260 INSTR_HDR_INVALIDATE,
264 * dst = HMEF, src = HMEFTI
266 INSTR_MOV, /* dst = MEF, src = MEFT */
267 INSTR_MOV_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
268 INSTR_MOV_I, /* dst = HMEF, src = I */
270 /* dma h.header t.field
271 * memcpy(h.header, t.field, sizeof(h.header))
284 * dst = HMEF, src = HMEFTI
286 INSTR_ALU_ADD, /* dst = MEF, src = MEF */
287 INSTR_ALU_ADD_MH, /* dst = MEF, src = H */
288 INSTR_ALU_ADD_HM, /* dst = H, src = MEF */
289 INSTR_ALU_ADD_HH, /* dst = H, src = H */
290 INSTR_ALU_ADD_MI, /* dst = MEF, src = I */
291 INSTR_ALU_ADD_HI, /* dst = H, src = I */
295 * dst = HMEF, src = HMEFTI
297 INSTR_ALU_SUB, /* dst = MEF, src = MEF */
298 INSTR_ALU_SUB_MH, /* dst = MEF, src = H */
299 INSTR_ALU_SUB_HM, /* dst = H, src = MEF */
300 INSTR_ALU_SUB_HH, /* dst = H, src = H */
301 INSTR_ALU_SUB_MI, /* dst = MEF, src = I */
302 INSTR_ALU_SUB_HI, /* dst = H, src = I */
305 * dst = dst '+ src[0:1] '+ src[2:3] + ...
306 * dst = H, src = {H, h.header}
308 INSTR_ALU_CKADD_FIELD, /* src = H */
309 INSTR_ALU_CKADD_STRUCT20, /* src = h.header, with sizeof(header) = 20 */
310 	INSTR_ALU_CKADD_STRUCT, /* src = h.header, with any sizeof(header) */
316 INSTR_ALU_CKSUB_FIELD,
320 * dst = HMEF, src = HMEFTI
322 INSTR_ALU_AND, /* dst = MEF, src = MEFT */
323 INSTR_ALU_AND_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
324 INSTR_ALU_AND_I, /* dst = HMEF, src = I */
328 * dst = HMEF, src = HMEFTI
330 INSTR_ALU_OR, /* dst = MEF, src = MEFT */
331 INSTR_ALU_OR_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
332 INSTR_ALU_OR_I, /* dst = HMEF, src = I */
336 * dst = HMEF, src = HMEFTI
338 INSTR_ALU_XOR, /* dst = MEF, src = MEFT */
339 INSTR_ALU_XOR_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
340 INSTR_ALU_XOR_I, /* dst = HMEF, src = I */
344 * dst = HMEF, src = HMEFTI
346 INSTR_ALU_SHL, /* dst = MEF, src = MEF */
347 INSTR_ALU_SHL_MH, /* dst = MEF, src = H */
348 INSTR_ALU_SHL_HM, /* dst = H, src = MEF */
349 INSTR_ALU_SHL_HH, /* dst = H, src = H */
350 INSTR_ALU_SHL_MI, /* dst = MEF, src = I */
351 INSTR_ALU_SHL_HI, /* dst = H, src = I */
355 * dst = HMEF, src = HMEFTI
357 INSTR_ALU_SHR, /* dst = MEF, src = MEF */
358 INSTR_ALU_SHR_MH, /* dst = MEF, src = H */
359 INSTR_ALU_SHR_HM, /* dst = H, src = MEF */
360 INSTR_ALU_SHR_HH, /* dst = H, src = H */
361 INSTR_ALU_SHR_MI, /* dst = MEF, src = I */
362 INSTR_ALU_SHR_HI, /* dst = H, src = I */
367 /* extern e.obj.func */
378 /* jmpv LABEL h.header
379 * Jump if header is valid
383 /* jmpnv LABEL h.header
384 * Jump if header is invalid
389 * Jump if table lookup hit
394 * Jump if table lookup miss
401 INSTR_JMP_ACTION_HIT,
403 /* jmpna LABEL ACTION
404 * Jump if action not run
406 INSTR_JMP_ACTION_MISS,
409 	 * Jump if a is equal to b
410 * a = HMEFT, b = HMEFTI
412 INSTR_JMP_EQ, /* (a, b) = (MEFT, MEFT) or (a, b) = (H, H) */
413 INSTR_JMP_EQ_S, /* (a, b) = (MEFT, H) or (a, b) = (H, MEFT) */
414 INSTR_JMP_EQ_I, /* (a, b) = (MEFT, I) or (a, b) = (H, I) */
417 	 * Jump if a is not equal to b
418 * a = HMEFT, b = HMEFTI
420 INSTR_JMP_NEQ, /* (a, b) = (MEFT, MEFT) or (a, b) = (H, H) */
421 INSTR_JMP_NEQ_S, /* (a, b) = (MEFT, H) or (a, b) = (H, MEFT) */
422 INSTR_JMP_NEQ_I, /* (a, b) = (MEFT, I) or (a, b) = (H, I) */
425 * Jump if a is less than b
426 * a = HMEFT, b = HMEFTI
428 INSTR_JMP_LT, /* a = MEF, b = MEF */
429 INSTR_JMP_LT_MH, /* a = MEF, b = H */
430 INSTR_JMP_LT_HM, /* a = H, b = MEF */
431 INSTR_JMP_LT_HH, /* a = H, b = H */
432 INSTR_JMP_LT_MI, /* a = MEF, b = I */
433 INSTR_JMP_LT_HI, /* a = H, b = I */
436 * Jump if a is greater than b
437 * a = HMEFT, b = HMEFTI
439 INSTR_JMP_GT, /* a = MEF, b = MEF */
440 INSTR_JMP_GT_MH, /* a = MEF, b = H */
441 INSTR_JMP_GT_HM, /* a = H, b = MEF */
442 INSTR_JMP_GT_HH, /* a = H, b = H */
443 INSTR_JMP_GT_MI, /* a = MEF, b = I */
444 INSTR_JMP_GT_HI, /* a = H, b = I */
452 struct instr_operand {
467 uint8_t header_id[8];
468 uint8_t struct_id[8];
473 struct instr_hdr_validity {
481 struct instr_extern_obj {
486 struct instr_extern_func {
490 struct instr_dst_src {
491 struct instr_operand dst;
493 struct instr_operand src;
500 uint8_t header_id[8];
501 uint8_t struct_id[8];
512 struct instruction *ip;
515 struct instr_operand a;
521 struct instr_operand b;
527 enum instruction_type type;
530 struct instr_hdr_validity valid;
531 struct instr_dst_src mov;
532 struct instr_dma dma;
533 struct instr_dst_src alu;
534 struct instr_table table;
535 struct instr_extern_obj ext_obj;
536 struct instr_extern_func ext_func;
537 struct instr_jmp jmp;
541 struct instruction_data {
542 char label[RTE_SWX_NAME_SIZE];
543 char jmp_label[RTE_SWX_NAME_SIZE];
544 uint32_t n_users; /* user = jmp instruction to this instruction. */
552 TAILQ_ENTRY(action) node;
553 char name[RTE_SWX_NAME_SIZE];
554 struct struct_type *st;
555 struct instruction *instructions;
556 uint32_t n_instructions;
560 TAILQ_HEAD(action_tailq, action);
566 TAILQ_ENTRY(table_type) node;
567 char name[RTE_SWX_NAME_SIZE];
568 enum rte_swx_table_match_type match_type;
569 struct rte_swx_table_ops ops;
572 TAILQ_HEAD(table_type_tailq, table_type);
575 enum rte_swx_table_match_type match_type;
580 TAILQ_ENTRY(table) node;
581 char name[RTE_SWX_NAME_SIZE];
582 char args[RTE_SWX_NAME_SIZE];
583 struct table_type *type; /* NULL when n_fields == 0. */
586 struct match_field *fields;
588 int is_header; /* Only valid when n_fields > 0. */
589 struct header *header; /* Only valid when n_fields > 0. */
592 struct action **actions;
593 struct action *default_action;
594 uint8_t *default_action_data;
596 int default_action_is_const;
597 uint32_t action_data_size_max;
603 TAILQ_HEAD(table_tailq, table);
605 struct table_runtime {
606 rte_swx_table_lookup_t func;
616 struct rte_swx_pkt pkt;
622 /* Packet headers. */
623 struct header_runtime *headers; /* Extracted or generated headers. */
624 struct header_out_runtime *headers_out; /* Emitted headers. */
625 uint8_t *header_storage;
626 uint8_t *header_out_storage;
627 uint64_t valid_headers;
628 uint32_t n_headers_out;
630 /* Packet meta-data. */
634 struct table_runtime *tables;
635 struct rte_swx_table_state *table_state;
637 int hit; /* 0 = Miss, 1 = Hit. */
639 /* Extern objects and functions. */
640 struct extern_obj_runtime *extern_objs;
641 struct extern_func_runtime *extern_funcs;
644 struct instruction *ip;
645 struct instruction *ret;
648 #define MASK64_BIT_GET(mask, pos) ((mask) & (1LLU << (pos)))
649 #define MASK64_BIT_SET(mask, pos) ((mask) | (1LLU << (pos)))
650 #define MASK64_BIT_CLR(mask, pos) ((mask) & ~(1LLU << (pos)))
652 #define HEADER_VALID(thread, header_id) \
653 MASK64_BIT_GET((thread)->valid_headers, header_id)
655 #define ALU(thread, ip, operator) \
657 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
658 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
659 uint64_t dst64 = *dst64_ptr; \
660 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
661 uint64_t dst = dst64 & dst64_mask; \
663 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
664 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
665 uint64_t src64 = *src64_ptr; \
666 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
667 uint64_t src = src64 & src64_mask; \
669 uint64_t result = dst operator src; \
671 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
674 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
676 #define ALU_S(thread, ip, operator) \
678 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
679 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
680 uint64_t dst64 = *dst64_ptr; \
681 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
682 uint64_t dst = dst64 & dst64_mask; \
684 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
685 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
686 uint64_t src64 = *src64_ptr; \
687 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
689 uint64_t result = dst operator src; \
691 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
696 #define ALU_HM(thread, ip, operator) \
698 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
699 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
700 uint64_t dst64 = *dst64_ptr; \
701 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
702 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
704 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
705 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
706 uint64_t src64 = *src64_ptr; \
707 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
708 uint64_t src = src64 & src64_mask; \
710 uint64_t result = dst operator src; \
711 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
713 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
716 #define ALU_HH(thread, ip, operator) \
718 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
719 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
720 uint64_t dst64 = *dst64_ptr; \
721 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
722 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
724 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
725 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
726 uint64_t src64 = *src64_ptr; \
727 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
729 uint64_t result = dst operator src; \
730 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
732 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
744 #define ALU_I(thread, ip, operator) \
746 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
747 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
748 uint64_t dst64 = *dst64_ptr; \
749 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
750 uint64_t dst = dst64 & dst64_mask; \
752 uint64_t src = (ip)->alu.src_val; \
754 uint64_t result = dst operator src; \
756 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
761 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
763 #define ALU_HI(thread, ip, operator) \
765 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
766 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
767 uint64_t dst64 = *dst64_ptr; \
768 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
769 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
771 uint64_t src = (ip)->alu.src_val; \
773 uint64_t result = dst operator src; \
774 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
776 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
785 #define MOV(thread, ip) \
787 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
788 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
789 uint64_t dst64 = *dst64_ptr; \
790 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
792 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
793 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
794 uint64_t src64 = *src64_ptr; \
795 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \
796 uint64_t src = src64 & src64_mask; \
798 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
801 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
803 #define MOV_S(thread, ip) \
805 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
806 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
807 uint64_t dst64 = *dst64_ptr; \
808 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
810 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
811 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
812 uint64_t src64 = *src64_ptr; \
813 uint64_t src = ntoh64(src64) >> (64 - (ip)->mov.src.n_bits); \
815 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
824 #define MOV_I(thread, ip) \
826 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
827 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
828 uint64_t dst64 = *dst64_ptr; \
829 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
831 uint64_t src = (ip)->mov.src_val; \
833 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
836 #define JMP_CMP(thread, ip, operator) \
838 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
839 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
840 uint64_t a64 = *a64_ptr; \
841 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
842 uint64_t a = a64 & a64_mask; \
844 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
845 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
846 uint64_t b64 = *b64_ptr; \
847 uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits); \
848 uint64_t b = b64 & b64_mask; \
850 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
853 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
855 #define JMP_CMP_S(thread, ip, operator) \
857 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
858 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
859 uint64_t a64 = *a64_ptr; \
860 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
861 uint64_t a = a64 & a64_mask; \
863 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
864 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
865 uint64_t b64 = *b64_ptr; \
866 uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits); \
868 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
871 #define JMP_CMP_MH JMP_CMP_S
873 #define JMP_CMP_HM(thread, ip, operator) \
875 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
876 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
877 uint64_t a64 = *a64_ptr; \
878 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
880 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
881 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
882 uint64_t b64 = *b64_ptr; \
883 uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits); \
884 uint64_t b = b64 & b64_mask; \
886 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
889 #define JMP_CMP_HH(thread, ip, operator) \
891 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
892 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
893 uint64_t a64 = *a64_ptr; \
894 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
896 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
897 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
898 uint64_t b64 = *b64_ptr; \
899 uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits); \
901 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
906 #define JMP_CMP_S JMP_CMP
907 #define JMP_CMP_MH JMP_CMP
908 #define JMP_CMP_HM JMP_CMP
909 #define JMP_CMP_HH JMP_CMP
913 #define JMP_CMP_I(thread, ip, operator) \
915 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
916 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
917 uint64_t a64 = *a64_ptr; \
918 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
919 uint64_t a = a64 & a64_mask; \
921 uint64_t b = (ip)->jmp.b_val; \
923 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
926 #define JMP_CMP_MI JMP_CMP_I
928 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
930 #define JMP_CMP_HI(thread, ip, operator) \
932 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
933 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
934 uint64_t a64 = *a64_ptr; \
935 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
937 uint64_t b = (ip)->jmp.b_val; \
939 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
944 #define JMP_CMP_HI JMP_CMP_I
948 #define METADATA_READ(thread, offset, n_bits) \
950 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
951 uint64_t m64 = *m64_ptr; \
952 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
956 #define METADATA_WRITE(thread, offset, n_bits, value) \
958 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
959 uint64_t m64 = *m64_ptr; \
960 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
962 uint64_t m_new = value; \
964 *m64_ptr = (m64 & ~m64_mask) | (m_new & m64_mask); \
967 #ifndef RTE_SWX_PIPELINE_THREADS_MAX
968 #define RTE_SWX_PIPELINE_THREADS_MAX 16
971 struct rte_swx_pipeline {
972 struct struct_type_tailq struct_types;
973 struct port_in_type_tailq port_in_types;
974 struct port_in_tailq ports_in;
975 struct port_out_type_tailq port_out_types;
976 struct port_out_tailq ports_out;
977 struct extern_type_tailq extern_types;
978 struct extern_obj_tailq extern_objs;
979 struct extern_func_tailq extern_funcs;
980 struct header_tailq headers;
981 struct struct_type *metadata_st;
982 uint32_t metadata_struct_id;
983 struct action_tailq actions;
984 struct table_type_tailq table_types;
985 struct table_tailq tables;
987 struct port_in_runtime *in;
988 struct port_out_runtime *out;
989 struct instruction **action_instructions;
990 struct rte_swx_table_state *table_state;
991 struct instruction *instructions;
992 struct thread threads[RTE_SWX_PIPELINE_THREADS_MAX];
996 uint32_t n_ports_out;
997 uint32_t n_extern_objs;
998 uint32_t n_extern_funcs;
1004 uint32_t n_instructions;
1012 static struct struct_type *
1013 struct_type_find(struct rte_swx_pipeline *p, const char *name)
1015 struct struct_type *elem;
1017 TAILQ_FOREACH(elem, &p->struct_types, node)
1018 if (strcmp(elem->name, name) == 0)
1024 static struct field *
1025 struct_type_field_find(struct struct_type *st, const char *name)
1029 for (i = 0; i < st->n_fields; i++) {
1030 struct field *f = &st->fields[i];
1032 if (strcmp(f->name, name) == 0)
1040 rte_swx_pipeline_struct_type_register(struct rte_swx_pipeline *p,
1042 struct rte_swx_field_params *fields,
1045 struct struct_type *st;
1049 CHECK_NAME(name, EINVAL);
1050 CHECK(fields, EINVAL);
1051 CHECK(n_fields, EINVAL);
1053 for (i = 0; i < n_fields; i++) {
1054 struct rte_swx_field_params *f = &fields[i];
1057 CHECK_NAME(f->name, EINVAL);
1058 CHECK(f->n_bits, EINVAL);
1059 CHECK(f->n_bits <= 64, EINVAL);
1060 CHECK((f->n_bits & 7) == 0, EINVAL);
1062 for (j = 0; j < i; j++) {
1063 struct rte_swx_field_params *f_prev = &fields[j];
1065 CHECK(strcmp(f->name, f_prev->name), EINVAL);
1069 CHECK(!struct_type_find(p, name), EEXIST);
1071 /* Node allocation. */
1072 st = calloc(1, sizeof(struct struct_type));
1075 st->fields = calloc(n_fields, sizeof(struct field));
1081 /* Node initialization. */
1082 strcpy(st->name, name);
1083 for (i = 0; i < n_fields; i++) {
1084 struct field *dst = &st->fields[i];
1085 struct rte_swx_field_params *src = &fields[i];
1087 strcpy(dst->name, src->name);
1088 dst->n_bits = src->n_bits;
1089 dst->offset = st->n_bits;
1091 st->n_bits += src->n_bits;
1093 st->n_fields = n_fields;
1095 /* Node add to tailq. */
1096 TAILQ_INSERT_TAIL(&p->struct_types, st, node);
1102 struct_build(struct rte_swx_pipeline *p)
1106 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1107 struct thread *t = &p->threads[i];
1109 t->structs = calloc(p->n_structs, sizeof(uint8_t *));
1110 CHECK(t->structs, ENOMEM);
1117 struct_build_free(struct rte_swx_pipeline *p)
1121 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1122 struct thread *t = &p->threads[i];
1130 struct_free(struct rte_swx_pipeline *p)
1132 struct_build_free(p);
1136 struct struct_type *elem;
1138 elem = TAILQ_FIRST(&p->struct_types);
1142 TAILQ_REMOVE(&p->struct_types, elem, node);
1151 static struct port_in_type *
1152 port_in_type_find(struct rte_swx_pipeline *p, const char *name)
1154 struct port_in_type *elem;
1159 TAILQ_FOREACH(elem, &p->port_in_types, node)
1160 if (strcmp(elem->name, name) == 0)
1167 rte_swx_pipeline_port_in_type_register(struct rte_swx_pipeline *p,
1169 struct rte_swx_port_in_ops *ops)
1171 struct port_in_type *elem;
1174 CHECK_NAME(name, EINVAL);
1176 CHECK(ops->create, EINVAL);
1177 CHECK(ops->free, EINVAL);
1178 CHECK(ops->pkt_rx, EINVAL);
1179 CHECK(ops->stats_read, EINVAL);
1181 CHECK(!port_in_type_find(p, name), EEXIST);
1183 /* Node allocation. */
1184 elem = calloc(1, sizeof(struct port_in_type));
1185 CHECK(elem, ENOMEM);
1187 /* Node initialization. */
1188 strcpy(elem->name, name);
1189 memcpy(&elem->ops, ops, sizeof(*ops));
1191 /* Node add to tailq. */
1192 TAILQ_INSERT_TAIL(&p->port_in_types, elem, node);
1197 static struct port_in *
1198 port_in_find(struct rte_swx_pipeline *p, uint32_t port_id)
1200 struct port_in *port;
1202 TAILQ_FOREACH(port, &p->ports_in, node)
1203 if (port->id == port_id)
1210 rte_swx_pipeline_port_in_config(struct rte_swx_pipeline *p,
1212 const char *port_type_name,
1215 struct port_in_type *type = NULL;
1216 struct port_in *port = NULL;
1221 CHECK(!port_in_find(p, port_id), EINVAL);
1223 CHECK_NAME(port_type_name, EINVAL);
1224 type = port_in_type_find(p, port_type_name);
1225 CHECK(type, EINVAL);
1227 obj = type->ops.create(args);
1230 /* Node allocation. */
1231 port = calloc(1, sizeof(struct port_in));
1232 CHECK(port, ENOMEM);
1234 /* Node initialization. */
1239 /* Node add to tailq. */
1240 TAILQ_INSERT_TAIL(&p->ports_in, port, node);
1241 if (p->n_ports_in < port_id + 1)
1242 p->n_ports_in = port_id + 1;
1248 port_in_build(struct rte_swx_pipeline *p)
1250 struct port_in *port;
1253 CHECK(p->n_ports_in, EINVAL);
1254 CHECK(rte_is_power_of_2(p->n_ports_in), EINVAL);
1256 for (i = 0; i < p->n_ports_in; i++)
1257 CHECK(port_in_find(p, i), EINVAL);
1259 p->in = calloc(p->n_ports_in, sizeof(struct port_in_runtime));
1260 CHECK(p->in, ENOMEM);
1262 TAILQ_FOREACH(port, &p->ports_in, node) {
1263 struct port_in_runtime *in = &p->in[port->id];
1265 in->pkt_rx = port->type->ops.pkt_rx;
1266 in->obj = port->obj;
1273 port_in_build_free(struct rte_swx_pipeline *p)
1280 port_in_free(struct rte_swx_pipeline *p)
1282 port_in_build_free(p);
1286 struct port_in *port;
1288 port = TAILQ_FIRST(&p->ports_in);
1292 TAILQ_REMOVE(&p->ports_in, port, node);
1293 port->type->ops.free(port->obj);
1297 /* Input port types. */
1299 struct port_in_type *elem;
1301 elem = TAILQ_FIRST(&p->port_in_types);
1305 TAILQ_REMOVE(&p->port_in_types, elem, node);
1313 static struct port_out_type *
1314 port_out_type_find(struct rte_swx_pipeline *p, const char *name)
1316 struct port_out_type *elem;
1321 TAILQ_FOREACH(elem, &p->port_out_types, node)
1322 if (!strcmp(elem->name, name))
1329 rte_swx_pipeline_port_out_type_register(struct rte_swx_pipeline *p,
1331 struct rte_swx_port_out_ops *ops)
1333 struct port_out_type *elem;
1336 CHECK_NAME(name, EINVAL);
1338 CHECK(ops->create, EINVAL);
1339 CHECK(ops->free, EINVAL);
1340 CHECK(ops->pkt_tx, EINVAL);
1341 CHECK(ops->stats_read, EINVAL);
1343 CHECK(!port_out_type_find(p, name), EEXIST);
1345 /* Node allocation. */
1346 elem = calloc(1, sizeof(struct port_out_type));
1347 CHECK(elem, ENOMEM);
1349 /* Node initialization. */
1350 strcpy(elem->name, name);
1351 memcpy(&elem->ops, ops, sizeof(*ops));
1353 /* Node add to tailq. */
1354 TAILQ_INSERT_TAIL(&p->port_out_types, elem, node);
1359 static struct port_out *
1360 port_out_find(struct rte_swx_pipeline *p, uint32_t port_id)
1362 struct port_out *port;
1364 TAILQ_FOREACH(port, &p->ports_out, node)
1365 if (port->id == port_id)
1372 rte_swx_pipeline_port_out_config(struct rte_swx_pipeline *p,
1374 const char *port_type_name,
1377 struct port_out_type *type = NULL;
1378 struct port_out *port = NULL;
1383 CHECK(!port_out_find(p, port_id), EINVAL);
1385 CHECK_NAME(port_type_name, EINVAL);
1386 type = port_out_type_find(p, port_type_name);
1387 CHECK(type, EINVAL);
1389 obj = type->ops.create(args);
1392 /* Node allocation. */
1393 port = calloc(1, sizeof(struct port_out));
1394 CHECK(port, ENOMEM);
1396 /* Node initialization. */
1401 /* Node add to tailq. */
1402 TAILQ_INSERT_TAIL(&p->ports_out, port, node);
1403 if (p->n_ports_out < port_id + 1)
1404 p->n_ports_out = port_id + 1;
1410 port_out_build(struct rte_swx_pipeline *p)
1412 struct port_out *port;
1415 CHECK(p->n_ports_out, EINVAL);
1417 for (i = 0; i < p->n_ports_out; i++)
1418 CHECK(port_out_find(p, i), EINVAL);
1420 p->out = calloc(p->n_ports_out, sizeof(struct port_out_runtime));
1421 CHECK(p->out, ENOMEM);
1423 TAILQ_FOREACH(port, &p->ports_out, node) {
1424 struct port_out_runtime *out = &p->out[port->id];
1426 out->pkt_tx = port->type->ops.pkt_tx;
1427 out->flush = port->type->ops.flush;
1428 out->obj = port->obj;
1435 port_out_build_free(struct rte_swx_pipeline *p)
1442 port_out_free(struct rte_swx_pipeline *p)
1444 port_out_build_free(p);
1448 struct port_out *port;
1450 port = TAILQ_FIRST(&p->ports_out);
1454 TAILQ_REMOVE(&p->ports_out, port, node);
1455 port->type->ops.free(port->obj);
1459 /* Output port types. */
1461 struct port_out_type *elem;
1463 elem = TAILQ_FIRST(&p->port_out_types);
1467 TAILQ_REMOVE(&p->port_out_types, elem, node);
1475 static struct extern_type *
1476 extern_type_find(struct rte_swx_pipeline *p, const char *name)
1478 struct extern_type *elem;
1480 TAILQ_FOREACH(elem, &p->extern_types, node)
1481 if (strcmp(elem->name, name) == 0)
1487 static struct extern_type_member_func *
1488 extern_type_member_func_find(struct extern_type *type, const char *name)
1490 struct extern_type_member_func *elem;
1492 TAILQ_FOREACH(elem, &type->funcs, node)
1493 if (strcmp(elem->name, name) == 0)
1499 static struct extern_obj *
1500 extern_obj_find(struct rte_swx_pipeline *p, const char *name)
1502 struct extern_obj *elem;
1504 TAILQ_FOREACH(elem, &p->extern_objs, node)
1505 if (strcmp(elem->name, name) == 0)
1511 static struct extern_type_member_func *
1512 extern_obj_member_func_parse(struct rte_swx_pipeline *p,
1514 struct extern_obj **obj)
1516 struct extern_obj *object;
1517 struct extern_type_member_func *func;
1518 char *object_name, *func_name;
1520 if (name[0] != 'e' || name[1] != '.')
1523 object_name = strdup(&name[2]);
1527 func_name = strchr(object_name, '.');
1536 object = extern_obj_find(p, object_name);
1542 func = extern_type_member_func_find(object->type, func_name);
1555 static struct field *
1556 extern_obj_mailbox_field_parse(struct rte_swx_pipeline *p,
1558 struct extern_obj **object)
1560 struct extern_obj *obj;
1562 char *obj_name, *field_name;
1564 if ((name[0] != 'e') || (name[1] != '.'))
1567 obj_name = strdup(&name[2]);
1571 field_name = strchr(obj_name, '.');
1580 obj = extern_obj_find(p, obj_name);
1586 f = struct_type_field_find(obj->type->mailbox_struct_type, field_name);
1600 rte_swx_pipeline_extern_type_register(struct rte_swx_pipeline *p,
1602 const char *mailbox_struct_type_name,
1603 rte_swx_extern_type_constructor_t constructor,
1604 rte_swx_extern_type_destructor_t destructor)
1606 struct extern_type *elem;
1607 struct struct_type *mailbox_struct_type;
1611 CHECK_NAME(name, EINVAL);
1612 CHECK(!extern_type_find(p, name), EEXIST);
1614 CHECK_NAME(mailbox_struct_type_name, EINVAL);
1615 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
1616 CHECK(mailbox_struct_type, EINVAL);
1618 CHECK(constructor, EINVAL);
1619 CHECK(destructor, EINVAL);
1621 /* Node allocation. */
1622 elem = calloc(1, sizeof(struct extern_type));
1623 CHECK(elem, ENOMEM);
1625 /* Node initialization. */
1626 strcpy(elem->name, name);
1627 elem->mailbox_struct_type = mailbox_struct_type;
1628 elem->constructor = constructor;
1629 elem->destructor = destructor;
1630 TAILQ_INIT(&elem->funcs);
1632 /* Node add to tailq. */
1633 TAILQ_INSERT_TAIL(&p->extern_types, elem, node);
/*
 * Register a member function with a previously registered extern type.
 *
 * The function slot id is assigned sequentially (type->n_funcs), which is
 * also the index used at run-time to dispatch into the per-object funcs[]
 * array. Fails with ENOSPC once RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX
 * functions have been registered for the type.
 */
1639 rte_swx_pipeline_extern_type_member_func_register(struct rte_swx_pipeline *p,
1640 	const char *extern_type_name,
1642 	rte_swx_extern_type_member_func_t member_func)
1644 	struct extern_type *type;
1645 	struct extern_type_member_func *type_member;
1649 	CHECK_NAME(extern_type_name, EINVAL);
1650 	type = extern_type_find(p, extern_type_name);
1651 	CHECK(type, EINVAL);
1652 	CHECK(type->n_funcs < RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX, ENOSPC);
/* Member function names are unique per extern type. */
1654 	CHECK_NAME(name, EINVAL);
1655 	CHECK(!extern_type_member_func_find(type, name), EEXIST);
1657 	CHECK(member_func, EINVAL);
1659 	/* Node allocation. */
1660 	type_member = calloc(1, sizeof(struct extern_type_member_func));
1661 	CHECK(type_member, ENOMEM);
1663 	/* Node initialization. */
1664 	strcpy(type_member->name, name);
1665 	type_member->func = member_func;
1666 	type_member->id = type->n_funcs;
1668 	/* Node add to tailq. */
1669 	TAILQ_INSERT_TAIL(&type->funcs, type_member, node);
/*
 * Instantiate an extern object of a registered extern type.
 *
 * Invokes the type's constructor with the user-supplied args string and,
 * on success, records the object handle plus the struct/object ids used
 * by the run-time (struct_id indexes the per-thread structs[] array).
 * NOTE(review): the constructor-failure cleanup lines are elided in this
 * extract — confirm obj is freed on failure in the full source.
 */
1676 rte_swx_pipeline_extern_object_config(struct rte_swx_pipeline *p,
1677 	const char *extern_type_name,
1681 	struct extern_type *type;
1682 	struct extern_obj *obj;
1687 	CHECK_NAME(extern_type_name, EINVAL);
1688 	type = extern_type_find(p, extern_type_name);
1689 	CHECK(type, EINVAL);
/* Object names are unique across the pipeline. */
1691 	CHECK_NAME(name, EINVAL);
1692 	CHECK(!extern_obj_find(p, name), EEXIST);
1694 	/* Node allocation. */
1695 	obj = calloc(1, sizeof(struct extern_obj));
1698 	/* Object construction. */
1699 	obj_handle = type->constructor(args);
1705 	/* Node initialization. */
1706 	strcpy(obj->name, name);
1708 	obj->obj = obj_handle;
1709 	obj->struct_id = p->n_structs;
1710 	obj->id = p->n_extern_objs;
1712 	/* Node add to tailq. */
1713 	TAILQ_INSERT_TAIL(&p->extern_objs, obj, node);
/*
 * Build-time setup of the per-thread extern object run-time state.
 *
 * For each thread: allocate the extern_objs[] run-time array, then for
 * every registered extern object allocate its mailbox (sized from the
 * mailbox struct type's bit width), populate the member-function dispatch
 * table, and expose the mailbox through the thread's structs[] array so
 * instructions can address mailbox fields by struct_id.
 */
1721 extern_obj_build(struct rte_swx_pipeline *p)
1725 	for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1726 		struct thread *t = &p->threads[i];
1727 		struct extern_obj *obj;
1729 		t->extern_objs = calloc(p->n_extern_objs,
1730 			sizeof(struct extern_obj_runtime));
1731 		CHECK(t->extern_objs, ENOMEM);
1733 		TAILQ_FOREACH(obj, &p->extern_objs, node) {
1734 			struct extern_obj_runtime *r =
1735 				&t->extern_objs[obj->id];
1736 			struct extern_type_member_func *func;
/* Mailbox size in bytes, derived from the struct type's bit width. */
1737 			uint32_t mailbox_size =
1738 				obj->type->mailbox_struct_type->n_bits / 8;
1742 			r->mailbox = calloc(1, mailbox_size);
1743 			CHECK(r->mailbox, ENOMEM);
/* Flatten the member function list into the id-indexed dispatch array. */
1745 			TAILQ_FOREACH(func, &obj->type->funcs, node)
1746 				r->funcs[func->id] = func->func;
1748 			t->structs[obj->struct_id] = r->mailbox;
/*
 * Free the per-thread extern object run-time state created by
 * extern_obj_build(). Safe to call when a thread's array was never
 * allocated (the !t->extern_objs guard skips it).
 */
1756 extern_obj_build_free(struct rte_swx_pipeline *p)
1760 	for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1761 		struct thread *t = &p->threads[i];
1764 		if (!t->extern_objs)
1767 		for (j = 0; j < p->n_extern_objs; j++) {
1768 			struct extern_obj_runtime *r = &t->extern_objs[j];
1773 		free(t->extern_objs);
1774 		t->extern_objs = NULL;
/*
 * Full teardown of extern state: free the per-thread run-time first, then
 * destroy every extern object (via its type's destructor) and finally every
 * extern type together with its member-function list.
 */
1779 extern_obj_free(struct rte_swx_pipeline *p)
1781 	extern_obj_build_free(p);
1783 	/* Extern objects. */
1785 		struct extern_obj *elem;
1787 		elem = TAILQ_FIRST(&p->extern_objs);
1791 		TAILQ_REMOVE(&p->extern_objs, elem, node);
/* Let the type-specific destructor release the object handle. */
1793 		elem->type->destructor(elem->obj);
/* Extern types and their member functions. */
1799 		struct extern_type *elem;
1801 		elem = TAILQ_FIRST(&p->extern_types);
1805 		TAILQ_REMOVE(&p->extern_types, elem, node);
1808 			struct extern_type_member_func *func;
1810 			func = TAILQ_FIRST(&elem->funcs);
1814 			TAILQ_REMOVE(&elem->funcs, func, node);
/* Look up an extern function by exact name; linear scan of the tailq. */
1825 static struct extern_func *
1826 extern_func_find(struct rte_swx_pipeline *p, const char *name)
1828 	struct extern_func *elem;
1830 	TAILQ_FOREACH(elem, &p->extern_funcs, node)
1831 		if (strcmp(elem->name, name) == 0)
/* Parse an "f.<func>" token: strip the "f." prefix and look up the rest. */
1837 static struct extern_func *
1838 extern_func_parse(struct rte_swx_pipeline *p,
1841 	if (name[0] != 'f' || name[1] != '.')
1844 	return extern_func_find(p, &name[2]);
/*
 * Resolve "f.<func>.<field>" into a field of the extern function's mailbox
 * struct; on success *function is set to the owning extern function.
 */
1847 static struct field *
1848 extern_func_mailbox_field_parse(struct rte_swx_pipeline *p,
1850 	struct extern_func **function)
1852 	struct extern_func *func;
1854 	char *func_name, *field_name;
1856 	if ((name[0] != 'f') || (name[1] != '.'))
/* Private copy so the '.' separator can be split in place. */
1859 	func_name = strdup(&name[2]);
1863 	field_name = strchr(func_name, '.');
1872 	func = extern_func_find(p, func_name);
1878 	f = struct_type_field_find(func->mailbox_struct_type, field_name);
/*
 * Register an extern function with the pipeline.
 *
 * Validates that the function name is unique, that the named mailbox
 * struct type is already registered and that the callback is non-NULL;
 * then allocates the node, assigns the struct/function ids used by the
 * run-time and appends it to the pipeline's extern function list.
 *
 * Fix: the allocation check used to test "func" (already validated
 * non-NULL above), leaving a NULL "f" to be dereferenced by strcpy()
 * when calloc() failed; it must test "f" instead.
 */
1892 rte_swx_pipeline_extern_func_register(struct rte_swx_pipeline *p,
1894 	const char *mailbox_struct_type_name,
1895 	rte_swx_extern_func_t func)
1897 	struct extern_func *f;
1898 	struct struct_type *mailbox_struct_type;
1902 	CHECK_NAME(name, EINVAL);
1903 	CHECK(!extern_func_find(p, name), EEXIST);
1905 	CHECK_NAME(mailbox_struct_type_name, EINVAL);
1906 	mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
1907 	CHECK(mailbox_struct_type, EINVAL);
1909 	CHECK(func, EINVAL);
1911 	/* Node allocation. */
1912 	f = calloc(1, sizeof(struct extern_func));
1913 	CHECK(f, ENOMEM);
1915 	/* Node initialization. */
1916 	strcpy(f->name, name);
1917 	f->mailbox_struct_type = mailbox_struct_type;
1919 	f->struct_id = p->n_structs;
1920 	f->id = p->n_extern_funcs;
1922 	/* Node add to tailq. */
1923 	TAILQ_INSERT_TAIL(&p->extern_funcs, f, node);
1924 	p->n_extern_funcs++;
/*
 * Build-time setup of the per-thread extern function run-time state:
 * for each thread, allocate the extern_funcs[] array and, for every
 * registered function, its mailbox; expose the mailbox through the
 * thread's structs[] array at the function's struct_id.
 */
1931 extern_func_build(struct rte_swx_pipeline *p)
1935 	for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1936 		struct thread *t = &p->threads[i];
1937 		struct extern_func *func;
1939 		/* Memory allocation. */
1940 		t->extern_funcs = calloc(p->n_extern_funcs,
1941 			sizeof(struct extern_func_runtime));
1942 		CHECK(t->extern_funcs, ENOMEM);
1944 		/* Extern function. */
1945 		TAILQ_FOREACH(func, &p->extern_funcs, node) {
1946 			struct extern_func_runtime *r =
1947 				&t->extern_funcs[func->id];
/* Mailbox size in bytes, from the struct type's bit width. */
1948 			uint32_t mailbox_size =
1949 				func->mailbox_struct_type->n_bits / 8;
1951 			r->func = func->func;
1953 			r->mailbox = calloc(1, mailbox_size);
1954 			CHECK(r->mailbox, ENOMEM);
1956 			t->structs[func->struct_id] = r->mailbox;
/*
 * Free the per-thread extern function run-time state created by
 * extern_func_build(); skips threads whose array was never allocated.
 */
1964 extern_func_build_free(struct rte_swx_pipeline *p)
1968 	for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1969 		struct thread *t = &p->threads[i];
1972 		if (!t->extern_funcs)
1975 		for (j = 0; j < p->n_extern_funcs; j++) {
1976 			struct extern_func_runtime *r = &t->extern_funcs[j];
1981 		free(t->extern_funcs);
1982 		t->extern_funcs = NULL;
/* Full teardown: run-time state first, then the registration list. */
1987 extern_func_free(struct rte_swx_pipeline *p)
1989 	extern_func_build_free(p);
1992 		struct extern_func *elem;
1994 		elem = TAILQ_FIRST(&p->extern_funcs);
1998 		TAILQ_REMOVE(&p->extern_funcs, elem, node);
/* Look up a header by exact name; linear scan of the headers tailq. */
2006 static struct header *
2007 header_find(struct rte_swx_pipeline *p, const char *name)
2009 	struct header *elem;
2011 	TAILQ_FOREACH(elem, &p->headers, node)
2012 		if (strcmp(elem->name, name) == 0)
/* Parse an "h.<header>" token: strip the "h." prefix and look up the rest. */
2018 static struct header *
2019 header_parse(struct rte_swx_pipeline *p,
2022 	if (name[0] != 'h' || name[1] != '.')
2025 	return header_find(p, &name[2]);
/*
 * Resolve "h.<header>.<field>" into a field of the header's struct type;
 * on success *header is set to the owning header.
 */
2028 static struct field *
2029 header_field_parse(struct rte_swx_pipeline *p,
2031 	struct header **header)
2035 	char *header_name, *field_name;
2037 	if ((name[0] != 'h') || (name[1] != '.'))
/* Private copy so the '.' separator can be split in place. */
2040 	header_name = strdup(&name[2]);
2044 	field_name = strchr(header_name, '.');
2053 	h = header_find(p, header_name);
2059 	f = struct_type_field_find(h->st, field_name);
/*
 * Register a packet header with the pipeline, backed by a previously
 * registered struct type. The number of headers is capped by the bit
 * width of the per-thread valid_headers mask (one valid bit per header).
 */
2073 rte_swx_pipeline_packet_header_register(struct rte_swx_pipeline *p,
2075 	const char *struct_type_name)
2077 	struct struct_type *st;
2079 	size_t n_headers_max;
2082 	CHECK_NAME(name, EINVAL);
2083 	CHECK_NAME(struct_type_name, EINVAL);
2085 	CHECK(!header_find(p, name), EEXIST);
2087 	st = struct_type_find(p, struct_type_name);
/* Each header consumes one bit of the valid_headers bit mask. */
2090 	n_headers_max = RTE_SIZEOF_FIELD(struct thread, valid_headers) * 8;
2091 	CHECK(p->n_headers < n_headers_max, ENOSPC);
2093 	/* Node allocation. */
2094 	h = calloc(1, sizeof(struct header));
2097 	/* Node initialization. */
2098 	strcpy(h->name, name);
2100 	h->struct_id = p->n_structs;
2101 	h->id = p->n_headers;
2103 	/* Node add to tailq. */
2104 	TAILQ_INSERT_TAIL(&p->headers, h, node);
/*
 * Build-time setup of per-thread header storage.
 *
 * First pass computes the total byte size of all registered headers; then
 * for each thread it allocates the header run-time arrays plus two flat
 * storage areas (in and out), and carves per-header slices out of the "in"
 * storage, publishing each slice through ptr0 and the structs[] array.
 */
2112 header_build(struct rte_swx_pipeline *p)
2115 	uint32_t n_bytes = 0, i;
/* Total storage needed for all headers back to back. */
2117 	TAILQ_FOREACH(h, &p->headers, node) {
2118 		n_bytes += h->st->n_bits / 8;
2121 	for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2122 		struct thread *t = &p->threads[i];
2123 		uint32_t offset = 0;
2125 		t->headers = calloc(p->n_headers,
2126 			sizeof(struct header_runtime));
2127 		CHECK(t->headers, ENOMEM);
2129 		t->headers_out = calloc(p->n_headers,
2130 			sizeof(struct header_out_runtime));
2131 		CHECK(t->headers_out, ENOMEM);
2133 		t->header_storage = calloc(1, n_bytes);
2134 		CHECK(t->header_storage, ENOMEM);
2136 		t->header_out_storage = calloc(1, n_bytes);
2137 		CHECK(t->header_out_storage, ENOMEM);
/* Carve a contiguous slice per header out of the flat storage. */
2139 		TAILQ_FOREACH(h, &p->headers, node) {
2140 			uint8_t *header_storage;
2142 			header_storage = &t->header_storage[offset];
2143 			offset += h->st->n_bits / 8;
2145 			t->headers[h->id].ptr0 = header_storage;
2146 			t->structs[h->struct_id] = header_storage;
/* Free the per-thread header run-time arrays and storage areas. */
2154 header_build_free(struct rte_swx_pipeline *p)
2158 	for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2159 		struct thread *t = &p->threads[i];
2161 		free(t->headers_out);
2162 		t->headers_out = NULL;
2167 		free(t->header_out_storage);
2168 		t->header_out_storage = NULL;
2170 		free(t->header_storage);
2171 		t->header_storage = NULL;
/* Full teardown: build state first, then the header registration list. */
2176 header_free(struct rte_swx_pipeline *p)
2178 	header_build_free(p);
2181 		struct header *elem;
2183 		elem = TAILQ_FIRST(&p->headers);
2187 		TAILQ_REMOVE(&p->headers, elem, node);
/*
 * Resolve an "m.<field>" token into a field of the pipeline's metadata
 * struct type; requires the metadata struct type to be registered first.
 */
2195 static struct field *
2196 metadata_field_parse(struct rte_swx_pipeline *p, const char *name)
2198 	if (!p->metadata_st)
2201 	if (name[0] != 'm' || name[1] != '.')
2204 	return struct_type_field_find(p->metadata_st, &name[2]);
/*
 * Bind a previously registered struct type as the pipeline metadata
 * layout. Only one metadata struct type may be registered per pipeline.
 */
2208 rte_swx_pipeline_packet_metadata_register(struct rte_swx_pipeline *p,
2209 	const char *struct_type_name)
2211 	struct struct_type *st = NULL;
2215 	CHECK_NAME(struct_type_name, EINVAL);
2216 	st = struct_type_find(p, struct_type_name);
2218 	CHECK(!p->metadata_st, EINVAL);
2220 	p->metadata_st = st;
2221 	p->metadata_struct_id = p->n_structs;
/*
 * Build-time setup of per-thread metadata: allocate one zeroed buffer per
 * thread, sized from the metadata struct type, and publish it through the
 * thread's structs[] array at the metadata struct id.
 */
2229 metadata_build(struct rte_swx_pipeline *p)
2231 	uint32_t n_bytes = p->metadata_st->n_bits / 8;
2234 	/* Thread-level initialization. */
2235 	for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2236 		struct thread *t = &p->threads[i];
2239 		metadata = calloc(1, n_bytes);
2240 		CHECK(metadata, ENOMEM);
2242 		t->metadata = metadata;
2243 		t->structs[p->metadata_struct_id] = metadata;
/* Free the per-thread metadata buffers. */
2250 metadata_build_free(struct rte_swx_pipeline *p)
2254 	for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2255 		struct thread *t = &p->threads[i];
/* Full metadata teardown; currently just the build state. */
2263 metadata_free(struct rte_swx_pipeline *p)
2265 	metadata_build_free(p);
/*
 * Predicate: is this instruction any flavor of jump? Used by the
 * instruction translator/optimizer to identify branch targets.
 */
2272 instruction_is_jmp(struct instruction *instr)
2274 	switch (instr->type) {
2276 	case INSTR_JMP_VALID:
2277 	case INSTR_JMP_INVALID:
2279 	case INSTR_JMP_MISS:
2280 	case INSTR_JMP_ACTION_HIT:
2281 	case INSTR_JMP_ACTION_MISS:
2283 	case INSTR_JMP_EQ_S:
2284 	case INSTR_JMP_EQ_I:
2286 	case INSTR_JMP_NEQ_S:
2287 	case INSTR_JMP_NEQ_I:
2289 	case INSTR_JMP_LT_MH:
2290 	case INSTR_JMP_LT_HM:
2291 	case INSTR_JMP_LT_HH:
2292 	case INSTR_JMP_LT_MI:
2293 	case INSTR_JMP_LT_HI:
2295 	case INSTR_JMP_GT_MH:
2296 	case INSTR_JMP_GT_HM:
2297 	case INSTR_JMP_GT_HH:
2298 	case INSTR_JMP_GT_MI:
2299 	case INSTR_JMP_GT_HI:
/* Forward declaration: resolve "t.<field>" against an action's arg struct. */
2307 static struct field *
2308 action_field_parse(struct action *action, const char *name);
/*
 * Generic operand resolver: dispatch a field name to the matching
 * namespace — "h." header field, "m." metadata field, action arg,
 * "e." extern object mailbox, or "f." extern function mailbox — and
 * report the owning struct id through *struct_id.
 */
2310 static struct field *
2311 struct_field_parse(struct rte_swx_pipeline *p,
2312 	struct action *action,
2314 	uint32_t *struct_id)
2321 		struct header *header;
2323 		f = header_field_parse(p, name, &header);
2327 		*struct_id = header->struct_id;
2333 		f = metadata_field_parse(p, name);
2337 		*struct_id = p->metadata_struct_id;
/* Action arguments live in structs[0] (the action data). */
2346 		f = action_field_parse(action, name);
2356 		struct extern_obj *obj;
2358 		f = extern_obj_mailbox_field_parse(p, name, &obj);
2362 		*struct_id = obj->struct_id;
2368 		struct extern_func *func;
2370 		f = extern_func_mailbox_field_parse(p, name, &func);
2374 		*struct_id = func->struct_id;
/*
 * Advance to the next input port, wrapping around. The mask trick
 * assumes n_ports_in is a power of two — TODO confirm this invariant
 * is enforced at port configuration time.
 */
2384 pipeline_port_inc(struct rte_swx_pipeline *p)
2386 	p->port_id = (p->port_id + 1) & (p->n_ports_in - 1);
/* Reset a thread's instruction pointer to the start of the program. */
2390 thread_ip_reset(struct rte_swx_pipeline *p, struct thread *t)
2392 	t->ip = p->instructions;
/* Set a thread's instruction pointer to an arbitrary instruction. */
2396 thread_ip_set(struct thread *t, struct instruction *ip)
/* Branch the thread into the instruction stream of the given action. */
2402 thread_ip_action_call(struct rte_swx_pipeline *p,
2407 	t->ip = p->action_instructions[action_id];
2411 thread_ip_inc(struct rte_swx_pipeline *p);
/* Advance the current thread's instruction pointer by one. */
2414 thread_ip_inc(struct rte_swx_pipeline *p)
2416 	struct thread *t = &p->threads[p->thread_id];
/* Conditionally advance the instruction pointer (cond is 0 or 1). */
2422 thread_ip_inc_cond(struct thread *t, int cond)
/* Hand over to the next thread; THREADS_MAX is a power of two. */
2428 thread_yield(struct rte_swx_pipeline *p)
2430 	p->thread_id = (p->thread_id + 1) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
/* Conditionally yield: advances thread_id only when cond is non-zero. */
2434 thread_yield_cond(struct rte_swx_pipeline *p, int cond)
2436 	p->thread_id = (p->thread_id + cond) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
/*
 * Translate "rx m.<field>": only valid outside actions, takes exactly one
 * operand (the metadata field that receives the input port id). Stores
 * the field's byte offset and bit width in the instruction.
 */
2443 instr_rx_translate(struct rte_swx_pipeline *p,
2444 	struct action *action,
2447 	struct instruction *instr,
2448 	struct instruction_data *data __rte_unused)
2452 	CHECK(!action, EINVAL);
2453 	CHECK(n_tokens == 2, EINVAL);
2455 	f = metadata_field_parse(p, tokens[1]);
2458 	instr->type = INSTR_RX;
2459 	instr->io.io.offset = f->offset / 8;
2460 	instr->io.io.n_bits = f->n_bits;
2465 instr_rx_exec(struct rte_swx_pipeline *p);
/*
 * Execute the rx instruction: poll the current input port for one packet,
 * reset the header state, record the port id into metadata, snapshot the
 * table state, then round-robin the port and advance the IP only if a
 * packet was actually received.
 */
2468 instr_rx_exec(struct rte_swx_pipeline *p)
2470 	struct thread *t = &p->threads[p->thread_id];
2471 	struct instruction *ip = t->ip;
2472 	struct port_in_runtime *port = &p->in[p->port_id];
2473 	struct rte_swx_pkt *pkt = &t->pkt;
2477 	pkt_received = port->pkt_rx(port->obj, pkt);
/* Cache the packet data pointer and warm the cache line. */
2478 	t->ptr = &pkt->pkt[pkt->offset];
2479 	rte_prefetch0(t->ptr);
2481 	TRACE("[Thread %2u] rx %s from port %u\n",
2483 		pkt_received ? "1 pkt" : "0 pkts",
/* Fresh packet: no headers parsed yet, nothing queued for emission. */
2487 	t->valid_headers = 0;
2488 	t->n_headers_out = 0;
2491 	METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, p->port_id);
2494 	t->table_state = p->table_state;
/* Move to the next port; only advance the IP when a packet arrived. */
2497 	pipeline_port_inc(p);
2498 	thread_ip_inc_cond(t, pkt_received);
/*
 * Translate "tx m.<field>": one operand, the metadata field holding the
 * output port id. Stores the field's byte offset and bit width.
 */
2506 instr_tx_translate(struct rte_swx_pipeline *p,
2507 	struct action *action __rte_unused,
2510 	struct instruction *instr,
2511 	struct instruction_data *data __rte_unused)
2515 	CHECK(n_tokens == 2, EINVAL);
2517 	f = metadata_field_parse(p, tokens[1]);
2520 	instr->type = INSTR_TX;
2521 	instr->io.io.offset = f->offset / 8;
2522 	instr->io.io.n_bits = f->n_bits;
/*
 * Materialize the queued output headers in front of the packet payload
 * before transmission. Fast paths avoid copying when the queued headers
 * are already contiguous with the payload; otherwise all headers are
 * staged in header_out_storage and copied in with a single memcpy.
 */
2527 emit_handler(struct thread *t)
2529 	struct header_out_runtime *h0 = &t->headers_out[0];
2530 	struct header_out_runtime *h1 = &t->headers_out[1];
2531 	uint32_t offset = 0, i;
2533 	/* No header change or header decapsulation. */
2534 	if ((t->n_headers_out == 1) &&
2535 		(h0->ptr + h0->n_bytes == t->ptr)) {
2536 		TRACE("Emit handler: no header change or header decap.\n");
/* Headers already sit right before the payload: just grow the packet. */
2538 		t->pkt.offset -= h0->n_bytes;
2539 		t->pkt.length += h0->n_bytes;
2544 	/* Header encapsulation (optionally, with prior header decasulation). */
2545 	if ((t->n_headers_out == 2) &&
2546 		(h1->ptr + h1->n_bytes == t->ptr) &&
2547 		(h0->ptr == h0->ptr0)) {
2550 		TRACE("Emit handler: header encapsulation.\n");
/* h1 is already in place; only h0 needs copying in front of it. */
2552 		offset = h0->n_bytes + h1->n_bytes;
2553 		memcpy(t->ptr - offset, h0->ptr, h0->n_bytes);
2554 		t->pkt.offset -= offset;
2555 		t->pkt.length += offset;
2560 	/* Header insertion. */
2563 	/* Header extraction. */
2566 	/* For any other case. */
2567 	TRACE("Emit handler: complex case.\n");
/* Stage all headers contiguously, then prepend them with one copy. */
2569 	for (i = 0; i < t->n_headers_out; i++) {
2570 		struct header_out_runtime *h = &t->headers_out[i];
2572 		memcpy(&t->header_out_storage[offset], h->ptr, h->n_bytes);
2573 		offset += h->n_bytes;
2577 	memcpy(t->ptr - offset, t->header_out_storage, offset);
2578 	t->pkt.offset -= offset;
2579 	t->pkt.length += offset;
2584 instr_tx_exec(struct rte_swx_pipeline *p);
/*
 * Execute the tx instruction: read the output port id from metadata,
 * hand the packet to that port, and restart the thread's program.
 */
2587 instr_tx_exec(struct rte_swx_pipeline *p)
2589 	struct thread *t = &p->threads[p->thread_id];
2590 	struct instruction *ip = t->ip;
2591 	uint64_t port_id = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
2592 	struct port_out_runtime *port = &p->out[port_id];
2593 	struct rte_swx_pkt *pkt = &t->pkt;
2595 	TRACE("[Thread %2u]: tx 1 pkt to port %u\n",
2603 	port->pkt_tx(port->obj, pkt);
/* Packet done: wrap the instruction pointer back to program start. */
2606 	thread_ip_reset(p, t);
/*
 * Translate "extract h.<header>": only valid outside actions, one header
 * operand. Records the header id, its struct slot and its byte size.
 */
2614 instr_hdr_extract_translate(struct rte_swx_pipeline *p,
2615 	struct action *action,
2618 	struct instruction *instr,
2619 	struct instruction_data *data __rte_unused)
2623 	CHECK(!action, EINVAL);
2624 	CHECK(n_tokens == 2, EINVAL);
2626 	h = header_parse(p, tokens[1]);
2629 	instr->type = INSTR_HDR_EXTRACT;
2630 	instr->io.hdr.header_id[0] = h->id;
2631 	instr->io.hdr.struct_id[0] = h->struct_id;
2632 	instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
2637 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract);
/*
 * Common body for the (possibly fused) header extract instructions:
 * peel n_extract headers off the front of the packet in sequence. Each
 * header's struct pointer is aimed directly at the packet data (zero
 * copy) and its valid bit is set.
 */
2640 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract)
2642 	struct thread *t = &p->threads[p->thread_id];
2643 	struct instruction *ip = t->ip;
2644 	uint64_t valid_headers = t->valid_headers;
2645 	uint8_t *ptr = t->ptr;
2646 	uint32_t offset = t->pkt.offset;
2647 	uint32_t length = t->pkt.length;
2650 	for (i = 0; i < n_extract; i++) {
2651 		uint32_t header_id = ip->io.hdr.header_id[i];
2652 		uint32_t struct_id = ip->io.hdr.struct_id[i];
2653 		uint32_t n_bytes = ip->io.hdr.n_bytes[i];
2655 		TRACE("[Thread %2u]: extract header %u (%u bytes)\n",
/* Zero copy: the header struct points straight into the packet. */
2661 		t->structs[struct_id] = ptr;
2662 		valid_headers = MASK64_BIT_SET(valid_headers, header_id);
/* Write back the updated parse state. */
2671 	t->valid_headers = valid_headers;
2674 	t->pkt.offset = offset;
2675 	t->pkt.length = length;
/* Single header extract. */
2680 instr_hdr_extract_exec(struct rte_swx_pipeline *p)
2682 	__instr_hdr_extract_exec(p, 1);
/* Fused variants: 2..8 consecutive extract instructions in one step. */
2689 instr_hdr_extract2_exec(struct rte_swx_pipeline *p)
2691 	TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
2694 	__instr_hdr_extract_exec(p, 2);
2701 instr_hdr_extract3_exec(struct rte_swx_pipeline *p)
2703 	TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
2706 	__instr_hdr_extract_exec(p, 3);
2713 instr_hdr_extract4_exec(struct rte_swx_pipeline *p)
2715 	TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
2718 	__instr_hdr_extract_exec(p, 4);
2725 instr_hdr_extract5_exec(struct rte_swx_pipeline *p)
2727 	TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
2730 	__instr_hdr_extract_exec(p, 5);
2737 instr_hdr_extract6_exec(struct rte_swx_pipeline *p)
2739 	TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
2742 	__instr_hdr_extract_exec(p, 6);
2749 instr_hdr_extract7_exec(struct rte_swx_pipeline *p)
2751 	TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
2754 	__instr_hdr_extract_exec(p, 7);
2761 instr_hdr_extract8_exec(struct rte_swx_pipeline *p)
2763 	TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
2766 	__instr_hdr_extract_exec(p, 8);
/*
 * Translate "emit h.<header>": one header operand; records the header id,
 * its struct slot and its byte size for the run-time emit.
 */
2776 instr_hdr_emit_translate(struct rte_swx_pipeline *p,
2777 	struct action *action __rte_unused,
2780 	struct instruction *instr,
2781 	struct instruction_data *data __rte_unused)
2785 	CHECK(n_tokens == 2, EINVAL);
2787 	h = header_parse(p, tokens[1]);
2790 	instr->type = INSTR_HDR_EMIT;
2791 	instr->io.hdr.header_id[0] = h->id;
2792 	instr->io.hdr.struct_id[0] = h->struct_id;
2793 	instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
2798 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit);
/*
 * Common body for the (possibly fused) header emit instructions: append
 * n_emit headers to the thread's output header queue. Consecutive headers
 * that are physically contiguous in memory are coalesced into a single
 * headers_out[] entry so emit_handler() can copy them in one memcpy.
 */
2801 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit)
2803 	struct thread *t = &p->threads[p->thread_id];
2804 	struct instruction *ip = t->ip;
2805 	uint32_t n_headers_out = t->n_headers_out;
/* Current tail of the output header queue (coalescing candidate). */
2806 	struct header_out_runtime *ho = &t->headers_out[n_headers_out - 1];
2807 	uint8_t *ho_ptr = NULL;
2808 	uint32_t ho_nbytes = 0, i;
2810 	for (i = 0; i < n_emit; i++) {
2811 		uint32_t header_id = ip->io.hdr.header_id[i];
2812 		uint32_t struct_id = ip->io.hdr.struct_id[i];
2813 		uint32_t n_bytes = ip->io.hdr.n_bytes[i];
2815 		struct header_runtime *hi = &t->headers[header_id];
2816 		uint8_t *hi_ptr = t->structs[struct_id];
2818 		TRACE("[Thread %2u]: emit header %u\n",
/* First emitted header: start the queue. */
2824 		if (!t->n_headers_out) {
2825 			ho = &t->headers_out[0];
2827 			ho->ptr0 = hi->ptr0;
2831 			ho_nbytes = n_bytes;
2838 			ho_nbytes = ho->n_bytes;
/* Contiguous with the previous header: extend the current entry. */
2842 		if (ho_ptr + ho_nbytes == hi_ptr) {
2843 			ho_nbytes += n_bytes;
2845 			ho->n_bytes = ho_nbytes;
/* Not contiguous: close the current entry and open a new one. */
2848 			ho->ptr0 = hi->ptr0;
2852 			ho_nbytes = n_bytes;
2858 	ho->n_bytes = ho_nbytes;
2859 	t->n_headers_out = n_headers_out;
/* Single header emit. */
2863 instr_hdr_emit_exec(struct rte_swx_pipeline *p)
2865 	__instr_hdr_emit_exec(p, 1);
/*
 * Fused emit+tx variants: emit N headers then transmit, replacing N+1
 * consecutive instructions (hence the "next N+1 fused" trace messages).
 */
2872 instr_hdr_emit_tx_exec(struct rte_swx_pipeline *p)
2874 	TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
2877 	__instr_hdr_emit_exec(p, 1);
2882 instr_hdr_emit2_tx_exec(struct rte_swx_pipeline *p)
2884 	TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
2887 	__instr_hdr_emit_exec(p, 2);
2892 instr_hdr_emit3_tx_exec(struct rte_swx_pipeline *p)
2894 	TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
2897 	__instr_hdr_emit_exec(p, 3);
2902 instr_hdr_emit4_tx_exec(struct rte_swx_pipeline *p)
2904 	TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
2907 	__instr_hdr_emit_exec(p, 4);
2912 instr_hdr_emit5_tx_exec(struct rte_swx_pipeline *p)
2914 	TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
2917 	__instr_hdr_emit_exec(p, 5);
2922 instr_hdr_emit6_tx_exec(struct rte_swx_pipeline *p)
2924 	TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
2927 	__instr_hdr_emit_exec(p, 6);
2932 instr_hdr_emit7_tx_exec(struct rte_swx_pipeline *p)
2934 	TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
2937 	__instr_hdr_emit_exec(p, 7);
2942 instr_hdr_emit8_tx_exec(struct rte_swx_pipeline *p)
2944 	TRACE("[Thread %2u] *** The next 9 instructions are fused. ***\n",
2947 	__instr_hdr_emit_exec(p, 8);
/* Translate "validate h.<header>": one header operand; store its id. */
2955 instr_hdr_validate_translate(struct rte_swx_pipeline *p,
2956 	struct action *action __rte_unused,
2959 	struct instruction *instr,
2960 	struct instruction_data *data __rte_unused)
2964 	CHECK(n_tokens == 2, EINVAL);
2966 	h = header_parse(p, tokens[1]);
2969 	instr->type = INSTR_HDR_VALIDATE;
2970 	instr->valid.header_id = h->id;
/* Execute: set the header's bit in the thread's valid_headers mask. */
2975 instr_hdr_validate_exec(struct rte_swx_pipeline *p)
2977 	struct thread *t = &p->threads[p->thread_id];
2978 	struct instruction *ip = t->ip;
2979 	uint32_t header_id = ip->valid.header_id;
2981 	TRACE("[Thread %2u] validate header %u\n", p->thread_id, header_id);
2984 	t->valid_headers = MASK64_BIT_SET(t->valid_headers, header_id);
/* Translate "invalidate h.<header>": one header operand; store its id. */
2994 instr_hdr_invalidate_translate(struct rte_swx_pipeline *p,
2995 	struct action *action __rte_unused,
2998 	struct instruction *instr,
2999 	struct instruction_data *data __rte_unused)
3003 	CHECK(n_tokens == 2, EINVAL);
3005 	h = header_parse(p, tokens[1]);
3008 	instr->type = INSTR_HDR_INVALIDATE;
3009 	instr->valid.header_id = h->id;
/* Execute: clear the header's bit in the thread's valid_headers mask. */
3014 instr_hdr_invalidate_exec(struct rte_swx_pipeline *p)
3016 	struct thread *t = &p->threads[p->thread_id];
3017 	struct instruction *ip = t->ip;
3018 	uint32_t header_id = ip->valid.header_id;
3020 	TRACE("[Thread %2u] invalidate header %u\n", p->thread_id, header_id);
3023 	t->valid_headers = MASK64_BIT_CLR(t->valid_headers, header_id);
3032 static struct table *
3033 table_find(struct rte_swx_pipeline *p, const char *name);
/*
 * Translate "table <name>": only valid outside actions, one table
 * operand; store the table id for the run-time lookup.
 */
3036 instr_table_translate(struct rte_swx_pipeline *p,
3037 	struct action *action,
3040 	struct instruction *instr,
3041 	struct instruction_data *data __rte_unused)
3045 	CHECK(!action, EINVAL);
3046 	CHECK(n_tokens == 2, EINVAL);
3048 	t = table_find(p, tokens[1]);
3051 	instr->type = INSTR_TABLE;
3052 	instr->table.table_id = t->id;
/*
 * Execute the table instruction: run the table lookup; if it has not
 * finalized yet the thread yields and retries later. On completion the
 * miss path substitutes the table's default action, the action data is
 * published as structs[0] and the thread branches into the action code.
 */
3057 instr_table_exec(struct rte_swx_pipeline *p)
3059 	struct thread *t = &p->threads[p->thread_id];
3060 	struct instruction *ip = t->ip;
3061 	uint32_t table_id = ip->table.table_id;
3062 	struct rte_swx_table_state *ts = &t->table_state[table_id];
3063 	struct table_runtime *table = &t->tables[table_id];
3065 	uint8_t *action_data;
3069 	done = table->func(ts->obj,
3077 		TRACE("[Thread %2u] table %u (not finalized)\n",
/* Miss: fall back to the table's default action. */
3085 	action_id = hit ? action_id : ts->default_action_id;
3086 	action_data = hit ? action_data : ts->default_action_data;
3088 	TRACE("[Thread %2u] table %u (%s, action %u)\n",
3091 		hit ? "hit" : "miss",
3092 		(uint32_t)action_id);
3094 	t->action_id = action_id;
3095 	t->structs[0] = action_data;
3099 	thread_ip_action_call(p, t, action_id);
/*
 * Translate "extern <target>": the target is either an extern object
 * member function ("e.<obj>.<func>") or an extern function ("f.<func>"),
 * dispatched on the first character of the token.
 */
3106 instr_extern_translate(struct rte_swx_pipeline *p,
3107 	struct action *action __rte_unused,
3110 	struct instruction *instr,
3111 	struct instruction_data *data __rte_unused)
3113 	char *token = tokens[1];
3115 	CHECK(n_tokens == 2, EINVAL);
3117 	if (token[0] == 'e') {
3118 		struct extern_obj *obj;
3119 		struct extern_type_member_func *func;
3121 		func = extern_obj_member_func_parse(p, token, &obj);
3122 		CHECK(func, EINVAL);
3124 		instr->type = INSTR_EXTERN_OBJ;
3125 		instr->ext_obj.ext_obj_id = obj->id;
3126 		instr->ext_obj.func_id = func->id;
3131 	if (token[0] == 'f') {
3132 		struct extern_func *func;
3134 		func = extern_func_parse(p, token);
3135 		CHECK(func, EINVAL);
3137 		instr->type = INSTR_EXTERN_FUNC;
3138 		instr->ext_func.ext_func_id = func->id;
/*
 * Execute an extern object member function call; the callback reports
 * completion, so advance the IP only when done and yield otherwise.
 */
3147 instr_extern_obj_exec(struct rte_swx_pipeline *p)
3149 	struct thread *t = &p->threads[p->thread_id];
3150 	struct instruction *ip = t->ip;
3151 	uint32_t obj_id = ip->ext_obj.ext_obj_id;
3152 	uint32_t func_id = ip->ext_obj.func_id;
3153 	struct extern_obj_runtime *obj = &t->extern_objs[obj_id];
3154 	rte_swx_extern_type_member_func_t func = obj->funcs[func_id];
3156 	TRACE("[Thread %2u] extern obj %u member func %u\n",
3161 	/* Extern object member function execute. */
3162 	uint32_t done = func(obj->obj, obj->mailbox);
/* done==1: step to next instruction; done==0: yield and retry later. */
3165 	thread_ip_inc_cond(t, done);
3166 	thread_yield_cond(p, done ^ 1);
/* Execute an extern function call; same done/yield protocol as above. */
3170 instr_extern_func_exec(struct rte_swx_pipeline *p)
3172 	struct thread *t = &p->threads[p->thread_id];
3173 	struct instruction *ip = t->ip;
3174 	uint32_t ext_func_id = ip->ext_func.ext_func_id;
3175 	struct extern_func_runtime *ext_func = &t->extern_funcs[ext_func_id];
3176 	rte_swx_extern_func_t func = ext_func->func;
3178 	TRACE("[Thread %2u] extern func %u\n",
3182 	/* Extern function execute. */
3183 	uint32_t done = func(ext_func->mailbox);
3186 	thread_ip_inc_cond(t, done);
3187 	thread_yield_cond(p, done ^ 1);
/*
 * Translate "mov <dst> <src>". Three forms are selected:
 *  - INSTR_MOV:   both operands same endianness domain;
 *  - INSTR_MOV_S: exactly one operand is a header field (network byte
 *    order), so a byte swap is needed at run time;
 *  - INSTR_MOV_I: src is an immediate; the constant is pre-converted to
 *    the destination's representation at translation time.
 */
3194 instr_mov_translate(struct rte_swx_pipeline *p,
3195 	struct action *action,
3198 	struct instruction *instr,
3199 	struct instruction_data *data __rte_unused)
3201 	char *dst = tokens[1], *src = tokens[2];
3202 	struct field *fdst, *fsrc;
3204 	uint32_t dst_struct_id, src_struct_id;
3206 	CHECK(n_tokens == 3, EINVAL);
/* Destination must be a field (never an immediate, never an action arg). */
3208 	fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3209 	CHECK(fdst, EINVAL);
/* MOV or MOV_S. */
3212 	fsrc = struct_field_parse(p, action, src, &src_struct_id);
3214 		instr->type = INSTR_MOV;
/* Header fields are big endian: mixed h/m operands need a swap. */
3215 		if ((dst[0] == 'h' && src[0] != 'h') ||
3216 			(dst[0] != 'h' && src[0] == 'h'))
3217 			instr->type = INSTR_MOV_S;
3219 		instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
3220 		instr->mov.dst.n_bits = fdst->n_bits;
3221 		instr->mov.dst.offset = fdst->offset / 8;
3222 		instr->mov.src.struct_id = (uint8_t)src_struct_id;
3223 		instr->mov.src.n_bits = fsrc->n_bits;
3224 		instr->mov.src.offset = fsrc->offset / 8;
/* MOV_I: src must parse fully as a number. */
3229 	src_val = strtoull(src, &src, 0);
3230 	CHECK(!src[0], EINVAL);
/* Pre-swap the constant for big-endian header destinations. */
3233 		src_val = hton64(src_val) >> (64 - fdst->n_bits);
3235 	instr->type = INSTR_MOV_I;
3236 	instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
3237 	instr->mov.dst.n_bits = fdst->n_bits;
3238 	instr->mov.dst.offset = fdst->offset / 8;
3239 	instr->mov.src_val = src_val;
/* Execute mov (same-endianness field to field). */
3244 instr_mov_exec(struct rte_swx_pipeline *p)
3246 	struct thread *t = &p->threads[p->thread_id];
3247 	struct instruction *ip = t->ip;
3249 	TRACE("[Thread %2u] mov\n",
/* Execute mov with byte swap (header <-> metadata). */
3259 instr_mov_s_exec(struct rte_swx_pipeline *p)
3261 	struct thread *t = &p->threads[p->thread_id];
3262 	struct instruction *ip = t->ip;
3264 	TRACE("[Thread %2u] mov (s)\n",
/* Execute mov of a pre-converted immediate into a field. */
3274 instr_mov_i_exec(struct rte_swx_pipeline *p)
3276 	struct thread *t = &p->threads[p->thread_id];
3277 	struct instruction *ip = t->ip;
3279 	TRACE("[Thread %2u] mov m.f %" PRIx64 "\n",
/*
 * Translate "dma h.<header> t.<field>": action-only instruction that
 * copies a whole header's worth of bytes from the action data (table
 * entry) into the header. Records header id, struct slot, byte count
 * and the source offset within the action data.
 */
3293 instr_dma_translate(struct rte_swx_pipeline *p,
3294 	struct action *action,
3297 	struct instruction *instr,
3298 	struct instruction_data *data __rte_unused)
3300 	char *dst = tokens[1];
3301 	char *src = tokens[2];
/* Only valid inside an action (src comes from the action data). */
3305 	CHECK(action, EINVAL);
3306 	CHECK(n_tokens == 3, EINVAL);
3308 	h = header_parse(p, dst);
3311 	tf = action_field_parse(action, src);
3314 	instr->type = INSTR_DMA_HT;
3315 	instr->dma.dst.header_id[0] = h->id;
3316 	instr->dma.dst.struct_id[0] = h->struct_id;
3317 	instr->dma.n_bytes[0] = h->st->n_bits / 8;
3318 	instr->dma.src.offset[0] = tf->offset / 8;
3324 __instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma);
/*
 * Common body for the (possibly fused) dma instructions: copy n_dma
 * header images out of the action data. An invalid header is written
 * into its private storage (ptr0) and marked valid; a valid one is
 * overwritten wherever its struct pointer currently points.
 */
3327 __instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma)
3329 	struct thread *t = &p->threads[p->thread_id];
3330 	struct instruction *ip = t->ip;
/* structs[0] is the action data selected by the last table lookup. */
3331 	uint8_t *action_data = t->structs[0];
3332 	uint64_t valid_headers = t->valid_headers;
3335 	for (i = 0; i < n_dma; i++) {
3336 		uint32_t header_id = ip->dma.dst.header_id[i];
3337 		uint32_t struct_id = ip->dma.dst.struct_id[i];
3338 		uint32_t offset = ip->dma.src.offset[i];
3339 		uint32_t n_bytes = ip->dma.n_bytes[i];
3341 		struct header_runtime *h = &t->headers[header_id];
3342 		uint8_t *h_ptr0 = h->ptr0;
3343 		uint8_t *h_ptr = t->structs[struct_id];
3345 		void *dst = MASK64_BIT_GET(valid_headers, header_id) ?
3347 		void *src = &action_data[offset];
3349 		TRACE("[Thread %2u] dma h.s t.f\n", p->thread_id);
3352 		memcpy(dst, src, n_bytes);
3353 		t->structs[struct_id] = dst;
3354 		valid_headers = MASK64_BIT_SET(valid_headers, header_id);
3357 	t->valid_headers = valid_headers;
/* Single dma. */
3361 instr_dma_ht_exec(struct rte_swx_pipeline *p)
3363 	__instr_dma_ht_exec(p, 1);
/* Fused variants: 2..8 consecutive dma instructions in one step. */
3370 instr_dma_ht2_exec(struct rte_swx_pipeline *p)
3372 	TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
3375 	__instr_dma_ht_exec(p, 2);
3382 instr_dma_ht3_exec(struct rte_swx_pipeline *p)
3384 	TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
3387 	__instr_dma_ht_exec(p, 3);
3394 instr_dma_ht4_exec(struct rte_swx_pipeline *p)
3396 	TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
3399 	__instr_dma_ht_exec(p, 4);
3406 instr_dma_ht5_exec(struct rte_swx_pipeline *p)
3408 	TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
3411 	__instr_dma_ht_exec(p, 5);
3418 instr_dma_ht6_exec(struct rte_swx_pipeline *p)
3420 	TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
3423 	__instr_dma_ht_exec(p, 6);
3430 instr_dma_ht7_exec(struct rte_swx_pipeline *p)
3432 	TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
3435 	__instr_dma_ht_exec(p, 7);
3442 instr_dma_ht8_exec(struct rte_swx_pipeline *p)
3444 	TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
3447 	__instr_dma_ht_exec(p, 8);
/*
 * Translate "add <dst> <src>". The opcode variant encodes the endianness
 * of the operands: plain ADD for same-domain operands, ADD_HM/MH/HH for
 * header/metadata mixes, and ADD_MI/HI when src is an immediate.
 */
3457 instr_alu_add_translate(struct rte_swx_pipeline *p,
3458 	struct action *action,
3461 	struct instruction *instr,
3462 	struct instruction_data *data __rte_unused)
3464 	char *dst = tokens[1], *src = tokens[2];
3465 	struct field *fdst, *fsrc;
3467 	uint32_t dst_struct_id, src_struct_id;
3469 	CHECK(n_tokens == 3, EINVAL);
/* Destination must be a field (never an immediate or action arg). */
3471 	fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3472 	CHECK(fdst, EINVAL);
3474 	/* ADD, ADD_HM, ADD_MH, ADD_HH. */
3475 	fsrc = struct_field_parse(p, action, src, &src_struct_id);
3477 		instr->type = INSTR_ALU_ADD;
3478 		if (dst[0] == 'h' && src[0] == 'm')
3479 			instr->type = INSTR_ALU_ADD_HM;
3480 		if (dst[0] == 'm' && src[0] == 'h')
3481 			instr->type = INSTR_ALU_ADD_MH;
3482 		if (dst[0] == 'h' && src[0] == 'h')
3483 			instr->type = INSTR_ALU_ADD_HH;
3485 		instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3486 		instr->alu.dst.n_bits = fdst->n_bits;
3487 		instr->alu.dst.offset = fdst->offset / 8;
3488 		instr->alu.src.struct_id = (uint8_t)src_struct_id;
3489 		instr->alu.src.n_bits = fsrc->n_bits;
3490 		instr->alu.src.offset = fsrc->offset / 8;
3494 	/* ADD_MI, ADD_HI. */
/* src must parse fully as a number to be an immediate. */
3495 	src_val = strtoull(src, &src, 0);
3496 	CHECK(!src[0], EINVAL);
3498 		instr->type = INSTR_ALU_ADD_MI;
3500 		instr->type = INSTR_ALU_ADD_HI;
3502 	instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3503 	instr->alu.dst.n_bits = fdst->n_bits;
3504 	instr->alu.dst.offset = fdst->offset / 8;
3505 	instr->alu.src_val = src_val;
/*
 * Translate "sub <dst> <src>". Mirrors instr_alu_add_translate(): the
 * opcode variant encodes operand endianness (SUB, SUB_HM/MH/HH) or an
 * immediate source (SUB_MI/HI).
 */
3510 instr_alu_sub_translate(struct rte_swx_pipeline *p,
3511 	struct action *action,
3514 	struct instruction *instr,
3515 	struct instruction_data *data __rte_unused)
3517 	char *dst = tokens[1], *src = tokens[2];
3518 	struct field *fdst, *fsrc;
3520 	uint32_t dst_struct_id, src_struct_id;
3522 	CHECK(n_tokens == 3, EINVAL);
/* Destination must be a field (never an immediate or action arg). */
3524 	fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3525 	CHECK(fdst, EINVAL);
3527 	/* SUB, SUB_HM, SUB_MH, SUB_HH. */
3528 	fsrc = struct_field_parse(p, action, src, &src_struct_id);
3530 		instr->type = INSTR_ALU_SUB;
3531 		if (dst[0] == 'h' && src[0] == 'm')
3532 			instr->type = INSTR_ALU_SUB_HM;
3533 		if (dst[0] == 'm' && src[0] == 'h')
3534 			instr->type = INSTR_ALU_SUB_MH;
3535 		if (dst[0] == 'h' && src[0] == 'h')
3536 			instr->type = INSTR_ALU_SUB_HH;
3538 		instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3539 		instr->alu.dst.n_bits = fdst->n_bits;
3540 		instr->alu.dst.offset = fdst->offset / 8;
3541 		instr->alu.src.struct_id = (uint8_t)src_struct_id;
3542 		instr->alu.src.n_bits = fsrc->n_bits;
3543 		instr->alu.src.offset = fsrc->offset / 8;
3547 	/* SUB_MI, SUB_HI. */
/* src must parse fully as a number to be an immediate. */
3548 	src_val = strtoull(src, &src, 0);
3549 	CHECK(!src[0], EINVAL);
3551 		instr->type = INSTR_ALU_SUB_MI;
3553 		instr->type = INSTR_ALU_SUB_HI;
3555 	instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3556 	instr->alu.dst.n_bits = fdst->n_bits;
3557 	instr->alu.dst.offset = fdst->offset / 8;
3558 	instr->alu.src_val = src_val;
/*
 * Translate "ckadd dst src": incremental checksum add into dst.
 * dst must be a 16-bit header field (the checksum field itself).
 * Two encodings:
 *  - src is a header field  -> CKADD_FIELD (fold one field into dst);
 *  - src is a whole header  -> CKADD_STRUCT, specialized to
 *    CKADD_STRUCT20 when the header is exactly 20 bytes (presumably the
 *    option-less IPv4 header — confirm).
 */
3563 instr_alu_ckadd_translate(struct rte_swx_pipeline *p,
3564 struct action *action __rte_unused,
3567 struct instruction *instr,
3568 struct instruction_data *data __rte_unused)
3570 char *dst = tokens[1], *src = tokens[2];
3571 struct header *hdst, *hsrc;
3572 struct field *fdst, *fsrc;
3574 CHECK(n_tokens == 3, EINVAL);
/* dst: must parse as a header field of exactly 16 bits. */
3576 fdst = header_field_parse(p, dst, &hdst);
3577 CHECK(fdst && (fdst->n_bits == 16), EINVAL);
3580 fsrc = header_field_parse(p, src, &hsrc);
3582 instr->type = INSTR_ALU_CKADD_FIELD;
3583 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
3584 instr->alu.dst.n_bits = fdst->n_bits;
3585 instr->alu.dst.offset = fdst->offset / 8;
3586 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
3587 instr->alu.src.n_bits = fsrc->n_bits;
3588 instr->alu.src.offset = fsrc->offset / 8;
3592 /* CKADD_STRUCT, CKADD_STRUCT20. */
3593 hsrc = header_parse(p, src);
3594 CHECK(hsrc, EINVAL);
3596 instr->type = INSTR_ALU_CKADD_STRUCT;
3597 if ((hsrc->st->n_bits / 8) == 20)
3598 instr->type = INSTR_ALU_CKADD_STRUCT20;
3600 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
3601 instr->alu.dst.n_bits = fdst->n_bits;
3602 instr->alu.dst.offset = fdst->offset / 8;
/* For the struct form, src.n_bits carries the whole header size;
 * src.offset is unused (checksum starts at byte 0 of the header).
 */
3603 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
3604 instr->alu.src.n_bits = hsrc->st->n_bits;
3605 instr->alu.src.offset = 0; /* Unused. */
/*
 * Translate "cksub dst src": incremental checksum subtract.
 * dst must be a 16-bit header field (the checksum field); src is any
 * header field. Single encoding: CKSUB_FIELD.
 */
3610 instr_alu_cksub_translate(struct rte_swx_pipeline *p,
3611 struct action *action __rte_unused,
3614 struct instruction *instr,
3615 struct instruction_data *data __rte_unused)
3617 char *dst = tokens[1], *src = tokens[2];
3618 struct header *hdst, *hsrc;
3619 struct field *fdst, *fsrc;
3621 CHECK(n_tokens == 3, EINVAL);
3623 fdst = header_field_parse(p, dst, &hdst);
3624 CHECK(fdst && (fdst->n_bits == 16), EINVAL);
3626 fsrc = header_field_parse(p, src, &hsrc);
3627 CHECK(fsrc, EINVAL);
3629 instr->type = INSTR_ALU_CKSUB_FIELD;
3630 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
3631 instr->alu.dst.n_bits = fdst->n_bits;
3632 instr->alu.dst.offset = fdst->offset / 8;
3633 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
3634 instr->alu.src.n_bits = fsrc->n_bits;
3635 instr->alu.src.offset = fsrc->offset / 8;
/*
 * Translate "shl dst src": dst <<= src.
 * Same operand scheme as add/sub: field form picks SHL / SHL_HM /
 * SHL_MH / SHL_HH from the 'h'/'m' operand prefixes; immediate form
 * (strtoull) picks SHL_MI / SHL_HI.
 */
3640 instr_alu_shl_translate(struct rte_swx_pipeline *p,
3641 struct action *action,
3644 struct instruction *instr,
3645 struct instruction_data *data __rte_unused)
3647 char *dst = tokens[1], *src = tokens[2];
3648 struct field *fdst, *fsrc;
3650 uint32_t dst_struct_id, src_struct_id;
3652 CHECK(n_tokens == 3, EINVAL);
3654 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3655 CHECK(fdst, EINVAL);
3657 /* SHL, SHL_HM, SHL_MH, SHL_HH. */
3658 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3660 instr->type = INSTR_ALU_SHL;
3661 if (dst[0] == 'h' && src[0] == 'm')
3662 instr->type = INSTR_ALU_SHL_HM;
3663 if (dst[0] == 'm' && src[0] == 'h')
3664 instr->type = INSTR_ALU_SHL_MH;
3665 if (dst[0] == 'h' && src[0] == 'h')
3666 instr->type = INSTR_ALU_SHL_HH;
3668 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3669 instr->alu.dst.n_bits = fdst->n_bits;
3670 instr->alu.dst.offset = fdst->offset / 8;
3671 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3672 instr->alu.src.n_bits = fsrc->n_bits;
3673 instr->alu.src.offset = fsrc->offset / 8;
3677 /* SHL_MI, SHL_HI. */
3678 src_val = strtoull(src, &src, 0);
3679 CHECK(!src[0], EINVAL);
3681 instr->type = INSTR_ALU_SHL_MI;
3683 instr->type = INSTR_ALU_SHL_HI;
3685 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3686 instr->alu.dst.n_bits = fdst->n_bits;
3687 instr->alu.dst.offset = fdst->offset / 8;
3688 instr->alu.src_val = src_val;
/*
 * Translate "shr dst src": dst >>= src.
 * Mirror image of shl: SHR / SHR_HM / SHR_MH / SHR_HH for the field
 * form, SHR_MI / SHR_HI for the immediate form.
 */
3693 instr_alu_shr_translate(struct rte_swx_pipeline *p,
3694 struct action *action,
3697 struct instruction *instr,
3698 struct instruction_data *data __rte_unused)
3700 char *dst = tokens[1], *src = tokens[2];
3701 struct field *fdst, *fsrc;
3703 uint32_t dst_struct_id, src_struct_id;
3705 CHECK(n_tokens == 3, EINVAL);
3707 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3708 CHECK(fdst, EINVAL);
3710 /* SHR, SHR_HM, SHR_MH, SHR_HH. */
3711 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3713 instr->type = INSTR_ALU_SHR;
3714 if (dst[0] == 'h' && src[0] == 'm')
3715 instr->type = INSTR_ALU_SHR_HM;
3716 if (dst[0] == 'm' && src[0] == 'h')
3717 instr->type = INSTR_ALU_SHR_MH;
3718 if (dst[0] == 'h' && src[0] == 'h')
3719 instr->type = INSTR_ALU_SHR_HH;
3721 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3722 instr->alu.dst.n_bits = fdst->n_bits;
3723 instr->alu.dst.offset = fdst->offset / 8;
3724 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3725 instr->alu.src.n_bits = fsrc->n_bits;
3726 instr->alu.src.offset = fsrc->offset / 8;
3730 /* SHR_MI, SHR_HI. */
3731 src_val = strtoull(src, &src, 0);
3732 CHECK(!src[0], EINVAL);
3734 instr->type = INSTR_ALU_SHR_MI;
3736 instr->type = INSTR_ALU_SHR_HI;
3738 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3739 instr->alu.dst.n_bits = fdst->n_bits;
3740 instr->alu.dst.offset = fdst->offset / 8;
3741 instr->alu.src_val = src_val;
/*
 * Translate "and dst src": dst &= src.
 * Field form: AND, or AND_S when exactly one operand is a header field
 * ('h' prefix) — the (s) variant presumably performs the byte-order
 * swap between host-order metadata and network-order headers; confirm
 * against the executor.
 * Immediate form: AND_I, with the immediate pre-positioned in the
 * destination field's big-endian layout (hton64 >> (64 - n_bits)).
 */
3746 instr_alu_and_translate(struct rte_swx_pipeline *p,
3747 struct action *action,
3750 struct instruction *instr,
3751 struct instruction_data *data __rte_unused)
3753 char *dst = tokens[1], *src = tokens[2];
3754 struct field *fdst, *fsrc;
3756 uint32_t dst_struct_id, src_struct_id;
3758 CHECK(n_tokens == 3, EINVAL);
3760 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3761 CHECK(fdst, EINVAL);
3764 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3766 instr->type = INSTR_ALU_AND;
/* (s) variant: operands disagree on header vs non-header location. */
3767 if ((dst[0] == 'h' && src[0] != 'h') ||
3768 (dst[0] != 'h' && src[0] == 'h'))
3769 instr->type = INSTR_ALU_AND_S;
3771 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3772 instr->alu.dst.n_bits = fdst->n_bits;
3773 instr->alu.dst.offset = fdst->offset / 8;
3774 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3775 instr->alu.src.n_bits = fsrc->n_bits;
3776 instr->alu.src.offset = fsrc->offset / 8;
3781 src_val = strtoull(src, &src, 0);
3782 CHECK(!src[0], EINVAL);
/* NOTE(review): guard for this conversion (dst being a header field) is
 * not visible in this excerpt — confirm.
 */
3785 src_val = hton64(src_val) >> (64 - fdst->n_bits);
3787 instr->type = INSTR_ALU_AND_I;
3788 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3789 instr->alu.dst.n_bits = fdst->n_bits;
3790 instr->alu.dst.offset = fdst->offset / 8;
3791 instr->alu.src_val = src_val;
/*
 * Translate "or dst src": dst |= src.
 * Same scheme as "and": OR for matching operand locations, OR_S when
 * exactly one operand is a header field, OR_I for an immediate (the
 * immediate is pre-positioned big-endian at the field width).
 */
3796 instr_alu_or_translate(struct rte_swx_pipeline *p,
3797 struct action *action,
3800 struct instruction *instr,
3801 struct instruction_data *data __rte_unused)
3803 char *dst = tokens[1], *src = tokens[2];
3804 struct field *fdst, *fsrc;
3806 uint32_t dst_struct_id, src_struct_id;
3808 CHECK(n_tokens == 3, EINVAL);
3810 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3811 CHECK(fdst, EINVAL);
3814 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3816 instr->type = INSTR_ALU_OR;
3817 if ((dst[0] == 'h' && src[0] != 'h') ||
3818 (dst[0] != 'h' && src[0] == 'h'))
3819 instr->type = INSTR_ALU_OR_S;
3821 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3822 instr->alu.dst.n_bits = fdst->n_bits;
3823 instr->alu.dst.offset = fdst->offset / 8;
3824 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3825 instr->alu.src.n_bits = fsrc->n_bits;
3826 instr->alu.src.offset = fsrc->offset / 8;
3831 src_val = strtoull(src, &src, 0);
3832 CHECK(!src[0], EINVAL);
3835 src_val = hton64(src_val) >> (64 - fdst->n_bits);
3837 instr->type = INSTR_ALU_OR_I;
3838 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3839 instr->alu.dst.n_bits = fdst->n_bits;
3840 instr->alu.dst.offset = fdst->offset / 8;
3841 instr->alu.src_val = src_val;
/*
 * Translate "xor dst src": dst ^= src.
 * Same scheme as "and"/"or": XOR for matching operand locations, XOR_S
 * when exactly one operand is a header field, XOR_I for an immediate
 * (pre-positioned big-endian at the field width).
 */
3846 instr_alu_xor_translate(struct rte_swx_pipeline *p,
3847 struct action *action,
3850 struct instruction *instr,
3851 struct instruction_data *data __rte_unused)
3853 char *dst = tokens[1], *src = tokens[2];
3854 struct field *fdst, *fsrc;
3856 uint32_t dst_struct_id, src_struct_id;
3858 CHECK(n_tokens == 3, EINVAL);
3860 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3861 CHECK(fdst, EINVAL);
3864 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3866 instr->type = INSTR_ALU_XOR;
3867 if ((dst[0] == 'h' && src[0] != 'h') ||
3868 (dst[0] != 'h' && src[0] == 'h'))
3869 instr->type = INSTR_ALU_XOR_S;
3871 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3872 instr->alu.dst.n_bits = fdst->n_bits;
3873 instr->alu.dst.offset = fdst->offset / 8;
3874 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3875 instr->alu.src.n_bits = fsrc->n_bits;
3876 instr->alu.src.offset = fsrc->offset / 8;
3881 src_val = strtoull(src, &src, 0);
3882 CHECK(!src[0], EINVAL);
3885 src_val = hton64(src_val) >> (64 - fdst->n_bits);
3887 instr->type = INSTR_ALU_XOR_I;
3888 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3889 instr->alu.dst.n_bits = fdst->n_bits;
3890 instr->alu.dst.offset = fdst->offset / 8;
3891 instr->alu.src_val = src_val;
/*
 * add/sub executor fragments, one per opcode variant emitted by the
 * *_translate functions above. Suffix encodes operand location as
 * <dst><src>: 'h' = header field, 'm' = metadata field, 'i' = immediate.
 * Each executor fetches the current thread and its instruction pointer,
 * then traces; the ALU computation itself is not visible in this excerpt
 * (lines elided).
 */
3896 instr_alu_add_exec(struct rte_swx_pipeline *p)
3898 struct thread *t = &p->threads[p->thread_id];
3899 struct instruction *ip = t->ip;
3901 TRACE("[Thread %2u] add\n", p->thread_id);
3911 instr_alu_add_mh_exec(struct rte_swx_pipeline *p)
3913 struct thread *t = &p->threads[p->thread_id];
3914 struct instruction *ip = t->ip;
3916 TRACE("[Thread %2u] add (mh)\n", p->thread_id);
3926 instr_alu_add_hm_exec(struct rte_swx_pipeline *p)
3928 struct thread *t = &p->threads[p->thread_id];
3929 struct instruction *ip = t->ip;
3931 TRACE("[Thread %2u] add (hm)\n", p->thread_id);
3941 instr_alu_add_hh_exec(struct rte_swx_pipeline *p)
3943 struct thread *t = &p->threads[p->thread_id];
3944 struct instruction *ip = t->ip;
3946 TRACE("[Thread %2u] add (hh)\n", p->thread_id);
3956 instr_alu_add_mi_exec(struct rte_swx_pipeline *p)
3958 struct thread *t = &p->threads[p->thread_id];
3959 struct instruction *ip = t->ip;
3961 TRACE("[Thread %2u] add (mi)\n", p->thread_id);
3971 instr_alu_add_hi_exec(struct rte_swx_pipeline *p)
3973 struct thread *t = &p->threads[p->thread_id];
3974 struct instruction *ip = t->ip;
3976 TRACE("[Thread %2u] add (hi)\n", p->thread_id);
3986 instr_alu_sub_exec(struct rte_swx_pipeline *p)
3988 struct thread *t = &p->threads[p->thread_id];
3989 struct instruction *ip = t->ip;
3991 TRACE("[Thread %2u] sub\n", p->thread_id);
4001 instr_alu_sub_mh_exec(struct rte_swx_pipeline *p)
4003 struct thread *t = &p->threads[p->thread_id];
4004 struct instruction *ip = t->ip;
4006 TRACE("[Thread %2u] sub (mh)\n", p->thread_id);
4016 instr_alu_sub_hm_exec(struct rte_swx_pipeline *p)
4018 struct thread *t = &p->threads[p->thread_id];
4019 struct instruction *ip = t->ip;
4021 TRACE("[Thread %2u] sub (hm)\n", p->thread_id);
4031 instr_alu_sub_hh_exec(struct rte_swx_pipeline *p)
4033 struct thread *t = &p->threads[p->thread_id];
4034 struct instruction *ip = t->ip;
4036 TRACE("[Thread %2u] sub (hh)\n", p->thread_id);
4046 instr_alu_sub_mi_exec(struct rte_swx_pipeline *p)
4048 struct thread *t = &p->threads[p->thread_id];
4049 struct instruction *ip = t->ip;
4051 TRACE("[Thread %2u] sub (mi)\n", p->thread_id);
4061 instr_alu_sub_hi_exec(struct rte_swx_pipeline *p)
4063 struct thread *t = &p->threads[p->thread_id];
4064 struct instruction *ip = t->ip;
4066 TRACE("[Thread %2u] sub (hi)\n", p->thread_id);
/*
 * shl/shr executor fragments, one per opcode variant. Suffix encodes
 * operand location as <dst><src>: 'h' = header, 'm' = metadata,
 * 'i' = immediate. Shift bodies are elided in this excerpt.
 */
4076 instr_alu_shl_exec(struct rte_swx_pipeline *p)
4078 struct thread *t = &p->threads[p->thread_id];
4079 struct instruction *ip = t->ip;
4081 TRACE("[Thread %2u] shl\n", p->thread_id);
4091 instr_alu_shl_mh_exec(struct rte_swx_pipeline *p)
4093 struct thread *t = &p->threads[p->thread_id];
4094 struct instruction *ip = t->ip;
4096 TRACE("[Thread %2u] shl (mh)\n", p->thread_id);
4106 instr_alu_shl_hm_exec(struct rte_swx_pipeline *p)
4108 struct thread *t = &p->threads[p->thread_id];
4109 struct instruction *ip = t->ip;
4111 TRACE("[Thread %2u] shl (hm)\n", p->thread_id);
4121 instr_alu_shl_hh_exec(struct rte_swx_pipeline *p)
4123 struct thread *t = &p->threads[p->thread_id];
4124 struct instruction *ip = t->ip;
4126 TRACE("[Thread %2u] shl (hh)\n", p->thread_id);
4136 instr_alu_shl_mi_exec(struct rte_swx_pipeline *p)
4138 struct thread *t = &p->threads[p->thread_id];
4139 struct instruction *ip = t->ip;
4141 TRACE("[Thread %2u] shl (mi)\n", p->thread_id);
4151 instr_alu_shl_hi_exec(struct rte_swx_pipeline *p)
4153 struct thread *t = &p->threads[p->thread_id];
4154 struct instruction *ip = t->ip;
4156 TRACE("[Thread %2u] shl (hi)\n", p->thread_id);
4166 instr_alu_shr_exec(struct rte_swx_pipeline *p)
4168 struct thread *t = &p->threads[p->thread_id];
4169 struct instruction *ip = t->ip;
4171 TRACE("[Thread %2u] shr\n", p->thread_id);
4181 instr_alu_shr_mh_exec(struct rte_swx_pipeline *p)
4183 struct thread *t = &p->threads[p->thread_id];
4184 struct instruction *ip = t->ip;
4186 TRACE("[Thread %2u] shr (mh)\n", p->thread_id);
4196 instr_alu_shr_hm_exec(struct rte_swx_pipeline *p)
4198 struct thread *t = &p->threads[p->thread_id];
4199 struct instruction *ip = t->ip;
4201 TRACE("[Thread %2u] shr (hm)\n", p->thread_id);
4211 instr_alu_shr_hh_exec(struct rte_swx_pipeline *p)
4213 struct thread *t = &p->threads[p->thread_id];
4214 struct instruction *ip = t->ip;
4216 TRACE("[Thread %2u] shr (hh)\n", p->thread_id);
4226 instr_alu_shr_mi_exec(struct rte_swx_pipeline *p)
4228 struct thread *t = &p->threads[p->thread_id];
4229 struct instruction *ip = t->ip;
4231 TRACE("[Thread %2u] shr (mi)\n", p->thread_id);
4241 instr_alu_shr_hi_exec(struct rte_swx_pipeline *p)
4243 struct thread *t = &p->threads[p->thread_id];
4244 struct instruction *ip = t->ip;
4246 TRACE("[Thread %2u] shr (hi)\n", p->thread_id);
/*
 * and/or/xor executor fragments. Suffix: plain = both operands in the
 * same byte order, (s) = operands in different locations (one header,
 * one metadata — see the translate functions), (i) = immediate operand.
 * Bitwise bodies are elided in this excerpt.
 */
4256 instr_alu_and_exec(struct rte_swx_pipeline *p)
4258 struct thread *t = &p->threads[p->thread_id];
4259 struct instruction *ip = t->ip;
4261 TRACE("[Thread %2u] and\n", p->thread_id);
4271 instr_alu_and_s_exec(struct rte_swx_pipeline *p)
4273 struct thread *t = &p->threads[p->thread_id];
4274 struct instruction *ip = t->ip;
4276 TRACE("[Thread %2u] and (s)\n", p->thread_id);
4286 instr_alu_and_i_exec(struct rte_swx_pipeline *p)
4288 struct thread *t = &p->threads[p->thread_id];
4289 struct instruction *ip = t->ip;
4291 TRACE("[Thread %2u] and (i)\n", p->thread_id);
4301 instr_alu_or_exec(struct rte_swx_pipeline *p)
4303 struct thread *t = &p->threads[p->thread_id];
4304 struct instruction *ip = t->ip;
4306 TRACE("[Thread %2u] or\n", p->thread_id);
4316 instr_alu_or_s_exec(struct rte_swx_pipeline *p)
4318 struct thread *t = &p->threads[p->thread_id];
4319 struct instruction *ip = t->ip;
4321 TRACE("[Thread %2u] or (s)\n", p->thread_id);
4331 instr_alu_or_i_exec(struct rte_swx_pipeline *p)
4333 struct thread *t = &p->threads[p->thread_id];
4334 struct instruction *ip = t->ip;
4336 TRACE("[Thread %2u] or (i)\n", p->thread_id);
4346 instr_alu_xor_exec(struct rte_swx_pipeline *p)
4348 struct thread *t = &p->threads[p->thread_id];
4349 struct instruction *ip = t->ip;
4351 TRACE("[Thread %2u] xor\n", p->thread_id);
4361 instr_alu_xor_s_exec(struct rte_swx_pipeline *p)
4363 struct thread *t = &p->threads[p->thread_id];
4364 struct instruction *ip = t->ip;
4366 TRACE("[Thread %2u] xor (s)\n", p->thread_id);
4376 instr_alu_xor_i_exec(struct rte_swx_pipeline *p)
4378 struct thread *t = &p->threads[p->thread_id];
4379 struct instruction *ip = t->ip;
4381 TRACE("[Thread %2u] xor (i)\n", p->thread_id);
/*
 * CKADD_FIELD executor: fold a (up to 64-bit) source field into a
 * 16-bit one's-complement checksum field. The folding chain below
 * repeatedly applies r = (r & 0xFFFF) + (r >> 16) until no carry can be
 * produced, which is the standard end-around-carry reduction.
 * NOTE(review): the uint16_t*/uint64_t* casts of struct byte pointers
 * assume suitable alignment of the offsets — presumably guaranteed by
 * the struct layout; confirm.
 */
4391 instr_alu_ckadd_field_exec(struct rte_swx_pipeline *p)
4393 struct thread *t = &p->threads[p->thread_id];
4394 struct instruction *ip = t->ip;
4395 uint8_t *dst_struct, *src_struct;
4396 uint16_t *dst16_ptr, dst;
4397 uint64_t *src64_ptr, src64, src64_mask, src;
4400 TRACE("[Thread %2u] ckadd (field)\n", p->thread_id);
/* Locate the 16-bit checksum field inside the destination struct. */
4403 dst_struct = t->structs[ip->alu.dst.struct_id];
4404 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
/* Load the source field and mask it down to its declared width. */
4407 src_struct = t->structs[ip->alu.src.struct_id];
4408 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
4410 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
4411 src = src64 & src64_mask;
4416 /* The first input (r) is a 16-bit number. The second and the third
4417 * inputs are 32-bit numbers. In the worst case scenario, the sum of the
4418 * three numbers (output r) is a 34-bit number.
4420 r += (src >> 32) + (src & 0xFFFFFFFF);
4422 /* The first input is a 16-bit number. The second input is an 18-bit
4423 * number. In the worst case scenario, the sum of the two numbers is a
4426 r = (r & 0xFFFF) + (r >> 16);
4428 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
4429 * a 3-bit number (0 .. 7). Their sum is a 17-bit number (0 .. 0x10006).
4431 r = (r & 0xFFFF) + (r >> 16);
4433 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
4434 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
4435 * 0x10006), the output r is (0 .. 7). So no carry bit can be generated,
4436 * therefore the output r is always a 16-bit number.
4438 r = (r & 0xFFFF) + (r >> 16);
/* Store the folded 16-bit result back into the checksum field. */
4443 *dst16_ptr = (uint16_t)r;
/*
 * CKSUB_FIELD executor: remove a source field's contribution from a
 * 16-bit one's-complement checksum field. Underflow is avoided by
 * adding a large multiple of the 0xFFFF modulus before subtracting,
 * then end-around-carry folding back to 16 bits (the in-line comments
 * below track the exact bit widths at every step).
 */
4450 instr_alu_cksub_field_exec(struct rte_swx_pipeline *p)
4452 struct thread *t = &p->threads[p->thread_id];
4453 struct instruction *ip = t->ip;
4454 uint8_t *dst_struct, *src_struct;
4455 uint16_t *dst16_ptr, dst;
4456 uint64_t *src64_ptr, src64, src64_mask, src;
4459 TRACE("[Thread %2u] cksub (field)\n", p->thread_id);
4462 dst_struct = t->structs[ip->alu.dst.struct_id];
4463 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
4466 src_struct = t->structs[ip->alu.src.struct_id];
4467 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
4469 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
4470 src = src64 & src64_mask;
4475 /* Subtraction in 1's complement arithmetic (i.e. a '- b) is the same as
4476 * the following sequence of operations in 2's complement arithmetic:
4477 * a '- b = (a - b) % 0xFFFF.
4479 * In order to prevent an underflow for the below subtraction, in which
4480 * a 33-bit number (the subtrahend) is taken out of a 16-bit number (the
4481 * minuend), we first add a multiple of the 0xFFFF modulus to the
4482 * minuend. The number we add to the minuend needs to be a 34-bit number
4483 * or higher, so for readability reasons we picked the 36-bit multiple.
4484 * We are effectively turning the 16-bit minuend into a 36-bit number:
4485 * (a - b) % 0xFFFF = (a + 0xFFFF00000 - b) % 0xFFFF.
4487 r += 0xFFFF00000ULL; /* The output r is a 36-bit number. */
4489 /* A 33-bit number is subtracted from a 36-bit number (the input r). The
4490 * result (the output r) is a 36-bit number.
4492 r -= (src >> 32) + (src & 0xFFFFFFFF);
4494 /* The first input is a 16-bit number. The second input is a 20-bit
4495 * number. Their sum is a 21-bit number.
4497 r = (r & 0xFFFF) + (r >> 16);
4499 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
4500 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1001E).
4502 r = (r & 0xFFFF) + (r >> 16);
4504 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
4505 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
4506 * 0x1001E), the output r is (0 .. 31). So no carry bit can be
4507 * generated, therefore the output r is always a 16-bit number.
4509 r = (r & 0xFFFF) + (r >> 16);
/* Store the updated 16-bit checksum. */
4514 *dst16_ptr = (uint16_t)r;
/*
 * CKADD_STRUCT20 executor: one's-complement checksum over a 20-byte
 * header, read as five 32-bit words with two parallel accumulators
 * (r0, r1) to shorten the dependency chain, then end-around-carry
 * folded to 16 bits. Selected by the translator when the source header
 * is exactly 20 bytes.
 */
4521 instr_alu_ckadd_struct20_exec(struct rte_swx_pipeline *p)
4523 struct thread *t = &p->threads[p->thread_id];
4524 struct instruction *ip = t->ip;
4525 uint8_t *dst_struct, *src_struct;
4526 uint16_t *dst16_ptr;
4527 uint32_t *src32_ptr;
4530 TRACE("[Thread %2u] ckadd (struct of 20 bytes)\n", p->thread_id);
4533 dst_struct = t->structs[ip->alu.dst.struct_id];
4534 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
4536 src_struct = t->structs[ip->alu.src.struct_id];
4537 src32_ptr = (uint32_t *)&src_struct[0];
4539 r0 = src32_ptr[0]; /* r0 is a 32-bit number. */
4540 r1 = src32_ptr[1]; /* r1 is a 32-bit number. */
4541 r0 += src32_ptr[2]; /* The output r0 is a 33-bit number. */
4542 r1 += src32_ptr[3]; /* The output r1 is a 33-bit number. */
4543 r0 += r1 + src32_ptr[4]; /* The output r0 is a 35-bit number. */
4545 /* The first input is a 16-bit number. The second input is a 19-bit
4546 * number. Their sum is a 20-bit number.
4548 r0 = (r0 & 0xFFFF) + (r0 >> 16);
4550 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
4551 * a 4-bit number (0 .. 15). The sum is a 17-bit number (0 .. 0x1000E).
4553 r0 = (r0 & 0xFFFF) + (r0 >> 16);
4555 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
4556 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
4557 * 0x1000E), the output r is (0 .. 15). So no carry bit can be
4558 * generated, therefore the output r is always a 16-bit number.
4560 r0 = (r0 & 0xFFFF) + (r0 >> 16);
/* A zero checksum is represented as 0xFFFF in one's complement. */
4563 r0 = r0 ? r0 : 0xFFFF;
4565 *dst16_ptr = (uint16_t)r0;
/*
 * CKADD_STRUCT executor: generic one's-complement checksum over a whole
 * header. src.n_bits carries the header size (set by the translator);
 * the header is summed as 32-bit words, then folded to 16 bits with the
 * end-around-carry reductions below.
 */
4572 instr_alu_ckadd_struct_exec(struct rte_swx_pipeline *p)
4574 struct thread *t = &p->threads[p->thread_id];
4575 struct instruction *ip = t->ip;
4576 uint8_t *dst_struct, *src_struct;
4577 uint16_t *dst16_ptr;
4578 uint32_t *src32_ptr;
4582 TRACE("[Thread %2u] ckadd (struct)\n", p->thread_id);
4585 dst_struct = t->structs[ip->alu.dst.struct_id];
4586 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
4588 src_struct = t->structs[ip->alu.src.struct_id];
4589 src32_ptr = (uint32_t *)&src_struct[0];
4591 /* The max number of 32-bit words in a 256-byte header is 8 = 2^3.
4592 * Therefore, in the worst case scenario, a 35-bit number is added to a
4593 * 16-bit number (the input r), so the output r is 36-bit number.
4595 for (i = 0; i < ip->alu.src.n_bits / 32; i++, src32_ptr++)
4598 /* The first input is a 16-bit number. The second input is a 20-bit
4599 * number. Their sum is a 21-bit number.
4601 r = (r & 0xFFFF) + (r >> 16);
4603 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
4604 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1000E).
4606 r = (r & 0xFFFF) + (r >> 16);
4608 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
4609 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
4610 * 0x1001E), the output r is (0 .. 31). So no carry bit can be
4611 * generated, therefore the output r is always a 16-bit number.
4613 r = (r & 0xFFFF) + (r >> 16);
4618 *dst16_ptr = (uint16_t)r;
4627 static struct action *
4628 action_find(struct rte_swx_pipeline *p, const char *name);
/*
 * Translate "jmp LABEL": unconditional jump. The label text is saved in
 * the per-instruction data; the target instruction pointer is resolved
 * in a later pass once all labels are known.
 * NOTE(review): strcpy into jmp_label assumes the token length is
 * bounded upstream (cf. CHECK_INSTRUCTION / RTE_SWX_INSTRUCTION_SIZE) —
 * confirm.
 */
4631 instr_jmp_translate(struct rte_swx_pipeline *p __rte_unused,
4632 struct action *action __rte_unused,
4635 struct instruction *instr,
4636 struct instruction_data *data)
4638 CHECK(n_tokens == 2, EINVAL);
4640 strcpy(data->jmp_label, tokens[1]);
4642 instr->type = INSTR_JMP;
4643 instr->jmp.ip = NULL; /* Resolved later. */
/*
 * Translate "jmpv LABEL HEADER" / "jmpnv LABEL HEADER": jump when the
 * named header is valid (JMP_VALID) or invalid (JMP_INVALID). The label
 * target is resolved in a later pass.
 */
4648 instr_jmp_valid_translate(struct rte_swx_pipeline *p,
4649 struct action *action __rte_unused,
4652 struct instruction *instr,
4653 struct instruction_data *data)
4657 CHECK(n_tokens == 3, EINVAL);
4659 strcpy(data->jmp_label, tokens[1]);
4661 h = header_parse(p, tokens[2]);
4664 instr->type = INSTR_JMP_VALID;
4665 instr->jmp.ip = NULL; /* Resolved later. */
4666 instr->jmp.header_id = h->id;
/* Same as above, with the branch condition inverted. */
4671 instr_jmp_invalid_translate(struct rte_swx_pipeline *p,
4672 struct action *action __rte_unused,
4675 struct instruction *instr,
4676 struct instruction_data *data)
4680 CHECK(n_tokens == 3, EINVAL);
4682 strcpy(data->jmp_label, tokens[1]);
4684 h = header_parse(p, tokens[2]);
4687 instr->type = INSTR_JMP_INVALID;
4688 instr->jmp.ip = NULL; /* Resolved later. */
4689 instr->jmp.header_id = h->id;
/*
 * Translate "jmph LABEL" / "jmpnh LABEL": jump on table lookup hit /
 * miss. Only legal outside of actions (CHECK(!action)), since the
 * hit/miss flag refers to the enclosing table apply.
 */
4694 instr_jmp_hit_translate(struct rte_swx_pipeline *p __rte_unused,
4695 struct action *action,
4698 struct instruction *instr,
4699 struct instruction_data *data)
4701 CHECK(!action, EINVAL);
4702 CHECK(n_tokens == 2, EINVAL);
4704 strcpy(data->jmp_label, tokens[1]);
4706 instr->type = INSTR_JMP_HIT;
4707 instr->jmp.ip = NULL; /* Resolved later. */
/* Miss counterpart of the above. */
4712 instr_jmp_miss_translate(struct rte_swx_pipeline *p __rte_unused,
4713 struct action *action,
4716 struct instruction *instr,
4717 struct instruction_data *data)
4719 CHECK(!action, EINVAL);
4720 CHECK(n_tokens == 2, EINVAL);
4722 strcpy(data->jmp_label, tokens[1]);
4724 instr->type = INSTR_JMP_MISS;
4725 instr->jmp.ip = NULL; /* Resolved later. */
/*
 * Translate "jmpa LABEL ACTION" / "jmpna LABEL ACTION": jump when the
 * last table lookup selected (or did not select) the named action.
 * Only legal outside of actions (CHECK(!action)).
 */
4730 instr_jmp_action_hit_translate(struct rte_swx_pipeline *p,
4731 struct action *action,
4734 struct instruction *instr,
4735 struct instruction_data *data)
4739 CHECK(!action, EINVAL);
4740 CHECK(n_tokens == 3, EINVAL);
4742 strcpy(data->jmp_label, tokens[1]);
4744 a = action_find(p, tokens[2]);
4747 instr->type = INSTR_JMP_ACTION_HIT;
4748 instr->jmp.ip = NULL; /* Resolved later. */
4749 instr->jmp.action_id = a->id;
/* Negated counterpart of the above. */
4754 instr_jmp_action_miss_translate(struct rte_swx_pipeline *p,
4755 struct action *action,
4758 struct instruction *instr,
4759 struct instruction_data *data)
4763 CHECK(!action, EINVAL);
4764 CHECK(n_tokens == 3, EINVAL);
4766 strcpy(data->jmp_label, tokens[1]);
4768 a = action_find(p, tokens[2]);
4771 instr->type = INSTR_JMP_ACTION_MISS;
4772 instr->jmp.ip = NULL; /* Resolved later. */
4773 instr->jmp.action_id = a->id;
/*
 * Translate "jmpeq LABEL a b": jump when a == b.
 * Field/field form: JMP_EQ, or JMP_EQ_S when exactly one operand is a
 * header field. Field/immediate form: JMP_EQ_I, with the immediate
 * pre-positioned big-endian at the field width (hton64 shift).
 */
4778 instr_jmp_eq_translate(struct rte_swx_pipeline *p,
4779 struct action *action,
4782 struct instruction *instr,
4783 struct instruction_data *data)
4785 char *a = tokens[2], *b = tokens[3];
4786 struct field *fa, *fb;
4788 uint32_t a_struct_id, b_struct_id;
4790 CHECK(n_tokens == 4, EINVAL);
4792 strcpy(data->jmp_label, tokens[1]);
4794 fa = struct_field_parse(p, action, a, &a_struct_id);
4797 /* JMP_EQ or JMP_EQ_S. */
4798 fb = struct_field_parse(p, action, b, &b_struct_id);
4800 instr->type = INSTR_JMP_EQ;
4801 if ((a[0] == 'h' && b[0] != 'h') ||
4802 (a[0] != 'h' && b[0] == 'h'))
4803 instr->type = INSTR_JMP_EQ_S;
4804 instr->jmp.ip = NULL; /* Resolved later. */
4806 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
4807 instr->jmp.a.n_bits = fa->n_bits;
4808 instr->jmp.a.offset = fa->offset / 8;
4809 instr->jmp.b.struct_id = (uint8_t)b_struct_id;
4810 instr->jmp.b.n_bits = fb->n_bits;
4811 instr->jmp.b.offset = fb->offset / 8;
4816 b_val = strtoull(b, &b, 0);
4817 CHECK(!b[0], EINVAL);
4820 b_val = hton64(b_val) >> (64 - fa->n_bits);
4822 instr->type = INSTR_JMP_EQ_I;
4823 instr->jmp.ip = NULL; /* Resolved later. */
4824 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
4825 instr->jmp.a.n_bits = fa->n_bits;
4826 instr->jmp.a.offset = fa->offset / 8;
4827 instr->jmp.b_val = b_val;
/*
 * Translate "jmpneq LABEL a b": jump when a != b.
 * Mirror of jmpeq: JMP_NEQ / JMP_NEQ_S / JMP_NEQ_I with the same
 * operand-location and immediate-conversion rules.
 */
4832 instr_jmp_neq_translate(struct rte_swx_pipeline *p,
4833 struct action *action,
4836 struct instruction *instr,
4837 struct instruction_data *data)
4839 char *a = tokens[2], *b = tokens[3];
4840 struct field *fa, *fb;
4842 uint32_t a_struct_id, b_struct_id;
4844 CHECK(n_tokens == 4, EINVAL);
4846 strcpy(data->jmp_label, tokens[1]);
4848 fa = struct_field_parse(p, action, a, &a_struct_id);
4851 /* JMP_NEQ or JMP_NEQ_S. */
4852 fb = struct_field_parse(p, action, b, &b_struct_id);
4854 instr->type = INSTR_JMP_NEQ;
4855 if ((a[0] == 'h' && b[0] != 'h') ||
4856 (a[0] != 'h' && b[0] == 'h'))
4857 instr->type = INSTR_JMP_NEQ_S;
4858 instr->jmp.ip = NULL; /* Resolved later. */
4860 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
4861 instr->jmp.a.n_bits = fa->n_bits;
4862 instr->jmp.a.offset = fa->offset / 8;
4863 instr->jmp.b.struct_id = (uint8_t)b_struct_id;
4864 instr->jmp.b.n_bits = fb->n_bits;
4865 instr->jmp.b.offset = fb->offset / 8;
4870 b_val = strtoull(b, &b, 0);
4871 CHECK(!b[0], EINVAL);
4874 b_val = hton64(b_val) >> (64 - fa->n_bits);
4876 instr->type = INSTR_JMP_NEQ_I;
4877 instr->jmp.ip = NULL; /* Resolved later. */
4878 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
4879 instr->jmp.a.n_bits = fa->n_bits;
4880 instr->jmp.a.offset = fa->offset / 8;
4881 instr->jmp.b_val = b_val;
/*
 * Translate "jmplt LABEL a b": jump when a < b.
 * Ordered comparisons use per-location variants like add/sub (suffix is
 * <a-location><b-location>): JMP_LT / JMP_LT_HM / JMP_LT_MH / JMP_LT_HH
 * for the field form, JMP_LT_MI / JMP_LT_HI for the immediate form.
 */
4886 instr_jmp_lt_translate(struct rte_swx_pipeline *p,
4887 struct action *action,
4890 struct instruction *instr,
4891 struct instruction_data *data)
4893 char *a = tokens[2], *b = tokens[3];
4894 struct field *fa, *fb;
4896 uint32_t a_struct_id, b_struct_id;
4898 CHECK(n_tokens == 4, EINVAL);
4900 strcpy(data->jmp_label, tokens[1]);
4902 fa = struct_field_parse(p, action, a, &a_struct_id);
4905 /* JMP_LT, JMP_LT_MH, JMP_LT_HM, JMP_LT_HH. */
4906 fb = struct_field_parse(p, action, b, &b_struct_id);
4908 instr->type = INSTR_JMP_LT;
4909 if (a[0] == 'h' && b[0] == 'm')
4910 instr->type = INSTR_JMP_LT_HM;
4911 if (a[0] == 'm' && b[0] == 'h')
4912 instr->type = INSTR_JMP_LT_MH;
4913 if (a[0] == 'h' && b[0] == 'h')
4914 instr->type = INSTR_JMP_LT_HH;
4915 instr->jmp.ip = NULL; /* Resolved later. */
4917 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
4918 instr->jmp.a.n_bits = fa->n_bits;
4919 instr->jmp.a.offset = fa->offset / 8;
4920 instr->jmp.b.struct_id = (uint8_t)b_struct_id;
4921 instr->jmp.b.n_bits = fb->n_bits;
4922 instr->jmp.b.offset = fb->offset / 8;
4926 /* JMP_LT_MI, JMP_LT_HI. */
4927 b_val = strtoull(b, &b, 0);
4928 CHECK(!b[0], EINVAL);
4930 instr->type = INSTR_JMP_LT_MI;
4932 instr->type = INSTR_JMP_LT_HI;
4933 instr->jmp.ip = NULL; /* Resolved later. */
4935 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
4936 instr->jmp.a.n_bits = fa->n_bits;
4937 instr->jmp.a.offset = fa->offset / 8;
4938 instr->jmp.b_val = b_val;
/*
 * Translate "jmpgt LABEL a b": jump when a > b.
 * Mirror of jmplt: JMP_GT / JMP_GT_HM / JMP_GT_MH / JMP_GT_HH for the
 * field form, JMP_GT_MI / JMP_GT_HI for the immediate form.
 */
4943 instr_jmp_gt_translate(struct rte_swx_pipeline *p,
4944 struct action *action,
4947 struct instruction *instr,
4948 struct instruction_data *data)
4950 char *a = tokens[2], *b = tokens[3];
4951 struct field *fa, *fb;
4953 uint32_t a_struct_id, b_struct_id;
4955 CHECK(n_tokens == 4, EINVAL);
4957 strcpy(data->jmp_label, tokens[1]);
4959 fa = struct_field_parse(p, action, a, &a_struct_id);
4962 /* JMP_GT, JMP_GT_MH, JMP_GT_HM, JMP_GT_HH. */
4963 fb = struct_field_parse(p, action, b, &b_struct_id);
4965 instr->type = INSTR_JMP_GT;
4966 if (a[0] == 'h' && b[0] == 'm')
4967 instr->type = INSTR_JMP_GT_HM;
4968 if (a[0] == 'm' && b[0] == 'h')
4969 instr->type = INSTR_JMP_GT_MH;
4970 if (a[0] == 'h' && b[0] == 'h')
4971 instr->type = INSTR_JMP_GT_HH;
4972 instr->jmp.ip = NULL; /* Resolved later. */
4974 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
4975 instr->jmp.a.n_bits = fa->n_bits;
4976 instr->jmp.a.offset = fa->offset / 8;
4977 instr->jmp.b.struct_id = (uint8_t)b_struct_id;
4978 instr->jmp.b.n_bits = fb->n_bits;
4979 instr->jmp.b.offset = fb->offset / 8;
4983 /* JMP_GT_MI, JMP_GT_HI. */
4984 b_val = strtoull(b, &b, 0);
4985 CHECK(!b[0], EINVAL);
4987 instr->type = INSTR_JMP_GT_MI;
4989 instr->type = INSTR_JMP_GT_HI;
4990 instr->jmp.ip = NULL; /* Resolved later. */
4992 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
4993 instr->jmp.a.n_bits = fa->n_bits;
4994 instr->jmp.a.offset = fa->offset / 8;
4995 instr->jmp.b_val = b_val;
5000 instr_jmp_exec(struct rte_swx_pipeline *p)
5002 struct thread *t = &p->threads[p->thread_id];
5003 struct instruction *ip = t->ip;
5005 TRACE("[Thread %2u] jmp\n", p->thread_id);
5007 thread_ip_set(t, ip->jmp.ip);
5011 instr_jmp_valid_exec(struct rte_swx_pipeline *p)
5013 struct thread *t = &p->threads[p->thread_id];
5014 struct instruction *ip = t->ip;
5015 uint32_t header_id = ip->jmp.header_id;
5017 TRACE("[Thread %2u] jmpv\n", p->thread_id);
5019 t->ip = HEADER_VALID(t, header_id) ? ip->jmp.ip : (t->ip + 1);
5023 instr_jmp_invalid_exec(struct rte_swx_pipeline *p)
5025 struct thread *t = &p->threads[p->thread_id];
5026 struct instruction *ip = t->ip;
5027 uint32_t header_id = ip->jmp.header_id;
5029 TRACE("[Thread %2u] jmpnv\n", p->thread_id);
5031 t->ip = HEADER_VALID(t, header_id) ? (t->ip + 1) : ip->jmp.ip;
5035 instr_jmp_hit_exec(struct rte_swx_pipeline *p)
5037 struct thread *t = &p->threads[p->thread_id];
5038 struct instruction *ip = t->ip;
5039 struct instruction *ip_next[] = {t->ip + 1, ip->jmp.ip};
5041 TRACE("[Thread %2u] jmph\n", p->thread_id);
5043 t->ip = ip_next[t->hit];
5047 instr_jmp_miss_exec(struct rte_swx_pipeline *p)
5049 struct thread *t = &p->threads[p->thread_id];
5050 struct instruction *ip = t->ip;
5051 struct instruction *ip_next[] = {ip->jmp.ip, t->ip + 1};
5053 TRACE("[Thread %2u] jmpnh\n", p->thread_id);
5055 t->ip = ip_next[t->hit];
5059 instr_jmp_action_hit_exec(struct rte_swx_pipeline *p)
5061 struct thread *t = &p->threads[p->thread_id];
5062 struct instruction *ip = t->ip;
5064 TRACE("[Thread %2u] jmpa\n", p->thread_id);
5066 t->ip = (ip->jmp.action_id == t->action_id) ? ip->jmp.ip : (t->ip + 1);
5070 instr_jmp_action_miss_exec(struct rte_swx_pipeline *p)
5072 struct thread *t = &p->threads[p->thread_id];
5073 struct instruction *ip = t->ip;
5075 TRACE("[Thread %2u] jmpna\n", p->thread_id);
5077 t->ip = (ip->jmp.action_id == t->action_id) ? (t->ip + 1) : ip->jmp.ip;
/*
 * jmpeq: conditional jump, taken when the two operands compare equal.
 * NOTE(review): the comparison/branch statement is missing from this
 * extract — expected a JMP_CMP(t, ip, ==) form by analogy with the _s/_i
 * variants below; confirm against the full file.
 */
instr_jmp_eq_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] jmpeq\n", p->thread_id);
5092 instr_jmp_eq_s_exec(struct rte_swx_pipeline *p)
5094 struct thread *t = &p->threads[p->thread_id];
5095 struct instruction *ip = t->ip;
5097 TRACE("[Thread %2u] jmpeq (s)\n", p->thread_id);
5099 JMP_CMP_S(t, ip, ==);
5103 instr_jmp_eq_i_exec(struct rte_swx_pipeline *p)
5105 struct thread *t = &p->threads[p->thread_id];
5106 struct instruction *ip = t->ip;
5108 TRACE("[Thread %2u] jmpeq (i)\n", p->thread_id);
5110 JMP_CMP_I(t, ip, ==);
/*
 * jmpneq: conditional jump, taken when the two operands differ.
 * NOTE(review): the comparison/branch statement is missing from this
 * extract — expected a JMP_CMP(t, ip, !=) form; confirm against the
 * full file.
 */
instr_jmp_neq_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] jmpneq\n", p->thread_id);
5125 instr_jmp_neq_s_exec(struct rte_swx_pipeline *p)
5127 struct thread *t = &p->threads[p->thread_id];
5128 struct instruction *ip = t->ip;
5130 TRACE("[Thread %2u] jmpneq (s)\n", p->thread_id);
5132 JMP_CMP_S(t, ip, !=);
5136 instr_jmp_neq_i_exec(struct rte_swx_pipeline *p)
5138 struct thread *t = &p->threads[p->thread_id];
5139 struct instruction *ip = t->ip;
5141 TRACE("[Thread %2u] jmpneq (i)\n", p->thread_id);
5143 JMP_CMP_I(t, ip, !=);
/*
 * jmplt: conditional jump, taken when operand a < operand b.
 * NOTE(review): the comparison/branch statement is missing from this
 * extract — expected a JMP_CMP(t, ip, <) form; confirm against the
 * full file.
 */
instr_jmp_lt_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] jmplt\n", p->thread_id);
5158 instr_jmp_lt_mh_exec(struct rte_swx_pipeline *p)
5160 struct thread *t = &p->threads[p->thread_id];
5161 struct instruction *ip = t->ip;
5163 TRACE("[Thread %2u] jmplt (mh)\n", p->thread_id);
5165 JMP_CMP_MH(t, ip, <);
5169 instr_jmp_lt_hm_exec(struct rte_swx_pipeline *p)
5171 struct thread *t = &p->threads[p->thread_id];
5172 struct instruction *ip = t->ip;
5174 TRACE("[Thread %2u] jmplt (hm)\n", p->thread_id);
5176 JMP_CMP_HM(t, ip, <);
5180 instr_jmp_lt_hh_exec(struct rte_swx_pipeline *p)
5182 struct thread *t = &p->threads[p->thread_id];
5183 struct instruction *ip = t->ip;
5185 TRACE("[Thread %2u] jmplt (hh)\n", p->thread_id);
5187 JMP_CMP_HH(t, ip, <);
5191 instr_jmp_lt_mi_exec(struct rte_swx_pipeline *p)
5193 struct thread *t = &p->threads[p->thread_id];
5194 struct instruction *ip = t->ip;
5196 TRACE("[Thread %2u] jmplt (mi)\n", p->thread_id);
5198 JMP_CMP_MI(t, ip, <);
5202 instr_jmp_lt_hi_exec(struct rte_swx_pipeline *p)
5204 struct thread *t = &p->threads[p->thread_id];
5205 struct instruction *ip = t->ip;
5207 TRACE("[Thread %2u] jmplt (hi)\n", p->thread_id);
5209 JMP_CMP_HI(t, ip, <);
/*
 * jmpgt: conditional jump, taken when operand a > operand b.
 * NOTE(review): the comparison/branch statement is missing from this
 * extract — expected a JMP_CMP(t, ip, >) form; confirm against the
 * full file.
 */
instr_jmp_gt_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] jmpgt\n", p->thread_id);
5224 instr_jmp_gt_mh_exec(struct rte_swx_pipeline *p)
5226 struct thread *t = &p->threads[p->thread_id];
5227 struct instruction *ip = t->ip;
5229 TRACE("[Thread %2u] jmpgt (mh)\n", p->thread_id);
5231 JMP_CMP_MH(t, ip, >);
5235 instr_jmp_gt_hm_exec(struct rte_swx_pipeline *p)
5237 struct thread *t = &p->threads[p->thread_id];
5238 struct instruction *ip = t->ip;
5240 TRACE("[Thread %2u] jmpgt (hm)\n", p->thread_id);
5242 JMP_CMP_HM(t, ip, >);
5246 instr_jmp_gt_hh_exec(struct rte_swx_pipeline *p)
5248 struct thread *t = &p->threads[p->thread_id];
5249 struct instruction *ip = t->ip;
5251 TRACE("[Thread %2u] jmpgt (hh)\n", p->thread_id);
5253 JMP_CMP_HH(t, ip, >);
5257 instr_jmp_gt_mi_exec(struct rte_swx_pipeline *p)
5259 struct thread *t = &p->threads[p->thread_id];
5260 struct instruction *ip = t->ip;
5262 TRACE("[Thread %2u] jmpgt (mi)\n", p->thread_id);
5264 JMP_CMP_MI(t, ip, >);
5268 instr_jmp_gt_hi_exec(struct rte_swx_pipeline *p)
5270 struct thread *t = &p->threads[p->thread_id];
5271 struct instruction *ip = t->ip;
5273 TRACE("[Thread %2u] jmpgt (hi)\n", p->thread_id);
5275 JMP_CMP_HI(t, ip, >);
/*
 * Translate the "return" instruction. Only legal inside an action body
 * (action != NULL) and takes no operands.
 * NOTE(review): the n_tokens parameter line appears to be elided from this
 * extract (the body uses n_tokens) — confirm against the full file.
 */
instr_return_translate(struct rte_swx_pipeline *p __rte_unused,
		       struct action *action,
		       char **tokens __rte_unused,
		       struct instruction *instr,
		       struct instruction_data *data __rte_unused)
	CHECK(action, EINVAL);		/* "return" is action-only. */
	CHECK(n_tokens == 1, EINVAL);	/* No operands allowed. */

	instr->type = INSTR_RETURN;
/*
 * return: end of an action subroutine; control goes back to the main
 * pipeline instruction stream (statement elided from this extract).
 */
instr_return_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];

	TRACE("[Thread %2u] return\n", p->thread_id);
/*
 * Translate one instruction string into its internal representation.
 * Tokenizes the string, strips an optional "label :" prefix, then
 * dispatches on the mnemonic to the per-instruction translator.
 * Returns 0 on success, negative errno otherwise (via CHECK).
 * NOTE(review): the per-dispatch argument lists and the tokenize loop
 * body are elided from this extract.
 */
instr_translate(struct rte_swx_pipeline *p,
		struct action *action,
		struct instruction *instr,
		struct instruction_data *data)
	char *tokens[RTE_SWX_INSTRUCTION_TOKENS_MAX];
	int n_tokens = 0, tpos = 0;

	/* Parse the instruction string into tokens. */
	token = strtok_r(string, " \t\v", &string);
	CHECK(n_tokens < RTE_SWX_INSTRUCTION_TOKENS_MAX, EINVAL);
	CHECK_NAME(token, EINVAL);
	tokens[n_tokens] = token;
	CHECK(n_tokens, EINVAL);	/* Empty instruction string is invalid. */

	/* Handle the optional instruction label ("<label> : <mnemonic> ..."). */
	if ((n_tokens >= 2) && !strcmp(tokens[1], ":")) {
	strcpy(data->label, tokens[0]);
	CHECK(n_tokens - tpos, EINVAL);	/* Label alone is not an instruction. */

	/* Identify the instruction type and dispatch to its translator. */
	if (!strcmp(tokens[tpos], "rx"))
		return instr_rx_translate(p,
	if (!strcmp(tokens[tpos], "tx"))
		return instr_tx_translate(p,
	if (!strcmp(tokens[tpos], "extract"))
		return instr_hdr_extract_translate(p,
	if (!strcmp(tokens[tpos], "emit"))
		return instr_hdr_emit_translate(p,
	if (!strcmp(tokens[tpos], "validate"))
		return instr_hdr_validate_translate(p,
	if (!strcmp(tokens[tpos], "invalidate"))
		return instr_hdr_invalidate_translate(p,
	if (!strcmp(tokens[tpos], "mov"))
		return instr_mov_translate(p,
	if (!strcmp(tokens[tpos], "dma"))
		return instr_dma_translate(p,
	if (!strcmp(tokens[tpos], "add"))
		return instr_alu_add_translate(p,
	if (!strcmp(tokens[tpos], "sub"))
		return instr_alu_sub_translate(p,
	if (!strcmp(tokens[tpos], "ckadd"))
		return instr_alu_ckadd_translate(p,
	if (!strcmp(tokens[tpos], "cksub"))
		return instr_alu_cksub_translate(p,
	if (!strcmp(tokens[tpos], "and"))
		return instr_alu_and_translate(p,
	if (!strcmp(tokens[tpos], "or"))
		return instr_alu_or_translate(p,
	if (!strcmp(tokens[tpos], "xor"))
		return instr_alu_xor_translate(p,
	if (!strcmp(tokens[tpos], "shl"))
		return instr_alu_shl_translate(p,
	if (!strcmp(tokens[tpos], "shr"))
		return instr_alu_shr_translate(p,
	if (!strcmp(tokens[tpos], "table"))
		return instr_table_translate(p,
	if (!strcmp(tokens[tpos], "extern"))
		return instr_extern_translate(p,
	if (!strcmp(tokens[tpos], "jmp"))
		return instr_jmp_translate(p,
	if (!strcmp(tokens[tpos], "jmpv"))
		return instr_jmp_valid_translate(p,
	if (!strcmp(tokens[tpos], "jmpnv"))
		return instr_jmp_invalid_translate(p,
	if (!strcmp(tokens[tpos], "jmph"))
		return instr_jmp_hit_translate(p,
	if (!strcmp(tokens[tpos], "jmpnh"))
		return instr_jmp_miss_translate(p,
	if (!strcmp(tokens[tpos], "jmpa"))
		return instr_jmp_action_hit_translate(p,
	if (!strcmp(tokens[tpos], "jmpna"))
		return instr_jmp_action_miss_translate(p,
	if (!strcmp(tokens[tpos], "jmpeq"))
		return instr_jmp_eq_translate(p,
	if (!strcmp(tokens[tpos], "jmpneq"))
		return instr_jmp_neq_translate(p,
	if (!strcmp(tokens[tpos], "jmplt"))
		return instr_jmp_lt_translate(p,
	if (!strcmp(tokens[tpos], "jmpgt"))
		return instr_jmp_gt_translate(p,
	if (!strcmp(tokens[tpos], "return"))
		return instr_return_translate(p,
/*
 * Linear search for the instruction carrying the given label.
 * Return statements are elided from this extract; presumably returns the
 * matching entry or NULL — confirm against the full file.
 */
static struct instruction_data *
label_find(struct instruction_data *data, uint32_t n, const char *label)
	for (i = 0; i < n; i++)
		if (!strcmp(label, data[i].label))
/*
 * Count how many instructions jump to the given label (the increment and
 * return of "count" are elided from this extract).
 */
label_is_used(struct instruction_data *data, uint32_t n, const char *label)
	uint32_t count = 0, i;

	for (i = 0; i < n; i++)
		if (!strcmp(label, data[i].jmp_label))
/*
 * Validate labels across an instruction sequence:
 * 1) every non-empty label must be unique;
 * 2) record, per instruction, how many jumps target its label (n_users),
 *    which the pattern optimizer later uses to avoid merging jump targets.
 */
instr_label_check(struct instruction_data *instruction_data,
		  uint32_t n_instructions)
	/* Check that all instruction labels are unique. */
	for (i = 0; i < n_instructions; i++) {
		struct instruction_data *data = &instruction_data[i];
		char *label = data->label;

		/* Compare against every later label only (earlier pairs already checked). */
		for (j = i + 1; j < n_instructions; j++)
			CHECK(strcmp(label, data[j].label), EINVAL);

	/* Get users for each instruction label. */
	for (i = 0; i < n_instructions; i++) {
		struct instruction_data *data = &instruction_data[i];
		char *label = data->label;

		data->n_users = label_is_used(instruction_data,
/*
 * Second pass over the translated instructions: convert each jump's symbolic
 * label into a direct pointer to the target instruction. Fails (EINVAL) when
 * a jump references a label that does not exist.
 */
instr_jmp_resolve(struct instruction *instructions,
		  struct instruction_data *instruction_data,
		  uint32_t n_instructions)
	for (i = 0; i < n_instructions; i++) {
		struct instruction *instr = &instructions[i];
		struct instruction_data *data = &instruction_data[i];
		struct instruction_data *found;

		/* Non-jump instructions need no resolution. */
		if (!instruction_is_jmp(instr))
		found = label_find(instruction_data,
		CHECK(found, EINVAL);	/* Jump to an undefined label. */

		/* Same index in both arrays identifies the target instruction. */
		instr->jmp.ip = &instructions[found - instruction_data];
/*
 * Structural checks on a translated instruction sequence. Two rule sets are
 * visible here: the pipeline-level rules (first instruction is rx, at least
 * one tx, last instruction is tx or an unconditional jump) and the
 * action-level rules (at least one return or tx). The branch selecting
 * between them is elided from this extract.
 */
instr_verify(struct rte_swx_pipeline *p __rte_unused,
	     struct instruction *instr,
	     struct instruction_data *data __rte_unused,
	     uint32_t n_instructions)
	enum instruction_type type;

	/* Check that the first instruction is rx. */
	CHECK(instr[0].type == INSTR_RX, EINVAL);

	/* Check that there is at least one tx instruction. */
	for (i = 0; i < n_instructions; i++) {
		type = instr[i].type;

		if (type == INSTR_TX)
	CHECK(i < n_instructions, EINVAL);	/* Loop fell through: no tx found. */

	/* Check that the last instruction is either tx or unconditional
	 */
	type = instr[n_instructions - 1].type;
	CHECK((type == INSTR_TX) || (type == INSTR_JMP), EINVAL);
	enum instruction_type type;

	/* Check that there is at least one return or tx instruction. */
	for (i = 0; i < n_instructions; i++) {
		type = instr[i].type;

		if ((type == INSTR_RETURN) || (type == INSTR_TX))
	CHECK(i < n_instructions, EINVAL);	/* No return/tx found. */
/*
 * Detect a run of consecutive "extract" instructions eligible for fusion
 * into a single multi-header extract. The run stops at: an invalidated
 * slot, a non-extract instruction, the per-instruction header array
 * capacity, or a jump target (n_users != 0) other than the first slot.
 */
instr_pattern_extract_many_detect(struct instruction *instr,
				  struct instruction_data *data,
				  uint32_t *n_pattern_instr)
	for (i = 0; i < n_instr; i++) {
		if (data[i].invalid)
		if (instr[i].type != INSTR_HDR_EXTRACT)
		/* Fused instruction can hold at most this many headers. */
		if (i == RTE_DIM(instr->io.hdr.header_id))
		/* A jump target must stay a separate instruction. */
		if (i && data[i].n_users)
	*n_pattern_instr = i;
/*
 * Fuse a detected run of extract instructions: fold each follow-up extract's
 * header descriptor into slot i of the first instruction, then mark the
 * follow-up as invalid so the elimination pass removes it.
 */
instr_pattern_extract_many_optimize(struct instruction *instr,
				    struct instruction_data *data,
	for (i = 1; i < n_instr; i++) {
		instr[0].io.hdr.header_id[i] = instr[i].io.hdr.header_id[0];
		instr[0].io.hdr.struct_id[i] = instr[i].io.hdr.struct_id[0];
		instr[0].io.hdr.n_bytes[i] = instr[i].io.hdr.n_bytes[0];

		data[i].invalid = 1;
/*
 * Detect the "emit, emit, ..., tx" pattern eligible for fusion into a
 * single emit-many-plus-tx instruction. The emit run obeys the same stop
 * conditions as extract-many; the pattern additionally requires a tx
 * instruction immediately after the emits.
 */
instr_pattern_emit_many_tx_detect(struct instruction *instr,
				  struct instruction_data *data,
				  uint32_t *n_pattern_instr)
	for (i = 0; i < n_instr; i++) {
		if (data[i].invalid)
		if (instr[i].type != INSTR_HDR_EMIT)
		/* Fused instruction can hold at most this many headers. */
		if (i == RTE_DIM(instr->io.hdr.header_id))
		/* A jump target must stay a separate instruction. */
		if (i && data[i].n_users)
	if (instr[i].type != INSTR_TX)
	*n_pattern_instr = i;
/*
 * Fuse a detected "emit ... emit tx" pattern: fold each extra emit's header
 * descriptor and the final tx's io descriptor into the first instruction,
 * invalidating the absorbed instructions.
 */
instr_pattern_emit_many_tx_optimize(struct instruction *instr,
				    struct instruction_data *data,
	/* Any emit instruction in addition to the first one. */
	for (i = 1; i < n_instr - 1; i++) {
		instr[0].io.hdr.header_id[i] = instr[i].io.hdr.header_id[0];
		instr[0].io.hdr.struct_id[i] = instr[i].io.hdr.struct_id[0];
		instr[0].io.hdr.n_bytes[i] = instr[i].io.hdr.n_bytes[0];

		data[i].invalid = 1;

	/* The TX instruction is the last one in the pattern. */
	instr[0].io.io.offset = instr[i].io.io.offset;
	instr[0].io.io.n_bits = instr[i].io.io.n_bits;
	data[i].invalid = 1;
/*
 * Detect a run of consecutive header-to-table DMA instructions eligible
 * for fusion into a single multi-slot DMA, with the same stop conditions
 * as the extract-many pattern.
 */
instr_pattern_dma_many_detect(struct instruction *instr,
			      struct instruction_data *data,
			      uint32_t *n_pattern_instr)
	for (i = 0; i < n_instr; i++) {
		if (data[i].invalid)
		if (instr[i].type != INSTR_DMA_HT)
		/* Fused instruction can hold at most this many destinations. */
		if (i == RTE_DIM(instr->dma.dst.header_id))
		/* A jump target must stay a separate instruction. */
		if (i && data[i].n_users)
	*n_pattern_instr = i;
/*
 * Fuse a detected DMA run: fold each follow-up DMA's dst/src/size into
 * slot i of the first instruction and invalidate the follow-up.
 */
instr_pattern_dma_many_optimize(struct instruction *instr,
				struct instruction_data *data,
	for (i = 1; i < n_instr; i++) {
		instr[0].dma.dst.header_id[i] = instr[i].dma.dst.header_id[0];
		instr[0].dma.dst.struct_id[i] = instr[i].dma.dst.struct_id[0];
		instr[0].dma.src.offset[i] = instr[i].dma.src.offset[0];
		instr[0].dma.n_bytes[i] = instr[i].dma.n_bytes[0];

		data[i].invalid = 1;
/*
 * Peephole optimizer: walk the instruction stream, applying the three
 * fusion patterns (extract-many, emit-many+tx, dma-many) at each position,
 * then compact the stream by dropping the instructions marked invalid.
 * Returns the new (possibly smaller) instruction count via "pos" (the
 * return statement is elided from this extract).
 */
instr_optimize(struct instruction *instructions,
	       struct instruction_data *instruction_data,
	       uint32_t n_instructions)
	uint32_t i, pos = 0;

	for (i = 0; i < n_instructions; ) {
		struct instruction *instr = &instructions[i];
		struct instruction_data *data = &instruction_data[i];
		uint32_t n_instr = 0;

		detected = instr_pattern_extract_many_detect(instr,
			instr_pattern_extract_many_optimize(instr,

		/* Emit many + TX. */
		detected = instr_pattern_emit_many_tx_detect(instr,
			instr_pattern_emit_many_tx_optimize(instr,
		detected = instr_pattern_dma_many_detect(instr,
			instr_pattern_dma_many_optimize(instr, data, n_instr);

		/* No pattern starting at the current instruction. */

	/* Eliminate the invalid instructions that have been optimized out. */
	for (i = 0; i < n_instructions; i++) {
		struct instruction *instr = &instructions[i];
		struct instruction_data *data = &instruction_data[i];

		/* Compact in place: valid entries are copied down to "pos". */
		memcpy(&instructions[pos], instr, sizeof(*instr));
		memcpy(&instruction_data[pos], data, sizeof(*data));
/*
 * Translate, verify, optimize and jump-resolve a full instruction list,
 * attaching the result either to an action (a != NULL) or to the pipeline
 * itself. Error cleanup paths are elided from this extract.
 */
instruction_config(struct rte_swx_pipeline *p,
		   const char **instructions,
		   uint32_t n_instructions)
	struct instruction *instr = NULL;
	struct instruction_data *data = NULL;

	CHECK(n_instructions, EINVAL);
	CHECK(instructions, EINVAL);
	for (i = 0; i < n_instructions; i++)
		CHECK_INSTRUCTION(instructions[i], EINVAL);

	/* Memory allocation. */
	instr = calloc(n_instructions, sizeof(struct instruction));
	data = calloc(n_instructions, sizeof(struct instruction_data));
	for (i = 0; i < n_instructions; i++) {
		/* strtok_r in instr_translate mutates the string: work on a copy. */
		char *string = strdup(instructions[i]);

		err = instr_translate(p, a, string, &instr[i], &data[i]);

	err = instr_label_check(data, n_instructions);

	err = instr_verify(p, a, instr, data, n_instructions);

	/* May shrink the instruction count by fusing patterns. */
	n_instructions = instr_optimize(instr, data, n_instructions);

	err = instr_jmp_resolve(instr, data, n_instructions);

	/* Attach to the action, or to the pipeline when a == NULL. */
	a->instructions = instr;
	a->n_instructions = n_instructions;
	p->instructions = instr;
	p->n_instructions = n_instructions;
6025 typedef void (*instr_exec_t)(struct rte_swx_pipeline *);
/*
 * Dispatch table: one executor per instruction opcode, indexed by
 * enum instruction_type. Used by instr_exec() below. Entries not listed
 * are NULL (designated initializer semantics).
 */
static instr_exec_t instruction_table[] = {
	[INSTR_RX] = instr_rx_exec,
	[INSTR_TX] = instr_tx_exec,

	[INSTR_HDR_EXTRACT] = instr_hdr_extract_exec,
	[INSTR_HDR_EXTRACT2] = instr_hdr_extract2_exec,
	[INSTR_HDR_EXTRACT3] = instr_hdr_extract3_exec,
	[INSTR_HDR_EXTRACT4] = instr_hdr_extract4_exec,
	[INSTR_HDR_EXTRACT5] = instr_hdr_extract5_exec,
	[INSTR_HDR_EXTRACT6] = instr_hdr_extract6_exec,
	[INSTR_HDR_EXTRACT7] = instr_hdr_extract7_exec,
	[INSTR_HDR_EXTRACT8] = instr_hdr_extract8_exec,

	[INSTR_HDR_EMIT] = instr_hdr_emit_exec,
	[INSTR_HDR_EMIT_TX] = instr_hdr_emit_tx_exec,
	[INSTR_HDR_EMIT2_TX] = instr_hdr_emit2_tx_exec,
	[INSTR_HDR_EMIT3_TX] = instr_hdr_emit3_tx_exec,
	[INSTR_HDR_EMIT4_TX] = instr_hdr_emit4_tx_exec,
	[INSTR_HDR_EMIT5_TX] = instr_hdr_emit5_tx_exec,
	[INSTR_HDR_EMIT6_TX] = instr_hdr_emit6_tx_exec,
	[INSTR_HDR_EMIT7_TX] = instr_hdr_emit7_tx_exec,
	[INSTR_HDR_EMIT8_TX] = instr_hdr_emit8_tx_exec,

	[INSTR_HDR_VALIDATE] = instr_hdr_validate_exec,
	[INSTR_HDR_INVALIDATE] = instr_hdr_invalidate_exec,

	[INSTR_MOV] = instr_mov_exec,
	[INSTR_MOV_S] = instr_mov_s_exec,
	[INSTR_MOV_I] = instr_mov_i_exec,

	[INSTR_DMA_HT] = instr_dma_ht_exec,
	[INSTR_DMA_HT2] = instr_dma_ht2_exec,
	[INSTR_DMA_HT3] = instr_dma_ht3_exec,
	[INSTR_DMA_HT4] = instr_dma_ht4_exec,
	[INSTR_DMA_HT5] = instr_dma_ht5_exec,
	[INSTR_DMA_HT6] = instr_dma_ht6_exec,
	[INSTR_DMA_HT7] = instr_dma_ht7_exec,
	[INSTR_DMA_HT8] = instr_dma_ht8_exec,

	[INSTR_ALU_ADD] = instr_alu_add_exec,
	[INSTR_ALU_ADD_MH] = instr_alu_add_mh_exec,
	[INSTR_ALU_ADD_HM] = instr_alu_add_hm_exec,
	[INSTR_ALU_ADD_HH] = instr_alu_add_hh_exec,
	[INSTR_ALU_ADD_MI] = instr_alu_add_mi_exec,
	[INSTR_ALU_ADD_HI] = instr_alu_add_hi_exec,

	[INSTR_ALU_SUB] = instr_alu_sub_exec,
	[INSTR_ALU_SUB_MH] = instr_alu_sub_mh_exec,
	[INSTR_ALU_SUB_HM] = instr_alu_sub_hm_exec,
	[INSTR_ALU_SUB_HH] = instr_alu_sub_hh_exec,
	[INSTR_ALU_SUB_MI] = instr_alu_sub_mi_exec,
	[INSTR_ALU_SUB_HI] = instr_alu_sub_hi_exec,

	[INSTR_ALU_CKADD_FIELD] = instr_alu_ckadd_field_exec,
	[INSTR_ALU_CKADD_STRUCT] = instr_alu_ckadd_struct_exec,
	[INSTR_ALU_CKADD_STRUCT20] = instr_alu_ckadd_struct20_exec,
	[INSTR_ALU_CKSUB_FIELD] = instr_alu_cksub_field_exec,

	[INSTR_ALU_AND] = instr_alu_and_exec,
	[INSTR_ALU_AND_S] = instr_alu_and_s_exec,
	[INSTR_ALU_AND_I] = instr_alu_and_i_exec,

	[INSTR_ALU_OR] = instr_alu_or_exec,
	[INSTR_ALU_OR_S] = instr_alu_or_s_exec,
	[INSTR_ALU_OR_I] = instr_alu_or_i_exec,

	[INSTR_ALU_XOR] = instr_alu_xor_exec,
	[INSTR_ALU_XOR_S] = instr_alu_xor_s_exec,
	[INSTR_ALU_XOR_I] = instr_alu_xor_i_exec,

	[INSTR_ALU_SHL] = instr_alu_shl_exec,
	[INSTR_ALU_SHL_MH] = instr_alu_shl_mh_exec,
	[INSTR_ALU_SHL_HM] = instr_alu_shl_hm_exec,
	[INSTR_ALU_SHL_HH] = instr_alu_shl_hh_exec,
	[INSTR_ALU_SHL_MI] = instr_alu_shl_mi_exec,
	[INSTR_ALU_SHL_HI] = instr_alu_shl_hi_exec,

	[INSTR_ALU_SHR] = instr_alu_shr_exec,
	[INSTR_ALU_SHR_MH] = instr_alu_shr_mh_exec,
	[INSTR_ALU_SHR_HM] = instr_alu_shr_hm_exec,
	[INSTR_ALU_SHR_HH] = instr_alu_shr_hh_exec,
	[INSTR_ALU_SHR_MI] = instr_alu_shr_mi_exec,
	[INSTR_ALU_SHR_HI] = instr_alu_shr_hi_exec,

	[INSTR_TABLE] = instr_table_exec,
	[INSTR_EXTERN_OBJ] = instr_extern_obj_exec,
	[INSTR_EXTERN_FUNC] = instr_extern_func_exec,

	[INSTR_JMP] = instr_jmp_exec,
	[INSTR_JMP_VALID] = instr_jmp_valid_exec,
	[INSTR_JMP_INVALID] = instr_jmp_invalid_exec,
	[INSTR_JMP_HIT] = instr_jmp_hit_exec,
	[INSTR_JMP_MISS] = instr_jmp_miss_exec,
	[INSTR_JMP_ACTION_HIT] = instr_jmp_action_hit_exec,
	[INSTR_JMP_ACTION_MISS] = instr_jmp_action_miss_exec,

	[INSTR_JMP_EQ] = instr_jmp_eq_exec,
	[INSTR_JMP_EQ_S] = instr_jmp_eq_s_exec,
	[INSTR_JMP_EQ_I] = instr_jmp_eq_i_exec,

	[INSTR_JMP_NEQ] = instr_jmp_neq_exec,
	[INSTR_JMP_NEQ_S] = instr_jmp_neq_s_exec,
	[INSTR_JMP_NEQ_I] = instr_jmp_neq_i_exec,

	[INSTR_JMP_LT] = instr_jmp_lt_exec,
	[INSTR_JMP_LT_MH] = instr_jmp_lt_mh_exec,
	[INSTR_JMP_LT_HM] = instr_jmp_lt_hm_exec,
	[INSTR_JMP_LT_HH] = instr_jmp_lt_hh_exec,
	[INSTR_JMP_LT_MI] = instr_jmp_lt_mi_exec,
	[INSTR_JMP_LT_HI] = instr_jmp_lt_hi_exec,

	[INSTR_JMP_GT] = instr_jmp_gt_exec,
	[INSTR_JMP_GT_MH] = instr_jmp_gt_mh_exec,
	[INSTR_JMP_GT_HM] = instr_jmp_gt_hm_exec,
	[INSTR_JMP_GT_HH] = instr_jmp_gt_hh_exec,
	[INSTR_JMP_GT_MI] = instr_jmp_gt_mi_exec,
	[INSTR_JMP_GT_HI] = instr_jmp_gt_hi_exec,

	[INSTR_RETURN] = instr_return_exec,
/*
 * Execute the current instruction of the current thread by dispatching
 * through instruction_table (the call statement is elided from this
 * extract).
 */
instr_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	instr_exec_t instr = instruction_table[ip->type];
/*
 * Look up an action by name; return statements are elided from this
 * extract (presumably the matching element or NULL).
 */
static struct action *
action_find(struct rte_swx_pipeline *p, const char *name)
	struct action *elem;

	TAILQ_FOREACH(elem, &p->actions, node)
		if (strcmp(elem->name, name) == 0)
/*
 * Look up an action by numeric ID; return statements are elided from this
 * extract.
 */
static struct action *
action_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
	struct action *action = NULL;

	TAILQ_FOREACH(action, &p->actions, node)
		if (action->id == id)
6188 static struct field *
6189 action_field_find(struct action *a, const char *name)
6191 return a->st ? struct_type_field_find(a->st, name) : NULL;
/*
 * Parse an action argument reference of the form "t.<field>"; anything not
 * starting with the "t." prefix is rejected (the early return statement is
 * elided from this extract).
 */
static struct field *
action_field_parse(struct action *action, const char *name)
	if (name[0] != 't' || name[1] != '.')

	/* Skip the "t." prefix when looking up the field. */
	return action_field_find(action, &name[2]);
/*
 * Public API: register an action with the pipeline. Validates the name and
 * the optional argument struct type, allocates the action node, translates
 * its instruction list and appends it to the pipeline's action list.
 * Error cleanup paths are elided from this extract.
 */
rte_swx_pipeline_action_config(struct rte_swx_pipeline *p,
			       const char *args_struct_type_name,
			       const char **instructions,
			       uint32_t n_instructions)
	struct struct_type *args_struct_type;

	CHECK_NAME(name, EINVAL);
	CHECK(!action_find(p, name), EEXIST);	/* Names must be unique. */

	if (args_struct_type_name) {
		CHECK_NAME(args_struct_type_name, EINVAL);
		args_struct_type = struct_type_find(p, args_struct_type_name);
		CHECK(args_struct_type, EINVAL);
	args_struct_type = NULL;	/* Action takes no arguments. */

	/* Node allocation. */
	a = calloc(1, sizeof(struct action));

	/* Node initialization. */
	strcpy(a->name, name);
	a->st = args_struct_type;
	a->id = p->n_actions;	/* IDs are assigned in registration order. */

	/* Instruction translation. */
	err = instruction_config(p, a, instructions, n_instructions);

	/* Node add to tailq. */
	TAILQ_INSERT_TAIL(&p->actions, a, node);
/*
 * Build step: create the per-action-ID instruction pointer array used at
 * run time for action dispatch.
 */
action_build(struct rte_swx_pipeline *p)
	struct action *action;

	p->action_instructions = calloc(p->n_actions,
					sizeof(struct instruction *));
	CHECK(p->action_instructions, ENOMEM);

	TAILQ_FOREACH(action, &p->actions, node)
		p->action_instructions[action->id] = action->instructions;
/* Undo action_build(): release the dispatch array. free(NULL) is a no-op. */
action_build_free(struct rte_swx_pipeline *p)
	free(p->action_instructions);
	p->action_instructions = NULL;
/*
 * Release all actions: first the build artifacts, then each registered
 * action node (loop structure partly elided from this extract).
 */
action_free(struct rte_swx_pipeline *p)
	action_build_free(p);
	struct action *action;

	action = TAILQ_FIRST(&p->actions);

	TAILQ_REMOVE(&p->actions, action, node);
	free(action->instructions);
6293 static struct table_type *
6294 table_type_find(struct rte_swx_pipeline *p, const char *name)
6296 struct table_type *elem;
6298 TAILQ_FOREACH(elem, &p->table_types, node)
6299 if (strcmp(elem->name, name) == 0)
/*
 * Pick the table type implementation for a given match type. Prefer the
 * recommended type when it exists and its match type is compatible;
 * otherwise fall back to the first registered type with the right match
 * type. Return statements are elided from this extract.
 */
static struct table_type *
table_type_resolve(struct rte_swx_pipeline *p,
		   const char *recommended_type_name,
		   enum rte_swx_table_match_type match_type)
	struct table_type *elem;

	/* Only consider the recommended type if the match type is correct. */
	if (recommended_type_name)
		TAILQ_FOREACH(elem, &p->table_types, node)
			if (!strcmp(elem->name, recommended_type_name) &&
			    (elem->match_type == match_type))

	/* Ignore the recommended type and get the first element with this match
	 */
	TAILQ_FOREACH(elem, &p->table_types, node)
		if (elem->match_type == match_type)
6329 static struct table *
6330 table_find(struct rte_swx_pipeline *p, const char *name)
6334 TAILQ_FOREACH(elem, &p->tables, node)
6335 if (strcmp(elem->name, name) == 0)
6341 static struct table *
6342 table_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
6344 struct table *table = NULL;
6346 TAILQ_FOREACH(table, &p->tables, node)
6347 if (table->id == id)
/*
 * Public API: register a table type (a match engine implementation) with
 * the pipeline. The ops vector must provide at least create, lkp and free.
 * The return statement is elided from this extract.
 */
rte_swx_pipeline_table_type_register(struct rte_swx_pipeline *p,
				     enum rte_swx_table_match_type match_type,
				     struct rte_swx_table_ops *ops)
	struct table_type *elem;

	CHECK_NAME(name, EINVAL);
	CHECK(!table_type_find(p, name), EEXIST);	/* Names must be unique. */
	CHECK(ops->create, EINVAL);
	CHECK(ops->lkp, EINVAL);
	CHECK(ops->free, EINVAL);

	/* Node allocation. */
	elem = calloc(1, sizeof(struct table_type));
	CHECK(elem, ENOMEM);

	/* Node initialization. */
	strcpy(elem->name, name);
	elem->match_type = match_type;
	memcpy(&elem->ops, ops, sizeof(*ops));	/* Copy by value: caller keeps its ops. */

	/* Node add to tailq. */
	TAILQ_INSERT_TAIL(&p->table_types, elem, node);
/*
 * Derive the overall table match type from the per-field match types:
 * all-exact -> EXACT; a single trailing LPM field -> LPM; anything else
 * -> WILDCARD. Parts of the signature and loop body are elided from this
 * extract.
 */
static enum rte_swx_table_match_type
table_match_type_resolve(struct rte_swx_match_field_params *fields,
	for (i = 0; i < n_fields; i++)
		if (fields[i].match_type != RTE_SWX_TABLE_MATCH_EXACT)

	return RTE_SWX_TABLE_MATCH_EXACT;

	/* Only the last field may be LPM for the table to qualify as LPM. */
	if ((i == n_fields - 1) &&
	    (fields[i].match_type == RTE_SWX_TABLE_MATCH_LPM))
		return RTE_SWX_TABLE_MATCH_LPM;

	return RTE_SWX_TABLE_MATCH_WILDCARD;
/*
 * Public API: configure a table. Validates the match fields (all from the
 * same header, or all from metadata, with strictly increasing offsets),
 * the action list and the default action, resolves the table type, then
 * allocates and initializes the table node. Error cleanup paths are elided
 * from this extract.
 */
rte_swx_pipeline_table_config(struct rte_swx_pipeline *p,
			      struct rte_swx_pipeline_table_params *params,
			      const char *recommended_table_type_name,
	struct table_type *type;
	struct action *default_action;
	struct header *header = NULL;
	uint32_t offset_prev = 0, action_data_size_max = 0, i;

	CHECK_NAME(name, EINVAL);
	CHECK(!table_find(p, name), EEXIST);	/* Names must be unique. */
	CHECK(params, EINVAL);
	CHECK(!params->n_fields || params->fields, EINVAL);
	for (i = 0; i < params->n_fields; i++) {
		struct rte_swx_match_field_params *field = &params->fields[i];
		struct field *hf, *mf;

		CHECK_NAME(field->name, EINVAL);

		/* Each field must resolve as a header field or a metadata field. */
		hf = header_field_parse(p, field->name, &h);
		mf = metadata_field_parse(p, field->name);
		CHECK(hf || mf, EINVAL);
		offset = hf ? hf->offset : mf->offset;
		/* First field fixes whether the key lives in a header or metadata. */
		is_header = hf ? 1 : 0;
		header = hf ? h : NULL;
		offset_prev = offset;
		/* All later fields must be of the same kind (and same header). */
		CHECK((is_header && hf && (h->id == header->id)) ||
		      (!is_header && mf), EINVAL);
		CHECK(offset > offset_prev, EINVAL);	/* Strictly increasing offsets. */
		offset_prev = offset;

	/* Action checks. */
	CHECK(params->n_actions, EINVAL);
	CHECK(params->action_names, EINVAL);
	for (i = 0; i < params->n_actions; i++) {
		const char *action_name = params->action_names[i];
		uint32_t action_data_size;

		CHECK_NAME(action_name, EINVAL);
		a = action_find(p, action_name);
		/* Track the largest argument struct across this table's actions. */
		action_data_size = a->st ? a->st->n_bits / 8 : 0;
		if (action_data_size > action_data_size_max)
			action_data_size_max = action_data_size;
	CHECK_NAME(params->default_action_name, EINVAL);
	/*
	 * NOTE(review): this loop bound (p->n_actions) does not match the
	 * array being indexed (params->action_names, length
	 * params->n_actions, as checked two lines below) — if
	 * p->n_actions > params->n_actions this reads past the end of the
	 * array. Confirm against the full file and fix the bound to
	 * params->n_actions if so.
	 */
	for (i = 0; i < p->n_actions; i++)
		if (!strcmp(params->action_names[i],
			    params->default_action_name))
	CHECK(i < params->n_actions, EINVAL);
	default_action = action_find(p, params->default_action_name);
	/* Default action data is required iff the default action takes arguments. */
	CHECK((default_action->st && params->default_action_data) ||
	      !params->default_action_data, EINVAL);

	/* Table type checks. */
	if (recommended_table_type_name)
		CHECK_NAME(recommended_table_type_name, EINVAL);

	if (params->n_fields) {
		enum rte_swx_table_match_type match_type;

		match_type = table_match_type_resolve(params->fields,
		type = table_type_resolve(p,
					  recommended_table_type_name,
		CHECK(type, EINVAL);

	/* Memory allocation. */
	t = calloc(1, sizeof(struct table));
	t->fields = calloc(params->n_fields, sizeof(struct match_field));
	t->actions = calloc(params->n_actions, sizeof(struct action *));
	if (action_data_size_max) {
		t->default_action_data = calloc(1, action_data_size_max);
		if (!t->default_action_data) {

	/* Node initialization. */
	strcpy(t->name, name);
	if (args && args[0])
		strcpy(t->args, args);
	for (i = 0; i < params->n_fields; i++) {
		struct rte_swx_match_field_params *field = &params->fields[i];
		struct match_field *f = &t->fields[i];

		f->match_type = field->match_type;
		f->field = is_header ?
			header_field_parse(p, field->name, NULL) :
			metadata_field_parse(p, field->name);
	t->n_fields = params->n_fields;
	t->is_header = is_header;
	for (i = 0; i < params->n_actions; i++)
		t->actions[i] = action_find(p, params->action_names[i]);
	t->default_action = default_action;
	if (default_action->st)
		memcpy(t->default_action_data,
		       params->default_action_data,
		       default_action->st->n_bits / 8);
	t->n_actions = params->n_actions;
	t->default_action_is_const = params->default_action_is_const;
	t->action_data_size_max = action_data_size_max;
	t->id = p->n_tables;	/* IDs are assigned in registration order. */

	/* Node add to tailq. */
	TAILQ_INSERT_TAIL(&p->tables, t, node);
/*
 * Build the low-level rte_swx_table_params for a configured table: compute
 * the key window (offset of the first field, size spanning first to last
 * field), the key mask covering exactly the match fields, and the largest
 * action data size. Error cleanup and the return statement are elided
 * from this extract.
 */
static struct rte_swx_table_params *
table_params_get(struct table *table)
	struct rte_swx_table_params *params;
	struct field *first, *last;
	uint32_t key_size, key_offset, action_data_size, i;

	/* Memory allocation. */
	params = calloc(1, sizeof(struct rte_swx_table_params));

	/* Key offset and size. */
	first = table->fields[0].field;
	last = table->fields[table->n_fields - 1].field;
	key_offset = first->offset / 8;		/* Bits to bytes. */
	key_size = (last->offset + last->n_bits - first->offset) / 8;

	/* Memory allocation. */
	key_mask = calloc(1, key_size);
	for (i = 0; i < table->n_fields; i++) {
		struct field *f = table->fields[i].field;
		uint32_t start = (f->offset - first->offset) / 8;
		size_t size = f->n_bits / 8;

		/* Match bytes get an all-ones mask; gaps between fields stay zero. */
		memset(&key_mask[start], 0xFF, size);

	/* Action data size: maximum across the table's actions. */
	action_data_size = 0;
	for (i = 0; i < table->n_actions; i++) {
		struct action *action = table->actions[i];
		uint32_t ads = action->st ? action->st->n_bits / 8 : 0;

		if (ads > action_data_size)
			action_data_size = ads;
	params->match_type = table->type->match_type;
	params->key_size = key_size;
	params->key_offset = key_offset;
	params->key_mask0 = key_mask;	/* Ownership passes to params. */
	params->action_data_size = action_data_size;
	params->n_keys_max = table->size;
/*
 * Release a params object produced by table_params_get(), including the
 * key mask it owns (the free of params itself is elided from this extract).
 */
table_params_free(struct rte_swx_table_params *params)
	free(params->key_mask0);
/*
 * Build step: create the runtime state for every table — the underlying
 * table object (via the table type's create op), a private copy of the
 * default action data, and the default action ID.
 */
table_state_build(struct rte_swx_pipeline *p)
	struct table *table;

	p->table_state = calloc(p->n_tables,
				sizeof(struct rte_swx_table_state));
	CHECK(p->table_state, ENOMEM);

	TAILQ_FOREACH(table, &p->tables, node) {
		struct rte_swx_table_state *ts = &p->table_state[table->id];
		struct rte_swx_table_params *params;

		params = table_params_get(table);
		CHECK(params, ENOMEM);

		ts->obj = table->type->ops.create(params,
		/* Params are only needed for creation; release immediately. */
		table_params_free(params);
		CHECK(ts->obj, ENODEV);

		/* ts->default_action_data. */
		if (table->action_data_size_max) {
			ts->default_action_data =
				malloc(table->action_data_size_max);
			CHECK(ts->default_action_data, ENOMEM);

			memcpy(ts->default_action_data,
			       table->default_action_data,
			       table->action_data_size_max);

		/* ts->default_action_id. */
		ts->default_action_id = table->default_action->id;
/*
 * Undo table_state_build(): free each table object and its default action
 * data, then the state array itself. Safe to call when nothing was built.
 */
table_state_build_free(struct rte_swx_pipeline *p)
	if (!p->table_state)

	for (i = 0; i < p->n_tables; i++) {
		struct rte_swx_table_state *ts = &p->table_state[i];
		struct table *table = table_find_by_id(p, i);

		/* Stub tables (no type) have no object to free. */
		if (table->type && ts->obj)
			table->type->ops.free(ts->obj);

		/* ts->default_action_data. */
		free(ts->default_action_data);
	free(p->table_state);
	p->table_state = NULL;
/* Release all table state; currently just the build artifacts. */
table_state_free(struct rte_swx_pipeline *p)
	table_state_build_free(p);
/*
 * Lookup function for key-less (stub) tables: never matches, completes in a
 * single step.
 */
table_stub_lkp(void *table __rte_unused,
	       void *mailbox __rte_unused,
	       uint8_t **key __rte_unused,
	       uint64_t *action_id __rte_unused,
	       uint8_t **action_data __rte_unused,
	return 1; /* DONE. */
/*
 * Build step: for every thread, create the per-table runtime (lookup
 * function, mailbox, key pointer). Tables without a type get the stub
 * lookup function.
 */
table_build(struct rte_swx_pipeline *p)
	for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
		struct thread *t = &p->threads[i];
		struct table *table;

		t->tables = calloc(p->n_tables, sizeof(struct table_runtime));
		CHECK(t->tables, ENOMEM);

		TAILQ_FOREACH(table, &p->tables, node) {
			struct table_runtime *r = &t->tables[table->id];

			size = table->type->ops.mailbox_size_get();
			r->func = table->type->ops.lkp;
			r->mailbox = calloc(1, size);
			CHECK(r->mailbox, ENOMEM);

			/* Key bytes come from the table's header or from metadata. */
			r->key = table->is_header ?
				&t->structs[table->header->struct_id] :
				&t->structs[p->metadata_struct_id];
			r->func = table_stub_lkp;
6768 table_build_free(struct rte_swx_pipeline *p)
6772 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
6773 struct thread *t = &p->threads[i];
6779 for (j = 0; j < p->n_tables; j++) {
6780 struct table_runtime *r = &t->tables[j];
6791 table_free(struct rte_swx_pipeline *p)
6793 table_build_free(p);
6799 elem = TAILQ_FIRST(&p->tables);
6803 TAILQ_REMOVE(&p->tables, elem, node);
6805 free(elem->actions);
6806 free(elem->default_action_data);
6812 struct table_type *elem;
6814 elem = TAILQ_FIRST(&p->table_types);
6818 TAILQ_REMOVE(&p->table_types, elem, node);
6827 rte_swx_pipeline_config(struct rte_swx_pipeline **p, int numa_node)
6829 struct rte_swx_pipeline *pipeline;
6831 /* Check input parameters. */
6834 /* Memory allocation. */
6835 pipeline = calloc(1, sizeof(struct rte_swx_pipeline));
6836 CHECK(pipeline, ENOMEM);
6838 /* Initialization. */
6839 TAILQ_INIT(&pipeline->struct_types);
6840 TAILQ_INIT(&pipeline->port_in_types);
6841 TAILQ_INIT(&pipeline->ports_in);
6842 TAILQ_INIT(&pipeline->port_out_types);
6843 TAILQ_INIT(&pipeline->ports_out);
6844 TAILQ_INIT(&pipeline->extern_types);
6845 TAILQ_INIT(&pipeline->extern_objs);
6846 TAILQ_INIT(&pipeline->extern_funcs);
6847 TAILQ_INIT(&pipeline->headers);
6848 TAILQ_INIT(&pipeline->actions);
6849 TAILQ_INIT(&pipeline->table_types);
6850 TAILQ_INIT(&pipeline->tables);
6852 pipeline->n_structs = 1; /* Struct 0 is reserved for action_data. */
6853 pipeline->numa_node = numa_node;
6860 rte_swx_pipeline_free(struct rte_swx_pipeline *p)
6865 free(p->instructions);
6867 table_state_free(p);
6872 extern_func_free(p);
6882 rte_swx_pipeline_instructions_config(struct rte_swx_pipeline *p,
6883 const char **instructions,
6884 uint32_t n_instructions)
6889 err = instruction_config(p, NULL, instructions, n_instructions);
6893 /* Thread instruction pointer reset. */
6894 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
6895 struct thread *t = &p->threads[i];
6897 thread_ip_reset(p, t);
6904 rte_swx_pipeline_build(struct rte_swx_pipeline *p)
6909 CHECK(p->build_done == 0, EEXIST);
6911 status = port_in_build(p);
6915 status = port_out_build(p);
6919 status = struct_build(p);
6923 status = extern_obj_build(p);
6927 status = extern_func_build(p);
6931 status = header_build(p);
6935 status = metadata_build(p);
6939 status = action_build(p);
6943 status = table_build(p);
6947 status = table_state_build(p);
6955 table_state_build_free(p);
6956 table_build_free(p);
6957 action_build_free(p);
6958 metadata_build_free(p);
6959 header_build_free(p);
6960 extern_func_build_free(p);
6961 extern_obj_build_free(p);
6962 port_out_build_free(p);
6963 port_in_build_free(p);
6964 struct_build_free(p);
/* Execute @n_instructions pipeline instructions.
 * NOTE(review): the per-iteration execution call was elided in this view;
 * reconstructed as the file-local instruction dispatcher — confirm against
 * the full source.
 */
void
rte_swx_pipeline_run(struct rte_swx_pipeline *p, uint32_t n_instructions)
{
	uint32_t i;

	for (i = 0; i < n_instructions; i++)
		instr_exec(p);
}
6979 rte_swx_pipeline_flush(struct rte_swx_pipeline *p)
6983 for (i = 0; i < p->n_ports_out; i++) {
6984 struct port_out_runtime *port = &p->out[i];
6987 port->flush(port->obj);
6995 rte_swx_ctl_pipeline_info_get(struct rte_swx_pipeline *p,
6996 struct rte_swx_ctl_pipeline_info *pipeline)
6998 struct action *action;
6999 struct table *table;
7000 uint32_t n_actions = 0, n_tables = 0;
7002 if (!p || !pipeline)
7005 TAILQ_FOREACH(action, &p->actions, node)
7008 TAILQ_FOREACH(table, &p->tables, node)
7011 pipeline->n_ports_in = p->n_ports_in;
7012 pipeline->n_ports_out = p->n_ports_out;
7013 pipeline->n_actions = n_actions;
7014 pipeline->n_tables = n_tables;
7020 rte_swx_ctl_pipeline_numa_node_get(struct rte_swx_pipeline *p, int *numa_node)
7022 if (!p || !numa_node)
7025 *numa_node = p->numa_node;
7030 rte_swx_ctl_action_info_get(struct rte_swx_pipeline *p,
7032 struct rte_swx_ctl_action_info *action)
7034 struct action *a = NULL;
7036 if (!p || (action_id >= p->n_actions) || !action)
7039 a = action_find_by_id(p, action_id);
7043 strcpy(action->name, a->name);
7044 action->n_args = a->st ? a->st->n_fields : 0;
7049 rte_swx_ctl_action_arg_info_get(struct rte_swx_pipeline *p,
7051 uint32_t action_arg_id,
7052 struct rte_swx_ctl_action_arg_info *action_arg)
7054 struct action *a = NULL;
7055 struct field *arg = NULL;
7057 if (!p || (action_id >= p->n_actions) || !action_arg)
7060 a = action_find_by_id(p, action_id);
7061 if (!a || !a->st || (action_arg_id >= a->st->n_fields))
7064 arg = &a->st->fields[action_arg_id];
7065 strcpy(action_arg->name, arg->name);
7066 action_arg->n_bits = arg->n_bits;
7072 rte_swx_ctl_table_info_get(struct rte_swx_pipeline *p,
7074 struct rte_swx_ctl_table_info *table)
7076 struct table *t = NULL;
7081 t = table_find_by_id(p, table_id);
7085 strcpy(table->name, t->name);
7086 strcpy(table->args, t->args);
7087 table->n_match_fields = t->n_fields;
7088 table->n_actions = t->n_actions;
7089 table->default_action_is_const = t->default_action_is_const;
7090 table->size = t->size;
7095 rte_swx_ctl_table_match_field_info_get(struct rte_swx_pipeline *p,
7097 uint32_t match_field_id,
7098 struct rte_swx_ctl_table_match_field_info *match_field)
7101 struct match_field *f;
7103 if (!p || (table_id >= p->n_tables) || !match_field)
7106 t = table_find_by_id(p, table_id);
7107 if (!t || (match_field_id >= t->n_fields))
7110 f = &t->fields[match_field_id];
7111 match_field->match_type = f->match_type;
7112 match_field->is_header = t->is_header;
7113 match_field->n_bits = f->field->n_bits;
7114 match_field->offset = f->field->offset;
7120 rte_swx_ctl_table_action_info_get(struct rte_swx_pipeline *p,
7122 uint32_t table_action_id,
7123 struct rte_swx_ctl_table_action_info *table_action)
7127 if (!p || (table_id >= p->n_tables) || !table_action)
7130 t = table_find_by_id(p, table_id);
7131 if (!t || (table_action_id >= t->n_actions))
7134 table_action->action_id = t->actions[table_action_id]->id;
7140 rte_swx_ctl_table_ops_get(struct rte_swx_pipeline *p,
7142 struct rte_swx_table_ops *table_ops,
7147 if (!p || (table_id >= p->n_tables))
7150 t = table_find_by_id(p, table_id);
7156 memcpy(table_ops, &t->type->ops, sizeof(*table_ops));
7166 rte_swx_pipeline_table_state_get(struct rte_swx_pipeline *p,
7167 struct rte_swx_table_state **table_state)
7169 if (!p || !table_state || !p->build_done)
7172 *table_state = p->table_state;
7177 rte_swx_pipeline_table_state_set(struct rte_swx_pipeline *p,
7178 struct rte_swx_table_state *table_state)
7180 if (!p || !table_state || !p->build_done)
7183 p->table_state = table_state;
7188 rte_swx_ctl_pipeline_port_in_stats_read(struct rte_swx_pipeline *p,
7190 struct rte_swx_port_in_stats *stats)
7192 struct port_in *port;
7197 port = port_in_find(p, port_id);
7201 port->type->ops.stats_read(port->obj, stats);
7206 rte_swx_ctl_pipeline_port_out_stats_read(struct rte_swx_pipeline *p,
7208 struct rte_swx_port_out_stats *stats)
7210 struct port_out *port;
7215 port = port_out_find(p, port_id);
7219 port->type->ops.stats_read(port->obj, stats);