/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_prefetch.h>
#include <rte_byteorder.h>

#include "rte_swx_pipeline.h"
#include "rte_swx_ctl.h"
/* Argument validation helper: when the check fails, return -err_code from the
 * enclosing function. Used pervasively by the public configuration API.
 */
#define CHECK(condition, err_code)                                             \
do {                                                                           \
    if (!(condition))                                                          \
        return -(err_code);                                                    \
} while (0)

/* A valid name is a non-NULL, non-empty string. */
#define CHECK_NAME(name, err_code)                                             \
    CHECK((name) && (name)[0], err_code)

/* Debug trace helper. */
#define TRACE(...) printf(__VA_ARGS__)

/* 64-bit network <-> host byte order conversion. */
#define ntoh64(x) rte_be_to_cpu_64(x)
#define hton64(x) rte_cpu_to_be_64(x)
44 char name[RTE_SWX_NAME_SIZE];
50 TAILQ_ENTRY(struct_type) node;
51 char name[RTE_SWX_NAME_SIZE];
57 TAILQ_HEAD(struct_type_tailq, struct_type);
63 TAILQ_ENTRY(port_in_type) node;
64 char name[RTE_SWX_NAME_SIZE];
65 struct rte_swx_port_in_ops ops;
68 TAILQ_HEAD(port_in_type_tailq, port_in_type);
71 TAILQ_ENTRY(port_in) node;
72 struct port_in_type *type;
77 TAILQ_HEAD(port_in_tailq, port_in);
79 struct port_in_runtime {
80 rte_swx_port_in_pkt_rx_t pkt_rx;
87 struct port_out_type {
88 TAILQ_ENTRY(port_out_type) node;
89 char name[RTE_SWX_NAME_SIZE];
90 struct rte_swx_port_out_ops ops;
93 TAILQ_HEAD(port_out_type_tailq, port_out_type);
96 TAILQ_ENTRY(port_out) node;
97 struct port_out_type *type;
102 TAILQ_HEAD(port_out_tailq, port_out);
104 struct port_out_runtime {
105 rte_swx_port_out_pkt_tx_t pkt_tx;
106 rte_swx_port_out_flush_t flush;
113 struct extern_type_member_func {
114 TAILQ_ENTRY(extern_type_member_func) node;
115 char name[RTE_SWX_NAME_SIZE];
116 rte_swx_extern_type_member_func_t func;
120 TAILQ_HEAD(extern_type_member_func_tailq, extern_type_member_func);
123 TAILQ_ENTRY(extern_type) node;
124 char name[RTE_SWX_NAME_SIZE];
125 struct struct_type *mailbox_struct_type;
126 rte_swx_extern_type_constructor_t constructor;
127 rte_swx_extern_type_destructor_t destructor;
128 struct extern_type_member_func_tailq funcs;
132 TAILQ_HEAD(extern_type_tailq, extern_type);
135 TAILQ_ENTRY(extern_obj) node;
136 char name[RTE_SWX_NAME_SIZE];
137 struct extern_type *type;
143 TAILQ_HEAD(extern_obj_tailq, extern_obj);
145 #ifndef RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX
146 #define RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX 8
149 struct extern_obj_runtime {
152 rte_swx_extern_type_member_func_t funcs[RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX];
159 TAILQ_ENTRY(extern_func) node;
160 char name[RTE_SWX_NAME_SIZE];
161 struct struct_type *mailbox_struct_type;
162 rte_swx_extern_func_t func;
167 TAILQ_HEAD(extern_func_tailq, extern_func);
169 struct extern_func_runtime {
171 rte_swx_extern_func_t func;
178 TAILQ_ENTRY(header) node;
179 char name[RTE_SWX_NAME_SIZE];
180 struct struct_type *st;
185 TAILQ_HEAD(header_tailq, header);
187 struct header_runtime {
191 struct header_out_runtime {
/*
 * Instruction.
 */

/* Packet headers are always in Network Byte Order (NBO), i.e. big endian.
 * Packet meta-data fields are always assumed to be in Host Byte Order (HBO).
 * Table entry fields can be in either NBO or HBO; they are assumed to be in HBO
 * when transferred to packet meta-data and in NBO when transferred to packet
 * headers.
 */

/* Notation conventions:
 *    -Header field: H = h.header.field (dst/src)
 *    -Meta-data field: M = m.field (dst/src)
 *    -Extern object mailbox field: E = e.field (dst/src)
 *    -Extern function mailbox field: F = f.field (dst/src)
 *    -Table action data field: T = t.field (src only)
 *    -Immediate value: I = 32-bit unsigned value (src only)
 */

enum instruction_type {
    /* rx m.port_in */
    INSTR_RX,

    /* tx m.port_out */
    INSTR_TX,

    /* extract h.header */
    INSTR_HDR_EXTRACT,
    INSTR_HDR_EXTRACT2,
    INSTR_HDR_EXTRACT3,
    INSTR_HDR_EXTRACT4,
    INSTR_HDR_EXTRACT5,
    INSTR_HDR_EXTRACT6,
    INSTR_HDR_EXTRACT7,
    INSTR_HDR_EXTRACT8,

    /* emit h.header */
    INSTR_HDR_EMIT,
    INSTR_HDR_EMIT_TX,
    INSTR_HDR_EMIT2_TX,
    INSTR_HDR_EMIT3_TX,
    INSTR_HDR_EMIT4_TX,
    INSTR_HDR_EMIT5_TX,
    INSTR_HDR_EMIT6_TX,
    INSTR_HDR_EMIT7_TX,
    INSTR_HDR_EMIT8_TX,

    /* validate h.header */
    INSTR_HDR_VALIDATE,

    /* invalidate h.header */
    INSTR_HDR_INVALIDATE,

    /* mov dst src
     * dst = src
     * dst = HMEF, src = HMEFTI
     */
    INSTR_MOV,   /* dst = MEF, src = MEFT */
    INSTR_MOV_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
    INSTR_MOV_I, /* dst = HMEF, src = I */

    /* dma h.header t.field
     * memcpy(h.header, t.field, sizeof(h.header))
     */
    INSTR_DMA_HT,
    INSTR_DMA_HT2,
    INSTR_DMA_HT3,
    INSTR_DMA_HT4,
    INSTR_DMA_HT5,
    INSTR_DMA_HT6,
    INSTR_DMA_HT7,
    INSTR_DMA_HT8,

    /* add dst src
     * dst += src
     * dst = HMEF, src = HMEFTI
     */
    INSTR_ALU_ADD,    /* dst = MEF, src = MEF */
    INSTR_ALU_ADD_MH, /* dst = MEF, src = H */
    INSTR_ALU_ADD_HM, /* dst = H, src = MEF */
    INSTR_ALU_ADD_HH, /* dst = H, src = H */
    INSTR_ALU_ADD_MI, /* dst = MEF, src = I */
    INSTR_ALU_ADD_HI, /* dst = H, src = I */

    /* sub dst src
     * dst -= src
     * dst = HMEF, src = HMEFTI
     */
    INSTR_ALU_SUB,    /* dst = MEF, src = MEF */
    INSTR_ALU_SUB_MH, /* dst = MEF, src = H */
    INSTR_ALU_SUB_HM, /* dst = H, src = MEF */
    INSTR_ALU_SUB_HH, /* dst = H, src = H */
    INSTR_ALU_SUB_MI, /* dst = MEF, src = I */
    INSTR_ALU_SUB_HI, /* dst = H, src = I */

    /* ckadd dst src
     * dst = dst '+ src[0:1] '+ src[2:3] + ...
     * dst = H, src = {H, h.header}
     */
    INSTR_ALU_CKADD_FIELD,    /* src = H */
    INSTR_ALU_CKADD_STRUCT20, /* src = h.header, with sizeof(header) = 20 */
    INSTR_ALU_CKADD_STRUCT,   /* src = h.header, with any sizeof(header) */

    /* cksub dst src
     * dst = dst '- src
     * dst = H, src = H
     */
    INSTR_ALU_CKSUB_FIELD,

    /* and dst src
     * dst &= src
     * dst = HMEF, src = HMEFTI
     */
    INSTR_ALU_AND,   /* dst = MEF, src = MEFT */
    INSTR_ALU_AND_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
    INSTR_ALU_AND_I, /* dst = HMEF, src = I */

    /* or dst src
     * dst |= src
     * dst = HMEF, src = HMEFTI
     */
    INSTR_ALU_OR,   /* dst = MEF, src = MEFT */
    INSTR_ALU_OR_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
    INSTR_ALU_OR_I, /* dst = HMEF, src = I */

    /* xor dst src
     * dst ^= src
     * dst = HMEF, src = HMEFTI
     */
    INSTR_ALU_XOR,   /* dst = MEF, src = MEFT */
    INSTR_ALU_XOR_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
    INSTR_ALU_XOR_I, /* dst = HMEF, src = I */

    /* shl dst src
     * dst <<= src
     * dst = HMEF, src = HMEFTI
     */
    INSTR_ALU_SHL,    /* dst = MEF, src = MEF */
    INSTR_ALU_SHL_MH, /* dst = MEF, src = H */
    INSTR_ALU_SHL_HM, /* dst = H, src = MEF */
    INSTR_ALU_SHL_HH, /* dst = H, src = H */
    INSTR_ALU_SHL_MI, /* dst = MEF, src = I */
    INSTR_ALU_SHL_HI, /* dst = H, src = I */

    /* shr dst src
     * dst >>= src
     * dst = HMEF, src = HMEFTI
     */
    INSTR_ALU_SHR,    /* dst = MEF, src = MEF */
    INSTR_ALU_SHR_MH, /* dst = MEF, src = H */
    INSTR_ALU_SHR_HM, /* dst = H, src = MEF */
    INSTR_ALU_SHR_HH, /* dst = H, src = H */
    INSTR_ALU_SHR_MI, /* dst = MEF, src = I */
    INSTR_ALU_SHR_HI, /* dst = H, src = I */

    /* table TABLE */
    INSTR_TABLE,

    /* extern e.obj.func */
    INSTR_EXTERN_OBJ,

    /* extern f.func */
    INSTR_EXTERN_FUNC,

    /* jmp LABEL
     * Unconditional jump
     */
    INSTR_JMP,

    /* jmpv LABEL h.header
     * Jump if header is valid
     */
    INSTR_JMP_VALID,

    /* jmpnv LABEL h.header
     * Jump if header is invalid
     */
    INSTR_JMP_INVALID,

    /* jmph LABEL
     * Jump if table lookup hit
     */
    INSTR_JMP_HIT,

    /* jmpnh LABEL
     * Jump if table lookup miss
     */
    INSTR_JMP_MISS,

    /* jmpa LABEL ACTION
     * Jump if action run
     */
    INSTR_JMP_ACTION_HIT,

    /* jmpna LABEL ACTION
     * Jump if action not run
     */
    INSTR_JMP_ACTION_MISS,

    /* jmpeq LABEL a b
     * Jump if a is equal to b
     * a = HMEFT, b = HMEFTI
     */
    INSTR_JMP_EQ,   /* (a, b) = (MEFT, MEFT) or (a, b) = (H, H) */
    INSTR_JMP_EQ_S, /* (a, b) = (MEFT, H) or (a, b) = (H, MEFT) */
    INSTR_JMP_EQ_I, /* (a, b) = (MEFT, I) or (a, b) = (H, I) */

    /* jmpneq LABEL a b
     * Jump if a is not equal to b
     * a = HMEFT, b = HMEFTI
     */
    INSTR_JMP_NEQ,   /* (a, b) = (MEFT, MEFT) or (a, b) = (H, H) */
    INSTR_JMP_NEQ_S, /* (a, b) = (MEFT, H) or (a, b) = (H, MEFT) */
    INSTR_JMP_NEQ_I, /* (a, b) = (MEFT, I) or (a, b) = (H, I) */

    /* jmplt LABEL a b
     * Jump if a is less than b
     * a = HMEFT, b = HMEFTI
     */
    INSTR_JMP_LT,    /* a = MEF, b = MEF */
    INSTR_JMP_LT_MH, /* a = MEF, b = H */
    INSTR_JMP_LT_HM, /* a = H, b = MEF */
    INSTR_JMP_LT_HH, /* a = H, b = H */
    INSTR_JMP_LT_MI, /* a = MEF, b = I */
    INSTR_JMP_LT_HI, /* a = H, b = I */

    /* jmpgt LABEL a b
     * Jump if a is greater than b
     * a = HMEFT, b = HMEFTI
     */
    INSTR_JMP_GT,    /* a = MEF, b = MEF */
    INSTR_JMP_GT_MH, /* a = MEF, b = H */
    INSTR_JMP_GT_HM, /* a = H, b = MEF */
    INSTR_JMP_GT_HH, /* a = H, b = H */
    INSTR_JMP_GT_MI, /* a = MEF, b = I */
    INSTR_JMP_GT_HI, /* a = H, b = I */

    /* return
     * Return from action
     */
    INSTR_RETURN,
};
441 struct instr_operand {
456 uint8_t header_id[8];
457 uint8_t struct_id[8];
462 struct instr_hdr_validity {
470 struct instr_extern_obj {
475 struct instr_extern_func {
479 struct instr_dst_src {
480 struct instr_operand dst;
482 struct instr_operand src;
489 uint8_t header_id[8];
490 uint8_t struct_id[8];
501 struct instruction *ip;
504 struct instr_operand a;
510 struct instr_operand b;
516 enum instruction_type type;
519 struct instr_hdr_validity valid;
520 struct instr_dst_src mov;
521 struct instr_dma dma;
522 struct instr_dst_src alu;
523 struct instr_table table;
524 struct instr_extern_obj ext_obj;
525 struct instr_extern_func ext_func;
526 struct instr_jmp jmp;
530 struct instruction_data {
531 char label[RTE_SWX_NAME_SIZE];
532 char jmp_label[RTE_SWX_NAME_SIZE];
533 uint32_t n_users; /* user = jmp instruction to this instruction. */
541 TAILQ_ENTRY(action) node;
542 char name[RTE_SWX_NAME_SIZE];
543 struct struct_type *st;
544 struct instruction *instructions;
545 uint32_t n_instructions;
549 TAILQ_HEAD(action_tailq, action);
555 TAILQ_ENTRY(table_type) node;
556 char name[RTE_SWX_NAME_SIZE];
557 enum rte_swx_table_match_type match_type;
558 struct rte_swx_table_ops ops;
561 TAILQ_HEAD(table_type_tailq, table_type);
564 enum rte_swx_table_match_type match_type;
569 TAILQ_ENTRY(table) node;
570 char name[RTE_SWX_NAME_SIZE];
571 char args[RTE_SWX_NAME_SIZE];
572 struct table_type *type; /* NULL when n_fields == 0. */
575 struct match_field *fields;
577 int is_header; /* Only valid when n_fields > 0. */
578 struct header *header; /* Only valid when n_fields > 0. */
581 struct action **actions;
582 struct action *default_action;
583 uint8_t *default_action_data;
585 int default_action_is_const;
586 uint32_t action_data_size_max;
592 TAILQ_HEAD(table_tailq, table);
594 struct table_runtime {
595 rte_swx_table_lookup_t func;
605 struct rte_swx_pkt pkt;
611 /* Packet headers. */
612 struct header_runtime *headers; /* Extracted or generated headers. */
613 struct header_out_runtime *headers_out; /* Emitted headers. */
614 uint8_t *header_storage;
615 uint8_t *header_out_storage;
616 uint64_t valid_headers;
617 uint32_t n_headers_out;
619 /* Packet meta-data. */
623 struct table_runtime *tables;
624 struct rte_swx_table_state *table_state;
626 int hit; /* 0 = Miss, 1 = Hit. */
628 /* Extern objects and functions. */
629 struct extern_obj_runtime *extern_objs;
630 struct extern_func_runtime *extern_funcs;
633 struct instruction *ip;
634 struct instruction *ret;
/* Single-bit get/set/clear helpers on a 64-bit mask; pos must be < 64. */
#define MASK64_BIT_GET(mask, pos) ((mask) & (1LLU << (pos)))
#define MASK64_BIT_SET(mask, pos) ((mask) | (1LLU << (pos)))
#define MASK64_BIT_CLR(mask, pos) ((mask) & ~(1LLU << (pos)))

/* Non-zero when the given header is valid for this thread's current packet. */
#define HEADER_VALID(thread, header_id) \
MASK64_BIT_GET((thread)->valid_headers, header_id)
/* ALU operations on up-to-64-bit fields, accessed through masked 64-bit
 * loads/stores. Naming: M/E/F operands are in host byte order, H operands in
 * network byte order, I is an immediate. The _S/_MH/_HM/_HH variants handle
 * the byte swaps needed on little endian CPUs; on big endian they all reduce
 * to the plain ALU macro.
 */
#define ALU(thread, ip, operator)                                              \
{                                                                              \
    uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id];          \
    uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset];       \
    uint64_t dst64 = *dst64_ptr;                                               \
    uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits);           \
    uint64_t dst = dst64 & dst64_mask;                                         \
                                                                               \
    uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id];          \
    uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset];       \
    uint64_t src64 = *src64_ptr;                                               \
    uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits);           \
    uint64_t src = src64 & src64_mask;                                         \
                                                                               \
    uint64_t result = dst operator src;                                        \
                                                                               \
    *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask);                \
}

#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/* dst = MEF (host order), src = H (network order). */
#define ALU_S(thread, ip, operator)                                            \
{                                                                              \
    uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id];          \
    uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset];       \
    uint64_t dst64 = *dst64_ptr;                                               \
    uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits);           \
    uint64_t dst = dst64 & dst64_mask;                                         \
                                                                               \
    uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id];          \
    uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset];       \
    uint64_t src64 = *src64_ptr;                                               \
    uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits);               \
                                                                               \
    uint64_t result = dst operator src;                                        \
                                                                               \
    *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask);                \
}

#define ALU_MH ALU_S

/* dst = H (network order), src = MEF (host order). */
#define ALU_HM(thread, ip, operator)                                           \
{                                                                              \
    uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id];          \
    uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset];       \
    uint64_t dst64 = *dst64_ptr;                                               \
    uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits);           \
    uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits);               \
                                                                               \
    uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id];          \
    uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset];       \
    uint64_t src64 = *src64_ptr;                                               \
    uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits);           \
    uint64_t src = src64 & src64_mask;                                         \
                                                                               \
    uint64_t result = dst operator src;                                        \
    result = hton64(result << (64 - (ip)->alu.dst.n_bits));                    \
                                                                               \
    *dst64_ptr = (dst64 & ~dst64_mask) | result;                               \
}

/* dst = H, src = H (both network order). */
#define ALU_HH(thread, ip, operator)                                           \
{                                                                              \
    uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id];          \
    uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset];       \
    uint64_t dst64 = *dst64_ptr;                                               \
    uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits);           \
    uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits);               \
                                                                               \
    uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id];          \
    uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset];       \
    uint64_t src64 = *src64_ptr;                                               \
    uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits);               \
                                                                               \
    uint64_t result = dst operator src;                                        \
    result = hton64(result << (64 - (ip)->alu.dst.n_bits));                    \
                                                                               \
    *dst64_ptr = (dst64 & ~dst64_mask) | result;                               \
}

#else

/* On big endian CPUs, H fields are already in host order. */
#define ALU_S ALU
#define ALU_MH ALU
#define ALU_HM ALU
#define ALU_HH ALU

#endif

/* dst = HMEF, src = I (immediate, host order). */
#define ALU_I(thread, ip, operator)                                            \
{                                                                              \
    uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id];          \
    uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset];       \
    uint64_t dst64 = *dst64_ptr;                                               \
    uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits);           \
    uint64_t dst = dst64 & dst64_mask;                                         \
                                                                               \
    uint64_t src = (ip)->alu.src_val;                                          \
                                                                               \
    uint64_t result = dst operator src;                                        \
                                                                               \
    *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask);                \
}

#define ALU_MI ALU_I

#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/* dst = H (network order), src = I. */
#define ALU_HI(thread, ip, operator)                                           \
{                                                                              \
    uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id];          \
    uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset];       \
    uint64_t dst64 = *dst64_ptr;                                               \
    uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits);           \
    uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits);               \
                                                                               \
    uint64_t src = (ip)->alu.src_val;                                          \
                                                                               \
    uint64_t result = dst operator src;                                        \
    result = hton64(result << (64 - (ip)->alu.dst.n_bits));                    \
                                                                               \
    *dst64_ptr = (dst64 & ~dst64_mask) | result;                               \
}

#else

#define ALU_HI ALU_I

#endif
/* mov: copy an up-to-64-bit source field (or immediate) into the destination
 * field, preserving the destination's surrounding bits.
 */
#define MOV(thread, ip)                                                        \
{                                                                              \
    uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id];          \
    uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset];       \
    uint64_t dst64 = *dst64_ptr;                                               \
    uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits);           \
                                                                               \
    uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id];          \
    uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset];       \
    uint64_t src64 = *src64_ptr;                                               \
    uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits);           \
    uint64_t src = src64 & src64_mask;                                         \
                                                                               \
    *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);                   \
}

#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/* mov with byte-swapped (network order) source. */
#define MOV_S(thread, ip)                                                      \
{                                                                              \
    uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id];          \
    uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset];       \
    uint64_t dst64 = *dst64_ptr;                                               \
    uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits);           \
                                                                               \
    uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id];          \
    uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset];       \
    uint64_t src64 = *src64_ptr;                                               \
    uint64_t src = ntoh64(src64) >> (64 - (ip)->mov.src.n_bits);               \
                                                                               \
    *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);                   \
}

#else

#define MOV_S MOV

#endif

/* mov with immediate source. */
#define MOV_I(thread, ip)                                                      \
{                                                                              \
    uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id];          \
    uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset];       \
    uint64_t dst64 = *dst64_ptr;                                               \
    uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits);           \
                                                                               \
    uint64_t src = (ip)->mov.src_val;                                          \
                                                                               \
    *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);                   \
}
/* Conditional jump: compare operands a and b and set the thread instruction
 * pointer to either the jump target or the next instruction. Variants follow
 * the same M/H/I naming scheme as the ALU macros.
 */
#define JMP_CMP(thread, ip, operator)                                          \
{                                                                              \
    uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id];              \
    uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset];             \
    uint64_t a64 = *a64_ptr;                                                   \
    uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits);               \
    uint64_t a = a64 & a64_mask;                                               \
                                                                               \
    uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id];              \
    uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset];             \
    uint64_t b64 = *b64_ptr;                                                   \
    uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits);               \
    uint64_t b = b64 & b64_mask;                                               \
                                                                               \
    (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1);         \
}

#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/* a = MEFT (host order), b = H (network order). */
#define JMP_CMP_S(thread, ip, operator)                                        \
{                                                                              \
    uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id];              \
    uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset];             \
    uint64_t a64 = *a64_ptr;                                                   \
    uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits);               \
    uint64_t a = a64 & a64_mask;                                               \
                                                                               \
    uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id];              \
    uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset];             \
    uint64_t b64 = *b64_ptr;                                                   \
    uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits);                     \
                                                                               \
    (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1);         \
}

#define JMP_CMP_MH JMP_CMP_S

/* a = H (network order), b = MEF (host order). */
#define JMP_CMP_HM(thread, ip, operator)                                       \
{                                                                              \
    uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id];              \
    uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset];             \
    uint64_t a64 = *a64_ptr;                                                   \
    uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits);                     \
                                                                               \
    uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id];              \
    uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset];             \
    uint64_t b64 = *b64_ptr;                                                   \
    uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits);               \
    uint64_t b = b64 & b64_mask;                                               \
                                                                               \
    (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1);         \
}

/* a = H, b = H (both network order). */
#define JMP_CMP_HH(thread, ip, operator)                                       \
{                                                                              \
    uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id];              \
    uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset];             \
    uint64_t a64 = *a64_ptr;                                                   \
    uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits);                     \
                                                                               \
    uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id];              \
    uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset];             \
    uint64_t b64 = *b64_ptr;                                                   \
    uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits);                     \
                                                                               \
    (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1);         \
}

#else

/* On big endian CPUs, H fields are already in host order. */
#define JMP_CMP_S JMP_CMP
#define JMP_CMP_MH JMP_CMP
#define JMP_CMP_HM JMP_CMP
#define JMP_CMP_HH JMP_CMP

#endif

/* b is an immediate (host order). */
#define JMP_CMP_I(thread, ip, operator)                                        \
{                                                                              \
    uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id];              \
    uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset];             \
    uint64_t a64 = *a64_ptr;                                                   \
    uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits);               \
    uint64_t a = a64 & a64_mask;                                               \
                                                                               \
    uint64_t b = (ip)->jmp.b_val;                                              \
                                                                               \
    (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1);         \
}

#define JMP_CMP_MI JMP_CMP_I

#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/* a = H (network order), b = immediate. */
#define JMP_CMP_HI(thread, ip, operator)                                       \
{                                                                              \
    uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id];              \
    uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset];             \
    uint64_t a64 = *a64_ptr;                                                   \
    uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits);                     \
                                                                               \
    uint64_t b = (ip)->jmp.b_val;                                              \
                                                                               \
    (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1);         \
}

#else

#define JMP_CMP_HI JMP_CMP_I

#endif
/* Read an up-to-64-bit meta-data field (statement expression yields the
 * masked value).
 */
#define METADATA_READ(thread, offset, n_bits)                                  \
({                                                                             \
    uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset];               \
    uint64_t m64 = *m64_ptr;                                                   \
    uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits));                         \
    (m64 & m64_mask);                                                          \
})

/* Write an up-to-64-bit meta-data field, preserving surrounding bits. */
#define METADATA_WRITE(thread, offset, n_bits, value)                          \
{                                                                              \
    uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset];               \
    uint64_t m64 = *m64_ptr;                                                   \
    uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits));                         \
                                                                               \
    uint64_t m_new = value;                                                    \
                                                                               \
    *m64_ptr = (m64 & ~m64_mask) | (m_new & m64_mask);                         \
}
/* Number of packet-processing threads per pipeline (overridable at build). */
#ifndef RTE_SWX_PIPELINE_THREADS_MAX
#define RTE_SWX_PIPELINE_THREADS_MAX 16
#endif
960 struct rte_swx_pipeline {
961 struct struct_type_tailq struct_types;
962 struct port_in_type_tailq port_in_types;
963 struct port_in_tailq ports_in;
964 struct port_out_type_tailq port_out_types;
965 struct port_out_tailq ports_out;
966 struct extern_type_tailq extern_types;
967 struct extern_obj_tailq extern_objs;
968 struct extern_func_tailq extern_funcs;
969 struct header_tailq headers;
970 struct struct_type *metadata_st;
971 uint32_t metadata_struct_id;
972 struct action_tailq actions;
973 struct table_type_tailq table_types;
974 struct table_tailq tables;
976 struct port_in_runtime *in;
977 struct port_out_runtime *out;
978 struct instruction **action_instructions;
979 struct rte_swx_table_state *table_state;
980 struct instruction *instructions;
981 struct thread threads[RTE_SWX_PIPELINE_THREADS_MAX];
985 uint32_t n_ports_out;
986 uint32_t n_extern_objs;
987 uint32_t n_extern_funcs;
993 uint32_t n_instructions;
1001 static struct struct_type *
1002 struct_type_find(struct rte_swx_pipeline *p, const char *name)
1004 struct struct_type *elem;
1006 TAILQ_FOREACH(elem, &p->struct_types, node)
1007 if (strcmp(elem->name, name) == 0)
1013 static struct field *
1014 struct_type_field_find(struct struct_type *st, const char *name)
1018 for (i = 0; i < st->n_fields; i++) {
1019 struct field *f = &st->fields[i];
1021 if (strcmp(f->name, name) == 0)
1029 rte_swx_pipeline_struct_type_register(struct rte_swx_pipeline *p,
1031 struct rte_swx_field_params *fields,
1034 struct struct_type *st;
1038 CHECK_NAME(name, EINVAL);
1039 CHECK(fields, EINVAL);
1040 CHECK(n_fields, EINVAL);
1042 for (i = 0; i < n_fields; i++) {
1043 struct rte_swx_field_params *f = &fields[i];
1046 CHECK_NAME(f->name, EINVAL);
1047 CHECK(f->n_bits, EINVAL);
1048 CHECK(f->n_bits <= 64, EINVAL);
1049 CHECK((f->n_bits & 7) == 0, EINVAL);
1051 for (j = 0; j < i; j++) {
1052 struct rte_swx_field_params *f_prev = &fields[j];
1054 CHECK(strcmp(f->name, f_prev->name), EINVAL);
1058 CHECK(!struct_type_find(p, name), EEXIST);
1060 /* Node allocation. */
1061 st = calloc(1, sizeof(struct struct_type));
1064 st->fields = calloc(n_fields, sizeof(struct field));
1070 /* Node initialization. */
1071 strcpy(st->name, name);
1072 for (i = 0; i < n_fields; i++) {
1073 struct field *dst = &st->fields[i];
1074 struct rte_swx_field_params *src = &fields[i];
1076 strcpy(dst->name, src->name);
1077 dst->n_bits = src->n_bits;
1078 dst->offset = st->n_bits;
1080 st->n_bits += src->n_bits;
1082 st->n_fields = n_fields;
1084 /* Node add to tailq. */
1085 TAILQ_INSERT_TAIL(&p->struct_types, st, node);
1091 struct_build(struct rte_swx_pipeline *p)
1095 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1096 struct thread *t = &p->threads[i];
1098 t->structs = calloc(p->n_structs, sizeof(uint8_t *));
1099 CHECK(t->structs, ENOMEM);
1106 struct_build_free(struct rte_swx_pipeline *p)
1110 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1111 struct thread *t = &p->threads[i];
1119 struct_free(struct rte_swx_pipeline *p)
1121 struct_build_free(p);
1125 struct struct_type *elem;
1127 elem = TAILQ_FIRST(&p->struct_types);
1131 TAILQ_REMOVE(&p->struct_types, elem, node);
1140 static struct port_in_type *
1141 port_in_type_find(struct rte_swx_pipeline *p, const char *name)
1143 struct port_in_type *elem;
1148 TAILQ_FOREACH(elem, &p->port_in_types, node)
1149 if (strcmp(elem->name, name) == 0)
1156 rte_swx_pipeline_port_in_type_register(struct rte_swx_pipeline *p,
1158 struct rte_swx_port_in_ops *ops)
1160 struct port_in_type *elem;
1163 CHECK_NAME(name, EINVAL);
1165 CHECK(ops->create, EINVAL);
1166 CHECK(ops->free, EINVAL);
1167 CHECK(ops->pkt_rx, EINVAL);
1168 CHECK(ops->stats_read, EINVAL);
1170 CHECK(!port_in_type_find(p, name), EEXIST);
1172 /* Node allocation. */
1173 elem = calloc(1, sizeof(struct port_in_type));
1174 CHECK(elem, ENOMEM);
1176 /* Node initialization. */
1177 strcpy(elem->name, name);
1178 memcpy(&elem->ops, ops, sizeof(*ops));
1180 /* Node add to tailq. */
1181 TAILQ_INSERT_TAIL(&p->port_in_types, elem, node);
1186 static struct port_in *
1187 port_in_find(struct rte_swx_pipeline *p, uint32_t port_id)
1189 struct port_in *port;
1191 TAILQ_FOREACH(port, &p->ports_in, node)
1192 if (port->id == port_id)
1199 rte_swx_pipeline_port_in_config(struct rte_swx_pipeline *p,
1201 const char *port_type_name,
1204 struct port_in_type *type = NULL;
1205 struct port_in *port = NULL;
1210 CHECK(!port_in_find(p, port_id), EINVAL);
1212 CHECK_NAME(port_type_name, EINVAL);
1213 type = port_in_type_find(p, port_type_name);
1214 CHECK(type, EINVAL);
1216 obj = type->ops.create(args);
1219 /* Node allocation. */
1220 port = calloc(1, sizeof(struct port_in));
1221 CHECK(port, ENOMEM);
1223 /* Node initialization. */
1228 /* Node add to tailq. */
1229 TAILQ_INSERT_TAIL(&p->ports_in, port, node);
1230 if (p->n_ports_in < port_id + 1)
1231 p->n_ports_in = port_id + 1;
1237 port_in_build(struct rte_swx_pipeline *p)
1239 struct port_in *port;
1242 CHECK(p->n_ports_in, EINVAL);
1243 CHECK(rte_is_power_of_2(p->n_ports_in), EINVAL);
1245 for (i = 0; i < p->n_ports_in; i++)
1246 CHECK(port_in_find(p, i), EINVAL);
1248 p->in = calloc(p->n_ports_in, sizeof(struct port_in_runtime));
1249 CHECK(p->in, ENOMEM);
1251 TAILQ_FOREACH(port, &p->ports_in, node) {
1252 struct port_in_runtime *in = &p->in[port->id];
1254 in->pkt_rx = port->type->ops.pkt_rx;
1255 in->obj = port->obj;
1262 port_in_build_free(struct rte_swx_pipeline *p)
1269 port_in_free(struct rte_swx_pipeline *p)
1271 port_in_build_free(p);
1275 struct port_in *port;
1277 port = TAILQ_FIRST(&p->ports_in);
1281 TAILQ_REMOVE(&p->ports_in, port, node);
1282 port->type->ops.free(port->obj);
1286 /* Input port types. */
1288 struct port_in_type *elem;
1290 elem = TAILQ_FIRST(&p->port_in_types);
1294 TAILQ_REMOVE(&p->port_in_types, elem, node);
1302 static struct port_out_type *
1303 port_out_type_find(struct rte_swx_pipeline *p, const char *name)
1305 struct port_out_type *elem;
1310 TAILQ_FOREACH(elem, &p->port_out_types, node)
1311 if (!strcmp(elem->name, name))
1318 rte_swx_pipeline_port_out_type_register(struct rte_swx_pipeline *p,
1320 struct rte_swx_port_out_ops *ops)
1322 struct port_out_type *elem;
1325 CHECK_NAME(name, EINVAL);
1327 CHECK(ops->create, EINVAL);
1328 CHECK(ops->free, EINVAL);
1329 CHECK(ops->pkt_tx, EINVAL);
1330 CHECK(ops->stats_read, EINVAL);
1332 CHECK(!port_out_type_find(p, name), EEXIST);
1334 /* Node allocation. */
1335 elem = calloc(1, sizeof(struct port_out_type));
1336 CHECK(elem, ENOMEM);
1338 /* Node initialization. */
1339 strcpy(elem->name, name);
1340 memcpy(&elem->ops, ops, sizeof(*ops));
1342 /* Node add to tailq. */
1343 TAILQ_INSERT_TAIL(&p->port_out_types, elem, node);
1348 static struct port_out *
1349 port_out_find(struct rte_swx_pipeline *p, uint32_t port_id)
1351 struct port_out *port;
1353 TAILQ_FOREACH(port, &p->ports_out, node)
1354 if (port->id == port_id)
1361 rte_swx_pipeline_port_out_config(struct rte_swx_pipeline *p,
1363 const char *port_type_name,
1366 struct port_out_type *type = NULL;
1367 struct port_out *port = NULL;
1372 CHECK(!port_out_find(p, port_id), EINVAL);
1374 CHECK_NAME(port_type_name, EINVAL);
1375 type = port_out_type_find(p, port_type_name);
1376 CHECK(type, EINVAL);
1378 obj = type->ops.create(args);
1381 /* Node allocation. */
1382 port = calloc(1, sizeof(struct port_out));
1383 CHECK(port, ENOMEM);
1385 /* Node initialization. */
1390 /* Node add to tailq. */
1391 TAILQ_INSERT_TAIL(&p->ports_out, port, node);
1392 if (p->n_ports_out < port_id + 1)
1393 p->n_ports_out = port_id + 1;
1399 port_out_build(struct rte_swx_pipeline *p)
1401 struct port_out *port;
1404 CHECK(p->n_ports_out, EINVAL);
1406 for (i = 0; i < p->n_ports_out; i++)
1407 CHECK(port_out_find(p, i), EINVAL);
1409 p->out = calloc(p->n_ports_out, sizeof(struct port_out_runtime));
1410 CHECK(p->out, ENOMEM);
1412 TAILQ_FOREACH(port, &p->ports_out, node) {
1413 struct port_out_runtime *out = &p->out[port->id];
1415 out->pkt_tx = port->type->ops.pkt_tx;
1416 out->flush = port->type->ops.flush;
1417 out->obj = port->obj;
1424 port_out_build_free(struct rte_swx_pipeline *p)
1431 port_out_free(struct rte_swx_pipeline *p)
1433 port_out_build_free(p);
1437 struct port_out *port;
1439 port = TAILQ_FIRST(&p->ports_out);
1443 TAILQ_REMOVE(&p->ports_out, port, node);
1444 port->type->ops.free(port->obj);
1448 /* Output port types. */
1450 struct port_out_type *elem;
1452 elem = TAILQ_FIRST(&p->port_out_types);
1456 TAILQ_REMOVE(&p->port_out_types, elem, node);
1464 static struct extern_type *
1465 extern_type_find(struct rte_swx_pipeline *p, const char *name)
1467 struct extern_type *elem;
1469 TAILQ_FOREACH(elem, &p->extern_types, node)
1470 if (strcmp(elem->name, name) == 0)
1476 static struct extern_type_member_func *
1477 extern_type_member_func_find(struct extern_type *type, const char *name)
1479 struct extern_type_member_func *elem;
1481 TAILQ_FOREACH(elem, &type->funcs, node)
1482 if (strcmp(elem->name, name) == 0)
1488 static struct extern_obj *
1489 extern_obj_find(struct rte_swx_pipeline *p, const char *name)
1491 struct extern_obj *elem;
1493 TAILQ_FOREACH(elem, &p->extern_objs, node)
1494 if (strcmp(elem->name, name) == 0)
1500 static struct extern_type_member_func *
1501 extern_obj_member_func_parse(struct rte_swx_pipeline *p,
1503 struct extern_obj **obj)
1505 struct extern_obj *object;
1506 struct extern_type_member_func *func;
1507 char *object_name, *func_name;
1509 if (name[0] != 'e' || name[1] != '.')
1512 object_name = strdup(&name[2]);
1516 func_name = strchr(object_name, '.');
1525 object = extern_obj_find(p, object_name);
1531 func = extern_type_member_func_find(object->type, func_name);
1544 static struct field *
1545 extern_obj_mailbox_field_parse(struct rte_swx_pipeline *p,
1547 struct extern_obj **object)
1549 struct extern_obj *obj;
1551 char *obj_name, *field_name;
1553 if ((name[0] != 'e') || (name[1] != '.'))
1556 obj_name = strdup(&name[2]);
1560 field_name = strchr(obj_name, '.');
1569 obj = extern_obj_find(p, obj_name);
1575 f = struct_type_field_find(obj->type->mailbox_struct_type, field_name);
1589 rte_swx_pipeline_extern_type_register(struct rte_swx_pipeline *p,
1591 const char *mailbox_struct_type_name,
1592 rte_swx_extern_type_constructor_t constructor,
1593 rte_swx_extern_type_destructor_t destructor)
1595 struct extern_type *elem;
1596 struct struct_type *mailbox_struct_type;
1600 CHECK_NAME(name, EINVAL);
1601 CHECK(!extern_type_find(p, name), EEXIST);
1603 CHECK_NAME(mailbox_struct_type_name, EINVAL);
1604 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
1605 CHECK(mailbox_struct_type, EINVAL);
1607 CHECK(constructor, EINVAL);
1608 CHECK(destructor, EINVAL);
1610 /* Node allocation. */
1611 elem = calloc(1, sizeof(struct extern_type));
1612 CHECK(elem, ENOMEM);
1614 /* Node initialization. */
1615 strcpy(elem->name, name);
1616 elem->mailbox_struct_type = mailbox_struct_type;
1617 elem->constructor = constructor;
1618 elem->destructor = destructor;
1619 TAILQ_INIT(&elem->funcs);
1621 /* Node add to tailq. */
1622 TAILQ_INSERT_TAIL(&p->extern_types, elem, node);
/* Register a member function "name" for extern type "extern_type_name".
 * The per-type function count is capped at RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX
 * and duplicate function names within a type are rejected. The function ID is
 * its registration order within the type.
 *
 * Fix: use CHECK_NAME() (non-NULL AND non-empty) for both string arguments,
 * consistent with every other registration API in this file; the previous
 * CHECK(..., EINVAL) accepted empty strings.
 */
1628 rte_swx_pipeline_extern_type_member_func_register(struct rte_swx_pipeline *p,
1629 const char *extern_type_name,
1631 rte_swx_extern_type_member_func_t member_func)
1633 struct extern_type *type;
1634 struct extern_type_member_func *type_member;
1638 CHECK_NAME(extern_type_name, EINVAL);
1639 type = extern_type_find(p, extern_type_name);
1640 CHECK(type, EINVAL);
1641 CHECK(type->n_funcs < RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX, ENOSPC);
1643 CHECK_NAME(name, EINVAL);
1644 CHECK(!extern_type_member_func_find(type, name), EEXIST);
1646 CHECK(member_func, EINVAL);
1648 /* Node allocation. */
1649 type_member = calloc(1, sizeof(struct extern_type_member_func));
1650 CHECK(type_member, ENOMEM);
1652 /* Node initialization. */
1653 strcpy(type_member->name, name);
1654 type_member->func = member_func;
1655 type_member->id = type->n_funcs;
1657 /* Node add to tailq. */
1658 TAILQ_INSERT_TAIL(&type->funcs, type_member, node);
/* Instantiate an extern object of a previously registered type: run the
 * type's constructor with the user-supplied args string, then register the
 * object under a unique name. Each object gets its own struct slot
 * (p->n_structs) for its mailbox and a sequential object ID.
 */
1665 rte_swx_pipeline_extern_object_config(struct rte_swx_pipeline *p,
1666 const char *extern_type_name,
1670 struct extern_type *type;
1671 struct extern_obj *obj;
1676 CHECK_NAME(extern_type_name, EINVAL);
1677 type = extern_type_find(p, extern_type_name);
1678 CHECK(type, EINVAL);
1680 CHECK_NAME(name, EINVAL);
1681 CHECK(!extern_obj_find(p, name), EEXIST);
1683 /* Node allocation. */
1684 obj = calloc(1, sizeof(struct extern_obj));
1687 /* Object construction. */
1688 obj_handle = type->constructor(args);
1694 /* Node initialization. */
1695 strcpy(obj->name, name);
1697 obj->obj = obj_handle;
1698 obj->struct_id = p->n_structs;
1699 obj->id = p->n_extern_objs;
1701 /* Node add to tailq. */
1702 TAILQ_INSERT_TAIL(&p->extern_objs, obj, node);
/* Per-thread build step for extern objects: allocate each thread's runtime
 * array, then for every registered object allocate its mailbox, copy the
 * type's member function pointers into the runtime slot (indexed by func ID),
 * and publish the mailbox in the thread's structs[] table at the object's
 * struct_id so instructions can address mailbox fields.
 */
1710 extern_obj_build(struct rte_swx_pipeline *p)
1714 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1715 struct thread *t = &p->threads[i];
1716 struct extern_obj *obj;
1718 t->extern_objs = calloc(p->n_extern_objs,
1719 sizeof(struct extern_obj_runtime));
1720 CHECK(t->extern_objs, ENOMEM);
1722 TAILQ_FOREACH(obj, &p->extern_objs, node) {
1723 struct extern_obj_runtime *r =
1724 &t->extern_objs[obj->id];
1725 struct extern_type_member_func *func;
1726 uint32_t mailbox_size =
1727 obj->type->mailbox_struct_type->n_bits / 8;
1731 r->mailbox = calloc(1, mailbox_size);
1732 CHECK(r->mailbox, ENOMEM);
1734 TAILQ_FOREACH(func, &obj->type->funcs, node)
1735 r->funcs[func->id] = func->func;
1737 t->structs[obj->struct_id] = r->mailbox;
/* Undo extern_obj_build(): per thread, free each object's runtime resources
 * and then the runtime array itself; NULL the pointer so the free is
 * idempotent. Threads whose array was never allocated are skipped.
 */
1745 extern_obj_build_free(struct rte_swx_pipeline *p)
1749 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1750 struct thread *t = &p->threads[i];
1753 if (!t->extern_objs)
1756 for (j = 0; j < p->n_extern_objs; j++) {
1757 struct extern_obj_runtime *r = &t->extern_objs[j];
1762 free(t->extern_objs);
1763 t->extern_objs = NULL;
/* Full teardown: release the per-thread build state, then drain the extern
 * object list (invoking each object's type destructor on its handle), and
 * finally drain the extern type list together with each type's member
 * function list.
 */
1768 extern_obj_free(struct rte_swx_pipeline *p)
1770 extern_obj_build_free(p);
1772 /* Extern objects. */
1774 struct extern_obj *elem;
1776 elem = TAILQ_FIRST(&p->extern_objs);
1780 TAILQ_REMOVE(&p->extern_objs, elem, node);
1782 elem->type->destructor(elem->obj);
1788 struct extern_type *elem;
1790 elem = TAILQ_FIRST(&p->extern_types);
1794 TAILQ_REMOVE(&p->extern_types, elem, node);
1797 struct extern_type_member_func *func;
1799 func = TAILQ_FIRST(&elem->funcs);
1803 TAILQ_REMOVE(&elem->funcs, func, node);
/* Linear lookup of an extern function by exact name; NULL when not found. */
1814 static struct extern_func *
1815 extern_func_find(struct rte_swx_pipeline *p, const char *name)
1817 struct extern_func *elem;
1819 TAILQ_FOREACH(elem, &p->extern_funcs, node)
1820 if (strcmp(elem->name, name) == 0)
/* Parse an "f.<func_name>" token: require the "f." prefix, then look the
 * remainder up by name. NULL on bad prefix or unknown function.
 */
1826 static struct extern_func *
1827 extern_func_parse(struct rte_swx_pipeline *p,
1830 if (name[0] != 'f' || name[1] != '.')
1833 return extern_func_find(p, &name[2]);
/* Parse an "f.<func>.<field>" token into a mailbox struct field. The name is
 * strdup'ed so it can be split at the '.' separator, the function is looked
 * up by name and the field resolved in its mailbox struct type.
 * NOTE(review): the strdup'ed buffer must be freed on every exit path --
 * cleanup lines are elided in this listing; confirm against the full source.
 */
1836 static struct field *
1837 extern_func_mailbox_field_parse(struct rte_swx_pipeline *p,
1839 struct extern_func **function)
1841 struct extern_func *func;
1843 char *func_name, *field_name;
1845 if ((name[0] != 'f') || (name[1] != '.'))
1848 func_name = strdup(&name[2]);
1852 field_name = strchr(func_name, '.');
1861 func = extern_func_find(p, func_name);
1867 f = struct_type_field_find(func->mailbox_struct_type, field_name);
/* Register an extern function: validate the unique name, resolve the mailbox
 * struct type and require a non-NULL function pointer, then allocate and
 * link an extern_func node. Each function gets its own struct slot
 * (p->n_structs) for its mailbox and a sequential function ID.
 *
 * Fix: the allocation check tested "func" (the callback argument, already
 * validated non-NULL above) instead of "f", the calloc() result. On
 * allocation failure the old code fell through and dereferenced a NULL "f"
 * in strcpy(). Check the freshly allocated pointer instead.
 */
1881 rte_swx_pipeline_extern_func_register(struct rte_swx_pipeline *p,
1883 const char *mailbox_struct_type_name,
1884 rte_swx_extern_func_t func)
1886 struct extern_func *f;
1887 struct struct_type *mailbox_struct_type;
1891 CHECK_NAME(name, EINVAL);
1892 CHECK(!extern_func_find(p, name), EEXIST);
1894 CHECK_NAME(mailbox_struct_type_name, EINVAL);
1895 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
1896 CHECK(mailbox_struct_type, EINVAL);
1898 CHECK(func, EINVAL);
1900 /* Node allocation. */
1901 f = calloc(1, sizeof(struct extern_func));
1902 CHECK(f, ENOMEM);
1904 /* Node initialization. */
1905 strcpy(f->name, name);
1906 f->mailbox_struct_type = mailbox_struct_type;
1908 f->struct_id = p->n_structs;
1909 f->id = p->n_extern_funcs;
1911 /* Node add to tailq. */
1912 TAILQ_INSERT_TAIL(&p->extern_funcs, f, node);
1913 p->n_extern_funcs++;
/* Per-thread build step for extern functions: allocate each thread's runtime
 * array, then for every registered function copy the callback, allocate the
 * mailbox and publish it in the thread's structs[] table at the function's
 * struct_id.
 */
1920 extern_func_build(struct rte_swx_pipeline *p)
1924 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1925 struct thread *t = &p->threads[i];
1926 struct extern_func *func;
1928 /* Memory allocation. */
1929 t->extern_funcs = calloc(p->n_extern_funcs,
1930 sizeof(struct extern_func_runtime));
1931 CHECK(t->extern_funcs, ENOMEM);
1933 /* Extern function. */
1934 TAILQ_FOREACH(func, &p->extern_funcs, node) {
1935 struct extern_func_runtime *r =
1936 &t->extern_funcs[func->id];
1937 uint32_t mailbox_size =
1938 func->mailbox_struct_type->n_bits / 8;
1940 r->func = func->func;
1942 r->mailbox = calloc(1, mailbox_size);
1943 CHECK(r->mailbox, ENOMEM);
1945 t->structs[func->struct_id] = r->mailbox;
/* Undo extern_func_build(): per thread, free each function's runtime
 * resources and then the runtime array; NULL the pointer so the free is
 * idempotent. Threads whose array was never allocated are skipped.
 */
1953 extern_func_build_free(struct rte_swx_pipeline *p)
1957 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1958 struct thread *t = &p->threads[i];
1961 if (!t->extern_funcs)
1964 for (j = 0; j < p->n_extern_funcs; j++) {
1965 struct extern_func_runtime *r = &t->extern_funcs[j];
1970 free(t->extern_funcs);
1971 t->extern_funcs = NULL;
/* Full teardown for extern functions: release the per-thread build state,
 * then drain and free the registered function list.
 */
1976 extern_func_free(struct rte_swx_pipeline *p)
1978 extern_func_build_free(p);
1981 struct extern_func *elem;
1983 elem = TAILQ_FIRST(&p->extern_funcs);
1987 TAILQ_REMOVE(&p->extern_funcs, elem, node);
/* Linear lookup of a header by exact name; NULL when not found. */
1995 static struct header *
1996 header_find(struct rte_swx_pipeline *p, const char *name)
1998 struct header *elem;
2000 TAILQ_FOREACH(elem, &p->headers, node)
2001 if (strcmp(elem->name, name) == 0)
/* Parse an "h.<header_name>" token: require the "h." prefix, then look the
 * remainder up by name. NULL on bad prefix or unknown header.
 */
2007 static struct header *
2008 header_parse(struct rte_swx_pipeline *p,
2011 if (name[0] != 'h' || name[1] != '.')
2014 return header_find(p, &name[2]);
/* Parse an "h.<header>.<field>" token into a header struct field. The name is
 * strdup'ed so it can be split at the '.' separator, the header looked up and
 * the field resolved in its struct type.
 * NOTE(review): the strdup'ed buffer must be freed on every exit path --
 * cleanup lines are elided in this listing; confirm against the full source.
 */
2017 static struct field *
2018 header_field_parse(struct rte_swx_pipeline *p,
2020 struct header **header)
2024 char *header_name, *field_name;
2026 if ((name[0] != 'h') || (name[1] != '.'))
2029 header_name = strdup(&name[2]);
2033 field_name = strchr(header_name, '.');
2042 h = header_find(p, header_name);
2048 f = struct_type_field_find(h->st, field_name);
/* Register a packet header of a given struct type under a unique name. The
 * header count is bounded by the bit width of thread.valid_headers (one
 * valid-bit per header). Each header gets its own struct slot (p->n_structs)
 * and a sequential header ID.
 */
2062 rte_swx_pipeline_packet_header_register(struct rte_swx_pipeline *p,
2064 const char *struct_type_name)
2066 struct struct_type *st;
2068 size_t n_headers_max;
2071 CHECK_NAME(name, EINVAL);
2072 CHECK_NAME(struct_type_name, EINVAL);
2074 CHECK(!header_find(p, name), EEXIST);
2076 st = struct_type_find(p, struct_type_name);
2079 n_headers_max = RTE_SIZEOF_FIELD(struct thread, valid_headers) * 8;
2080 CHECK(p->n_headers < n_headers_max, ENOSPC);
2082 /* Node allocation. */
2083 h = calloc(1, sizeof(struct header));
2086 /* Node initialization. */
2087 strcpy(h->name, name);
2089 h->struct_id = p->n_structs;
2090 h->id = p->n_headers;
2092 /* Node add to tailq. */
2093 TAILQ_INSERT_TAIL(&p->headers, h, node);
/* Per-thread build step for headers: size one contiguous storage area as the
 * sum of all header byte sizes, then per thread allocate the header and
 * header-out runtime arrays plus both storage areas, and carve the storage
 * into per-header slices (ptr0 = the header's home location), publishing each
 * slice in the thread's structs[] table.
 */
2101 header_build(struct rte_swx_pipeline *p)
2104 uint32_t n_bytes = 0, i;
2106 TAILQ_FOREACH(h, &p->headers, node) {
2107 n_bytes += h->st->n_bits / 8;
2110 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2111 struct thread *t = &p->threads[i];
2112 uint32_t offset = 0;
2114 t->headers = calloc(p->n_headers,
2115 sizeof(struct header_runtime));
2116 CHECK(t->headers, ENOMEM);
2118 t->headers_out = calloc(p->n_headers,
2119 sizeof(struct header_out_runtime));
2120 CHECK(t->headers_out, ENOMEM);
2122 t->header_storage = calloc(1, n_bytes);
2123 CHECK(t->header_storage, ENOMEM);
2125 t->header_out_storage = calloc(1, n_bytes);
2126 CHECK(t->header_out_storage, ENOMEM);
2128 TAILQ_FOREACH(h, &p->headers, node) {
2129 uint8_t *header_storage;
2131 header_storage = &t->header_storage[offset];
2132 offset += h->st->n_bits / 8;
2134 t->headers[h->id].ptr0 = header_storage;
2135 t->structs[h->struct_id] = header_storage;
/* Undo header_build(): per thread, free the runtime arrays and both storage
 * areas, NULLing each pointer so the free is idempotent.
 */
2143 header_build_free(struct rte_swx_pipeline *p)
2147 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2148 struct thread *t = &p->threads[i];
2150 free(t->headers_out);
2151 t->headers_out = NULL;
2156 free(t->header_out_storage);
2157 t->header_out_storage = NULL;
2159 free(t->header_storage);
2160 t->header_storage = NULL;
/* Full teardown for headers: release the per-thread build state, then drain
 * and free the registered header list.
 */
2165 header_free(struct rte_swx_pipeline *p)
2167 header_build_free(p);
2170 struct header *elem;
2172 elem = TAILQ_FIRST(&p->headers);
2176 TAILQ_REMOVE(&p->headers, elem, node);
/* Parse an "m.<field_name>" token against the registered metadata struct
 * type. NULL when no metadata struct has been registered, on bad prefix, or
 * when the field is unknown.
 */
2184 static struct field *
2185 metadata_field_parse(struct rte_swx_pipeline *p, const char *name)
2187 if (!p->metadata_st)
2190 if (name[0] != 'm' || name[1] != '.')
2193 return struct_type_field_find(p->metadata_st, &name[2]);
/* Register the (single) packet metadata struct type. Rejected if metadata was
 * already registered. Metadata gets its own struct slot (p->n_structs).
 */
2197 rte_swx_pipeline_packet_metadata_register(struct rte_swx_pipeline *p,
2198 const char *struct_type_name)
2200 struct struct_type *st = NULL;
2204 CHECK_NAME(struct_type_name, EINVAL);
2205 st = struct_type_find(p, struct_type_name);
2207 CHECK(!p->metadata_st, EINVAL);
2209 p->metadata_st = st;
2210 p->metadata_struct_id = p->n_structs;
/* Per-thread build step for metadata: allocate a zeroed metadata buffer sized
 * from the registered struct type and publish it in the thread's structs[]
 * table at the metadata struct slot.
 */
2218 metadata_build(struct rte_swx_pipeline *p)
2220 uint32_t n_bytes = p->metadata_st->n_bits / 8;
2223 /* Thread-level initialization. */
2224 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2225 struct thread *t = &p->threads[i];
2228 metadata = calloc(1, n_bytes);
2229 CHECK(metadata, ENOMEM);
2231 t->metadata = metadata;
2232 t->structs[p->metadata_struct_id] = metadata;
/* Undo metadata_build(): free each thread's metadata buffer. */
2239 metadata_build_free(struct rte_swx_pipeline *p)
2243 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2244 struct thread *t = &p->threads[i];
/* Full teardown for metadata: currently just the per-thread build state. */
2252 metadata_free(struct rte_swx_pipeline *p)
2254 metadata_build_free(p);
/* Classify an instruction: true for every conditional/unconditional jump
 * opcode (these need label/branch-target resolution), false otherwise.
 */
2261 instruction_is_jmp(struct instruction *instr)
2263 switch (instr->type) {
2265 case INSTR_JMP_VALID:
2266 case INSTR_JMP_INVALID:
2268 case INSTR_JMP_MISS:
2269 case INSTR_JMP_ACTION_HIT:
2270 case INSTR_JMP_ACTION_MISS:
2272 case INSTR_JMP_EQ_S:
2273 case INSTR_JMP_EQ_I:
2275 case INSTR_JMP_NEQ_S:
2276 case INSTR_JMP_NEQ_I:
2278 case INSTR_JMP_LT_MH:
2279 case INSTR_JMP_LT_HM:
2280 case INSTR_JMP_LT_HH:
2281 case INSTR_JMP_LT_MI:
2282 case INSTR_JMP_LT_HI:
2284 case INSTR_JMP_GT_MH:
2285 case INSTR_JMP_GT_HM:
2286 case INSTR_JMP_GT_HH:
2287 case INSTR_JMP_GT_MI:
2288 case INSTR_JMP_GT_HI:
2296 static struct field *
2297 action_field_parse(struct action *action, const char *name);
/* Resolve a field reference token of any namespace into (field, struct_id):
 * "h.<hdr>.<fld>" (header), "m.<fld>" (metadata), "t.<fld>" (action data,
 * only valid inside an action), "e.<obj>.<fld>" (extern object mailbox) or
 * "f.<func>.<fld>" (extern function mailbox). Each candidate parser is tried
 * in turn; the matching one also yields the struct slot index written to
 * *struct_id.
 */
2299 static struct field *
2300 struct_field_parse(struct rte_swx_pipeline *p,
2301 struct action *action,
2303 uint32_t *struct_id)
2310 struct header *header;
2312 f = header_field_parse(p, name, &header);
2316 *struct_id = header->struct_id;
2322 f = metadata_field_parse(p, name);
2326 *struct_id = p->metadata_struct_id;
2335 f = action_field_parse(action, name);
2345 struct extern_obj *obj;
2347 f = extern_obj_mailbox_field_parse(p, name, &obj);
2351 *struct_id = obj->struct_id;
2357 struct extern_func *func;
2359 f = extern_func_mailbox_field_parse(p, name, &func);
2363 *struct_id = func->struct_id;
/* Small inline helpers driving the pipeline's round-robin scheduling.
 * The "& (count - 1)" wrap assumes both n_ports_in and
 * RTE_SWX_PIPELINE_THREADS_MAX are powers of two -- presumably enforced at
 * configuration time (enforcement not visible in this listing; confirm).
 */
/* Advance the input port cursor, wrapping around. */
2373 pipeline_port_inc(struct rte_swx_pipeline *p)
2375 p->port_id = (p->port_id + 1) & (p->n_ports_in - 1);
/* Restart a thread at the first pipeline instruction. */
2379 thread_ip_reset(struct rte_swx_pipeline *p, struct thread *t)
2381 t->ip = p->instructions;
/* Point a thread at an arbitrary instruction. */
2385 thread_ip_set(struct thread *t, struct instruction *ip)
/* Jump a thread into the instruction stream of the given action. */
2391 thread_ip_action_call(struct rte_swx_pipeline *p,
2396 t->ip = p->action_instructions[action_id];
2400 thread_ip_inc(struct rte_swx_pipeline *p);
/* Advance the current thread's instruction pointer. */
2403 thread_ip_inc(struct rte_swx_pipeline *p)
2405 struct thread *t = &p->threads[p->thread_id];
/* Conditionally advance an instruction pointer (cond is 0 or 1). */
2411 thread_ip_inc_cond(struct thread *t, int cond)
/* Switch execution to the next thread, wrapping around. */
2417 thread_yield(struct rte_swx_pipeline *p)
2419 p->thread_id = (p->thread_id + 1) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
/* Conditionally switch to the next thread (cond is 0 or 1). */
2423 thread_yield_cond(struct rte_swx_pipeline *p, int cond)
2425 p->thread_id = (p->thread_id + cond) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
/* Translate "rx m.<field>": not allowed inside an action, takes exactly one
 * operand (the metadata field receiving the input port ID). Stores the
 * field's byte offset and bit width in the instruction.
 */
2432 instr_rx_translate(struct rte_swx_pipeline *p,
2433 struct action *action,
2436 struct instruction *instr,
2437 struct instruction_data *data __rte_unused)
2441 CHECK(!action, EINVAL);
2442 CHECK(n_tokens == 2, EINVAL);
2444 f = metadata_field_parse(p, tokens[1]);
2447 instr->type = INSTR_RX;
2448 instr->io.io.offset = f->offset / 8;
2449 instr->io.io.n_bits = f->n_bits;
/* Execute the rx instruction: poll the current input port for one packet,
 * point the thread at the packet data and prefetch it, reset the per-packet
 * header state, record the port ID into the configured metadata field and
 * snapshot the table state. The port cursor always advances; the thread's
 * instruction pointer only advances when a packet was actually received
 * (pkt_received is 0 or 1), so an idle thread retries rx.
 */
2454 instr_rx_exec(struct rte_swx_pipeline *p);
2457 instr_rx_exec(struct rte_swx_pipeline *p)
2459 struct thread *t = &p->threads[p->thread_id];
2460 struct instruction *ip = t->ip;
2461 struct port_in_runtime *port = &p->in[p->port_id];
2462 struct rte_swx_pkt *pkt = &t->pkt;
2466 pkt_received = port->pkt_rx(port->obj, pkt);
2467 t->ptr = &pkt->pkt[pkt->offset];
2468 rte_prefetch0(t->ptr);
2470 TRACE("[Thread %2u] rx %s from port %u\n",
2472 pkt_received ? "1 pkt" : "0 pkts",
2476 t->valid_headers = 0;
2477 t->n_headers_out = 0;
2480 METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, p->port_id);
2483 t->table_state = p->table_state;
2486 pipeline_port_inc(p);
2487 thread_ip_inc_cond(t, pkt_received);
/* Translate "tx m.<field>": one operand, the metadata field holding the
 * output port ID. Stores the field's byte offset and bit width.
 */
2495 instr_tx_translate(struct rte_swx_pipeline *p,
2496 struct action *action __rte_unused,
2499 struct instruction *instr,
2500 struct instruction_data *data __rte_unused)
2504 CHECK(n_tokens == 2, EINVAL);
2506 f = metadata_field_parse(p, tokens[1]);
2509 instr->type = INSTR_TX;
2510 instr->io.io.offset = f->offset / 8;
2511 instr->io.io.n_bits = f->n_bits;
/* Rebuild the outgoing packet headers before tx. Fast paths:
 * 1) single emitted header already contiguous with the payload (no change or
 *    pure decap) -- just extend the packet window over it;
 * 2) exactly two emitted headers where the second is contiguous with the
 *    payload and the first sits at its home location (encap, optionally after
 *    a decap) -- one memcpy of the first header in front of the payload.
 * Otherwise fall back to the generic path: gather all emitted headers into
 * header_out_storage, then copy the whole block in front of the payload.
 * In every case pkt.offset moves back and pkt.length grows by the emitted
 * byte count.
 */
2516 emit_handler(struct thread *t)
2518 struct header_out_runtime *h0 = &t->headers_out[0];
2519 struct header_out_runtime *h1 = &t->headers_out[1];
2520 uint32_t offset = 0, i;
2522 /* No header change or header decapsulation. */
2523 if ((t->n_headers_out == 1) &&
2524 (h0->ptr + h0->n_bytes == t->ptr)) {
2525 TRACE("Emit handler: no header change or header decap.\n");
2527 t->pkt.offset -= h0->n_bytes;
2528 t->pkt.length += h0->n_bytes;
2533 /* Header encapsulation (optionally, with prior header decapsulation). */
2534 if ((t->n_headers_out == 2) &&
2535 (h1->ptr + h1->n_bytes == t->ptr) &&
2536 (h0->ptr == h0->ptr0)) {
2539 TRACE("Emit handler: header encapsulation.\n");
2541 offset = h0->n_bytes + h1->n_bytes;
2542 memcpy(t->ptr - offset, h0->ptr, h0->n_bytes);
2543 t->pkt.offset -= offset;
2544 t->pkt.length += offset;
2549 /* Header insertion. */
2552 /* Header extraction. */
2555 /* For any other case. */
2556 TRACE("Emit handler: complex case.\n");
2558 for (i = 0; i < t->n_headers_out; i++) {
2559 struct header_out_runtime *h = &t->headers_out[i];
2561 memcpy(&t->header_out_storage[offset], h->ptr, h->n_bytes);
2562 offset += h->n_bytes;
2566 memcpy(t->ptr - offset, t->header_out_storage, offset);
2567 t->pkt.offset -= offset;
2568 t->pkt.length += offset;
/* Execute the tx instruction: read the output port ID from the configured
 * metadata field, hand the packet to that port's tx callback and restart the
 * thread at the first pipeline instruction (ready for the next rx).
 */
2573 instr_tx_exec(struct rte_swx_pipeline *p);
2576 instr_tx_exec(struct rte_swx_pipeline *p)
2578 struct thread *t = &p->threads[p->thread_id];
2579 struct instruction *ip = t->ip;
2580 uint64_t port_id = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
2581 struct port_out_runtime *port = &p->out[port_id];
2582 struct rte_swx_pkt *pkt = &t->pkt;
2584 TRACE("[Thread %2u]: tx 1 pkt to port %u\n",
2592 port->pkt_tx(port->obj, pkt);
2595 thread_ip_reset(p, t);
/* Translate "extract h.<header>": not allowed inside an action, one operand.
 * Records the header's ID, struct slot and byte size in array slot 0 (later
 * slots are used by the instruction-fusion optimizer).
 */
2603 instr_hdr_extract_translate(struct rte_swx_pipeline *p,
2604 struct action *action,
2607 struct instruction *instr,
2608 struct instruction_data *data __rte_unused)
2612 CHECK(!action, EINVAL);
2613 CHECK(n_tokens == 2, EINVAL);
2615 h = header_parse(p, tokens[1]);
2618 instr->type = INSTR_HDR_EXTRACT;
2619 instr->io.hdr.header_id[0] = h->id;
2620 instr->io.hdr.struct_id[0] = h->struct_id;
2621 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
/* Core extract executor shared by the 1..8-way fused variants: for each of
 * the n_extract headers encoded in the instruction, map the header's struct
 * slot directly onto the packet data (zero-copy), set its valid bit, and
 * advance the packet window past the extracted bytes. The updated cursor,
 * valid-header mask and packet offset/length are written back at the end.
 */
2626 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract);
2629 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract)
2631 struct thread *t = &p->threads[p->thread_id];
2632 struct instruction *ip = t->ip;
2633 uint64_t valid_headers = t->valid_headers;
2634 uint8_t *ptr = t->ptr;
2635 uint32_t offset = t->pkt.offset;
2636 uint32_t length = t->pkt.length;
2639 for (i = 0; i < n_extract; i++) {
2640 uint32_t header_id = ip->io.hdr.header_id[i];
2641 uint32_t struct_id = ip->io.hdr.struct_id[i];
2642 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
2644 TRACE("[Thread %2u]: extract header %u (%u bytes)\n",
2650 t->structs[struct_id] = ptr;
2651 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
2660 t->valid_headers = valid_headers;
2663 t->pkt.offset = offset;
2664 t->pkt.length = length;
/* Thin wrappers dispatching to __instr_hdr_extract_exec() with the fusion
 * degree (1..8). The N>1 variants are emitted by the optimizer when N
 * consecutive extract instructions have been fused into one.
 */
2669 instr_hdr_extract_exec(struct rte_swx_pipeline *p)
2671 __instr_hdr_extract_exec(p, 1);
2678 instr_hdr_extract2_exec(struct rte_swx_pipeline *p)
2680 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
2683 __instr_hdr_extract_exec(p, 2);
2690 instr_hdr_extract3_exec(struct rte_swx_pipeline *p)
2692 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
2695 __instr_hdr_extract_exec(p, 3);
2702 instr_hdr_extract4_exec(struct rte_swx_pipeline *p)
2704 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
2707 __instr_hdr_extract_exec(p, 4);
2714 instr_hdr_extract5_exec(struct rte_swx_pipeline *p)
2716 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
2719 __instr_hdr_extract_exec(p, 5);
2726 instr_hdr_extract6_exec(struct rte_swx_pipeline *p)
2728 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
2731 __instr_hdr_extract_exec(p, 6);
2738 instr_hdr_extract7_exec(struct rte_swx_pipeline *p)
2740 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
2743 __instr_hdr_extract_exec(p, 7);
2750 instr_hdr_extract8_exec(struct rte_swx_pipeline *p)
2752 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
2755 __instr_hdr_extract_exec(p, 8);
/* Translate "emit h.<header>": one operand. Records the header's ID, struct
 * slot and byte size in array slot 0 (later slots used when emits are fused).
 */
2765 instr_hdr_emit_translate(struct rte_swx_pipeline *p,
2766 struct action *action __rte_unused,
2769 struct instruction *instr,
2770 struct instruction_data *data __rte_unused)
2774 CHECK(n_tokens == 2, EINVAL);
2776 h = header_parse(p, tokens[1]);
2779 instr->type = INSTR_HDR_EMIT;
2780 instr->io.hdr.header_id[0] = h->id;
2781 instr->io.hdr.struct_id[0] = h->struct_id;
2782 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
/* Core emit executor shared by the 1..8-way fused variants: append each of
 * the n_emit headers to the headers_out[] list. Contiguous headers are
 * coalesced into the current headers_out entry (byte count grows); a
 * non-contiguous header opens a new entry. The "ho" cursor initially points
 * one entry before headers_out[n_headers_out] (address computation only when
 * the list is empty -- the !t->n_headers_out branch resets it to entry 0
 * before any dereference). Final byte count and entry count are written back
 * at the end.
 */
2787 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit);
2790 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit)
2792 struct thread *t = &p->threads[p->thread_id];
2793 struct instruction *ip = t->ip;
2794 uint32_t n_headers_out = t->n_headers_out;
2795 struct header_out_runtime *ho = &t->headers_out[n_headers_out - 1];
2796 uint8_t *ho_ptr = NULL;
2797 uint32_t ho_nbytes = 0, i;
2799 for (i = 0; i < n_emit; i++) {
2800 uint32_t header_id = ip->io.hdr.header_id[i];
2801 uint32_t struct_id = ip->io.hdr.struct_id[i];
2802 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
2804 struct header_runtime *hi = &t->headers[header_id];
2805 uint8_t *hi_ptr = t->structs[struct_id];
2807 TRACE("[Thread %2u]: emit header %u\n",
2813 if (!t->n_headers_out) {
2814 ho = &t->headers_out[0];
2816 ho->ptr0 = hi->ptr0;
2820 ho_nbytes = n_bytes;
2827 ho_nbytes = ho->n_bytes;
2831 if (ho_ptr + ho_nbytes == hi_ptr) {
2832 ho_nbytes += n_bytes;
2834 ho->n_bytes = ho_nbytes;
2837 ho->ptr0 = hi->ptr0;
2841 ho_nbytes = n_bytes;
2847 ho->n_bytes = ho_nbytes;
2848 t->n_headers_out = n_headers_out;
/* Thin wrappers dispatching to __instr_hdr_emit_exec() with the fusion
 * degree. The *_tx variants fuse N emits with a trailing tx, hence the
 * TRACE count of N+1 fused instructions per variant (tx part elided in this
 * listing).
 */
2852 instr_hdr_emit_exec(struct rte_swx_pipeline *p)
2854 __instr_hdr_emit_exec(p, 1);
2861 instr_hdr_emit_tx_exec(struct rte_swx_pipeline *p)
2863 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
2866 __instr_hdr_emit_exec(p, 1);
2871 instr_hdr_emit2_tx_exec(struct rte_swx_pipeline *p)
2873 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
2876 __instr_hdr_emit_exec(p, 2);
2881 instr_hdr_emit3_tx_exec(struct rte_swx_pipeline *p)
2883 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
2886 __instr_hdr_emit_exec(p, 3);
2891 instr_hdr_emit4_tx_exec(struct rte_swx_pipeline *p)
2893 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
2896 __instr_hdr_emit_exec(p, 4);
2901 instr_hdr_emit5_tx_exec(struct rte_swx_pipeline *p)
2903 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
2906 __instr_hdr_emit_exec(p, 5);
2911 instr_hdr_emit6_tx_exec(struct rte_swx_pipeline *p)
2913 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
2916 __instr_hdr_emit_exec(p, 6);
2921 instr_hdr_emit7_tx_exec(struct rte_swx_pipeline *p)
2923 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
2926 __instr_hdr_emit_exec(p, 7);
2931 instr_hdr_emit8_tx_exec(struct rte_swx_pipeline *p)
2933 TRACE("[Thread %2u] *** The next 9 instructions are fused. ***\n",
2936 __instr_hdr_emit_exec(p, 8);
/* Translate "validate h.<header>": one operand; stores the header ID. */
2944 instr_hdr_validate_translate(struct rte_swx_pipeline *p,
2945 struct action *action __rte_unused,
2948 struct instruction *instr,
2949 struct instruction_data *data __rte_unused)
2953 CHECK(n_tokens == 2, EINVAL);
2955 h = header_parse(p, tokens[1]);
2958 instr->type = INSTR_HDR_VALIDATE;
2959 instr->valid.header_id = h->id;
/* Execute "validate": set the header's bit in the thread's valid mask. */
2964 instr_hdr_validate_exec(struct rte_swx_pipeline *p)
2966 struct thread *t = &p->threads[p->thread_id];
2967 struct instruction *ip = t->ip;
2968 uint32_t header_id = ip->valid.header_id;
2970 TRACE("[Thread %2u] validate header %u\n", p->thread_id, header_id);
2973 t->valid_headers = MASK64_BIT_SET(t->valid_headers, header_id);
/* Translate "invalidate h.<header>": one operand; stores the header ID. */
2983 instr_hdr_invalidate_translate(struct rte_swx_pipeline *p,
2984 struct action *action __rte_unused,
2987 struct instruction *instr,
2988 struct instruction_data *data __rte_unused)
2992 CHECK(n_tokens == 2, EINVAL);
2994 h = header_parse(p, tokens[1]);
2997 instr->type = INSTR_HDR_INVALIDATE;
2998 instr->valid.header_id = h->id;
/* Execute "invalidate": clear the header's bit in the thread's valid mask. */
3003 instr_hdr_invalidate_exec(struct rte_swx_pipeline *p)
3005 struct thread *t = &p->threads[p->thread_id];
3006 struct instruction *ip = t->ip;
3007 uint32_t header_id = ip->valid.header_id;
3009 TRACE("[Thread %2u] invalidate header %u\n", p->thread_id, header_id);
3012 t->valid_headers = MASK64_BIT_CLR(t->valid_headers, header_id);
/* Translate "table <name>": not allowed inside an action, one operand; looks
 * the table up by name (forward-declared table_find) and stores its ID.
 */
3021 static struct table *
3022 table_find(struct rte_swx_pipeline *p, const char *name);
3025 instr_table_translate(struct rte_swx_pipeline *p,
3026 struct action *action,
3029 struct instruction *instr,
3030 struct instruction_data *data __rte_unused)
3034 CHECK(!action, EINVAL);
3035 CHECK(n_tokens == 2, EINVAL);
3037 t = table_find(p, tokens[1]);
3040 instr->type = INSTR_TABLE;
3041 instr->table.table_id = t->id;
/* Execute a table lookup: invoke the table's lookup function on the thread's
 * state. A multi-step lookup that is not yet done yields the thread (not
 * finalized path). On completion, select either the matched entry's action
 * or the table's default action (on miss), expose the action data as struct
 * slot 0 ("t." namespace) and jump into the action's instruction stream.
 */
3046 instr_table_exec(struct rte_swx_pipeline *p)
3048 struct thread *t = &p->threads[p->thread_id];
3049 struct instruction *ip = t->ip;
3050 uint32_t table_id = ip->table.table_id;
3051 struct rte_swx_table_state *ts = &t->table_state[table_id];
3052 struct table_runtime *table = &t->tables[table_id];
3054 uint8_t *action_data;
3058 done = table->func(ts->obj,
3066 TRACE("[Thread %2u] table %u (not finalized)\n",
3074 action_id = hit ? action_id : ts->default_action_id;
3075 action_data = hit ? action_data : ts->default_action_data;
3077 TRACE("[Thread %2u] table %u (%s, action %u)\n",
3080 hit ? "hit" : "miss",
3081 (uint32_t)action_id);
3083 t->action_id = action_id;
3084 t->structs[0] = action_data;
3088 thread_ip_action_call(p, t, action_id);
/* Translate "extern <target>": one operand, dispatched on its namespace
 * prefix -- "e.<obj>.<member_func>" becomes INSTR_EXTERN_OBJ (object ID +
 * member function ID), "f.<func>" becomes INSTR_EXTERN_FUNC (function ID).
 */
3095 instr_extern_translate(struct rte_swx_pipeline *p,
3096 struct action *action __rte_unused,
3099 struct instruction *instr,
3100 struct instruction_data *data __rte_unused)
3102 char *token = tokens[1];
3104 CHECK(n_tokens == 2, EINVAL);
3106 if (token[0] == 'e') {
3107 struct extern_obj *obj;
3108 struct extern_type_member_func *func;
3110 func = extern_obj_member_func_parse(p, token, &obj);
3111 CHECK(func, EINVAL);
3113 instr->type = INSTR_EXTERN_OBJ;
3114 instr->ext_obj.ext_obj_id = obj->id;
3115 instr->ext_obj.func_id = func->id;
3120 if (token[0] == 'f') {
3121 struct extern_func *func;
3123 func = extern_func_parse(p, token);
3124 CHECK(func, EINVAL);
3126 instr->type = INSTR_EXTERN_FUNC;
3127 instr->ext_func.ext_func_id = func->id;
/* Execute an extern object member function call with the object handle and
 * its per-thread mailbox. The callback returns 0/1 "done": advance the
 * instruction pointer when done, otherwise yield to the next thread and
 * re-execute on resume (done ^ 1 inverts the 0/1 flag).
 */
3136 instr_extern_obj_exec(struct rte_swx_pipeline *p)
3138 struct thread *t = &p->threads[p->thread_id];
3139 struct instruction *ip = t->ip;
3140 uint32_t obj_id = ip->ext_obj.ext_obj_id;
3141 uint32_t func_id = ip->ext_obj.func_id;
3142 struct extern_obj_runtime *obj = &t->extern_objs[obj_id];
3143 rte_swx_extern_type_member_func_t func = obj->funcs[func_id];
3145 TRACE("[Thread %2u] extern obj %u member func %u\n",
3150 /* Extern object member function execute. */
3151 uint32_t done = func(obj->obj, obj->mailbox);
3154 thread_ip_inc_cond(t, done);
3155 thread_yield_cond(p, done ^ 1);
/* Execute an extern function call with its per-thread mailbox. Same 0/1
 * "done" protocol as extern object calls: advance on done, else yield and
 * retry on resume.
 */
3159 instr_extern_func_exec(struct rte_swx_pipeline *p)
3161 struct thread *t = &p->threads[p->thread_id];
3162 struct instruction *ip = t->ip;
3163 uint32_t ext_func_id = ip->ext_func.ext_func_id;
3164 struct extern_func_runtime *ext_func = &t->extern_funcs[ext_func_id];
3165 rte_swx_extern_func_t func = ext_func->func;
3167 TRACE("[Thread %2u] extern func %u\n",
3171 /* Extern function execute. */
3172 uint32_t done = func(ext_func->mailbox);
3175 thread_ip_inc_cond(t, done);
3176 thread_yield_cond(p, done ^ 1);
/* Translate "mov <dst> <src>" (dst = field, src = field or immediate).
 * Field/field: MOV, or MOV_S when exactly one side is a header field (header
 * fields are network byte order, metadata host order, so a byte swap is
 * needed). Field/immediate: MOV_I; when dst is a header field the immediate
 * is pre-swapped with htonl() so the exec path can store it directly.
 * strtoul() with endptr validates the immediate is fully numeric.
 */
3183 instr_mov_translate(struct rte_swx_pipeline *p,
3184 struct action *action,
3187 struct instruction *instr,
3188 struct instruction_data *data __rte_unused)
3190 char *dst = tokens[1], *src = tokens[2];
3191 struct field *fdst, *fsrc;
3192 uint32_t dst_struct_id, src_struct_id, src_val;
3194 CHECK(n_tokens == 3, EINVAL);
3196 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3197 CHECK(fdst, EINVAL);
3200 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3202 instr->type = INSTR_MOV;
3203 if ((dst[0] == 'h' && src[0] != 'h') ||
3204 (dst[0] != 'h' && src[0] == 'h'))
3205 instr->type = INSTR_MOV_S;
3207 instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
3208 instr->mov.dst.n_bits = fdst->n_bits;
3209 instr->mov.dst.offset = fdst->offset / 8;
3210 instr->mov.src.struct_id = (uint8_t)src_struct_id;
3211 instr->mov.src.n_bits = fsrc->n_bits;
3212 instr->mov.src.offset = fsrc->offset / 8;
3217 src_val = strtoul(src, &src, 0);
3218 CHECK(!src[0], EINVAL);
3221 src_val = htonl(src_val);
3223 instr->type = INSTR_MOV_I;
3224 instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
3225 instr->mov.dst.n_bits = fdst->n_bits;
3226 instr->mov.dst.offset = fdst->offset / 8;
3227 instr->mov.src_val = (uint32_t)src_val;
/* Executors for the three mov variants: plain field-to-field copy (MOV),
 * copy with byte swap between header and metadata fields (MOV_S), and
 * immediate store (MOV_I). Copy bodies elided in this listing.
 */
3232 instr_mov_exec(struct rte_swx_pipeline *p)
3234 struct thread *t = &p->threads[p->thread_id];
3235 struct instruction *ip = t->ip;
3237 TRACE("[Thread %2u] mov\n",
3247 instr_mov_s_exec(struct rte_swx_pipeline *p)
3249 struct thread *t = &p->threads[p->thread_id];
3250 struct instruction *ip = t->ip;
3252 TRACE("[Thread %2u] mov (s)\n",
3262 instr_mov_i_exec(struct rte_swx_pipeline *p)
3264 struct thread *t = &p->threads[p->thread_id];
3265 struct instruction *ip = t->ip;
3267 TRACE("[Thread %2u] mov m.f %x\n",
/* Translate "dma h.<header> t.<field>": only valid inside an action; copies
 * a whole header's worth of bytes from the action data (table entry) into
 * the header. Records the header ID/struct slot/byte size and the action
 * data source offset in array slot 0 (later slots used when DMAs are fused).
 */
3281 instr_dma_translate(struct rte_swx_pipeline *p,
3282 struct action *action,
3285 struct instruction *instr,
3286 struct instruction_data *data __rte_unused)
3288 char *dst = tokens[1];
3289 char *src = tokens[2];
3293 CHECK(action, EINVAL);
3294 CHECK(n_tokens == 3, EINVAL);
3296 h = header_parse(p, dst);
3299 tf = action_field_parse(action, src);
3302 instr->type = INSTR_DMA_HT;
3303 instr->dma.dst.header_id[0] = h->id;
3304 instr->dma.dst.struct_id[0] = h->struct_id;
3305 instr->dma.n_bytes[0] = h->st->n_bits / 8;
3306 instr->dma.src.offset[0] = tf->offset / 8;
/* Core DMA executor shared by the 1..8-way fused variants: for each of the
 * n_dma transfers, copy n_bytes from the action data (struct slot 0) into
 * the destination header. An already-valid header is written in place; an
 * invalid one is written at its home location (ptr0) and marked valid. The
 * struct slot is repointed at the written destination either way.
 */
3312 __instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma);
3315 __instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma)
3317 struct thread *t = &p->threads[p->thread_id];
3318 struct instruction *ip = t->ip;
3319 uint8_t *action_data = t->structs[0];
3320 uint64_t valid_headers = t->valid_headers;
3323 for (i = 0; i < n_dma; i++) {
3324 uint32_t header_id = ip->dma.dst.header_id[i];
3325 uint32_t struct_id = ip->dma.dst.struct_id[i];
3326 uint32_t offset = ip->dma.src.offset[i];
3327 uint32_t n_bytes = ip->dma.n_bytes[i];
3329 struct header_runtime *h = &t->headers[header_id];
3330 uint8_t *h_ptr0 = h->ptr0;
3331 uint8_t *h_ptr = t->structs[struct_id];
3333 void *dst = MASK64_BIT_GET(valid_headers, header_id) ?
3335 void *src = &action_data[offset];
3337 TRACE("[Thread %2u] dma h.s t.f\n", p->thread_id);
3340 memcpy(dst, src, n_bytes);
3341 t->structs[struct_id] = dst;
3342 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
3345 t->valid_headers = valid_headers;
/* Thin wrappers dispatching to __instr_dma_ht_exec() with the fusion degree
 * (1..8), emitted when consecutive DMA instructions have been fused.
 */
3349 instr_dma_ht_exec(struct rte_swx_pipeline *p)
3351 __instr_dma_ht_exec(p, 1);
3358 instr_dma_ht2_exec(struct rte_swx_pipeline *p)
3360 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
3363 __instr_dma_ht_exec(p, 2);
3370 instr_dma_ht3_exec(struct rte_swx_pipeline *p)
3372 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
3375 __instr_dma_ht_exec(p, 3);
3382 instr_dma_ht4_exec(struct rte_swx_pipeline *p)
3384 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
3387 __instr_dma_ht_exec(p, 4);
3394 instr_dma_ht5_exec(struct rte_swx_pipeline *p)
3396 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
3399 __instr_dma_ht_exec(p, 5);
3406 instr_dma_ht6_exec(struct rte_swx_pipeline *p)
3408 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
3411 __instr_dma_ht_exec(p, 6);
3418 instr_dma_ht7_exec(struct rte_swx_pipeline *p)
3420 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
3423 __instr_dma_ht_exec(p, 7);
3430 instr_dma_ht8_exec(struct rte_swx_pipeline *p)
3432 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
3435 __instr_dma_ht_exec(p, 8);
/* Translate "add <dst> <src>". Field/field: the opcode variant encodes the
 * operand namespaces (H = header field, network byte order; M = metadata,
 * host order), selecting ADD / ADD_HM / ADD_MH / ADD_HH so the exec path
 * applies the right byte swaps. Field/immediate: ADD_MI or ADD_HI by dst
 * namespace; strtoul() with endptr validates the immediate.
 */
3445 instr_alu_add_translate(struct rte_swx_pipeline *p,
3446 struct action *action,
3449 struct instruction *instr,
3450 struct instruction_data *data __rte_unused)
3452 char *dst = tokens[1], *src = tokens[2];
3453 struct field *fdst, *fsrc;
3454 uint32_t dst_struct_id, src_struct_id, src_val;
3456 CHECK(n_tokens == 3, EINVAL);
3458 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3459 CHECK(fdst, EINVAL);
3461 /* ADD, ADD_HM, ADD_MH, ADD_HH. */
3462 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3464 instr->type = INSTR_ALU_ADD;
3465 if (dst[0] == 'h' && src[0] == 'm')
3466 instr->type = INSTR_ALU_ADD_HM;
3467 if (dst[0] == 'm' && src[0] == 'h')
3468 instr->type = INSTR_ALU_ADD_MH;
3469 if (dst[0] == 'h' && src[0] == 'h')
3470 instr->type = INSTR_ALU_ADD_HH;
3472 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3473 instr->alu.dst.n_bits = fdst->n_bits;
3474 instr->alu.dst.offset = fdst->offset / 8;
3475 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3476 instr->alu.src.n_bits = fsrc->n_bits;
3477 instr->alu.src.offset = fsrc->offset / 8;
3481 /* ADD_MI, ADD_HI. */
3482 src_val = strtoul(src, &src, 0);
3483 CHECK(!src[0], EINVAL);
3485 instr->type = INSTR_ALU_ADD_MI;
3487 instr->type = INSTR_ALU_ADD_HI;
3489 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3490 instr->alu.dst.n_bits = fdst->n_bits;
3491 instr->alu.dst.offset = fdst->offset / 8;
3492 instr->alu.src_val = (uint32_t)src_val;
/* Translate "sub <dst> <src>". Mirrors the add translator: field/field
 * selects SUB / SUB_HM / SUB_MH / SUB_HH by the header/metadata namespaces
 * of the two operands; field/immediate selects SUB_MI or SUB_HI, with the
 * immediate validated by strtoul() + endptr.
 */
3497 instr_alu_sub_translate(struct rte_swx_pipeline *p,
3498 struct action *action,
3501 struct instruction *instr,
3502 struct instruction_data *data __rte_unused)
3504 char *dst = tokens[1], *src = tokens[2];
3505 struct field *fdst, *fsrc;
3506 uint32_t dst_struct_id, src_struct_id, src_val;
3508 CHECK(n_tokens == 3, EINVAL);
3510 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3511 CHECK(fdst, EINVAL);
3513 /* SUB, SUB_HM, SUB_MH, SUB_HH. */
3514 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3516 instr->type = INSTR_ALU_SUB;
3517 if (dst[0] == 'h' && src[0] == 'm')
3518 instr->type = INSTR_ALU_SUB_HM;
3519 if (dst[0] == 'm' && src[0] == 'h')
3520 instr->type = INSTR_ALU_SUB_MH;
3521 if (dst[0] == 'h' && src[0] == 'h')
3522 instr->type = INSTR_ALU_SUB_HH;
3524 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3525 instr->alu.dst.n_bits = fdst->n_bits;
3526 instr->alu.dst.offset = fdst->offset / 8;
3527 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3528 instr->alu.src.n_bits = fsrc->n_bits;
3529 instr->alu.src.offset = fsrc->offset / 8;
3533 /* SUB_MI, SUB_HI. */
3534 src_val = strtoul(src, &src, 0);
3535 CHECK(!src[0], EINVAL);
3537 instr->type = INSTR_ALU_SUB_MI;
3539 instr->type = INSTR_ALU_SUB_HI;
3541 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3542 instr->alu.dst.n_bits = fdst->n_bits;
3543 instr->alu.dst.offset = fdst->offset / 8;
3544 instr->alu.src_val = (uint32_t)src_val;
/*
 * Translate the "ckadd dst src" (checksum add) instruction.
 * dst must be a 16-bit header field (the checksum). When src parses as
 * a header field, CKADD_FIELD is used; otherwise src is parsed as a
 * whole header: CKADD_STRUCT in general, or the specialized
 * CKADD_STRUCT20 when the source header is exactly 20 bytes (the IPv4
 * header size without options). Returns 0 or -EINVAL via CHECK.
 */
3549 instr_alu_ckadd_translate(struct rte_swx_pipeline *p,
3550 struct action *action __rte_unused,
3553 struct instruction *instr,
3554 struct instruction_data *data __rte_unused)
3556 char *dst = tokens[1], *src = tokens[2];
3557 struct header *hdst, *hsrc;
3558 struct field *fdst, *fsrc;
3560 CHECK(n_tokens == 3, EINVAL);
3562 fdst = header_field_parse(p, dst, &hdst);
3563 CHECK(fdst && (fdst->n_bits == 16), EINVAL); /* Checksum field is 16-bit. */
3566 fsrc = header_field_parse(p, src, &hsrc);
3568 instr->type = INSTR_ALU_CKADD_FIELD;
3569 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
3570 instr->alu.dst.n_bits = fdst->n_bits;
3571 instr->alu.dst.offset = fdst->offset / 8;
3572 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
3573 instr->alu.src.n_bits = fsrc->n_bits;
3574 instr->alu.src.offset = fsrc->offset / 8;
3578 /* CKADD_STRUCT, CKADD_STRUCT20. */
3579 hsrc = header_parse(p, src);
3580 CHECK(hsrc, EINVAL);
3582 instr->type = INSTR_ALU_CKADD_STRUCT;
3583 if ((hsrc->st->n_bits / 8) == 20)
3584 instr->type = INSTR_ALU_CKADD_STRUCT20;
3586 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
3587 instr->alu.dst.n_bits = fdst->n_bits;
3588 instr->alu.dst.offset = fdst->offset / 8;
/* For struct variants, src covers the whole header (size in bits). */
3589 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
3590 instr->alu.src.n_bits = hsrc->st->n_bits;
3591 instr->alu.src.offset = 0; /* Unused. */
/*
 * Translate the "cksub dst src" (checksum subtract) instruction.
 * dst must be a 16-bit header field (the checksum); src must be a
 * header field whose contribution is removed from the checksum.
 * Returns 0 on success or -EINVAL via CHECK.
 */
3596 instr_alu_cksub_translate(struct rte_swx_pipeline *p,
3597 struct action *action __rte_unused,
3600 struct instruction *instr,
3601 struct instruction_data *data __rte_unused)
3603 char *dst = tokens[1], *src = tokens[2];
3604 struct header *hdst, *hsrc;
3605 struct field *fdst, *fsrc;
3607 CHECK(n_tokens == 3, EINVAL);
3609 fdst = header_field_parse(p, dst, &hdst);
3610 CHECK(fdst && (fdst->n_bits == 16), EINVAL); /* Checksum field is 16-bit. */
3612 fsrc = header_field_parse(p, src, &hsrc);
3613 CHECK(fsrc, EINVAL);
3615 instr->type = INSTR_ALU_CKSUB_FIELD;
3616 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
3617 instr->alu.dst.n_bits = fdst->n_bits;
3618 instr->alu.dst.offset = fdst->offset / 8;
3619 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
3620 instr->alu.src.n_bits = fsrc->n_bits;
3621 instr->alu.src.offset = fsrc->offset / 8;
/*
 * Translate the "shl dst src" (shift left) instruction.
 * Variant selection mirrors the add/sub translators: register variants
 * (SHL, SHL_HM, SHL_MH, SHL_HH) are picked from the h/m prefixes when
 * src is a field, immediate variants (SHL_MI, SHL_HI) when src is a
 * number. Returns 0 or -EINVAL via CHECK.
 */
3626 instr_alu_shl_translate(struct rte_swx_pipeline *p,
3627 struct action *action,
3630 struct instruction *instr,
3631 struct instruction_data *data __rte_unused)
3633 char *dst = tokens[1], *src = tokens[2];
3634 struct field *fdst, *fsrc;
3635 uint32_t dst_struct_id, src_struct_id, src_val;
3637 CHECK(n_tokens == 3, EINVAL);
3639 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3640 CHECK(fdst, EINVAL);
3642 /* SHL, SHL_HM, SHL_MH, SHL_HH. */
3643 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3645 instr->type = INSTR_ALU_SHL;
3646 if (dst[0] == 'h' && src[0] == 'm')
3647 instr->type = INSTR_ALU_SHL_HM;
3648 if (dst[0] == 'm' && src[0] == 'h')
3649 instr->type = INSTR_ALU_SHL_MH;
3650 if (dst[0] == 'h' && src[0] == 'h')
3651 instr->type = INSTR_ALU_SHL_HH;
3653 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3654 instr->alu.dst.n_bits = fdst->n_bits;
3655 instr->alu.dst.offset = fdst->offset / 8;
3656 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3657 instr->alu.src.n_bits = fsrc->n_bits;
3658 instr->alu.src.offset = fsrc->offset / 8;
3662 /* SHL_MI, SHL_HI. */
3663 src_val = strtoul(src, &src, 0);
3664 CHECK(!src[0], EINVAL); /* Reject trailing garbage after the number. */
3666 instr->type = INSTR_ALU_SHL_MI;
3668 instr->type = INSTR_ALU_SHL_HI;
3670 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3671 instr->alu.dst.n_bits = fdst->n_bits;
3672 instr->alu.dst.offset = fdst->offset / 8;
3673 instr->alu.src_val = (uint32_t)src_val;
/*
 * Translate the "shr dst src" (shift right) instruction.
 * Same variant-selection scheme as shl: register variants (SHR,
 * SHR_HM, SHR_MH, SHR_HH) from the h/m prefixes, immediate variants
 * (SHR_MI, SHR_HI) when src is a number. Returns 0 or -EINVAL.
 */
3678 instr_alu_shr_translate(struct rte_swx_pipeline *p,
3679 struct action *action,
3682 struct instruction *instr,
3683 struct instruction_data *data __rte_unused)
3685 char *dst = tokens[1], *src = tokens[2];
3686 struct field *fdst, *fsrc;
3687 uint32_t dst_struct_id, src_struct_id, src_val;
3689 CHECK(n_tokens == 3, EINVAL);
3691 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3692 CHECK(fdst, EINVAL);
3694 /* SHR, SHR_HM, SHR_MH, SHR_HH. */
3695 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3697 instr->type = INSTR_ALU_SHR;
3698 if (dst[0] == 'h' && src[0] == 'm')
3699 instr->type = INSTR_ALU_SHR_HM;
3700 if (dst[0] == 'm' && src[0] == 'h')
3701 instr->type = INSTR_ALU_SHR_MH;
3702 if (dst[0] == 'h' && src[0] == 'h')
3703 instr->type = INSTR_ALU_SHR_HH;
3705 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3706 instr->alu.dst.n_bits = fdst->n_bits;
3707 instr->alu.dst.offset = fdst->offset / 8;
3708 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3709 instr->alu.src.n_bits = fsrc->n_bits;
3710 instr->alu.src.offset = fsrc->offset / 8;
3714 /* SHR_MI, SHR_HI. */
3715 src_val = strtoul(src, &src, 0);
3716 CHECK(!src[0], EINVAL); /* Reject trailing garbage after the number. */
3718 instr->type = INSTR_ALU_SHR_MI;
3720 instr->type = INSTR_ALU_SHR_HI;
3722 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3723 instr->alu.dst.n_bits = fdst->n_bits;
3724 instr->alu.dst.offset = fdst->offset / 8;
3725 instr->alu.src_val = (uint32_t)src_val;
/*
 * Translate the "and dst src" (bitwise AND) instruction.
 * Unlike add/sub/shl/shr, AND is endianness-symmetric, so only a single
 * byte-swap variant (AND_S) is needed when exactly one of dst/src is a
 * header field. For an immediate src, the value is byte-swapped with
 * htonl (NOTE(review): presumably only when dst is a header field — the
 * guarding condition is not visible in this chunk; confirm upstream).
 */
3730 instr_alu_and_translate(struct rte_swx_pipeline *p,
3731 struct action *action,
3734 struct instruction *instr,
3735 struct instruction_data *data __rte_unused)
3737 char *dst = tokens[1], *src = tokens[2];
3738 struct field *fdst, *fsrc;
3739 uint32_t dst_struct_id, src_struct_id, src_val;
3741 CHECK(n_tokens == 3, EINVAL);
3743 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3744 CHECK(fdst, EINVAL);
3747 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3749 instr->type = INSTR_ALU_AND;
3750 if ((dst[0] == 'h' && src[0] != 'h') ||
3751 (dst[0] != 'h' && src[0] == 'h'))
3752 instr->type = INSTR_ALU_AND_S;
3754 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3755 instr->alu.dst.n_bits = fdst->n_bits;
3756 instr->alu.dst.offset = fdst->offset / 8;
3757 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3758 instr->alu.src.n_bits = fsrc->n_bits;
3759 instr->alu.src.offset = fsrc->offset / 8;
3764 src_val = strtoul(src, &src, 0);
3765 CHECK(!src[0], EINVAL);
3768 src_val = htonl(src_val);
3770 instr->type = INSTR_ALU_AND_I;
3771 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3772 instr->alu.dst.n_bits = fdst->n_bits;
3773 instr->alu.dst.offset = fdst->offset / 8;
3774 instr->alu.src_val = (uint32_t)src_val;
/*
 * Translate the "or dst src" (bitwise OR) instruction.
 * Same scheme as the AND translator: OR_S when exactly one operand is a
 * header field, OR_I for an immediate src (byte-swapped with htonl;
 * NOTE(review): the guarding condition for the swap is not visible in
 * this chunk — confirm upstream).
 */
3779 instr_alu_or_translate(struct rte_swx_pipeline *p,
3780 struct action *action,
3783 struct instruction *instr,
3784 struct instruction_data *data __rte_unused)
3786 char *dst = tokens[1], *src = tokens[2];
3787 struct field *fdst, *fsrc;
3788 uint32_t dst_struct_id, src_struct_id, src_val;
3790 CHECK(n_tokens == 3, EINVAL);
3792 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3793 CHECK(fdst, EINVAL);
3796 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3798 instr->type = INSTR_ALU_OR;
3799 if ((dst[0] == 'h' && src[0] != 'h') ||
3800 (dst[0] != 'h' && src[0] == 'h'))
3801 instr->type = INSTR_ALU_OR_S;
3803 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3804 instr->alu.dst.n_bits = fdst->n_bits;
3805 instr->alu.dst.offset = fdst->offset / 8;
3806 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3807 instr->alu.src.n_bits = fsrc->n_bits;
3808 instr->alu.src.offset = fsrc->offset / 8;
3813 src_val = strtoul(src, &src, 0);
3814 CHECK(!src[0], EINVAL);
3817 src_val = htonl(src_val);
3819 instr->type = INSTR_ALU_OR_I;
3820 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3821 instr->alu.dst.n_bits = fdst->n_bits;
3822 instr->alu.dst.offset = fdst->offset / 8;
3823 instr->alu.src_val = (uint32_t)src_val;
/*
 * Translate the "xor dst src" (bitwise XOR) instruction.
 * Same scheme as the AND/OR translators: XOR_S when exactly one operand
 * is a header field, XOR_I for an immediate src (byte-swapped with
 * htonl; NOTE(review): swap condition not visible in this chunk).
 */
3828 instr_alu_xor_translate(struct rte_swx_pipeline *p,
3829 struct action *action,
3832 struct instruction *instr,
3833 struct instruction_data *data __rte_unused)
3835 char *dst = tokens[1], *src = tokens[2];
3836 struct field *fdst, *fsrc;
3837 uint32_t dst_struct_id, src_struct_id, src_val;
3839 CHECK(n_tokens == 3, EINVAL);
3841 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3842 CHECK(fdst, EINVAL);
3845 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3847 instr->type = INSTR_ALU_XOR;
3848 if ((dst[0] == 'h' && src[0] != 'h') ||
3849 (dst[0] != 'h' && src[0] == 'h'))
3850 instr->type = INSTR_ALU_XOR_S;
3852 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3853 instr->alu.dst.n_bits = fdst->n_bits;
3854 instr->alu.dst.offset = fdst->offset / 8;
3855 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3856 instr->alu.src.n_bits = fsrc->n_bits;
3857 instr->alu.src.offset = fsrc->offset / 8;
3862 src_val = strtoul(src, &src, 0);
3863 CHECK(!src[0], EINVAL);
3866 src_val = htonl(src_val);
3868 instr->type = INSTR_ALU_XOR_I;
3869 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3870 instr->alu.dst.n_bits = fdst->n_bits;
3871 instr->alu.dst.offset = fdst->offset / 8;
3872 instr->alu.src_val = (uint32_t)src_val;
/*
 * Per-thread execution handlers for the add/sub ALU instruction
 * variants (register: plain/mh/hm/hh; immediate: mi/hi). Each handler
 * loads the current thread and its instruction pointer, then emits a
 * trace line identifying the variant before executing it.
 */
3877 instr_alu_add_exec(struct rte_swx_pipeline *p)
3879 struct thread *t = &p->threads[p->thread_id];
3880 struct instruction *ip = t->ip;
3882 TRACE("[Thread %2u] add\n", p->thread_id);
3892 instr_alu_add_mh_exec(struct rte_swx_pipeline *p)
3894 struct thread *t = &p->threads[p->thread_id];
3895 struct instruction *ip = t->ip;
3897 TRACE("[Thread %2u] add (mh)\n", p->thread_id);
3907 instr_alu_add_hm_exec(struct rte_swx_pipeline *p)
3909 struct thread *t = &p->threads[p->thread_id];
3910 struct instruction *ip = t->ip;
3912 TRACE("[Thread %2u] add (hm)\n", p->thread_id);
3922 instr_alu_add_hh_exec(struct rte_swx_pipeline *p)
3924 struct thread *t = &p->threads[p->thread_id];
3925 struct instruction *ip = t->ip;
3927 TRACE("[Thread %2u] add (hh)\n", p->thread_id);
3937 instr_alu_add_mi_exec(struct rte_swx_pipeline *p)
3939 struct thread *t = &p->threads[p->thread_id];
3940 struct instruction *ip = t->ip;
3942 TRACE("[Thread %2u] add (mi)\n", p->thread_id);
3952 instr_alu_add_hi_exec(struct rte_swx_pipeline *p)
3954 struct thread *t = &p->threads[p->thread_id];
3955 struct instruction *ip = t->ip;
3957 TRACE("[Thread %2u] add (hi)\n", p->thread_id);
3967 instr_alu_sub_exec(struct rte_swx_pipeline *p)
3969 struct thread *t = &p->threads[p->thread_id];
3970 struct instruction *ip = t->ip;
3972 TRACE("[Thread %2u] sub\n", p->thread_id);
3982 instr_alu_sub_mh_exec(struct rte_swx_pipeline *p)
3984 struct thread *t = &p->threads[p->thread_id];
3985 struct instruction *ip = t->ip;
3987 TRACE("[Thread %2u] sub (mh)\n", p->thread_id);
3997 instr_alu_sub_hm_exec(struct rte_swx_pipeline *p)
3999 struct thread *t = &p->threads[p->thread_id];
4000 struct instruction *ip = t->ip;
4002 TRACE("[Thread %2u] sub (hm)\n", p->thread_id);
4012 instr_alu_sub_hh_exec(struct rte_swx_pipeline *p)
4014 struct thread *t = &p->threads[p->thread_id];
4015 struct instruction *ip = t->ip;
4017 TRACE("[Thread %2u] sub (hh)\n", p->thread_id);
4027 instr_alu_sub_mi_exec(struct rte_swx_pipeline *p)
4029 struct thread *t = &p->threads[p->thread_id];
4030 struct instruction *ip = t->ip;
4032 TRACE("[Thread %2u] sub (mi)\n", p->thread_id);
4042 instr_alu_sub_hi_exec(struct rte_swx_pipeline *p)
4044 struct thread *t = &p->threads[p->thread_id];
4045 struct instruction *ip = t->ip;
4047 TRACE("[Thread %2u] sub (hi)\n", p->thread_id);
/*
 * Per-thread execution handlers for the shl/shr ALU instruction
 * variants (register: plain/mh/hm/hh; immediate: mi/hi). Each handler
 * loads the current thread and its instruction pointer, then emits a
 * trace line identifying the variant before executing it.
 */
4057 instr_alu_shl_exec(struct rte_swx_pipeline *p)
4059 struct thread *t = &p->threads[p->thread_id];
4060 struct instruction *ip = t->ip;
4062 TRACE("[Thread %2u] shl\n", p->thread_id);
4072 instr_alu_shl_mh_exec(struct rte_swx_pipeline *p)
4074 struct thread *t = &p->threads[p->thread_id];
4075 struct instruction *ip = t->ip;
4077 TRACE("[Thread %2u] shl (mh)\n", p->thread_id);
4087 instr_alu_shl_hm_exec(struct rte_swx_pipeline *p)
4089 struct thread *t = &p->threads[p->thread_id];
4090 struct instruction *ip = t->ip;
4092 TRACE("[Thread %2u] shl (hm)\n", p->thread_id);
4102 instr_alu_shl_hh_exec(struct rte_swx_pipeline *p)
4104 struct thread *t = &p->threads[p->thread_id];
4105 struct instruction *ip = t->ip;
4107 TRACE("[Thread %2u] shl (hh)\n", p->thread_id);
4117 instr_alu_shl_mi_exec(struct rte_swx_pipeline *p)
4119 struct thread *t = &p->threads[p->thread_id];
4120 struct instruction *ip = t->ip;
4122 TRACE("[Thread %2u] shl (mi)\n", p->thread_id);
4132 instr_alu_shl_hi_exec(struct rte_swx_pipeline *p)
4134 struct thread *t = &p->threads[p->thread_id];
4135 struct instruction *ip = t->ip;
4137 TRACE("[Thread %2u] shl (hi)\n", p->thread_id);
4147 instr_alu_shr_exec(struct rte_swx_pipeline *p)
4149 struct thread *t = &p->threads[p->thread_id];
4150 struct instruction *ip = t->ip;
4152 TRACE("[Thread %2u] shr\n", p->thread_id);
4162 instr_alu_shr_mh_exec(struct rte_swx_pipeline *p)
4164 struct thread *t = &p->threads[p->thread_id];
4165 struct instruction *ip = t->ip;
4167 TRACE("[Thread %2u] shr (mh)\n", p->thread_id);
4177 instr_alu_shr_hm_exec(struct rte_swx_pipeline *p)
4179 struct thread *t = &p->threads[p->thread_id];
4180 struct instruction *ip = t->ip;
4182 TRACE("[Thread %2u] shr (hm)\n", p->thread_id);
4192 instr_alu_shr_hh_exec(struct rte_swx_pipeline *p)
4194 struct thread *t = &p->threads[p->thread_id];
4195 struct instruction *ip = t->ip;
4197 TRACE("[Thread %2u] shr (hh)\n", p->thread_id);
4207 instr_alu_shr_mi_exec(struct rte_swx_pipeline *p)
4209 struct thread *t = &p->threads[p->thread_id];
4210 struct instruction *ip = t->ip;
4212 TRACE("[Thread %2u] shr (mi)\n", p->thread_id);
4222 instr_alu_shr_hi_exec(struct rte_swx_pipeline *p)
4224 struct thread *t = &p->threads[p->thread_id];
4225 struct instruction *ip = t->ip;
4227 TRACE("[Thread %2u] shr (hi)\n", p->thread_id);
/*
 * Per-thread execution handlers for the bitwise and/or/xor ALU
 * instruction variants (plain, _s byte-swap, _i immediate). Each
 * handler loads the current thread and its instruction pointer, then
 * emits a trace line identifying the variant before executing it.
 */
4237 instr_alu_and_exec(struct rte_swx_pipeline *p)
4239 struct thread *t = &p->threads[p->thread_id];
4240 struct instruction *ip = t->ip;
4242 TRACE("[Thread %2u] and\n", p->thread_id);
4252 instr_alu_and_s_exec(struct rte_swx_pipeline *p)
4254 struct thread *t = &p->threads[p->thread_id];
4255 struct instruction *ip = t->ip;
4257 TRACE("[Thread %2u] and (s)\n", p->thread_id);
4267 instr_alu_and_i_exec(struct rte_swx_pipeline *p)
4269 struct thread *t = &p->threads[p->thread_id];
4270 struct instruction *ip = t->ip;
4272 TRACE("[Thread %2u] and (i)\n", p->thread_id);
4282 instr_alu_or_exec(struct rte_swx_pipeline *p)
4284 struct thread *t = &p->threads[p->thread_id];
4285 struct instruction *ip = t->ip;
4287 TRACE("[Thread %2u] or\n", p->thread_id);
4297 instr_alu_or_s_exec(struct rte_swx_pipeline *p)
4299 struct thread *t = &p->threads[p->thread_id];
4300 struct instruction *ip = t->ip;
4302 TRACE("[Thread %2u] or (s)\n", p->thread_id);
4312 instr_alu_or_i_exec(struct rte_swx_pipeline *p)
4314 struct thread *t = &p->threads[p->thread_id];
4315 struct instruction *ip = t->ip;
4317 TRACE("[Thread %2u] or (i)\n", p->thread_id);
4327 instr_alu_xor_exec(struct rte_swx_pipeline *p)
4329 struct thread *t = &p->threads[p->thread_id];
4330 struct instruction *ip = t->ip;
4332 TRACE("[Thread %2u] xor\n", p->thread_id);
4342 instr_alu_xor_s_exec(struct rte_swx_pipeline *p)
4344 struct thread *t = &p->threads[p->thread_id];
4345 struct instruction *ip = t->ip;
4347 TRACE("[Thread %2u] xor (s)\n", p->thread_id);
4357 instr_alu_xor_i_exec(struct rte_swx_pipeline *p)
4359 struct thread *t = &p->threads[p->thread_id];
4360 struct instruction *ip = t->ip;
4362 TRACE("[Thread %2u] xor (i)\n", p->thread_id);
/*
 * Execute "ckadd dst src" for a source field: fold the (up to 64-bit)
 * source value into the 16-bit one's complement checksum at dst.
 * The folding steps below deliberately track the worst-case bit width
 * of the running sum r to show no carry can be lost.
 */
4372 instr_alu_ckadd_field_exec(struct rte_swx_pipeline *p)
4374 struct thread *t = &p->threads[p->thread_id];
4375 struct instruction *ip = t->ip;
4376 uint8_t *dst_struct, *src_struct;
4377 uint16_t *dst16_ptr, dst;
4378 uint64_t *src64_ptr, src64, src64_mask, src;
4381 TRACE("[Thread %2u] ckadd (field)\n", p->thread_id);
/* Locate the 16-bit checksum destination inside its struct. */
4384 dst_struct = t->structs[ip->alu.dst.struct_id];
4385 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
/* Load the source and mask it down to its declared bit width. */
4388 src_struct = t->structs[ip->alu.src.struct_id];
4389 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
4391 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
4392 src = src64 & src64_mask;
4397 /* The first input (r) is a 16-bit number. The second and the third
4398 * inputs are 32-bit numbers. In the worst case scenario, the sum of the
4399 * three numbers (output r) is a 34-bit number.
4401 r += (src >> 32) + (src & 0xFFFFFFFF);
4403 /* The first input is a 16-bit number. The second input is an 18-bit
4404 * number. In the worst case scenario, the sum of the two numbers is a
4407 r = (r & 0xFFFF) + (r >> 16);
4409 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
4410 * a 3-bit number (0 .. 7). Their sum is a 17-bit number (0 .. 0x10006).
4412 r = (r & 0xFFFF) + (r >> 16);
4414 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
4415 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
4416 * 0x10006), the output r is (0 .. 7). So no carry bit can be generated,
4417 * therefore the output r is always a 16-bit number.
4419 r = (r & 0xFFFF) + (r >> 16);
4424 *dst16_ptr = (uint16_t)r;
/*
 * Execute "cksub dst src" for a source field: remove the source value's
 * contribution from the 16-bit one's complement checksum at dst.
 * Underflow is avoided by biasing the minuend with a large multiple of
 * the 0xFFFF modulus before subtracting (see inline proof comments).
 */
4431 instr_alu_cksub_field_exec(struct rte_swx_pipeline *p)
4433 struct thread *t = &p->threads[p->thread_id];
4434 struct instruction *ip = t->ip;
4435 uint8_t *dst_struct, *src_struct;
4436 uint16_t *dst16_ptr, dst;
4437 uint64_t *src64_ptr, src64, src64_mask, src;
4440 TRACE("[Thread %2u] cksub (field)\n", p->thread_id);
/* Locate the 16-bit checksum destination inside its struct. */
4443 dst_struct = t->structs[ip->alu.dst.struct_id];
4444 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
/* Load the source and mask it down to its declared bit width. */
4447 src_struct = t->structs[ip->alu.src.struct_id];
4448 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
4450 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
4451 src = src64 & src64_mask;
4456 /* Subtraction in 1's complement arithmetic (i.e. a '- b) is the same as
4457 * the following sequence of operations in 2's complement arithmetic:
4458 * a '- b = (a - b) % 0xFFFF.
4460 * In order to prevent an underflow for the below subtraction, in which
4461 * a 33-bit number (the subtrahend) is taken out of a 16-bit number (the
4462 * minuend), we first add a multiple of the 0xFFFF modulus to the
4463 * minuend. The number we add to the minuend needs to be a 34-bit number
4464 * or higher, so for readability reasons we picked the 36-bit multiple.
4465 * We are effectively turning the 16-bit minuend into a 36-bit number:
4466 * (a - b) % 0xFFFF = (a + 0xFFFF00000 - b) % 0xFFFF.
4468 r += 0xFFFF00000ULL; /* The output r is a 36-bit number. */
4470 /* A 33-bit number is subtracted from a 36-bit number (the input r). The
4471 * result (the output r) is a 36-bit number.
4473 r -= (src >> 32) + (src & 0xFFFFFFFF);
4475 /* The first input is a 16-bit number. The second input is a 20-bit
4476 * number. Their sum is a 21-bit number.
4478 r = (r & 0xFFFF) + (r >> 16);
4480 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
4481 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1001E).
4483 r = (r & 0xFFFF) + (r >> 16);
4485 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
4486 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
4487 * 0x1001E), the output r is (0 .. 31). So no carry bit can be
4488 * generated, therefore the output r is always a 16-bit number.
4490 r = (r & 0xFFFF) + (r >> 16);
4495 *dst16_ptr = (uint16_t)r;
/*
 * Execute "ckadd dst h.header" specialized for a 20-byte header (the
 * IPv4 header without options): sum the five 32-bit words with two
 * accumulators, fold to 16 bits, and store the one's complement
 * checksum at dst. The 0xFFFF substitution implements the checksum
 * convention that an all-ones result replaces zero.
 */
4502 instr_alu_ckadd_struct20_exec(struct rte_swx_pipeline *p)
4504 struct thread *t = &p->threads[p->thread_id];
4505 struct instruction *ip = t->ip;
4506 uint8_t *dst_struct, *src_struct;
4507 uint16_t *dst16_ptr;
4508 uint32_t *src32_ptr;
4511 TRACE("[Thread %2u] ckadd (struct of 20 bytes)\n", p->thread_id);
4514 dst_struct = t->structs[ip->alu.dst.struct_id];
4515 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
4517 src_struct = t->structs[ip->alu.src.struct_id];
4518 src32_ptr = (uint32_t *)&src_struct[0];
4520 r0 = src32_ptr[0]; /* r0 is a 32-bit number. */
4521 r1 = src32_ptr[1]; /* r1 is a 32-bit number. */
4522 r0 += src32_ptr[2]; /* The output r0 is a 33-bit number. */
4523 r1 += src32_ptr[3]; /* The output r1 is a 33-bit number. */
4524 r0 += r1 + src32_ptr[4]; /* The output r0 is a 35-bit number. */
4526 /* The first input is a 16-bit number. The second input is a 19-bit
4527 * number. Their sum is a 20-bit number.
4529 r0 = (r0 & 0xFFFF) + (r0 >> 16);
4531 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
4532 * a 4-bit number (0 .. 15). The sum is a 17-bit number (0 .. 0x1000E).
4534 r0 = (r0 & 0xFFFF) + (r0 >> 16);
4536 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
4537 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
4538 * 0x1000E), the output r is (0 .. 15). So no carry bit can be
4539 * generated, therefore the output r is always a 16-bit number.
4541 r0 = (r0 & 0xFFFF) + (r0 >> 16);
4544 r0 = r0 ? r0 : 0xFFFF; /* Checksum zero is transmitted as 0xFFFF. */
4546 *dst16_ptr = (uint16_t)r0;
/*
 * Execute "ckadd dst h.header" for an arbitrary-size header: sum all
 * 32-bit words of the source header, fold to 16 bits, and store the
 * one's complement checksum at dst.
 */
4553 instr_alu_ckadd_struct_exec(struct rte_swx_pipeline *p)
4555 struct thread *t = &p->threads[p->thread_id];
4556 struct instruction *ip = t->ip;
4557 uint8_t *dst_struct, *src_struct;
4558 uint16_t *dst16_ptr;
4559 uint32_t *src32_ptr;
4563 TRACE("[Thread %2u] ckadd (struct)\n", p->thread_id);
4566 dst_struct = t->structs[ip->alu.dst.struct_id];
4567 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
4569 src_struct = t->structs[ip->alu.src.struct_id];
4570 src32_ptr = (uint32_t *)&src_struct[0];
4572 /* The max number of 32-bit words in a 256-byte header is 8 = 2^3.
4573 * Therefore, in the worst case scenario, a 35-bit number is added to a
4574 * 16-bit number (the input r), so the output r is 36-bit number.
4576 for (i = 0; i < ip->alu.src.n_bits / 32; i++, src32_ptr++)
4579 /* The first input is a 16-bit number. The second input is a 20-bit
4580 * number. Their sum is a 21-bit number.
4582 r = (r & 0xFFFF) + (r >> 16);
4584 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
4585 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1001E).
4587 r = (r & 0xFFFF) + (r >> 16);
4589 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
4590 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
4591 * 0x1001E), the output r is (0 .. 31). So no carry bit can be
4592 * generated, therefore the output r is always a 16-bit number.
4594 r = (r & 0xFFFF) + (r >> 16);
4599 *dst16_ptr = (uint16_t)r;
/* Forward declaration: look up an action by name; used by the
 * jmpa/jmpna translators below.
 */
4608 static struct action *
4609 action_find(struct rte_swx_pipeline *p, const char *name);
/*
 * Translate "jmp LABEL": unconditional jump. The target instruction
 * pointer is left NULL here and resolved after all labels are known.
 */
4612 instr_jmp_translate(struct rte_swx_pipeline *p __rte_unused,
4613 struct action *action __rte_unused,
4616 struct instruction *instr,
4617 struct instruction_data *data)
4619 CHECK(n_tokens == 2, EINVAL);
4621 strcpy(data->jmp_label, tokens[1]);
4623 instr->type = INSTR_JMP;
4624 instr->jmp.ip = NULL; /* Resolved later. */
/*
 * Translate "jmpv LABEL h.header": jump to LABEL when the given header
 * is valid. Takes 3 tokens; the jump target is resolved later.
 */
4629 instr_jmp_valid_translate(struct rte_swx_pipeline *p,
4630 struct action *action __rte_unused,
4633 struct instruction *instr,
4634 struct instruction_data *data)
4638 CHECK(n_tokens == 3, EINVAL);
4640 strcpy(data->jmp_label, tokens[1]);
4642 h = header_parse(p, tokens[2]);
4645 instr->type = INSTR_JMP_VALID;
4646 instr->jmp.ip = NULL; /* Resolved later. */
4647 instr->jmp.header_id = h->id;
4652 instr_jmp_invalid_translate(struct rte_swx_pipeline *p,
4653 struct action *action __rte_unused,
4656 struct instruction *instr,
4657 struct instruction_data *data)
4661 CHECK(n_tokens == 2, EINVAL);
4663 strcpy(data->jmp_label, tokens[1]);
4665 h = header_parse(p, tokens[2]);
4668 instr->type = INSTR_JMP_INVALID;
4669 instr->jmp.ip = NULL; /* Resolved later. */
4670 instr->jmp.header_id = h->id;
/*
 * Translate "jmph LABEL": jump to LABEL on table lookup hit. Only valid
 * outside actions (CHECK(!action)); the target is resolved later.
 */
4675 instr_jmp_hit_translate(struct rte_swx_pipeline *p __rte_unused,
4676 struct action *action,
4679 struct instruction *instr,
4680 struct instruction_data *data)
4682 CHECK(!action, EINVAL);
4683 CHECK(n_tokens == 2, EINVAL);
4685 strcpy(data->jmp_label, tokens[1]);
4687 instr->type = INSTR_JMP_HIT;
4688 instr->jmp.ip = NULL; /* Resolved later. */
/*
 * Translate "jmpnh LABEL": jump to LABEL on table lookup miss. Only
 * valid outside actions; the target is resolved later.
 */
4693 instr_jmp_miss_translate(struct rte_swx_pipeline *p __rte_unused,
4694 struct action *action,
4697 struct instruction *instr,
4698 struct instruction_data *data)
4700 CHECK(!action, EINVAL);
4701 CHECK(n_tokens == 2, EINVAL);
4703 strcpy(data->jmp_label, tokens[1]);
4705 instr->type = INSTR_JMP_MISS;
4706 instr->jmp.ip = NULL; /* Resolved later. */
/*
 * Translate "jmpa LABEL ACTION": jump to LABEL when the last table
 * lookup selected the named action. Only valid outside actions; the
 * action name is resolved here, the jump target later.
 */
4711 instr_jmp_action_hit_translate(struct rte_swx_pipeline *p,
4712 struct action *action,
4715 struct instruction *instr,
4716 struct instruction_data *data)
4720 CHECK(!action, EINVAL);
4721 CHECK(n_tokens == 3, EINVAL);
4723 strcpy(data->jmp_label, tokens[1]);
4725 a = action_find(p, tokens[2]);
4728 instr->type = INSTR_JMP_ACTION_HIT;
4729 instr->jmp.ip = NULL; /* Resolved later. */
4730 instr->jmp.action_id = a->id;
/*
 * Translate "jmpna LABEL ACTION": jump to LABEL when the last table
 * lookup did NOT select the named action. Only valid outside actions;
 * the action name is resolved here, the jump target later.
 */
4735 instr_jmp_action_miss_translate(struct rte_swx_pipeline *p,
4736 struct action *action,
4739 struct instruction *instr,
4740 struct instruction_data *data)
4744 CHECK(!action, EINVAL);
4745 CHECK(n_tokens == 3, EINVAL);
4747 strcpy(data->jmp_label, tokens[1]);
4749 a = action_find(p, tokens[2]);
4752 instr->type = INSTR_JMP_ACTION_MISS;
4753 instr->jmp.ip = NULL; /* Resolved later. */
4754 instr->jmp.action_id = a->id;
/*
 * Translate "jmpeq LABEL a b": jump to LABEL when a == b.
 * When b is a field, JMP_EQ_S is used if exactly one of a/b is a header
 * field (endianness mismatch); when b is an immediate, JMP_EQ_I is used
 * and the value is byte-swapped with htonl (NOTE(review): presumably
 * only when a is a header field — the guarding condition is not visible
 * in this chunk).
 */
4759 instr_jmp_eq_translate(struct rte_swx_pipeline *p,
4760 struct action *action,
4763 struct instruction *instr,
4764 struct instruction_data *data)
4766 char *a = tokens[2], *b = tokens[3];
4767 struct field *fa, *fb;
4768 uint32_t a_struct_id, b_struct_id, b_val;
4770 CHECK(n_tokens == 4, EINVAL);
4772 strcpy(data->jmp_label, tokens[1]);
4774 fa = struct_field_parse(p, action, a, &a_struct_id);
4777 /* JMP_EQ or JMP_EQ_S. */
4778 fb = struct_field_parse(p, action, b, &b_struct_id);
4780 instr->type = INSTR_JMP_EQ;
4781 if ((a[0] == 'h' && b[0] != 'h') ||
4782 (a[0] != 'h' && b[0] == 'h'))
4783 instr->type = INSTR_JMP_EQ_S;
4784 instr->jmp.ip = NULL; /* Resolved later. */
4786 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
4787 instr->jmp.a.n_bits = fa->n_bits;
4788 instr->jmp.a.offset = fa->offset / 8;
4789 instr->jmp.b.struct_id = (uint8_t)b_struct_id;
4790 instr->jmp.b.n_bits = fb->n_bits;
4791 instr->jmp.b.offset = fb->offset / 8;
4796 b_val = strtoul(b, &b, 0);
4797 CHECK(!b[0], EINVAL);
4800 b_val = htonl(b_val);
4802 instr->type = INSTR_JMP_EQ_I;
4803 instr->jmp.ip = NULL; /* Resolved later. */
4804 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
4805 instr->jmp.a.n_bits = fa->n_bits;
4806 instr->jmp.a.offset = fa->offset / 8;
4807 instr->jmp.b_val = (uint32_t)b_val;
/*
 * Translate "jmpneq LABEL a b": jump to LABEL when a != b.
 * Mirrors the jmpeq translator: JMP_NEQ_S when exactly one of a/b is a
 * header field, JMP_NEQ_I for an immediate b (byte-swapped with htonl;
 * NOTE(review): swap condition not visible in this chunk).
 */
4812 instr_jmp_neq_translate(struct rte_swx_pipeline *p,
4813 struct action *action,
4816 struct instruction *instr,
4817 struct instruction_data *data)
4819 char *a = tokens[2], *b = tokens[3];
4820 struct field *fa, *fb;
4821 uint32_t a_struct_id, b_struct_id, b_val;
4823 CHECK(n_tokens == 4, EINVAL);
4825 strcpy(data->jmp_label, tokens[1]);
4827 fa = struct_field_parse(p, action, a, &a_struct_id);
4830 /* JMP_NEQ or JMP_NEQ_S. */
4831 fb = struct_field_parse(p, action, b, &b_struct_id);
4833 instr->type = INSTR_JMP_NEQ;
4834 if ((a[0] == 'h' && b[0] != 'h') ||
4835 (a[0] != 'h' && b[0] == 'h'))
4836 instr->type = INSTR_JMP_NEQ_S;
4837 instr->jmp.ip = NULL; /* Resolved later. */
4839 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
4840 instr->jmp.a.n_bits = fa->n_bits;
4841 instr->jmp.a.offset = fa->offset / 8;
4842 instr->jmp.b.struct_id = (uint8_t)b_struct_id;
4843 instr->jmp.b.n_bits = fb->n_bits;
4844 instr->jmp.b.offset = fb->offset / 8;
4849 b_val = strtoul(b, &b, 0);
4850 CHECK(!b[0], EINVAL);
4853 b_val = htonl(b_val);
4855 instr->type = INSTR_JMP_NEQ_I;
4856 instr->jmp.ip = NULL; /* Resolved later. */
4857 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
4858 instr->jmp.a.n_bits = fa->n_bits;
4859 instr->jmp.a.offset = fa->offset / 8;
4860 instr->jmp.b_val = (uint32_t)b_val;
/*
 * Translate "jmplt LABEL a b": jump to LABEL when a < b.
 * Ordered comparison needs full h/m variant selection (LT, LT_HM,
 * LT_MH, LT_HH) like the add/sub translators, plus immediate variants
 * (LT_MI, LT_HI) when b is a number.
 */
4865 instr_jmp_lt_translate(struct rte_swx_pipeline *p,
4866 struct action *action,
4869 struct instruction *instr,
4870 struct instruction_data *data)
4872 char *a = tokens[2], *b = tokens[3];
4873 struct field *fa, *fb;
4874 uint32_t a_struct_id, b_struct_id, b_val;
4876 CHECK(n_tokens == 4, EINVAL);
4878 strcpy(data->jmp_label, tokens[1]);
4880 fa = struct_field_parse(p, action, a, &a_struct_id);
4883 /* JMP_LT, JMP_LT_MH, JMP_LT_HM, JMP_LT_HH. */
4884 fb = struct_field_parse(p, action, b, &b_struct_id);
4886 instr->type = INSTR_JMP_LT;
4887 if (a[0] == 'h' && b[0] == 'm')
4888 instr->type = INSTR_JMP_LT_HM;
4889 if (a[0] == 'm' && b[0] == 'h')
4890 instr->type = INSTR_JMP_LT_MH;
4891 if (a[0] == 'h' && b[0] == 'h')
4892 instr->type = INSTR_JMP_LT_HH;
4893 instr->jmp.ip = NULL; /* Resolved later. */
4895 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
4896 instr->jmp.a.n_bits = fa->n_bits;
4897 instr->jmp.a.offset = fa->offset / 8;
4898 instr->jmp.b.struct_id = (uint8_t)b_struct_id;
4899 instr->jmp.b.n_bits = fb->n_bits;
4900 instr->jmp.b.offset = fb->offset / 8;
4904 /* JMP_LT_MI, JMP_LT_HI. */
4905 b_val = strtoul(b, &b, 0);
4906 CHECK(!b[0], EINVAL);
4908 instr->type = INSTR_JMP_LT_MI;
4910 instr->type = INSTR_JMP_LT_HI;
4911 instr->jmp.ip = NULL; /* Resolved later. */
4913 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
4914 instr->jmp.a.n_bits = fa->n_bits;
4915 instr->jmp.a.offset = fa->offset / 8;
4916 instr->jmp.b_val = (uint32_t)b_val;
/*
 * Translate "jmpgt LABEL a b": jump to LABEL when a > b.
 * Same variant-selection scheme as jmplt: GT, GT_HM, GT_MH, GT_HH for
 * field operands; GT_MI, GT_HI when b is an immediate.
 */
4921 instr_jmp_gt_translate(struct rte_swx_pipeline *p,
4922 struct action *action,
4925 struct instruction *instr,
4926 struct instruction_data *data)
4928 char *a = tokens[2], *b = tokens[3];
4929 struct field *fa, *fb;
4930 uint32_t a_struct_id, b_struct_id, b_val;
4932 CHECK(n_tokens == 4, EINVAL);
4934 strcpy(data->jmp_label, tokens[1]);
4936 fa = struct_field_parse(p, action, a, &a_struct_id);
4939 /* JMP_GT, JMP_GT_MH, JMP_GT_HM, JMP_GT_HH. */
4940 fb = struct_field_parse(p, action, b, &b_struct_id);
4942 instr->type = INSTR_JMP_GT;
4943 if (a[0] == 'h' && b[0] == 'm')
4944 instr->type = INSTR_JMP_GT_HM;
4945 if (a[0] == 'm' && b[0] == 'h')
4946 instr->type = INSTR_JMP_GT_MH;
4947 if (a[0] == 'h' && b[0] == 'h')
4948 instr->type = INSTR_JMP_GT_HH;
4949 instr->jmp.ip = NULL; /* Resolved later. */
4951 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
4952 instr->jmp.a.n_bits = fa->n_bits;
4953 instr->jmp.a.offset = fa->offset / 8;
4954 instr->jmp.b.struct_id = (uint8_t)b_struct_id;
4955 instr->jmp.b.n_bits = fb->n_bits;
4956 instr->jmp.b.offset = fb->offset / 8;
4960 /* JMP_GT_MI, JMP_GT_HI. */
4961 b_val = strtoul(b, &b, 0);
4962 CHECK(!b[0], EINVAL);
4964 instr->type = INSTR_JMP_GT_MI;
4966 instr->type = INSTR_JMP_GT_HI;
4967 instr->jmp.ip = NULL; /* Resolved later. */
4969 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
4970 instr->jmp.a.n_bits = fa->n_bits;
4971 instr->jmp.a.offset = fa->offset / 8;
4972 instr->jmp.b_val = (uint32_t)b_val;
/*
 * Execution handlers for the unconditional and state-based jumps.
 * Each either sets the thread IP to the resolved jump target or falls
 * through to the next instruction (t->ip + 1). The jmph/jmpnh handlers
 * use a branchless two-element table indexed by the hit flag.
 */
4977 instr_jmp_exec(struct rte_swx_pipeline *p)
4979 struct thread *t = &p->threads[p->thread_id];
4980 struct instruction *ip = t->ip;
4982 TRACE("[Thread %2u] jmp\n", p->thread_id);
4984 thread_ip_set(t, ip->jmp.ip);
4988 instr_jmp_valid_exec(struct rte_swx_pipeline *p)
4990 struct thread *t = &p->threads[p->thread_id];
4991 struct instruction *ip = t->ip;
4992 uint32_t header_id = ip->jmp.header_id;
4994 TRACE("[Thread %2u] jmpv\n", p->thread_id);
4996 t->ip = HEADER_VALID(t, header_id) ? ip->jmp.ip : (t->ip + 1);
5000 instr_jmp_invalid_exec(struct rte_swx_pipeline *p)
5002 struct thread *t = &p->threads[p->thread_id];
5003 struct instruction *ip = t->ip;
5004 uint32_t header_id = ip->jmp.header_id;
5006 TRACE("[Thread %2u] jmpnv\n", p->thread_id);
5008 t->ip = HEADER_VALID(t, header_id) ? (t->ip + 1) : ip->jmp.ip;
5012 instr_jmp_hit_exec(struct rte_swx_pipeline *p)
5014 struct thread *t = &p->threads[p->thread_id];
5015 struct instruction *ip = t->ip;
5016 struct instruction *ip_next[] = {t->ip + 1, ip->jmp.ip};
5018 TRACE("[Thread %2u] jmph\n", p->thread_id);
5020 t->ip = ip_next[t->hit];
5024 instr_jmp_miss_exec(struct rte_swx_pipeline *p)
5026 struct thread *t = &p->threads[p->thread_id];
5027 struct instruction *ip = t->ip;
5028 struct instruction *ip_next[] = {ip->jmp.ip, t->ip + 1};
5030 TRACE("[Thread %2u] jmpnh\n", p->thread_id);
5032 t->ip = ip_next[t->hit];
5036 instr_jmp_action_hit_exec(struct rte_swx_pipeline *p)
5038 struct thread *t = &p->threads[p->thread_id];
5039 struct instruction *ip = t->ip;
5041 TRACE("[Thread %2u] jmpa\n", p->thread_id);
5043 t->ip = (ip->jmp.action_id == t->action_id) ? ip->jmp.ip : (t->ip + 1);
5047 instr_jmp_action_miss_exec(struct rte_swx_pipeline *p)
5049 struct thread *t = &p->threads[p->thread_id];
5050 struct instruction *ip = t->ip;
5052 TRACE("[Thread %2u] jmpna\n", p->thread_id);
5054 t->ip = (ip->jmp.action_id == t->action_id) ? (t->ip + 1) : ip->jmp.ip;
5058 instr_jmp_eq_exec(struct rte_swx_pipeline *p)
5060 struct thread *t = &p->threads[p->thread_id];
5061 struct instruction *ip = t->ip;
5063 TRACE("[Thread %2u] jmpeq\n", p->thread_id);
5069 instr_jmp_eq_s_exec(struct rte_swx_pipeline *p)
5071 struct thread *t = &p->threads[p->thread_id];
5072 struct instruction *ip = t->ip;
5074 TRACE("[Thread %2u] jmpeq (s)\n", p->thread_id);
5076 JMP_CMP_S(t, ip, ==);
5080 instr_jmp_eq_i_exec(struct rte_swx_pipeline *p)
5082 struct thread *t = &p->threads[p->thread_id];
5083 struct instruction *ip = t->ip;
5085 TRACE("[Thread %2u] jmpeq (i)\n", p->thread_id);
5087 JMP_CMP_I(t, ip, ==);
5091 instr_jmp_neq_exec(struct rte_swx_pipeline *p)
5093 struct thread *t = &p->threads[p->thread_id];
5094 struct instruction *ip = t->ip;
5096 TRACE("[Thread %2u] jmpneq\n", p->thread_id);
5102 instr_jmp_neq_s_exec(struct rte_swx_pipeline *p)
5104 struct thread *t = &p->threads[p->thread_id];
5105 struct instruction *ip = t->ip;
5107 TRACE("[Thread %2u] jmpneq (s)\n", p->thread_id);
5109 JMP_CMP_S(t, ip, !=);
5113 instr_jmp_neq_i_exec(struct rte_swx_pipeline *p)
5115 struct thread *t = &p->threads[p->thread_id];
5116 struct instruction *ip = t->ip;
5118 TRACE("[Thread %2u] jmpneq (i)\n", p->thread_id);
5120 JMP_CMP_I(t, ip, !=);
5124 instr_jmp_lt_exec(struct rte_swx_pipeline *p)
5126 struct thread *t = &p->threads[p->thread_id];
5127 struct instruction *ip = t->ip;
5129 TRACE("[Thread %2u] jmplt\n", p->thread_id);
5135 instr_jmp_lt_mh_exec(struct rte_swx_pipeline *p)
5137 struct thread *t = &p->threads[p->thread_id];
5138 struct instruction *ip = t->ip;
5140 TRACE("[Thread %2u] jmplt (mh)\n", p->thread_id);
5142 JMP_CMP_MH(t, ip, <);
5146 instr_jmp_lt_hm_exec(struct rte_swx_pipeline *p)
5148 struct thread *t = &p->threads[p->thread_id];
5149 struct instruction *ip = t->ip;
5151 TRACE("[Thread %2u] jmplt (hm)\n", p->thread_id);
5153 JMP_CMP_HM(t, ip, <);
5157 instr_jmp_lt_hh_exec(struct rte_swx_pipeline *p)
5159 struct thread *t = &p->threads[p->thread_id];
5160 struct instruction *ip = t->ip;
5162 TRACE("[Thread %2u] jmplt (hh)\n", p->thread_id);
5164 JMP_CMP_HH(t, ip, <);
5168 instr_jmp_lt_mi_exec(struct rte_swx_pipeline *p)
5170 struct thread *t = &p->threads[p->thread_id];
5171 struct instruction *ip = t->ip;
5173 TRACE("[Thread %2u] jmplt (mi)\n", p->thread_id);
5175 JMP_CMP_MI(t, ip, <);
5179 instr_jmp_lt_hi_exec(struct rte_swx_pipeline *p)
5181 struct thread *t = &p->threads[p->thread_id];
5182 struct instruction *ip = t->ip;
5184 TRACE("[Thread %2u] jmplt (hi)\n", p->thread_id);
5186 JMP_CMP_HI(t, ip, <);
5190 instr_jmp_gt_exec(struct rte_swx_pipeline *p)
5192 struct thread *t = &p->threads[p->thread_id];
5193 struct instruction *ip = t->ip;
5195 TRACE("[Thread %2u] jmpgt\n", p->thread_id);
5201 instr_jmp_gt_mh_exec(struct rte_swx_pipeline *p)
5203 struct thread *t = &p->threads[p->thread_id];
5204 struct instruction *ip = t->ip;
5206 TRACE("[Thread %2u] jmpgt (mh)\n", p->thread_id);
5208 JMP_CMP_MH(t, ip, >);
5212 instr_jmp_gt_hm_exec(struct rte_swx_pipeline *p)
5214 struct thread *t = &p->threads[p->thread_id];
5215 struct instruction *ip = t->ip;
5217 TRACE("[Thread %2u] jmpgt (hm)\n", p->thread_id);
5219 JMP_CMP_HM(t, ip, >);
5223 instr_jmp_gt_hh_exec(struct rte_swx_pipeline *p)
5225 struct thread *t = &p->threads[p->thread_id];
5226 struct instruction *ip = t->ip;
5228 TRACE("[Thread %2u] jmpgt (hh)\n", p->thread_id);
5230 JMP_CMP_HH(t, ip, >);
5234 instr_jmp_gt_mi_exec(struct rte_swx_pipeline *p)
5236 struct thread *t = &p->threads[p->thread_id];
5237 struct instruction *ip = t->ip;
5239 TRACE("[Thread %2u] jmpgt (mi)\n", p->thread_id);
5241 JMP_CMP_MI(t, ip, >);
5245 instr_jmp_gt_hi_exec(struct rte_swx_pipeline *p)
5247 struct thread *t = &p->threads[p->thread_id];
5248 struct instruction *ip = t->ip;
5250 TRACE("[Thread %2u] jmpgt (hi)\n", p->thread_id);
5252 JMP_CMP_HI(t, ip, >);
5259 instr_return_translate(struct rte_swx_pipeline *p __rte_unused,
5260 struct action *action,
5261 char **tokens __rte_unused,
5263 struct instruction *instr,
5264 struct instruction_data *data __rte_unused)
5266 CHECK(action, EINVAL);
5267 CHECK(n_tokens == 1, EINVAL);
5269 instr->type = INSTR_RETURN;
/* Execute the action 'return' instruction: hand control back to the main
 * pipeline program.
 * NOTE(review): fragment — the statement that restores t->ip (presumably
 * from the saved return pointer) is outside the visible lines; confirm
 * against the full source.
 */
5274 instr_return_exec(struct rte_swx_pipeline *p)
5276 struct thread *t = &p->threads[p->thread_id];
5278 TRACE("[Thread %2u] return\n", p->thread_id);
5283 #define RTE_SWX_INSTRUCTION_TOKENS_MAX 16
/*
 * Translate one instruction string into its in-memory representation:
 * tokenize in place, strip the optional leading "<label> :" pair into
 * data->label, then dispatch on the opcode token to the per-instruction
 * translate helper that parses the operands and fills *instr / *data.
 * NOTE(review): fragment — the argument lists of the per-opcode translate
 * calls and the final failure CHECK are outside the visible lines.
 */
5286 instr_translate(struct rte_swx_pipeline *p,
5287 struct action *action,
5289 struct instruction *instr,
5290 struct instruction_data *data)
5292 char *tokens[RTE_SWX_INSTRUCTION_TOKENS_MAX];
5293 int n_tokens = 0, tpos = 0;
5295 /* Parse the instruction string into tokens. */
5299 token = strtok_r(string, " \t\v", &string);
5303 CHECK(n_tokens < RTE_SWX_INSTRUCTION_TOKENS_MAX, EINVAL);
5305 tokens[n_tokens] = token;
5309 CHECK(n_tokens, EINVAL);
5311 /* Handle the optional instruction label. */
5312 if ((n_tokens >= 2) && !strcmp(tokens[1], ":")) {
5313 strcpy(data->label, tokens[0]);
/* At least one token (the opcode) must remain after the label. */
5316 CHECK(n_tokens - tpos, EINVAL);
5319 /* Identify the instruction type. */
5320 if (!strcmp(tokens[tpos], "rx"))
5321 return instr_rx_translate(p,
5328 if (!strcmp(tokens[tpos], "tx"))
5329 return instr_tx_translate(p,
5336 if (!strcmp(tokens[tpos], "extract"))
5337 return instr_hdr_extract_translate(p,
5344 if (!strcmp(tokens[tpos], "emit"))
5345 return instr_hdr_emit_translate(p,
5352 if (!strcmp(tokens[tpos], "validate"))
5353 return instr_hdr_validate_translate(p,
5360 if (!strcmp(tokens[tpos], "invalidate"))
5361 return instr_hdr_invalidate_translate(p,
5368 if (!strcmp(tokens[tpos], "mov"))
5369 return instr_mov_translate(p,
5376 if (!strcmp(tokens[tpos], "dma"))
5377 return instr_dma_translate(p,
5384 if (!strcmp(tokens[tpos], "add"))
5385 return instr_alu_add_translate(p,
5392 if (!strcmp(tokens[tpos], "sub"))
5393 return instr_alu_sub_translate(p,
5400 if (!strcmp(tokens[tpos], "ckadd"))
5401 return instr_alu_ckadd_translate(p,
5408 if (!strcmp(tokens[tpos], "cksub"))
5409 return instr_alu_cksub_translate(p,
5416 if (!strcmp(tokens[tpos], "and"))
5417 return instr_alu_and_translate(p,
5424 if (!strcmp(tokens[tpos], "or"))
5425 return instr_alu_or_translate(p,
5432 if (!strcmp(tokens[tpos], "xor"))
5433 return instr_alu_xor_translate(p,
5440 if (!strcmp(tokens[tpos], "shl"))
5441 return instr_alu_shl_translate(p,
5448 if (!strcmp(tokens[tpos], "shr"))
5449 return instr_alu_shr_translate(p,
5456 if (!strcmp(tokens[tpos], "table"))
5457 return instr_table_translate(p,
5464 if (!strcmp(tokens[tpos], "extern"))
5465 return instr_extern_translate(p,
5472 if (!strcmp(tokens[tpos], "jmp"))
5473 return instr_jmp_translate(p,
5480 if (!strcmp(tokens[tpos], "jmpv"))
5481 return instr_jmp_valid_translate(p,
5488 if (!strcmp(tokens[tpos], "jmpnv"))
5489 return instr_jmp_invalid_translate(p,
5496 if (!strcmp(tokens[tpos], "jmph"))
5497 return instr_jmp_hit_translate(p,
5504 if (!strcmp(tokens[tpos], "jmpnh"))
5505 return instr_jmp_miss_translate(p,
5512 if (!strcmp(tokens[tpos], "jmpa"))
5513 return instr_jmp_action_hit_translate(p,
5520 if (!strcmp(tokens[tpos], "jmpna"))
5521 return instr_jmp_action_miss_translate(p,
5528 if (!strcmp(tokens[tpos], "jmpeq"))
5529 return instr_jmp_eq_translate(p,
5536 if (!strcmp(tokens[tpos], "jmpneq"))
5537 return instr_jmp_neq_translate(p,
5544 if (!strcmp(tokens[tpos], "jmplt"))
5545 return instr_jmp_lt_translate(p,
5552 if (!strcmp(tokens[tpos], "jmpgt"))
5553 return instr_jmp_gt_translate(p,
5560 if (!strcmp(tokens[tpos], "return"))
5561 return instr_return_translate(p,
5571 static struct instruction_data *
5572 label_find(struct instruction_data *data, uint32_t n, const char *label)
5576 for (i = 0; i < n; i++)
5577 if (!strcmp(label, data[i].label))
5584 label_is_used(struct instruction_data *data, uint32_t n, const char *label)
5586 uint32_t count = 0, i;
5591 for (i = 0; i < n; i++)
5592 if (!strcmp(label, data[i].jmp_label))
5599 instr_label_check(struct instruction_data *instruction_data,
5600 uint32_t n_instructions)
5604 /* Check that all instruction labels are unique. */
5605 for (i = 0; i < n_instructions; i++) {
5606 struct instruction_data *data = &instruction_data[i];
5607 char *label = data->label;
5613 for (j = i + 1; j < n_instructions; j++)
5614 CHECK(strcmp(label, data[j].label), EINVAL);
5617 /* Get users for each instruction label. */
5618 for (i = 0; i < n_instructions; i++) {
5619 struct instruction_data *data = &instruction_data[i];
5620 char *label = data->label;
5622 data->n_users = label_is_used(instruction_data,
5631 instr_jmp_resolve(struct instruction *instructions,
5632 struct instruction_data *instruction_data,
5633 uint32_t n_instructions)
5637 for (i = 0; i < n_instructions; i++) {
5638 struct instruction *instr = &instructions[i];
5639 struct instruction_data *data = &instruction_data[i];
5640 struct instruction_data *found;
5642 if (!instruction_is_jmp(instr))
5645 found = label_find(instruction_data,
5648 CHECK(found, EINVAL);
5650 instr->jmp.ip = &instr[found - instruction_data];
/*
 * Translate, validate and link a full instruction program. When a is
 * non-NULL the program belongs to that action, otherwise it is the main
 * pipeline program; on success ownership of the instruction array passes
 * to the action / pipeline accordingly.
 * NOTE(review): fragment — the goto-style error cleanup (freeing instr,
 * data and the per-line strdup copies) is outside the visible lines.
 */
5657 instruction_config(struct rte_swx_pipeline *p,
5659 const char **instructions,
5660 uint32_t n_instructions)
5662 struct instruction *instr = NULL;
5663 struct instruction_data *data = NULL;
5664 char *string = NULL;
5668 CHECK(n_instructions, EINVAL);
5669 CHECK(instructions, EINVAL);
5670 for (i = 0; i < n_instructions; i++)
5671 CHECK(instructions[i], EINVAL);
5673 /* Memory allocation. */
5674 instr = calloc(n_instructions, sizeof(struct instruction));
5680 data = calloc(n_instructions, sizeof(struct instruction_data));
/* Translate each source line on a private, writable copy (strtok_r
 * mutates its input).
 */
5686 for (i = 0; i < n_instructions; i++) {
5687 string = strdup(instructions[i]);
5693 err = instr_translate(p, a, string, &instr[i], &data[i]);
5700 err = instr_label_check(data, n_instructions);
5704 err = instr_jmp_resolve(instr, data, n_instructions);
5711 a->instructions = instr;
5712 a->n_instructions = n_instructions;
5714 p->instructions = instr;
5715 p->n_instructions = n_instructions;
/*
 * Instruction dispatch table: maps each instruction type to its execution
 * handler. Designated initializers keep the mapping robust against enum
 * reordering; entries not listed default to NULL.
 */
5727 typedef void (*instr_exec_t)(struct rte_swx_pipeline *);
5729 static instr_exec_t instruction_table[] = {
5730 [INSTR_RX] = instr_rx_exec,
5731 [INSTR_TX] = instr_tx_exec,
5733 [INSTR_HDR_EXTRACT] = instr_hdr_extract_exec,
5734 [INSTR_HDR_EXTRACT2] = instr_hdr_extract2_exec,
5735 [INSTR_HDR_EXTRACT3] = instr_hdr_extract3_exec,
5736 [INSTR_HDR_EXTRACT4] = instr_hdr_extract4_exec,
5737 [INSTR_HDR_EXTRACT5] = instr_hdr_extract5_exec,
5738 [INSTR_HDR_EXTRACT6] = instr_hdr_extract6_exec,
5739 [INSTR_HDR_EXTRACT7] = instr_hdr_extract7_exec,
5740 [INSTR_HDR_EXTRACT8] = instr_hdr_extract8_exec,
5742 [INSTR_HDR_EMIT] = instr_hdr_emit_exec,
5743 [INSTR_HDR_EMIT_TX] = instr_hdr_emit_tx_exec,
5744 [INSTR_HDR_EMIT2_TX] = instr_hdr_emit2_tx_exec,
5745 [INSTR_HDR_EMIT3_TX] = instr_hdr_emit3_tx_exec,
5746 [INSTR_HDR_EMIT4_TX] = instr_hdr_emit4_tx_exec,
5747 [INSTR_HDR_EMIT5_TX] = instr_hdr_emit5_tx_exec,
5748 [INSTR_HDR_EMIT6_TX] = instr_hdr_emit6_tx_exec,
5749 [INSTR_HDR_EMIT7_TX] = instr_hdr_emit7_tx_exec,
5750 [INSTR_HDR_EMIT8_TX] = instr_hdr_emit8_tx_exec,
5752 [INSTR_HDR_VALIDATE] = instr_hdr_validate_exec,
5753 [INSTR_HDR_INVALIDATE] = instr_hdr_invalidate_exec,
5755 [INSTR_MOV] = instr_mov_exec,
5756 [INSTR_MOV_S] = instr_mov_s_exec,
5757 [INSTR_MOV_I] = instr_mov_i_exec,
5759 [INSTR_DMA_HT] = instr_dma_ht_exec,
5760 [INSTR_DMA_HT2] = instr_dma_ht2_exec,
5761 [INSTR_DMA_HT3] = instr_dma_ht3_exec,
5762 [INSTR_DMA_HT4] = instr_dma_ht4_exec,
5763 [INSTR_DMA_HT5] = instr_dma_ht5_exec,
5764 [INSTR_DMA_HT6] = instr_dma_ht6_exec,
5765 [INSTR_DMA_HT7] = instr_dma_ht7_exec,
5766 [INSTR_DMA_HT8] = instr_dma_ht8_exec,
5768 [INSTR_ALU_ADD] = instr_alu_add_exec,
5769 [INSTR_ALU_ADD_MH] = instr_alu_add_mh_exec,
5770 [INSTR_ALU_ADD_HM] = instr_alu_add_hm_exec,
5771 [INSTR_ALU_ADD_HH] = instr_alu_add_hh_exec,
5772 [INSTR_ALU_ADD_MI] = instr_alu_add_mi_exec,
5773 [INSTR_ALU_ADD_HI] = instr_alu_add_hi_exec,
5775 [INSTR_ALU_SUB] = instr_alu_sub_exec,
5776 [INSTR_ALU_SUB_MH] = instr_alu_sub_mh_exec,
5777 [INSTR_ALU_SUB_HM] = instr_alu_sub_hm_exec,
5778 [INSTR_ALU_SUB_HH] = instr_alu_sub_hh_exec,
5779 [INSTR_ALU_SUB_MI] = instr_alu_sub_mi_exec,
5780 [INSTR_ALU_SUB_HI] = instr_alu_sub_hi_exec,
5782 [INSTR_ALU_CKADD_FIELD] = instr_alu_ckadd_field_exec,
5783 [INSTR_ALU_CKADD_STRUCT] = instr_alu_ckadd_struct_exec,
5784 [INSTR_ALU_CKADD_STRUCT20] = instr_alu_ckadd_struct20_exec,
5785 [INSTR_ALU_CKSUB_FIELD] = instr_alu_cksub_field_exec,
5787 [INSTR_ALU_AND] = instr_alu_and_exec,
5788 [INSTR_ALU_AND_S] = instr_alu_and_s_exec,
5789 [INSTR_ALU_AND_I] = instr_alu_and_i_exec,
5791 [INSTR_ALU_OR] = instr_alu_or_exec,
5792 [INSTR_ALU_OR_S] = instr_alu_or_s_exec,
5793 [INSTR_ALU_OR_I] = instr_alu_or_i_exec,
5795 [INSTR_ALU_XOR] = instr_alu_xor_exec,
5796 [INSTR_ALU_XOR_S] = instr_alu_xor_s_exec,
5797 [INSTR_ALU_XOR_I] = instr_alu_xor_i_exec,
5799 [INSTR_ALU_SHL] = instr_alu_shl_exec,
5800 [INSTR_ALU_SHL_MH] = instr_alu_shl_mh_exec,
5801 [INSTR_ALU_SHL_HM] = instr_alu_shl_hm_exec,
5802 [INSTR_ALU_SHL_HH] = instr_alu_shl_hh_exec,
5803 [INSTR_ALU_SHL_MI] = instr_alu_shl_mi_exec,
5804 [INSTR_ALU_SHL_HI] = instr_alu_shl_hi_exec,
5806 [INSTR_ALU_SHR] = instr_alu_shr_exec,
5807 [INSTR_ALU_SHR_MH] = instr_alu_shr_mh_exec,
5808 [INSTR_ALU_SHR_HM] = instr_alu_shr_hm_exec,
5809 [INSTR_ALU_SHR_HH] = instr_alu_shr_hh_exec,
5810 [INSTR_ALU_SHR_MI] = instr_alu_shr_mi_exec,
5811 [INSTR_ALU_SHR_HI] = instr_alu_shr_hi_exec,
5813 [INSTR_TABLE] = instr_table_exec,
5814 [INSTR_EXTERN_OBJ] = instr_extern_obj_exec,
5815 [INSTR_EXTERN_FUNC] = instr_extern_func_exec,
5817 [INSTR_JMP] = instr_jmp_exec,
5818 [INSTR_JMP_VALID] = instr_jmp_valid_exec,
5819 [INSTR_JMP_INVALID] = instr_jmp_invalid_exec,
5820 [INSTR_JMP_HIT] = instr_jmp_hit_exec,
5821 [INSTR_JMP_MISS] = instr_jmp_miss_exec,
5822 [INSTR_JMP_ACTION_HIT] = instr_jmp_action_hit_exec,
5823 [INSTR_JMP_ACTION_MISS] = instr_jmp_action_miss_exec,
5825 [INSTR_JMP_EQ] = instr_jmp_eq_exec,
5826 [INSTR_JMP_EQ_S] = instr_jmp_eq_s_exec,
5827 [INSTR_JMP_EQ_I] = instr_jmp_eq_i_exec,
5829 [INSTR_JMP_NEQ] = instr_jmp_neq_exec,
5830 [INSTR_JMP_NEQ_S] = instr_jmp_neq_s_exec,
5831 [INSTR_JMP_NEQ_I] = instr_jmp_neq_i_exec,
5833 [INSTR_JMP_LT] = instr_jmp_lt_exec,
5834 [INSTR_JMP_LT_MH] = instr_jmp_lt_mh_exec,
5835 [INSTR_JMP_LT_HM] = instr_jmp_lt_hm_exec,
5836 [INSTR_JMP_LT_HH] = instr_jmp_lt_hh_exec,
5837 [INSTR_JMP_LT_MI] = instr_jmp_lt_mi_exec,
5838 [INSTR_JMP_LT_HI] = instr_jmp_lt_hi_exec,
5840 [INSTR_JMP_GT] = instr_jmp_gt_exec,
5841 [INSTR_JMP_GT_MH] = instr_jmp_gt_mh_exec,
5842 [INSTR_JMP_GT_HM] = instr_jmp_gt_hm_exec,
5843 [INSTR_JMP_GT_HH] = instr_jmp_gt_hh_exec,
5844 [INSTR_JMP_GT_MI] = instr_jmp_gt_mi_exec,
5845 [INSTR_JMP_GT_HI] = instr_jmp_gt_hi_exec,
5847 [INSTR_RETURN] = instr_return_exec,
5851 instr_exec(struct rte_swx_pipeline *p)
5853 struct thread *t = &p->threads[p->thread_id];
5854 struct instruction *ip = t->ip;
5855 instr_exec_t instr = instruction_table[ip->type];
5863 static struct action *
5864 action_find(struct rte_swx_pipeline *p, const char *name)
5866 struct action *elem;
5871 TAILQ_FOREACH(elem, &p->actions, node)
5872 if (strcmp(elem->name, name) == 0)
5878 static struct field *
5879 action_field_find(struct action *a, const char *name)
5881 return a->st ? struct_type_field_find(a->st, name) : NULL;
/* Parse an action-argument reference of the form "t.<field>"; any other
 * prefix yields NULL.
 */
static struct field *
action_field_parse(struct action *action, const char *name)
{
	if (strncmp(name, "t.", 2))
		return NULL;

	return action_field_find(action, name + 2);
}
/*
 * Register a new action: validate the name and optional argument struct
 * type, allocate the node, translate its instruction program, and append
 * it to the pipeline's action list.
 * NOTE(review): fragment — the CHECK(p, ...) guard, allocation-failure
 * handling and the error path freeing the node are outside the visible
 * lines.
 */
5894 rte_swx_pipeline_action_config(struct rte_swx_pipeline *p,
5896 const char *args_struct_type_name,
5897 const char **instructions,
5898 uint32_t n_instructions)
5900 struct struct_type *args_struct_type;
5906 CHECK_NAME(name, EINVAL);
5907 CHECK(!action_find(p, name), EEXIST);
/* The argument struct type is optional; when given it must already be
 * registered with the pipeline.
 */
5909 if (args_struct_type_name) {
5910 CHECK_NAME(args_struct_type_name, EINVAL);
5911 args_struct_type = struct_type_find(p, args_struct_type_name);
5912 CHECK(args_struct_type, EINVAL);
5914 args_struct_type = NULL;
5917 /* Node allocation. */
5918 a = calloc(1, sizeof(struct action));
5921 /* Node initialization. */
5922 strcpy(a->name, name);
5923 a->st = args_struct_type;
5924 a->id = p->n_actions;
5926 /* Instruction translation. */
5927 err = instruction_config(p, a, instructions, n_instructions);
5933 /* Node add to tailq. */
5934 TAILQ_INSERT_TAIL(&p->actions, a, node);
5941 action_build(struct rte_swx_pipeline *p)
5943 struct action *action;
5945 p->action_instructions = calloc(p->n_actions,
5946 sizeof(struct instruction *));
5947 CHECK(p->action_instructions, ENOMEM);
5949 TAILQ_FOREACH(action, &p->actions, node)
5950 p->action_instructions[action->id] = action->instructions;
5956 action_build_free(struct rte_swx_pipeline *p)
5958 free(p->action_instructions);
5959 p->action_instructions = NULL;
5963 action_free(struct rte_swx_pipeline *p)
5965 action_build_free(p);
5968 struct action *action;
5970 action = TAILQ_FIRST(&p->actions);
5974 TAILQ_REMOVE(&p->actions, action, node);
5975 free(action->instructions);
5983 static struct table_type *
5984 table_type_find(struct rte_swx_pipeline *p, const char *name)
5986 struct table_type *elem;
5988 TAILQ_FOREACH(elem, &p->table_types, node)
5989 if (strcmp(elem->name, name) == 0)
5995 static struct table_type *
5996 table_type_resolve(struct rte_swx_pipeline *p,
5997 const char *recommended_type_name,
5998 enum rte_swx_table_match_type match_type)
6000 struct table_type *elem;
6002 /* Only consider the recommended type if the match type is correct. */
6003 if (recommended_type_name)
6004 TAILQ_FOREACH(elem, &p->table_types, node)
6005 if (!strcmp(elem->name, recommended_type_name) &&
6006 (elem->match_type == match_type))
6009 /* Ignore the recommended type and get the first element with this match
6012 TAILQ_FOREACH(elem, &p->table_types, node)
6013 if (elem->match_type == match_type)
6019 static struct table *
6020 table_find(struct rte_swx_pipeline *p, const char *name)
6024 TAILQ_FOREACH(elem, &p->tables, node)
6025 if (strcmp(elem->name, name) == 0)
6031 static struct table *
6032 table_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
6034 struct table *table = NULL;
6036 TAILQ_FOREACH(table, &p->tables, node)
6037 if (table->id == id)
6044 rte_swx_pipeline_table_type_register(struct rte_swx_pipeline *p,
6046 enum rte_swx_table_match_type match_type,
6047 struct rte_swx_table_ops *ops)
6049 struct table_type *elem;
6053 CHECK_NAME(name, EINVAL);
6054 CHECK(!table_type_find(p, name), EEXIST);
6057 CHECK(ops->create, EINVAL);
6058 CHECK(ops->lkp, EINVAL);
6059 CHECK(ops->free, EINVAL);
6061 /* Node allocation. */
6062 elem = calloc(1, sizeof(struct table_type));
6063 CHECK(elem, ENOMEM);
6065 /* Node initialization. */
6066 strcpy(elem->name, name);
6067 elem->match_type = match_type;
6068 memcpy(&elem->ops, ops, sizeof(*ops));
6070 /* Node add to tailq. */
6071 TAILQ_INSERT_TAIL(&p->table_types, elem, node);
6076 static enum rte_swx_table_match_type
6077 table_match_type_resolve(struct rte_swx_match_field_params *fields,
6082 for (i = 0; i < n_fields; i++)
6083 if (fields[i].match_type != RTE_SWX_TABLE_MATCH_EXACT)
6087 return RTE_SWX_TABLE_MATCH_EXACT;
6089 if ((i == n_fields - 1) &&
6090 (fields[i].match_type == RTE_SWX_TABLE_MATCH_LPM))
6091 return RTE_SWX_TABLE_MATCH_LPM;
6093 return RTE_SWX_TABLE_MATCH_WILDCARD;
/*
 * Create a table: validate match fields (all-header or all-metadata, same
 * header, increasing offsets), validate actions and the default action,
 * resolve the table type from the match fields, allocate and initialize
 * the node, and append it to the pipeline's table list.
 * NOTE(review): fragment — several guard/cleanup lines are outside the
 * visible lines. The "¶ms" tokens below look like mojibake for
 * "&params" (HTML entity damage) — restore in the full source.
 */
6097 rte_swx_pipeline_table_config(struct rte_swx_pipeline *p,
6099 struct rte_swx_pipeline_table_params *params,
6100 const char *recommended_table_type_name,
6104 struct table_type *type;
6106 struct action *default_action;
6107 struct header *header = NULL;
6109 uint32_t offset_prev = 0, action_data_size_max = 0, i;
6113 CHECK_NAME(name, EINVAL);
6114 CHECK(!table_find(p, name), EEXIST);
6116 CHECK(params, EINVAL);
6119 CHECK(!params->n_fields || params->fields, EINVAL);
6120 for (i = 0; i < params->n_fields; i++) {
6121 struct rte_swx_match_field_params *field = ¶ms->fields[i];
6123 struct field *hf, *mf;
6126 CHECK_NAME(field->name, EINVAL);
/* Each match field is either a header field or a metadata field. */
6128 hf = header_field_parse(p, field->name, &h);
6129 mf = metadata_field_parse(p, field->name);
6130 CHECK(hf || mf, EINVAL);
6132 offset = hf ? hf->offset : mf->offset;
/* First field fixes the header-vs-metadata choice for the table. */
6135 is_header = hf ? 1 : 0;
6136 header = hf ? h : NULL;
6137 offset_prev = offset;
/* Later fields: same header (or all metadata), strictly increasing
 * offsets.
 */
6142 CHECK((is_header && hf && (h->id == header->id)) ||
6143 (!is_header && mf), EINVAL);
6145 CHECK(offset > offset_prev, EINVAL);
6146 offset_prev = offset;
6149 /* Action checks. */
6150 CHECK(params->n_actions, EINVAL);
6151 CHECK(params->action_names, EINVAL);
6152 for (i = 0; i < params->n_actions; i++) {
6153 const char *action_name = params->action_names[i];
6155 uint32_t action_data_size;
6157 CHECK(action_name, EINVAL);
6159 a = action_find(p, action_name);
6162 action_data_size = a->st ? a->st->n_bits / 8 : 0;
6163 if (action_data_size > action_data_size_max)
6164 action_data_size_max = action_data_size;
6167 CHECK(params->default_action_name, EINVAL);
/* NOTE(review): loop bound inconsistency — the loop below iterates up to
 * p->n_actions but indexes params->action_names[] (which has
 * params->n_actions entries, checked right after with a different
 * bound). If p->n_actions > params->n_actions this reads out of
 * bounds; the bound should presumably be params->n_actions — confirm
 * against the full source.
 */
6168 for (i = 0; i < p->n_actions; i++)
6169 if (!strcmp(params->action_names[i],
6170 params->default_action_name))
6172 CHECK(i < params->n_actions, EINVAL);
6173 default_action = action_find(p, params->default_action_name);
6174 CHECK((default_action->st && params->default_action_data) ||
6175 !params->default_action_data, EINVAL);
6177 /* Table type checks. */
6178 if (params->n_fields) {
6179 enum rte_swx_table_match_type match_type;
6181 match_type = table_match_type_resolve(params->fields,
6183 type = table_type_resolve(p,
6184 recommended_table_type_name,
6186 CHECK(type, EINVAL);
6191 /* Memory allocation. */
6192 t = calloc(1, sizeof(struct table));
6195 t->fields = calloc(params->n_fields, sizeof(struct match_field));
6201 t->actions = calloc(params->n_actions, sizeof(struct action *));
6208 if (action_data_size_max) {
6209 t->default_action_data = calloc(1, action_data_size_max);
6210 if (!t->default_action_data) {
6218 /* Node initialization. */
6219 strcpy(t->name, name);
6220 if (args && args[0])
6221 strcpy(t->args, args);
6224 for (i = 0; i < params->n_fields; i++) {
6225 struct rte_swx_match_field_params *field = ¶ms->fields[i];
6226 struct match_field *f = &t->fields[i];
6228 f->match_type = field->match_type;
6229 f->field = is_header ?
6230 header_field_parse(p, field->name, NULL) :
6231 metadata_field_parse(p, field->name);
6233 t->n_fields = params->n_fields;
6234 t->is_header = is_header;
6237 for (i = 0; i < params->n_actions; i++)
6238 t->actions[i] = action_find(p, params->action_names[i]);
6239 t->default_action = default_action;
6240 if (default_action->st)
6241 memcpy(t->default_action_data,
6242 params->default_action_data,
6243 default_action->st->n_bits / 8);
6244 t->n_actions = params->n_actions;
6245 t->default_action_is_const = params->default_action_is_const;
6246 t->action_data_size_max = action_data_size_max;
6249 t->id = p->n_tables;
6251 /* Node add to tailq. */
6252 TAILQ_INSERT_TAIL(&p->tables, t, node);
/*
 * Build the low-level rte_swx_table_params for a table: the lookup key
 * spans from the first to the last match field (byte granularity), with a
 * bit mask marking the bytes actually matched, plus the largest action
 * data size across the table's actions.
 * NOTE(review): fragment — the allocation-failure cleanup and the final
 * return are outside the visible lines.
 */
6258 static struct rte_swx_table_params *
6259 table_params_get(struct table *table)
6261 struct rte_swx_table_params *params;
6262 struct field *first, *last;
6264 uint32_t key_size, key_offset, action_data_size, i;
6266 /* Memory allocation. */
6267 params = calloc(1, sizeof(struct rte_swx_table_params));
6271 /* Key offset and size. */
6272 first = table->fields[0].field;
6273 last = table->fields[table->n_fields - 1].field;
6274 key_offset = first->offset / 8;
6275 key_size = (last->offset + last->n_bits - first->offset) / 8;
6277 /* Memory allocation. */
6278 key_mask = calloc(1, key_size);
/* Mark the bytes of each match field inside the key window. */
6285 for (i = 0; i < table->n_fields; i++) {
6286 struct field *f = table->fields[i].field;
6287 uint32_t start = (f->offset - first->offset) / 8;
6288 size_t size = f->n_bits / 8;
6290 memset(&key_mask[start], 0xFF, size);
6293 /* Action data size. */
6294 action_data_size = 0;
6295 for (i = 0; i < table->n_actions; i++) {
6296 struct action *action = table->actions[i];
6297 uint32_t ads = action->st ? action->st->n_bits / 8 : 0;
6299 if (ads > action_data_size)
6300 action_data_size = ads;
/* Fill in. */
6304 params->match_type = table->type->match_type;
6305 params->key_size = key_size;
6306 params->key_offset = key_offset;
6307 params->key_mask0 = key_mask;
6308 params->action_data_size = action_data_size;
6309 params->n_keys_max = table->size;
6315 table_params_free(struct rte_swx_table_params *params)
6320 free(params->key_mask0);
/*
 * Build the run-time table state array: for each table, create the
 * underlying table object through its type's ops and snapshot the default
 * action (id + argument data).
 * NOTE(review): fragment — the stub-table branch (tables without a type)
 * and the trailing return are outside the visible lines; the CHECK-based
 * error returns here leave earlier allocations to be reclaimed by
 * table_state_build_free() on the caller's error path.
 */
6325 table_state_build(struct rte_swx_pipeline *p)
6327 struct table *table;
6329 p->table_state = calloc(p->n_tables,
6330 sizeof(struct rte_swx_table_state));
6331 CHECK(p->table_state, ENOMEM);
6333 TAILQ_FOREACH(table, &p->tables, node) {
6334 struct rte_swx_table_state *ts = &p->table_state[table->id];
6337 struct rte_swx_table_params *params;
6340 params = table_params_get(table);
6341 CHECK(params, ENOMEM);
6343 ts->obj = table->type->ops.create(params,
/* params is only needed for create(); release it either way. */
6348 table_params_free(params);
6349 CHECK(ts->obj, ENODEV);
6352 /* ts->default_action_data. */
6353 if (table->action_data_size_max) {
6354 ts->default_action_data =
6355 malloc(table->action_data_size_max);
6356 CHECK(ts->default_action_data, ENOMEM);
6358 memcpy(ts->default_action_data,
6359 table->default_action_data,
6360 table->action_data_size_max);
6363 /* ts->default_action_id. */
6364 ts->default_action_id = table->default_action->id;
6371 table_state_build_free(struct rte_swx_pipeline *p)
6375 if (!p->table_state)
6378 for (i = 0; i < p->n_tables; i++) {
6379 struct rte_swx_table_state *ts = &p->table_state[i];
6380 struct table *table = table_find_by_id(p, i);
6383 if (table->type && ts->obj)
6384 table->type->ops.free(ts->obj);
6386 /* ts->default_action_data. */
6387 free(ts->default_action_data);
6390 free(p->table_state);
6391 p->table_state = NULL;
/* Tear down the run-time table state (thin wrapper, kept for symmetry
 * with the other *_free() stages).
 */
static void
table_state_free(struct rte_swx_pipeline *p)
{
	table_state_build_free(p);
}
6401 table_stub_lkp(void *table __rte_unused,
6402 void *mailbox __rte_unused,
6403 uint8_t **key __rte_unused,
6404 uint64_t *action_id __rte_unused,
6405 uint8_t **action_data __rte_unused,
6409 return 1; /* DONE. */
/*
 * Build the per-thread table run-time: for every thread, allocate a
 * table_runtime array and wire each entry to the table type's lookup
 * callback, a private mailbox, and the key source (header or metadata
 * struct). Tables without a type fall back to the stub lookup.
 * NOTE(review): fragment — the branch structure around the typed/stub
 * cases and the trailing return are outside the visible lines.
 */
6413 table_build(struct rte_swx_pipeline *p)
6417 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
6418 struct thread *t = &p->threads[i];
6419 struct table *table;
6421 t->tables = calloc(p->n_tables, sizeof(struct table_runtime));
6422 CHECK(t->tables, ENOMEM);
6424 TAILQ_FOREACH(table, &p->tables, node) {
6425 struct table_runtime *r = &t->tables[table->id];
6430 size = table->type->ops.mailbox_size_get();
6433 r->func = table->type->ops.lkp;
/* A zero-sized mailbox is legal; only allocate when needed. */
6437 r->mailbox = calloc(1, size);
6438 CHECK(r->mailbox, ENOMEM);
/* Key source: the matched header's struct or the metadata struct. */
6442 r->key = table->is_header ?
6443 &t->structs[table->header->struct_id] :
6444 &t->structs[p->metadata_struct_id];
6446 r->func = table_stub_lkp;
/*
 * Undo table_build(): release every thread's per-table mailboxes and the
 * table_runtime arrays.
 * NOTE(review): fragment — the actual free() calls and NULL guards are
 * outside the visible lines.
 */
6455 table_build_free(struct rte_swx_pipeline *p)
6459 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
6460 struct thread *t = &p->threads[i];
6466 for (j = 0; j < p->n_tables; j++) {
6467 struct table_runtime *r = &t->tables[j];
/*
 * Free the table build state, every configured table (fields, actions,
 * default action data), and every registered table type.
 * NOTE(review): fragment — the loop structure and several free() calls
 * are outside the visible lines.
 */
6478 table_free(struct rte_swx_pipeline *p)
6480 table_build_free(p);
6486 elem = TAILQ_FIRST(&p->tables);
6490 TAILQ_REMOVE(&p->tables, elem, node);
6492 free(elem->actions);
6493 free(elem->default_action_data);
/* Table types. */
6499 struct table_type *elem;
6501 elem = TAILQ_FIRST(&p->table_types);
6505 TAILQ_REMOVE(&p->table_types, elem, node);
6514 rte_swx_pipeline_config(struct rte_swx_pipeline **p, int numa_node)
6516 struct rte_swx_pipeline *pipeline;
6518 /* Check input parameters. */
6521 /* Memory allocation. */
6522 pipeline = calloc(1, sizeof(struct rte_swx_pipeline));
6523 CHECK(pipeline, ENOMEM);
6525 /* Initialization. */
6526 TAILQ_INIT(&pipeline->struct_types);
6527 TAILQ_INIT(&pipeline->port_in_types);
6528 TAILQ_INIT(&pipeline->ports_in);
6529 TAILQ_INIT(&pipeline->port_out_types);
6530 TAILQ_INIT(&pipeline->ports_out);
6531 TAILQ_INIT(&pipeline->extern_types);
6532 TAILQ_INIT(&pipeline->extern_objs);
6533 TAILQ_INIT(&pipeline->extern_funcs);
6534 TAILQ_INIT(&pipeline->headers);
6535 TAILQ_INIT(&pipeline->actions);
6536 TAILQ_INIT(&pipeline->table_types);
6537 TAILQ_INIT(&pipeline->tables);
6539 pipeline->n_structs = 1; /* Struct 0 is reserved for action_data. */
6540 pipeline->numa_node = numa_node;
/*
 * Destroy a pipeline: free the main instruction program, the run-time
 * table state, and the registered resources.
 * NOTE(review): fragment — the NULL guard and the remaining *_free()
 * calls (tables, headers, metadata, ports, structs, the pipeline object
 * itself) are outside the visible lines.
 */
6547 rte_swx_pipeline_free(struct rte_swx_pipeline *p)
6552 free(p->instructions);
6554 table_state_free(p);
6559 extern_func_free(p);
6569 rte_swx_pipeline_instructions_config(struct rte_swx_pipeline *p,
6570 const char **instructions,
6571 uint32_t n_instructions)
6576 err = instruction_config(p, NULL, instructions, n_instructions);
6580 /* Thread instruction pointer reset. */
6581 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
6582 struct thread *t = &p->threads[i];
6584 thread_ip_reset(p, t);
/*
 * One-shot build: run every *_build() stage in dependency order; on any
 * failure, tear everything down in reverse via the *_build_free() chain.
 * NOTE(review): fragment — the per-stage "if (status) goto error;"
 * checks, the success return setting build_done, and the error label are
 * outside the visible lines.
 */
6591 rte_swx_pipeline_build(struct rte_swx_pipeline *p)
/* Build is a one-time operation. */
6596 CHECK(p->build_done == 0, EEXIST);
6598 status = port_in_build(p);
6602 status = port_out_build(p);
6606 status = struct_build(p);
6610 status = extern_obj_build(p);
6614 status = extern_func_build(p);
6618 status = header_build(p);
6622 status = metadata_build(p);
6626 status = action_build(p);
6630 status = table_build(p);
6634 status = table_state_build(p);
/* Error path: unwind in reverse build order. */
6642 table_state_build_free(p);
6643 table_build_free(p);
6644 action_build_free(p);
6645 metadata_build_free(p);
6646 header_build_free(p);
6647 extern_func_build_free(p);
6648 extern_obj_build_free(p);
6649 port_out_build_free(p);
6650 port_in_build_free(p);
6651 struct_build_free(p);
/* Run the pipeline for the given number of instructions (data-plane fast
 * path: no parameter validation).
 */
void
rte_swx_pipeline_run(struct rte_swx_pipeline *p, uint32_t n_instructions)
{
	uint32_t i;

	for (i = 0; i < n_instructions; i++)
		instr_exec(p);
}
6669 rte_swx_pipeline_table_state_get(struct rte_swx_pipeline *p,
6670 struct rte_swx_table_state **table_state)
6672 if (!p || !table_state || !p->build_done)
6675 *table_state = p->table_state;
6680 rte_swx_pipeline_table_state_set(struct rte_swx_pipeline *p,
6681 struct rte_swx_table_state *table_state)
6683 if (!p || !table_state || !p->build_done)
6686 p->table_state = table_state;