1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Intel Corporation
10 #include <arpa/inet.h>
12 #include <rte_common.h>
13 #include <rte_prefetch.h>
14 #include <rte_byteorder.h>
15 #include <rte_cycles.h>
16 #include <rte_meter.h>
18 #include <rte_swx_table_selector.h>
20 #include "rte_swx_pipeline.h"
21 #include "rte_swx_ctl.h"
23 #define CHECK(condition, err_code) \
29 #define CHECK_NAME(name, err_code) \
32 (strnlen((name), RTE_SWX_NAME_SIZE) < RTE_SWX_NAME_SIZE), \
35 #define CHECK_INSTRUCTION(instr, err_code) \
38 (strnlen((instr), RTE_SWX_INSTRUCTION_SIZE) < \
39 RTE_SWX_INSTRUCTION_SIZE), \
47 #define TRACE(...) printf(__VA_ARGS__)
55 #define ntoh64(x) rte_be_to_cpu_64(x)
56 #define hton64(x) rte_cpu_to_be_64(x)
58 #ifndef RTE_SWX_PIPELINE_HUGE_PAGES_DISABLE
60 #include <rte_malloc.h>
63 env_malloc(size_t size, size_t alignment, int numa_node)
65 return rte_zmalloc_socket(NULL, size, alignment, numa_node);
69 env_free(void *start, size_t size __rte_unused)
79 env_malloc(size_t size, size_t alignment __rte_unused, int numa_node)
83 if (numa_available() == -1)
86 start = numa_alloc_onnode(size, numa_node);
90 memset(start, 0, size);
95 env_free(void *start, size_t size)
97 if (numa_available() == -1)
100 numa_free(start, size);
109 char name[RTE_SWX_NAME_SIZE];
115 TAILQ_ENTRY(struct_type) node;
116 char name[RTE_SWX_NAME_SIZE];
117 struct field *fields;
122 TAILQ_HEAD(struct_type_tailq, struct_type);
127 struct port_in_type {
128 TAILQ_ENTRY(port_in_type) node;
129 char name[RTE_SWX_NAME_SIZE];
130 struct rte_swx_port_in_ops ops;
133 TAILQ_HEAD(port_in_type_tailq, port_in_type);
136 TAILQ_ENTRY(port_in) node;
137 struct port_in_type *type;
142 TAILQ_HEAD(port_in_tailq, port_in);
144 struct port_in_runtime {
145 rte_swx_port_in_pkt_rx_t pkt_rx;
152 struct port_out_type {
153 TAILQ_ENTRY(port_out_type) node;
154 char name[RTE_SWX_NAME_SIZE];
155 struct rte_swx_port_out_ops ops;
158 TAILQ_HEAD(port_out_type_tailq, port_out_type);
161 TAILQ_ENTRY(port_out) node;
162 struct port_out_type *type;
167 TAILQ_HEAD(port_out_tailq, port_out);
169 struct port_out_runtime {
170 rte_swx_port_out_pkt_tx_t pkt_tx;
171 rte_swx_port_out_flush_t flush;
178 struct extern_type_member_func {
179 TAILQ_ENTRY(extern_type_member_func) node;
180 char name[RTE_SWX_NAME_SIZE];
181 rte_swx_extern_type_member_func_t func;
185 TAILQ_HEAD(extern_type_member_func_tailq, extern_type_member_func);
188 TAILQ_ENTRY(extern_type) node;
189 char name[RTE_SWX_NAME_SIZE];
190 struct struct_type *mailbox_struct_type;
191 rte_swx_extern_type_constructor_t constructor;
192 rte_swx_extern_type_destructor_t destructor;
193 struct extern_type_member_func_tailq funcs;
197 TAILQ_HEAD(extern_type_tailq, extern_type);
200 TAILQ_ENTRY(extern_obj) node;
201 char name[RTE_SWX_NAME_SIZE];
202 struct extern_type *type;
208 TAILQ_HEAD(extern_obj_tailq, extern_obj);
210 #ifndef RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX
211 #define RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX 8
214 struct extern_obj_runtime {
217 rte_swx_extern_type_member_func_t funcs[RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX];
224 TAILQ_ENTRY(extern_func) node;
225 char name[RTE_SWX_NAME_SIZE];
226 struct struct_type *mailbox_struct_type;
227 rte_swx_extern_func_t func;
232 TAILQ_HEAD(extern_func_tailq, extern_func);
234 struct extern_func_runtime {
236 rte_swx_extern_func_t func;
243 TAILQ_ENTRY(header) node;
244 char name[RTE_SWX_NAME_SIZE];
245 struct struct_type *st;
250 TAILQ_HEAD(header_tailq, header);
252 struct header_runtime {
256 struct header_out_runtime {
266 /* Packet headers are always in Network Byte Order (NBO), i.e. big endian.
267 * Packet meta-data fields are always assumed to be in Host Byte Order (HBO).
268 * Table entry fields can be in either NBO or HBO; they are assumed to be in HBO
269 * when transferred to packet meta-data and in NBO when transferred to packet
 * headers.
 */
273 /* Notation conventions:
274 * -Header field: H = h.header.field (dst/src)
275 * -Meta-data field: M = m.field (dst/src)
276 * -Extern object mailbox field: E = e.field (dst/src)
277 * -Extern function mailbox field: F = f.field (dst/src)
278 * -Table action data field: T = t.field (src only)
279 * -Immediate value: I = 32-bit unsigned value (src only)
 */
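/* Example (illustrative only, the field names are made up): in
 * "mov m.color h.ipv4.dscp" the destination is a meta-data field (M) and the
 * source is a header field (H), so the generic mov is specialized to the
 * INSTR_MOV_MH variant below.
 */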
282 enum instruction_type {
289 INSTR_TX, /* port_out = M */
290 INSTR_TX_I, /* port_out = I */
292 /* extract h.header */
313 /* validate h.header */
316 /* invalidate h.header */
317 INSTR_HDR_INVALIDATE,
321 * dst = HMEF, src = HMEFTI
323 INSTR_MOV, /* dst = MEF, src = MEFT */
324 INSTR_MOV_MH, /* dst = MEF, src = H */
325 INSTR_MOV_HM, /* dst = H, src = MEFT */
326 INSTR_MOV_HH, /* dst = H, src = H */
327 INSTR_MOV_I, /* dst = HMEF, src = I */
329 /* dma h.header t.field
330 * memcpy(h.header, t.field, sizeof(h.header))
343 * dst = HMEF, src = HMEFTI
345 INSTR_ALU_ADD, /* dst = MEF, src = MEF */
346 INSTR_ALU_ADD_MH, /* dst = MEF, src = H */
347 INSTR_ALU_ADD_HM, /* dst = H, src = MEF */
348 INSTR_ALU_ADD_HH, /* dst = H, src = H */
349 INSTR_ALU_ADD_MI, /* dst = MEF, src = I */
350 INSTR_ALU_ADD_HI, /* dst = H, src = I */
354 * dst = HMEF, src = HMEFTI
356 INSTR_ALU_SUB, /* dst = MEF, src = MEF */
357 INSTR_ALU_SUB_MH, /* dst = MEF, src = H */
358 INSTR_ALU_SUB_HM, /* dst = H, src = MEF */
359 INSTR_ALU_SUB_HH, /* dst = H, src = H */
360 INSTR_ALU_SUB_MI, /* dst = MEF, src = I */
361 INSTR_ALU_SUB_HI, /* dst = H, src = I */
364 * dst = dst '+ src[0:1] '+ src[2:3] + ...
365 * dst = H, src = {H, h.header}
367 INSTR_ALU_CKADD_FIELD, /* src = H */
368 INSTR_ALU_CKADD_STRUCT20, /* src = h.header, with sizeof(header) = 20 */
369 INSTR_ALU_CKADD_STRUCT, /* src = h.header, with any sizeof(header) */
375 INSTR_ALU_CKSUB_FIELD,
379 * dst = HMEF, src = HMEFTI
381 INSTR_ALU_AND, /* dst = MEF, src = MEFT */
382 INSTR_ALU_AND_MH, /* dst = MEF, src = H */
383 INSTR_ALU_AND_HM, /* dst = H, src = MEFT */
384 INSTR_ALU_AND_HH, /* dst = H, src = H */
385 INSTR_ALU_AND_I, /* dst = HMEF, src = I */
389 * dst = HMEF, src = HMEFTI
391 INSTR_ALU_OR, /* dst = MEF, src = MEFT */
392 INSTR_ALU_OR_MH, /* dst = MEF, src = H */
393 INSTR_ALU_OR_HM, /* dst = H, src = MEFT */
394 INSTR_ALU_OR_HH, /* dst = H, src = H */
395 INSTR_ALU_OR_I, /* dst = HMEF, src = I */
399 * dst = HMEF, src = HMEFTI
401 INSTR_ALU_XOR, /* dst = MEF, src = MEFT */
402 INSTR_ALU_XOR_MH, /* dst = MEF, src = H */
403 INSTR_ALU_XOR_HM, /* dst = H, src = MEFT */
404 INSTR_ALU_XOR_HH, /* dst = H, src = H */
405 INSTR_ALU_XOR_I, /* dst = HMEF, src = I */
409 * dst = HMEF, src = HMEFTI
411 INSTR_ALU_SHL, /* dst = MEF, src = MEF */
412 INSTR_ALU_SHL_MH, /* dst = MEF, src = H */
413 INSTR_ALU_SHL_HM, /* dst = H, src = MEF */
414 INSTR_ALU_SHL_HH, /* dst = H, src = H */
415 INSTR_ALU_SHL_MI, /* dst = MEF, src = I */
416 INSTR_ALU_SHL_HI, /* dst = H, src = I */
420 * dst = HMEF, src = HMEFTI
422 INSTR_ALU_SHR, /* dst = MEF, src = MEF */
423 INSTR_ALU_SHR_MH, /* dst = MEF, src = H */
424 INSTR_ALU_SHR_HM, /* dst = H, src = MEF */
425 INSTR_ALU_SHR_HH, /* dst = H, src = H */
426 INSTR_ALU_SHR_MI, /* dst = MEF, src = I */
427 INSTR_ALU_SHR_HI, /* dst = H, src = I */
429 /* regprefetch REGARRAY index
430 * prefetch REGARRAY[index]
433 INSTR_REGPREFETCH_RH, /* index = H */
434 INSTR_REGPREFETCH_RM, /* index = MEFT */
435 INSTR_REGPREFETCH_RI, /* index = I */
437 /* regrd dst REGARRAY index
438 * dst = REGARRAY[index]
439 * dst = HMEF, index = HMEFTI
441 INSTR_REGRD_HRH, /* dst = H, index = H */
442 INSTR_REGRD_HRM, /* dst = H, index = MEFT */
443 INSTR_REGRD_HRI, /* dst = H, index = I */
444 INSTR_REGRD_MRH, /* dst = MEF, index = H */
445 INSTR_REGRD_MRM, /* dst = MEF, index = MEFT */
446 INSTR_REGRD_MRI, /* dst = MEF, index = I */
448 /* regwr REGARRAY index src
449 * REGARRAY[index] = src
450 * index = HMEFTI, src = HMEFTI
452 INSTR_REGWR_RHH, /* index = H, src = H */
453 INSTR_REGWR_RHM, /* index = H, src = MEFT */
454 INSTR_REGWR_RHI, /* index = H, src = I */
455 INSTR_REGWR_RMH, /* index = MEFT, src = H */
456 INSTR_REGWR_RMM, /* index = MEFT, src = MEFT */
457 INSTR_REGWR_RMI, /* index = MEFT, src = I */
458 INSTR_REGWR_RIH, /* index = I, src = H */
459 INSTR_REGWR_RIM, /* index = I, src = MEFT */
460 INSTR_REGWR_RII, /* index = I, src = I */
462 /* regadd REGARRAY index src
463 * REGARRAY[index] += src
464 * index = HMEFTI, src = HMEFTI
466 INSTR_REGADD_RHH, /* index = H, src = H */
467 INSTR_REGADD_RHM, /* index = H, src = MEFT */
468 INSTR_REGADD_RHI, /* index = H, src = I */
469 INSTR_REGADD_RMH, /* index = MEFT, src = H */
470 INSTR_REGADD_RMM, /* index = MEFT, src = MEFT */
471 INSTR_REGADD_RMI, /* index = MEFT, src = I */
472 INSTR_REGADD_RIH, /* index = I, src = H */
473 INSTR_REGADD_RIM, /* index = I, src = MEFT */
474 INSTR_REGADD_RII, /* index = I, src = I */
476 /* metprefetch METARRAY index
477 * prefetch METARRAY[index]
480 INSTR_METPREFETCH_H, /* index = H */
481 INSTR_METPREFETCH_M, /* index = MEFT */
482 INSTR_METPREFETCH_I, /* index = I */
484 /* meter METARRAY index length color_in color_out
485 * color_out = meter(METARRAY[index], length, color_in)
486 * index = HMEFTI, length = HMEFT, color_in = MEFTI, color_out = MEF
488 INSTR_METER_HHM, /* index = H, length = H, color_in = MEFT */
489 INSTR_METER_HHI, /* index = H, length = H, color_in = I */
490 INSTR_METER_HMM, /* index = H, length = MEFT, color_in = MEFT */
491 INSTR_METER_HMI, /* index = H, length = MEFT, color_in = I */
492 INSTR_METER_MHM, /* index = MEFT, length = H, color_in = MEFT */
493 INSTR_METER_MHI, /* index = MEFT, length = H, color_in = I */
494 INSTR_METER_MMM, /* index = MEFT, length = MEFT, color_in = MEFT */
495 INSTR_METER_MMI, /* index = MEFT, length = MEFT, color_in = I */
496 INSTR_METER_IHM, /* index = I, length = H, color_in = MEFT */
497 INSTR_METER_IHI, /* index = I, length = H, color_in = I */
498 INSTR_METER_IMM, /* index = I, length = MEFT, color_in = MEFT */
499 INSTR_METER_IMI, /* index = I, length = MEFT, color_in = I */
505 /* extern e.obj.func */
516 /* jmpv LABEL h.header
517 * Jump if header is valid
521 /* jmpnv LABEL h.header
522 * Jump if header is invalid
527 * Jump if table lookup hit
532 * Jump if table lookup miss
539 INSTR_JMP_ACTION_HIT,
541 /* jmpna LABEL ACTION
542 * Jump if action not run
544 INSTR_JMP_ACTION_MISS,
547 * Jump if a is equal to b
548 * a = HMEFT, b = HMEFTI
550 INSTR_JMP_EQ, /* a = MEFT, b = MEFT */
551 INSTR_JMP_EQ_MH, /* a = MEFT, b = H */
552 INSTR_JMP_EQ_HM, /* a = H, b = MEFT */
553 INSTR_JMP_EQ_HH, /* a = H, b = H */
554 INSTR_JMP_EQ_I, /* (a, b) = (MEFT, I) or (a, b) = (H, I) */
557 * Jump if a is not equal to b
558 * a = HMEFT, b = HMEFTI
560 INSTR_JMP_NEQ, /* a = MEFT, b = MEFT */
561 INSTR_JMP_NEQ_MH, /* a = MEFT, b = H */
562 INSTR_JMP_NEQ_HM, /* a = H, b = MEFT */
563 INSTR_JMP_NEQ_HH, /* a = H, b = H */
564 INSTR_JMP_NEQ_I, /* (a, b) = (MEFT, I) or (a, b) = (H, I) */
567 * Jump if a is less than b
568 * a = HMEFT, b = HMEFTI
570 INSTR_JMP_LT, /* a = MEFT, b = MEFT */
571 INSTR_JMP_LT_MH, /* a = MEFT, b = H */
572 INSTR_JMP_LT_HM, /* a = H, b = MEFT */
573 INSTR_JMP_LT_HH, /* a = H, b = H */
574 INSTR_JMP_LT_MI, /* a = MEFT, b = I */
575 INSTR_JMP_LT_HI, /* a = H, b = I */
578 * Jump if a is greater than b
579 * a = HMEFT, b = HMEFTI
581 INSTR_JMP_GT, /* a = MEFT, b = MEFT */
582 INSTR_JMP_GT_MH, /* a = MEFT, b = H */
583 INSTR_JMP_GT_HM, /* a = H, b = MEFT */
584 INSTR_JMP_GT_HH, /* a = H, b = H */
585 INSTR_JMP_GT_MI, /* a = MEFT, b = I */
586 INSTR_JMP_GT_HI, /* a = H, b = I */
594 struct instr_operand {
615 uint8_t header_id[8];
616 uint8_t struct_id[8];
621 struct instr_hdr_validity {
629 struct instr_extern_obj {
634 struct instr_extern_func {
638 struct instr_dst_src {
639 struct instr_operand dst;
641 struct instr_operand src;
646 struct instr_regarray {
651 struct instr_operand idx;
656 struct instr_operand dstsrc;
666 struct instr_operand idx;
670 struct instr_operand length;
673 struct instr_operand color_in;
674 uint32_t color_in_val;
677 struct instr_operand color_out;
682 uint8_t header_id[8];
683 uint8_t struct_id[8];
694 struct instruction *ip;
697 struct instr_operand a;
703 struct instr_operand b;
709 enum instruction_type type;
712 struct instr_hdr_validity valid;
713 struct instr_dst_src mov;
714 struct instr_regarray regarray;
715 struct instr_meter meter;
716 struct instr_dma dma;
717 struct instr_dst_src alu;
718 struct instr_table table;
719 struct instr_extern_obj ext_obj;
720 struct instr_extern_func ext_func;
721 struct instr_jmp jmp;
725 struct instruction_data {
726 char label[RTE_SWX_NAME_SIZE];
727 char jmp_label[RTE_SWX_NAME_SIZE];
728 uint32_t n_users; /* user = jmp instruction to this instruction. */
736 TAILQ_ENTRY(action) node;
737 char name[RTE_SWX_NAME_SIZE];
738 struct struct_type *st;
739 int *args_endianness; /* 0 = Host Byte Order (HBO); 1 = Network Byte Order (NBO). */
740 struct instruction *instructions;
741 uint32_t n_instructions;
745 TAILQ_HEAD(action_tailq, action);
751 TAILQ_ENTRY(table_type) node;
752 char name[RTE_SWX_NAME_SIZE];
753 enum rte_swx_table_match_type match_type;
754 struct rte_swx_table_ops ops;
757 TAILQ_HEAD(table_type_tailq, table_type);
760 enum rte_swx_table_match_type match_type;
765 TAILQ_ENTRY(table) node;
766 char name[RTE_SWX_NAME_SIZE];
767 char args[RTE_SWX_NAME_SIZE];
768 struct table_type *type; /* NULL when n_fields == 0. */
771 struct match_field *fields;
773 struct header *header; /* Only valid when n_fields > 0. */
776 struct action **actions;
777 struct action *default_action;
778 uint8_t *default_action_data;
780 int default_action_is_const;
781 uint32_t action_data_size_max;
787 TAILQ_HEAD(table_tailq, table);
789 struct table_runtime {
790 rte_swx_table_lookup_t func;
795 struct table_statistics {
796 uint64_t n_pkts_hit[2]; /* 0 = Miss, 1 = Hit. */
797 uint64_t *n_pkts_action;
804 TAILQ_ENTRY(selector) node;
805 char name[RTE_SWX_NAME_SIZE];
807 struct field *group_id_field;
808 struct field **selector_fields;
809 uint32_t n_selector_fields;
810 struct header *selector_header;
811 struct field *member_id_field;
813 uint32_t n_groups_max;
814 uint32_t n_members_per_group_max;
819 TAILQ_HEAD(selector_tailq, selector);
821 struct selector_runtime {
823 uint8_t **group_id_buffer;
824 uint8_t **selector_buffer;
825 uint8_t **member_id_buffer;
828 struct selector_statistics {
836 TAILQ_ENTRY(regarray) node;
837 char name[RTE_SWX_NAME_SIZE];
843 TAILQ_HEAD(regarray_tailq, regarray);
845 struct regarray_runtime {
853 struct meter_profile {
854 TAILQ_ENTRY(meter_profile) node;
855 char name[RTE_SWX_NAME_SIZE];
856 struct rte_meter_trtcm_params params;
857 struct rte_meter_trtcm_profile profile;
861 TAILQ_HEAD(meter_profile_tailq, meter_profile);
864 TAILQ_ENTRY(metarray) node;
865 char name[RTE_SWX_NAME_SIZE];
870 TAILQ_HEAD(metarray_tailq, metarray);
873 struct rte_meter_trtcm m;
874 struct meter_profile *profile;
875 enum rte_color color_mask;
878 uint64_t n_pkts[RTE_COLORS];
879 uint64_t n_bytes[RTE_COLORS];
882 struct metarray_runtime {
883 struct meter *metarray;
892 struct rte_swx_pkt pkt;
898 /* Packet headers. */
899 struct header_runtime *headers; /* Extracted or generated headers. */
900 struct header_out_runtime *headers_out; /* Emitted headers. */
901 uint8_t *header_storage;
902 uint8_t *header_out_storage;
903 uint64_t valid_headers;
904 uint32_t n_headers_out;
906 /* Packet meta-data. */
910 struct table_runtime *tables;
911 struct selector_runtime *selectors;
912 struct rte_swx_table_state *table_state;
914 int hit; /* 0 = Miss, 1 = Hit. */
916 /* Extern objects and functions. */
917 struct extern_obj_runtime *extern_objs;
918 struct extern_func_runtime *extern_funcs;
921 struct instruction *ip;
922 struct instruction *ret;
925 #define MASK64_BIT_GET(mask, pos) ((mask) & (1LLU << (pos)))
926 #define MASK64_BIT_SET(mask, pos) ((mask) | (1LLU << (pos)))
927 #define MASK64_BIT_CLR(mask, pos) ((mask) & ~(1LLU << (pos)))
929 #define HEADER_VALID(thread, header_id) \
930 MASK64_BIT_GET((thread)->valid_headers, header_id)
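/* Illustrative sketch (not part of the original file; the function name is
 * made up): typical life cycle of one bit in the 64-bit valid_headers mask,
 * keyed by header_id, using the MASK64 helpers above.
 */
static inline uint64_t
__valid_headers_example(uint64_t valid_headers, uint32_t header_id)
{
	/* Header becomes valid (e.g. after extract or validate). */
	valid_headers = MASK64_BIT_SET(valid_headers, header_id);

	/* HEADER_VALID(thread, header_id) would now evaluate to non-zero. */

	/* Header is invalidated again (e.g. after invalidate). */
	valid_headers = MASK64_BIT_CLR(valid_headers, header_id);

	return valid_headers;
}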
932 #define ALU(thread, ip, operator) \
934 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
935 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
936 uint64_t dst64 = *dst64_ptr; \
937 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
938 uint64_t dst = dst64 & dst64_mask; \
940 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
941 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
942 uint64_t src64 = *src64_ptr; \
943 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
944 uint64_t src = src64 & src64_mask; \
946 uint64_t result = dst operator src; \
948 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
951 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
953 #define ALU_MH(thread, ip, operator) \
955 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
956 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
957 uint64_t dst64 = *dst64_ptr; \
958 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
959 uint64_t dst = dst64 & dst64_mask; \
961 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
962 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
963 uint64_t src64 = *src64_ptr; \
964 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
966 uint64_t result = dst operator src; \
968 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
971 #define ALU_HM(thread, ip, operator) \
973 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
974 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
975 uint64_t dst64 = *dst64_ptr; \
976 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
977 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
979 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
980 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
981 uint64_t src64 = *src64_ptr; \
982 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
983 uint64_t src = src64 & src64_mask; \
985 uint64_t result = dst operator src; \
986 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
988 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
991 #define ALU_HM_FAST(thread, ip, operator) \
993 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
994 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
995 uint64_t dst64 = *dst64_ptr; \
996 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
997 uint64_t dst = dst64 & dst64_mask; \
999 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
1000 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
1001 uint64_t src64 = *src64_ptr; \
1002 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
1003 uint64_t src = hton64(src64 & src64_mask) >> (64 - (ip)->alu.dst.n_bits); \
1005 uint64_t result = dst operator src; \
1007 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
1010 #define ALU_HH(thread, ip, operator) \
1012 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
1013 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
1014 uint64_t dst64 = *dst64_ptr; \
1015 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
1016 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
1018 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
1019 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
1020 uint64_t src64 = *src64_ptr; \
1021 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
1023 uint64_t result = dst operator src; \
1024 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
1026 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
1029 #define ALU_HH_FAST(thread, ip, operator) \
1031 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
1032 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
1033 uint64_t dst64 = *dst64_ptr; \
1034 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
1035 uint64_t dst = dst64 & dst64_mask; \
1037 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
1038 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
1039 uint64_t src64 = *src64_ptr; \
1040 uint64_t src = (src64 << (64 - (ip)->alu.src.n_bits)) >> (64 - (ip)->alu.dst.n_bits); \
1042 uint64_t result = dst operator src; \
1044 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
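/* On big-endian CPUs, network byte order equals host byte order, so the
 * header-aware variants are simply aliased to the generic ALU macro here
 * (the #else branch of the RTE_BYTE_ORDER test above).
 */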
1051 #define ALU_HM_FAST ALU
1053 #define ALU_HH_FAST ALU
1057 #define ALU_I(thread, ip, operator) \
1059 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
1060 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
1061 uint64_t dst64 = *dst64_ptr; \
1062 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
1063 uint64_t dst = dst64 & dst64_mask; \
1065 uint64_t src = (ip)->alu.src_val; \
1067 uint64_t result = dst operator src; \
1069 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
1072 #define ALU_MI ALU_I
1074 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1076 #define ALU_HI(thread, ip, operator) \
1078 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
1079 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
1080 uint64_t dst64 = *dst64_ptr; \
1081 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
1082 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
1084 uint64_t src = (ip)->alu.src_val; \
1086 uint64_t result = dst operator src; \
1087 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
1089 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
1094 #define ALU_HI ALU_I
1098 #define MOV(thread, ip) \
1100 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
1101 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
1102 uint64_t dst64 = *dst64_ptr; \
1103 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
1105 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
1106 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
1107 uint64_t src64 = *src64_ptr; \
1108 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \
1109 uint64_t src = src64 & src64_mask; \
1111 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
1114 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1116 #define MOV_MH(thread, ip) \
1118 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
1119 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
1120 uint64_t dst64 = *dst64_ptr; \
1121 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
1123 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
1124 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
1125 uint64_t src64 = *src64_ptr; \
1126 uint64_t src = ntoh64(src64) >> (64 - (ip)->mov.src.n_bits); \
1128 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
1131 #define MOV_HM(thread, ip) \
1133 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
1134 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
1135 uint64_t dst64 = *dst64_ptr; \
1136 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
1138 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
1139 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
1140 uint64_t src64 = *src64_ptr; \
1141 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \
1142 uint64_t src = src64 & src64_mask; \
1144 src = hton64(src) >> (64 - (ip)->mov.dst.n_bits); \
1145 *dst64_ptr = (dst64 & ~dst64_mask) | src; \
1148 #define MOV_HH(thread, ip) \
1150 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
1151 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
1152 uint64_t dst64 = *dst64_ptr; \
1153 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
1155 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
1156 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
1157 uint64_t src64 = *src64_ptr; \
1159 uint64_t src = src64 << (64 - (ip)->mov.src.n_bits); \
1160 src = src >> (64 - (ip)->mov.dst.n_bits); \
1161 *dst64_ptr = (dst64 & ~dst64_mask) | src; \
1172 #define MOV_I(thread, ip) \
1174 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
1175 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
1176 uint64_t dst64 = *dst64_ptr; \
1177 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
1179 uint64_t src = (ip)->mov.src_val; \
1181 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
1184 #define JMP_CMP(thread, ip, operator) \
1186 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1187 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1188 uint64_t a64 = *a64_ptr; \
1189 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
1190 uint64_t a = a64 & a64_mask; \
1192 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1193 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1194 uint64_t b64 = *b64_ptr; \
1195 uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits); \
1196 uint64_t b = b64 & b64_mask; \
1198 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1201 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1203 #define JMP_CMP_MH(thread, ip, operator) \
1205 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1206 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1207 uint64_t a64 = *a64_ptr; \
1208 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
1209 uint64_t a = a64 & a64_mask; \
1211 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1212 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1213 uint64_t b64 = *b64_ptr; \
1214 uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits); \
1216 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1219 #define JMP_CMP_HM(thread, ip, operator) \
1221 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1222 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1223 uint64_t a64 = *a64_ptr; \
1224 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
1226 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1227 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1228 uint64_t b64 = *b64_ptr; \
1229 uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits); \
1230 uint64_t b = b64 & b64_mask; \
1232 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1235 #define JMP_CMP_HH(thread, ip, operator) \
1237 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1238 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1239 uint64_t a64 = *a64_ptr; \
1240 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
1242 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1243 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1244 uint64_t b64 = *b64_ptr; \
1245 uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits); \
1247 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1250 #define JMP_CMP_HH_FAST(thread, ip, operator) \
1252 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1253 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1254 uint64_t a64 = *a64_ptr; \
1255 uint64_t a = a64 << (64 - (ip)->jmp.a.n_bits); \
1257 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1258 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1259 uint64_t b64 = *b64_ptr; \
1260 uint64_t b = b64 << (64 - (ip)->jmp.b.n_bits); \
1262 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1267 #define JMP_CMP_MH JMP_CMP
1268 #define JMP_CMP_HM JMP_CMP
1269 #define JMP_CMP_HH JMP_CMP
1270 #define JMP_CMP_HH_FAST JMP_CMP
1274 #define JMP_CMP_I(thread, ip, operator) \
1276 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1277 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1278 uint64_t a64 = *a64_ptr; \
1279 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
1280 uint64_t a = a64 & a64_mask; \
1282 uint64_t b = (ip)->jmp.b_val; \
1284 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1287 #define JMP_CMP_MI JMP_CMP_I
1289 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1291 #define JMP_CMP_HI(thread, ip, operator) \
1293 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1294 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1295 uint64_t a64 = *a64_ptr; \
1296 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
1298 uint64_t b = (ip)->jmp.b_val; \
1300 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1305 #define JMP_CMP_HI JMP_CMP_I
1309 #define METADATA_READ(thread, offset, n_bits) \
1311 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
1312 uint64_t m64 = *m64_ptr; \
1313 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
1317 #define METADATA_WRITE(thread, offset, n_bits, value) \
1319 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
1320 uint64_t m64 = *m64_ptr; \
1321 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
1323 uint64_t m_new = value; \
1325 *m64_ptr = (m64 & ~m64_mask) | (m_new & m64_mask); \
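/* Illustrative sketch (not part of the original file; the function name is
 * made up): copy one meta-data field into another using the accessors above.
 * Offsets are byte offsets into the thread meta-data; n_bits is the field
 * width in bits (at most 64).
 */
static inline void
__meta_field_copy_example(struct thread *t,
			  uint32_t dst_offset,
			  uint32_t src_offset,
			  uint32_t n_bits)
{
	/* Read the source field, masked to its width. */
	uint64_t v = METADATA_READ(t, src_offset, n_bits);

	/* Read-modify-write the destination field, preserving neighboring bits. */
	METADATA_WRITE(t, dst_offset, n_bits, v);
}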
1328 #ifndef RTE_SWX_PIPELINE_THREADS_MAX
1329 #define RTE_SWX_PIPELINE_THREADS_MAX 16
1332 struct rte_swx_pipeline {
1333 struct struct_type_tailq struct_types;
1334 struct port_in_type_tailq port_in_types;
1335 struct port_in_tailq ports_in;
1336 struct port_out_type_tailq port_out_types;
1337 struct port_out_tailq ports_out;
1338 struct extern_type_tailq extern_types;
1339 struct extern_obj_tailq extern_objs;
1340 struct extern_func_tailq extern_funcs;
1341 struct header_tailq headers;
1342 struct struct_type *metadata_st;
1343 uint32_t metadata_struct_id;
1344 struct action_tailq actions;
1345 struct table_type_tailq table_types;
1346 struct table_tailq tables;
1347 struct selector_tailq selectors;
1348 struct regarray_tailq regarrays;
1349 struct meter_profile_tailq meter_profiles;
1350 struct metarray_tailq metarrays;
1352 struct port_in_runtime *in;
1353 struct port_out_runtime *out;
1354 struct instruction **action_instructions;
1355 struct rte_swx_table_state *table_state;
1356 struct table_statistics *table_stats;
1357 struct selector_statistics *selector_stats;
1358 struct regarray_runtime *regarray_runtime;
1359 struct metarray_runtime *metarray_runtime;
1360 struct instruction *instructions;
1361 struct thread threads[RTE_SWX_PIPELINE_THREADS_MAX];
1364 uint32_t n_ports_in;
1365 uint32_t n_ports_out;
1366 uint32_t n_extern_objs;
1367 uint32_t n_extern_funcs;
1370 uint32_t n_selectors;
1371 uint32_t n_regarrays;
1372 uint32_t n_metarrays;
1376 uint32_t n_instructions;
1384 static struct struct_type *
1385 struct_type_find(struct rte_swx_pipeline *p, const char *name)
1387 struct struct_type *elem;
1389 TAILQ_FOREACH(elem, &p->struct_types, node)
1390 if (strcmp(elem->name, name) == 0)
1396 static struct field *
1397 struct_type_field_find(struct struct_type *st, const char *name)
1401 for (i = 0; i < st->n_fields; i++) {
1402 struct field *f = &st->fields[i];
1404 if (strcmp(f->name, name) == 0)
1412 rte_swx_pipeline_struct_type_register(struct rte_swx_pipeline *p,
1414 struct rte_swx_field_params *fields,
1417 struct struct_type *st;
1421 CHECK_NAME(name, EINVAL);
1422 CHECK(fields, EINVAL);
1423 CHECK(n_fields, EINVAL);
1425 for (i = 0; i < n_fields; i++) {
1426 struct rte_swx_field_params *f = &fields[i];
1429 CHECK_NAME(f->name, EINVAL);
1430 CHECK(f->n_bits, EINVAL);
1431 CHECK(f->n_bits <= 64, EINVAL);
1432 CHECK((f->n_bits & 7) == 0, EINVAL);
1434 for (j = 0; j < i; j++) {
1435 struct rte_swx_field_params *f_prev = &fields[j];
1437 CHECK(strcmp(f->name, f_prev->name), EINVAL);
1441 CHECK(!struct_type_find(p, name), EEXIST);
1443 /* Node allocation. */
1444 st = calloc(1, sizeof(struct struct_type));
1447 st->fields = calloc(n_fields, sizeof(struct field));
1453 /* Node initialization. */
1454 strcpy(st->name, name);
1455 for (i = 0; i < n_fields; i++) {
1456 struct field *dst = &st->fields[i];
1457 struct rte_swx_field_params *src = &fields[i];
1459 strcpy(dst->name, src->name);
1460 dst->n_bits = src->n_bits;
1461 dst->offset = st->n_bits;
1463 st->n_bits += src->n_bits;
1465 st->n_fields = n_fields;
1467 /* Node add to tailq. */
1468 TAILQ_INSERT_TAIL(&p->struct_types, st, node);
1474 struct_build(struct rte_swx_pipeline *p)
1478 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1479 struct thread *t = &p->threads[i];
1481 t->structs = calloc(p->n_structs, sizeof(uint8_t *));
1482 CHECK(t->structs, ENOMEM);
1489 struct_build_free(struct rte_swx_pipeline *p)
1493 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1494 struct thread *t = &p->threads[i];
1502 struct_free(struct rte_swx_pipeline *p)
1504 struct_build_free(p);
1508 struct struct_type *elem;
1510 elem = TAILQ_FIRST(&p->struct_types);
1514 TAILQ_REMOVE(&p->struct_types, elem, node);
1523 static struct port_in_type *
1524 port_in_type_find(struct rte_swx_pipeline *p, const char *name)
1526 struct port_in_type *elem;
1531 TAILQ_FOREACH(elem, &p->port_in_types, node)
1532 if (strcmp(elem->name, name) == 0)
1539 rte_swx_pipeline_port_in_type_register(struct rte_swx_pipeline *p,
1541 struct rte_swx_port_in_ops *ops)
1543 struct port_in_type *elem;
1546 CHECK_NAME(name, EINVAL);
1548 CHECK(ops->create, EINVAL);
1549 CHECK(ops->free, EINVAL);
1550 CHECK(ops->pkt_rx, EINVAL);
1551 CHECK(ops->stats_read, EINVAL);
1553 CHECK(!port_in_type_find(p, name), EEXIST);
1555 /* Node allocation. */
1556 elem = calloc(1, sizeof(struct port_in_type));
1557 CHECK(elem, ENOMEM);
1559 /* Node initialization. */
1560 strcpy(elem->name, name);
1561 memcpy(&elem->ops, ops, sizeof(*ops));
1563 /* Node add to tailq. */
1564 TAILQ_INSERT_TAIL(&p->port_in_types, elem, node);
1569 static struct port_in *
1570 port_in_find(struct rte_swx_pipeline *p, uint32_t port_id)
1572 struct port_in *port;
1574 TAILQ_FOREACH(port, &p->ports_in, node)
1575 if (port->id == port_id)
1582 rte_swx_pipeline_port_in_config(struct rte_swx_pipeline *p,
1584 const char *port_type_name,
1587 struct port_in_type *type = NULL;
1588 struct port_in *port = NULL;
1593 CHECK(!port_in_find(p, port_id), EINVAL);
1595 CHECK_NAME(port_type_name, EINVAL);
1596 type = port_in_type_find(p, port_type_name);
1597 CHECK(type, EINVAL);
1599 obj = type->ops.create(args);
1602 /* Node allocation. */
1603 port = calloc(1, sizeof(struct port_in));
1604 CHECK(port, ENOMEM);
1606 /* Node initialization. */
1611 /* Node add to tailq. */
1612 TAILQ_INSERT_TAIL(&p->ports_in, port, node);
1613 if (p->n_ports_in < port_id + 1)
1614 p->n_ports_in = port_id + 1;
1620 port_in_build(struct rte_swx_pipeline *p)
1622 struct port_in *port;
1625 CHECK(p->n_ports_in, EINVAL);
1626 CHECK(rte_is_power_of_2(p->n_ports_in), EINVAL);
1628 for (i = 0; i < p->n_ports_in; i++)
1629 CHECK(port_in_find(p, i), EINVAL);
1631 p->in = calloc(p->n_ports_in, sizeof(struct port_in_runtime));
1632 CHECK(p->in, ENOMEM);
1634 TAILQ_FOREACH(port, &p->ports_in, node) {
1635 struct port_in_runtime *in = &p->in[port->id];
1637 in->pkt_rx = port->type->ops.pkt_rx;
1638 in->obj = port->obj;
1645 port_in_build_free(struct rte_swx_pipeline *p)
1652 port_in_free(struct rte_swx_pipeline *p)
1654 port_in_build_free(p);
1658 struct port_in *port;
1660 port = TAILQ_FIRST(&p->ports_in);
1664 TAILQ_REMOVE(&p->ports_in, port, node);
1665 port->type->ops.free(port->obj);
1669 /* Input port types. */
1671 struct port_in_type *elem;
1673 elem = TAILQ_FIRST(&p->port_in_types);
1677 TAILQ_REMOVE(&p->port_in_types, elem, node);
1685 static struct port_out_type *
1686 port_out_type_find(struct rte_swx_pipeline *p, const char *name)
1688 struct port_out_type *elem;
1693 TAILQ_FOREACH(elem, &p->port_out_types, node)
1694 if (!strcmp(elem->name, name))
1701 rte_swx_pipeline_port_out_type_register(struct rte_swx_pipeline *p,
1703 struct rte_swx_port_out_ops *ops)
1705 struct port_out_type *elem;
1708 CHECK_NAME(name, EINVAL);
1710 CHECK(ops->create, EINVAL);
1711 CHECK(ops->free, EINVAL);
1712 CHECK(ops->pkt_tx, EINVAL);
1713 CHECK(ops->stats_read, EINVAL);
1715 CHECK(!port_out_type_find(p, name), EEXIST);
1717 /* Node allocation. */
1718 elem = calloc(1, sizeof(struct port_out_type));
1719 CHECK(elem, ENOMEM);
1721 /* Node initialization. */
1722 strcpy(elem->name, name);
1723 memcpy(&elem->ops, ops, sizeof(*ops));
1725 /* Node add to tailq. */
1726 TAILQ_INSERT_TAIL(&p->port_out_types, elem, node);
1731 static struct port_out *
1732 port_out_find(struct rte_swx_pipeline *p, uint32_t port_id)
1734 struct port_out *port;
1736 TAILQ_FOREACH(port, &p->ports_out, node)
1737 if (port->id == port_id)
1744 rte_swx_pipeline_port_out_config(struct rte_swx_pipeline *p,
1746 const char *port_type_name,
1749 struct port_out_type *type = NULL;
1750 struct port_out *port = NULL;
1755 CHECK(!port_out_find(p, port_id), EINVAL);
1757 CHECK_NAME(port_type_name, EINVAL);
1758 type = port_out_type_find(p, port_type_name);
1759 CHECK(type, EINVAL);
1761 obj = type->ops.create(args);
1764 /* Node allocation. */
1765 port = calloc(1, sizeof(struct port_out));
1766 CHECK(port, ENOMEM);
1768 /* Node initialization. */
1773 /* Node add to tailq. */
1774 TAILQ_INSERT_TAIL(&p->ports_out, port, node);
1775 if (p->n_ports_out < port_id + 1)
1776 p->n_ports_out = port_id + 1;
1782 port_out_build(struct rte_swx_pipeline *p)
1784 struct port_out *port;
1787 CHECK(p->n_ports_out, EINVAL);
1789 for (i = 0; i < p->n_ports_out; i++)
1790 CHECK(port_out_find(p, i), EINVAL);
1792 p->out = calloc(p->n_ports_out, sizeof(struct port_out_runtime));
1793 CHECK(p->out, ENOMEM);
1795 TAILQ_FOREACH(port, &p->ports_out, node) {
1796 struct port_out_runtime *out = &p->out[port->id];
1798 out->pkt_tx = port->type->ops.pkt_tx;
1799 out->flush = port->type->ops.flush;
1800 out->obj = port->obj;
1807 port_out_build_free(struct rte_swx_pipeline *p)
1814 port_out_free(struct rte_swx_pipeline *p)
1816 port_out_build_free(p);
1820 struct port_out *port;
1822 port = TAILQ_FIRST(&p->ports_out);
1826 TAILQ_REMOVE(&p->ports_out, port, node);
1827 port->type->ops.free(port->obj);
1831 /* Output port types. */
1833 struct port_out_type *elem;
1835 elem = TAILQ_FIRST(&p->port_out_types);
1839 TAILQ_REMOVE(&p->port_out_types, elem, node);
1847 static struct extern_type *
1848 extern_type_find(struct rte_swx_pipeline *p, const char *name)
1850 struct extern_type *elem;
1852 TAILQ_FOREACH(elem, &p->extern_types, node)
1853 if (strcmp(elem->name, name) == 0)
1859 static struct extern_type_member_func *
1860 extern_type_member_func_find(struct extern_type *type, const char *name)
1862 struct extern_type_member_func *elem;
1864 TAILQ_FOREACH(elem, &type->funcs, node)
1865 if (strcmp(elem->name, name) == 0)
1871 static struct extern_obj *
1872 extern_obj_find(struct rte_swx_pipeline *p, const char *name)
1874 struct extern_obj *elem;
1876 TAILQ_FOREACH(elem, &p->extern_objs, node)
1877 if (strcmp(elem->name, name) == 0)
1883 static struct extern_type_member_func *
1884 extern_obj_member_func_parse(struct rte_swx_pipeline *p,
1886 struct extern_obj **obj)
1888 struct extern_obj *object;
1889 struct extern_type_member_func *func;
1890 char *object_name, *func_name;
1892 if (name[0] != 'e' || name[1] != '.')
1895 object_name = strdup(&name[2]);
1899 func_name = strchr(object_name, '.');
1908 object = extern_obj_find(p, object_name);
1914 func = extern_type_member_func_find(object->type, func_name);
1927 static struct field *
1928 extern_obj_mailbox_field_parse(struct rte_swx_pipeline *p,
1930 struct extern_obj **object)
1932 struct extern_obj *obj;
1934 char *obj_name, *field_name;
1936 if ((name[0] != 'e') || (name[1] != '.'))
1939 obj_name = strdup(&name[2]);
1943 field_name = strchr(obj_name, '.');
1952 obj = extern_obj_find(p, obj_name);
1958 f = struct_type_field_find(obj->type->mailbox_struct_type, field_name);
1972 rte_swx_pipeline_extern_type_register(struct rte_swx_pipeline *p,
1974 const char *mailbox_struct_type_name,
1975 rte_swx_extern_type_constructor_t constructor,
1976 rte_swx_extern_type_destructor_t destructor)
1978 struct extern_type *elem;
1979 struct struct_type *mailbox_struct_type;
1983 CHECK_NAME(name, EINVAL);
1984 CHECK(!extern_type_find(p, name), EEXIST);
1986 CHECK_NAME(mailbox_struct_type_name, EINVAL);
1987 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
1988 CHECK(mailbox_struct_type, EINVAL);
1990 CHECK(constructor, EINVAL);
1991 CHECK(destructor, EINVAL);
1993 /* Node allocation. */
1994 elem = calloc(1, sizeof(struct extern_type));
1995 CHECK(elem, ENOMEM);
1997 /* Node initialization. */
1998 strcpy(elem->name, name);
1999 elem->mailbox_struct_type = mailbox_struct_type;
2000 elem->constructor = constructor;
2001 elem->destructor = destructor;
2002 TAILQ_INIT(&elem->funcs);
2004 /* Node add to tailq. */
2005 TAILQ_INSERT_TAIL(&p->extern_types, elem, node);
2011 rte_swx_pipeline_extern_type_member_func_register(struct rte_swx_pipeline *p,
2012 const char *extern_type_name,
2014 rte_swx_extern_type_member_func_t member_func)
2016 struct extern_type *type;
2017 struct extern_type_member_func *type_member;
2021 CHECK_NAME(extern_type_name, EINVAL);
2022 type = extern_type_find(p, extern_type_name);
2023 CHECK(type, EINVAL);
2024 CHECK(type->n_funcs < RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX, ENOSPC);
2026 CHECK_NAME(name, EINVAL);
2027 CHECK(!extern_type_member_func_find(type, name), EEXIST);
2029 CHECK(member_func, EINVAL);
2031 /* Node allocation. */
2032 type_member = calloc(1, sizeof(struct extern_type_member_func));
2033 CHECK(type_member, ENOMEM);
2035 /* Node initialization. */
2036 strcpy(type_member->name, name);
2037 type_member->func = member_func;
2038 type_member->id = type->n_funcs;
2040 /* Node add to tailq. */
2041 TAILQ_INSERT_TAIL(&type->funcs, type_member, node);
2048 rte_swx_pipeline_extern_object_config(struct rte_swx_pipeline *p,
2049 const char *extern_type_name,
2053 struct extern_type *type;
2054 struct extern_obj *obj;
2059 CHECK_NAME(extern_type_name, EINVAL);
2060 type = extern_type_find(p, extern_type_name);
2061 CHECK(type, EINVAL);
2063 CHECK_NAME(name, EINVAL);
2064 CHECK(!extern_obj_find(p, name), EEXIST);
2066 /* Node allocation. */
2067 obj = calloc(1, sizeof(struct extern_obj));
2070 /* Object construction. */
2071 obj_handle = type->constructor(args);
2077 /* Node initialization. */
2078 strcpy(obj->name, name);
2080 obj->obj = obj_handle;
2081 obj->struct_id = p->n_structs;
2082 obj->id = p->n_extern_objs;
2084 /* Node add to tailq. */
2085 TAILQ_INSERT_TAIL(&p->extern_objs, obj, node);
2093 extern_obj_build(struct rte_swx_pipeline *p)
2097 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2098 struct thread *t = &p->threads[i];
2099 struct extern_obj *obj;
2101 t->extern_objs = calloc(p->n_extern_objs,
2102 sizeof(struct extern_obj_runtime));
2103 CHECK(t->extern_objs, ENOMEM);
2105 TAILQ_FOREACH(obj, &p->extern_objs, node) {
2106 struct extern_obj_runtime *r =
2107 &t->extern_objs[obj->id];
2108 struct extern_type_member_func *func;
2109 uint32_t mailbox_size =
2110 obj->type->mailbox_struct_type->n_bits / 8;
2114 r->mailbox = calloc(1, mailbox_size);
2115 CHECK(r->mailbox, ENOMEM);
2117 TAILQ_FOREACH(func, &obj->type->funcs, node)
2118 r->funcs[func->id] = func->func;
2120 t->structs[obj->struct_id] = r->mailbox;
2128 extern_obj_build_free(struct rte_swx_pipeline *p)
2132 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2133 struct thread *t = &p->threads[i];
2136 if (!t->extern_objs)
2139 for (j = 0; j < p->n_extern_objs; j++) {
2140 struct extern_obj_runtime *r = &t->extern_objs[j];
2145 free(t->extern_objs);
2146 t->extern_objs = NULL;
2151 extern_obj_free(struct rte_swx_pipeline *p)
2153 extern_obj_build_free(p);
2155 /* Extern objects. */
2157 struct extern_obj *elem;
2159 elem = TAILQ_FIRST(&p->extern_objs);
2163 TAILQ_REMOVE(&p->extern_objs, elem, node);
2165 elem->type->destructor(elem->obj);
2171 struct extern_type *elem;
2173 elem = TAILQ_FIRST(&p->extern_types);
2177 TAILQ_REMOVE(&p->extern_types, elem, node);
2180 struct extern_type_member_func *func;
2182 func = TAILQ_FIRST(&elem->funcs);
2186 TAILQ_REMOVE(&elem->funcs, func, node);
2197 static struct extern_func *
2198 extern_func_find(struct rte_swx_pipeline *p, const char *name)
2200 struct extern_func *elem;
2202 TAILQ_FOREACH(elem, &p->extern_funcs, node)
2203 if (strcmp(elem->name, name) == 0)
2209 static struct extern_func *
2210 extern_func_parse(struct rte_swx_pipeline *p,
2213 if (name[0] != 'f' || name[1] != '.')
2216 return extern_func_find(p, &name[2]);
2219 static struct field *
2220 extern_func_mailbox_field_parse(struct rte_swx_pipeline *p,
2222 struct extern_func **function)
2224 struct extern_func *func;
2226 char *func_name, *field_name;
2228 if ((name[0] != 'f') || (name[1] != '.'))
2231 func_name = strdup(&name[2]);
2235 field_name = strchr(func_name, '.');
2244 func = extern_func_find(p, func_name);
2250 f = struct_type_field_find(func->mailbox_struct_type, field_name);
2264 rte_swx_pipeline_extern_func_register(struct rte_swx_pipeline *p,
2266 const char *mailbox_struct_type_name,
2267 rte_swx_extern_func_t func)
2269 struct extern_func *f;
2270 struct struct_type *mailbox_struct_type;
2274 CHECK_NAME(name, EINVAL);
2275 CHECK(!extern_func_find(p, name), EEXIST);
2277 CHECK_NAME(mailbox_struct_type_name, EINVAL);
2278 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
2279 CHECK(mailbox_struct_type, EINVAL);
2281 CHECK(func, EINVAL);
2283 /* Node allocation. */
2284 f = calloc(1, sizeof(struct extern_func));
2285 CHECK(f, ENOMEM);
2287 /* Node initialization. */
2288 strcpy(f->name, name);
2289 f->mailbox_struct_type = mailbox_struct_type;
2291 f->struct_id = p->n_structs;
2292 f->id = p->n_extern_funcs;
2294 /* Node add to tailq. */
2295 TAILQ_INSERT_TAIL(&p->extern_funcs, f, node);
2296 p->n_extern_funcs++;
2303 extern_func_build(struct rte_swx_pipeline *p)
2307 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2308 struct thread *t = &p->threads[i];
2309 struct extern_func *func;
2311 /* Memory allocation. */
2312 t->extern_funcs = calloc(p->n_extern_funcs,
2313 sizeof(struct extern_func_runtime));
2314 CHECK(t->extern_funcs, ENOMEM);
2316 /* Extern function. */
2317 TAILQ_FOREACH(func, &p->extern_funcs, node) {
2318 struct extern_func_runtime *r =
2319 &t->extern_funcs[func->id];
2320 uint32_t mailbox_size =
2321 func->mailbox_struct_type->n_bits / 8;
2323 r->func = func->func;
2325 r->mailbox = calloc(1, mailbox_size);
2326 CHECK(r->mailbox, ENOMEM);
2328 t->structs[func->struct_id] = r->mailbox;
2336 extern_func_build_free(struct rte_swx_pipeline *p)
2340 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2341 struct thread *t = &p->threads[i];
2344 if (!t->extern_funcs)
2347 for (j = 0; j < p->n_extern_funcs; j++) {
2348 struct extern_func_runtime *r = &t->extern_funcs[j];
2353 free(t->extern_funcs);
2354 t->extern_funcs = NULL;
2359 extern_func_free(struct rte_swx_pipeline *p)
2361 extern_func_build_free(p);
2364 struct extern_func *elem;
2366 elem = TAILQ_FIRST(&p->extern_funcs);
2370 TAILQ_REMOVE(&p->extern_funcs, elem, node);
2378 static struct header *
2379 header_find(struct rte_swx_pipeline *p, const char *name)
2381 struct header *elem;
2383 TAILQ_FOREACH(elem, &p->headers, node)
2384 if (strcmp(elem->name, name) == 0)
2390 static struct header *
2391 header_find_by_struct_id(struct rte_swx_pipeline *p, uint32_t struct_id)
2393 struct header *elem;
2395 TAILQ_FOREACH(elem, &p->headers, node)
2396 if (elem->struct_id == struct_id)
2402 static struct header *
2403 header_parse(struct rte_swx_pipeline *p,
2406 if (name[0] != 'h' || name[1] != '.')
2409 return header_find(p, &name[2]);
2412 static struct field *
2413 header_field_parse(struct rte_swx_pipeline *p,
2415 struct header **header)
2419 char *header_name, *field_name;
2421 if ((name[0] != 'h') || (name[1] != '.'))
2424 header_name = strdup(&name[2]);
2428 field_name = strchr(header_name, '.');
2437 h = header_find(p, header_name);
2443 f = struct_type_field_find(h->st, field_name);
2457 rte_swx_pipeline_packet_header_register(struct rte_swx_pipeline *p,
2459 const char *struct_type_name)
2461 struct struct_type *st;
2463 size_t n_headers_max;
2466 CHECK_NAME(name, EINVAL);
2467 CHECK_NAME(struct_type_name, EINVAL);
2469 CHECK(!header_find(p, name), EEXIST);
2471 st = struct_type_find(p, struct_type_name);
2474 n_headers_max = RTE_SIZEOF_FIELD(struct thread, valid_headers) * 8;
2475 CHECK(p->n_headers < n_headers_max, ENOSPC);
2477 /* Node allocation. */
2478 h = calloc(1, sizeof(struct header));
2481 /* Node initialization. */
2482 strcpy(h->name, name);
2484 h->struct_id = p->n_structs;
2485 h->id = p->n_headers;
2487 /* Node add to tailq. */
2488 TAILQ_INSERT_TAIL(&p->headers, h, node);
2496 header_build(struct rte_swx_pipeline *p)
2499 uint32_t n_bytes = 0, i;
2501 TAILQ_FOREACH(h, &p->headers, node) {
2502 n_bytes += h->st->n_bits / 8;
2505 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2506 struct thread *t = &p->threads[i];
2507 uint32_t offset = 0;
2509 t->headers = calloc(p->n_headers,
2510 sizeof(struct header_runtime));
2511 CHECK(t->headers, ENOMEM);
2513 t->headers_out = calloc(p->n_headers,
2514 sizeof(struct header_out_runtime));
2515 CHECK(t->headers_out, ENOMEM);
2517 t->header_storage = calloc(1, n_bytes);
2518 CHECK(t->header_storage, ENOMEM);
2520 t->header_out_storage = calloc(1, n_bytes);
2521 CHECK(t->header_out_storage, ENOMEM);
2523 TAILQ_FOREACH(h, &p->headers, node) {
2524 uint8_t *header_storage;
2526 header_storage = &t->header_storage[offset];
2527 offset += h->st->n_bits / 8;
2529 t->headers[h->id].ptr0 = header_storage;
2530 t->structs[h->struct_id] = header_storage;
2538 header_build_free(struct rte_swx_pipeline *p)
2542 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2543 struct thread *t = &p->threads[i];
2545 free(t->headers_out);
2546 t->headers_out = NULL;
2551 free(t->header_out_storage);
2552 t->header_out_storage = NULL;
2554 free(t->header_storage);
2555 t->header_storage = NULL;
2560 header_free(struct rte_swx_pipeline *p)
2562 header_build_free(p);
2565 struct header *elem;
2567 elem = TAILQ_FIRST(&p->headers);
2571 TAILQ_REMOVE(&p->headers, elem, node);
2579 static struct field *
2580 metadata_field_parse(struct rte_swx_pipeline *p, const char *name)
2582 if (!p->metadata_st)
2585 if (name[0] != 'm' || name[1] != '.')
2588 return struct_type_field_find(p->metadata_st, &name[2]);
2592 rte_swx_pipeline_packet_metadata_register(struct rte_swx_pipeline *p,
2593 const char *struct_type_name)
2595 struct struct_type *st = NULL;
2599 CHECK_NAME(struct_type_name, EINVAL);
2600 st = struct_type_find(p, struct_type_name);
2602 CHECK(!p->metadata_st, EINVAL);
2604 p->metadata_st = st;
2605 p->metadata_struct_id = p->n_structs;
2613 metadata_build(struct rte_swx_pipeline *p)
2615 uint32_t n_bytes = p->metadata_st->n_bits / 8;
2618 /* Thread-level initialization. */
2619 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2620 struct thread *t = &p->threads[i];
2623 metadata = calloc(1, n_bytes);
2624 CHECK(metadata, ENOMEM);
2626 t->metadata = metadata;
2627 t->structs[p->metadata_struct_id] = metadata;
2634 metadata_build_free(struct rte_swx_pipeline *p)
2638 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2639 struct thread *t = &p->threads[i];
2647 metadata_free(struct rte_swx_pipeline *p)
2649 metadata_build_free(p);
2656 instruction_is_tx(enum instruction_type type)
2669 instruction_is_jmp(struct instruction *instr)
2671 switch (instr->type) {
2673 case INSTR_JMP_VALID:
2674 case INSTR_JMP_INVALID:
2676 case INSTR_JMP_MISS:
2677 case INSTR_JMP_ACTION_HIT:
2678 case INSTR_JMP_ACTION_MISS:
2680 case INSTR_JMP_EQ_MH:
2681 case INSTR_JMP_EQ_HM:
2682 case INSTR_JMP_EQ_HH:
2683 case INSTR_JMP_EQ_I:
2685 case INSTR_JMP_NEQ_MH:
2686 case INSTR_JMP_NEQ_HM:
2687 case INSTR_JMP_NEQ_HH:
2688 case INSTR_JMP_NEQ_I:
2690 case INSTR_JMP_LT_MH:
2691 case INSTR_JMP_LT_HM:
2692 case INSTR_JMP_LT_HH:
2693 case INSTR_JMP_LT_MI:
2694 case INSTR_JMP_LT_HI:
2696 case INSTR_JMP_GT_MH:
2697 case INSTR_JMP_GT_HM:
2698 case INSTR_JMP_GT_HH:
2699 case INSTR_JMP_GT_MI:
2700 case INSTR_JMP_GT_HI:
2708 static struct field *
2709 action_field_parse(struct action *action, const char *name);
2711 static struct field *
2712 struct_field_parse(struct rte_swx_pipeline *p,
2713 struct action *action,
2715 uint32_t *struct_id)
2722 struct header *header;
2724 f = header_field_parse(p, name, &header);
2728 *struct_id = header->struct_id;
2734 f = metadata_field_parse(p, name);
2738 *struct_id = p->metadata_struct_id;
2747 f = action_field_parse(action, name);
2757 struct extern_obj *obj;
2759 f = extern_obj_mailbox_field_parse(p, name, &obj);
2763 *struct_id = obj->struct_id;
2769 struct extern_func *func;
2771 f = extern_func_mailbox_field_parse(p, name, &func);
2775 *struct_id = func->struct_id;
2785 pipeline_port_inc(struct rte_swx_pipeline *p)
2787 p->port_id = (p->port_id + 1) & (p->n_ports_in - 1);
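/* The wrap-around above relies on n_ports_in being a power of two, which
 * port_in_build() enforces.
 */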
2791 thread_ip_reset(struct rte_swx_pipeline *p, struct thread *t)
2793 t->ip = p->instructions;
2797 thread_ip_set(struct thread *t, struct instruction *ip)
2803 thread_ip_action_call(struct rte_swx_pipeline *p,
2808 t->ip = p->action_instructions[action_id];
2812 thread_ip_inc(struct rte_swx_pipeline *p);
2815 thread_ip_inc(struct rte_swx_pipeline *p)
2817 struct thread *t = &p->threads[p->thread_id];
2823 thread_ip_inc_cond(struct thread *t, int cond)
2829 thread_yield(struct rte_swx_pipeline *p)
2831 p->thread_id = (p->thread_id + 1) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
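/* As with the input port counter, the masking above works because
 * RTE_SWX_PIPELINE_THREADS_MAX is a power of two (16 by default).
 */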
2835 thread_yield_cond(struct rte_swx_pipeline *p, int cond)
2837 p->thread_id = (p->thread_id + cond) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
2844 instr_rx_translate(struct rte_swx_pipeline *p,
2845 struct action *action,
2848 struct instruction *instr,
2849 struct instruction_data *data __rte_unused)
2853 CHECK(!action, EINVAL);
2854 CHECK(n_tokens == 2, EINVAL);
2856 f = metadata_field_parse(p, tokens[1]);
2859 instr->type = INSTR_RX;
2860 instr->io.io.offset = f->offset / 8;
2861 instr->io.io.n_bits = f->n_bits;
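/* At run-time, instr_rx_exec() writes the input port id into this meta-data
 * field (see the METADATA_WRITE() call below).
 */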
2866 instr_rx_exec(struct rte_swx_pipeline *p);
2869 instr_rx_exec(struct rte_swx_pipeline *p)
2871 struct thread *t = &p->threads[p->thread_id];
2872 struct instruction *ip = t->ip;
2873 struct port_in_runtime *port = &p->in[p->port_id];
2874 struct rte_swx_pkt *pkt = &t->pkt;
2878 pkt_received = port->pkt_rx(port->obj, pkt);
2879 t->ptr = &pkt->pkt[pkt->offset];
2880 rte_prefetch0(t->ptr);
2882 TRACE("[Thread %2u] rx %s from port %u\n",
2884 pkt_received ? "1 pkt" : "0 pkts",
2888 t->valid_headers = 0;
2889 t->n_headers_out = 0;
2892 METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, p->port_id);
2895 t->table_state = p->table_state;
2898 pipeline_port_inc(p);
2899 thread_ip_inc_cond(t, pkt_received);
2907 instr_tx_translate(struct rte_swx_pipeline *p,
2908 struct action *action __rte_unused,
2911 struct instruction *instr,
2912 struct instruction_data *data __rte_unused)
2914 char *port = tokens[1];
2918 CHECK(n_tokens == 2, EINVAL);
2920 f = metadata_field_parse(p, port);
2922 instr->type = INSTR_TX;
2923 instr->io.io.offset = f->offset / 8;
2924 instr->io.io.n_bits = f->n_bits;
2929 port_val = strtoul(port, &port, 0);
2930 CHECK(!port[0], EINVAL);
2932 instr->type = INSTR_TX_I;
2933 instr->io.io.val = port_val;
2938 instr_drop_translate(struct rte_swx_pipeline *p,
2939 struct action *action __rte_unused,
2940 char **tokens __rte_unused,
2942 struct instruction *instr,
2943 struct instruction_data *data __rte_unused)
2945 CHECK(n_tokens == 1, EINVAL);
2948 instr->type = INSTR_TX_I;
2949 instr->io.io.val = p->n_ports_out - 1;
2954 emit_handler(struct thread *t)
2956 struct header_out_runtime *h0 = &t->headers_out[0];
2957 struct header_out_runtime *h1 = &t->headers_out[1];
2958 uint32_t offset = 0, i;
2960 /* No header change or header decapsulation. */
2961 if ((t->n_headers_out == 1) &&
2962 (h0->ptr + h0->n_bytes == t->ptr)) {
2963 TRACE("Emit handler: no header change or header decap.\n");
2965 t->pkt.offset -= h0->n_bytes;
2966 t->pkt.length += h0->n_bytes;
2971 /* Header encapsulation (optionally, with prior header decapsulation). */
2972 if ((t->n_headers_out == 2) &&
2973 (h1->ptr + h1->n_bytes == t->ptr) &&
2974 (h0->ptr == h0->ptr0)) {
2977 TRACE("Emit handler: header encapsulation.\n");
2979 offset = h0->n_bytes + h1->n_bytes;
2980 memcpy(t->ptr - offset, h0->ptr, h0->n_bytes);
2981 t->pkt.offset -= offset;
2982 t->pkt.length += offset;
2987 /* Header insertion. */
2990 /* Header extraction. */
2993 /* For any other case. */
2994 TRACE("Emit handler: complex case.\n");
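/* Flatten all emitted headers into the out storage area first, then prepend
 * them to the packet with a single copy below.
 */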
2996 for (i = 0; i < t->n_headers_out; i++) {
2997 struct header_out_runtime *h = &t->headers_out[i];
2999 memcpy(&t->header_out_storage[offset], h->ptr, h->n_bytes);
3000 offset += h->n_bytes;
3004 memcpy(t->ptr - offset, t->header_out_storage, offset);
3005 t->pkt.offset -= offset;
3006 t->pkt.length += offset;
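/* Illustrative walk-through of the encapsulation fast path above (sizes
 * hypothetical): with h0->n_bytes = 14 and h1->n_bytes = 20, offset = 34;
 * h1 already ends exactly at t->ptr, so only the 14 bytes of h0 are copied
 * to t->ptr - 34, immediately in front of h1, and the packet window is grown
 * by 34 bytes (offset decreased, length increased).
 */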
3011 instr_tx_exec(struct rte_swx_pipeline *p);
3014 instr_tx_exec(struct rte_swx_pipeline *p)
3016 struct thread *t = &p->threads[p->thread_id];
3017 struct instruction *ip = t->ip;
3018 uint64_t port_id = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
3019 struct port_out_runtime *port = &p->out[port_id];
3020 struct rte_swx_pkt *pkt = &t->pkt;
3022 TRACE("[Thread %2u]: tx 1 pkt to port %u\n",
3030 port->pkt_tx(port->obj, pkt);
3033 thread_ip_reset(p, t);
3038 instr_tx_i_exec(struct rte_swx_pipeline *p)
3040 struct thread *t = &p->threads[p->thread_id];
3041 struct instruction *ip = t->ip;
3042 uint64_t port_id = ip->io.io.val;
3043 struct port_out_runtime *port = &p->out[port_id];
3044 struct rte_swx_pkt *pkt = &t->pkt;
3046 TRACE("[Thread %2u]: tx (i) 1 pkt to port %u\n",
3054 port->pkt_tx(port->obj, pkt);
3057 thread_ip_reset(p, t);
3065 instr_hdr_extract_translate(struct rte_swx_pipeline *p,
3066 struct action *action,
3069 struct instruction *instr,
3070 struct instruction_data *data __rte_unused)
3074 CHECK(!action, EINVAL);
3075 CHECK(n_tokens == 2, EINVAL);
3077 h = header_parse(p, tokens[1]);
3080 instr->type = INSTR_HDR_EXTRACT;
3081 instr->io.hdr.header_id[0] = h->id;
3082 instr->io.hdr.struct_id[0] = h->struct_id;
3083 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
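/* Illustrative usage (header name hypothetical):
 *
 *     extract h.ethernet
 *
 * The operand must be a header; its size in bytes is derived from the header
 * struct type (h->st->n_bits / 8).
 */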
3088 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract);
3091 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract)
3093 struct thread *t = &p->threads[p->thread_id];
3094 struct instruction *ip = t->ip;
3095 uint64_t valid_headers = t->valid_headers;
3096 uint8_t *ptr = t->ptr;
3097 uint32_t offset = t->pkt.offset;
3098 uint32_t length = t->pkt.length;
3101 for (i = 0; i < n_extract; i++) {
3102 uint32_t header_id = ip->io.hdr.header_id[i];
3103 uint32_t struct_id = ip->io.hdr.struct_id[i];
3104 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
3106 TRACE("[Thread %2u]: extract header %u (%u bytes)\n",
3112 t->structs[struct_id] = ptr;
3113 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
3122 t->valid_headers = valid_headers;
3125 t->pkt.offset = offset;
3126 t->pkt.length = length;
3131 instr_hdr_extract_exec(struct rte_swx_pipeline *p)
3133 __instr_hdr_extract_exec(p, 1);
3140 instr_hdr_extract2_exec(struct rte_swx_pipeline *p)
3142 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
3145 __instr_hdr_extract_exec(p, 2);
3152 instr_hdr_extract3_exec(struct rte_swx_pipeline *p)
3154 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
3157 __instr_hdr_extract_exec(p, 3);
3164 instr_hdr_extract4_exec(struct rte_swx_pipeline *p)
3166 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
3169 __instr_hdr_extract_exec(p, 4);
3176 instr_hdr_extract5_exec(struct rte_swx_pipeline *p)
3178 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
3181 __instr_hdr_extract_exec(p, 5);
3188 instr_hdr_extract6_exec(struct rte_swx_pipeline *p)
3190 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
3193 __instr_hdr_extract_exec(p, 6);
3200 instr_hdr_extract7_exec(struct rte_swx_pipeline *p)
3202 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
3205 __instr_hdr_extract_exec(p, 7);
3212 instr_hdr_extract8_exec(struct rte_swx_pipeline *p)
3214 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
3217 __instr_hdr_extract_exec(p, 8);
3227 instr_hdr_emit_translate(struct rte_swx_pipeline *p,
3228 struct action *action __rte_unused,
3231 struct instruction *instr,
3232 struct instruction_data *data __rte_unused)
3236 CHECK(n_tokens == 2, EINVAL);
3238 h = header_parse(p, tokens[1]);
3241 instr->type = INSTR_HDR_EMIT;
3242 instr->io.hdr.header_id[0] = h->id;
3243 instr->io.hdr.struct_id[0] = h->struct_id;
3244 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
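/* Illustrative usage (header name hypothetical):
 *
 *     emit h.ethernet
 */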
3249 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit);
3252 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit)
3254 struct thread *t = &p->threads[p->thread_id];
3255 struct instruction *ip = t->ip;
3256 uint64_t valid_headers = t->valid_headers;
3257 uint32_t n_headers_out = t->n_headers_out;
3258 struct header_out_runtime *ho = &t->headers_out[n_headers_out - 1];
3259 uint8_t *ho_ptr = NULL;
3260 uint32_t ho_nbytes = 0, first = 1, i;
3262 for (i = 0; i < n_emit; i++) {
3263 uint32_t header_id = ip->io.hdr.header_id[i];
3264 uint32_t struct_id = ip->io.hdr.struct_id[i];
3265 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
3267 struct header_runtime *hi = &t->headers[header_id];
3268 uint8_t *hi_ptr = t->structs[struct_id];
3270 if (!MASK64_BIT_GET(valid_headers, header_id))
3273 TRACE("[Thread %2u]: emit header %u\n",
3281 if (!t->n_headers_out) {
3282 ho = &t->headers_out[0];
3284 ho->ptr0 = hi->ptr0;
3288 ho_nbytes = n_bytes;
3295 ho_nbytes = ho->n_bytes;
3299 if (ho_ptr + ho_nbytes == hi_ptr) {
3300 ho_nbytes += n_bytes;
3302 ho->n_bytes = ho_nbytes;
3305 ho->ptr0 = hi->ptr0;
3309 ho_nbytes = n_bytes;
3315 ho->n_bytes = ho_nbytes;
3316 t->n_headers_out = n_headers_out;
3320 instr_hdr_emit_exec(struct rte_swx_pipeline *p)
3322 __instr_hdr_emit_exec(p, 1);
3329 instr_hdr_emit_tx_exec(struct rte_swx_pipeline *p)
3331 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
3334 __instr_hdr_emit_exec(p, 1);
3339 instr_hdr_emit2_tx_exec(struct rte_swx_pipeline *p)
3341 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
3344 __instr_hdr_emit_exec(p, 2);
3349 instr_hdr_emit3_tx_exec(struct rte_swx_pipeline *p)
3351 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
3354 __instr_hdr_emit_exec(p, 3);
3359 instr_hdr_emit4_tx_exec(struct rte_swx_pipeline *p)
3361 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
3364 __instr_hdr_emit_exec(p, 4);
3369 instr_hdr_emit5_tx_exec(struct rte_swx_pipeline *p)
3371 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
3374 __instr_hdr_emit_exec(p, 5);
3379 instr_hdr_emit6_tx_exec(struct rte_swx_pipeline *p)
3381 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
3384 __instr_hdr_emit_exec(p, 6);
3389 instr_hdr_emit7_tx_exec(struct rte_swx_pipeline *p)
3391 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
3394 __instr_hdr_emit_exec(p, 7);
3399 instr_hdr_emit8_tx_exec(struct rte_swx_pipeline *p)
3401 TRACE("[Thread %2u] *** The next 9 instructions are fused. ***\n",
3404 __instr_hdr_emit_exec(p, 8);
3412 instr_hdr_validate_translate(struct rte_swx_pipeline *p,
3413 struct action *action __rte_unused,
3416 struct instruction *instr,
3417 struct instruction_data *data __rte_unused)
3421 CHECK(n_tokens == 2, EINVAL);
3423 h = header_parse(p, tokens[1]);
3426 instr->type = INSTR_HDR_VALIDATE;
3427 instr->valid.header_id = h->id;
3432 instr_hdr_validate_exec(struct rte_swx_pipeline *p)
3434 struct thread *t = &p->threads[p->thread_id];
3435 struct instruction *ip = t->ip;
3436 uint32_t header_id = ip->valid.header_id;
3438 TRACE("[Thread %2u] validate header %u\n", p->thread_id, header_id);
3441 t->valid_headers = MASK64_BIT_SET(t->valid_headers, header_id);
3451 instr_hdr_invalidate_translate(struct rte_swx_pipeline *p,
3452 struct action *action __rte_unused,
3455 struct instruction *instr,
3456 struct instruction_data *data __rte_unused)
3460 CHECK(n_tokens == 2, EINVAL);
3462 h = header_parse(p, tokens[1]);
3465 instr->type = INSTR_HDR_INVALIDATE;
3466 instr->valid.header_id = h->id;
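/* Illustrative usage (header name hypothetical):
 *
 *     validate h.ipv4
 *     invalidate h.ipv4
 *
 * At run-time these simply set or clear the header's bit in t->valid_headers.
 */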
3471 instr_hdr_invalidate_exec(struct rte_swx_pipeline *p)
3473 struct thread *t = &p->threads[p->thread_id];
3474 struct instruction *ip = t->ip;
3475 uint32_t header_id = ip->valid.header_id;
3477 TRACE("[Thread %2u] invalidate header %u\n", p->thread_id, header_id);
3480 t->valid_headers = MASK64_BIT_CLR(t->valid_headers, header_id);
3489 static struct table *
3490 table_find(struct rte_swx_pipeline *p, const char *name);
3492 static struct selector *
3493 selector_find(struct rte_swx_pipeline *p, const char *name);
3496 instr_table_translate(struct rte_swx_pipeline *p,
3497 struct action *action,
3500 struct instruction *instr,
3501 struct instruction_data *data __rte_unused)
3506 CHECK(!action, EINVAL);
3507 CHECK(n_tokens == 2, EINVAL);
3509 t = table_find(p, tokens[1]);
3511 instr->type = INSTR_TABLE;
3512 instr->table.table_id = t->id;
3516 s = selector_find(p, tokens[1]);
3518 instr->type = INSTR_SELECTOR;
3519 instr->table.table_id = s->id;
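/* Illustrative usage (table name hypothetical):
 *
 *     table ipv4_host
 *
 * The operand is first looked up as a table name and, failing that, as a
 * selector name, which determines whether INSTR_TABLE or INSTR_SELECTOR is
 * generated.
 */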
3527 instr_table_exec(struct rte_swx_pipeline *p)
3529 struct thread *t = &p->threads[p->thread_id];
3530 struct instruction *ip = t->ip;
3531 uint32_t table_id = ip->table.table_id;
3532 struct rte_swx_table_state *ts = &t->table_state[table_id];
3533 struct table_runtime *table = &t->tables[table_id];
3534 struct table_statistics *stats = &p->table_stats[table_id];
3535 uint64_t action_id, n_pkts_hit, n_pkts_action;
3536 uint8_t *action_data;
3540 done = table->func(ts->obj,
3548 TRACE("[Thread %2u] table %u (not finalized)\n",
3556 action_id = hit ? action_id : ts->default_action_id;
3557 action_data = hit ? action_data : ts->default_action_data;
3558 n_pkts_hit = stats->n_pkts_hit[hit];
3559 n_pkts_action = stats->n_pkts_action[action_id];
3561 TRACE("[Thread %2u] table %u (%s, action %u)\n",
3564 hit ? "hit" : "miss",
3565 (uint32_t)action_id);
3567 t->action_id = action_id;
3568 t->structs[0] = action_data;
3570 stats->n_pkts_hit[hit] = n_pkts_hit + 1;
3571 stats->n_pkts_action[action_id] = n_pkts_action + 1;
3574 thread_ip_action_call(p, t, action_id);
3578 instr_selector_exec(struct rte_swx_pipeline *p)
3580 struct thread *t = &p->threads[p->thread_id];
3581 struct instruction *ip = t->ip;
3582 uint32_t selector_id = ip->table.table_id;
3583 struct rte_swx_table_state *ts = &t->table_state[p->n_tables + selector_id];
3584 struct selector_runtime *selector = &t->selectors[selector_id];
3585 struct selector_statistics *stats = &p->selector_stats[selector_id];
3586 uint64_t n_pkts = stats->n_pkts;
3590 done = rte_swx_table_selector_select(ts->obj,
3592 selector->group_id_buffer,
3593 selector->selector_buffer,
3594 selector->member_id_buffer);
3597 TRACE("[Thread %2u] selector %u (not finalized)\n",
3606 TRACE("[Thread %2u] selector %u\n",
3610 stats->n_pkts = n_pkts + 1;
3620 instr_extern_translate(struct rte_swx_pipeline *p,
3621 struct action *action __rte_unused,
3624 struct instruction *instr,
3625 struct instruction_data *data __rte_unused)
3627 char *token = tokens[1];
3629 CHECK(n_tokens == 2, EINVAL);
3631 if (token[0] == 'e') {
3632 struct extern_obj *obj;
3633 struct extern_type_member_func *func;
3635 func = extern_obj_member_func_parse(p, token, &obj);
3636 CHECK(func, EINVAL);
3638 instr->type = INSTR_EXTERN_OBJ;
3639 instr->ext_obj.ext_obj_id = obj->id;
3640 instr->ext_obj.func_id = func->id;
3645 if (token[0] == 'f') {
3646 struct extern_func *func;
3648 func = extern_func_parse(p, token);
3649 CHECK(func, EINVAL);
3651 instr->type = INSTR_EXTERN_FUNC;
3652 instr->ext_func.ext_func_id = func->id;
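/* Illustrative usage (object and function names hypothetical, naming
 * convention assumed): the operand is either an extern object member function
 * ("e." prefix) or an extern function ("f." prefix):
 *
 *     extern e.my_obj.my_member_func
 *     extern f.my_func
 */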
3661 instr_extern_obj_exec(struct rte_swx_pipeline *p)
3663 struct thread *t = &p->threads[p->thread_id];
3664 struct instruction *ip = t->ip;
3665 uint32_t obj_id = ip->ext_obj.ext_obj_id;
3666 uint32_t func_id = ip->ext_obj.func_id;
3667 struct extern_obj_runtime *obj = &t->extern_objs[obj_id];
3668 rte_swx_extern_type_member_func_t func = obj->funcs[func_id];
3670 TRACE("[Thread %2u] extern obj %u member func %u\n",
3675 /* Extern object member function execute. */
3676 uint32_t done = func(obj->obj, obj->mailbox);
3679 thread_ip_inc_cond(t, done);
3680 thread_yield_cond(p, done ^ 1);
3684 instr_extern_func_exec(struct rte_swx_pipeline *p)
3686 struct thread *t = &p->threads[p->thread_id];
3687 struct instruction *ip = t->ip;
3688 uint32_t ext_func_id = ip->ext_func.ext_func_id;
3689 struct extern_func_runtime *ext_func = &t->extern_funcs[ext_func_id];
3690 rte_swx_extern_func_t func = ext_func->func;
3692 TRACE("[Thread %2u] extern func %u\n",
3696 /* Extern function execute. */
3697 uint32_t done = func(ext_func->mailbox);
3700 thread_ip_inc_cond(t, done);
3701 thread_yield_cond(p, done ^ 1);
3708 instr_mov_translate(struct rte_swx_pipeline *p,
3709 struct action *action,
3712 struct instruction *instr,
3713 struct instruction_data *data __rte_unused)
3715 char *dst = tokens[1], *src = tokens[2];
3716 struct field *fdst, *fsrc;
3718 uint32_t dst_struct_id = 0, src_struct_id = 0;
3720 CHECK(n_tokens == 3, EINVAL);
3722 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3723 CHECK(fdst, EINVAL);
3725 /* MOV, MOV_MH, MOV_HM or MOV_HH. */
3726 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3728 instr->type = INSTR_MOV;
3729 if (dst[0] != 'h' && src[0] == 'h')
3730 instr->type = INSTR_MOV_MH;
3731 if (dst[0] == 'h' && src[0] != 'h')
3732 instr->type = INSTR_MOV_HM;
3733 if (dst[0] == 'h' && src[0] == 'h')
3734 instr->type = INSTR_MOV_HH;
3736 instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
3737 instr->mov.dst.n_bits = fdst->n_bits;
3738 instr->mov.dst.offset = fdst->offset / 8;
3739 instr->mov.src.struct_id = (uint8_t)src_struct_id;
3740 instr->mov.src.n_bits = fsrc->n_bits;
3741 instr->mov.src.offset = fsrc->offset / 8;
3746 src_val = strtoull(src, &src, 0);
3747 CHECK(!src[0], EINVAL);
3750 src_val = hton64(src_val) >> (64 - fdst->n_bits);
3752 instr->type = INSTR_MOV_I;
3753 instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
3754 instr->mov.dst.n_bits = fdst->n_bits;
3755 instr->mov.dst.offset = fdst->offset / 8;
3756 instr->mov.src_val = src_val;
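/* Illustrative usage (field names hypothetical): the destination is always a
 * field, while the source is a field or an immediate value; the "h."/"m."
 * prefix of each operand selects between the MOV, MOV_MH, MOV_HM, MOV_HH and
 * MOV_I variants:
 *
 *     mov m.dst_field h.hdr.src_field
 *     mov m.dst_field 0x12
 */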
3761 instr_mov_exec(struct rte_swx_pipeline *p)
3763 struct thread *t = &p->threads[p->thread_id];
3764 struct instruction *ip = t->ip;
3766 TRACE("[Thread %2u] mov\n",
3776 instr_mov_mh_exec(struct rte_swx_pipeline *p)
3778 struct thread *t = &p->threads[p->thread_id];
3779 struct instruction *ip = t->ip;
3781 TRACE("[Thread %2u] mov (mh)\n",
3791 instr_mov_hm_exec(struct rte_swx_pipeline *p)
3793 struct thread *t = &p->threads[p->thread_id];
3794 struct instruction *ip = t->ip;
3796 TRACE("[Thread %2u] mov (hm)\n",
3806 instr_mov_hh_exec(struct rte_swx_pipeline *p)
3808 struct thread *t = &p->threads[p->thread_id];
3809 struct instruction *ip = t->ip;
3811 TRACE("[Thread %2u] mov (hh)\n",
3821 instr_mov_i_exec(struct rte_swx_pipeline *p)
3823 struct thread *t = &p->threads[p->thread_id];
3824 struct instruction *ip = t->ip;
3826 TRACE("[Thread %2u] mov m.f %" PRIx64 "\n",
3840 __instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma);
3843 __instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma)
3845 struct thread *t = &p->threads[p->thread_id];
3846 struct instruction *ip = t->ip;
3847 uint8_t *action_data = t->structs[0];
3848 uint64_t valid_headers = t->valid_headers;
3851 for (i = 0; i < n_dma; i++) {
3852 uint32_t header_id = ip->dma.dst.header_id[i];
3853 uint32_t struct_id = ip->dma.dst.struct_id[i];
3854 uint32_t offset = ip->dma.src.offset[i];
3855 uint32_t n_bytes = ip->dma.n_bytes[i];
3857 struct header_runtime *h = &t->headers[header_id];
3858 uint8_t *h_ptr0 = h->ptr0;
3859 uint8_t *h_ptr = t->structs[struct_id];
3861 void *dst = MASK64_BIT_GET(valid_headers, header_id) ?
3863 void *src = &action_data[offset];
3865 TRACE("[Thread %2u] dma h.s t.f\n", p->thread_id);
3868 memcpy(dst, src, n_bytes);
3869 t->structs[struct_id] = dst;
3870 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
3873 t->valid_headers = valid_headers;
3877 instr_dma_ht_exec(struct rte_swx_pipeline *p)
3879 __instr_dma_ht_exec(p, 1);
3886 instr_dma_ht2_exec(struct rte_swx_pipeline *p)
3888 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
3891 __instr_dma_ht_exec(p, 2);
3898 instr_dma_ht3_exec(struct rte_swx_pipeline *p)
3900 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
3903 __instr_dma_ht_exec(p, 3);
3910 instr_dma_ht4_exec(struct rte_swx_pipeline *p)
3912 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
3915 __instr_dma_ht_exec(p, 4);
3922 instr_dma_ht5_exec(struct rte_swx_pipeline *p)
3924 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
3927 __instr_dma_ht_exec(p, 5);
3934 instr_dma_ht6_exec(struct rte_swx_pipeline *p)
3936 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
3939 __instr_dma_ht_exec(p, 6);
3946 instr_dma_ht7_exec(struct rte_swx_pipeline *p)
3948 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
3951 __instr_dma_ht_exec(p, 7);
3958 instr_dma_ht8_exec(struct rte_swx_pipeline *p)
3960 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
3963 __instr_dma_ht_exec(p, 8);
3973 instr_alu_add_translate(struct rte_swx_pipeline *p,
3974 struct action *action,
3977 struct instruction *instr,
3978 struct instruction_data *data __rte_unused)
3980 char *dst = tokens[1], *src = tokens[2];
3981 struct field *fdst, *fsrc;
3983 uint32_t dst_struct_id = 0, src_struct_id = 0;
3985 CHECK(n_tokens == 3, EINVAL);
3987 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3988 CHECK(fdst, EINVAL);
3990 /* ADD, ADD_HM, ADD_MH, ADD_HH. */
3991 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3993 instr->type = INSTR_ALU_ADD;
3994 if (dst[0] == 'h' && src[0] != 'h')
3995 instr->type = INSTR_ALU_ADD_HM;
3996 if (dst[0] != 'h' && src[0] == 'h')
3997 instr->type = INSTR_ALU_ADD_MH;
3998 if (dst[0] == 'h' && src[0] == 'h')
3999 instr->type = INSTR_ALU_ADD_HH;
4001 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4002 instr->alu.dst.n_bits = fdst->n_bits;
4003 instr->alu.dst.offset = fdst->offset / 8;
4004 instr->alu.src.struct_id = (uint8_t)src_struct_id;
4005 instr->alu.src.n_bits = fsrc->n_bits;
4006 instr->alu.src.offset = fsrc->offset / 8;
4010 /* ADD_MI, ADD_HI. */
4011 src_val = strtoull(src, &src, 0);
4012 CHECK(!src[0], EINVAL);
4014 instr->type = INSTR_ALU_ADD_MI;
4016 instr->type = INSTR_ALU_ADD_HI;
4018 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4019 instr->alu.dst.n_bits = fdst->n_bits;
4020 instr->alu.dst.offset = fdst->offset / 8;
4021 instr->alu.src_val = src_val;
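/* Illustrative usage (field names hypothetical): sub, shl, shr, and, or and
 * xor follow the same "dst src" pattern, with the "h."/"m." operand prefixes
 * and immediate sources selecting the corresponding instruction variant:
 *
 *     add m.counter 1
 *     add h.hdr.field m.delta
 */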
4026 instr_alu_sub_translate(struct rte_swx_pipeline *p,
4027 struct action *action,
4030 struct instruction *instr,
4031 struct instruction_data *data __rte_unused)
4033 char *dst = tokens[1], *src = tokens[2];
4034 struct field *fdst, *fsrc;
4036 uint32_t dst_struct_id = 0, src_struct_id = 0;
4038 CHECK(n_tokens == 3, EINVAL);
4040 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
4041 CHECK(fdst, EINVAL);
4043 /* SUB, SUB_HM, SUB_MH, SUB_HH. */
4044 fsrc = struct_field_parse(p, action, src, &src_struct_id);
4046 instr->type = INSTR_ALU_SUB;
4047 if (dst[0] == 'h' && src[0] != 'h')
4048 instr->type = INSTR_ALU_SUB_HM;
4049 if (dst[0] != 'h' && src[0] == 'h')
4050 instr->type = INSTR_ALU_SUB_MH;
4051 if (dst[0] == 'h' && src[0] == 'h')
4052 instr->type = INSTR_ALU_SUB_HH;
4054 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4055 instr->alu.dst.n_bits = fdst->n_bits;
4056 instr->alu.dst.offset = fdst->offset / 8;
4057 instr->alu.src.struct_id = (uint8_t)src_struct_id;
4058 instr->alu.src.n_bits = fsrc->n_bits;
4059 instr->alu.src.offset = fsrc->offset / 8;
4063 /* SUB_MI, SUB_HI. */
4064 src_val = strtoull(src, &src, 0);
4065 CHECK(!src[0], EINVAL);
4067 instr->type = INSTR_ALU_SUB_MI;
4069 instr->type = INSTR_ALU_SUB_HI;
4071 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4072 instr->alu.dst.n_bits = fdst->n_bits;
4073 instr->alu.dst.offset = fdst->offset / 8;
4074 instr->alu.src_val = src_val;
4079 instr_alu_ckadd_translate(struct rte_swx_pipeline *p,
4080 struct action *action __rte_unused,
4083 struct instruction *instr,
4084 struct instruction_data *data __rte_unused)
4086 char *dst = tokens[1], *src = tokens[2];
4087 struct header *hdst, *hsrc;
4088 struct field *fdst, *fsrc;
4090 CHECK(n_tokens == 3, EINVAL);
4092 fdst = header_field_parse(p, dst, &hdst);
4093 CHECK(fdst && (fdst->n_bits == 16), EINVAL);
4096 fsrc = header_field_parse(p, src, &hsrc);
4098 instr->type = INSTR_ALU_CKADD_FIELD;
4099 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
4100 instr->alu.dst.n_bits = fdst->n_bits;
4101 instr->alu.dst.offset = fdst->offset / 8;
4102 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
4103 instr->alu.src.n_bits = fsrc->n_bits;
4104 instr->alu.src.offset = fsrc->offset / 8;
4108 /* CKADD_STRUCT, CKADD_STRUCT20. */
4109 hsrc = header_parse(p, src);
4110 CHECK(hsrc, EINVAL);
4112 instr->type = INSTR_ALU_CKADD_STRUCT;
4113 if ((hsrc->st->n_bits / 8) == 20)
4114 instr->type = INSTR_ALU_CKADD_STRUCT20;
4116 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
4117 instr->alu.dst.n_bits = fdst->n_bits;
4118 instr->alu.dst.offset = fdst->offset / 8;
4119 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
4120 instr->alu.src.n_bits = hsrc->st->n_bits;
4121 instr->alu.src.offset = 0; /* Unused. */
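/* Illustrative usage (names hypothetical): the destination must be a 16-bit
 * header field; the source is either another header field or an entire
 * header, with the 20-byte header case (e.g. IPv4 without options) mapped to
 * the dedicated CKADD_STRUCT20 instruction:
 *
 *     ckadd h.ipv4.hdr_checksum h.ipv4.ttl
 *     ckadd h.ipv4.hdr_checksum h.ipv4
 */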
4126 instr_alu_cksub_translate(struct rte_swx_pipeline *p,
4127 struct action *action __rte_unused,
4130 struct instruction *instr,
4131 struct instruction_data *data __rte_unused)
4133 char *dst = tokens[1], *src = tokens[2];
4134 struct header *hdst, *hsrc;
4135 struct field *fdst, *fsrc;
4137 CHECK(n_tokens == 3, EINVAL);
4139 fdst = header_field_parse(p, dst, &hdst);
4140 CHECK(fdst && (fdst->n_bits == 16), EINVAL);
4142 fsrc = header_field_parse(p, src, &hsrc);
4143 CHECK(fsrc, EINVAL);
4145 instr->type = INSTR_ALU_CKSUB_FIELD;
4146 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
4147 instr->alu.dst.n_bits = fdst->n_bits;
4148 instr->alu.dst.offset = fdst->offset / 8;
4149 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
4150 instr->alu.src.n_bits = fsrc->n_bits;
4151 instr->alu.src.offset = fsrc->offset / 8;
4156 instr_alu_shl_translate(struct rte_swx_pipeline *p,
4157 struct action *action,
4160 struct instruction *instr,
4161 struct instruction_data *data __rte_unused)
4163 char *dst = tokens[1], *src = tokens[2];
4164 struct field *fdst, *fsrc;
4166 uint32_t dst_struct_id = 0, src_struct_id = 0;
4168 CHECK(n_tokens == 3, EINVAL);
4170 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
4171 CHECK(fdst, EINVAL);
4173 /* SHL, SHL_HM, SHL_MH, SHL_HH. */
4174 fsrc = struct_field_parse(p, action, src, &src_struct_id);
4176 instr->type = INSTR_ALU_SHL;
4177 if (dst[0] == 'h' && src[0] != 'h')
4178 instr->type = INSTR_ALU_SHL_HM;
4179 if (dst[0] != 'h' && src[0] == 'h')
4180 instr->type = INSTR_ALU_SHL_MH;
4181 if (dst[0] == 'h' && src[0] == 'h')
4182 instr->type = INSTR_ALU_SHL_HH;
4184 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4185 instr->alu.dst.n_bits = fdst->n_bits;
4186 instr->alu.dst.offset = fdst->offset / 8;
4187 instr->alu.src.struct_id = (uint8_t)src_struct_id;
4188 instr->alu.src.n_bits = fsrc->n_bits;
4189 instr->alu.src.offset = fsrc->offset / 8;
4193 /* SHL_MI, SHL_HI. */
4194 src_val = strtoull(src, &src, 0);
4195 CHECK(!src[0], EINVAL);
4197 instr->type = INSTR_ALU_SHL_MI;
4199 instr->type = INSTR_ALU_SHL_HI;
4201 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4202 instr->alu.dst.n_bits = fdst->n_bits;
4203 instr->alu.dst.offset = fdst->offset / 8;
4204 instr->alu.src_val = src_val;
4209 instr_alu_shr_translate(struct rte_swx_pipeline *p,
4210 struct action *action,
4213 struct instruction *instr,
4214 struct instruction_data *data __rte_unused)
4216 char *dst = tokens[1], *src = tokens[2];
4217 struct field *fdst, *fsrc;
4219 uint32_t dst_struct_id = 0, src_struct_id = 0;
4221 CHECK(n_tokens == 3, EINVAL);
4223 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
4224 CHECK(fdst, EINVAL);
4226 /* SHR, SHR_HM, SHR_MH, SHR_HH. */
4227 fsrc = struct_field_parse(p, action, src, &src_struct_id);
4229 instr->type = INSTR_ALU_SHR;
4230 if (dst[0] == 'h' && src[0] != 'h')
4231 instr->type = INSTR_ALU_SHR_HM;
4232 if (dst[0] != 'h' && src[0] == 'h')
4233 instr->type = INSTR_ALU_SHR_MH;
4234 if (dst[0] == 'h' && src[0] == 'h')
4235 instr->type = INSTR_ALU_SHR_HH;
4237 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4238 instr->alu.dst.n_bits = fdst->n_bits;
4239 instr->alu.dst.offset = fdst->offset / 8;
4240 instr->alu.src.struct_id = (uint8_t)src_struct_id;
4241 instr->alu.src.n_bits = fsrc->n_bits;
4242 instr->alu.src.offset = fsrc->offset / 8;
4246 /* SHR_MI, SHR_HI. */
4247 src_val = strtoull(src, &src, 0);
4248 CHECK(!src[0], EINVAL);
4250 instr->type = INSTR_ALU_SHR_MI;
4252 instr->type = INSTR_ALU_SHR_HI;
4254 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4255 instr->alu.dst.n_bits = fdst->n_bits;
4256 instr->alu.dst.offset = fdst->offset / 8;
4257 instr->alu.src_val = src_val;
4262 instr_alu_and_translate(struct rte_swx_pipeline *p,
4263 struct action *action,
4266 struct instruction *instr,
4267 struct instruction_data *data __rte_unused)
4269 char *dst = tokens[1], *src = tokens[2];
4270 struct field *fdst, *fsrc;
4272 uint32_t dst_struct_id = 0, src_struct_id = 0;
4274 CHECK(n_tokens == 3, EINVAL);
4276 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
4277 CHECK(fdst, EINVAL);
4279 /* AND, AND_MH, AND_HM, AND_HH. */
4280 fsrc = struct_field_parse(p, action, src, &src_struct_id);
4282 instr->type = INSTR_ALU_AND;
4283 if (dst[0] != 'h' && src[0] == 'h')
4284 instr->type = INSTR_ALU_AND_MH;
4285 if (dst[0] == 'h' && src[0] != 'h')
4286 instr->type = INSTR_ALU_AND_HM;
4287 if (dst[0] == 'h' && src[0] == 'h')
4288 instr->type = INSTR_ALU_AND_HH;
4290 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4291 instr->alu.dst.n_bits = fdst->n_bits;
4292 instr->alu.dst.offset = fdst->offset / 8;
4293 instr->alu.src.struct_id = (uint8_t)src_struct_id;
4294 instr->alu.src.n_bits = fsrc->n_bits;
4295 instr->alu.src.offset = fsrc->offset / 8;
4300 src_val = strtoull(src, &src, 0);
4301 CHECK(!src[0], EINVAL);
4304 src_val = hton64(src_val) >> (64 - fdst->n_bits);
4306 instr->type = INSTR_ALU_AND_I;
4307 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4308 instr->alu.dst.n_bits = fdst->n_bits;
4309 instr->alu.dst.offset = fdst->offset / 8;
4310 instr->alu.src_val = src_val;
4315 instr_alu_or_translate(struct rte_swx_pipeline *p,
4316 struct action *action,
4319 struct instruction *instr,
4320 struct instruction_data *data __rte_unused)
4322 char *dst = tokens[1], *src = tokens[2];
4323 struct field *fdst, *fsrc;
4325 uint32_t dst_struct_id = 0, src_struct_id = 0;
4327 CHECK(n_tokens == 3, EINVAL);
4329 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
4330 CHECK(fdst, EINVAL);
4332 /* OR, OR_MH, OR_HM, OR_HH. */
4333 fsrc = struct_field_parse(p, action, src, &src_struct_id);
4335 instr->type = INSTR_ALU_OR;
4336 if (dst[0] != 'h' && src[0] == 'h')
4337 instr->type = INSTR_ALU_OR_MH;
4338 if (dst[0] == 'h' && src[0] != 'h')
4339 instr->type = INSTR_ALU_OR_HM;
4340 if (dst[0] == 'h' && src[0] == 'h')
4341 instr->type = INSTR_ALU_OR_HH;
4343 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4344 instr->alu.dst.n_bits = fdst->n_bits;
4345 instr->alu.dst.offset = fdst->offset / 8;
4346 instr->alu.src.struct_id = (uint8_t)src_struct_id;
4347 instr->alu.src.n_bits = fsrc->n_bits;
4348 instr->alu.src.offset = fsrc->offset / 8;
4353 src_val = strtoull(src, &src, 0);
4354 CHECK(!src[0], EINVAL);
4357 src_val = hton64(src_val) >> (64 - fdst->n_bits);
4359 instr->type = INSTR_ALU_OR_I;
4360 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4361 instr->alu.dst.n_bits = fdst->n_bits;
4362 instr->alu.dst.offset = fdst->offset / 8;
4363 instr->alu.src_val = src_val;
4368 instr_alu_xor_translate(struct rte_swx_pipeline *p,
4369 struct action *action,
4372 struct instruction *instr,
4373 struct instruction_data *data __rte_unused)
4375 char *dst = tokens[1], *src = tokens[2];
4376 struct field *fdst, *fsrc;
4378 uint32_t dst_struct_id = 0, src_struct_id = 0;
4380 CHECK(n_tokens == 3, EINVAL);
4382 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
4383 CHECK(fdst, EINVAL);
4385 /* XOR, XOR_MH, XOR_HM, XOR_HH. */
4386 fsrc = struct_field_parse(p, action, src, &src_struct_id);
4388 instr->type = INSTR_ALU_XOR;
4389 if (dst[0] != 'h' && src[0] == 'h')
4390 instr->type = INSTR_ALU_XOR_MH;
4391 if (dst[0] == 'h' && src[0] != 'h')
4392 instr->type = INSTR_ALU_XOR_HM;
4393 if (dst[0] == 'h' && src[0] == 'h')
4394 instr->type = INSTR_ALU_XOR_HH;
4396 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4397 instr->alu.dst.n_bits = fdst->n_bits;
4398 instr->alu.dst.offset = fdst->offset / 8;
4399 instr->alu.src.struct_id = (uint8_t)src_struct_id;
4400 instr->alu.src.n_bits = fsrc->n_bits;
4401 instr->alu.src.offset = fsrc->offset / 8;
4406 src_val = strtoull(src, &src, 0);
4407 CHECK(!src[0], EINVAL);
4410 src_val = hton64(src_val) >> (64 - fdst->n_bits);
4412 instr->type = INSTR_ALU_XOR_I;
4413 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4414 instr->alu.dst.n_bits = fdst->n_bits;
4415 instr->alu.dst.offset = fdst->offset / 8;
4416 instr->alu.src_val = src_val;
4421 instr_alu_add_exec(struct rte_swx_pipeline *p)
4423 struct thread *t = &p->threads[p->thread_id];
4424 struct instruction *ip = t->ip;
4426 TRACE("[Thread %2u] add\n", p->thread_id);
4436 instr_alu_add_mh_exec(struct rte_swx_pipeline *p)
4438 struct thread *t = &p->threads[p->thread_id];
4439 struct instruction *ip = t->ip;
4441 TRACE("[Thread %2u] add (mh)\n", p->thread_id);
4451 instr_alu_add_hm_exec(struct rte_swx_pipeline *p)
4453 struct thread *t = &p->threads[p->thread_id];
4454 struct instruction *ip = t->ip;
4456 TRACE("[Thread %2u] add (hm)\n", p->thread_id);
4466 instr_alu_add_hh_exec(struct rte_swx_pipeline *p)
4468 struct thread *t = &p->threads[p->thread_id];
4469 struct instruction *ip = t->ip;
4471 TRACE("[Thread %2u] add (hh)\n", p->thread_id);
4481 instr_alu_add_mi_exec(struct rte_swx_pipeline *p)
4483 struct thread *t = &p->threads[p->thread_id];
4484 struct instruction *ip = t->ip;
4486 TRACE("[Thread %2u] add (mi)\n", p->thread_id);
4496 instr_alu_add_hi_exec(struct rte_swx_pipeline *p)
4498 struct thread *t = &p->threads[p->thread_id];
4499 struct instruction *ip = t->ip;
4501 TRACE("[Thread %2u] add (hi)\n", p->thread_id);
4511 instr_alu_sub_exec(struct rte_swx_pipeline *p)
4513 struct thread *t = &p->threads[p->thread_id];
4514 struct instruction *ip = t->ip;
4516 TRACE("[Thread %2u] sub\n", p->thread_id);
4526 instr_alu_sub_mh_exec(struct rte_swx_pipeline *p)
4528 struct thread *t = &p->threads[p->thread_id];
4529 struct instruction *ip = t->ip;
4531 TRACE("[Thread %2u] sub (mh)\n", p->thread_id);
4541 instr_alu_sub_hm_exec(struct rte_swx_pipeline *p)
4543 struct thread *t = &p->threads[p->thread_id];
4544 struct instruction *ip = t->ip;
4546 TRACE("[Thread %2u] sub (hm)\n", p->thread_id);
4556 instr_alu_sub_hh_exec(struct rte_swx_pipeline *p)
4558 struct thread *t = &p->threads[p->thread_id];
4559 struct instruction *ip = t->ip;
4561 TRACE("[Thread %2u] sub (hh)\n", p->thread_id);
4571 instr_alu_sub_mi_exec(struct rte_swx_pipeline *p)
4573 struct thread *t = &p->threads[p->thread_id];
4574 struct instruction *ip = t->ip;
4576 TRACE("[Thread %2u] sub (mi)\n", p->thread_id);
4586 instr_alu_sub_hi_exec(struct rte_swx_pipeline *p)
4588 struct thread *t = &p->threads[p->thread_id];
4589 struct instruction *ip = t->ip;
4591 TRACE("[Thread %2u] sub (hi)\n", p->thread_id);
4601 instr_alu_shl_exec(struct rte_swx_pipeline *p)
4603 struct thread *t = &p->threads[p->thread_id];
4604 struct instruction *ip = t->ip;
4606 TRACE("[Thread %2u] shl\n", p->thread_id);
4616 instr_alu_shl_mh_exec(struct rte_swx_pipeline *p)
4618 struct thread *t = &p->threads[p->thread_id];
4619 struct instruction *ip = t->ip;
4621 TRACE("[Thread %2u] shl (mh)\n", p->thread_id);
4631 instr_alu_shl_hm_exec(struct rte_swx_pipeline *p)
4633 struct thread *t = &p->threads[p->thread_id];
4634 struct instruction *ip = t->ip;
4636 TRACE("[Thread %2u] shl (hm)\n", p->thread_id);
4646 instr_alu_shl_hh_exec(struct rte_swx_pipeline *p)
4648 struct thread *t = &p->threads[p->thread_id];
4649 struct instruction *ip = t->ip;
4651 TRACE("[Thread %2u] shl (hh)\n", p->thread_id);
4661 instr_alu_shl_mi_exec(struct rte_swx_pipeline *p)
4663 struct thread *t = &p->threads[p->thread_id];
4664 struct instruction *ip = t->ip;
4666 TRACE("[Thread %2u] shl (mi)\n", p->thread_id);
4676 instr_alu_shl_hi_exec(struct rte_swx_pipeline *p)
4678 struct thread *t = &p->threads[p->thread_id];
4679 struct instruction *ip = t->ip;
4681 TRACE("[Thread %2u] shl (hi)\n", p->thread_id);
4691 instr_alu_shr_exec(struct rte_swx_pipeline *p)
4693 struct thread *t = &p->threads[p->thread_id];
4694 struct instruction *ip = t->ip;
4696 TRACE("[Thread %2u] shr\n", p->thread_id);
4706 instr_alu_shr_mh_exec(struct rte_swx_pipeline *p)
4708 struct thread *t = &p->threads[p->thread_id];
4709 struct instruction *ip = t->ip;
4711 TRACE("[Thread %2u] shr (mh)\n", p->thread_id);
4721 instr_alu_shr_hm_exec(struct rte_swx_pipeline *p)
4723 struct thread *t = &p->threads[p->thread_id];
4724 struct instruction *ip = t->ip;
4726 TRACE("[Thread %2u] shr (hm)\n", p->thread_id);
4736 instr_alu_shr_hh_exec(struct rte_swx_pipeline *p)
4738 struct thread *t = &p->threads[p->thread_id];
4739 struct instruction *ip = t->ip;
4741 TRACE("[Thread %2u] shr (hh)\n", p->thread_id);
4751 instr_alu_shr_mi_exec(struct rte_swx_pipeline *p)
4753 struct thread *t = &p->threads[p->thread_id];
4754 struct instruction *ip = t->ip;
4756 TRACE("[Thread %2u] shr (mi)\n", p->thread_id);
4766 instr_alu_shr_hi_exec(struct rte_swx_pipeline *p)
4768 struct thread *t = &p->threads[p->thread_id];
4769 struct instruction *ip = t->ip;
4771 TRACE("[Thread %2u] shr (hi)\n", p->thread_id);
4781 instr_alu_and_exec(struct rte_swx_pipeline *p)
4783 struct thread *t = &p->threads[p->thread_id];
4784 struct instruction *ip = t->ip;
4786 TRACE("[Thread %2u] and\n", p->thread_id);
4796 instr_alu_and_mh_exec(struct rte_swx_pipeline *p)
4798 struct thread *t = &p->threads[p->thread_id];
4799 struct instruction *ip = t->ip;
4801 TRACE("[Thread %2u] and (mh)\n", p->thread_id);
4811 instr_alu_and_hm_exec(struct rte_swx_pipeline *p)
4813 struct thread *t = &p->threads[p->thread_id];
4814 struct instruction *ip = t->ip;
4816 TRACE("[Thread %2u] and (hm)\n", p->thread_id);
4819 ALU_HM_FAST(t, ip, &);
4826 instr_alu_and_hh_exec(struct rte_swx_pipeline *p)
4828 struct thread *t = &p->threads[p->thread_id];
4829 struct instruction *ip = t->ip;
4831 TRACE("[Thread %2u] and (hh)\n", p->thread_id);
4834 ALU_HH_FAST(t, ip, &);
4841 instr_alu_and_i_exec(struct rte_swx_pipeline *p)
4843 struct thread *t = &p->threads[p->thread_id];
4844 struct instruction *ip = t->ip;
4846 TRACE("[Thread %2u] and (i)\n", p->thread_id);
4856 instr_alu_or_exec(struct rte_swx_pipeline *p)
4858 struct thread *t = &p->threads[p->thread_id];
4859 struct instruction *ip = t->ip;
4861 TRACE("[Thread %2u] or\n", p->thread_id);
4871 instr_alu_or_mh_exec(struct rte_swx_pipeline *p)
4873 struct thread *t = &p->threads[p->thread_id];
4874 struct instruction *ip = t->ip;
4876 TRACE("[Thread %2u] or (mh)\n", p->thread_id);
4886 instr_alu_or_hm_exec(struct rte_swx_pipeline *p)
4888 struct thread *t = &p->threads[p->thread_id];
4889 struct instruction *ip = t->ip;
4891 TRACE("[Thread %2u] or (hm)\n", p->thread_id);
4894 ALU_HM_FAST(t, ip, |);
4901 instr_alu_or_hh_exec(struct rte_swx_pipeline *p)
4903 struct thread *t = &p->threads[p->thread_id];
4904 struct instruction *ip = t->ip;
4906 TRACE("[Thread %2u] or (hh)\n", p->thread_id);
4909 ALU_HH_FAST(t, ip, |);
4916 instr_alu_or_i_exec(struct rte_swx_pipeline *p)
4918 struct thread *t = &p->threads[p->thread_id];
4919 struct instruction *ip = t->ip;
4921 TRACE("[Thread %2u] or (i)\n", p->thread_id);
4931 instr_alu_xor_exec(struct rte_swx_pipeline *p)
4933 struct thread *t = &p->threads[p->thread_id];
4934 struct instruction *ip = t->ip;
4936 TRACE("[Thread %2u] xor\n", p->thread_id);
4946 instr_alu_xor_mh_exec(struct rte_swx_pipeline *p)
4948 struct thread *t = &p->threads[p->thread_id];
4949 struct instruction *ip = t->ip;
4951 TRACE("[Thread %2u] xor (mh)\n", p->thread_id);
4961 instr_alu_xor_hm_exec(struct rte_swx_pipeline *p)
4963 struct thread *t = &p->threads[p->thread_id];
4964 struct instruction *ip = t->ip;
4966 TRACE("[Thread %2u] xor (hm)\n", p->thread_id);
4969 ALU_HM_FAST(t, ip, ^);
4976 instr_alu_xor_hh_exec(struct rte_swx_pipeline *p)
4978 struct thread *t = &p->threads[p->thread_id];
4979 struct instruction *ip = t->ip;
4981 TRACE("[Thread %2u] xor (hh)\n", p->thread_id);
4984 ALU_HH_FAST(t, ip, ^);
4991 instr_alu_xor_i_exec(struct rte_swx_pipeline *p)
4993 struct thread *t = &p->threads[p->thread_id];
4994 struct instruction *ip = t->ip;
4996 TRACE("[Thread %2u] xor (i)\n", p->thread_id);
5006 instr_alu_ckadd_field_exec(struct rte_swx_pipeline *p)
5008 struct thread *t = &p->threads[p->thread_id];
5009 struct instruction *ip = t->ip;
5010 uint8_t *dst_struct, *src_struct;
5011 uint16_t *dst16_ptr, dst;
5012 uint64_t *src64_ptr, src64, src64_mask, src;
5015 TRACE("[Thread %2u] ckadd (field)\n", p->thread_id);
5018 dst_struct = t->structs[ip->alu.dst.struct_id];
5019 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
5022 src_struct = t->structs[ip->alu.src.struct_id];
5023 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
5025 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
5026 src = src64 & src64_mask;
5031 /* The first input (r) is a 16-bit number. The second and the third
5032 * inputs are 32-bit numbers. In the worst case scenario, the sum of the
5033 * three numbers (output r) is a 34-bit number.
5035 r += (src >> 32) + (src & 0xFFFFFFFF);
5037 /* The first input is a 16-bit number. The second input is an 18-bit
5038 * number. In the worst case scenario, the sum of the two numbers is a
5041 r = (r & 0xFFFF) + (r >> 16);
5043 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
5044 * a 3-bit number (0 .. 7). Their sum is a 17-bit number (0 .. 0x10006).
5046 r = (r & 0xFFFF) + (r >> 16);
5048 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
5049 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
5050 * 0x10006), the output r is (0 .. 7). So no carry bit can be generated,
5051 * therefore the output r is always a 16-bit number.
5053 r = (r & 0xFFFF) + (r >> 16);
5058 *dst16_ptr = (uint16_t)r;
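/* Illustrative fold example (value chosen arbitrarily): if r = 0x20003FFFE
 * after the addition above, the three folds give 0xFFFE + 0x20003 = 0x30001,
 * then 0x0001 + 0x3 = 0x0004, then 0x0004 + 0x0 = 0x0004, i.e. every carry
 * out of bit 15 is wrapped back into the sum, as required by 1's complement
 * (checksum) arithmetic.
 */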
5065 instr_alu_cksub_field_exec(struct rte_swx_pipeline *p)
5067 struct thread *t = &p->threads[p->thread_id];
5068 struct instruction *ip = t->ip;
5069 uint8_t *dst_struct, *src_struct;
5070 uint16_t *dst16_ptr, dst;
5071 uint64_t *src64_ptr, src64, src64_mask, src;
5074 TRACE("[Thread %2u] cksub (field)\n", p->thread_id);
5077 dst_struct = t->structs[ip->alu.dst.struct_id];
5078 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
5081 src_struct = t->structs[ip->alu.src.struct_id];
5082 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
5084 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
5085 src = src64 & src64_mask;
5090 /* Subtraction in 1's complement arithmetic (i.e. a '- b) is the same as
5091 * the following sequence of operations in 2's complement arithmetic:
5092 * a '- b = (a - b) % 0xFFFF.
5094 * In order to prevent an underflow for the below subtraction, in which
5095 * a 33-bit number (the subtrahend) is taken out of a 16-bit number (the
5096 * minuend), we first add a multiple of the 0xFFFF modulus to the
5097 * minuend. The number we add to the minuend needs to be a 34-bit number
5098 * or higher, so for readability reasons we picked the 36-bit multiple.
5099 * We are effectively turning the 16-bit minuend into a 36-bit number:
5100 * (a - b) % 0xFFFF = (a + 0xFFFF00000 - b) % 0xFFFF.
5102 r += 0xFFFF00000ULL; /* The output r is a 36-bit number. */
5104 /* A 33-bit number is subtracted from a 36-bit number (the input r). The
5105 * result (the output r) is a 36-bit number.
5107 r -= (src >> 32) + (src & 0xFFFFFFFF);
5109 /* The first input is a 16-bit number. The second input is a 20-bit
5110 * number. Their sum is a 21-bit number.
5112 r = (r & 0xFFFF) + (r >> 16);
5114 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
5115 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1001E).
5117 r = (r & 0xFFFF) + (r >> 16);
5119 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
5120 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
5121 * 0x1001E), the output r is (0 .. 31). So no carry bit can be
5122 * generated, therefore the output r is always a 16-bit number.
5124 r = (r & 0xFFFF) + (r >> 16);
5129 *dst16_ptr = (uint16_t)r;
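/* Illustrative example (values chosen arbitrarily): with an initial r of
 * 0x0001 and a subtrahend of 0x5, r becomes 0xFFFF00001 - 0x5 = 0xFFFEFFFFC;
 * the folds above then give 0xFFFC + 0xFFFEF = 0x10FFEB, then 0xFFEB + 0x10 =
 * 0xFFFB, then 0xFFFB, which equals (0x0001 - 0x5) modulo 0xFFFF, as
 * expected.
 */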
5136 instr_alu_ckadd_struct20_exec(struct rte_swx_pipeline *p)
5138 struct thread *t = &p->threads[p->thread_id];
5139 struct instruction *ip = t->ip;
5140 uint8_t *dst_struct, *src_struct;
5141 uint16_t *dst16_ptr;
5142 uint32_t *src32_ptr;
5145 TRACE("[Thread %2u] ckadd (struct of 20 bytes)\n", p->thread_id);
5148 dst_struct = t->structs[ip->alu.dst.struct_id];
5149 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
5151 src_struct = t->structs[ip->alu.src.struct_id];
5152 src32_ptr = (uint32_t *)&src_struct[0];
5154 r0 = src32_ptr[0]; /* r0 is a 32-bit number. */
5155 r1 = src32_ptr[1]; /* r1 is a 32-bit number. */
5156 r0 += src32_ptr[2]; /* The output r0 is a 33-bit number. */
5157 r1 += src32_ptr[3]; /* The output r1 is a 33-bit number. */
5158 r0 += r1 + src32_ptr[4]; /* The output r0 is a 35-bit number. */
5160 /* The first input is a 16-bit number. The second input is a 19-bit
5161 * number. Their sum is a 20-bit number.
5163 r0 = (r0 & 0xFFFF) + (r0 >> 16);
5165 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
5166 * a 4-bit number (0 .. 15). The sum is a 17-bit number (0 .. 0x1000E).
5168 r0 = (r0 & 0xFFFF) + (r0 >> 16);
5170 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
5171 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
5172 * 0x1000E), the output r is (0 .. 15). So no carry bit can be
5173 * generated, therefore the output r is always a 16-bit number.
5175 r0 = (r0 & 0xFFFF) + (r0 >> 16);
5178 r0 = r0 ? r0 : 0xFFFF;
5180 *dst16_ptr = (uint16_t)r0;
5187 instr_alu_ckadd_struct_exec(struct rte_swx_pipeline *p)
5189 struct thread *t = &p->threads[p->thread_id];
5190 struct instruction *ip = t->ip;
5191 uint8_t *dst_struct, *src_struct;
5192 uint16_t *dst16_ptr;
5193 uint32_t *src32_ptr;
5197 TRACE("[Thread %2u] ckadd (struct)\n", p->thread_id);
5200 dst_struct = t->structs[ip->alu.dst.struct_id];
5201 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
5203 src_struct = t->structs[ip->alu.src.struct_id];
5204 src32_ptr = (uint32_t *)&src_struct[0];
5206 /* The max number of 32-bit words in a 32-byte header is 8 = 2^3.
5207 * Therefore, in the worst case scenario, a 35-bit number is added to a
5208 * 16-bit number (the input r), so the output r is a 36-bit number.
5210 for (i = 0; i < ip->alu.src.n_bits / 32; i++, src32_ptr++)
5213 /* The first input is a 16-bit number. The second input is a 20-bit
5214 * number. Their sum is a 21-bit number.
5216 r = (r & 0xFFFF) + (r >> 16);
5218 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
5219 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1001E).
5221 r = (r & 0xFFFF) + (r >> 16);
5223 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
5224 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
5225 * 0x1001E), the output r is (0 .. 31). So no carry bit can be
5226 * generated, therefore the output r is always a 16-bit number.
5228 r = (r & 0xFFFF) + (r >> 16);
5233 *dst16_ptr = (uint16_t)r;
5242 static struct regarray *
5243 regarray_find(struct rte_swx_pipeline *p, const char *name);
5246 instr_regprefetch_translate(struct rte_swx_pipeline *p,
5247 struct action *action,
5250 struct instruction *instr,
5251 struct instruction_data *data __rte_unused)
5253 char *regarray = tokens[1], *idx = tokens[2];
5256 uint32_t idx_struct_id, idx_val;
5258 CHECK(n_tokens == 3, EINVAL);
5260 r = regarray_find(p, regarray);
5263 /* REGPREFETCH_RH, REGPREFETCH_RM. */
5264 fidx = struct_field_parse(p, action, idx, &idx_struct_id);
5266 instr->type = INSTR_REGPREFETCH_RM;
5268 instr->type = INSTR_REGPREFETCH_RH;
5270 instr->regarray.regarray_id = r->id;
5271 instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
5272 instr->regarray.idx.n_bits = fidx->n_bits;
5273 instr->regarray.idx.offset = fidx->offset / 8;
5274 instr->regarray.dstsrc_val = 0; /* Unused. */
5278 /* REGPREFETCH_RI. */
5279 idx_val = strtoul(idx, &idx, 0);
5280 CHECK(!idx[0], EINVAL);
5282 instr->type = INSTR_REGPREFETCH_RI;
5283 instr->regarray.regarray_id = r->id;
5284 instr->regarray.idx_val = idx_val;
5285 instr->regarray.dstsrc_val = 0; /* Unused. */
5290 instr_regrd_translate(struct rte_swx_pipeline *p,
5291 struct action *action,
5294 struct instruction *instr,
5295 struct instruction_data *data __rte_unused)
5297 char *dst = tokens[1], *regarray = tokens[2], *idx = tokens[3];
5299 struct field *fdst, *fidx;
5300 uint32_t dst_struct_id, idx_struct_id, idx_val;
5302 CHECK(n_tokens == 4, EINVAL);
5304 r = regarray_find(p, regarray);
5307 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
5308 CHECK(fdst, EINVAL);
5310 /* REGRD_HRH, REGRD_HRM, REGRD_MRH, REGRD_MRM. */
5311 fidx = struct_field_parse(p, action, idx, &idx_struct_id);
5313 instr->type = INSTR_REGRD_MRM;
5314 if (dst[0] == 'h' && idx[0] != 'h')
5315 instr->type = INSTR_REGRD_HRM;
5316 if (dst[0] != 'h' && idx[0] == 'h')
5317 instr->type = INSTR_REGRD_MRH;
5318 if (dst[0] == 'h' && idx[0] == 'h')
5319 instr->type = INSTR_REGRD_HRH;
5321 instr->regarray.regarray_id = r->id;
5322 instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
5323 instr->regarray.idx.n_bits = fidx->n_bits;
5324 instr->regarray.idx.offset = fidx->offset / 8;
5325 instr->regarray.dstsrc.struct_id = (uint8_t)dst_struct_id;
5326 instr->regarray.dstsrc.n_bits = fdst->n_bits;
5327 instr->regarray.dstsrc.offset = fdst->offset / 8;
5331 /* REGRD_MRI, REGRD_HRI. */
5332 idx_val = strtoul(idx, &idx, 0);
5333 CHECK(!idx[0], EINVAL);
5335 instr->type = INSTR_REGRD_MRI;
5337 instr->type = INSTR_REGRD_HRI;
5339 instr->regarray.regarray_id = r->id;
5340 instr->regarray.idx_val = idx_val;
5341 instr->regarray.dstsrc.struct_id = (uint8_t)dst_struct_id;
5342 instr->regarray.dstsrc.n_bits = fdst->n_bits;
5343 instr->regarray.dstsrc.offset = fdst->offset / 8;
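/* Illustrative usage (names hypothetical): the operands are the destination
 * field, the register array name and the index (field or immediate):
 *
 *     regrd m.counter_val my_regarray m.index
 *     regrd m.counter_val my_regarray 0
 */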
5348 instr_regwr_translate(struct rte_swx_pipeline *p,
5349 struct action *action,
5352 struct instruction *instr,
5353 struct instruction_data *data __rte_unused)
5355 char *regarray = tokens[1], *idx = tokens[2], *src = tokens[3];
5357 struct field *fidx, *fsrc;
5359 uint32_t idx_struct_id, idx_val, src_struct_id;
5361 CHECK(n_tokens == 4, EINVAL);
5363 r = regarray_find(p, regarray);
5366 /* REGWR_RHH, REGWR_RHM, REGWR_RMH, REGWR_RMM. */
5367 fidx = struct_field_parse(p, action, idx, &idx_struct_id);
5368 fsrc = struct_field_parse(p, action, src, &src_struct_id);
5370 instr->type = INSTR_REGWR_RMM;
5371 if (idx[0] == 'h' && src[0] != 'h')
5372 instr->type = INSTR_REGWR_RHM;
5373 if (idx[0] != 'h' && src[0] == 'h')
5374 instr->type = INSTR_REGWR_RMH;
5375 if (idx[0] == 'h' && src[0] == 'h')
5376 instr->type = INSTR_REGWR_RHH;
5378 instr->regarray.regarray_id = r->id;
5379 instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
5380 instr->regarray.idx.n_bits = fidx->n_bits;
5381 instr->regarray.idx.offset = fidx->offset / 8;
5382 instr->regarray.dstsrc.struct_id = (uint8_t)src_struct_id;
5383 instr->regarray.dstsrc.n_bits = fsrc->n_bits;
5384 instr->regarray.dstsrc.offset = fsrc->offset / 8;
5388 /* REGWR_RHI, REGWR_RMI. */
5389 if (fidx && !fsrc) {
5390 src_val = strtoull(src, &src, 0);
5391 CHECK(!src[0], EINVAL);
5393 instr->type = INSTR_REGWR_RMI;
5395 instr->type = INSTR_REGWR_RHI;
5397 instr->regarray.regarray_id = r->id;
5398 instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
5399 instr->regarray.idx.n_bits = fidx->n_bits;
5400 instr->regarray.idx.offset = fidx->offset / 8;
5401 instr->regarray.dstsrc_val = src_val;
5405 /* REGWR_RIH, REGWR_RIM. */
5406 if (!fidx && fsrc) {
5407 idx_val = strtoul(idx, &idx, 0);
5408 CHECK(!idx[0], EINVAL);
5410 instr->type = INSTR_REGWR_RIM;
5412 instr->type = INSTR_REGWR_RIH;
5414 instr->regarray.regarray_id = r->id;
5415 instr->regarray.idx_val = idx_val;
5416 instr->regarray.dstsrc.struct_id = (uint8_t)src_struct_id;
5417 instr->regarray.dstsrc.n_bits = fsrc->n_bits;
5418 instr->regarray.dstsrc.offset = fsrc->offset / 8;
5423 src_val = strtoull(src, &src, 0);
5424 CHECK(!src[0], EINVAL);
5426 idx_val = strtoul(idx, &idx, 0);
5427 CHECK(!idx[0], EINVAL);
5429 instr->type = INSTR_REGWR_RII;
instr->regarray.regarray_id = r->id;
5430 instr->regarray.idx_val = idx_val;
5431 instr->regarray.dstsrc_val = src_val;
5437 instr_regadd_translate(struct rte_swx_pipeline *p,
5438 struct action *action,
5441 struct instruction *instr,
5442 struct instruction_data *data __rte_unused)
5444 char *regarray = tokens[1], *idx = tokens[2], *src = tokens[3];
5446 struct field *fidx, *fsrc;
5448 uint32_t idx_struct_id, idx_val, src_struct_id;
5450 CHECK(n_tokens == 4, EINVAL);
5452 r = regarray_find(p, regarray);
5455 /* REGADD_RHH, REGADD_RHM, REGADD_RMH, REGADD_RMM. */
5456 fidx = struct_field_parse(p, action, idx, &idx_struct_id);
5457 fsrc = struct_field_parse(p, action, src, &src_struct_id);
5459 instr->type = INSTR_REGADD_RMM;
5460 if (idx[0] == 'h' && src[0] != 'h')
5461 instr->type = INSTR_REGADD_RHM;
5462 if (idx[0] != 'h' && src[0] == 'h')
5463 instr->type = INSTR_REGADD_RMH;
5464 if (idx[0] == 'h' && src[0] == 'h')
5465 instr->type = INSTR_REGADD_RHH;
5467 instr->regarray.regarray_id = r->id;
5468 instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
5469 instr->regarray.idx.n_bits = fidx->n_bits;
5470 instr->regarray.idx.offset = fidx->offset / 8;
5471 instr->regarray.dstsrc.struct_id = (uint8_t)src_struct_id;
5472 instr->regarray.dstsrc.n_bits = fsrc->n_bits;
5473 instr->regarray.dstsrc.offset = fsrc->offset / 8;
5477 /* REGADD_RHI, REGADD_RMI. */
5478 if (fidx && !fsrc) {
5479 src_val = strtoull(src, &src, 0);
5480 CHECK(!src[0], EINVAL);
5482 instr->type = INSTR_REGADD_RMI;
5484 instr->type = INSTR_REGADD_RHI;
5486 instr->regarray.regarray_id = r->id;
5487 instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
5488 instr->regarray.idx.n_bits = fidx->n_bits;
5489 instr->regarray.idx.offset = fidx->offset / 8;
5490 instr->regarray.dstsrc_val = src_val;
5494 /* REGADD_RIH, REGADD_RIM. */
5495 if (!fidx && fsrc) {
5496 idx_val = strtoul(idx, &idx, 0);
5497 CHECK(!idx[0], EINVAL);
5499 instr->type = INSTR_REGADD_RIM;
5501 instr->type = INSTR_REGADD_RIH;
5503 instr->regarray.regarray_id = r->id;
5504 instr->regarray.idx_val = idx_val;
5505 instr->regarray.dstsrc.struct_id = (uint8_t)src_struct_id;
5506 instr->regarray.dstsrc.n_bits = fsrc->n_bits;
5507 instr->regarray.dstsrc.offset = fsrc->offset / 8;
5512 src_val = strtoull(src, &src, 0);
5513 CHECK(!src[0], EINVAL);
5515 idx_val = strtoul(idx, &idx, 0);
5516 CHECK(!idx[0], EINVAL);
5518 instr->type = INSTR_REGADD_RII;
instr->regarray.regarray_id = r->id;
5519 instr->regarray.idx_val = idx_val;
5520 instr->regarray.dstsrc_val = src_val;
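/* Illustrative usage (names hypothetical): for both regwr and regadd the
 * operands are the register array name, the index and the source value, with
 * the index and the source each given either as a field or as an immediate:
 *
 *     regwr my_regarray m.index m.val
 *     regadd my_regarray 0 1
 */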
5524 static inline uint64_t *
5525 instr_regarray_regarray(struct rte_swx_pipeline *p, struct instruction *ip)
5527 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
5531 static inline uint64_t
5532 instr_regarray_idx_hbo(struct rte_swx_pipeline *p, struct thread *t, struct instruction *ip)
5534 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
5536 uint8_t *idx_struct = t->structs[ip->regarray.idx.struct_id];
5537 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->regarray.idx.offset];
5538 uint64_t idx64 = *idx64_ptr;
5539 uint64_t idx64_mask = UINT64_MAX >> (64 - ip->regarray.idx.n_bits);
5540 uint64_t idx = idx64 & idx64_mask & r->size_mask;
5545 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
5547 static inline uint64_t
5548 instr_regarray_idx_nbo(struct rte_swx_pipeline *p, struct thread *t, struct instruction *ip)
5550 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
5552 uint8_t *idx_struct = t->structs[ip->regarray.idx.struct_id];
5553 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->regarray.idx.offset];
5554 uint64_t idx64 = *idx64_ptr;
5555 uint64_t idx = (ntoh64(idx64) >> (64 - ip->regarray.idx.n_bits)) & r->size_mask;
5562 #define instr_regarray_idx_nbo instr_regarray_idx_hbo
5566 static inline uint64_t
5567 instr_regarray_idx_imm(struct rte_swx_pipeline *p, struct instruction *ip)
5569 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
5571 uint64_t idx = ip->regarray.idx_val & r->size_mask;
5576 static inline uint64_t
5577 instr_regarray_src_hbo(struct thread *t, struct instruction *ip)
5579 uint8_t *src_struct = t->structs[ip->regarray.dstsrc.struct_id];
5580 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->regarray.dstsrc.offset];
5581 uint64_t src64 = *src64_ptr;
5582 uint64_t src64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
5583 uint64_t src = src64 & src64_mask;
5588 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
5590 static inline uint64_t
5591 instr_regarray_src_nbo(struct thread *t, struct instruction *ip)
5593 uint8_t *src_struct = t->structs[ip->regarray.dstsrc.struct_id];
5594 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->regarray.dstsrc.offset];
5595 uint64_t src64 = *src64_ptr;
5596 uint64_t src = ntoh64(src64) >> (64 - ip->regarray.dstsrc.n_bits);
5603 #define instr_regarray_src_nbo instr_regarray_src_hbo
5608 instr_regarray_dst_hbo_src_hbo_set(struct thread *t, struct instruction *ip, uint64_t src)
5610 uint8_t *dst_struct = t->structs[ip->regarray.dstsrc.struct_id];
5611 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->regarray.dstsrc.offset];
5612 uint64_t dst64 = *dst64_ptr;
5613 uint64_t dst64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
5615 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
5619 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
5622 instr_regarray_dst_nbo_src_hbo_set(struct thread *t, struct instruction *ip, uint64_t src)
5624 uint8_t *dst_struct = t->structs[ip->regarray.dstsrc.struct_id];
5625 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->regarray.dstsrc.offset];
5626 uint64_t dst64 = *dst64_ptr;
5627 uint64_t dst64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
5629 src = hton64(src) >> (64 - ip->regarray.dstsrc.n_bits);
5630 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
5635 #define instr_regarray_dst_nbo_src_hbo_set instr_regarray_dst_hbo_src_hbo_set
5640 instr_regprefetch_rh_exec(struct rte_swx_pipeline *p)
5642 struct thread *t = &p->threads[p->thread_id];
5643 struct instruction *ip = t->ip;
5644 uint64_t *regarray, idx;
5646 TRACE("[Thread %2u] regprefetch (r[h])\n", p->thread_id);
5649 regarray = instr_regarray_regarray(p, ip);
5650 idx = instr_regarray_idx_nbo(p, t, ip);
5651 rte_prefetch0(&regarray[idx]);
5658 instr_regprefetch_rm_exec(struct rte_swx_pipeline *p)
5660 struct thread *t = &p->threads[p->thread_id];
5661 struct instruction *ip = t->ip;
5662 uint64_t *regarray, idx;
5664 TRACE("[Thread %2u] regprefetch (r[m])\n", p->thread_id);
5667 regarray = instr_regarray_regarray(p, ip);
5668 idx = instr_regarray_idx_hbo(p, t, ip);
5669 rte_prefetch0(&regarray[idx]);
5676 instr_regprefetch_ri_exec(struct rte_swx_pipeline *p)
5678 struct thread *t = &p->threads[p->thread_id];
5679 struct instruction *ip = t->ip;
5680 uint64_t *regarray, idx;
5682 TRACE("[Thread %2u] regprefetch (r[i])\n", p->thread_id);
5685 regarray = instr_regarray_regarray(p, ip);
5686 idx = instr_regarray_idx_imm(p, ip);
5687 rte_prefetch0(&regarray[idx]);
5694 instr_regrd_hrh_exec(struct rte_swx_pipeline *p)
5696 struct thread *t = &p->threads[p->thread_id];
5697 struct instruction *ip = t->ip;
5698 uint64_t *regarray, idx;
5700 TRACE("[Thread %2u] regrd (h = r[h])\n", p->thread_id);
5703 regarray = instr_regarray_regarray(p, ip);
5704 idx = instr_regarray_idx_nbo(p, t, ip);
5705 instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
5712 instr_regrd_hrm_exec(struct rte_swx_pipeline *p)
5714 struct thread *t = &p->threads[p->thread_id];
5715 struct instruction *ip = t->ip;
5716 uint64_t *regarray, idx;
5718 TRACE("[Thread %2u] regrd (h = r[m])\n", p->thread_id);
5721 regarray = instr_regarray_regarray(p, ip);
5722 idx = instr_regarray_idx_hbo(p, t, ip);
5723 instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
5730 instr_regrd_mrh_exec(struct rte_swx_pipeline *p)
5732 struct thread *t = &p->threads[p->thread_id];
5733 struct instruction *ip = t->ip;
5734 uint64_t *regarray, idx;
5736 TRACE("[Thread %2u] regrd (m = r[h])\n", p->thread_id);
5739 regarray = instr_regarray_regarray(p, ip);
5740 idx = instr_regarray_idx_nbo(p, t, ip);
5741 instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
5748 instr_regrd_mrm_exec(struct rte_swx_pipeline *p)
5750 struct thread *t = &p->threads[p->thread_id];
5751 struct instruction *ip = t->ip;
5752 uint64_t *regarray, idx;
TRACE("[Thread %2u] regrd (m = r[m])\n", p->thread_id);
5755 regarray = instr_regarray_regarray(p, ip);
5756 idx = instr_regarray_idx_hbo(p, t, ip);
5757 instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
5764 instr_regrd_hri_exec(struct rte_swx_pipeline *p)
5766 struct thread *t = &p->threads[p->thread_id];
5767 struct instruction *ip = t->ip;
5768 uint64_t *regarray, idx;
5770 TRACE("[Thread %2u] regrd (h = r[i])\n", p->thread_id);
5773 regarray = instr_regarray_regarray(p, ip);
5774 idx = instr_regarray_idx_imm(p, ip);
5775 instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
5782 instr_regrd_mri_exec(struct rte_swx_pipeline *p)
5784 struct thread *t = &p->threads[p->thread_id];
5785 struct instruction *ip = t->ip;
5786 uint64_t *regarray, idx;
5788 TRACE("[Thread %2u] regrd (m = r[i])\n", p->thread_id);
5791 regarray = instr_regarray_regarray(p, ip);
5792 idx = instr_regarray_idx_imm(p, ip);
5793 instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
5800 instr_regwr_rhh_exec(struct rte_swx_pipeline *p)
5802 struct thread *t = &p->threads[p->thread_id];
5803 struct instruction *ip = t->ip;
5804 uint64_t *regarray, idx, src;
5806 TRACE("[Thread %2u] regwr (r[h] = h)\n", p->thread_id);
5809 regarray = instr_regarray_regarray(p, ip);
5810 idx = instr_regarray_idx_nbo(p, t, ip);
5811 src = instr_regarray_src_nbo(t, ip);
5812 regarray[idx] = src;
5819 instr_regwr_rhm_exec(struct rte_swx_pipeline *p)
5821 struct thread *t = &p->threads[p->thread_id];
5822 struct instruction *ip = t->ip;
5823 uint64_t *regarray, idx, src;
5825 TRACE("[Thread %2u] regwr (r[h] = m)\n", p->thread_id);
5828 regarray = instr_regarray_regarray(p, ip);
5829 idx = instr_regarray_idx_nbo(p, t, ip);
5830 src = instr_regarray_src_hbo(t, ip);
5831 regarray[idx] = src;
5838 instr_regwr_rmh_exec(struct rte_swx_pipeline *p)
5840 struct thread *t = &p->threads[p->thread_id];
5841 struct instruction *ip = t->ip;
5842 uint64_t *regarray, idx, src;
5844 TRACE("[Thread %2u] regwr (r[m] = h)\n", p->thread_id);
5847 regarray = instr_regarray_regarray(p, ip);
5848 idx = instr_regarray_idx_hbo(p, t, ip);
5849 src = instr_regarray_src_nbo(t, ip);
5850 regarray[idx] = src;
5857 instr_regwr_rmm_exec(struct rte_swx_pipeline *p)
5859 struct thread *t = &p->threads[p->thread_id];
5860 struct instruction *ip = t->ip;
5861 uint64_t *regarray, idx, src;
5863 TRACE("[Thread %2u] regwr (r[m] = m)\n", p->thread_id);
5866 regarray = instr_regarray_regarray(p, ip);
5867 idx = instr_regarray_idx_hbo(p, t, ip);
5868 src = instr_regarray_src_hbo(t, ip);
5869 regarray[idx] = src;
5876 instr_regwr_rhi_exec(struct rte_swx_pipeline *p)
5878 struct thread *t = &p->threads[p->thread_id];
5879 struct instruction *ip = t->ip;
5880 uint64_t *regarray, idx, src;
5882 TRACE("[Thread %2u] regwr (r[h] = i)\n", p->thread_id);
5885 regarray = instr_regarray_regarray(p, ip);
5886 idx = instr_regarray_idx_nbo(p, t, ip);
5887 src = ip->regarray.dstsrc_val;
5888 regarray[idx] = src;
5895 instr_regwr_rmi_exec(struct rte_swx_pipeline *p)
5897 struct thread *t = &p->threads[p->thread_id];
5898 struct instruction *ip = t->ip;
5899 uint64_t *regarray, idx, src;
5901 TRACE("[Thread %2u] regwr (r[m] = i)\n", p->thread_id);
5904 regarray = instr_regarray_regarray(p, ip);
5905 idx = instr_regarray_idx_hbo(p, t, ip);
5906 src = ip->regarray.dstsrc_val;
5907 regarray[idx] = src;
5914 instr_regwr_rih_exec(struct rte_swx_pipeline *p)
5916 struct thread *t = &p->threads[p->thread_id];
5917 struct instruction *ip = t->ip;
5918 uint64_t *regarray, idx, src;
5920 TRACE("[Thread %2u] regwr (r[i] = h)\n", p->thread_id);
5923 regarray = instr_regarray_regarray(p, ip);
5924 idx = instr_regarray_idx_imm(p, ip);
5925 src = instr_regarray_src_nbo(t, ip);
5926 regarray[idx] = src;
5933 instr_regwr_rim_exec(struct rte_swx_pipeline *p)
5935 struct thread *t = &p->threads[p->thread_id];
5936 struct instruction *ip = t->ip;
5937 uint64_t *regarray, idx, src;
5939 TRACE("[Thread %2u] regwr (r[i] = m)\n", p->thread_id);
5942 regarray = instr_regarray_regarray(p, ip);
5943 idx = instr_regarray_idx_imm(p, ip);
5944 src = instr_regarray_src_hbo(t, ip);
5945 regarray[idx] = src;
5952 instr_regwr_rii_exec(struct rte_swx_pipeline *p)
5954 struct thread *t = &p->threads[p->thread_id];
5955 struct instruction *ip = t->ip;
5956 uint64_t *regarray, idx, src;
5958 TRACE("[Thread %2u] regwr (r[i] = i)\n", p->thread_id);
5961 regarray = instr_regarray_regarray(p, ip);
5962 idx = instr_regarray_idx_imm(p, ip);
5963 src = ip->regarray.dstsrc_val;
5964 regarray[idx] = src;
5971 instr_regadd_rhh_exec(struct rte_swx_pipeline *p)
5973 struct thread *t = &p->threads[p->thread_id];
5974 struct instruction *ip = t->ip;
5975 uint64_t *regarray, idx, src;
5977 TRACE("[Thread %2u] regadd (r[h] += h)\n", p->thread_id);
5980 regarray = instr_regarray_regarray(p, ip);
5981 idx = instr_regarray_idx_nbo(p, t, ip);
5982 src = instr_regarray_src_nbo(t, ip);
5983 regarray[idx] += src;
5990 instr_regadd_rhm_exec(struct rte_swx_pipeline *p)
5992 struct thread *t = &p->threads[p->thread_id];
5993 struct instruction *ip = t->ip;
5994 uint64_t *regarray, idx, src;
5996 TRACE("[Thread %2u] regadd (r[h] += m)\n", p->thread_id);
5999 regarray = instr_regarray_regarray(p, ip);
6000 idx = instr_regarray_idx_nbo(p, t, ip);
6001 src = instr_regarray_src_hbo(t, ip);
6002 regarray[idx] += src;
6009 instr_regadd_rmh_exec(struct rte_swx_pipeline *p)
6011 struct thread *t = &p->threads[p->thread_id];
6012 struct instruction *ip = t->ip;
6013 uint64_t *regarray, idx, src;
6015 TRACE("[Thread %2u] regadd (r[m] += h)\n", p->thread_id);
6018 regarray = instr_regarray_regarray(p, ip);
6019 idx = instr_regarray_idx_hbo(p, t, ip);
6020 src = instr_regarray_src_nbo(t, ip);
6021 regarray[idx] += src;
6028 instr_regadd_rmm_exec(struct rte_swx_pipeline *p)
6030 struct thread *t = &p->threads[p->thread_id];
6031 struct instruction *ip = t->ip;
6032 uint64_t *regarray, idx, src;
6034 TRACE("[Thread %2u] regadd (r[m] += m)\n", p->thread_id);
6037 regarray = instr_regarray_regarray(p, ip);
6038 idx = instr_regarray_idx_hbo(p, t, ip);
6039 src = instr_regarray_src_hbo(t, ip);
6040 regarray[idx] += src;
6047 instr_regadd_rhi_exec(struct rte_swx_pipeline *p)
6049 struct thread *t = &p->threads[p->thread_id];
6050 struct instruction *ip = t->ip;
6051 uint64_t *regarray, idx, src;
6053 TRACE("[Thread %2u] regadd (r[h] += i)\n", p->thread_id);
6056 regarray = instr_regarray_regarray(p, ip);
6057 idx = instr_regarray_idx_nbo(p, t, ip);
6058 src = ip->regarray.dstsrc_val;
6059 regarray[idx] += src;
6066 instr_regadd_rmi_exec(struct rte_swx_pipeline *p)
6068 struct thread *t = &p->threads[p->thread_id];
6069 struct instruction *ip = t->ip;
6070 uint64_t *regarray, idx, src;
6072 TRACE("[Thread %2u] regadd (r[m] += i)\n", p->thread_id);
6075 regarray = instr_regarray_regarray(p, ip);
6076 idx = instr_regarray_idx_hbo(p, t, ip);
6077 src = ip->regarray.dstsrc_val;
6078 regarray[idx] += src;
6085 instr_regadd_rih_exec(struct rte_swx_pipeline *p)
6087 struct thread *t = &p->threads[p->thread_id];
6088 struct instruction *ip = t->ip;
6089 uint64_t *regarray, idx, src;
6091 TRACE("[Thread %2u] regadd (r[i] += h)\n", p->thread_id);
6094 regarray = instr_regarray_regarray(p, ip);
6095 idx = instr_regarray_idx_imm(p, ip);
6096 src = instr_regarray_src_nbo(t, ip);
6097 regarray[idx] += src;
6104 instr_regadd_rim_exec(struct rte_swx_pipeline *p)
6106 struct thread *t = &p->threads[p->thread_id];
6107 struct instruction *ip = t->ip;
6108 uint64_t *regarray, idx, src;
6110 TRACE("[Thread %2u] regadd (r[i] += m)\n", p->thread_id);
6113 regarray = instr_regarray_regarray(p, ip);
6114 idx = instr_regarray_idx_imm(p, ip);
6115 src = instr_regarray_src_hbo(t, ip);
6116 regarray[idx] += src;
6123 instr_regadd_rii_exec(struct rte_swx_pipeline *p)
6125 struct thread *t = &p->threads[p->thread_id];
6126 struct instruction *ip = t->ip;
6127 uint64_t *regarray, idx, src;
6129 TRACE("[Thread %2u] regadd (r[i] += i)\n", p->thread_id);
6132 regarray = instr_regarray_regarray(p, ip);
6133 idx = instr_regarray_idx_imm(p, ip);
6134 src = ip->regarray.dstsrc_val;
6135 regarray[idx] += src;
6144 static struct metarray *
6145 metarray_find(struct rte_swx_pipeline *p, const char *name);
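/*
 * metprefetch instruction: "metprefetch METARRAY index". The index operand is
 * either a struct field (METPREFETCH_H/_M) or an immediate (METPREFETCH_I),
 * e.g. "metprefetch meters0 m.meter_idx" (operand names illustrative).
 */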
6148 instr_metprefetch_translate(struct rte_swx_pipeline *p,
6149 struct action *action,
6152 struct instruction *instr,
6153 struct instruction_data *data __rte_unused)
6155 char *metarray = tokens[1], *idx = tokens[2];
6158 uint32_t idx_struct_id, idx_val;
6160 CHECK(n_tokens == 3, EINVAL);
6162 m = metarray_find(p, metarray);
6165 /* METPREFETCH_H, METPREFETCH_M. */
6166 fidx = struct_field_parse(p, action, idx, &idx_struct_id);
6168 instr->type = INSTR_METPREFETCH_M;
6170 instr->type = INSTR_METPREFETCH_H;
6172 instr->meter.metarray_id = m->id;
6173 instr->meter.idx.struct_id = (uint8_t)idx_struct_id;
6174 instr->meter.idx.n_bits = fidx->n_bits;
6175 instr->meter.idx.offset = fidx->offset / 8;
6179 /* METPREFETCH_I. */
6180 idx_val = strtoul(idx, &idx, 0);
6181 CHECK(!idx[0], EINVAL);
6183 instr->type = INSTR_METPREFETCH_I;
6184 instr->meter.metarray_id = m->id;
6185 instr->meter.idx_val = idx_val;
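/*
 * meter instruction: "meter METARRAY index length color_in color_out", e.g.
 * "meter meters0 m.meter_idx m.pkt_len m.color_in m.color_out" (operand names
 * illustrative). Index and length may be fields or immediates and color_in
 * may be a field or an immediate, which selects one of the MMM/HHM/.../IHI
 * instruction variants below.
 */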
6190 instr_meter_translate(struct rte_swx_pipeline *p,
6191 struct action *action,
6194 struct instruction *instr,
6195 struct instruction_data *data __rte_unused)
6197 char *metarray = tokens[1], *idx = tokens[2], *length = tokens[3];
6198 char *color_in = tokens[4], *color_out = tokens[5];
6200 struct field *fidx, *flength, *fcin, *fcout;
6201 uint32_t idx_struct_id, length_struct_id;
6202 uint32_t color_in_struct_id, color_out_struct_id;
6204 CHECK(n_tokens == 6, EINVAL);
6206 m = metarray_find(p, metarray);
6209 fidx = struct_field_parse(p, action, idx, &idx_struct_id);
6211 flength = struct_field_parse(p, action, length, &length_struct_id);
6212 CHECK(flength, EINVAL);
6214 fcin = struct_field_parse(p, action, color_in, &color_in_struct_id);
6216 fcout = struct_field_parse(p, NULL, color_out, &color_out_struct_id);
6217 CHECK(fcout, EINVAL);
6219 /* index = HMEFT, length = HMEFT, color_in = MEFT, color_out = MEF. */
6221 instr->type = INSTR_METER_MMM;
6222 if (idx[0] == 'h' && length[0] == 'h')
6223 instr->type = INSTR_METER_HHM;
6224 if (idx[0] == 'h' && length[0] != 'h')
6225 instr->type = INSTR_METER_HMM;
6226 if (idx[0] != 'h' && length[0] == 'h')
6227 instr->type = INSTR_METER_MHM;
6229 instr->meter.metarray_id = m->id;
6231 instr->meter.idx.struct_id = (uint8_t)idx_struct_id;
6232 instr->meter.idx.n_bits = fidx->n_bits;
6233 instr->meter.idx.offset = fidx->offset / 8;
6235 instr->meter.length.struct_id = (uint8_t)length_struct_id;
6236 instr->meter.length.n_bits = flength->n_bits;
6237 instr->meter.length.offset = flength->offset / 8;
6239 instr->meter.color_in.struct_id = (uint8_t)color_in_struct_id;
6240 instr->meter.color_in.n_bits = fcin->n_bits;
6241 instr->meter.color_in.offset = fcin->offset / 8;
6243 instr->meter.color_out.struct_id = (uint8_t)color_out_struct_id;
6244 instr->meter.color_out.n_bits = fcout->n_bits;
6245 instr->meter.color_out.offset = fcout->offset / 8;
6250 /* index = HMEFT, length = HMEFT, color_in = I, color_out = MEF. */
6251 if (fidx && !fcin) {
6252 uint32_t color_in_val = strtoul(color_in, &color_in, 0);
6253 CHECK(!color_in[0], EINVAL);
6255 instr->type = INSTR_METER_MMI;
6256 if (idx[0] == 'h' && length[0] == 'h')
6257 instr->type = INSTR_METER_HHI;
6258 if (idx[0] == 'h' && length[0] != 'h')
6259 instr->type = INSTR_METER_HMI;
6260 if (idx[0] != 'h' && length[0] == 'h')
6261 instr->type = INSTR_METER_MHI;
6263 instr->meter.metarray_id = m->id;
6265 instr->meter.idx.struct_id = (uint8_t)idx_struct_id;
6266 instr->meter.idx.n_bits = fidx->n_bits;
6267 instr->meter.idx.offset = fidx->offset / 8;
6269 instr->meter.length.struct_id = (uint8_t)length_struct_id;
6270 instr->meter.length.n_bits = flength->n_bits;
6271 instr->meter.length.offset = flength->offset / 8;
6273 instr->meter.color_in_val = color_in_val;
6275 instr->meter.color_out.struct_id = (uint8_t)color_out_struct_id;
6276 instr->meter.color_out.n_bits = fcout->n_bits;
6277 instr->meter.color_out.offset = fcout->offset / 8;
6282 /* index = I, length = HMEFT, color_in = MEFT, color_out = MEF. */
6283 if (!fidx && fcin) {
6286 idx_val = strtoul(idx, &idx, 0);
6287 CHECK(!idx[0], EINVAL);
6289 instr->type = INSTR_METER_IMM;
6290 if (length[0] == 'h')
6291 instr->type = INSTR_METER_IHM;
6293 instr->meter.metarray_id = m->id;
6295 instr->meter.idx_val = idx_val;
6297 instr->meter.length.struct_id = (uint8_t)length_struct_id;
6298 instr->meter.length.n_bits = flength->n_bits;
6299 instr->meter.length.offset = flength->offset / 8;
6301 instr->meter.color_in.struct_id = (uint8_t)color_in_struct_id;
6302 instr->meter.color_in.n_bits = fcin->n_bits;
6303 instr->meter.color_in.offset = fcin->offset / 8;
6305 instr->meter.color_out.struct_id = (uint8_t)color_out_struct_id;
6306 instr->meter.color_out.n_bits = fcout->n_bits;
6307 instr->meter.color_out.offset = fcout->offset / 8;
6312 /* index = I, length = HMEFT, color_in = I, color_out = MEF. */
6313 if (!fidx && !fcin) {
6314 uint32_t idx_val, color_in_val;
6316 idx_val = strtoul(idx, &idx, 0);
6317 CHECK(!idx[0], EINVAL);
6319 color_in_val = strtoul(color_in, &color_in, 0);
6320 CHECK(!color_in[0], EINVAL);
6322 instr->type = INSTR_METER_IMI;
6323 if (length[0] == 'h')
6324 instr->type = INSTR_METER_IHI;
6326 instr->meter.metarray_id = m->id;
6328 instr->meter.idx_val = idx_val;
6330 instr->meter.length.struct_id = (uint8_t)length_struct_id;
6331 instr->meter.length.n_bits = flength->n_bits;
6332 instr->meter.length.offset = flength->offset / 8;
6334 instr->meter.color_in_val = color_in_val;
6336 instr->meter.color_out.struct_id = (uint8_t)color_out_struct_id;
6337 instr->meter.color_out.n_bits = fcout->n_bits;
6338 instr->meter.color_out.offset = fcout->offset / 8;
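/*
 * Meter run-time accessors: resolve the meter array entry from a field-based
 * (HBO/NBO) or immediate index and read/write the length, color_in and
 * color_out operands of the meter instruction.
 */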
6346 static inline struct meter *
6347 instr_meter_idx_hbo(struct rte_swx_pipeline *p, struct thread *t, struct instruction *ip)
6349 struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
6351 uint8_t *idx_struct = t->structs[ip->meter.idx.struct_id];
6352 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->meter.idx.offset];
6353 uint64_t idx64 = *idx64_ptr;
6354 uint64_t idx64_mask = UINT64_MAX >> (64 - ip->meter.idx.n_bits);
6355 uint64_t idx = idx64 & idx64_mask & r->size_mask;
6357 return &r->metarray[idx];
6360 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
6362 static inline struct meter *
6363 instr_meter_idx_nbo(struct rte_swx_pipeline *p, struct thread *t, struct instruction *ip)
6365 struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
6367 uint8_t *idx_struct = t->structs[ip->meter.idx.struct_id];
6368 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->meter.idx.offset];
6369 uint64_t idx64 = *idx64_ptr;
6370 uint64_t idx = (ntoh64(idx64) >> (64 - ip->meter.idx.n_bits)) & r->size_mask;
6372 return &r->metarray[idx];
6377 #define instr_meter_idx_nbo instr_meter_idx_hbo
6381 static inline struct meter *
6382 instr_meter_idx_imm(struct rte_swx_pipeline *p, struct instruction *ip)
6384 struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
6386 uint64_t idx = ip->meter.idx_val & r->size_mask;
6388 return &r->metarray[idx];
6391 static inline uint32_t
6392 instr_meter_length_hbo(struct thread *t, struct instruction *ip)
6394 uint8_t *src_struct = t->structs[ip->meter.length.struct_id];
6395 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.length.offset];
6396 uint64_t src64 = *src64_ptr;
6397 uint64_t src64_mask = UINT64_MAX >> (64 - ip->meter.length.n_bits);
6398 uint64_t src = src64 & src64_mask;
6400 return (uint32_t)src;
6403 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
6405 static inline uint32_t
6406 instr_meter_length_nbo(struct thread *t, struct instruction *ip)
6408 uint8_t *src_struct = t->structs[ip->meter.length.struct_id];
6409 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.length.offset];
6410 uint64_t src64 = *src64_ptr;
6411 uint64_t src = ntoh64(src64) >> (64 - ip->meter.length.n_bits);
6413 return (uint32_t)src;
6418 #define instr_meter_length_nbo instr_meter_length_hbo
6422 static inline enum rte_color
6423 instr_meter_color_in_hbo(struct thread *t, struct instruction *ip)
6425 uint8_t *src_struct = t->structs[ip->meter.color_in.struct_id];
6426 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.color_in.offset];
6427 uint64_t src64 = *src64_ptr;
6428 uint64_t src64_mask = UINT64_MAX >> (64 - ip->meter.color_in.n_bits);
6429 uint64_t src = src64 & src64_mask;
6431 return (enum rte_color)src;
6435 instr_meter_color_out_hbo_set(struct thread *t, struct instruction *ip, enum rte_color color_out)
6437 uint8_t *dst_struct = t->structs[ip->meter.color_out.struct_id];
6438 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->meter.color_out.offset];
6439 uint64_t dst64 = *dst64_ptr;
6440 uint64_t dst64_mask = UINT64_MAX >> (64 - ip->meter.color_out.n_bits);
6442 uint64_t src = (uint64_t)color_out;
6444 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
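/*
 * Meter instruction handlers. The metprefetch suffix names the index kind
 * (h/m/i); the meter suffix names the index, length and color_in kinds in
 * that order, with 'i' marking an immediate color_in (e.g. meter_hmi = header
 * index, meta-data length, immediate color_in).
 */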
6448 instr_metprefetch_h_exec(struct rte_swx_pipeline *p)
6450 struct thread *t = &p->threads[p->thread_id];
6451 struct instruction *ip = t->ip;
6454 TRACE("[Thread %2u] metprefetch (h)\n", p->thread_id);
6457 m = instr_meter_idx_nbo(p, t, ip);
6465 instr_metprefetch_m_exec(struct rte_swx_pipeline *p)
6467 struct thread *t = &p->threads[p->thread_id];
6468 struct instruction *ip = t->ip;
6471 TRACE("[Thread %2u] metprefetch (m)\n", p->thread_id);
6474 m = instr_meter_idx_hbo(p, t, ip);
6482 instr_metprefetch_i_exec(struct rte_swx_pipeline *p)
6484 struct thread *t = &p->threads[p->thread_id];
6485 struct instruction *ip = t->ip;
6488 TRACE("[Thread %2u] metprefetch (i)\n", p->thread_id);
6491 m = instr_meter_idx_imm(p, ip);
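/*
 * Common flow of every meter handler: locate the meter entry, prefetch its
 * counters, time-stamp with the TSC, run the color-aware trTCM check, mask
 * the result with the meter color mask, then update the per-color packet and
 * byte counters and write the output color to its destination field.
 */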
6499 instr_meter_hhm_exec(struct rte_swx_pipeline *p)
6501 struct thread *t = &p->threads[p->thread_id];
6502 struct instruction *ip = t->ip;
6504 uint64_t time, n_pkts, n_bytes;
6506 enum rte_color color_in, color_out;
6508 TRACE("[Thread %2u] meter (hhm)\n", p->thread_id);
6511 m = instr_meter_idx_nbo(p, t, ip);
6512 rte_prefetch0(m->n_pkts);
6513 time = rte_get_tsc_cycles();
6514 length = instr_meter_length_nbo(t, ip);
6515 color_in = instr_meter_color_in_hbo(t, ip);
6517 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6518 &m->profile->profile,
6523 color_out &= m->color_mask;
6525 n_pkts = m->n_pkts[color_out];
6526 n_bytes = m->n_bytes[color_out];
6528 instr_meter_color_out_hbo_set(t, ip, color_out);
6530 m->n_pkts[color_out] = n_pkts + 1;
6531 m->n_bytes[color_out] = n_bytes + length;
6538 instr_meter_hhi_exec(struct rte_swx_pipeline *p)
6540 struct thread *t = &p->threads[p->thread_id];
6541 struct instruction *ip = t->ip;
6543 uint64_t time, n_pkts, n_bytes;
6545 enum rte_color color_in, color_out;
6547 TRACE("[Thread %2u] meter (hhi)\n", p->thread_id);
6550 m = instr_meter_idx_nbo(p, t, ip);
6551 rte_prefetch0(m->n_pkts);
6552 time = rte_get_tsc_cycles();
6553 length = instr_meter_length_nbo(t, ip);
6554 color_in = (enum rte_color)ip->meter.color_in_val;
6556 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6557 &m->profile->profile,
6562 color_out &= m->color_mask;
6564 n_pkts = m->n_pkts[color_out];
6565 n_bytes = m->n_bytes[color_out];
6567 instr_meter_color_out_hbo_set(t, ip, color_out);
6569 m->n_pkts[color_out] = n_pkts + 1;
6570 m->n_bytes[color_out] = n_bytes + length;
6577 instr_meter_hmm_exec(struct rte_swx_pipeline *p)
6579 struct thread *t = &p->threads[p->thread_id];
6580 struct instruction *ip = t->ip;
6582 uint64_t time, n_pkts, n_bytes;
6584 enum rte_color color_in, color_out;
6586 TRACE("[Thread %2u] meter (hmm)\n", p->thread_id);
6589 m = instr_meter_idx_nbo(p, t, ip);
6590 rte_prefetch0(m->n_pkts);
6591 time = rte_get_tsc_cycles();
6592 length = instr_meter_length_hbo(t, ip);
6593 color_in = instr_meter_color_in_hbo(t, ip);
6595 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6596 &m->profile->profile,
6601 color_out &= m->color_mask;
6603 n_pkts = m->n_pkts[color_out];
6604 n_bytes = m->n_bytes[color_out];
6606 instr_meter_color_out_hbo_set(t, ip, color_out);
6608 m->n_pkts[color_out] = n_pkts + 1;
6609 m->n_bytes[color_out] = n_bytes + length;
6615 instr_meter_hmi_exec(struct rte_swx_pipeline *p)
6617 struct thread *t = &p->threads[p->thread_id];
6618 struct instruction *ip = t->ip;
6620 uint64_t time, n_pkts, n_bytes;
6622 enum rte_color color_in, color_out;
6624 TRACE("[Thread %2u] meter (hmi)\n", p->thread_id);
6627 m = instr_meter_idx_nbo(p, t, ip);
6628 rte_prefetch0(m->n_pkts);
6629 time = rte_get_tsc_cycles();
6630 length = instr_meter_length_hbo(t, ip);
6631 color_in = (enum rte_color)ip->meter.color_in_val;
6633 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6634 &m->profile->profile,
6639 color_out &= m->color_mask;
6641 n_pkts = m->n_pkts[color_out];
6642 n_bytes = m->n_bytes[color_out];
6644 instr_meter_color_out_hbo_set(t, ip, color_out);
6646 m->n_pkts[color_out] = n_pkts + 1;
6647 m->n_bytes[color_out] = n_bytes + length;
6654 instr_meter_mhm_exec(struct rte_swx_pipeline *p)
6656 struct thread *t = &p->threads[p->thread_id];
6657 struct instruction *ip = t->ip;
6659 uint64_t time, n_pkts, n_bytes;
6661 enum rte_color color_in, color_out;
6663 TRACE("[Thread %2u] meter (mhm)\n", p->thread_id);
6666 m = instr_meter_idx_hbo(p, t, ip);
6667 rte_prefetch0(m->n_pkts);
6668 time = rte_get_tsc_cycles();
6669 length = instr_meter_length_nbo(t, ip);
6670 color_in = instr_meter_color_in_hbo(t, ip);
6672 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6673 &m->profile->profile,
6678 color_out &= m->color_mask;
6680 n_pkts = m->n_pkts[color_out];
6681 n_bytes = m->n_bytes[color_out];
6683 instr_meter_color_out_hbo_set(t, ip, color_out);
6685 m->n_pkts[color_out] = n_pkts + 1;
6686 m->n_bytes[color_out] = n_bytes + length;
6693 instr_meter_mhi_exec(struct rte_swx_pipeline *p)
6695 struct thread *t = &p->threads[p->thread_id];
6696 struct instruction *ip = t->ip;
6698 uint64_t time, n_pkts, n_bytes;
6700 enum rte_color color_in, color_out;
6702 TRACE("[Thread %2u] meter (mhi)\n", p->thread_id);
6705 m = instr_meter_idx_hbo(p, t, ip);
6706 rte_prefetch0(m->n_pkts);
6707 time = rte_get_tsc_cycles();
6708 length = instr_meter_length_nbo(t, ip);
6709 color_in = (enum rte_color)ip->meter.color_in_val;
6711 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6712 &m->profile->profile,
6717 color_out &= m->color_mask;
6719 n_pkts = m->n_pkts[color_out];
6720 n_bytes = m->n_bytes[color_out];
6722 instr_meter_color_out_hbo_set(t, ip, color_out);
6724 m->n_pkts[color_out] = n_pkts + 1;
6725 m->n_bytes[color_out] = n_bytes + length;
6732 instr_meter_mmm_exec(struct rte_swx_pipeline *p)
6734 struct thread *t = &p->threads[p->thread_id];
6735 struct instruction *ip = t->ip;
6737 uint64_t time, n_pkts, n_bytes;
6739 enum rte_color color_in, color_out;
6741 TRACE("[Thread %2u] meter (mmm)\n", p->thread_id);
6744 m = instr_meter_idx_hbo(p, t, ip);
6745 rte_prefetch0(m->n_pkts);
6746 time = rte_get_tsc_cycles();
6747 length = instr_meter_length_hbo(t, ip);
6748 color_in = instr_meter_color_in_hbo(t, ip);
6750 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6751 &m->profile->profile,
6756 color_out &= m->color_mask;
6758 n_pkts = m->n_pkts[color_out];
6759 n_bytes = m->n_bytes[color_out];
6761 instr_meter_color_out_hbo_set(t, ip, color_out);
6763 m->n_pkts[color_out] = n_pkts + 1;
6764 m->n_bytes[color_out] = n_bytes + length;
6771 instr_meter_mmi_exec(struct rte_swx_pipeline *p)
6773 struct thread *t = &p->threads[p->thread_id];
6774 struct instruction *ip = t->ip;
6776 uint64_t time, n_pkts, n_bytes;
6778 enum rte_color color_in, color_out;
6780 TRACE("[Thread %2u] meter (mmi)\n", p->thread_id);
6783 m = instr_meter_idx_hbo(p, t, ip);
6784 rte_prefetch0(m->n_pkts);
6785 time = rte_get_tsc_cycles();
6786 length = instr_meter_length_hbo(t, ip);
6787 color_in = (enum rte_color)ip->meter.color_in_val;
6789 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6790 &m->profile->profile,
6795 color_out &= m->color_mask;
6797 n_pkts = m->n_pkts[color_out];
6798 n_bytes = m->n_bytes[color_out];
6800 instr_meter_color_out_hbo_set(t, ip, color_out);
6802 m->n_pkts[color_out] = n_pkts + 1;
6803 m->n_bytes[color_out] = n_bytes + length;
6810 instr_meter_ihm_exec(struct rte_swx_pipeline *p)
6812 struct thread *t = &p->threads[p->thread_id];
6813 struct instruction *ip = t->ip;
6815 uint64_t time, n_pkts, n_bytes;
6817 enum rte_color color_in, color_out;
6819 TRACE("[Thread %2u] meter (ihm)\n", p->thread_id);
6822 m = instr_meter_idx_imm(p, ip);
6823 rte_prefetch0(m->n_pkts);
6824 time = rte_get_tsc_cycles();
6825 length = instr_meter_length_nbo(t, ip);
6826 color_in = instr_meter_color_in_hbo(t, ip);
6828 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6829 &m->profile->profile,
6834 color_out &= m->color_mask;
6836 n_pkts = m->n_pkts[color_out];
6837 n_bytes = m->n_bytes[color_out];
6839 instr_meter_color_out_hbo_set(t, ip, color_out);
6841 m->n_pkts[color_out] = n_pkts + 1;
6842 m->n_bytes[color_out] = n_bytes + length;
6849 instr_meter_ihi_exec(struct rte_swx_pipeline *p)
6851 struct thread *t = &p->threads[p->thread_id];
6852 struct instruction *ip = t->ip;
6854 uint64_t time, n_pkts, n_bytes;
6856 enum rte_color color_in, color_out;
6858 TRACE("[Thread %2u] meter (ihi)\n", p->thread_id);
6861 m = instr_meter_idx_imm(p, ip);
6862 rte_prefetch0(m->n_pkts);
6863 time = rte_get_tsc_cycles();
6864 length = instr_meter_length_nbo(t, ip);
6865 color_in = (enum rte_color)ip->meter.color_in_val;
6867 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6868 &m->profile->profile,
6873 color_out &= m->color_mask;
6875 n_pkts = m->n_pkts[color_out];
6876 n_bytes = m->n_bytes[color_out];
6878 instr_meter_color_out_hbo_set(t, ip, color_out);
6880 m->n_pkts[color_out] = n_pkts + 1;
6881 m->n_bytes[color_out] = n_bytes + length;
6888 instr_meter_imm_exec(struct rte_swx_pipeline *p)
6890 struct thread *t = &p->threads[p->thread_id];
6891 struct instruction *ip = t->ip;
6893 uint64_t time, n_pkts, n_bytes;
6895 enum rte_color color_in, color_out;
6897 TRACE("[Thread %2u] meter (imm)\n", p->thread_id);
6900 m = instr_meter_idx_imm(p, ip);
6901 rte_prefetch0(m->n_pkts);
6902 time = rte_get_tsc_cycles();
6903 length = instr_meter_length_hbo(t, ip);
6904 color_in = instr_meter_color_in_hbo(t, ip);
6906 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6907 &m->profile->profile,
6912 color_out &= m->color_mask;
6914 n_pkts = m->n_pkts[color_out];
6915 n_bytes = m->n_bytes[color_out];
6917 instr_meter_color_out_hbo_set(t, ip, color_out);
6919 m->n_pkts[color_out] = n_pkts + 1;
6920 m->n_bytes[color_out] = n_bytes + length;
6926 instr_meter_imi_exec(struct rte_swx_pipeline *p)
6928 struct thread *t = &p->threads[p->thread_id];
6929 struct instruction *ip = t->ip;
6931 uint64_t time, n_pkts, n_bytes;
6933 enum rte_color color_in, color_out;
6935 TRACE("[Thread %2u] meter (imi)\n", p->thread_id);
6938 m = instr_meter_idx_imm(p, ip);
6939 rte_prefetch0(m->n_pkts);
6940 time = rte_get_tsc_cycles();
6941 length = instr_meter_length_hbo(t, ip);
6942 color_in = (enum rte_color)ip->meter.color_in_val;
6944 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6945 &m->profile->profile,
6950 color_out &= m->color_mask;
6952 n_pkts = m->n_pkts[color_out];
6953 n_bytes = m->n_bytes[color_out];
6955 instr_meter_color_out_hbo_set(t, ip, color_out);
6957 m->n_pkts[color_out] = n_pkts + 1;
6958 m->n_bytes[color_out] = n_bytes + length;
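/*
 * Jump instructions: "jmp LABEL" (unconditional), "jmpv/jmpnv LABEL HEADER"
 * (header valid/invalid), "jmph/jmpnh LABEL" (table hit/miss), "jmpa/jmpna
 * LABEL ACTION" (action hit/miss) and "jmpeq/jmpneq/jmplt/jmpgt LABEL a b"
 * (field comparison). The label is stored as text at translation time and
 * resolved to an instruction pointer later by instr_jmp_resolve().
 */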
6967 static struct action *
6968 action_find(struct rte_swx_pipeline *p, const char *name);
6971 instr_jmp_translate(struct rte_swx_pipeline *p __rte_unused,
6972 struct action *action __rte_unused,
6975 struct instruction *instr,
6976 struct instruction_data *data)
6978 CHECK(n_tokens == 2, EINVAL);
6980 strcpy(data->jmp_label, tokens[1]);
6982 instr->type = INSTR_JMP;
6983 instr->jmp.ip = NULL; /* Resolved later. */
6988 instr_jmp_valid_translate(struct rte_swx_pipeline *p,
6989 struct action *action __rte_unused,
6992 struct instruction *instr,
6993 struct instruction_data *data)
6997 CHECK(n_tokens == 3, EINVAL);
6999 strcpy(data->jmp_label, tokens[1]);
7001 h = header_parse(p, tokens[2]);
7004 instr->type = INSTR_JMP_VALID;
7005 instr->jmp.ip = NULL; /* Resolved later. */
7006 instr->jmp.header_id = h->id;
7011 instr_jmp_invalid_translate(struct rte_swx_pipeline *p,
7012 struct action *action __rte_unused,
7015 struct instruction *instr,
7016 struct instruction_data *data)
7020 CHECK(n_tokens == 3, EINVAL);
7022 strcpy(data->jmp_label, tokens[1]);
7024 h = header_parse(p, tokens[2]);
7027 instr->type = INSTR_JMP_INVALID;
7028 instr->jmp.ip = NULL; /* Resolved later. */
7029 instr->jmp.header_id = h->id;
7034 instr_jmp_hit_translate(struct rte_swx_pipeline *p __rte_unused,
7035 struct action *action,
7038 struct instruction *instr,
7039 struct instruction_data *data)
7041 CHECK(!action, EINVAL);
7042 CHECK(n_tokens == 2, EINVAL);
7044 strcpy(data->jmp_label, tokens[1]);
7046 instr->type = INSTR_JMP_HIT;
7047 instr->jmp.ip = NULL; /* Resolved later. */
7052 instr_jmp_miss_translate(struct rte_swx_pipeline *p __rte_unused,
7053 struct action *action,
7056 struct instruction *instr,
7057 struct instruction_data *data)
7059 CHECK(!action, EINVAL);
7060 CHECK(n_tokens == 2, EINVAL);
7062 strcpy(data->jmp_label, tokens[1]);
7064 instr->type = INSTR_JMP_MISS;
7065 instr->jmp.ip = NULL; /* Resolved later. */
7070 instr_jmp_action_hit_translate(struct rte_swx_pipeline *p,
7071 struct action *action,
7074 struct instruction *instr,
7075 struct instruction_data *data)
7079 CHECK(!action, EINVAL);
7080 CHECK(n_tokens == 3, EINVAL);
7082 strcpy(data->jmp_label, tokens[1]);
7084 a = action_find(p, tokens[2]);
7087 instr->type = INSTR_JMP_ACTION_HIT;
7088 instr->jmp.ip = NULL; /* Resolved later. */
7089 instr->jmp.action_id = a->id;
7094 instr_jmp_action_miss_translate(struct rte_swx_pipeline *p,
7095 struct action *action,
7098 struct instruction *instr,
7099 struct instruction_data *data)
7103 CHECK(!action, EINVAL);
7104 CHECK(n_tokens == 3, EINVAL);
7106 strcpy(data->jmp_label, tokens[1]);
7108 a = action_find(p, tokens[2]);
7111 instr->type = INSTR_JMP_ACTION_MISS;
7112 instr->jmp.ip = NULL; /* Resolved later. */
7113 instr->jmp.action_id = a->id;
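/*
 * For the two-operand compare-and-jump instructions, the operand kinds select
 * the variant: MH/HM/HH when one or both operands are header fields, the
 * plain variant when both are meta-data/action fields, and an immediate
 * variant (_I, _MI or _HI) when the second operand is a constant; for
 * jmpeq/jmpneq the constant is pre-converted to the byte order of the first
 * operand.
 */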
7118 instr_jmp_eq_translate(struct rte_swx_pipeline *p,
7119 struct action *action,
7122 struct instruction *instr,
7123 struct instruction_data *data)
7125 char *a = tokens[2], *b = tokens[3];
7126 struct field *fa, *fb;
7128 uint32_t a_struct_id, b_struct_id;
7130 CHECK(n_tokens == 4, EINVAL);
7132 strcpy(data->jmp_label, tokens[1]);
7134 fa = struct_field_parse(p, action, a, &a_struct_id);
7137 /* JMP_EQ, JMP_EQ_MH, JMP_EQ_HM, JMP_EQ_HH. */
7138 fb = struct_field_parse(p, action, b, &b_struct_id);
7140 instr->type = INSTR_JMP_EQ;
7141 if (a[0] != 'h' && b[0] == 'h')
7142 instr->type = INSTR_JMP_EQ_MH;
7143 if (a[0] == 'h' && b[0] != 'h')
7144 instr->type = INSTR_JMP_EQ_HM;
7145 if (a[0] == 'h' && b[0] == 'h')
7146 instr->type = INSTR_JMP_EQ_HH;
7147 instr->jmp.ip = NULL; /* Resolved later. */
7149 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7150 instr->jmp.a.n_bits = fa->n_bits;
7151 instr->jmp.a.offset = fa->offset / 8;
7152 instr->jmp.b.struct_id = (uint8_t)b_struct_id;
7153 instr->jmp.b.n_bits = fb->n_bits;
7154 instr->jmp.b.offset = fb->offset / 8;
7159 b_val = strtoull(b, &b, 0);
7160 CHECK(!b[0], EINVAL);
7163 b_val = hton64(b_val) >> (64 - fa->n_bits);
7165 instr->type = INSTR_JMP_EQ_I;
7166 instr->jmp.ip = NULL; /* Resolved later. */
7167 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7168 instr->jmp.a.n_bits = fa->n_bits;
7169 instr->jmp.a.offset = fa->offset / 8;
7170 instr->jmp.b_val = b_val;
7175 instr_jmp_neq_translate(struct rte_swx_pipeline *p,
7176 struct action *action,
7179 struct instruction *instr,
7180 struct instruction_data *data)
7182 char *a = tokens[2], *b = tokens[3];
7183 struct field *fa, *fb;
7185 uint32_t a_struct_id, b_struct_id;
7187 CHECK(n_tokens == 4, EINVAL);
7189 strcpy(data->jmp_label, tokens[1]);
7191 fa = struct_field_parse(p, action, a, &a_struct_id);
7194 /* JMP_NEQ, JMP_NEQ_MH, JMP_NEQ_HM, JMP_NEQ_HH. */
7195 fb = struct_field_parse(p, action, b, &b_struct_id);
7197 instr->type = INSTR_JMP_NEQ;
7198 if (a[0] != 'h' && b[0] == 'h')
7199 instr->type = INSTR_JMP_NEQ_MH;
7200 if (a[0] == 'h' && b[0] != 'h')
7201 instr->type = INSTR_JMP_NEQ_HM;
7202 if (a[0] == 'h' && b[0] == 'h')
7203 instr->type = INSTR_JMP_NEQ_HH;
7204 instr->jmp.ip = NULL; /* Resolved later. */
7206 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7207 instr->jmp.a.n_bits = fa->n_bits;
7208 instr->jmp.a.offset = fa->offset / 8;
7209 instr->jmp.b.struct_id = (uint8_t)b_struct_id;
7210 instr->jmp.b.n_bits = fb->n_bits;
7211 instr->jmp.b.offset = fb->offset / 8;
7216 b_val = strtoull(b, &b, 0);
7217 CHECK(!b[0], EINVAL);
7220 b_val = hton64(b_val) >> (64 - fa->n_bits);
7222 instr->type = INSTR_JMP_NEQ_I;
7223 instr->jmp.ip = NULL; /* Resolved later. */
7224 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7225 instr->jmp.a.n_bits = fa->n_bits;
7226 instr->jmp.a.offset = fa->offset / 8;
7227 instr->jmp.b_val = b_val;
7232 instr_jmp_lt_translate(struct rte_swx_pipeline *p,
7233 struct action *action,
7236 struct instruction *instr,
7237 struct instruction_data *data)
7239 char *a = tokens[2], *b = tokens[3];
7240 struct field *fa, *fb;
7242 uint32_t a_struct_id, b_struct_id;
7244 CHECK(n_tokens == 4, EINVAL);
7246 strcpy(data->jmp_label, tokens[1]);
7248 fa = struct_field_parse(p, action, a, &a_struct_id);
7251 /* JMP_LT, JMP_LT_MH, JMP_LT_HM, JMP_LT_HH. */
7252 fb = struct_field_parse(p, action, b, &b_struct_id);
7254 instr->type = INSTR_JMP_LT;
7255 if (a[0] == 'h' && b[0] != 'h')
7256 instr->type = INSTR_JMP_LT_HM;
7257 if (a[0] != 'h' && b[0] == 'h')
7258 instr->type = INSTR_JMP_LT_MH;
7259 if (a[0] == 'h' && b[0] == 'h')
7260 instr->type = INSTR_JMP_LT_HH;
7261 instr->jmp.ip = NULL; /* Resolved later. */
7263 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7264 instr->jmp.a.n_bits = fa->n_bits;
7265 instr->jmp.a.offset = fa->offset / 8;
7266 instr->jmp.b.struct_id = (uint8_t)b_struct_id;
7267 instr->jmp.b.n_bits = fb->n_bits;
7268 instr->jmp.b.offset = fb->offset / 8;
7272 /* JMP_LT_MI, JMP_LT_HI. */
7273 b_val = strtoull(b, &b, 0);
7274 CHECK(!b[0], EINVAL);
7276 instr->type = INSTR_JMP_LT_MI;
7278 instr->type = INSTR_JMP_LT_HI;
7279 instr->jmp.ip = NULL; /* Resolved later. */
7281 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7282 instr->jmp.a.n_bits = fa->n_bits;
7283 instr->jmp.a.offset = fa->offset / 8;
7284 instr->jmp.b_val = b_val;
7289 instr_jmp_gt_translate(struct rte_swx_pipeline *p,
7290 struct action *action,
7293 struct instruction *instr,
7294 struct instruction_data *data)
7296 char *a = tokens[2], *b = tokens[3];
7297 struct field *fa, *fb;
7299 uint32_t a_struct_id, b_struct_id;
7301 CHECK(n_tokens == 4, EINVAL);
7303 strcpy(data->jmp_label, tokens[1]);
7305 fa = struct_field_parse(p, action, a, &a_struct_id);
7308 /* JMP_GT, JMP_GT_MH, JMP_GT_HM, JMP_GT_HH. */
7309 fb = struct_field_parse(p, action, b, &b_struct_id);
7311 instr->type = INSTR_JMP_GT;
7312 if (a[0] == 'h' && b[0] != 'h')
7313 instr->type = INSTR_JMP_GT_HM;
7314 if (a[0] != 'h' && b[0] == 'h')
7315 instr->type = INSTR_JMP_GT_MH;
7316 if (a[0] == 'h' && b[0] == 'h')
7317 instr->type = INSTR_JMP_GT_HH;
7318 instr->jmp.ip = NULL; /* Resolved later. */
7320 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7321 instr->jmp.a.n_bits = fa->n_bits;
7322 instr->jmp.a.offset = fa->offset / 8;
7323 instr->jmp.b.struct_id = (uint8_t)b_struct_id;
7324 instr->jmp.b.n_bits = fb->n_bits;
7325 instr->jmp.b.offset = fb->offset / 8;
7329 /* JMP_GT_MI, JMP_GT_HI. */
7330 b_val = strtoull(b, &b, 0);
7331 CHECK(!b[0], EINVAL);
7333 instr->type = INSTR_JMP_GT_MI;
7335 instr->type = INSTR_JMP_GT_HI;
7336 instr->jmp.ip = NULL; /* Resolved later. */
7338 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7339 instr->jmp.a.n_bits = fa->n_bits;
7340 instr->jmp.a.offset = fa->offset / 8;
7341 instr->jmp.b_val = b_val;
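/*
 * Jump instruction handlers: each one either takes the branch by setting the
 * thread instruction pointer to the pre-resolved target (ip->jmp.ip) or falls
 * through to the next instruction. The field comparisons expand through the
 * JMP_CMP_* helper macros.
 */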
7346 instr_jmp_exec(struct rte_swx_pipeline *p)
7348 struct thread *t = &p->threads[p->thread_id];
7349 struct instruction *ip = t->ip;
7351 TRACE("[Thread %2u] jmp\n", p->thread_id);
7353 thread_ip_set(t, ip->jmp.ip);
7357 instr_jmp_valid_exec(struct rte_swx_pipeline *p)
7359 struct thread *t = &p->threads[p->thread_id];
7360 struct instruction *ip = t->ip;
7361 uint32_t header_id = ip->jmp.header_id;
7363 TRACE("[Thread %2u] jmpv\n", p->thread_id);
7365 t->ip = HEADER_VALID(t, header_id) ? ip->jmp.ip : (t->ip + 1);
7369 instr_jmp_invalid_exec(struct rte_swx_pipeline *p)
7371 struct thread *t = &p->threads[p->thread_id];
7372 struct instruction *ip = t->ip;
7373 uint32_t header_id = ip->jmp.header_id;
7375 TRACE("[Thread %2u] jmpnv\n", p->thread_id);
7377 t->ip = HEADER_VALID(t, header_id) ? (t->ip + 1) : ip->jmp.ip;
7381 instr_jmp_hit_exec(struct rte_swx_pipeline *p)
7383 struct thread *t = &p->threads[p->thread_id];
7384 struct instruction *ip = t->ip;
7385 struct instruction *ip_next[] = {t->ip + 1, ip->jmp.ip};
7387 TRACE("[Thread %2u] jmph\n", p->thread_id);
7389 t->ip = ip_next[t->hit];
7393 instr_jmp_miss_exec(struct rte_swx_pipeline *p)
7395 struct thread *t = &p->threads[p->thread_id];
7396 struct instruction *ip = t->ip;
7397 struct instruction *ip_next[] = {ip->jmp.ip, t->ip + 1};
7399 TRACE("[Thread %2u] jmpnh\n", p->thread_id);
7401 t->ip = ip_next[t->hit];
7405 instr_jmp_action_hit_exec(struct rte_swx_pipeline *p)
7407 struct thread *t = &p->threads[p->thread_id];
7408 struct instruction *ip = t->ip;
7410 TRACE("[Thread %2u] jmpa\n", p->thread_id);
7412 t->ip = (ip->jmp.action_id == t->action_id) ? ip->jmp.ip : (t->ip + 1);
7416 instr_jmp_action_miss_exec(struct rte_swx_pipeline *p)
7418 struct thread *t = &p->threads[p->thread_id];
7419 struct instruction *ip = t->ip;
7421 TRACE("[Thread %2u] jmpna\n", p->thread_id);
7423 t->ip = (ip->jmp.action_id == t->action_id) ? (t->ip + 1) : ip->jmp.ip;
7427 instr_jmp_eq_exec(struct rte_swx_pipeline *p)
7429 struct thread *t = &p->threads[p->thread_id];
7430 struct instruction *ip = t->ip;
7432 TRACE("[Thread %2u] jmpeq\n", p->thread_id);
7438 instr_jmp_eq_mh_exec(struct rte_swx_pipeline *p)
7440 struct thread *t = &p->threads[p->thread_id];
7441 struct instruction *ip = t->ip;
7443 TRACE("[Thread %2u] jmpeq (mh)\n", p->thread_id);
7445 JMP_CMP_MH(t, ip, ==);
7449 instr_jmp_eq_hm_exec(struct rte_swx_pipeline *p)
7451 struct thread *t = &p->threads[p->thread_id];
7452 struct instruction *ip = t->ip;
7454 TRACE("[Thread %2u] jmpeq (hm)\n", p->thread_id);
7456 JMP_CMP_HM(t, ip, ==);
7460 instr_jmp_eq_hh_exec(struct rte_swx_pipeline *p)
7462 struct thread *t = &p->threads[p->thread_id];
7463 struct instruction *ip = t->ip;
7465 TRACE("[Thread %2u] jmpeq (hh)\n", p->thread_id);
7467 JMP_CMP_HH_FAST(t, ip, ==);
7471 instr_jmp_eq_i_exec(struct rte_swx_pipeline *p)
7473 struct thread *t = &p->threads[p->thread_id];
7474 struct instruction *ip = t->ip;
7476 TRACE("[Thread %2u] jmpeq (i)\n", p->thread_id);
7478 JMP_CMP_I(t, ip, ==);
7482 instr_jmp_neq_exec(struct rte_swx_pipeline *p)
7484 struct thread *t = &p->threads[p->thread_id];
7485 struct instruction *ip = t->ip;
7487 TRACE("[Thread %2u] jmpneq\n", p->thread_id);
7493 instr_jmp_neq_mh_exec(struct rte_swx_pipeline *p)
7495 struct thread *t = &p->threads[p->thread_id];
7496 struct instruction *ip = t->ip;
7498 TRACE("[Thread %2u] jmpneq (mh)\n", p->thread_id);
7500 JMP_CMP_MH(t, ip, !=);
7504 instr_jmp_neq_hm_exec(struct rte_swx_pipeline *p)
7506 struct thread *t = &p->threads[p->thread_id];
7507 struct instruction *ip = t->ip;
7509 TRACE("[Thread %2u] jmpneq (hm)\n", p->thread_id);
7511 JMP_CMP_HM(t, ip, !=);
7515 instr_jmp_neq_hh_exec(struct rte_swx_pipeline *p)
7517 struct thread *t = &p->threads[p->thread_id];
7518 struct instruction *ip = t->ip;
7520 TRACE("[Thread %2u] jmpneq (hh)\n", p->thread_id);
7522 JMP_CMP_HH_FAST(t, ip, !=);
7526 instr_jmp_neq_i_exec(struct rte_swx_pipeline *p)
7528 struct thread *t = &p->threads[p->thread_id];
7529 struct instruction *ip = t->ip;
7531 TRACE("[Thread %2u] jmpneq (i)\n", p->thread_id);
7533 JMP_CMP_I(t, ip, !=);
7537 instr_jmp_lt_exec(struct rte_swx_pipeline *p)
7539 struct thread *t = &p->threads[p->thread_id];
7540 struct instruction *ip = t->ip;
7542 TRACE("[Thread %2u] jmplt\n", p->thread_id);
7548 instr_jmp_lt_mh_exec(struct rte_swx_pipeline *p)
7550 struct thread *t = &p->threads[p->thread_id];
7551 struct instruction *ip = t->ip;
7553 TRACE("[Thread %2u] jmplt (mh)\n", p->thread_id);
7555 JMP_CMP_MH(t, ip, <);
7559 instr_jmp_lt_hm_exec(struct rte_swx_pipeline *p)
7561 struct thread *t = &p->threads[p->thread_id];
7562 struct instruction *ip = t->ip;
7564 TRACE("[Thread %2u] jmplt (hm)\n", p->thread_id);
7566 JMP_CMP_HM(t, ip, <);
7570 instr_jmp_lt_hh_exec(struct rte_swx_pipeline *p)
7572 struct thread *t = &p->threads[p->thread_id];
7573 struct instruction *ip = t->ip;
7575 TRACE("[Thread %2u] jmplt (hh)\n", p->thread_id);
7577 JMP_CMP_HH(t, ip, <);
7581 instr_jmp_lt_mi_exec(struct rte_swx_pipeline *p)
7583 struct thread *t = &p->threads[p->thread_id];
7584 struct instruction *ip = t->ip;
7586 TRACE("[Thread %2u] jmplt (mi)\n", p->thread_id);
7588 JMP_CMP_MI(t, ip, <);
7592 instr_jmp_lt_hi_exec(struct rte_swx_pipeline *p)
7594 struct thread *t = &p->threads[p->thread_id];
7595 struct instruction *ip = t->ip;
7597 TRACE("[Thread %2u] jmplt (hi)\n", p->thread_id);
7599 JMP_CMP_HI(t, ip, <);
7603 instr_jmp_gt_exec(struct rte_swx_pipeline *p)
7605 struct thread *t = &p->threads[p->thread_id];
7606 struct instruction *ip = t->ip;
7608 TRACE("[Thread %2u] jmpgt\n", p->thread_id);
7614 instr_jmp_gt_mh_exec(struct rte_swx_pipeline *p)
7616 struct thread *t = &p->threads[p->thread_id];
7617 struct instruction *ip = t->ip;
7619 TRACE("[Thread %2u] jmpgt (mh)\n", p->thread_id);
7621 JMP_CMP_MH(t, ip, >);
7625 instr_jmp_gt_hm_exec(struct rte_swx_pipeline *p)
7627 struct thread *t = &p->threads[p->thread_id];
7628 struct instruction *ip = t->ip;
7630 TRACE("[Thread %2u] jmpgt (hm)\n", p->thread_id);
7632 JMP_CMP_HM(t, ip, >);
7636 instr_jmp_gt_hh_exec(struct rte_swx_pipeline *p)
7638 struct thread *t = &p->threads[p->thread_id];
7639 struct instruction *ip = t->ip;
7641 TRACE("[Thread %2u] jmpgt (hh)\n", p->thread_id);
7643 JMP_CMP_HH(t, ip, >);
7647 instr_jmp_gt_mi_exec(struct rte_swx_pipeline *p)
7649 struct thread *t = &p->threads[p->thread_id];
7650 struct instruction *ip = t->ip;
7652 TRACE("[Thread %2u] jmpgt (mi)\n", p->thread_id);
7654 JMP_CMP_MI(t, ip, >);
7658 instr_jmp_gt_hi_exec(struct rte_swx_pipeline *p)
7660 struct thread *t = &p->threads[p->thread_id];
7661 struct instruction *ip = t->ip;
7663 TRACE("[Thread %2u] jmpgt (hi)\n", p->thread_id);
7665 JMP_CMP_HI(t, ip, >);
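/* return instruction: only valid inside an action; it hands control back from
 * the action code to the pipeline instruction stream.
 */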
7672 instr_return_translate(struct rte_swx_pipeline *p __rte_unused,
7673 struct action *action,
7674 char **tokens __rte_unused,
7676 struct instruction *instr,
7677 struct instruction_data *data __rte_unused)
7679 CHECK(action, EINVAL);
7680 CHECK(n_tokens == 1, EINVAL);
7682 instr->type = INSTR_RETURN;
7687 instr_return_exec(struct rte_swx_pipeline *p)
7689 struct thread *t = &p->threads[p->thread_id];
7691 TRACE("[Thread %2u] return\n", p->thread_id);
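/*
 * Top-level translation of one instruction string: split it into tokens, peel
 * off the optional "LABEL :" prefix, then dispatch on the opcode keyword to
 * the per-instruction translate routine.
 */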
7697 instr_translate(struct rte_swx_pipeline *p,
7698 struct action *action,
7700 struct instruction *instr,
7701 struct instruction_data *data)
7703 char *tokens[RTE_SWX_INSTRUCTION_TOKENS_MAX];
7704 int n_tokens = 0, tpos = 0;
7706 /* Parse the instruction string into tokens. */
7710 token = strtok_r(string, " \t\v", &string);
7714 CHECK(n_tokens < RTE_SWX_INSTRUCTION_TOKENS_MAX, EINVAL);
7715 CHECK_NAME(token, EINVAL);
7717 tokens[n_tokens] = token;
7721 CHECK(n_tokens, EINVAL);
7723 /* Handle the optional instruction label. */
7724 if ((n_tokens >= 2) && !strcmp(tokens[1], ":")) {
7725 strcpy(data->label, tokens[0]);
7728 CHECK(n_tokens - tpos, EINVAL);
7731 /* Identify the instruction type. */
7732 if (!strcmp(tokens[tpos], "rx"))
7733 return instr_rx_translate(p,
7740 if (!strcmp(tokens[tpos], "tx"))
7741 return instr_tx_translate(p,
7748 if (!strcmp(tokens[tpos], "drop"))
7749 return instr_drop_translate(p,
7756 if (!strcmp(tokens[tpos], "extract"))
7757 return instr_hdr_extract_translate(p,
7764 if (!strcmp(tokens[tpos], "emit"))
7765 return instr_hdr_emit_translate(p,
7772 if (!strcmp(tokens[tpos], "validate"))
7773 return instr_hdr_validate_translate(p,
7780 if (!strcmp(tokens[tpos], "invalidate"))
7781 return instr_hdr_invalidate_translate(p,
7788 if (!strcmp(tokens[tpos], "mov"))
7789 return instr_mov_translate(p,
7796 if (!strcmp(tokens[tpos], "add"))
7797 return instr_alu_add_translate(p,
7804 if (!strcmp(tokens[tpos], "sub"))
7805 return instr_alu_sub_translate(p,
7812 if (!strcmp(tokens[tpos], "ckadd"))
7813 return instr_alu_ckadd_translate(p,
7820 if (!strcmp(tokens[tpos], "cksub"))
7821 return instr_alu_cksub_translate(p,
7828 if (!strcmp(tokens[tpos], "and"))
7829 return instr_alu_and_translate(p,
7836 if (!strcmp(tokens[tpos], "or"))
7837 return instr_alu_or_translate(p,
7844 if (!strcmp(tokens[tpos], "xor"))
7845 return instr_alu_xor_translate(p,
7852 if (!strcmp(tokens[tpos], "shl"))
7853 return instr_alu_shl_translate(p,
7860 if (!strcmp(tokens[tpos], "shr"))
7861 return instr_alu_shr_translate(p,
7868 if (!strcmp(tokens[tpos], "regprefetch"))
7869 return instr_regprefetch_translate(p,
7876 if (!strcmp(tokens[tpos], "regrd"))
7877 return instr_regrd_translate(p,
7884 if (!strcmp(tokens[tpos], "regwr"))
7885 return instr_regwr_translate(p,
7892 if (!strcmp(tokens[tpos], "regadd"))
7893 return instr_regadd_translate(p,
7900 if (!strcmp(tokens[tpos], "metprefetch"))
7901 return instr_metprefetch_translate(p,
7908 if (!strcmp(tokens[tpos], "meter"))
7909 return instr_meter_translate(p,
7916 if (!strcmp(tokens[tpos], "table"))
7917 return instr_table_translate(p,
7924 if (!strcmp(tokens[tpos], "extern"))
7925 return instr_extern_translate(p,
7932 if (!strcmp(tokens[tpos], "jmp"))
7933 return instr_jmp_translate(p,
7940 if (!strcmp(tokens[tpos], "jmpv"))
7941 return instr_jmp_valid_translate(p,
7948 if (!strcmp(tokens[tpos], "jmpnv"))
7949 return instr_jmp_invalid_translate(p,
7956 if (!strcmp(tokens[tpos], "jmph"))
7957 return instr_jmp_hit_translate(p,
7964 if (!strcmp(tokens[tpos], "jmpnh"))
7965 return instr_jmp_miss_translate(p,
7972 if (!strcmp(tokens[tpos], "jmpa"))
7973 return instr_jmp_action_hit_translate(p,
7980 if (!strcmp(tokens[tpos], "jmpna"))
7981 return instr_jmp_action_miss_translate(p,
7988 if (!strcmp(tokens[tpos], "jmpeq"))
7989 return instr_jmp_eq_translate(p,
7996 if (!strcmp(tokens[tpos], "jmpneq"))
7997 return instr_jmp_neq_translate(p,
8004 if (!strcmp(tokens[tpos], "jmplt"))
8005 return instr_jmp_lt_translate(p,
8012 if (!strcmp(tokens[tpos], "jmpgt"))
8013 return instr_jmp_gt_translate(p,
8020 if (!strcmp(tokens[tpos], "return"))
8021 return instr_return_translate(p,
8031 static struct instruction_data *
8032 label_find(struct instruction_data *data, uint32_t n, const char *label)
8036 for (i = 0; i < n; i++)
8037 if (!strcmp(label, data[i].label))
8044 label_is_used(struct instruction_data *data, uint32_t n, const char *label)
8046 uint32_t count = 0, i;
8051 for (i = 0; i < n; i++)
8052 if (!strcmp(label, data[i].jmp_label))
8059 instr_label_check(struct instruction_data *instruction_data,
8060 uint32_t n_instructions)
8064 /* Check that all instruction labels are unique. */
8065 for (i = 0; i < n_instructions; i++) {
8066 struct instruction_data *data = &instruction_data[i];
8067 char *label = data->label;
8073 for (j = i + 1; j < n_instructions; j++)
8074 CHECK(strcmp(label, data[j].label), EINVAL);
8077 /* Get users for each instruction label. */
8078 for (i = 0; i < n_instructions; i++) {
8079 struct instruction_data *data = &instruction_data[i];
8080 char *label = data->label;
8082 data->n_users = label_is_used(instruction_data,
8091 instr_jmp_resolve(struct instruction *instructions,
8092 struct instruction_data *instruction_data,
8093 uint32_t n_instructions)
8097 for (i = 0; i < n_instructions; i++) {
8098 struct instruction *instr = &instructions[i];
8099 struct instruction_data *data = &instruction_data[i];
8100 struct instruction_data *found;
8102 if (!instruction_is_jmp(instr))
8105 found = label_find(instruction_data,
8108 CHECK(found, EINVAL);
8110 instr->jmp.ip = &instructions[found - instruction_data];
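/*
 * instr_verify(): structural checks on the translated program. The pipeline
 * program must start with rx, contain at least one tx and end with a tx or an
 * unconditional jump; an action must contain at least one return or tx
 * instruction.
 */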
8117 instr_verify(struct rte_swx_pipeline *p __rte_unused,
8119 struct instruction *instr,
8120 struct instruction_data *data __rte_unused,
8121 uint32_t n_instructions)
8124 enum instruction_type type;
8127 /* Check that the first instruction is rx. */
8128 CHECK(instr[0].type == INSTR_RX, EINVAL);
8130 /* Check that there is at least one tx instruction. */
8131 for (i = 0; i < n_instructions; i++) {
8132 type = instr[i].type;
8134 if (instruction_is_tx(type))
8137 CHECK(i < n_instructions, EINVAL);
8139 /* Check that the last instruction is either tx or an unconditional jump. */
8142 type = instr[n_instructions - 1].type;
8143 CHECK(instruction_is_tx(type) || (type == INSTR_JMP), EINVAL);
8147 enum instruction_type type;
8150 /* Check that there is at least one return or tx instruction. */
8151 for (i = 0; i < n_instructions; i++) {
8152 type = instr[i].type;
8154 if ((type == INSTR_RETURN) || instruction_is_tx(type))
8157 CHECK(i < n_instructions, EINVAL);
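/*
 * The optimizer passes below fuse adjacent instructions into a single wider
 * instruction and mark the absorbed ones as invalid; instr_compact() then
 * squeezes the invalidated entries out of the instruction array.
 */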
8164 instr_compact(struct instruction *instructions,
8165 struct instruction_data *instruction_data,
8166 uint32_t n_instructions)
8168 uint32_t i, pos = 0;
8170 /* Eliminate the invalid instructions that have been optimized out. */
8171 for (i = 0; i < n_instructions; i++) {
8172 struct instruction *instr = &instructions[i];
8173 struct instruction_data *data = &instruction_data[i];
8179 memcpy(&instructions[pos], instr, sizeof(*instr));
8180 memcpy(&instruction_data[pos], data, sizeof(*data));
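/*
 * Pattern: a run of consecutive extract instructions, none of which is a jump
 * target, is folded into a single multi-header extract instruction.
 */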
8190 instr_pattern_extract_many_search(struct instruction *instr,
8191 struct instruction_data *data,
8193 uint32_t *n_pattern_instr)
8197 for (i = 0; i < n_instr; i++) {
8198 if (data[i].invalid)
8201 if (instr[i].type != INSTR_HDR_EXTRACT)
8204 if (i == RTE_DIM(instr->io.hdr.header_id))
8207 if (i && data[i].n_users)
8214 *n_pattern_instr = i;
8219 instr_pattern_extract_many_replace(struct instruction *instr,
8220 struct instruction_data *data,
8225 for (i = 1; i < n_instr; i++) {
8227 instr[0].io.hdr.header_id[i] = instr[i].io.hdr.header_id[0];
8228 instr[0].io.hdr.struct_id[i] = instr[i].io.hdr.struct_id[0];
8229 instr[0].io.hdr.n_bytes[i] = instr[i].io.hdr.n_bytes[0];
8231 data[i].invalid = 1;
8236 instr_pattern_extract_many_optimize(struct instruction *instructions,
8237 struct instruction_data *instruction_data,
8238 uint32_t n_instructions)
8242 for (i = 0; i < n_instructions; ) {
8243 struct instruction *instr = &instructions[i];
8244 struct instruction_data *data = &instruction_data[i];
8245 uint32_t n_instr = 0;
8249 detected = instr_pattern_extract_many_search(instr,
8254 instr_pattern_extract_many_replace(instr,
8261 /* No pattern starting at the current instruction. */
8265 /* Eliminate the invalid instructions that have been optimized out. */
8266 n_instructions = instr_compact(instructions,
8270 return n_instructions;
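/*
 * Pattern: a run of emit instructions immediately followed by a tx is folded
 * into a single emit-many-and-tx instruction that also carries the tx port
 * operand.
 */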
8274 instr_pattern_emit_many_tx_search(struct instruction *instr,
8275 struct instruction_data *data,
8277 uint32_t *n_pattern_instr)
8281 for (i = 0; i < n_instr; i++) {
8282 if (data[i].invalid)
8285 if (instr[i].type != INSTR_HDR_EMIT)
8288 if (i == RTE_DIM(instr->io.hdr.header_id))
8291 if (i && data[i].n_users)
8298 if (!instruction_is_tx(instr[i].type))
8301 if (data[i].n_users)
8306 *n_pattern_instr = i;
8311 instr_pattern_emit_many_tx_replace(struct instruction *instr,
8312 struct instruction_data *data,
8317 /* Any emit instruction in addition to the first one. */
8318 for (i = 1; i < n_instr - 1; i++) {
8320 instr[0].io.hdr.header_id[i] = instr[i].io.hdr.header_id[0];
8321 instr[0].io.hdr.struct_id[i] = instr[i].io.hdr.struct_id[0];
8322 instr[0].io.hdr.n_bytes[i] = instr[i].io.hdr.n_bytes[0];
8324 data[i].invalid = 1;
8327 /* The TX instruction is the last one in the pattern. */
8329 instr[0].io.io.offset = instr[i].io.io.offset;
8330 instr[0].io.io.n_bits = instr[i].io.io.n_bits;
8331 data[i].invalid = 1;
8335 instr_pattern_emit_many_tx_optimize(struct instruction *instructions,
8336 struct instruction_data *instruction_data,
8337 uint32_t n_instructions)
8341 for (i = 0; i < n_instructions; ) {
8342 struct instruction *instr = &instructions[i];
8343 struct instruction_data *data = &instruction_data[i];
8344 uint32_t n_instr = 0;
8347 /* Emit many + TX. */
8348 detected = instr_pattern_emit_many_tx_search(instr,
8353 instr_pattern_emit_many_tx_replace(instr,
8360 /* No pattern starting at the current instruction. */
8364 /* Eliminate the invalid instructions that have been optimized out. */
8365 n_instructions = instr_compact(instructions,
8369 return n_instructions;
8373 action_arg_src_mov_count(struct action *a,
8375 struct instruction *instructions,
8376 struct instruction_data *instruction_data,
8377 uint32_t n_instructions);
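/*
 * Pattern: a sequence of mov instructions that copies every field of a header
 * from consecutive action arguments, followed by a validate of that header,
 * is replaced with a single header DMA transfer (INSTR_DMA_HT) from the
 * action data to the header.
 */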
8380 instr_pattern_mov_all_validate_search(struct rte_swx_pipeline *p,
8382 struct instruction *instr,
8383 struct instruction_data *data,
8385 struct instruction *instructions,
8386 struct instruction_data *instruction_data,
8387 uint32_t n_instructions,
8388 uint32_t *n_pattern_instr)
8391 uint32_t src_field_id, i, j;
8393 /* Prerequisites. */
8397 /* First instruction: MOV_HM. */
8398 if (data[0].invalid || (instr[0].type != INSTR_MOV_HM))
8401 h = header_find_by_struct_id(p, instr[0].mov.dst.struct_id);
8405 for (src_field_id = 0; src_field_id < a->st->n_fields; src_field_id++)
8406 if (instr[0].mov.src.offset == a->st->fields[src_field_id].offset / 8)
8409 if (src_field_id == a->st->n_fields)
8412 if (instr[0].mov.dst.offset ||
8413 (instr[0].mov.dst.n_bits != h->st->fields[0].n_bits) ||
8414 instr[0].mov.src.struct_id ||
8415 (instr[0].mov.src.n_bits != a->st->fields[src_field_id].n_bits) ||
8416 (instr[0].mov.dst.n_bits != instr[0].mov.src.n_bits))
8419 if ((n_instr < h->st->n_fields + 1) ||
8420 (a->st->n_fields < src_field_id + h->st->n_fields + 1))
8423 /* Subsequent instructions: MOV_HM. */
8424 for (i = 1; i < h->st->n_fields; i++)
8425 if (data[i].invalid ||
8427 (instr[i].type != INSTR_MOV_HM) ||
8428 (instr[i].mov.dst.struct_id != h->struct_id) ||
8429 (instr[i].mov.dst.offset != h->st->fields[i].offset / 8) ||
8430 (instr[i].mov.dst.n_bits != h->st->fields[i].n_bits) ||
8431 instr[i].mov.src.struct_id ||
8432 (instr[i].mov.src.offset != a->st->fields[src_field_id + i].offset / 8) ||
8433 (instr[i].mov.src.n_bits != a->st->fields[src_field_id + i].n_bits) ||
8434 (instr[i].mov.dst.n_bits != instr[i].mov.src.n_bits))
8437 /* Last instruction: HDR_VALIDATE. */
8438 if ((instr[i].type != INSTR_HDR_VALIDATE) ||
8439 (instr[i].valid.header_id != h->id))
8442 /* Check that none of the action args used as source for this DMA
8443 * transfer are used as source in any other mov instruction. */
8445 for (j = src_field_id; j < src_field_id + h->st->n_fields; j++) {
8448 n_users = action_arg_src_mov_count(a,
8457 *n_pattern_instr = 1 + i;
8462 instr_pattern_mov_all_validate_replace(struct rte_swx_pipeline *p,
8464 struct instruction *instr,
8465 struct instruction_data *data,
8469 uint32_t src_field_id, src_offset, i;
8471 /* Read from the instructions before they are modified. */
8472 h = header_find_by_struct_id(p, instr[0].mov.dst.struct_id);
8476 for (src_field_id = 0; src_field_id < a->st->n_fields; src_field_id++)
8477 if (instr[0].mov.src.offset == a->st->fields[src_field_id].offset / 8)
8480 if (src_field_id == a->st->n_fields)
8483 src_offset = instr[0].mov.src.offset;
8485 /* Modify the instructions. */
8486 instr[0].type = INSTR_DMA_HT;
8487 instr[0].dma.dst.header_id[0] = h->id;
8488 instr[0].dma.dst.struct_id[0] = h->struct_id;
8489 instr[0].dma.src.offset[0] = (uint8_t)src_offset;
8490 instr[0].dma.n_bytes[0] = h->st->n_bits / 8;
8492 for (i = 1; i < n_instr; i++)
8493 data[i].invalid = 1;
8495 /* Update the endianness of the action arguments to header endianness. */
8496 for (i = 0; i < h->st->n_fields; i++)
8497 a->args_endianness[src_field_id + i] = 1;
8501 instr_pattern_mov_all_validate_optimize(struct rte_swx_pipeline *p,
8503 struct instruction *instructions,
8504 struct instruction_data *instruction_data,
8505 uint32_t n_instructions)
8510 return n_instructions;
8512 for (i = 0; i < n_instructions; ) {
8513 struct instruction *instr = &instructions[i];
8514 struct instruction_data *data = &instruction_data[i];
8515 uint32_t n_instr = 0;
8518 /* Mov all + validate. */
8519 detected = instr_pattern_mov_all_validate_search(p,
8529 instr_pattern_mov_all_validate_replace(p, a, instr, data, n_instr);
8534 /* No pattern starting at the current instruction. */
8538 /* Eliminate the invalid instructions that have been optimized out. */
8539 n_instructions = instr_compact(instructions,
8543 return n_instructions;
8547 instr_pattern_dma_many_search(struct instruction *instr,
8548 struct instruction_data *data,
8550 uint32_t *n_pattern_instr)
8554 for (i = 0; i < n_instr; i++) {
8555 if (data[i].invalid)
8558 if (instr[i].type != INSTR_DMA_HT)
8561 if (i == RTE_DIM(instr->dma.dst.header_id))
8564 if (i && data[i].n_users)
8571 *n_pattern_instr = i;
8576 instr_pattern_dma_many_replace(struct instruction *instr,
8577 struct instruction_data *data,
8582 for (i = 1; i < n_instr; i++) {
8584 instr[0].dma.dst.header_id[i] = instr[i].dma.dst.header_id[0];
8585 instr[0].dma.dst.struct_id[i] = instr[i].dma.dst.struct_id[0];
8586 instr[0].dma.src.offset[i] = instr[i].dma.src.offset[0];
8587 instr[0].dma.n_bytes[i] = instr[i].dma.n_bytes[0];
8589 data[i].invalid = 1;
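/* Illustrative note: up to RTE_DIM(instr->dma.dst.header_id) consecutive
 * single-header DMA_HT instructions are fused here into one wider
 * instruction (DMA_HT2 .. DMA_HT8), with the per-slot header_id, struct_id,
 * offset and n_bytes arrays of instr[0] holding one entry per original
 * instruction; the absorbed instructions are simply marked invalid and
 * removed later by instr_compact().
 */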
8594 instr_pattern_dma_many_optimize(struct instruction *instructions,
8595 struct instruction_data *instruction_data,
8596 uint32_t n_instructions)
8600 for (i = 0; i < n_instructions; ) {
8601 struct instruction *instr = &instructions[i];
8602 struct instruction_data *data = &instruction_data[i];
8603 uint32_t n_instr = 0;
8607 detected = instr_pattern_dma_many_search(instr,
8612 instr_pattern_dma_many_replace(instr, data, n_instr);
8617 /* No pattern starting at the current instruction. */
8621 /* Eliminate the invalid instructions that have been optimized out. */
8622 n_instructions = instr_compact(instructions,
8626 return n_instructions;
8630 instr_optimize(struct rte_swx_pipeline *p,
8632 struct instruction *instructions,
8633 struct instruction_data *instruction_data,
8634 uint32_t n_instructions)
8637 n_instructions = instr_pattern_extract_many_optimize(instructions,
8641 /* Emit many + TX. */
8642 n_instructions = instr_pattern_emit_many_tx_optimize(instructions,
8646 /* Mov all + validate. */
8647 n_instructions = instr_pattern_mov_all_validate_optimize(p,
8654 n_instructions = instr_pattern_dma_many_optimize(instructions,
8658 return n_instructions;
8662 instruction_config(struct rte_swx_pipeline *p,
8664 const char **instructions,
8665 uint32_t n_instructions)
8667 struct instruction *instr = NULL;
8668 struct instruction_data *data = NULL;
8672 CHECK(n_instructions, EINVAL);
8673 CHECK(instructions, EINVAL);
8674 for (i = 0; i < n_instructions; i++)
8675 CHECK_INSTRUCTION(instructions[i], EINVAL);
8677 /* Memory allocation. */
8678 instr = calloc(n_instructions, sizeof(struct instruction));
8684 data = calloc(n_instructions, sizeof(struct instruction_data));
8690 for (i = 0; i < n_instructions; i++) {
8691 char *string = strdup(instructions[i]);
8697 err = instr_translate(p, a, string, &instr[i], &data[i]);
8706 err = instr_label_check(data, n_instructions);
8710 err = instr_verify(p, a, instr, data, n_instructions);
8714 n_instructions = instr_optimize(p, a, instr, data, n_instructions);
8716 err = instr_jmp_resolve(instr, data, n_instructions);
8721 a->instructions = instr;
8722 a->n_instructions = n_instructions;
8724 p->instructions = instr;
8725 p->n_instructions = n_instructions;
8737 typedef void (*instr_exec_t)(struct rte_swx_pipeline *);
8739 static instr_exec_t instruction_table[] = {
8740 [INSTR_RX] = instr_rx_exec,
8741 [INSTR_TX] = instr_tx_exec,
8742 [INSTR_TX_I] = instr_tx_i_exec,
8744 [INSTR_HDR_EXTRACT] = instr_hdr_extract_exec,
8745 [INSTR_HDR_EXTRACT2] = instr_hdr_extract2_exec,
8746 [INSTR_HDR_EXTRACT3] = instr_hdr_extract3_exec,
8747 [INSTR_HDR_EXTRACT4] = instr_hdr_extract4_exec,
8748 [INSTR_HDR_EXTRACT5] = instr_hdr_extract5_exec,
8749 [INSTR_HDR_EXTRACT6] = instr_hdr_extract6_exec,
8750 [INSTR_HDR_EXTRACT7] = instr_hdr_extract7_exec,
8751 [INSTR_HDR_EXTRACT8] = instr_hdr_extract8_exec,
8753 [INSTR_HDR_EMIT] = instr_hdr_emit_exec,
8754 [INSTR_HDR_EMIT_TX] = instr_hdr_emit_tx_exec,
8755 [INSTR_HDR_EMIT2_TX] = instr_hdr_emit2_tx_exec,
8756 [INSTR_HDR_EMIT3_TX] = instr_hdr_emit3_tx_exec,
8757 [INSTR_HDR_EMIT4_TX] = instr_hdr_emit4_tx_exec,
8758 [INSTR_HDR_EMIT5_TX] = instr_hdr_emit5_tx_exec,
8759 [INSTR_HDR_EMIT6_TX] = instr_hdr_emit6_tx_exec,
8760 [INSTR_HDR_EMIT7_TX] = instr_hdr_emit7_tx_exec,
8761 [INSTR_HDR_EMIT8_TX] = instr_hdr_emit8_tx_exec,
8763 [INSTR_HDR_VALIDATE] = instr_hdr_validate_exec,
8764 [INSTR_HDR_INVALIDATE] = instr_hdr_invalidate_exec,
8766 [INSTR_MOV] = instr_mov_exec,
8767 [INSTR_MOV_MH] = instr_mov_mh_exec,
8768 [INSTR_MOV_HM] = instr_mov_hm_exec,
8769 [INSTR_MOV_HH] = instr_mov_hh_exec,
8770 [INSTR_MOV_I] = instr_mov_i_exec,
8772 [INSTR_DMA_HT] = instr_dma_ht_exec,
8773 [INSTR_DMA_HT2] = instr_dma_ht2_exec,
8774 [INSTR_DMA_HT3] = instr_dma_ht3_exec,
8775 [INSTR_DMA_HT4] = instr_dma_ht4_exec,
8776 [INSTR_DMA_HT5] = instr_dma_ht5_exec,
8777 [INSTR_DMA_HT6] = instr_dma_ht6_exec,
8778 [INSTR_DMA_HT7] = instr_dma_ht7_exec,
8779 [INSTR_DMA_HT8] = instr_dma_ht8_exec,
8781 [INSTR_ALU_ADD] = instr_alu_add_exec,
8782 [INSTR_ALU_ADD_MH] = instr_alu_add_mh_exec,
8783 [INSTR_ALU_ADD_HM] = instr_alu_add_hm_exec,
8784 [INSTR_ALU_ADD_HH] = instr_alu_add_hh_exec,
8785 [INSTR_ALU_ADD_MI] = instr_alu_add_mi_exec,
8786 [INSTR_ALU_ADD_HI] = instr_alu_add_hi_exec,
8788 [INSTR_ALU_SUB] = instr_alu_sub_exec,
8789 [INSTR_ALU_SUB_MH] = instr_alu_sub_mh_exec,
8790 [INSTR_ALU_SUB_HM] = instr_alu_sub_hm_exec,
8791 [INSTR_ALU_SUB_HH] = instr_alu_sub_hh_exec,
8792 [INSTR_ALU_SUB_MI] = instr_alu_sub_mi_exec,
8793 [INSTR_ALU_SUB_HI] = instr_alu_sub_hi_exec,
8795 [INSTR_ALU_CKADD_FIELD] = instr_alu_ckadd_field_exec,
8796 [INSTR_ALU_CKADD_STRUCT] = instr_alu_ckadd_struct_exec,
8797 [INSTR_ALU_CKADD_STRUCT20] = instr_alu_ckadd_struct20_exec,
8798 [INSTR_ALU_CKSUB_FIELD] = instr_alu_cksub_field_exec,
8800 [INSTR_ALU_AND] = instr_alu_and_exec,
8801 [INSTR_ALU_AND_MH] = instr_alu_and_mh_exec,
8802 [INSTR_ALU_AND_HM] = instr_alu_and_hm_exec,
8803 [INSTR_ALU_AND_HH] = instr_alu_and_hh_exec,
8804 [INSTR_ALU_AND_I] = instr_alu_and_i_exec,
8806 [INSTR_ALU_OR] = instr_alu_or_exec,
8807 [INSTR_ALU_OR_MH] = instr_alu_or_mh_exec,
8808 [INSTR_ALU_OR_HM] = instr_alu_or_hm_exec,
8809 [INSTR_ALU_OR_HH] = instr_alu_or_hh_exec,
8810 [INSTR_ALU_OR_I] = instr_alu_or_i_exec,
8812 [INSTR_ALU_XOR] = instr_alu_xor_exec,
8813 [INSTR_ALU_XOR_MH] = instr_alu_xor_mh_exec,
8814 [INSTR_ALU_XOR_HM] = instr_alu_xor_hm_exec,
8815 [INSTR_ALU_XOR_HH] = instr_alu_xor_hh_exec,
8816 [INSTR_ALU_XOR_I] = instr_alu_xor_i_exec,
8818 [INSTR_ALU_SHL] = instr_alu_shl_exec,
8819 [INSTR_ALU_SHL_MH] = instr_alu_shl_mh_exec,
8820 [INSTR_ALU_SHL_HM] = instr_alu_shl_hm_exec,
8821 [INSTR_ALU_SHL_HH] = instr_alu_shl_hh_exec,
8822 [INSTR_ALU_SHL_MI] = instr_alu_shl_mi_exec,
8823 [INSTR_ALU_SHL_HI] = instr_alu_shl_hi_exec,
8825 [INSTR_ALU_SHR] = instr_alu_shr_exec,
8826 [INSTR_ALU_SHR_MH] = instr_alu_shr_mh_exec,
8827 [INSTR_ALU_SHR_HM] = instr_alu_shr_hm_exec,
8828 [INSTR_ALU_SHR_HH] = instr_alu_shr_hh_exec,
8829 [INSTR_ALU_SHR_MI] = instr_alu_shr_mi_exec,
8830 [INSTR_ALU_SHR_HI] = instr_alu_shr_hi_exec,
8832 [INSTR_REGPREFETCH_RH] = instr_regprefetch_rh_exec,
8833 [INSTR_REGPREFETCH_RM] = instr_regprefetch_rm_exec,
8834 [INSTR_REGPREFETCH_RI] = instr_regprefetch_ri_exec,
8836 [INSTR_REGRD_HRH] = instr_regrd_hrh_exec,
8837 [INSTR_REGRD_HRM] = instr_regrd_hrm_exec,
8838 [INSTR_REGRD_MRH] = instr_regrd_mrh_exec,
8839 [INSTR_REGRD_MRM] = instr_regrd_mrm_exec,
8840 [INSTR_REGRD_HRI] = instr_regrd_hri_exec,
8841 [INSTR_REGRD_MRI] = instr_regrd_mri_exec,
8843 [INSTR_REGWR_RHH] = instr_regwr_rhh_exec,
8844 [INSTR_REGWR_RHM] = instr_regwr_rhm_exec,
8845 [INSTR_REGWR_RMH] = instr_regwr_rmh_exec,
8846 [INSTR_REGWR_RMM] = instr_regwr_rmm_exec,
8847 [INSTR_REGWR_RHI] = instr_regwr_rhi_exec,
8848 [INSTR_REGWR_RMI] = instr_regwr_rmi_exec,
8849 [INSTR_REGWR_RIH] = instr_regwr_rih_exec,
8850 [INSTR_REGWR_RIM] = instr_regwr_rim_exec,
8851 [INSTR_REGWR_RII] = instr_regwr_rii_exec,
8853 [INSTR_REGADD_RHH] = instr_regadd_rhh_exec,
8854 [INSTR_REGADD_RHM] = instr_regadd_rhm_exec,
8855 [INSTR_REGADD_RMH] = instr_regadd_rmh_exec,
8856 [INSTR_REGADD_RMM] = instr_regadd_rmm_exec,
8857 [INSTR_REGADD_RHI] = instr_regadd_rhi_exec,
8858 [INSTR_REGADD_RMI] = instr_regadd_rmi_exec,
8859 [INSTR_REGADD_RIH] = instr_regadd_rih_exec,
8860 [INSTR_REGADD_RIM] = instr_regadd_rim_exec,
8861 [INSTR_REGADD_RII] = instr_regadd_rii_exec,
8863 [INSTR_METPREFETCH_H] = instr_metprefetch_h_exec,
8864 [INSTR_METPREFETCH_M] = instr_metprefetch_m_exec,
8865 [INSTR_METPREFETCH_I] = instr_metprefetch_i_exec,
8867 [INSTR_METER_HHM] = instr_meter_hhm_exec,
8868 [INSTR_METER_HHI] = instr_meter_hhi_exec,
8869 [INSTR_METER_HMM] = instr_meter_hmm_exec,
8870 [INSTR_METER_HMI] = instr_meter_hmi_exec,
8871 [INSTR_METER_MHM] = instr_meter_mhm_exec,
8872 [INSTR_METER_MHI] = instr_meter_mhi_exec,
8873 [INSTR_METER_MMM] = instr_meter_mmm_exec,
8874 [INSTR_METER_MMI] = instr_meter_mmi_exec,
8875 [INSTR_METER_IHM] = instr_meter_ihm_exec,
8876 [INSTR_METER_IHI] = instr_meter_ihi_exec,
8877 [INSTR_METER_IMM] = instr_meter_imm_exec,
8878 [INSTR_METER_IMI] = instr_meter_imi_exec,
8880 [INSTR_TABLE] = instr_table_exec,
8881 [INSTR_SELECTOR] = instr_selector_exec,
8882 [INSTR_EXTERN_OBJ] = instr_extern_obj_exec,
8883 [INSTR_EXTERN_FUNC] = instr_extern_func_exec,
8885 [INSTR_JMP] = instr_jmp_exec,
8886 [INSTR_JMP_VALID] = instr_jmp_valid_exec,
8887 [INSTR_JMP_INVALID] = instr_jmp_invalid_exec,
8888 [INSTR_JMP_HIT] = instr_jmp_hit_exec,
8889 [INSTR_JMP_MISS] = instr_jmp_miss_exec,
8890 [INSTR_JMP_ACTION_HIT] = instr_jmp_action_hit_exec,
8891 [INSTR_JMP_ACTION_MISS] = instr_jmp_action_miss_exec,
8893 [INSTR_JMP_EQ] = instr_jmp_eq_exec,
8894 [INSTR_JMP_EQ_MH] = instr_jmp_eq_mh_exec,
8895 [INSTR_JMP_EQ_HM] = instr_jmp_eq_hm_exec,
8896 [INSTR_JMP_EQ_HH] = instr_jmp_eq_hh_exec,
8897 [INSTR_JMP_EQ_I] = instr_jmp_eq_i_exec,
8899 [INSTR_JMP_NEQ] = instr_jmp_neq_exec,
8900 [INSTR_JMP_NEQ_MH] = instr_jmp_neq_mh_exec,
8901 [INSTR_JMP_NEQ_HM] = instr_jmp_neq_hm_exec,
8902 [INSTR_JMP_NEQ_HH] = instr_jmp_neq_hh_exec,
8903 [INSTR_JMP_NEQ_I] = instr_jmp_neq_i_exec,
8905 [INSTR_JMP_LT] = instr_jmp_lt_exec,
8906 [INSTR_JMP_LT_MH] = instr_jmp_lt_mh_exec,
8907 [INSTR_JMP_LT_HM] = instr_jmp_lt_hm_exec,
8908 [INSTR_JMP_LT_HH] = instr_jmp_lt_hh_exec,
8909 [INSTR_JMP_LT_MI] = instr_jmp_lt_mi_exec,
8910 [INSTR_JMP_LT_HI] = instr_jmp_lt_hi_exec,
8912 [INSTR_JMP_GT] = instr_jmp_gt_exec,
8913 [INSTR_JMP_GT_MH] = instr_jmp_gt_mh_exec,
8914 [INSTR_JMP_GT_HM] = instr_jmp_gt_hm_exec,
8915 [INSTR_JMP_GT_HH] = instr_jmp_gt_hh_exec,
8916 [INSTR_JMP_GT_MI] = instr_jmp_gt_mi_exec,
8917 [INSTR_JMP_GT_HI] = instr_jmp_gt_hi_exec,
8919 [INSTR_RETURN] = instr_return_exec,
8923 instr_exec(struct rte_swx_pipeline *p)
8925 struct thread *t = &p->threads[p->thread_id];
8926 struct instruction *ip = t->ip;
8927 instr_exec_t instr = instruction_table[ip->type];
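/* Dispatch is a plain indexed call: the handler for the current thread's
 * instruction is looked up by ip->type and invoked, and each handler
 * advances the thread instruction pointer itself. A minimal sketch of the
 * resulting run loop (the real one is rte_swx_pipeline_run() below):
 *
 *     for (i = 0; i < n_instructions; i++)
 *             instruction_table[p->threads[p->thread_id].ip->type](p);
 */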
8935 static struct action *
8936 action_find(struct rte_swx_pipeline *p, const char *name)
8938 struct action *elem;
8943 TAILQ_FOREACH(elem, &p->actions, node)
8944 if (strcmp(elem->name, name) == 0)
8950 static struct action *
8951 action_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
8953 struct action *action = NULL;
8955 TAILQ_FOREACH(action, &p->actions, node)
8956 if (action->id == id)
8962 static struct field *
8963 action_field_find(struct action *a, const char *name)
8965 return a->st ? struct_type_field_find(a->st, name) : NULL;
8968 static struct field *
8969 action_field_parse(struct action *action, const char *name)
8971 if (name[0] != 't' || name[1] != '.')
8974 return action_field_find(action, &name[2]);
8978 rte_swx_pipeline_action_config(struct rte_swx_pipeline *p,
8980 const char *args_struct_type_name,
8981 const char **instructions,
8982 uint32_t n_instructions)
8984 struct struct_type *args_struct_type;
8990 CHECK_NAME(name, EINVAL);
8991 CHECK(!action_find(p, name), EEXIST);
8993 if (args_struct_type_name) {
8994 CHECK_NAME(args_struct_type_name, EINVAL);
8995 args_struct_type = struct_type_find(p, args_struct_type_name);
8996 CHECK(args_struct_type, EINVAL);
8998 args_struct_type = NULL;
9001 /* Node allocation. */
9002 a = calloc(1, sizeof(struct action));
9004 if (args_struct_type) {
9005 a->args_endianness = calloc(args_struct_type->n_fields, sizeof(int));
9006 if (!a->args_endianness) {
9012 /* Node initialization. */
9013 strcpy(a->name, name);
9014 a->st = args_struct_type;
9015 a->id = p->n_actions;
9017 /* Instruction translation. */
9018 err = instruction_config(p, a, instructions, n_instructions);
9020 free(a->args_endianness);
9025 /* Node add to tailq. */
9026 TAILQ_INSERT_TAIL(&p->actions, a, node);
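/* Example usage (illustrative sketch; all names below are hypothetical):
 *
 *     static const char *instr[] = {
 *             "mov h.ipv4.ttl t.ttl",
 *             "return",
 *     };
 *
 *     status = rte_swx_pipeline_action_config(p, "set_ttl", "set_ttl_args_t",
 *                                             instr, RTE_DIM(instr));
 *
 * The argument struct type ("set_ttl_args_t") must have been registered
 * beforehand with rte_swx_pipeline_struct_type_register(); pass NULL for
 * args_struct_type_name when the action takes no arguments.
 */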
9033 action_build(struct rte_swx_pipeline *p)
9035 struct action *action;
9037 p->action_instructions = calloc(p->n_actions,
9038 sizeof(struct instruction *));
9039 CHECK(p->action_instructions, ENOMEM);
9041 TAILQ_FOREACH(action, &p->actions, node)
9042 p->action_instructions[action->id] = action->instructions;
9048 action_build_free(struct rte_swx_pipeline *p)
9050 free(p->action_instructions);
9051 p->action_instructions = NULL;
9055 action_free(struct rte_swx_pipeline *p)
9057 action_build_free(p);
9060 struct action *action;
9062 action = TAILQ_FIRST(&p->actions);
9066 TAILQ_REMOVE(&p->actions, action, node);
9067 free(action->instructions);
9073 action_arg_src_mov_count(struct action *a,
9075 struct instruction *instructions,
9076 struct instruction_data *instruction_data,
9077 uint32_t n_instructions)
9079 uint32_t offset, n_users = 0, i;
9082 (arg_id >= a->st->n_fields) ||
9084 !instruction_data ||
9088 offset = a->st->fields[arg_id].offset / 8;
9090 for (i = 0; i < n_instructions; i++) {
9091 struct instruction *instr = &instructions[i];
9092 struct instruction_data *data = &instruction_data[i];
9094 if (data->invalid ||
9095 ((instr->type != INSTR_MOV) && (instr->type != INSTR_MOV_HM)) ||
9096 instr->mov.src.struct_id ||
9097 (instr->mov.src.offset != offset))
9109 static struct table_type *
9110 table_type_find(struct rte_swx_pipeline *p, const char *name)
9112 struct table_type *elem;
9114 TAILQ_FOREACH(elem, &p->table_types, node)
9115 if (strcmp(elem->name, name) == 0)
9121 static struct table_type *
9122 table_type_resolve(struct rte_swx_pipeline *p,
9123 const char *recommended_type_name,
9124 enum rte_swx_table_match_type match_type)
9126 struct table_type *elem;
9128 /* Only consider the recommended type if the match type is correct. */
9129 if (recommended_type_name)
9130 TAILQ_FOREACH(elem, &p->table_types, node)
9131 if (!strcmp(elem->name, recommended_type_name) &&
9132 (elem->match_type == match_type))
9135 /* Ignore the recommended type and get the first element with this match
9138 TAILQ_FOREACH(elem, &p->table_types, node)
9139 if (elem->match_type == match_type)
9145 static struct table *
9146 table_find(struct rte_swx_pipeline *p, const char *name)
9150 TAILQ_FOREACH(elem, &p->tables, node)
9151 if (strcmp(elem->name, name) == 0)
9157 static struct table *
9158 table_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
9160 struct table *table = NULL;
9162 TAILQ_FOREACH(table, &p->tables, node)
9163 if (table->id == id)
9170 rte_swx_pipeline_table_type_register(struct rte_swx_pipeline *p,
9172 enum rte_swx_table_match_type match_type,
9173 struct rte_swx_table_ops *ops)
9175 struct table_type *elem;
9179 CHECK_NAME(name, EINVAL);
9180 CHECK(!table_type_find(p, name), EEXIST);
9183 CHECK(ops->create, EINVAL);
9184 CHECK(ops->lkp, EINVAL);
9185 CHECK(ops->free, EINVAL);
9187 /* Node allocation. */
9188 elem = calloc(1, sizeof(struct table_type));
9189 CHECK(elem, ENOMEM);
9191 /* Node initialization. */
9192 strcpy(elem->name, name);
9193 elem->match_type = match_type;
9194 memcpy(&elem->ops, ops, sizeof(*ops));
9196 /* Node add to tailq. */
9197 TAILQ_INSERT_TAIL(&p->table_types, elem, node);
9203 table_match_type_resolve(struct rte_swx_match_field_params *fields,
9205 enum rte_swx_table_match_type *match_type)
9207 uint32_t n_fields_em = 0, n_fields_lpm = 0, i;
9209 for (i = 0; i < n_fields; i++) {
9210 struct rte_swx_match_field_params *f = &fields[i];
9212 if (f->match_type == RTE_SWX_TABLE_MATCH_EXACT)
9215 if (f->match_type == RTE_SWX_TABLE_MATCH_LPM)
9219 if ((n_fields_lpm > 1) ||
9220 (n_fields_lpm && (n_fields_em != n_fields - 1)))
9223 *match_type = (n_fields_em == n_fields) ?
9224 RTE_SWX_TABLE_MATCH_EXACT :
9225 RTE_SWX_TABLE_MATCH_WILDCARD;
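/* Worked example (illustrative): three exact-match fields resolve to
 * RTE_SWX_TABLE_MATCH_EXACT; two exact fields plus one LPM field resolve to
 * RTE_SWX_TABLE_MATCH_WILDCARD; more than one LPM field, or one LPM field
 * combined with a non-exact field, is rejected.
 */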
9231 table_match_fields_check(struct rte_swx_pipeline *p,
9232 struct rte_swx_pipeline_table_params *params,
9233 struct header **header)
9235 struct header *h0 = NULL;
9236 struct field *hf, *mf;
9237 uint32_t *offset = NULL, i;
9240 /* Return if no match fields. */
9241 if (!params->n_fields) {
9242 if (params->fields) {
9250 /* Memory allocation. */
9251 offset = calloc(params->n_fields, sizeof(uint32_t));
9257 /* Check that all the match fields belong to either the same header or
9260 hf = header_field_parse(p, params->fields[0].name, &h0);
9261 mf = metadata_field_parse(p, params->fields[0].name);
9267 offset[0] = h0 ? hf->offset : mf->offset;
9269 for (i = 1; i < params->n_fields; i++)
9273 hf = header_field_parse(p, params->fields[i].name, &h);
9274 if (!hf || (h->id != h0->id)) {
9279 offset[i] = hf->offset;
9281 mf = metadata_field_parse(p, params->fields[i].name);
9287 offset[i] = mf->offset;
9290 /* Check that there are no duplicated match fields. */
9291 for (i = 0; i < params->n_fields; i++) {
9294 for (j = 0; j < i; j++)
9295 if (offset[j] == offset[i]) {
9311 rte_swx_pipeline_table_config(struct rte_swx_pipeline *p,
9313 struct rte_swx_pipeline_table_params *params,
9314 const char *recommended_table_type_name,
9318 struct table_type *type;
9320 struct action *default_action;
9321 struct header *header = NULL;
9322 uint32_t action_data_size_max = 0, i;
9327 CHECK_NAME(name, EINVAL);
9328 CHECK(!table_find(p, name), EEXIST);
9329 CHECK(!selector_find(p, name), EEXIST);
9331 CHECK(params, EINVAL);
9334 status = table_match_fields_check(p, params, &header);
9338 /* Action checks. */
9339 CHECK(params->n_actions, EINVAL);
9340 CHECK(params->action_names, EINVAL);
9341 for (i = 0; i < params->n_actions; i++) {
9342 const char *action_name = params->action_names[i];
9344 uint32_t action_data_size;
9346 CHECK_NAME(action_name, EINVAL);
9348 a = action_find(p, action_name);
9351 action_data_size = a->st ? a->st->n_bits / 8 : 0;
9352 if (action_data_size > action_data_size_max)
9353 action_data_size_max = action_data_size;
9356 CHECK_NAME(params->default_action_name, EINVAL);
9357 for (i = 0; i < params->n_actions; i++)
9358 if (!strcmp(params->action_names[i],
9359 params->default_action_name))
9361 CHECK(i < params->n_actions, EINVAL);
9362 default_action = action_find(p, params->default_action_name);
9363 CHECK((default_action->st && params->default_action_data) ||
9364 !params->default_action_data, EINVAL);
9366 /* Table type checks. */
9367 if (recommended_table_type_name)
9368 CHECK_NAME(recommended_table_type_name, EINVAL);
9370 if (params->n_fields) {
9371 enum rte_swx_table_match_type match_type;
9373 status = table_match_type_resolve(params->fields, params->n_fields, &match_type);
9377 type = table_type_resolve(p, recommended_table_type_name, match_type);
9378 CHECK(type, EINVAL);
9383 /* Memory allocation. */
9384 t = calloc(1, sizeof(struct table));
9387 t->fields = calloc(params->n_fields, sizeof(struct match_field));
9393 t->actions = calloc(params->n_actions, sizeof(struct action *));
9400 if (action_data_size_max) {
9401 t->default_action_data = calloc(1, action_data_size_max);
9402 if (!t->default_action_data) {
9410 /* Node initialization. */
9411 strcpy(t->name, name);
9412 if (args && args[0])
9413 strcpy(t->args, args);
9416 for (i = 0; i < params->n_fields; i++) {
9417 struct rte_swx_match_field_params *field = &params->fields[i];
9418 struct match_field *f = &t->fields[i];
9420 f->match_type = field->match_type;
9422 header_field_parse(p, field->name, NULL) :
9423 metadata_field_parse(p, field->name);
9425 t->n_fields = params->n_fields;
9428 for (i = 0; i < params->n_actions; i++)
9429 t->actions[i] = action_find(p, params->action_names[i]);
9430 t->default_action = default_action;
9431 if (default_action->st)
9432 memcpy(t->default_action_data,
9433 params->default_action_data,
9434 default_action->st->n_bits / 8);
9435 t->n_actions = params->n_actions;
9436 t->default_action_is_const = params->default_action_is_const;
9437 t->action_data_size_max = action_data_size_max;
9440 t->id = p->n_tables;
9442 /* Node add to tailq. */
9443 TAILQ_INSERT_TAIL(&p->tables, t, node);
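/* Example usage (illustrative sketch; field, action and table names are
 * hypothetical, and the referenced actions must already be configured):
 *
 *     struct rte_swx_match_field_params fields[] = {
 *             {.name = "h.ipv4.dst_addr", .match_type = RTE_SWX_TABLE_MATCH_LPM},
 *     };
 *     struct rte_swx_pipeline_table_params params = {
 *             .fields = fields,
 *             .n_fields = RTE_DIM(fields),
 *             .action_names = (const char *[]){"set_nexthop", "drop"},
 *             .n_actions = 2,
 *             .default_action_name = "drop",
 *             .default_action_is_const = 1,
 *     };
 *
 *     status = rte_swx_pipeline_table_config(p, "routing", &params,
 *                                            NULL, NULL, 4096);
 */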
9449 static struct rte_swx_table_params *
9450 table_params_get(struct table *table)
9452 struct rte_swx_table_params *params;
9453 struct field *first, *last;
9455 uint32_t key_size, key_offset, action_data_size, i;
9457 /* Memory allocation. */
9458 params = calloc(1, sizeof(struct rte_swx_table_params));
9462 /* Find first (smallest offset) and last (biggest offset) match fields. */
9463 first = table->fields[0].field;
9464 last = table->fields[0].field;
9466 for (i = 0; i < table->n_fields; i++) {
9467 struct field *f = table->fields[i].field;
9469 if (f->offset < first->offset)
9472 if (f->offset > last->offset)
9476 /* Key offset and size. */
9477 key_offset = first->offset / 8;
9478 key_size = (last->offset + last->n_bits - first->offset) / 8;
9480 /* Memory allocation. */
9481 key_mask = calloc(1, key_size);
9488 for (i = 0; i < table->n_fields; i++) {
9489 struct field *f = table->fields[i].field;
9490 uint32_t start = (f->offset - first->offset) / 8;
9491 size_t size = f->n_bits / 8;
9493 memset(&key_mask[start], 0xFF, size);
9496 /* Action data size. */
9497 action_data_size = 0;
9498 for (i = 0; i < table->n_actions; i++) {
9499 struct action *action = table->actions[i];
9500 uint32_t ads = action->st ? action->st->n_bits / 8 : 0;
9502 if (ads > action_data_size)
9503 action_data_size = ads;
9507 params->match_type = table->type->match_type;
9508 params->key_size = key_size;
9509 params->key_offset = key_offset;
9510 params->key_mask0 = key_mask;
9511 params->action_data_size = action_data_size;
9512 params->n_keys_max = table->size;
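/* Worked example (assumed field layout): with match fields at bit offsets
 * 256 and 288, each 32 bits wide, first->offset = 256 and last->offset = 288,
 * so key_offset = 256 / 8 = 32 bytes and key_size = (288 + 32 - 256) / 8 = 8
 * bytes; key_mask0 gets 0xFF in every byte covered by a match field and 0x00
 * in any gap bytes in between.
 */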
9518 table_params_free(struct rte_swx_table_params *params)
9523 free(params->key_mask0);
9528 table_stub_lkp(void *table __rte_unused,
9529 void *mailbox __rte_unused,
9530 uint8_t **key __rte_unused,
9531 uint64_t *action_id __rte_unused,
9532 uint8_t **action_data __rte_unused,
9536 return 1; /* DONE. */
9540 table_build(struct rte_swx_pipeline *p)
9544 /* Per pipeline: table statistics. */
9545 p->table_stats = calloc(p->n_tables, sizeof(struct table_statistics));
9546 CHECK(p->table_stats, ENOMEM);
9548 for (i = 0; i < p->n_tables; i++) {
9549 p->table_stats[i].n_pkts_action = calloc(p->n_actions, sizeof(uint64_t));
9550 CHECK(p->table_stats[i].n_pkts_action, ENOMEM);
9553 /* Per thread: table run-time. */
9554 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
9555 struct thread *t = &p->threads[i];
9556 struct table *table;
9558 t->tables = calloc(p->n_tables, sizeof(struct table_runtime));
9559 CHECK(t->tables, ENOMEM);
9561 TAILQ_FOREACH(table, &p->tables, node) {
9562 struct table_runtime *r = &t->tables[table->id];
9567 size = table->type->ops.mailbox_size_get();
9570 r->func = table->type->ops.lkp;
9574 r->mailbox = calloc(1, size);
9575 CHECK(r->mailbox, ENOMEM);
9579 r->key = table->header ?
9580 &t->structs[table->header->struct_id] :
9581 &t->structs[p->metadata_struct_id];
9583 r->func = table_stub_lkp;
9592 table_build_free(struct rte_swx_pipeline *p)
9596 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
9597 struct thread *t = &p->threads[i];
9603 for (j = 0; j < p->n_tables; j++) {
9604 struct table_runtime *r = &t->tables[j];
9613 if (p->table_stats) {
9614 for (i = 0; i < p->n_tables; i++)
9615 free(p->table_stats[i].n_pkts_action);
9617 free(p->table_stats);
9622 table_free(struct rte_swx_pipeline *p)
9624 table_build_free(p);
9630 elem = TAILQ_FIRST(&p->tables);
9634 TAILQ_REMOVE(&p->tables, elem, node);
9636 free(elem->actions);
9637 free(elem->default_action_data);
9643 struct table_type *elem;
9645 elem = TAILQ_FIRST(&p->table_types);
9649 TAILQ_REMOVE(&p->table_types, elem, node);
9657 static struct selector *
9658 selector_find(struct rte_swx_pipeline *p, const char *name)
9662 TAILQ_FOREACH(s, &p->selectors, node)
9663 if (strcmp(s->name, name) == 0)
9669 static struct selector *
9670 selector_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
9672 struct selector *s = NULL;
9674 TAILQ_FOREACH(s, &p->selectors, node)
9682 selector_fields_check(struct rte_swx_pipeline *p,
9683 struct rte_swx_pipeline_selector_params *params,
9684 struct header **header)
9686 struct header *h0 = NULL;
9687 struct field *hf, *mf;
9690 /* Return if no selector fields. */
9691 if (!params->n_selector_fields || !params->selector_field_names)
9694 /* Check that all the selector fields either belong to the same header
9695 * or are all meta-data fields.
9697 hf = header_field_parse(p, params->selector_field_names[0], &h0);
9698 mf = metadata_field_parse(p, params->selector_field_names[0]);
9702 for (i = 1; i < params->n_selector_fields; i++)
9706 hf = header_field_parse(p, params->selector_field_names[i], &h);
9707 if (!hf || (h->id != h0->id))
9710 mf = metadata_field_parse(p, params->selector_field_names[i]);
9716 /* Check that there are no duplicated selector fields. */
9716 for (i = 0; i < params->n_selector_fields; i++) {
9717 const char *field_name = params->selector_field_names[i];
9720 for (j = i + 1; j < params->n_selector_fields; j++)
9721 if (!strcmp(params->selector_field_names[j], field_name))
9733 rte_swx_pipeline_selector_config(struct rte_swx_pipeline *p,
9735 struct rte_swx_pipeline_selector_params *params)
9738 struct header *selector_header = NULL;
9739 struct field *group_id_field, *member_id_field;
9745 CHECK_NAME(name, EINVAL);
9746 CHECK(!table_find(p, name), EEXIST);
9747 CHECK(!selector_find(p, name), EEXIST);
9749 CHECK(params, EINVAL);
9751 CHECK_NAME(params->group_id_field_name, EINVAL);
9752 group_id_field = metadata_field_parse(p, params->group_id_field_name);
9753 CHECK(group_id_field, EINVAL);
9755 for (i = 0; i < params->n_selector_fields; i++) {
9756 const char *field_name = params->selector_field_names[i];
9758 CHECK_NAME(field_name, EINVAL);
9760 status = selector_fields_check(p, params, &selector_header);
9764 CHECK_NAME(params->member_id_field_name, EINVAL);
9765 member_id_field = metadata_field_parse(p, params->member_id_field_name);
9766 CHECK(member_id_field, EINVAL);
9768 CHECK(params->n_groups_max, EINVAL);
9770 CHECK(params->n_members_per_group_max, EINVAL);
9772 /* Memory allocation. */
9773 s = calloc(1, sizeof(struct selector));
9779 s->selector_fields = calloc(params->n_selector_fields, sizeof(struct field *));
9780 if (!s->selector_fields) {
9785 /* Node initialization. */
9786 strcpy(s->name, name);
9788 s->group_id_field = group_id_field;
9790 for (i = 0; i < params->n_selector_fields; i++) {
9791 const char *field_name = params->selector_field_names[i];
9793 s->selector_fields[i] = selector_header ?
9794 header_field_parse(p, field_name, NULL) :
9795 metadata_field_parse(p, field_name);
9798 s->n_selector_fields = params->n_selector_fields;
9800 s->selector_header = selector_header;
9802 s->member_id_field = member_id_field;
9804 s->n_groups_max = params->n_groups_max;
9806 s->n_members_per_group_max = params->n_members_per_group_max;
9808 s->id = p->n_selectors;
9810 /* Node add to tailq. */
9811 TAILQ_INSERT_TAIL(&p->selectors, s, node);
9820 free(s->selector_fields);
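/* Example usage (illustrative; the metadata field names are hypothetical):
 *
 *     struct rte_swx_pipeline_selector_params params = {
 *             .group_id_field_name = "m.group_id",
 *             .selector_field_names = (const char *[]){"m.hash"},
 *             .n_selector_fields = 1,
 *             .member_id_field_name = "m.member_id",
 *             .n_groups_max = 64,
 *             .n_members_per_group_max = 16,
 *     };
 *
 *     status = rte_swx_pipeline_selector_config(p, "lag", &params);
 */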
9828 selector_params_free(struct rte_swx_table_selector_params *params)
9833 free(params->selector_mask);
9838 static struct rte_swx_table_selector_params *
9839 selector_table_params_get(struct selector *s)
9841 struct rte_swx_table_selector_params *params = NULL;
9842 struct field *first, *last;
9845 /* Memory allocation. */
9846 params = calloc(1, sizeof(struct rte_swx_table_selector_params));
9851 params->group_id_offset = s->group_id_field->offset / 8;
9853 /* Find first (smallest offset) and last (biggest offset) selector fields. */
9854 first = s->selector_fields[0];
9855 last = s->selector_fields[0];
9857 for (i = 0; i < s->n_selector_fields; i++) {
9858 struct field *f = s->selector_fields[i];
9860 if (f->offset < first->offset)
9863 if (f->offset > last->offset)
9867 /* Selector offset and size. */
9868 params->selector_offset = first->offset / 8;
9869 params->selector_size = (last->offset + last->n_bits - first->offset) / 8;
9871 /* Memory allocation. */
9872 params->selector_mask = calloc(1, params->selector_size);
9873 if (!params->selector_mask)
9876 /* Selector mask. */
9877 for (i = 0; i < s->n_selector_fields; i++) {
9878 struct field *f = s->selector_fields[i];
9879 uint32_t start = (f->offset - first->offset) / 8;
9880 size_t size = f->n_bits / 8;
9882 memset(&params->selector_mask[start], 0xFF, size);
9886 params->member_id_offset = s->member_id_field->offset / 8;
9888 /* Maximum number of groups. */
9889 params->n_groups_max = s->n_groups_max;
9891 /* Maximum number of members per group. */
9892 params->n_members_per_group_max = s->n_members_per_group_max;
9897 selector_params_free(params);
9902 selector_build_free(struct rte_swx_pipeline *p)
9906 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
9907 struct thread *t = &p->threads[i];
9913 for (j = 0; j < p->n_selectors; j++) {
9914 struct selector_runtime *r = &t->selectors[j];
9920 t->selectors = NULL;
9923 free(p->selector_stats);
9924 p->selector_stats = NULL;
9928 selector_build(struct rte_swx_pipeline *p)
9933 /* Per pipeline: selector statistics. */
9934 p->selector_stats = calloc(p->n_selectors, sizeof(struct selector_statistics));
9935 if (!p->selector_stats) {
9940 /* Per thread: selector run-time. */
9941 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
9942 struct thread *t = &p->threads[i];
9945 t->selectors = calloc(p->n_selectors, sizeof(struct selector_runtime));
9946 if (!t->selectors) {
9951 TAILQ_FOREACH(s, &p->selectors, node) {
9952 struct selector_runtime *r = &t->selectors[s->id];
9956 size = rte_swx_table_selector_mailbox_size_get();
9958 r->mailbox = calloc(1, size);
9965 /* r->group_id_buffer. */
9966 r->group_id_buffer = &t->structs[p->metadata_struct_id];
9968 /* r->selector_buffer. */
9969 r->selector_buffer = s->selector_header ?
9970 &t->structs[s->selector_header->struct_id] :
9971 &t->structs[p->metadata_struct_id];
9973 /* r->member_id_buffer. */
9974 r->member_id_buffer = &t->structs[p->metadata_struct_id];
9981 selector_build_free(p);
9986 selector_free(struct rte_swx_pipeline *p)
9988 selector_build_free(p);
9990 /* Selector tables. */
9992 struct selector *elem;
9994 elem = TAILQ_FIRST(&p->selectors);
9998 TAILQ_REMOVE(&p->selectors, elem, node);
9999 free(elem->selector_fields);
10008 table_state_build(struct rte_swx_pipeline *p)
10010 struct table *table;
10011 struct selector *s;
10013 p->table_state = calloc(p->n_tables + p->n_selectors,
10014 sizeof(struct rte_swx_table_state));
10015 CHECK(p->table_state, ENOMEM);
10017 TAILQ_FOREACH(table, &p->tables, node) {
10018 struct rte_swx_table_state *ts = &p->table_state[table->id];
10021 struct rte_swx_table_params *params;
10024 params = table_params_get(table);
10025 CHECK(params, ENOMEM);
10027 ts->obj = table->type->ops.create(params,
10032 table_params_free(params);
10033 CHECK(ts->obj, ENODEV);
10036 /* ts->default_action_data. */
10037 if (table->action_data_size_max) {
10038 ts->default_action_data =
10039 malloc(table->action_data_size_max);
10040 CHECK(ts->default_action_data, ENOMEM);
10042 memcpy(ts->default_action_data,
10043 table->default_action_data,
10044 table->action_data_size_max);
10047 /* ts->default_action_id. */
10048 ts->default_action_id = table->default_action->id;
10051 TAILQ_FOREACH(s, &p->selectors, node) {
10052 struct rte_swx_table_state *ts = &p->table_state[p->n_tables + s->id];
10053 struct rte_swx_table_selector_params *params;
10056 params = selector_table_params_get(s);
10057 CHECK(params, ENOMEM);
10059 ts->obj = rte_swx_table_selector_create(params, NULL, p->numa_node);
10061 selector_params_free(params);
10062 CHECK(ts->obj, ENODEV);
10069 table_state_build_free(struct rte_swx_pipeline *p)
10073 if (!p->table_state)
10076 for (i = 0; i < p->n_tables; i++) {
10077 struct rte_swx_table_state *ts = &p->table_state[i];
10078 struct table *table = table_find_by_id(p, i);
10081 if (table->type && ts->obj)
10082 table->type->ops.free(ts->obj);
10084 /* ts->default_action_data. */
10085 free(ts->default_action_data);
10088 for (i = 0; i < p->n_selectors; i++) {
10089 struct rte_swx_table_state *ts = &p->table_state[p->n_tables + i];
10093 rte_swx_table_selector_free(ts->obj);
10096 free(p->table_state);
10097 p->table_state = NULL;
10101 table_state_free(struct rte_swx_pipeline *p)
10103 table_state_build_free(p);
10109 static struct regarray *
10110 regarray_find(struct rte_swx_pipeline *p, const char *name)
10112 struct regarray *elem;
10114 TAILQ_FOREACH(elem, &p->regarrays, node)
10115 if (!strcmp(elem->name, name))
10121 static struct regarray *
10122 regarray_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
10124 struct regarray *elem = NULL;
10126 TAILQ_FOREACH(elem, &p->regarrays, node)
10127 if (elem->id == id)
10134 rte_swx_pipeline_regarray_config(struct rte_swx_pipeline *p,
10139 struct regarray *r;
10143 CHECK_NAME(name, EINVAL);
10144 CHECK(!regarray_find(p, name), EEXIST);
10146 CHECK(size, EINVAL);
10147 size = rte_align32pow2(size);
10149 /* Memory allocation. */
10150 r = calloc(1, sizeof(struct regarray));
10153 /* Node initialization. */
10154 strcpy(r->name, name);
10155 r->init_val = init_val;
10157 r->id = p->n_regarrays;
10159 /* Node add to tailq. */
10160 TAILQ_INSERT_TAIL(&p->regarrays, r, node);
10167 regarray_build(struct rte_swx_pipeline *p)
10169 struct regarray *regarray;
10171 if (!p->n_regarrays)
10174 p->regarray_runtime = calloc(p->n_regarrays, sizeof(struct regarray_runtime));
10175 CHECK(p->regarray_runtime, ENOMEM);
10177 TAILQ_FOREACH(regarray, &p->regarrays, node) {
10178 struct regarray_runtime *r = &p->regarray_runtime[regarray->id];
10181 r->regarray = env_malloc(regarray->size * sizeof(uint64_t),
10182 RTE_CACHE_LINE_SIZE,
10184 CHECK(r->regarray, ENOMEM);
10186 if (regarray->init_val)
10187 for (i = 0; i < regarray->size; i++)
10188 r->regarray[i] = regarray->init_val;
10190 r->size_mask = regarray->size - 1;
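/* Note on sizing (illustrative arithmetic): the array size was rounded up to
 * a power of two at registration time, e.g. a requested size of 1000 becomes
 * rte_align32pow2(1000) = 1024, so size_mask = 1023 (0x3FF) and the register
 * instructions can reduce any index with a single AND instead of a modulo.
 */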
10197 regarray_build_free(struct rte_swx_pipeline *p)
10201 if (!p->regarray_runtime)
10204 for (i = 0; i < p->n_regarrays; i++) {
10205 struct regarray *regarray = regarray_find_by_id(p, i);
10206 struct regarray_runtime *r = &p->regarray_runtime[i];
10208 env_free(r->regarray, regarray->size * sizeof(uint64_t));
10211 free(p->regarray_runtime);
10212 p->regarray_runtime = NULL;
10216 regarray_free(struct rte_swx_pipeline *p)
10218 regarray_build_free(p);
10221 struct regarray *elem;
10223 elem = TAILQ_FIRST(&p->regarrays);
10227 TAILQ_REMOVE(&p->regarrays, elem, node);
10235 static struct meter_profile *
10236 meter_profile_find(struct rte_swx_pipeline *p, const char *name)
10238 struct meter_profile *elem;
10240 TAILQ_FOREACH(elem, &p->meter_profiles, node)
10241 if (!strcmp(elem->name, name))
10247 static struct metarray *
10248 metarray_find(struct rte_swx_pipeline *p, const char *name)
10250 struct metarray *elem;
10252 TAILQ_FOREACH(elem, &p->metarrays, node)
10253 if (!strcmp(elem->name, name))
10259 static struct metarray *
10260 metarray_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
10262 struct metarray *elem = NULL;
10264 TAILQ_FOREACH(elem, &p->metarrays, node)
10265 if (elem->id == id)
10272 rte_swx_pipeline_metarray_config(struct rte_swx_pipeline *p,
10276 struct metarray *m;
10280 CHECK_NAME(name, EINVAL);
10281 CHECK(!metarray_find(p, name), EEXIST);
10283 CHECK(size, EINVAL);
10284 size = rte_align32pow2(size);
10286 /* Memory allocation. */
10287 m = calloc(1, sizeof(struct metarray));
10290 /* Node initialization. */
10291 strcpy(m->name, name);
10293 m->id = p->n_metarrays;
10295 /* Node add to tailq. */
10296 TAILQ_INSERT_TAIL(&p->metarrays, m, node);
10302 struct meter_profile meter_profile_default = {
10311 .cir_bytes_per_period = 1,
10313 .pir_bytes_per_period = 1,
10320 meter_init(struct meter *m)
10322 memset(m, 0, sizeof(struct meter));
10323 rte_meter_trtcm_config(&m->m, &meter_profile_default.profile);
10324 m->profile = &meter_profile_default;
10325 m->color_mask = RTE_COLOR_GREEN;
10327 meter_profile_default.n_users++;
10331 metarray_build(struct rte_swx_pipeline *p)
10333 struct metarray *m;
10335 if (!p->n_metarrays)
10338 p->metarray_runtime = calloc(p->n_metarrays, sizeof(struct metarray_runtime));
10339 CHECK(p->metarray_runtime, ENOMEM);
10341 TAILQ_FOREACH(m, &p->metarrays, node) {
10342 struct metarray_runtime *r = &p->metarray_runtime[m->id];
10345 r->metarray = env_malloc(m->size * sizeof(struct meter),
10346 RTE_CACHE_LINE_SIZE,
10348 CHECK(r->metarray, ENOMEM);
10350 for (i = 0; i < m->size; i++)
10351 meter_init(&r->metarray[i]);
10353 r->size_mask = m->size - 1;
10360 metarray_build_free(struct rte_swx_pipeline *p)
10364 if (!p->metarray_runtime)
10367 for (i = 0; i < p->n_metarrays; i++) {
10368 struct metarray *m = metarray_find_by_id(p, i);
10369 struct metarray_runtime *r = &p->metarray_runtime[i];
10371 env_free(r->metarray, m->size * sizeof(struct meter));
10374 free(p->metarray_runtime);
10375 p->metarray_runtime = NULL;
10379 metarray_free(struct rte_swx_pipeline *p)
10381 metarray_build_free(p);
10383 /* Meter arrays. */
10385 struct metarray *elem;
10387 elem = TAILQ_FIRST(&p->metarrays);
10391 TAILQ_REMOVE(&p->metarrays, elem, node);
10395 /* Meter profiles. */
10397 struct meter_profile *elem;
10399 elem = TAILQ_FIRST(&p->meter_profiles);
10403 TAILQ_REMOVE(&p->meter_profiles, elem, node);
10412 rte_swx_pipeline_config(struct rte_swx_pipeline **p, int numa_node)
10414 struct rte_swx_pipeline *pipeline;
10416 /* Check input parameters. */
10419 /* Memory allocation. */
10420 pipeline = calloc(1, sizeof(struct rte_swx_pipeline));
10421 CHECK(pipeline, ENOMEM);
10423 /* Initialization. */
10424 TAILQ_INIT(&pipeline->struct_types);
10425 TAILQ_INIT(&pipeline->port_in_types);
10426 TAILQ_INIT(&pipeline->ports_in);
10427 TAILQ_INIT(&pipeline->port_out_types);
10428 TAILQ_INIT(&pipeline->ports_out);
10429 TAILQ_INIT(&pipeline->extern_types);
10430 TAILQ_INIT(&pipeline->extern_objs);
10431 TAILQ_INIT(&pipeline->extern_funcs);
10432 TAILQ_INIT(&pipeline->headers);
10433 TAILQ_INIT(&pipeline->actions);
10434 TAILQ_INIT(&pipeline->table_types);
10435 TAILQ_INIT(&pipeline->tables);
10436 TAILQ_INIT(&pipeline->selectors);
10437 TAILQ_INIT(&pipeline->regarrays);
10438 TAILQ_INIT(&pipeline->meter_profiles);
10439 TAILQ_INIT(&pipeline->metarrays);
10441 pipeline->n_structs = 1; /* Struct 0 is reserved for action_data. */
10442 pipeline->numa_node = numa_node;
10449 rte_swx_pipeline_free(struct rte_swx_pipeline *p)
10454 free(p->instructions);
10458 table_state_free(p);
10464 extern_func_free(p);
10465 extern_obj_free(p);
10474 rte_swx_pipeline_instructions_config(struct rte_swx_pipeline *p,
10475 const char **instructions,
10476 uint32_t n_instructions)
10481 err = instruction_config(p, NULL, instructions, n_instructions);
10485 /* Thread instruction pointer reset. */
10486 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
10487 struct thread *t = &p->threads[i];
10489 thread_ip_reset(p, t);
10496 rte_swx_pipeline_build(struct rte_swx_pipeline *p)
10501 CHECK(p->build_done == 0, EEXIST);
10503 status = port_in_build(p);
10507 status = port_out_build(p);
10511 status = struct_build(p);
10515 status = extern_obj_build(p);
10519 status = extern_func_build(p);
10523 status = header_build(p);
10527 status = metadata_build(p);
10531 status = action_build(p);
10535 status = table_build(p);
10539 status = selector_build(p);
10543 status = table_state_build(p);
10547 status = regarray_build(p);
10551 status = metarray_build(p);
10559 metarray_build_free(p);
10560 regarray_build_free(p);
10561 table_state_build_free(p);
10562 selector_build_free(p);
10563 table_build_free(p);
10564 action_build_free(p);
10565 metadata_build_free(p);
10566 header_build_free(p);
10567 extern_func_build_free(p);
10568 extern_obj_build_free(p);
10569 port_out_build_free(p);
10570 port_in_build_free(p);
10571 struct_build_free(p);
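/* Typical control-path sequence (illustrative sketch; error handling and the
 * individual port/type/header/action/table registration calls are omitted):
 * create the pipeline, register all its objects, build it once, then run it
 * on the data-path thread:
 *
 *     struct rte_swx_pipeline *p;
 *
 *     rte_swx_pipeline_config(&p, numa_node);
 *     ... register ports, struct types, headers, actions, tables ...
 *     rte_swx_pipeline_instructions_config(p, instructions, n_instructions);
 *     rte_swx_pipeline_build(p);
 *
 *     for ( ; ; )
 *             rte_swx_pipeline_run(p, 1000000);
 */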
10577 rte_swx_pipeline_run(struct rte_swx_pipeline *p, uint32_t n_instructions)
10581 for (i = 0; i < n_instructions; i++)
10586 rte_swx_pipeline_flush(struct rte_swx_pipeline *p)
10590 for (i = 0; i < p->n_ports_out; i++) {
10591 struct port_out_runtime *port = &p->out[i];
10594 port->flush(port->obj);
10602 rte_swx_ctl_pipeline_info_get(struct rte_swx_pipeline *p,
10603 struct rte_swx_ctl_pipeline_info *pipeline)
10605 struct action *action;
10606 struct table *table;
10607 uint32_t n_actions = 0, n_tables = 0;
10609 if (!p || !pipeline)
10612 TAILQ_FOREACH(action, &p->actions, node)
10615 TAILQ_FOREACH(table, &p->tables, node)
10618 pipeline->n_ports_in = p->n_ports_in;
10619 pipeline->n_ports_out = p->n_ports_out;
10620 pipeline->n_actions = n_actions;
10621 pipeline->n_tables = n_tables;
10622 pipeline->n_selectors = p->n_selectors;
10623 pipeline->n_regarrays = p->n_regarrays;
10624 pipeline->n_metarrays = p->n_metarrays;
10630 rte_swx_ctl_pipeline_numa_node_get(struct rte_swx_pipeline *p, int *numa_node)
10632 if (!p || !numa_node)
10635 *numa_node = p->numa_node;
10640 rte_swx_ctl_action_info_get(struct rte_swx_pipeline *p,
10641 uint32_t action_id,
10642 struct rte_swx_ctl_action_info *action)
10644 struct action *a = NULL;
10646 if (!p || (action_id >= p->n_actions) || !action)
10649 a = action_find_by_id(p, action_id);
10653 strcpy(action->name, a->name);
10654 action->n_args = a->st ? a->st->n_fields : 0;
10659 rte_swx_ctl_action_arg_info_get(struct rte_swx_pipeline *p,
10660 uint32_t action_id,
10661 uint32_t action_arg_id,
10662 struct rte_swx_ctl_action_arg_info *action_arg)
10664 struct action *a = NULL;
10665 struct field *arg = NULL;
10667 if (!p || (action_id >= p->n_actions) || !action_arg)
10670 a = action_find_by_id(p, action_id);
10671 if (!a || !a->st || (action_arg_id >= a->st->n_fields))
10674 arg = &a->st->fields[action_arg_id];
10675 strcpy(action_arg->name, arg->name);
10676 action_arg->n_bits = arg->n_bits;
10677 action_arg->is_network_byte_order = a->args_endianness[action_arg_id];
10683 rte_swx_ctl_table_info_get(struct rte_swx_pipeline *p,
10685 struct rte_swx_ctl_table_info *table)
10687 struct table *t = NULL;
10692 t = table_find_by_id(p, table_id);
10696 strcpy(table->name, t->name);
10697 strcpy(table->args, t->args);
10698 table->n_match_fields = t->n_fields;
10699 table->n_actions = t->n_actions;
10700 table->default_action_is_const = t->default_action_is_const;
10701 table->size = t->size;
10706 rte_swx_ctl_table_match_field_info_get(struct rte_swx_pipeline *p,
10708 uint32_t match_field_id,
10709 struct rte_swx_ctl_table_match_field_info *match_field)
10712 struct match_field *f;
10714 if (!p || (table_id >= p->n_tables) || !match_field)
10717 t = table_find_by_id(p, table_id);
10718 if (!t || (match_field_id >= t->n_fields))
10721 f = &t->fields[match_field_id];
10722 match_field->match_type = f->match_type;
10723 match_field->is_header = t->header ? 1 : 0;
10724 match_field->n_bits = f->field->n_bits;
10725 match_field->offset = f->field->offset;
10731 rte_swx_ctl_table_action_info_get(struct rte_swx_pipeline *p,
10733 uint32_t table_action_id,
10734 struct rte_swx_ctl_table_action_info *table_action)
10738 if (!p || (table_id >= p->n_tables) || !table_action)
10741 t = table_find_by_id(p, table_id);
10742 if (!t || (table_action_id >= t->n_actions))
10745 table_action->action_id = t->actions[table_action_id]->id;
10751 rte_swx_ctl_table_ops_get(struct rte_swx_pipeline *p,
10753 struct rte_swx_table_ops *table_ops,
10758 if (!p || (table_id >= p->n_tables))
10761 t = table_find_by_id(p, table_id);
10767 memcpy(table_ops, &t->type->ops, sizeof(*table_ops));
10777 rte_swx_ctl_selector_info_get(struct rte_swx_pipeline *p,
10778 uint32_t selector_id,
10779 struct rte_swx_ctl_selector_info *selector)
10781 struct selector *s = NULL;
10783 if (!p || !selector)
10786 s = selector_find_by_id(p, selector_id);
10790 strcpy(selector->name, s->name);
10792 selector->n_selector_fields = s->n_selector_fields;
10793 selector->n_groups_max = s->n_groups_max;
10794 selector->n_members_per_group_max = s->n_members_per_group_max;
10800 rte_swx_ctl_selector_group_id_field_info_get(struct rte_swx_pipeline *p,
10801 uint32_t selector_id,
10802 struct rte_swx_ctl_table_match_field_info *field)
10804 struct selector *s;
10806 if (!p || (selector_id >= p->n_selectors) || !field)
10809 s = selector_find_by_id(p, selector_id);
10813 field->match_type = RTE_SWX_TABLE_MATCH_EXACT;
10814 field->is_header = 0;
10815 field->n_bits = s->group_id_field->n_bits;
10816 field->offset = s->group_id_field->offset;
10822 rte_swx_ctl_selector_field_info_get(struct rte_swx_pipeline *p,
10823 uint32_t selector_id,
10824 uint32_t selector_field_id,
10825 struct rte_swx_ctl_table_match_field_info *field)
10827 struct selector *s;
10830 if (!p || (selector_id >= p->n_selectors) || !field)
10833 s = selector_find_by_id(p, selector_id);
10834 if (!s || (selector_field_id >= s->n_selector_fields))
10837 f = s->selector_fields[selector_field_id];
10838 field->match_type = RTE_SWX_TABLE_MATCH_EXACT;
10839 field->is_header = s->selector_header ? 1 : 0;
10840 field->n_bits = f->n_bits;
10841 field->offset = f->offset;
10847 rte_swx_ctl_selector_member_id_field_info_get(struct rte_swx_pipeline *p,
10848 uint32_t selector_id,
10849 struct rte_swx_ctl_table_match_field_info *field)
10851 struct selector *s;
10853 if (!p || (selector_id >= p->n_selectors) || !field)
10856 s = selector_find_by_id(p, selector_id);
10860 field->match_type = RTE_SWX_TABLE_MATCH_EXACT;
10861 field->is_header = 0;
10862 field->n_bits = s->member_id_field->n_bits;
10863 field->offset = s->member_id_field->offset;
10869 rte_swx_pipeline_table_state_get(struct rte_swx_pipeline *p,
10870 struct rte_swx_table_state **table_state)
10872 if (!p || !table_state || !p->build_done)
10875 *table_state = p->table_state;
10880 rte_swx_pipeline_table_state_set(struct rte_swx_pipeline *p,
10881 struct rte_swx_table_state *table_state)
10883 if (!p || !table_state || !p->build_done)
10886 p->table_state = table_state;
10891 rte_swx_ctl_pipeline_port_in_stats_read(struct rte_swx_pipeline *p,
10893 struct rte_swx_port_in_stats *stats)
10895 struct port_in *port;
10900 port = port_in_find(p, port_id);
10904 port->type->ops.stats_read(port->obj, stats);
10909 rte_swx_ctl_pipeline_port_out_stats_read(struct rte_swx_pipeline *p,
10911 struct rte_swx_port_out_stats *stats)
10913 struct port_out *port;
10918 port = port_out_find(p, port_id);
10922 port->type->ops.stats_read(port->obj, stats);
10927 rte_swx_ctl_pipeline_table_stats_read(struct rte_swx_pipeline *p,
10928 const char *table_name,
10929 struct rte_swx_table_stats *stats)
10931 struct table *table;
10932 struct table_statistics *table_stats;
10934 if (!p || !table_name || !table_name[0] || !stats || !stats->n_pkts_action)
10937 table = table_find(p, table_name);
10941 table_stats = &p->table_stats[table->id];
10943 memcpy(stats->n_pkts_action,
10944 table_stats->n_pkts_action,
10945 p->n_actions * sizeof(uint64_t));
10947 stats->n_pkts_hit = table_stats->n_pkts_hit[1];
10948 stats->n_pkts_miss = table_stats->n_pkts_hit[0];
10954 rte_swx_ctl_pipeline_selector_stats_read(struct rte_swx_pipeline *p,
10955 const char *selector_name,
10956 struct rte_swx_pipeline_selector_stats *stats)
10958 struct selector *s;
10960 if (!p || !selector_name || !selector_name[0] || !stats)
10963 s = selector_find(p, selector_name);
10967 stats->n_pkts = p->selector_stats[s->id].n_pkts;
10973 rte_swx_ctl_regarray_info_get(struct rte_swx_pipeline *p,
10974 uint32_t regarray_id,
10975 struct rte_swx_ctl_regarray_info *regarray)
10977 struct regarray *r;
10979 if (!p || !regarray)
10982 r = regarray_find_by_id(p, regarray_id);
10986 strcpy(regarray->name, r->name);
10987 regarray->size = r->size;
10992 rte_swx_ctl_pipeline_regarray_read(struct rte_swx_pipeline *p,
10993 const char *regarray_name,
10994 uint32_t regarray_index,
10997 struct regarray *regarray;
10998 struct regarray_runtime *r;
11000 if (!p || !regarray_name || !value)
11003 regarray = regarray_find(p, regarray_name);
11004 if (!regarray || (regarray_index >= regarray->size))
11007 r = &p->regarray_runtime[regarray->id];
11008 *value = r->regarray[regarray_index];
11013 rte_swx_ctl_pipeline_regarray_write(struct rte_swx_pipeline *p,
11014 const char *regarray_name,
11015 uint32_t regarray_index,
11018 struct regarray *regarray;
11019 struct regarray_runtime *r;
11021 if (!p || !regarray_name)
11024 regarray = regarray_find(p, regarray_name);
11025 if (!regarray || (regarray_index >= regarray->size))
11028 r = &p->regarray_runtime[regarray->id];
11029 r->regarray[regarray_index] = value;
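/* Example usage (illustrative; "pkt_counters" is a hypothetical register
 * array registered earlier with rte_swx_pipeline_regarray_config()):
 *
 *     uint64_t value;
 *
 *     rte_swx_ctl_pipeline_regarray_read(p, "pkt_counters", 7, &value);
 *     rte_swx_ctl_pipeline_regarray_write(p, "pkt_counters", 7, value + 1);
 */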
11034 rte_swx_ctl_metarray_info_get(struct rte_swx_pipeline *p,
11035 uint32_t metarray_id,
11036 struct rte_swx_ctl_metarray_info *metarray)
11038 struct metarray *m;
11040 if (!p || !metarray)
11043 m = metarray_find_by_id(p, metarray_id);
11047 strcpy(metarray->name, m->name);
11048 metarray->size = m->size;
11053 rte_swx_ctl_meter_profile_add(struct rte_swx_pipeline *p,
11055 struct rte_meter_trtcm_params *params)
11057 struct meter_profile *mp;
11061 CHECK_NAME(name, EINVAL);
11062 CHECK(params, EINVAL);
11063 CHECK(!meter_profile_find(p, name), EEXIST);
11065 /* Node allocation. */
11066 mp = calloc(1, sizeof(struct meter_profile));
11069 /* Node initialization. */
11070 strcpy(mp->name, name);
11071 memcpy(&mp->params, params, sizeof(struct rte_meter_trtcm_params));
11072 status = rte_meter_trtcm_profile_config(&mp->profile, params);
11078 /* Node add to tailq. */
11079 TAILQ_INSERT_TAIL(&p->meter_profiles, mp, node);
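/* Example usage (illustrative; the profile values are arbitrary and
 * "flow_meters" is a hypothetical meter array registered earlier with
 * rte_swx_pipeline_metarray_config()):
 *
 *     struct rte_meter_trtcm_params mp = {
 *             .cir = 1250000, .pir = 2500000,
 *             .cbs = 10000,   .pbs = 10000,
 *     };
 *
 *     rte_swx_ctl_meter_profile_add(p, "mp_10mbps", &mp);
 *     rte_swx_ctl_meter_set(p, "flow_meters", 5, "mp_10mbps");
 */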
11085 rte_swx_ctl_meter_profile_delete(struct rte_swx_pipeline *p,
11088 struct meter_profile *mp;
11091 CHECK_NAME(name, EINVAL);
11093 mp = meter_profile_find(p, name);
11095 CHECK(!mp->n_users, EBUSY);
11097 /* Remove node from tailq. */
11098 TAILQ_REMOVE(&p->meter_profiles, mp, node);
11105 rte_swx_ctl_meter_reset(struct rte_swx_pipeline *p,
11106 const char *metarray_name,
11107 uint32_t metarray_index)
11109 struct meter_profile *mp_old;
11110 struct metarray *metarray;
11111 struct metarray_runtime *metarray_runtime;
11115 CHECK_NAME(metarray_name, EINVAL);
11117 metarray = metarray_find(p, metarray_name);
11118 CHECK(metarray, EINVAL);
11119 CHECK(metarray_index < metarray->size, EINVAL);
11121 metarray_runtime = &p->metarray_runtime[metarray->id];
11122 m = &metarray_runtime->metarray[metarray_index];
11123 mp_old = m->profile;
11133 rte_swx_ctl_meter_set(struct rte_swx_pipeline *p,
11134 const char *metarray_name,
11135 uint32_t metarray_index,
11136 const char *profile_name)
11138 struct meter_profile *mp, *mp_old;
11139 struct metarray *metarray;
11140 struct metarray_runtime *metarray_runtime;
11144 CHECK_NAME(metarray_name, EINVAL);
11146 metarray = metarray_find(p, metarray_name);
11147 CHECK(metarray, EINVAL);
11148 CHECK(metarray_index < metarray->size, EINVAL);
11150 mp = meter_profile_find(p, profile_name);
11153 metarray_runtime = &p->metarray_runtime[metarray->id];
11154 m = &metarray_runtime->metarray[metarray_index];
11155 mp_old = m->profile;
11157 memset(m, 0, sizeof(struct meter));
11158 rte_meter_trtcm_config(&m->m, &mp->profile);
11160 m->color_mask = RTE_COLORS;
11169 rte_swx_ctl_meter_stats_read(struct rte_swx_pipeline *p,
11170 const char *metarray_name,
11171 uint32_t metarray_index,
11172 struct rte_swx_ctl_meter_stats *stats)
11174 struct metarray *metarray;
11175 struct metarray_runtime *metarray_runtime;
11179 CHECK_NAME(metarray_name, EINVAL);
11181 metarray = metarray_find(p, metarray_name);
11182 CHECK(metarray, EINVAL);
11183 CHECK(metarray_index < metarray->size, EINVAL);
11185 CHECK(stats, EINVAL);
11187 metarray_runtime = &p->metarray_runtime[metarray->id];
11188 m = &metarray_runtime->metarray[metarray_index];
11190 memcpy(stats->n_pkts, m->n_pkts, sizeof(m->n_pkts));
11191 memcpy(stats->n_bytes, m->n_bytes, sizeof(m->n_bytes));