1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Intel Corporation
10 #include <arpa/inet.h>
12 #include <rte_common.h>
13 #include <rte_prefetch.h>
14 #include <rte_byteorder.h>
15 #include <rte_cycles.h>
16 #include <rte_meter.h>
18 #include <rte_swx_table_selector.h>
20 #include "rte_swx_pipeline.h"
21 #include "rte_swx_ctl.h"
23 #define CHECK(condition, err_code) \
29 #define CHECK_NAME(name, err_code) \
32 (strnlen((name), RTE_SWX_NAME_SIZE) < RTE_SWX_NAME_SIZE), \
35 #define CHECK_INSTRUCTION(instr, err_code) \
38 (strnlen((instr), RTE_SWX_INSTRUCTION_SIZE) < \
39 RTE_SWX_INSTRUCTION_SIZE), \
47 #define TRACE(...) printf(__VA_ARGS__)
55 #define ntoh64(x) rte_be_to_cpu_64(x)
56 #define hton64(x) rte_cpu_to_be_64(x)
58 #ifndef RTE_SWX_PIPELINE_HUGE_PAGES_DISABLE
60 #include <rte_malloc.h>
63 env_malloc(size_t size, size_t alignment, int numa_node)
65 return rte_zmalloc_socket(NULL, size, alignment, numa_node);
69 env_free(void *start, size_t size __rte_unused)
79 env_malloc(size_t size, size_t alignment __rte_unused, int numa_node)
83 if (numa_available() == -1)
86 start = numa_alloc_onnode(size, numa_node);
90 memset(start, 0, size);
95 env_free(void *start, size_t size)
97 if (numa_available() == -1)
100 numa_free(start, size);
109 char name[RTE_SWX_NAME_SIZE];
115 TAILQ_ENTRY(struct_type) node;
116 char name[RTE_SWX_NAME_SIZE];
117 struct field *fields;
122 TAILQ_HEAD(struct_type_tailq, struct_type);
127 struct port_in_type {
128 TAILQ_ENTRY(port_in_type) node;
129 char name[RTE_SWX_NAME_SIZE];
130 struct rte_swx_port_in_ops ops;
133 TAILQ_HEAD(port_in_type_tailq, port_in_type);
136 TAILQ_ENTRY(port_in) node;
137 struct port_in_type *type;
142 TAILQ_HEAD(port_in_tailq, port_in);
144 struct port_in_runtime {
145 rte_swx_port_in_pkt_rx_t pkt_rx;
152 struct port_out_type {
153 TAILQ_ENTRY(port_out_type) node;
154 char name[RTE_SWX_NAME_SIZE];
155 struct rte_swx_port_out_ops ops;
158 TAILQ_HEAD(port_out_type_tailq, port_out_type);
161 TAILQ_ENTRY(port_out) node;
162 struct port_out_type *type;
167 TAILQ_HEAD(port_out_tailq, port_out);
169 struct port_out_runtime {
170 rte_swx_port_out_pkt_tx_t pkt_tx;
171 rte_swx_port_out_flush_t flush;
178 struct extern_type_member_func {
179 TAILQ_ENTRY(extern_type_member_func) node;
180 char name[RTE_SWX_NAME_SIZE];
181 rte_swx_extern_type_member_func_t func;
185 TAILQ_HEAD(extern_type_member_func_tailq, extern_type_member_func);
188 TAILQ_ENTRY(extern_type) node;
189 char name[RTE_SWX_NAME_SIZE];
190 struct struct_type *mailbox_struct_type;
191 rte_swx_extern_type_constructor_t constructor;
192 rte_swx_extern_type_destructor_t destructor;
193 struct extern_type_member_func_tailq funcs;
197 TAILQ_HEAD(extern_type_tailq, extern_type);
200 TAILQ_ENTRY(extern_obj) node;
201 char name[RTE_SWX_NAME_SIZE];
202 struct extern_type *type;
208 TAILQ_HEAD(extern_obj_tailq, extern_obj);
210 #ifndef RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX
211 #define RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX 8
214 struct extern_obj_runtime {
217 rte_swx_extern_type_member_func_t funcs[RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX];
224 TAILQ_ENTRY(extern_func) node;
225 char name[RTE_SWX_NAME_SIZE];
226 struct struct_type *mailbox_struct_type;
227 rte_swx_extern_func_t func;
232 TAILQ_HEAD(extern_func_tailq, extern_func);
234 struct extern_func_runtime {
236 rte_swx_extern_func_t func;
243 TAILQ_ENTRY(header) node;
244 char name[RTE_SWX_NAME_SIZE];
245 struct struct_type *st;
250 TAILQ_HEAD(header_tailq, header);
252 struct header_runtime {
257 struct header_out_runtime {
267 /* Packet headers are always in Network Byte Order (NBO), i.e. big endian.
268 * Packet meta-data fields are always assumed to be in Host Byte Order (HBO).
269 * Table entry fields can be in either NBO or HBO; they are assumed to be in HBO
270 * when transferred to packet meta-data and in NBO when transferred to packet
274 /* Notation conventions:
275 * -Header field: H = h.header.field (dst/src)
276 * -Meta-data field: M = m.field (dst/src)
277 * -Extern object mailbox field: E = e.field (dst/src)
278 * -Extern function mailbox field: F = f.field (dst/src)
279 * -Table action data field: T = t.field (src only)
280 * -Immediate value: I = 32-bit unsigned value (src only)
283 enum instruction_type {
290 INSTR_TX, /* port_out = M */
291 INSTR_TX_I, /* port_out = I */
293 /* extract h.header */
314 /* validate h.header */
317 /* invalidate h.header */
318 INSTR_HDR_INVALIDATE,
322 * dst = HMEF, src = HMEFTI
324 INSTR_MOV, /* dst = MEF, src = MEFT */
325 INSTR_MOV_MH, /* dst = MEF, src = H */
326 INSTR_MOV_HM, /* dst = H, src = MEFT */
327 INSTR_MOV_HH, /* dst = H, src = H */
328 INSTR_MOV_I, /* dst = HMEF, src = I */
330 /* dma h.header t.field
331 * memcpy(h.header, t.field, sizeof(h.header))
344 * dst = HMEF, src = HMEFTI
346 INSTR_ALU_ADD, /* dst = MEF, src = MEF */
347 INSTR_ALU_ADD_MH, /* dst = MEF, src = H */
348 INSTR_ALU_ADD_HM, /* dst = H, src = MEF */
349 INSTR_ALU_ADD_HH, /* dst = H, src = H */
350 INSTR_ALU_ADD_MI, /* dst = MEF, src = I */
351 INSTR_ALU_ADD_HI, /* dst = H, src = I */
355 * dst = HMEF, src = HMEFTI
357 INSTR_ALU_SUB, /* dst = MEF, src = MEF */
358 INSTR_ALU_SUB_MH, /* dst = MEF, src = H */
359 INSTR_ALU_SUB_HM, /* dst = H, src = MEF */
360 INSTR_ALU_SUB_HH, /* dst = H, src = H */
361 INSTR_ALU_SUB_MI, /* dst = MEF, src = I */
362 INSTR_ALU_SUB_HI, /* dst = H, src = I */
365 * dst = dst '+ src[0:1] '+ src[2:3] + ...
366 * dst = H, src = {H, h.header}
368 INSTR_ALU_CKADD_FIELD, /* src = H */
369 INSTR_ALU_CKADD_STRUCT20, /* src = h.header, with sizeof(header) = 20 */
370 INSTR_ALU_CKADD_STRUCT, /* src = h.header, with any sizeof(header) */
376 INSTR_ALU_CKSUB_FIELD,
380 * dst = HMEF, src = HMEFTI
382 INSTR_ALU_AND, /* dst = MEF, src = MEFT */
383 INSTR_ALU_AND_MH, /* dst = MEF, src = H */
384 INSTR_ALU_AND_HM, /* dst = H, src = MEFT */
385 INSTR_ALU_AND_HH, /* dst = H, src = H */
386 INSTR_ALU_AND_I, /* dst = HMEF, src = I */
390 * dst = HMEF, src = HMEFTI
392 INSTR_ALU_OR, /* dst = MEF, src = MEFT */
393 INSTR_ALU_OR_MH, /* dst = MEF, src = H */
394 INSTR_ALU_OR_HM, /* dst = H, src = MEFT */
395 INSTR_ALU_OR_HH, /* dst = H, src = H */
396 INSTR_ALU_OR_I, /* dst = HMEF, src = I */
400 * dst = HMEF, src = HMEFTI
402 INSTR_ALU_XOR, /* dst = MEF, src = MEFT */
403 INSTR_ALU_XOR_MH, /* dst = MEF, src = H */
404 INSTR_ALU_XOR_HM, /* dst = H, src = MEFT */
405 INSTR_ALU_XOR_HH, /* dst = H, src = H */
406 INSTR_ALU_XOR_I, /* dst = HMEF, src = I */
410 * dst = HMEF, src = HMEFTI
412 INSTR_ALU_SHL, /* dst = MEF, src = MEF */
413 INSTR_ALU_SHL_MH, /* dst = MEF, src = H */
414 INSTR_ALU_SHL_HM, /* dst = H, src = MEF */
415 INSTR_ALU_SHL_HH, /* dst = H, src = H */
416 INSTR_ALU_SHL_MI, /* dst = MEF, src = I */
417 INSTR_ALU_SHL_HI, /* dst = H, src = I */
421 * dst = HMEF, src = HMEFTI
423 INSTR_ALU_SHR, /* dst = MEF, src = MEF */
424 INSTR_ALU_SHR_MH, /* dst = MEF, src = H */
425 INSTR_ALU_SHR_HM, /* dst = H, src = MEF */
426 INSTR_ALU_SHR_HH, /* dst = H, src = H */
427 INSTR_ALU_SHR_MI, /* dst = MEF, src = I */
428 INSTR_ALU_SHR_HI, /* dst = H, src = I */
430 /* regprefetch REGARRAY index
431 * prefetch REGARRAY[index]
434 INSTR_REGPREFETCH_RH, /* index = H */
435 INSTR_REGPREFETCH_RM, /* index = MEFT */
436 INSTR_REGPREFETCH_RI, /* index = I */
438 /* regrd dst REGARRAY index
439 * dst = REGARRAY[index]
440 * dst = HMEF, index = HMEFTI
442 INSTR_REGRD_HRH, /* dst = H, index = H */
443 INSTR_REGRD_HRM, /* dst = H, index = MEFT */
444 INSTR_REGRD_HRI, /* dst = H, index = I */
445 INSTR_REGRD_MRH, /* dst = MEF, index = H */
446 INSTR_REGRD_MRM, /* dst = MEF, index = MEFT */
447 INSTR_REGRD_MRI, /* dst = MEF, index = I */
449 /* regwr REGARRAY index src
450 * REGARRAY[index] = src
451 * index = HMEFTI, src = HMEFTI
453 INSTR_REGWR_RHH, /* index = H, src = H */
454 INSTR_REGWR_RHM, /* index = H, src = MEFT */
455 INSTR_REGWR_RHI, /* index = H, src = I */
456 INSTR_REGWR_RMH, /* index = MEFT, src = H */
457 INSTR_REGWR_RMM, /* index = MEFT, src = MEFT */
458 INSTR_REGWR_RMI, /* index = MEFT, src = I */
459 INSTR_REGWR_RIH, /* index = I, src = H */
460 INSTR_REGWR_RIM, /* index = I, src = MEFT */
461 INSTR_REGWR_RII, /* index = I, src = I */
463 /* regadd REGARRAY index src
464 * REGARRAY[index] += src
465 * index = HMEFTI, src = HMEFTI
467 INSTR_REGADD_RHH, /* index = H, src = H */
468 INSTR_REGADD_RHM, /* index = H, src = MEFT */
469 INSTR_REGADD_RHI, /* index = H, src = I */
470 INSTR_REGADD_RMH, /* index = MEFT, src = H */
471 INSTR_REGADD_RMM, /* index = MEFT, src = MEFT */
472 INSTR_REGADD_RMI, /* index = MEFT, src = I */
473 INSTR_REGADD_RIH, /* index = I, src = H */
474 INSTR_REGADD_RIM, /* index = I, src = MEFT */
475 INSTR_REGADD_RII, /* index = I, src = I */
477 /* metprefetch METARRAY index
478 * prefetch METARRAY[index]
481 INSTR_METPREFETCH_H, /* index = H */
482 INSTR_METPREFETCH_M, /* index = MEFT */
483 INSTR_METPREFETCH_I, /* index = I */
485 /* meter METARRAY index length color_in color_out
486 * color_out = meter(METARRAY[index], length, color_in)
487 * index = HMEFTI, length = HMEFT, color_in = MEFTI, color_out = MEF
489 INSTR_METER_HHM, /* index = H, length = H, color_in = MEFT */
490 INSTR_METER_HHI, /* index = H, length = H, color_in = I */
491 INSTR_METER_HMM, /* index = H, length = MEFT, color_in = MEFT */
492 INSTR_METER_HMI, /* index = H, length = MEFT, color_in = I */
493 INSTR_METER_MHM, /* index = MEFT, length = H, color_in = MEFT */
494 INSTR_METER_MHI, /* index = MEFT, length = H, color_in = I */
495 INSTR_METER_MMM, /* index = MEFT, length = MEFT, color_in = MEFT */
496 INSTR_METER_MMI, /* index = MEFT, length = MEFT, color_in = I */
497 INSTR_METER_IHM, /* index = I, length = H, color_in = MEFT */
498 INSTR_METER_IHI, /* index = I, length = H, color_in = I */
499 INSTR_METER_IMM, /* index = I, length = MEFT, color_in = MEFT */
500 INSTR_METER_IMI, /* index = I, length = MEFT, color_in = I */
506 /* extern e.obj.func */
517 /* jmpv LABEL h.header
518 * Jump if header is valid
522 /* jmpnv LABEL h.header
523 * Jump if header is invalid
528 * Jump if table lookup hit
533 * Jump if table lookup miss
540 INSTR_JMP_ACTION_HIT,
542 /* jmpna LABEL ACTION
543 * Jump if action not run
545 INSTR_JMP_ACTION_MISS,
548 * Jump if a is equal to b
549 * a = HMEFT, b = HMEFTI
551 INSTR_JMP_EQ, /* a = MEFT, b = MEFT */
552 INSTR_JMP_EQ_MH, /* a = MEFT, b = H */
553 INSTR_JMP_EQ_HM, /* a = H, b = MEFT */
554 INSTR_JMP_EQ_HH, /* a = H, b = H */
555 INSTR_JMP_EQ_I, /* (a, b) = (MEFT, I) or (a, b) = (H, I) */
558 * Jump if a is not equal to b
559 * a = HMEFT, b = HMEFTI
561 INSTR_JMP_NEQ, /* a = MEFT, b = MEFT */
562 INSTR_JMP_NEQ_MH, /* a = MEFT, b = H */
563 INSTR_JMP_NEQ_HM, /* a = H, b = MEFT */
564 INSTR_JMP_NEQ_HH, /* a = H, b = H */
565 INSTR_JMP_NEQ_I, /* (a, b) = (MEFT, I) or (a, b) = (H, I) */
568 * Jump if a is less than b
569 * a = HMEFT, b = HMEFTI
571 INSTR_JMP_LT, /* a = MEFT, b = MEFT */
572 INSTR_JMP_LT_MH, /* a = MEFT, b = H */
573 INSTR_JMP_LT_HM, /* a = H, b = MEFT */
574 INSTR_JMP_LT_HH, /* a = H, b = H */
575 INSTR_JMP_LT_MI, /* a = MEFT, b = I */
576 INSTR_JMP_LT_HI, /* a = H, b = I */
579 * Jump if a is greater than b
580 * a = HMEFT, b = HMEFTI
582 INSTR_JMP_GT, /* a = MEFT, b = MEFT */
583 INSTR_JMP_GT_MH, /* a = MEFT, b = H */
584 INSTR_JMP_GT_HM, /* a = H, b = MEFT */
585 INSTR_JMP_GT_HH, /* a = H, b = H */
586 INSTR_JMP_GT_MI, /* a = MEFT, b = I */
587 INSTR_JMP_GT_HI, /* a = H, b = I */
595 struct instr_operand {
616 uint8_t header_id[8];
617 uint8_t struct_id[8];
622 struct instr_hdr_validity {
630 struct instr_extern_obj {
635 struct instr_extern_func {
639 struct instr_dst_src {
640 struct instr_operand dst;
642 struct instr_operand src;
647 struct instr_regarray {
652 struct instr_operand idx;
657 struct instr_operand dstsrc;
667 struct instr_operand idx;
671 struct instr_operand length;
674 struct instr_operand color_in;
675 uint32_t color_in_val;
678 struct instr_operand color_out;
683 uint8_t header_id[8];
684 uint8_t struct_id[8];
695 struct instruction *ip;
698 struct instr_operand a;
704 struct instr_operand b;
710 enum instruction_type type;
713 struct instr_hdr_validity valid;
714 struct instr_dst_src mov;
715 struct instr_regarray regarray;
716 struct instr_meter meter;
717 struct instr_dma dma;
718 struct instr_dst_src alu;
719 struct instr_table table;
720 struct instr_extern_obj ext_obj;
721 struct instr_extern_func ext_func;
722 struct instr_jmp jmp;
726 struct instruction_data {
727 char label[RTE_SWX_NAME_SIZE];
728 char jmp_label[RTE_SWX_NAME_SIZE];
729 uint32_t n_users; /* user = jmp instruction to this instruction. */
737 TAILQ_ENTRY(action) node;
738 char name[RTE_SWX_NAME_SIZE];
739 struct struct_type *st;
740 int *args_endianness; /* 0 = Host Byte Order (HBO). */
741 struct instruction *instructions;
742 uint32_t n_instructions;
746 TAILQ_HEAD(action_tailq, action);
752 TAILQ_ENTRY(table_type) node;
753 char name[RTE_SWX_NAME_SIZE];
754 enum rte_swx_table_match_type match_type;
755 struct rte_swx_table_ops ops;
758 TAILQ_HEAD(table_type_tailq, table_type);
761 enum rte_swx_table_match_type match_type;
766 TAILQ_ENTRY(table) node;
767 char name[RTE_SWX_NAME_SIZE];
768 char args[RTE_SWX_NAME_SIZE];
769 struct table_type *type; /* NULL when n_fields == 0. */
772 struct match_field *fields;
774 struct header *header; /* Only valid when n_fields > 0. */
777 struct action **actions;
778 struct action *default_action;
779 uint8_t *default_action_data;
781 int default_action_is_const;
782 uint32_t action_data_size_max;
788 TAILQ_HEAD(table_tailq, table);
790 struct table_runtime {
791 rte_swx_table_lookup_t func;
796 struct table_statistics {
797 uint64_t n_pkts_hit[2]; /* 0 = Miss, 1 = Hit. */
798 uint64_t *n_pkts_action;
805 TAILQ_ENTRY(selector) node;
806 char name[RTE_SWX_NAME_SIZE];
808 struct field *group_id_field;
809 struct field **selector_fields;
810 uint32_t n_selector_fields;
811 struct header *selector_header;
812 struct field *member_id_field;
814 uint32_t n_groups_max;
815 uint32_t n_members_per_group_max;
820 TAILQ_HEAD(selector_tailq, selector);
822 struct selector_runtime {
824 uint8_t **group_id_buffer;
825 uint8_t **selector_buffer;
826 uint8_t **member_id_buffer;
829 struct selector_statistics {
837 TAILQ_ENTRY(regarray) node;
838 char name[RTE_SWX_NAME_SIZE];
844 TAILQ_HEAD(regarray_tailq, regarray);
846 struct regarray_runtime {
854 struct meter_profile {
855 TAILQ_ENTRY(meter_profile) node;
856 char name[RTE_SWX_NAME_SIZE];
857 struct rte_meter_trtcm_params params;
858 struct rte_meter_trtcm_profile profile;
862 TAILQ_HEAD(meter_profile_tailq, meter_profile);
865 TAILQ_ENTRY(metarray) node;
866 char name[RTE_SWX_NAME_SIZE];
871 TAILQ_HEAD(metarray_tailq, metarray);
874 struct rte_meter_trtcm m;
875 struct meter_profile *profile;
876 enum rte_color color_mask;
879 uint64_t n_pkts[RTE_COLORS];
880 uint64_t n_bytes[RTE_COLORS];
883 struct metarray_runtime {
884 struct meter *metarray;
893 struct rte_swx_pkt pkt;
899 /* Packet headers. */
900 struct header_runtime *headers; /* Extracted or generated headers. */
901 struct header_out_runtime *headers_out; /* Emitted headers. */
902 uint8_t *header_storage;
903 uint8_t *header_out_storage;
904 uint64_t valid_headers;
905 uint32_t n_headers_out;
907 /* Packet meta-data. */
911 struct table_runtime *tables;
912 struct selector_runtime *selectors;
913 struct rte_swx_table_state *table_state;
915 int hit; /* 0 = Miss, 1 = Hit. */
917 /* Extern objects and functions. */
918 struct extern_obj_runtime *extern_objs;
919 struct extern_func_runtime *extern_funcs;
922 struct instruction *ip;
923 struct instruction *ret;
926 #define MASK64_BIT_GET(mask, pos) ((mask) & (1LLU << (pos)))
927 #define MASK64_BIT_SET(mask, pos) ((mask) | (1LLU << (pos)))
928 #define MASK64_BIT_CLR(mask, pos) ((mask) & ~(1LLU << (pos)))
930 #define HEADER_VALID(thread, header_id) \
931 MASK64_BIT_GET((thread)->valid_headers, header_id)
933 #define ALU(thread, ip, operator) \
935 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
936 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
937 uint64_t dst64 = *dst64_ptr; \
938 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
939 uint64_t dst = dst64 & dst64_mask; \
941 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
942 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
943 uint64_t src64 = *src64_ptr; \
944 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
945 uint64_t src = src64 & src64_mask; \
947 uint64_t result = dst operator src; \
949 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
952 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
954 #define ALU_MH(thread, ip, operator) \
956 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
957 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
958 uint64_t dst64 = *dst64_ptr; \
959 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
960 uint64_t dst = dst64 & dst64_mask; \
962 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
963 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
964 uint64_t src64 = *src64_ptr; \
965 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
967 uint64_t result = dst operator src; \
969 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
972 #define ALU_HM(thread, ip, operator) \
974 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
975 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
976 uint64_t dst64 = *dst64_ptr; \
977 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
978 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
980 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
981 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
982 uint64_t src64 = *src64_ptr; \
983 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
984 uint64_t src = src64 & src64_mask; \
986 uint64_t result = dst operator src; \
987 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
989 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
992 #define ALU_HM_FAST(thread, ip, operator) \
994 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
995 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
996 uint64_t dst64 = *dst64_ptr; \
997 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
998 uint64_t dst = dst64 & dst64_mask; \
1000 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
1001 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
1002 uint64_t src64 = *src64_ptr; \
1003 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
1004 uint64_t src = hton64(src64 & src64_mask) >> (64 - (ip)->alu.dst.n_bits); \
1006 uint64_t result = dst operator src; \
1008 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
1011 #define ALU_HH(thread, ip, operator) \
1013 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
1014 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
1015 uint64_t dst64 = *dst64_ptr; \
1016 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
1017 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
1019 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
1020 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
1021 uint64_t src64 = *src64_ptr; \
1022 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
1024 uint64_t result = dst operator src; \
1025 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
1027 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
1030 #define ALU_HH_FAST(thread, ip, operator) \
1032 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
1033 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
1034 uint64_t dst64 = *dst64_ptr; \
1035 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
1036 uint64_t dst = dst64 & dst64_mask; \
1038 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
1039 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
1040 uint64_t src64 = *src64_ptr; \
1041 uint64_t src = (src64 << (64 - (ip)->alu.src.n_bits)) >> (64 - (ip)->alu.dst.n_bits); \
1043 uint64_t result = dst operator src; \
1045 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
1052 #define ALU_HM_FAST ALU
1054 #define ALU_HH_FAST ALU
1058 #define ALU_I(thread, ip, operator) \
1060 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
1061 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
1062 uint64_t dst64 = *dst64_ptr; \
1063 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
1064 uint64_t dst = dst64 & dst64_mask; \
1066 uint64_t src = (ip)->alu.src_val; \
1068 uint64_t result = dst operator src; \
1070 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
1073 #define ALU_MI ALU_I
1075 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1077 #define ALU_HI(thread, ip, operator) \
1079 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
1080 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
1081 uint64_t dst64 = *dst64_ptr; \
1082 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
1083 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
1085 uint64_t src = (ip)->alu.src_val; \
1087 uint64_t result = dst operator src; \
1088 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
1090 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
1095 #define ALU_HI ALU_I
1099 #define MOV(thread, ip) \
1101 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
1102 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
1103 uint64_t dst64 = *dst64_ptr; \
1104 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
1106 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
1107 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
1108 uint64_t src64 = *src64_ptr; \
1109 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \
1110 uint64_t src = src64 & src64_mask; \
1112 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
1115 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1117 #define MOV_MH(thread, ip) \
1119 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
1120 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
1121 uint64_t dst64 = *dst64_ptr; \
1122 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
1124 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
1125 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
1126 uint64_t src64 = *src64_ptr; \
1127 uint64_t src = ntoh64(src64) >> (64 - (ip)->mov.src.n_bits); \
1129 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
1132 #define MOV_HM(thread, ip) \
1134 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
1135 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
1136 uint64_t dst64 = *dst64_ptr; \
1137 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
1139 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
1140 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
1141 uint64_t src64 = *src64_ptr; \
1142 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \
1143 uint64_t src = src64 & src64_mask; \
1145 src = hton64(src) >> (64 - (ip)->mov.dst.n_bits); \
1146 *dst64_ptr = (dst64 & ~dst64_mask) | src; \
1149 #define MOV_HH(thread, ip) \
1151 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
1152 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
1153 uint64_t dst64 = *dst64_ptr; \
1154 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
1156 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
1157 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
1158 uint64_t src64 = *src64_ptr; \
1160 uint64_t src = src64 << (64 - (ip)->mov.src.n_bits); \
1161 src = src >> (64 - (ip)->mov.dst.n_bits); \
1162 *dst64_ptr = (dst64 & ~dst64_mask) | src; \
1173 #define MOV_I(thread, ip) \
1175 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
1176 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
1177 uint64_t dst64 = *dst64_ptr; \
1178 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
1180 uint64_t src = (ip)->mov.src_val; \
1182 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
1185 #define JMP_CMP(thread, ip, operator) \
1187 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1188 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1189 uint64_t a64 = *a64_ptr; \
1190 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
1191 uint64_t a = a64 & a64_mask; \
1193 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1194 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1195 uint64_t b64 = *b64_ptr; \
1196 uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits); \
1197 uint64_t b = b64 & b64_mask; \
1199 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1202 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1204 #define JMP_CMP_MH(thread, ip, operator) \
1206 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1207 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1208 uint64_t a64 = *a64_ptr; \
1209 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
1210 uint64_t a = a64 & a64_mask; \
1212 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1213 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1214 uint64_t b64 = *b64_ptr; \
1215 uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits); \
1217 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1220 #define JMP_CMP_HM(thread, ip, operator) \
1222 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1223 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1224 uint64_t a64 = *a64_ptr; \
1225 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
1227 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1228 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1229 uint64_t b64 = *b64_ptr; \
1230 uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits); \
1231 uint64_t b = b64 & b64_mask; \
1233 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1236 #define JMP_CMP_HH(thread, ip, operator) \
1238 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1239 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1240 uint64_t a64 = *a64_ptr; \
1241 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
1243 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1244 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1245 uint64_t b64 = *b64_ptr; \
1246 uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits); \
1248 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1251 #define JMP_CMP_HH_FAST(thread, ip, operator) \
1253 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1254 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1255 uint64_t a64 = *a64_ptr; \
1256 uint64_t a = a64 << (64 - (ip)->jmp.a.n_bits); \
1258 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1259 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1260 uint64_t b64 = *b64_ptr; \
1261 uint64_t b = b64 << (64 - (ip)->jmp.b.n_bits); \
1263 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1268 #define JMP_CMP_MH JMP_CMP
1269 #define JMP_CMP_HM JMP_CMP
1270 #define JMP_CMP_HH JMP_CMP
1271 #define JMP_CMP_HH_FAST JMP_CMP
1275 #define JMP_CMP_I(thread, ip, operator) \
1277 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1278 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1279 uint64_t a64 = *a64_ptr; \
1280 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
1281 uint64_t a = a64 & a64_mask; \
1283 uint64_t b = (ip)->jmp.b_val; \
1285 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1288 #define JMP_CMP_MI JMP_CMP_I
1290 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1292 #define JMP_CMP_HI(thread, ip, operator) \
1294 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1295 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1296 uint64_t a64 = *a64_ptr; \
1297 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
1299 uint64_t b = (ip)->jmp.b_val; \
1301 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1306 #define JMP_CMP_HI JMP_CMP_I
1310 #define METADATA_READ(thread, offset, n_bits) \
1312 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
1313 uint64_t m64 = *m64_ptr; \
1314 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
1318 #define METADATA_WRITE(thread, offset, n_bits, value) \
1320 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
1321 uint64_t m64 = *m64_ptr; \
1322 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
1324 uint64_t m_new = value; \
1326 *m64_ptr = (m64 & ~m64_mask) | (m_new & m64_mask); \
1329 #ifndef RTE_SWX_PIPELINE_THREADS_MAX
1330 #define RTE_SWX_PIPELINE_THREADS_MAX 16
1333 struct rte_swx_pipeline {
1334 struct struct_type_tailq struct_types;
1335 struct port_in_type_tailq port_in_types;
1336 struct port_in_tailq ports_in;
1337 struct port_out_type_tailq port_out_types;
1338 struct port_out_tailq ports_out;
1339 struct extern_type_tailq extern_types;
1340 struct extern_obj_tailq extern_objs;
1341 struct extern_func_tailq extern_funcs;
1342 struct header_tailq headers;
1343 struct struct_type *metadata_st;
1344 uint32_t metadata_struct_id;
1345 struct action_tailq actions;
1346 struct table_type_tailq table_types;
1347 struct table_tailq tables;
1348 struct selector_tailq selectors;
1349 struct regarray_tailq regarrays;
1350 struct meter_profile_tailq meter_profiles;
1351 struct metarray_tailq metarrays;
1353 struct port_in_runtime *in;
1354 struct port_out_runtime *out;
1355 struct instruction **action_instructions;
1356 struct rte_swx_table_state *table_state;
1357 struct table_statistics *table_stats;
1358 struct selector_statistics *selector_stats;
1359 struct regarray_runtime *regarray_runtime;
1360 struct metarray_runtime *metarray_runtime;
1361 struct instruction *instructions;
1362 struct thread threads[RTE_SWX_PIPELINE_THREADS_MAX];
1365 uint32_t n_ports_in;
1366 uint32_t n_ports_out;
1367 uint32_t n_extern_objs;
1368 uint32_t n_extern_funcs;
1371 uint32_t n_selectors;
1372 uint32_t n_regarrays;
1373 uint32_t n_metarrays;
1377 uint32_t n_instructions;
1385 static struct struct_type *
1386 struct_type_find(struct rte_swx_pipeline *p, const char *name)
1388 struct struct_type *elem;
1390 TAILQ_FOREACH(elem, &p->struct_types, node)
1391 if (strcmp(elem->name, name) == 0)
1397 static struct field *
1398 struct_type_field_find(struct struct_type *st, const char *name)
1402 for (i = 0; i < st->n_fields; i++) {
1403 struct field *f = &st->fields[i];
1405 if (strcmp(f->name, name) == 0)
/* Register a new struct type with the pipeline (used later for headers,
 * metadata and extern mailboxes). Field bit offsets are assigned
 * cumulatively in declaration order, so fields are laid out back to back.
 */
1413 rte_swx_pipeline_struct_type_register(struct rte_swx_pipeline *p,
1415 struct rte_swx_field_params *fields,
1418 struct struct_type *st;
/* Argument checks: non-empty name and a non-empty field list. */
1422 CHECK_NAME(name, EINVAL);
1423 CHECK(fields, EINVAL);
1424 CHECK(n_fields, EINVAL);
1426 for (i = 0; i < n_fields; i++) {
1427 struct rte_swx_field_params *f = &fields[i];
/* Each field: valid name, size in (0, 64] bits, multiple of 8 bits. */
1430 CHECK_NAME(f->name, EINVAL);
1431 CHECK(f->n_bits, EINVAL);
1432 CHECK(f->n_bits <= 64, EINVAL);
1433 CHECK((f->n_bits & 7) == 0, EINVAL);
/* Reject duplicate field names: strcmp() != 0 passes the check. */
1435 for (j = 0; j < i; j++) {
1436 struct rte_swx_field_params *f_prev = &fields[j];
1438 CHECK(strcmp(f->name, f_prev->name), EINVAL);
/* Struct type names must be unique pipeline-wide. */
1442 CHECK(!struct_type_find(p, name), EEXIST);
1444 /* Node allocation. */
1445 st = calloc(1, sizeof(struct struct_type));
1448 st->fields = calloc(n_fields, sizeof(struct field));
1454 /* Node initialization. */
1455 strcpy(st->name, name);
1456 for (i = 0; i < n_fields; i++) {
1457 struct field *dst = &st->fields[i];
1458 struct rte_swx_field_params *src = &fields[i];
1460 strcpy(dst->name, src->name);
1461 dst->n_bits = src->n_bits;
/* Offset of this field = running bit total so far. */
1462 dst->offset = st->n_bits;
1464 st->n_bits += src->n_bits;
1466 st->n_fields = n_fields;
1468 /* Node add to tailq. */
1469 TAILQ_INSERT_TAIL(&p->struct_types, st, node);
/* Build step: give each thread an array of per-struct data pointers.
 * t->structs[struct_id] is later pointed at header storage, metadata,
 * or extern mailboxes by the other *_build() routines.
 */
1475 struct_build(struct rte_swx_pipeline *p)
1479 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1480 struct thread *t = &p->threads[i];
1482 t->structs = calloc(p->n_structs, sizeof(uint8_t *));
1483 CHECK(t->structs, ENOMEM);
/* Undo struct_build(): release each thread's pointer array. */
1490 struct_build_free(struct rte_swx_pipeline *p)
1494 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1495 struct thread *t = &p->threads[i];
/* Free all registered struct types (pops the tailq until empty;
 * loop body is partially elided in this listing).
 */
1503 struct_free(struct rte_swx_pipeline *p)
1505 struct_build_free(p);
1509 struct struct_type *elem;
1511 elem = TAILQ_FIRST(&p->struct_types);
1515 TAILQ_REMOVE(&p->struct_types, elem, node);
1524 static struct port_in_type *
1525 port_in_type_find(struct rte_swx_pipeline *p, const char *name)
1527 struct port_in_type *elem;
1532 TAILQ_FOREACH(elem, &p->port_in_types, node)
1533 if (strcmp(elem->name, name) == 0)
1540 rte_swx_pipeline_port_in_type_register(struct rte_swx_pipeline *p,
1542 struct rte_swx_port_in_ops *ops)
1544 struct port_in_type *elem;
1547 CHECK_NAME(name, EINVAL);
1549 CHECK(ops->create, EINVAL);
1550 CHECK(ops->free, EINVAL);
1551 CHECK(ops->pkt_rx, EINVAL);
1552 CHECK(ops->stats_read, EINVAL);
1554 CHECK(!port_in_type_find(p, name), EEXIST);
1556 /* Node allocation. */
1557 elem = calloc(1, sizeof(struct port_in_type));
1558 CHECK(elem, ENOMEM);
1560 /* Node initialization. */
1561 strcpy(elem->name, name);
1562 memcpy(&elem->ops, ops, sizeof(*ops));
1564 /* Node add to tailq. */
1565 TAILQ_INSERT_TAIL(&p->port_in_types, elem, node);
1570 static struct port_in *
1571 port_in_find(struct rte_swx_pipeline *p, uint32_t port_id)
1573 struct port_in *port;
1575 TAILQ_FOREACH(port, &p->ports_in, node)
1576 if (port->id == port_id)
1583 rte_swx_pipeline_port_in_config(struct rte_swx_pipeline *p,
1585 const char *port_type_name,
1588 struct port_in_type *type = NULL;
1589 struct port_in *port = NULL;
1594 CHECK(!port_in_find(p, port_id), EINVAL);
1596 CHECK_NAME(port_type_name, EINVAL);
1597 type = port_in_type_find(p, port_type_name);
1598 CHECK(type, EINVAL);
1600 obj = type->ops.create(args);
1603 /* Node allocation. */
1604 port = calloc(1, sizeof(struct port_in));
1605 CHECK(port, ENOMEM);
1607 /* Node initialization. */
1612 /* Node add to tailq. */
1613 TAILQ_INSERT_TAIL(&p->ports_in, port, node);
1614 if (p->n_ports_in < port_id + 1)
1615 p->n_ports_in = port_id + 1;
/* Build step for input ports: flatten the port tailq into the p->in
 * runtime array indexed by port ID.
 */
1621 port_in_build(struct rte_swx_pipeline *p)
1623 struct port_in *port;
/* At least one input port; count must be a power of 2 because
 * pipeline_port_inc() advances the RX port with a bit mask.
 */
1626 CHECK(p->n_ports_in, EINVAL);
1627 CHECK(rte_is_power_of_2(p->n_ports_in), EINVAL);
/* Port IDs must be contiguous: every ID in [0, n_ports_in) exists. */
1629 for (i = 0; i < p->n_ports_in; i++)
1630 CHECK(port_in_find(p, i), EINVAL);
1632 p->in = calloc(p->n_ports_in, sizeof(struct port_in_runtime));
1633 CHECK(p->in, ENOMEM);
/* Cache the RX callback and port object for fast-path use. */
1635 TAILQ_FOREACH(port, &p->ports_in, node) {
1636 struct port_in_runtime *in = &p->in[port->id];
1638 in->pkt_rx = port->type->ops.pkt_rx;
1639 in->obj = port->obj;
1646 port_in_build_free(struct rte_swx_pipeline *p)
1653 port_in_free(struct rte_swx_pipeline *p)
1655 port_in_build_free(p);
1659 struct port_in *port;
1661 port = TAILQ_FIRST(&p->ports_in);
1665 TAILQ_REMOVE(&p->ports_in, port, node);
1666 port->type->ops.free(port->obj);
1670 /* Input port types. */
1672 struct port_in_type *elem;
1674 elem = TAILQ_FIRST(&p->port_in_types);
1678 TAILQ_REMOVE(&p->port_in_types, elem, node);
1686 static struct port_out_type *
1687 port_out_type_find(struct rte_swx_pipeline *p, const char *name)
1689 struct port_out_type *elem;
1694 TAILQ_FOREACH(elem, &p->port_out_types, node)
1695 if (!strcmp(elem->name, name))
1702 rte_swx_pipeline_port_out_type_register(struct rte_swx_pipeline *p,
1704 struct rte_swx_port_out_ops *ops)
1706 struct port_out_type *elem;
1709 CHECK_NAME(name, EINVAL);
1711 CHECK(ops->create, EINVAL);
1712 CHECK(ops->free, EINVAL);
1713 CHECK(ops->pkt_tx, EINVAL);
1714 CHECK(ops->stats_read, EINVAL);
1716 CHECK(!port_out_type_find(p, name), EEXIST);
1718 /* Node allocation. */
1719 elem = calloc(1, sizeof(struct port_out_type));
1720 CHECK(elem, ENOMEM);
1722 /* Node initialization. */
1723 strcpy(elem->name, name);
1724 memcpy(&elem->ops, ops, sizeof(*ops));
1726 /* Node add to tailq. */
1727 TAILQ_INSERT_TAIL(&p->port_out_types, elem, node);
1732 static struct port_out *
1733 port_out_find(struct rte_swx_pipeline *p, uint32_t port_id)
1735 struct port_out *port;
1737 TAILQ_FOREACH(port, &p->ports_out, node)
1738 if (port->id == port_id)
1745 rte_swx_pipeline_port_out_config(struct rte_swx_pipeline *p,
1747 const char *port_type_name,
1750 struct port_out_type *type = NULL;
1751 struct port_out *port = NULL;
1756 CHECK(!port_out_find(p, port_id), EINVAL);
1758 CHECK_NAME(port_type_name, EINVAL);
1759 type = port_out_type_find(p, port_type_name);
1760 CHECK(type, EINVAL);
1762 obj = type->ops.create(args);
1765 /* Node allocation. */
1766 port = calloc(1, sizeof(struct port_out));
1767 CHECK(port, ENOMEM);
1769 /* Node initialization. */
1774 /* Node add to tailq. */
1775 TAILQ_INSERT_TAIL(&p->ports_out, port, node);
1776 if (p->n_ports_out < port_id + 1)
1777 p->n_ports_out = port_id + 1;
/* Build step for output ports: flatten the port tailq into the p->out
 * runtime array indexed by port ID. Unlike input ports, the count is
 * not required to be a power of 2 (TX port is selected explicitly).
 */
1783 port_out_build(struct rte_swx_pipeline *p)
1785 struct port_out *port;
1788 CHECK(p->n_ports_out, EINVAL);
/* Port IDs must be contiguous: every ID in [0, n_ports_out) exists. */
1790 for (i = 0; i < p->n_ports_out; i++)
1791 CHECK(port_out_find(p, i), EINVAL);
1793 p->out = calloc(p->n_ports_out, sizeof(struct port_out_runtime));
1794 CHECK(p->out, ENOMEM);
/* Cache the TX/flush callbacks and port object for fast-path use. */
1796 TAILQ_FOREACH(port, &p->ports_out, node) {
1797 struct port_out_runtime *out = &p->out[port->id];
1799 out->pkt_tx = port->type->ops.pkt_tx;
1800 out->flush = port->type->ops.flush;
1801 out->obj = port->obj;
1808 port_out_build_free(struct rte_swx_pipeline *p)
1815 port_out_free(struct rte_swx_pipeline *p)
1817 port_out_build_free(p);
1821 struct port_out *port;
1823 port = TAILQ_FIRST(&p->ports_out);
1827 TAILQ_REMOVE(&p->ports_out, port, node);
1828 port->type->ops.free(port->obj);
1832 /* Output port types. */
1834 struct port_out_type *elem;
1836 elem = TAILQ_FIRST(&p->port_out_types);
1840 TAILQ_REMOVE(&p->port_out_types, elem, node);
1848 static struct extern_type *
1849 extern_type_find(struct rte_swx_pipeline *p, const char *name)
1851 struct extern_type *elem;
1853 TAILQ_FOREACH(elem, &p->extern_types, node)
1854 if (strcmp(elem->name, name) == 0)
1860 static struct extern_type_member_func *
1861 extern_type_member_func_find(struct extern_type *type, const char *name)
1863 struct extern_type_member_func *elem;
1865 TAILQ_FOREACH(elem, &type->funcs, node)
1866 if (strcmp(elem->name, name) == 0)
1872 static struct extern_obj *
1873 extern_obj_find(struct rte_swx_pipeline *p, const char *name)
1875 struct extern_obj *elem;
1877 TAILQ_FOREACH(elem, &p->extern_objs, node)
1878 if (strcmp(elem->name, name) == 0)
1884 static struct extern_type_member_func *
1885 extern_obj_member_func_parse(struct rte_swx_pipeline *p,
1887 struct extern_obj **obj)
1889 struct extern_obj *object;
1890 struct extern_type_member_func *func;
1891 char *object_name, *func_name;
1893 if (name[0] != 'e' || name[1] != '.')
1896 object_name = strdup(&name[2]);
1900 func_name = strchr(object_name, '.');
1909 object = extern_obj_find(p, object_name);
1915 func = extern_type_member_func_find(object->type, func_name);
1928 static struct field *
1929 extern_obj_mailbox_field_parse(struct rte_swx_pipeline *p,
1931 struct extern_obj **object)
1933 struct extern_obj *obj;
1935 char *obj_name, *field_name;
1937 if ((name[0] != 'e') || (name[1] != '.'))
1940 obj_name = strdup(&name[2]);
1944 field_name = strchr(obj_name, '.');
1953 obj = extern_obj_find(p, obj_name);
1959 f = struct_type_field_find(obj->type->mailbox_struct_type, field_name);
1973 rte_swx_pipeline_extern_type_register(struct rte_swx_pipeline *p,
1975 const char *mailbox_struct_type_name,
1976 rte_swx_extern_type_constructor_t constructor,
1977 rte_swx_extern_type_destructor_t destructor)
1979 struct extern_type *elem;
1980 struct struct_type *mailbox_struct_type;
1984 CHECK_NAME(name, EINVAL);
1985 CHECK(!extern_type_find(p, name), EEXIST);
1987 CHECK_NAME(mailbox_struct_type_name, EINVAL);
1988 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
1989 CHECK(mailbox_struct_type, EINVAL);
1991 CHECK(constructor, EINVAL);
1992 CHECK(destructor, EINVAL);
1994 /* Node allocation. */
1995 elem = calloc(1, sizeof(struct extern_type));
1996 CHECK(elem, ENOMEM);
1998 /* Node initialization. */
1999 strcpy(elem->name, name);
2000 elem->mailbox_struct_type = mailbox_struct_type;
2001 elem->constructor = constructor;
2002 elem->destructor = destructor;
2003 TAILQ_INIT(&elem->funcs);
2005 /* Node add to tailq. */
2006 TAILQ_INSERT_TAIL(&p->extern_types, elem, node);
2012 rte_swx_pipeline_extern_type_member_func_register(struct rte_swx_pipeline *p,
2013 const char *extern_type_name,
2015 rte_swx_extern_type_member_func_t member_func)
2017 struct extern_type *type;
2018 struct extern_type_member_func *type_member;
2022 CHECK_NAME(extern_type_name, EINVAL);
2023 type = extern_type_find(p, extern_type_name);
2024 CHECK(type, EINVAL);
2025 CHECK(type->n_funcs < RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX, ENOSPC);
2027 CHECK_NAME(name, EINVAL);
2028 CHECK(!extern_type_member_func_find(type, name), EEXIST);
2030 CHECK(member_func, EINVAL);
2032 /* Node allocation. */
2033 type_member = calloc(1, sizeof(struct extern_type_member_func));
2034 CHECK(type_member, ENOMEM);
2036 /* Node initialization. */
2037 strcpy(type_member->name, name);
2038 type_member->func = member_func;
2039 type_member->id = type->n_funcs;
2041 /* Node add to tailq. */
2042 TAILQ_INSERT_TAIL(&type->funcs, type_member, node);
/* Instantiate an extern object of a previously registered extern type.
 * The type's constructor is invoked with the caller-supplied args; the
 * object gets the next struct_id slot (for its per-thread mailbox) and
 * the next extern object ID.
 */
2049 rte_swx_pipeline_extern_object_config(struct rte_swx_pipeline *p,
2050 const char *extern_type_name,
2054 struct extern_type *type;
2055 struct extern_obj *obj;
2060 CHECK_NAME(extern_type_name, EINVAL);
2061 type = extern_type_find(p, extern_type_name);
2062 CHECK(type, EINVAL);
/* Object names must be unique pipeline-wide. */
2064 CHECK_NAME(name, EINVAL);
2065 CHECK(!extern_obj_find(p, name), EEXIST);
2067 /* Node allocation. */
2068 obj = calloc(1, sizeof(struct extern_obj));
2071 /* Object construction. */
2072 obj_handle = type->constructor(args);
2078 /* Node initialization. */
2079 strcpy(obj->name, name);
2081 obj->obj = obj_handle;
2082 obj->struct_id = p->n_structs;
2083 obj->id = p->n_extern_objs;
2085 /* Node add to tailq. */
2086 TAILQ_INSERT_TAIL(&p->extern_objs, obj, node);
/* Build step for extern objects: per thread, allocate the runtime array
 * and, for each object, a mailbox sized from its mailbox struct type and
 * a member-function jump table indexed by function ID. The mailbox is
 * also published through t->structs[] so instructions can address its
 * fields like any other struct.
 */
2094 extern_obj_build(struct rte_swx_pipeline *p)
2098 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2099 struct thread *t = &p->threads[i];
2100 struct extern_obj *obj;
2102 t->extern_objs = calloc(p->n_extern_objs,
2103 sizeof(struct extern_obj_runtime));
2104 CHECK(t->extern_objs, ENOMEM);
2106 TAILQ_FOREACH(obj, &p->extern_objs, node) {
2107 struct extern_obj_runtime *r =
2108 &t->extern_objs[obj->id];
2109 struct extern_type_member_func *func;
/* Mailbox size in bytes; struct fields are byte-aligned by
 * construction (see struct type registration checks).
 */
2110 uint32_t mailbox_size =
2111 obj->type->mailbox_struct_type->n_bits / 8;
2115 r->mailbox = calloc(1, mailbox_size);
2116 CHECK(r->mailbox, ENOMEM);
2118 TAILQ_FOREACH(func, &obj->type->funcs, node)
2119 r->funcs[func->id] = func->func;
2121 t->structs[obj->struct_id] = r->mailbox;
2129 extern_obj_build_free(struct rte_swx_pipeline *p)
2133 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2134 struct thread *t = &p->threads[i];
2137 if (!t->extern_objs)
2140 for (j = 0; j < p->n_extern_objs; j++) {
2141 struct extern_obj_runtime *r = &t->extern_objs[j];
2146 free(t->extern_objs);
2147 t->extern_objs = NULL;
2152 extern_obj_free(struct rte_swx_pipeline *p)
2154 extern_obj_build_free(p);
2156 /* Extern objects. */
2158 struct extern_obj *elem;
2160 elem = TAILQ_FIRST(&p->extern_objs);
2164 TAILQ_REMOVE(&p->extern_objs, elem, node);
2166 elem->type->destructor(elem->obj);
2172 struct extern_type *elem;
2174 elem = TAILQ_FIRST(&p->extern_types);
2178 TAILQ_REMOVE(&p->extern_types, elem, node);
2181 struct extern_type_member_func *func;
2183 func = TAILQ_FIRST(&elem->funcs);
2187 TAILQ_REMOVE(&elem->funcs, func, node);
2198 static struct extern_func *
2199 extern_func_find(struct rte_swx_pipeline *p, const char *name)
2201 struct extern_func *elem;
2203 TAILQ_FOREACH(elem, &p->extern_funcs, node)
2204 if (strcmp(elem->name, name) == 0)
2210 static struct extern_func *
2211 extern_func_parse(struct rte_swx_pipeline *p,
2214 if (name[0] != 'f' || name[1] != '.')
2217 return extern_func_find(p, &name[2]);
2220 static struct field *
2221 extern_func_mailbox_field_parse(struct rte_swx_pipeline *p,
2223 struct extern_func **function)
2225 struct extern_func *func;
2227 char *func_name, *field_name;
2229 if ((name[0] != 'f') || (name[1] != '.'))
2232 func_name = strdup(&name[2]);
2236 field_name = strchr(func_name, '.');
2245 func = extern_func_find(p, func_name);
2251 f = struct_type_field_find(func->mailbox_struct_type, field_name);
/* Register an extern function with the pipeline. The function receives a
 * per-thread mailbox of the given struct type, through which instructions
 * pass arguments and read back results.
 *
 * Fix: the allocation check after calloc() previously tested the "func"
 * callback argument (already validated non-NULL at line 2282) instead of
 * the freshly allocated node "f", so a calloc() failure went undetected
 * and strcpy(f->name, ...) would dereference NULL. Check "f" instead.
 */
2265 rte_swx_pipeline_extern_func_register(struct rte_swx_pipeline *p,
2267 const char *mailbox_struct_type_name,
2268 rte_swx_extern_func_t func)
2270 struct extern_func *f;
2271 struct struct_type *mailbox_struct_type;
/* Function names must be unique pipeline-wide. */
2275 CHECK_NAME(name, EINVAL);
2276 CHECK(!extern_func_find(p, name), EEXIST);
2278 CHECK_NAME(mailbox_struct_type_name, EINVAL);
2279 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
2280 CHECK(mailbox_struct_type, EINVAL);
2282 CHECK(func, EINVAL);
2284 /* Node allocation. */
2285 f = calloc(1, sizeof(struct extern_func));
2286 CHECK(f, ENOMEM);
2288 /* Node initialization. */
2289 strcpy(f->name, name);
2290 f->mailbox_struct_type = mailbox_struct_type;
2292 f->struct_id = p->n_structs;
2293 f->id = p->n_extern_funcs;
2295 /* Node add to tailq. */
2296 TAILQ_INSERT_TAIL(&p->extern_funcs, f, node);
2297 p->n_extern_funcs++;
/* Build step for extern functions: per thread, allocate the runtime
 * array and one mailbox per function, then publish each mailbox through
 * t->structs[] so instructions can address its fields.
 */
2304 extern_func_build(struct rte_swx_pipeline *p)
2308 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2309 struct thread *t = &p->threads[i];
2310 struct extern_func *func;
2312 /* Memory allocation. */
2313 t->extern_funcs = calloc(p->n_extern_funcs,
2314 sizeof(struct extern_func_runtime));
2315 CHECK(t->extern_funcs, ENOMEM);
2317 /* Extern function. */
2318 TAILQ_FOREACH(func, &p->extern_funcs, node) {
2319 struct extern_func_runtime *r =
2320 &t->extern_funcs[func->id];
/* Mailbox size in bytes (struct fields are byte-aligned). */
2321 uint32_t mailbox_size =
2322 func->mailbox_struct_type->n_bits / 8;
2324 r->func = func->func;
2326 r->mailbox = calloc(1, mailbox_size);
2327 CHECK(r->mailbox, ENOMEM);
2329 t->structs[func->struct_id] = r->mailbox;
2337 extern_func_build_free(struct rte_swx_pipeline *p)
2341 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2342 struct thread *t = &p->threads[i];
2345 if (!t->extern_funcs)
2348 for (j = 0; j < p->n_extern_funcs; j++) {
2349 struct extern_func_runtime *r = &t->extern_funcs[j];
2354 free(t->extern_funcs);
2355 t->extern_funcs = NULL;
2360 extern_func_free(struct rte_swx_pipeline *p)
2362 extern_func_build_free(p);
2365 struct extern_func *elem;
2367 elem = TAILQ_FIRST(&p->extern_funcs);
2371 TAILQ_REMOVE(&p->extern_funcs, elem, node);
2379 static struct header *
2380 header_find(struct rte_swx_pipeline *p, const char *name)
2382 struct header *elem;
2384 TAILQ_FOREACH(elem, &p->headers, node)
2385 if (strcmp(elem->name, name) == 0)
2391 static struct header *
2392 header_find_by_struct_id(struct rte_swx_pipeline *p, uint32_t struct_id)
2394 struct header *elem;
2396 TAILQ_FOREACH(elem, &p->headers, node)
2397 if (elem->struct_id == struct_id)
2403 static struct header *
2404 header_parse(struct rte_swx_pipeline *p,
2407 if (name[0] != 'h' || name[1] != '.')
2410 return header_find(p, &name[2]);
2413 static struct field *
2414 header_field_parse(struct rte_swx_pipeline *p,
2416 struct header **header)
2420 char *header_name, *field_name;
2422 if ((name[0] != 'h') || (name[1] != '.'))
2425 header_name = strdup(&name[2]);
2429 field_name = strchr(header_name, '.');
2438 h = header_find(p, header_name);
2444 f = struct_type_field_find(h->st, field_name);
/* Register a packet header of a previously registered struct type.
 * Header count is capped by the bit width of the per-thread
 * valid_headers mask (one valid/invalid bit per header).
 */
2458 rte_swx_pipeline_packet_header_register(struct rte_swx_pipeline *p,
2460 const char *struct_type_name)
2462 struct struct_type *st;
2464 size_t n_headers_max;
2467 CHECK_NAME(name, EINVAL);
2468 CHECK_NAME(struct_type_name, EINVAL);
/* Header names must be unique pipeline-wide. */
2470 CHECK(!header_find(p, name), EEXIST);
2472 st = struct_type_find(p, struct_type_name);
/* One bit of thread->valid_headers per header. */
2475 n_headers_max = RTE_SIZEOF_FIELD(struct thread, valid_headers) * 8;
2476 CHECK(p->n_headers < n_headers_max, ENOSPC);
2478 /* Node allocation. */
2479 h = calloc(1, sizeof(struct header));
2482 /* Node initialization. */
2483 strcpy(h->name, name);
2485 h->struct_id = p->n_structs;
2486 h->id = p->n_headers;
2488 /* Node add to tailq. */
2489 TAILQ_INSERT_TAIL(&p->headers, h, node);
/* Build step for headers: per thread, allocate the header runtime
 * arrays plus two flat byte buffers (incoming header storage and
 * outgoing emit staging), then carve per-header slices out of the
 * storage buffer and publish them via t->structs[].
 */
2497 header_build(struct rte_swx_pipeline *p)
/* Total byte size of all registered headers combined. */
2500 uint32_t n_bytes = 0, i;
2502 TAILQ_FOREACH(h, &p->headers, node) {
2503 n_bytes += h->st->n_bits / 8;
2506 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2507 struct thread *t = &p->threads[i];
2508 uint32_t offset = 0;
2510 t->headers = calloc(p->n_headers,
2511 sizeof(struct header_runtime));
2512 CHECK(t->headers, ENOMEM);
2514 t->headers_out = calloc(p->n_headers,
2515 sizeof(struct header_out_runtime));
2516 CHECK(t->headers_out, ENOMEM);
2518 t->header_storage = calloc(1, n_bytes);
2519 CHECK(t->header_storage, ENOMEM);
2521 t->header_out_storage = calloc(1, n_bytes);
2522 CHECK(t->header_out_storage, ENOMEM);
2524 TAILQ_FOREACH(h, &p->headers, node) {
2525 uint8_t *header_storage;
/* NOTE(review): this inner n_bytes (size of ONE header)
 * shadows the outer total-size n_bytes above.
 */
2526 uint32_t n_bytes = h->st->n_bits / 8;
2528 header_storage = &t->header_storage[offset];
/* ptr0 = this header's home slot in the storage buffer. */
2531 t->headers[h->id].ptr0 = header_storage;
2532 t->headers[h->id].n_bytes = n_bytes;
2534 t->structs[h->struct_id] = header_storage;
2542 header_build_free(struct rte_swx_pipeline *p)
2546 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2547 struct thread *t = &p->threads[i];
2549 free(t->headers_out);
2550 t->headers_out = NULL;
2555 free(t->header_out_storage);
2556 t->header_out_storage = NULL;
2558 free(t->header_storage);
2559 t->header_storage = NULL;
2564 header_free(struct rte_swx_pipeline *p)
2566 header_build_free(p);
2569 struct header *elem;
2571 elem = TAILQ_FIRST(&p->headers);
2575 TAILQ_REMOVE(&p->headers, elem, node);
2583 static struct field *
2584 metadata_field_parse(struct rte_swx_pipeline *p, const char *name)
2586 if (!p->metadata_st)
2589 if (name[0] != 'm' || name[1] != '.')
2592 return struct_type_field_find(p->metadata_st, &name[2]);
2596 rte_swx_pipeline_packet_metadata_register(struct rte_swx_pipeline *p,
2597 const char *struct_type_name)
2599 struct struct_type *st = NULL;
2603 CHECK_NAME(struct_type_name, EINVAL);
2604 st = struct_type_find(p, struct_type_name);
2606 CHECK(!p->metadata_st, EINVAL);
2608 p->metadata_st = st;
2609 p->metadata_struct_id = p->n_structs;
/* Build step for metadata: per thread, allocate one zeroed metadata
 * buffer sized from the registered metadata struct type and publish it
 * via t->structs[] under the metadata struct ID.
 * NOTE(review): dereferences p->metadata_st unconditionally — presumably
 * a metadata struct must have been registered before build; confirm at
 * the build entry point.
 */
2617 metadata_build(struct rte_swx_pipeline *p)
2619 uint32_t n_bytes = p->metadata_st->n_bits / 8;
2622 /* Thread-level initialization. */
2623 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2624 struct thread *t = &p->threads[i];
2627 metadata = calloc(1, n_bytes);
2628 CHECK(metadata, ENOMEM);
2630 t->metadata = metadata;
2631 t->structs[p->metadata_struct_id] = metadata;
2638 metadata_build_free(struct rte_swx_pipeline *p)
2642 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2643 struct thread *t = &p->threads[i];
2651 metadata_free(struct rte_swx_pipeline *p)
2653 metadata_build_free(p);
2660 instruction_is_tx(enum instruction_type type)
2673 instruction_is_jmp(struct instruction *instr)
2675 switch (instr->type) {
2677 case INSTR_JMP_VALID:
2678 case INSTR_JMP_INVALID:
2680 case INSTR_JMP_MISS:
2681 case INSTR_JMP_ACTION_HIT:
2682 case INSTR_JMP_ACTION_MISS:
2684 case INSTR_JMP_EQ_MH:
2685 case INSTR_JMP_EQ_HM:
2686 case INSTR_JMP_EQ_HH:
2687 case INSTR_JMP_EQ_I:
2689 case INSTR_JMP_NEQ_MH:
2690 case INSTR_JMP_NEQ_HM:
2691 case INSTR_JMP_NEQ_HH:
2692 case INSTR_JMP_NEQ_I:
2694 case INSTR_JMP_LT_MH:
2695 case INSTR_JMP_LT_HM:
2696 case INSTR_JMP_LT_HH:
2697 case INSTR_JMP_LT_MI:
2698 case INSTR_JMP_LT_HI:
2700 case INSTR_JMP_GT_MH:
2701 case INSTR_JMP_GT_HM:
2702 case INSTR_JMP_GT_HH:
2703 case INSTR_JMP_GT_MI:
2704 case INSTR_JMP_GT_HI:
2712 static struct field *
2713 action_field_parse(struct action *action, const char *name);
/* Resolve an instruction operand of the form "<prefix>.<field>" to a
 * (field, struct_id) pair: header fields ("h."), metadata ("m."),
 * action arguments, extern object mailboxes ("e.") and extern function
 * mailboxes ("f."). The prefix dispatch itself is elided in this
 * listing; the cases below are the individual resolution paths.
 */
2715 static struct field *
2716 struct_field_parse(struct rte_swx_pipeline *p,
2717 struct action *action,
2719 uint32_t *struct_id)
/* Header field: "h.<header>.<field>". */
2726 struct header *header;
2728 f = header_field_parse(p, name, &header);
2732 *struct_id = header->struct_id;
/* Metadata field: "m.<field>". */
2738 f = metadata_field_parse(p, name);
2742 *struct_id = p->metadata_struct_id;
/* Action argument (only valid inside an action). */
2751 f = action_field_parse(action, name);
/* Extern object mailbox field: "e.<obj>.<field>". */
2761 struct extern_obj *obj;
2763 f = extern_obj_mailbox_field_parse(p, name, &obj);
2767 *struct_id = obj->struct_id;
/* Extern function mailbox field: "f.<func>.<field>". */
2773 struct extern_func *func;
2775 f = extern_func_mailbox_field_parse(p, name, &func);
2779 *struct_id = func->struct_id;
/* Advance the round-robin RX port. The mask trick requires n_ports_in
 * to be a power of 2, which port_in_build() enforces.
 */
2789 pipeline_port_inc(struct rte_swx_pipeline *p)
2791 p->port_id = (p->port_id + 1) & (p->n_ports_in - 1);
/* Point the thread's instruction pointer back at the program start. */
2795 thread_ip_reset(struct rte_swx_pipeline *p, struct thread *t)
2797 t->ip = p->instructions;
2801 thread_ip_set(struct thread *t, struct instruction *ip)
/* Jump the current thread into the first instruction of an action. */
2807 thread_ip_action_call(struct rte_swx_pipeline *p,
2812 t->ip = p->action_instructions[action_id];
2816 thread_ip_inc(struct rte_swx_pipeline *p);
2819 thread_ip_inc(struct rte_swx_pipeline *p)
2821 struct thread *t = &p->threads[p->thread_id];
/* Advance the instruction pointer only when cond is non-zero. */
2827 thread_ip_inc_cond(struct thread *t, int cond)
/* Cooperative scheduling: rotate to the next thread slot. The mask
 * requires RTE_SWX_PIPELINE_THREADS_MAX to be a power of 2.
 */
2833 thread_yield(struct rte_swx_pipeline *p)
2835 p->thread_id = (p->thread_id + 1) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
/* Conditional yield: advances the thread slot only when cond != 0. */
2839 thread_yield_cond(struct rte_swx_pipeline *p, int cond)
2841 p->thread_id = (p->thread_id + cond) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
2848 instr_rx_translate(struct rte_swx_pipeline *p,
2849 struct action *action,
2852 struct instruction *instr,
2853 struct instruction_data *data __rte_unused)
2857 CHECK(!action, EINVAL);
2858 CHECK(n_tokens == 2, EINVAL);
2860 f = metadata_field_parse(p, tokens[1]);
2863 instr->type = INSTR_RX;
2864 instr->io.io.offset = f->offset / 8;
2865 instr->io.io.n_bits = f->n_bits;
2870 instr_rx_exec(struct rte_swx_pipeline *p);
/* Execute the rx instruction: poll the current RX port for one packet,
 * reset per-packet thread state, record the input port ID into the
 * metadata field chosen at translation time, and advance the IP only if
 * a packet was actually received (otherwise the thread retries rx).
 */
2873 instr_rx_exec(struct rte_swx_pipeline *p)
2875 struct thread *t = &p->threads[p->thread_id];
2876 struct instruction *ip = t->ip;
2877 struct port_in_runtime *port = &p->in[p->port_id];
2878 struct rte_swx_pkt *pkt = &t->pkt;
2882 pkt_received = port->pkt_rx(port->obj, pkt);
/* Warm the cache line where header parsing will start. */
2883 t->ptr = &pkt->pkt[pkt->offset];
2884 rte_prefetch0(t->ptr);
2886 TRACE("[Thread %2u] rx %s from port %u\n",
2888 pkt_received ? "1 pkt" : "0 pkts",
/* Fresh packet: no headers parsed, no headers queued for emit. */
2892 t->valid_headers = 0;
2893 t->n_headers_out = 0;
/* Store the input port ID into the designated metadata field. */
2896 METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, p->port_id);
/* Snapshot the table state for this packet's traversal. */
2899 t->table_state = p->table_state;
2902 pipeline_port_inc(p);
2903 thread_ip_inc_cond(t, pkt_received);
/* Translate "tx <port>": the operand is either a metadata field holding
 * the output port ID (INSTR_TX) or an immediate integer (INSTR_TX_I).
 */
2911 instr_tx_translate(struct rte_swx_pipeline *p,
2912 struct action *action __rte_unused,
2915 struct instruction *instr,
2916 struct instruction_data *data __rte_unused)
2918 char *port = tokens[1];
2922 CHECK(n_tokens == 2, EINVAL);
/* Case 1: operand names a metadata field ("m.<field>"). */
2924 f = metadata_field_parse(p, port);
2926 instr->type = INSTR_TX;
2927 instr->io.io.offset = f->offset / 8;
2928 instr->io.io.n_bits = f->n_bits;
/* Case 2: operand is an immediate port ID; strtoul must consume the
 * whole token (any trailing character is an error).
 */
2933 port_val = strtoul(port, &port, 0);
2934 CHECK(!port[0], EINVAL);
2936 instr->type = INSTR_TX_I;
2937 instr->io.io.val = port_val;
/* Translate "drop": lowered to an immediate tx to the highest-numbered
 * output port, which by convention is the drop port.
 */
2942 instr_drop_translate(struct rte_swx_pipeline *p,
2943 struct action *action __rte_unused,
2944 char **tokens __rte_unused,
2946 struct instruction *instr,
2947 struct instruction_data *data __rte_unused)
2949 CHECK(n_tokens == 1, EINVAL);
2952 instr->type = INSTR_TX_I;
2953 instr->io.io.val = p->n_ports_out - 1;
/* Write the queued outgoing headers back in front of the payload at tx
 * time. Two fast paths avoid copying: (1) a single contiguous run that
 * already ends exactly at the payload, (2) a new first header in its
 * home slot followed by a run that ends at the payload. Everything else
 * falls through to the generic gather-then-copy path.
 */
2958 emit_handler(struct thread *t)
2960 struct header_out_runtime *h0 = &t->headers_out[0];
2961 struct header_out_runtime *h1 = &t->headers_out[1];
2962 uint32_t offset = 0, i;
2964 /* No header change or header decapsulation. */
2965 if ((t->n_headers_out == 1) &&
2966 (h0->ptr + h0->n_bytes == t->ptr)) {
2967 TRACE("Emit handler: no header change or header decap.\n");
/* Headers are already in place: just grow the packet window
 * backwards over them, no copy needed.
 */
2969 t->pkt.offset -= h0->n_bytes;
2970 t->pkt.length += h0->n_bytes;
2975 /* Header encapsulation (optionally, with prior header decapsulation). */
2976 if ((t->n_headers_out == 2) &&
2977 (h1->ptr + h1->n_bytes == t->ptr) &&
2978 (h0->ptr == h0->ptr0)) {
2981 TRACE("Emit handler: header encapsulation.\n");
/* Only the new outer header (h0) needs copying in front of the
 * in-place run (h1) that already abuts the payload.
 */
2983 offset = h0->n_bytes + h1->n_bytes;
2984 memcpy(t->ptr - offset, h0->ptr, h0->n_bytes);
2985 t->pkt.offset -= offset;
2986 t->pkt.length += offset;
2991 /* Header insertion. */
2994 /* Header extraction. */
2997 /* For any other case. */
2998 TRACE("Emit handler: complex case.\n");
/* Gather all outgoing headers into the staging buffer, then copy
 * the whole run in front of the payload in one shot.
 */
3000 for (i = 0; i < t->n_headers_out; i++) {
3001 struct header_out_runtime *h = &t->headers_out[i];
3003 memcpy(&t->header_out_storage[offset], h->ptr, h->n_bytes);
3004 offset += h->n_bytes;
3008 memcpy(t->ptr - offset, t->header_out_storage, offset);
3009 t->pkt.offset -= offset;
3010 t->pkt.length += offset;
3015 instr_tx_exec(struct rte_swx_pipeline *p);
3018 instr_tx_exec(struct rte_swx_pipeline *p)
3020 struct thread *t = &p->threads[p->thread_id];
3021 struct instruction *ip = t->ip;
3022 uint64_t port_id = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
3023 struct port_out_runtime *port = &p->out[port_id];
3024 struct rte_swx_pkt *pkt = &t->pkt;
3026 TRACE("[Thread %2u]: tx 1 pkt to port %u\n",
3034 port->pkt_tx(port->obj, pkt);
3037 thread_ip_reset(p, t);
3042 instr_tx_i_exec(struct rte_swx_pipeline *p)
3044 struct thread *t = &p->threads[p->thread_id];
3045 struct instruction *ip = t->ip;
3046 uint64_t port_id = ip->io.io.val;
3047 struct port_out_runtime *port = &p->out[port_id];
3048 struct rte_swx_pkt *pkt = &t->pkt;
3050 TRACE("[Thread %2u]: tx (i) 1 pkt to port %u\n",
3058 port->pkt_tx(port->obj, pkt);
3061 thread_ip_reset(p, t);
3069 instr_hdr_extract_translate(struct rte_swx_pipeline *p,
3070 struct action *action,
3073 struct instruction *instr,
3074 struct instruction_data *data __rte_unused)
3078 CHECK(!action, EINVAL);
3079 CHECK(n_tokens == 2, EINVAL);
3081 h = header_parse(p, tokens[1]);
3084 instr->type = INSTR_HDR_EXTRACT;
3085 instr->io.hdr.header_id[0] = h->id;
3086 instr->io.hdr.struct_id[0] = h->struct_id;
3087 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
3092 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract);
/* Common worker for the (possibly fused) extract instructions: parse
 * n_extract consecutive headers directly out of the packet buffer.
 * Headers are NOT copied — t->structs[struct_id] is pointed at the
 * packet bytes in place, and the header's valid bit is set. The local
 * ptr/offset/length advance per header (advance lines elided in this
 * listing) and are written back at the end.
 */
3095 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract)
3097 struct thread *t = &p->threads[p->thread_id];
3098 struct instruction *ip = t->ip;
3099 uint64_t valid_headers = t->valid_headers;
3100 uint8_t *ptr = t->ptr;
3101 uint32_t offset = t->pkt.offset;
3102 uint32_t length = t->pkt.length;
3105 for (i = 0; i < n_extract; i++) {
3106 uint32_t header_id = ip->io.hdr.header_id[i];
3107 uint32_t struct_id = ip->io.hdr.struct_id[i];
3108 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
3110 TRACE("[Thread %2u]: extract header %u (%u bytes)\n",
/* Zero-copy: header fields alias the packet buffer. */
3116 t->structs[struct_id] = ptr;
3117 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
/* Commit the batched updates to thread state. */
3126 t->valid_headers = valid_headers;
3129 t->pkt.offset = offset;
3130 t->pkt.length = length;
3135 instr_hdr_extract_exec(struct rte_swx_pipeline *p)
3137 __instr_hdr_extract_exec(p, 1);
3144 instr_hdr_extract2_exec(struct rte_swx_pipeline *p)
3146 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
3149 __instr_hdr_extract_exec(p, 2);
3156 instr_hdr_extract3_exec(struct rte_swx_pipeline *p)
3158 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
3161 __instr_hdr_extract_exec(p, 3);
3168 instr_hdr_extract4_exec(struct rte_swx_pipeline *p)
3170 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
3173 __instr_hdr_extract_exec(p, 4);
3180 instr_hdr_extract5_exec(struct rte_swx_pipeline *p)
3182 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
3185 __instr_hdr_extract_exec(p, 5);
3192 instr_hdr_extract6_exec(struct rte_swx_pipeline *p)
3194 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
3197 __instr_hdr_extract_exec(p, 6);
3204 instr_hdr_extract7_exec(struct rte_swx_pipeline *p)
3206 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
3209 __instr_hdr_extract_exec(p, 7);
3216 instr_hdr_extract8_exec(struct rte_swx_pipeline *p)
3218 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
3221 __instr_hdr_extract_exec(p, 8);
3231 instr_hdr_emit_translate(struct rte_swx_pipeline *p,
3232 struct action *action __rte_unused,
3235 struct instruction *instr,
3236 struct instruction_data *data __rte_unused)
3240 CHECK(n_tokens == 2, EINVAL);
3242 h = header_parse(p, tokens[1]);
3245 instr->type = INSTR_HDR_EMIT;
3246 instr->io.hdr.header_id[0] = h->id;
3247 instr->io.hdr.struct_id[0] = h->struct_id;
3248 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
3253 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit);
/* Common worker for the (possibly fused) emit instructions: append up
 * to n_emit valid headers to the thread's outgoing header list. Headers
 * whose bytes are contiguous in memory are merged into a single
 * headers_out entry (the ho_ptr + ho_nbytes == hi_ptr test) so the tx
 * emit_handler can copy them as one run. Invalid headers are skipped.
 * Several branch bodies are elided in this listing.
 */
3256 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit)
3258 struct thread *t = &p->threads[p->thread_id];
3259 struct instruction *ip = t->ip;
3260 uint64_t valid_headers = t->valid_headers;
3261 uint32_t n_headers_out = t->n_headers_out;
/* Current (last) outgoing entry; only meaningful when
 * n_headers_out > 0 — the first-emit branch below resets it.
 */
3262 struct header_out_runtime *ho = &t->headers_out[n_headers_out - 1];
3263 uint8_t *ho_ptr = NULL;
3264 uint32_t ho_nbytes = 0, first = 1, i;
3266 for (i = 0; i < n_emit; i++) {
3267 uint32_t header_id = ip->io.hdr.header_id[i];
3268 uint32_t struct_id = ip->io.hdr.struct_id[i];
3270 struct header_runtime *hi = &t->headers[header_id];
3271 uint8_t *hi_ptr0 = hi->ptr0;
3272 uint32_t n_bytes = hi->n_bytes;
/* Current location of this header's bytes (may alias the
 * packet buffer after extract, or the home slot ptr0).
 */
3274 uint8_t *hi_ptr = t->structs[struct_id];
/* Skip headers that are not currently valid. */
3276 if (!MASK64_BIT_GET(valid_headers, header_id))
3279 TRACE("[Thread %2u]: emit header %u\n",
/* First header ever emitted: start entry 0 from scratch. */
3287 if (!t->n_headers_out) {
3288 ho = &t->headers_out[0];
3294 ho_nbytes = n_bytes;
/* Continue from the existing last entry. */
3301 ho_nbytes = ho->n_bytes;
/* Contiguous with the current run: extend it in place. */
3305 if (ho_ptr + ho_nbytes == hi_ptr) {
3306 ho_nbytes += n_bytes;
/* Not contiguous: close the current run and open a new one. */
3308 ho->n_bytes = ho_nbytes;
3315 ho_nbytes = n_bytes;
/* Commit the final run length and the updated entry count. */
3321 ho->n_bytes = ho_nbytes;
3322 t->n_headers_out = n_headers_out;
3326 instr_hdr_emit_exec(struct rte_swx_pipeline *p)
3328 __instr_hdr_emit_exec(p, 1);
3335 instr_hdr_emit_tx_exec(struct rte_swx_pipeline *p)
3337 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
3340 __instr_hdr_emit_exec(p, 1);
3345 instr_hdr_emit2_tx_exec(struct rte_swx_pipeline *p)
3347 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
3350 __instr_hdr_emit_exec(p, 2);
3355 instr_hdr_emit3_tx_exec(struct rte_swx_pipeline *p)
3357 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
3360 __instr_hdr_emit_exec(p, 3);
3365 instr_hdr_emit4_tx_exec(struct rte_swx_pipeline *p)
3367 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
3370 __instr_hdr_emit_exec(p, 4);
3375 instr_hdr_emit5_tx_exec(struct rte_swx_pipeline *p)
3377 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
3380 __instr_hdr_emit_exec(p, 5);
3385 instr_hdr_emit6_tx_exec(struct rte_swx_pipeline *p)
3387 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
3390 __instr_hdr_emit_exec(p, 6);
3395 instr_hdr_emit7_tx_exec(struct rte_swx_pipeline *p)
3397 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
3400 __instr_hdr_emit_exec(p, 7);
3405 instr_hdr_emit8_tx_exec(struct rte_swx_pipeline *p)
3407 TRACE("[Thread %2u] *** The next 9 instructions are fused. ***\n",
3410 __instr_hdr_emit_exec(p, 8);
3418 instr_hdr_validate_translate(struct rte_swx_pipeline *p,
3419 struct action *action __rte_unused,
3422 struct instruction *instr,
3423 struct instruction_data *data __rte_unused)
3427 CHECK(n_tokens == 2, EINVAL);
3429 h = header_parse(p, tokens[1]);
3432 instr->type = INSTR_HDR_VALIDATE;
3433 instr->valid.header_id = h->id;
/* Execute "validate": set the header's bit in the thread's valid_headers
 * bit mask, marking the header as valid. */
3438 instr_hdr_validate_exec(struct rte_swx_pipeline *p)
3440 struct thread *t = &p->threads[p->thread_id];
3441 struct instruction *ip = t->ip;
3442 uint32_t header_id = ip->valid.header_id;
3444 TRACE("[Thread %2u] validate header %u\n", p->thread_id, header_id);
3447 t->valid_headers = MASK64_BIT_SET(t->valid_headers, header_id);
/* Translate the "invalidate h.header" instruction: mirror image of
 * instr_hdr_validate_translate(), producing INSTR_HDR_INVALIDATE. */
3457 instr_hdr_invalidate_translate(struct rte_swx_pipeline *p,
3458 struct action *action __rte_unused,
3461 struct instruction *instr,
3462 struct instruction_data *data __rte_unused)
3466 CHECK(n_tokens == 2, EINVAL);
3468 h = header_parse(p, tokens[1]);
3471 instr->type = INSTR_HDR_INVALIDATE;
3472 instr->valid.header_id = h->id;
/* Execute "invalidate": clear the header's bit in the thread's
 * valid_headers bit mask. */
3477 instr_hdr_invalidate_exec(struct rte_swx_pipeline *p)
3479 struct thread *t = &p->threads[p->thread_id];
3480 struct instruction *ip = t->ip;
3481 uint32_t header_id = ip->valid.header_id;
3483 TRACE("[Thread %2u] invalidate header %u\n", p->thread_id, header_id);
3486 t->valid_headers = MASK64_BIT_CLR(t->valid_headers, header_id);
/* Forward declarations: table/selector lookup by name, needed by the
 * table instruction translation below (definitions appear later in file). */
3495 static struct table *
3496 table_find(struct rte_swx_pipeline *p, const char *name);
3498 static struct selector *
3499 selector_find(struct rte_swx_pipeline *p, const char *name);
/* Translate the "table TABLE_NAME" instruction. Only valid outside actions
 * (CHECK(!action)). tokens[1] is first looked up as a regular table
 * (INSTR_TABLE); if not found, it is tried as a selector table
 * (INSTR_SELECTOR). Both variants store the object ID in
 * instr->table.table_id. */
3502 instr_table_translate(struct rte_swx_pipeline *p,
3503 struct action *action,
3506 struct instruction *instr,
3507 struct instruction_data *data __rte_unused)
3512 CHECK(!action, EINVAL);
3513 CHECK(n_tokens == 2, EINVAL);
3515 t = table_find(p, tokens[1]);
3517 instr->type = INSTR_TABLE;
3518 instr->table.table_id = t->id;
3522 s = selector_find(p, tokens[1]);
3524 instr->type = INSTR_SELECTOR;
3525 instr->table.table_id = s->id;
/* Execute the table lookup instruction:
 * - run the table lookup function (table->func); when it reports "not done"
 *   the thread yields and retries later (elided branch after the TRACE);
 * - on a miss, fall back to the table's default action/action data;
 * - update hit/miss and per-action statistics;
 * - stage the action data as struct 0 and call into the action's code
 *   via thread_ip_action_call().
 * NOTE(review): several interior lines (hit/done declarations, yield path)
 * are elided in this view. */
3533 instr_table_exec(struct rte_swx_pipeline *p)
3535 struct thread *t = &p->threads[p->thread_id];
3536 struct instruction *ip = t->ip;
3537 uint32_t table_id = ip->table.table_id;
3538 struct rte_swx_table_state *ts = &t->table_state[table_id];
3539 struct table_runtime *table = &t->tables[table_id];
3540 struct table_statistics *stats = &p->table_stats[table_id];
3541 uint64_t action_id, n_pkts_hit, n_pkts_action;
3542 uint8_t *action_data;
3546 done = table->func(ts->obj,
3554 TRACE("[Thread %2u] table %u (not finalized)\n",
3562 action_id = hit ? action_id : ts->default_action_id;
3563 action_data = hit ? action_data : ts->default_action_data;
3564 n_pkts_hit = stats->n_pkts_hit[hit];
3565 n_pkts_action = stats->n_pkts_action[action_id];
3567 TRACE("[Thread %2u] table %u (%s, action %u)\n",
3570 hit ? "hit" : "miss",
3571 (uint32_t)action_id);
3573 t->action_id = action_id;
3574 t->structs[0] = action_data;
3576 stats->n_pkts_hit[hit] = n_pkts_hit + 1;
3577 stats->n_pkts_action[action_id] = n_pkts_action + 1;
3580 thread_ip_action_call(p, t, action_id);
/* Execute the selector table instruction: run the group/member selection
 * (rte_swx_table_selector_select()) on the selector's table state, which is
 * stored after the regular tables (index p->n_tables + selector_id).
 * Yields and retries when the selection is not finalized (elided branch),
 * then bumps the selector packet counter. */
3584 instr_selector_exec(struct rte_swx_pipeline *p)
3586 struct thread *t = &p->threads[p->thread_id];
3587 struct instruction *ip = t->ip;
3588 uint32_t selector_id = ip->table.table_id;
3589 struct rte_swx_table_state *ts = &t->table_state[p->n_tables + selector_id];
3590 struct selector_runtime *selector = &t->selectors[selector_id];
3591 struct selector_statistics *stats = &p->selector_stats[selector_id];
3592 uint64_t n_pkts = stats->n_pkts;
3596 done = rte_swx_table_selector_select(ts->obj,
3598 selector->group_id_buffer,
3599 selector->selector_buffer,
3600 selector->member_id_buffer);
3603 TRACE("[Thread %2u] selector %u (not finalized)\n",
3612 TRACE("[Thread %2u] selector %u\n",
3616 stats->n_pkts = n_pkts + 1;
/* Translate the "extern" instruction. tokens[1] is either:
 * - "e.obj.func"  (first char 'e'): an extern object member function call
 *   -> INSTR_EXTERN_OBJ with object ID + member function ID; or
 * - "f.func"      (first char 'f'): a plain extern function call
 *   -> INSTR_EXTERN_FUNC with function ID.
 * Returns -EINVAL on parse failure; the fall-through return for any other
 * first character is elided in this view. */
3626 instr_extern_translate(struct rte_swx_pipeline *p,
3627 struct action *action __rte_unused,
3630 struct instruction *instr,
3631 struct instruction_data *data __rte_unused)
3633 char *token = tokens[1];
3635 CHECK(n_tokens == 2, EINVAL);
3637 if (token[0] == 'e') {
3638 struct extern_obj *obj;
3639 struct extern_type_member_func *func;
3641 func = extern_obj_member_func_parse(p, token, &obj);
3642 CHECK(func, EINVAL);
3644 instr->type = INSTR_EXTERN_OBJ;
3645 instr->ext_obj.ext_obj_id = obj->id;
3646 instr->ext_obj.func_id = func->id;
3651 if (token[0] == 'f') {
3652 struct extern_func *func;
3654 func = extern_func_parse(p, token);
3655 CHECK(func, EINVAL);
3657 instr->type = INSTR_EXTERN_FUNC;
3658 instr->ext_func.ext_func_id = func->id;
/* Execute an extern object member function call. The function returns
 * non-zero when complete: the IP advances only on completion
 * (thread_ip_inc_cond) and the thread yields when not done
 * (thread_yield_cond with done ^ 1), so the call is retried later. */
3667 instr_extern_obj_exec(struct rte_swx_pipeline *p)
3669 struct thread *t = &p->threads[p->thread_id];
3670 struct instruction *ip = t->ip;
3671 uint32_t obj_id = ip->ext_obj.ext_obj_id;
3672 uint32_t func_id = ip->ext_obj.func_id;
3673 struct extern_obj_runtime *obj = &t->extern_objs[obj_id];
3674 rte_swx_extern_type_member_func_t func = obj->funcs[func_id];
3676 TRACE("[Thread %2u] extern obj %u member func %u\n",
3681 /* Extern object member function execute. */
3682 uint32_t done = func(obj->obj, obj->mailbox);
3685 thread_ip_inc_cond(t, done);
3686 thread_yield_cond(p, done ^ 1);
/* Execute an extern (free-standing) function call. Same completion/yield
 * protocol as instr_extern_obj_exec(): advance IP when done, yield when
 * the function reports it is not finished. */
3690 instr_extern_func_exec(struct rte_swx_pipeline *p)
3692 struct thread *t = &p->threads[p->thread_id];
3693 struct instruction *ip = t->ip;
3694 uint32_t ext_func_id = ip->ext_func.ext_func_id;
3695 struct extern_func_runtime *ext_func = &t->extern_funcs[ext_func_id];
3696 rte_swx_extern_func_t func = ext_func->func;
3698 TRACE("[Thread %2u] extern func %u\n",
3702 /* Extern function execute. */
3703 uint32_t done = func(ext_func->mailbox);
3706 thread_ip_inc_cond(t, done);
3707 thread_yield_cond(p, done ^ 1);
/* Translate "mov dst src". The destination must be a struct field. When the
 * source is also a struct field, the opcode variant is picked by whether
 * dst/src live in a header ('h' prefix) or metadata: MOV, MOV_MH, MOV_HM,
 * MOV_HH (header fields are big-endian, so the variants differ in byte
 * swapping). When the source is an immediate, it is parsed with strtoull
 * and, for a header destination (elided condition), pre-converted to
 * network byte order shifted to the field width; opcode is MOV_I.
 * Field offsets are stored in bytes (offset / 8). */
3714 instr_mov_translate(struct rte_swx_pipeline *p,
3715 struct action *action,
3718 struct instruction *instr,
3719 struct instruction_data *data __rte_unused)
3721 char *dst = tokens[1], *src = tokens[2];
3722 struct field *fdst, *fsrc;
3724 uint32_t dst_struct_id = 0, src_struct_id = 0;
3726 CHECK(n_tokens == 3, EINVAL);
3728 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3729 CHECK(fdst, EINVAL);
3731 /* MOV, MOV_MH, MOV_HM or MOV_HH. */
3732 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3734 instr->type = INSTR_MOV;
3735 if (dst[0] != 'h' && src[0] == 'h')
3736 instr->type = INSTR_MOV_MH;
3737 if (dst[0] == 'h' && src[0] != 'h')
3738 instr->type = INSTR_MOV_HM;
3739 if (dst[0] == 'h' && src[0] == 'h')
3740 instr->type = INSTR_MOV_HH;
3742 instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
3743 instr->mov.dst.n_bits = fdst->n_bits;
3744 instr->mov.dst.offset = fdst->offset / 8;
3745 instr->mov.src.struct_id = (uint8_t)src_struct_id;
3746 instr->mov.src.n_bits = fsrc->n_bits;
3747 instr->mov.src.offset = fsrc->offset / 8;
3752 src_val = strtoull(src, &src, 0);
3753 CHECK(!src[0], EINVAL);
3756 src_val = hton64(src_val) >> (64 - fdst->n_bits);
3758 instr->type = INSTR_MOV_I;
3759 instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
3760 instr->mov.dst.n_bits = fdst->n_bits;
3761 instr->mov.dst.offset = fdst->offset / 8;
3762 instr->mov.src_val = src_val;
/* Execution handlers for the mov opcode variants (plain, mh, hm, hh, and
 * immediate). The actual copy is performed by MOV* macros whose lines are
 * elided in this view; each handler only differs in the endianness
 * handling implied by its variant. */
3767 instr_mov_exec(struct rte_swx_pipeline *p)
3769 struct thread *t = &p->threads[p->thread_id];
3770 struct instruction *ip = t->ip;
3772 TRACE("[Thread %2u] mov\n",
3782 instr_mov_mh_exec(struct rte_swx_pipeline *p)
3784 struct thread *t = &p->threads[p->thread_id];
3785 struct instruction *ip = t->ip;
3787 TRACE("[Thread %2u] mov (mh)\n",
3797 instr_mov_hm_exec(struct rte_swx_pipeline *p)
3799 struct thread *t = &p->threads[p->thread_id];
3800 struct instruction *ip = t->ip;
3802 TRACE("[Thread %2u] mov (hm)\n",
3812 instr_mov_hh_exec(struct rte_swx_pipeline *p)
3814 struct thread *t = &p->threads[p->thread_id];
3815 struct instruction *ip = t->ip;
3817 TRACE("[Thread %2u] mov (hh)\n",
3827 instr_mov_i_exec(struct rte_swx_pipeline *p)
3829 struct thread *t = &p->threads[p->thread_id];
3830 struct instruction *ip = t->ip;
3832 TRACE("[Thread %2u] mov m.f %" PRIx64 "\n",
/* DMA action-data-to-header worker: for each of n_dma transfers, copy
 * n_bytes from the action data (struct 0) at the recorded offset into the
 * destination header. If the header is already valid, the copy goes to its
 * current location; otherwise to the header's default location (h->ptr0 —
 * the elided ternary at 3867/3868 selects between them). The header is
 * then marked valid. Fused instr_dma_htN_exec wrappers below call this
 * with n_dma = 1..8. */
3846 __instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma);
3849 __instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma)
3851 struct thread *t = &p->threads[p->thread_id];
3852 struct instruction *ip = t->ip;
3853 uint8_t *action_data = t->structs[0];
3854 uint64_t valid_headers = t->valid_headers;
3857 for (i = 0; i < n_dma; i++) {
3858 uint32_t header_id = ip->dma.dst.header_id[i];
3859 uint32_t struct_id = ip->dma.dst.struct_id[i];
3860 uint32_t offset = ip->dma.src.offset[i];
3861 uint32_t n_bytes = ip->dma.n_bytes[i];
3863 struct header_runtime *h = &t->headers[header_id];
3864 uint8_t *h_ptr0 = h->ptr0;
3865 uint8_t *h_ptr = t->structs[struct_id];
3867 void *dst = MASK64_BIT_GET(valid_headers, header_id) ?
3869 void *src = &action_data[offset];
3871 TRACE("[Thread %2u] dma h.s t.f\n", p->thread_id);
3874 memcpy(dst, src, n_bytes);
3875 t->structs[struct_id] = dst;
3876 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
3879 t->valid_headers = valid_headers;
/* Fused DMA instruction handlers: instr_dma_htN_exec performs N
 * consecutive action-data-to-header transfers via __instr_dma_ht_exec(). */
3883 instr_dma_ht_exec(struct rte_swx_pipeline *p)
3885 __instr_dma_ht_exec(p, 1);
3892 instr_dma_ht2_exec(struct rte_swx_pipeline *p)
3894 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
3897 __instr_dma_ht_exec(p, 2);
3904 instr_dma_ht3_exec(struct rte_swx_pipeline *p)
3906 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
3909 __instr_dma_ht_exec(p, 3);
3916 instr_dma_ht4_exec(struct rte_swx_pipeline *p)
3918 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
3921 __instr_dma_ht_exec(p, 4);
3928 instr_dma_ht5_exec(struct rte_swx_pipeline *p)
3930 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
3933 __instr_dma_ht_exec(p, 5);
3940 instr_dma_ht6_exec(struct rte_swx_pipeline *p)
3942 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
3945 __instr_dma_ht_exec(p, 6);
3952 instr_dma_ht7_exec(struct rte_swx_pipeline *p)
3954 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
3957 __instr_dma_ht_exec(p, 7);
3964 instr_dma_ht8_exec(struct rte_swx_pipeline *p)
3966 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
3969 __instr_dma_ht_exec(p, 8);
/* Translate "add dst src". Same dst/src classification scheme as mov:
 * field+field picks ADD/ADD_HM/ADD_MH/ADD_HH by header ('h') vs metadata
 * operands; field+immediate picks ADD_MI or ADD_HI (the selecting condition
 * between MI/HI is elided here). Offsets stored in bytes. */
3979 instr_alu_add_translate(struct rte_swx_pipeline *p,
3980 struct action *action,
3983 struct instruction *instr,
3984 struct instruction_data *data __rte_unused)
3986 char *dst = tokens[1], *src = tokens[2];
3987 struct field *fdst, *fsrc;
3989 uint32_t dst_struct_id = 0, src_struct_id = 0;
3991 CHECK(n_tokens == 3, EINVAL);
3993 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3994 CHECK(fdst, EINVAL);
3996 /* ADD, ADD_HM, ADD_MH, ADD_HH. */
3997 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3999 instr->type = INSTR_ALU_ADD;
4000 if (dst[0] == 'h' && src[0] != 'h')
4001 instr->type = INSTR_ALU_ADD_HM;
4002 if (dst[0] != 'h' && src[0] == 'h')
4003 instr->type = INSTR_ALU_ADD_MH;
4004 if (dst[0] == 'h' && src[0] == 'h')
4005 instr->type = INSTR_ALU_ADD_HH;
4007 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4008 instr->alu.dst.n_bits = fdst->n_bits;
4009 instr->alu.dst.offset = fdst->offset / 8;
4010 instr->alu.src.struct_id = (uint8_t)src_struct_id;
4011 instr->alu.src.n_bits = fsrc->n_bits;
4012 instr->alu.src.offset = fsrc->offset / 8;
4016 /* ADD_MI, ADD_HI. */
4017 src_val = strtoull(src, &src, 0);
4018 CHECK(!src[0], EINVAL);
4020 instr->type = INSTR_ALU_ADD_MI;
4022 instr->type = INSTR_ALU_ADD_HI;
4024 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4025 instr->alu.dst.n_bits = fdst->n_bits;
4026 instr->alu.dst.offset = fdst->offset / 8;
4027 instr->alu.src_val = src_val;
/* Translate "sub dst src". Identical structure to instr_alu_add_translate()
 * with the SUB opcode family (SUB/SUB_HM/SUB_MH/SUB_HH/SUB_MI/SUB_HI). */
4032 instr_alu_sub_translate(struct rte_swx_pipeline *p,
4033 struct action *action,
4036 struct instruction *instr,
4037 struct instruction_data *data __rte_unused)
4039 char *dst = tokens[1], *src = tokens[2];
4040 struct field *fdst, *fsrc;
4042 uint32_t dst_struct_id = 0, src_struct_id = 0;
4044 CHECK(n_tokens == 3, EINVAL);
4046 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
4047 CHECK(fdst, EINVAL);
4049 /* SUB, SUB_HM, SUB_MH, SUB_HH. */
4050 fsrc = struct_field_parse(p, action, src, &src_struct_id);
4052 instr->type = INSTR_ALU_SUB;
4053 if (dst[0] == 'h' && src[0] != 'h')
4054 instr->type = INSTR_ALU_SUB_HM;
4055 if (dst[0] != 'h' && src[0] == 'h')
4056 instr->type = INSTR_ALU_SUB_MH;
4057 if (dst[0] == 'h' && src[0] == 'h')
4058 instr->type = INSTR_ALU_SUB_HH;
4060 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4061 instr->alu.dst.n_bits = fdst->n_bits;
4062 instr->alu.dst.offset = fdst->offset / 8;
4063 instr->alu.src.struct_id = (uint8_t)src_struct_id;
4064 instr->alu.src.n_bits = fsrc->n_bits;
4065 instr->alu.src.offset = fsrc->offset / 8;
4069 /* SUB_MI, SUB_HI. */
4070 src_val = strtoull(src, &src, 0);
4071 CHECK(!src[0], EINVAL);
4073 instr->type = INSTR_ALU_SUB_MI;
4075 instr->type = INSTR_ALU_SUB_HI;
4077 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4078 instr->alu.dst.n_bits = fdst->n_bits;
4079 instr->alu.dst.offset = fdst->offset / 8;
4080 instr->alu.src_val = src_val;
/* Translate "ckadd dst src" (1's complement checksum add). The destination
 * must be a 16-bit header field (the checksum). The source is either:
 * - another header field -> CKADD_FIELD; or
 * - a whole header       -> CKADD_STRUCT, specialized to CKADD_STRUCT20
 *   when the header is exactly 20 bytes (IPv4 header fast path).
 * For the struct variants src.n_bits holds the full header size in bits
 * and src.offset is unused. */
4085 instr_alu_ckadd_translate(struct rte_swx_pipeline *p,
4086 struct action *action __rte_unused,
4089 struct instruction *instr,
4090 struct instruction_data *data __rte_unused)
4092 char *dst = tokens[1], *src = tokens[2];
4093 struct header *hdst, *hsrc;
4094 struct field *fdst, *fsrc;
4096 CHECK(n_tokens == 3, EINVAL);
4098 fdst = header_field_parse(p, dst, &hdst);
4099 CHECK(fdst && (fdst->n_bits == 16), EINVAL);
4102 fsrc = header_field_parse(p, src, &hsrc);
4104 instr->type = INSTR_ALU_CKADD_FIELD;
4105 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
4106 instr->alu.dst.n_bits = fdst->n_bits;
4107 instr->alu.dst.offset = fdst->offset / 8;
4108 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
4109 instr->alu.src.n_bits = fsrc->n_bits;
4110 instr->alu.src.offset = fsrc->offset / 8;
4114 /* CKADD_STRUCT, CKADD_STRUCT20. */
4115 hsrc = header_parse(p, src);
4116 CHECK(hsrc, EINVAL);
4118 instr->type = INSTR_ALU_CKADD_STRUCT;
4119 if ((hsrc->st->n_bits / 8) == 20)
4120 instr->type = INSTR_ALU_CKADD_STRUCT20;
4122 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
4123 instr->alu.dst.n_bits = fdst->n_bits;
4124 instr->alu.dst.offset = fdst->offset / 8;
4125 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
4126 instr->alu.src.n_bits = hsrc->st->n_bits;
4127 instr->alu.src.offset = 0; /* Unused. */
/* Translate "cksub dst src" (1's complement checksum subtract). Unlike
 * ckadd, only the field-from-field form exists: both operands must be
 * header fields and the destination must be 16 bits wide. */
4132 instr_alu_cksub_translate(struct rte_swx_pipeline *p,
4133 struct action *action __rte_unused,
4136 struct instruction *instr,
4137 struct instruction_data *data __rte_unused)
4139 char *dst = tokens[1], *src = tokens[2];
4140 struct header *hdst, *hsrc;
4141 struct field *fdst, *fsrc;
4143 CHECK(n_tokens == 3, EINVAL);
4145 fdst = header_field_parse(p, dst, &hdst);
4146 CHECK(fdst && (fdst->n_bits == 16), EINVAL);
4148 fsrc = header_field_parse(p, src, &hsrc);
4149 CHECK(fsrc, EINVAL);
4151 instr->type = INSTR_ALU_CKSUB_FIELD;
4152 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
4153 instr->alu.dst.n_bits = fdst->n_bits;
4154 instr->alu.dst.offset = fdst->offset / 8;
4155 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
4156 instr->alu.src.n_bits = fsrc->n_bits;
4157 instr->alu.src.offset = fsrc->offset / 8;
/* Translate "shl dst src" (shift left). Same structure as the add/sub
 * translators: SHL/SHL_HM/SHL_MH/SHL_HH for field sources,
 * SHL_MI/SHL_HI for immediate sources. */
4162 instr_alu_shl_translate(struct rte_swx_pipeline *p,
4163 struct action *action,
4166 struct instruction *instr,
4167 struct instruction_data *data __rte_unused)
4169 char *dst = tokens[1], *src = tokens[2];
4170 struct field *fdst, *fsrc;
4172 uint32_t dst_struct_id = 0, src_struct_id = 0;
4174 CHECK(n_tokens == 3, EINVAL);
4176 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
4177 CHECK(fdst, EINVAL);
4179 /* SHL, SHL_HM, SHL_MH, SHL_HH. */
4180 fsrc = struct_field_parse(p, action, src, &src_struct_id);
4182 instr->type = INSTR_ALU_SHL;
4183 if (dst[0] == 'h' && src[0] != 'h')
4184 instr->type = INSTR_ALU_SHL_HM;
4185 if (dst[0] != 'h' && src[0] == 'h')
4186 instr->type = INSTR_ALU_SHL_MH;
4187 if (dst[0] == 'h' && src[0] == 'h')
4188 instr->type = INSTR_ALU_SHL_HH;
4190 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4191 instr->alu.dst.n_bits = fdst->n_bits;
4192 instr->alu.dst.offset = fdst->offset / 8;
4193 instr->alu.src.struct_id = (uint8_t)src_struct_id;
4194 instr->alu.src.n_bits = fsrc->n_bits;
4195 instr->alu.src.offset = fsrc->offset / 8;
4199 /* SHL_MI, SHL_HI. */
4200 src_val = strtoull(src, &src, 0);
4201 CHECK(!src[0], EINVAL);
4203 instr->type = INSTR_ALU_SHL_MI;
4205 instr->type = INSTR_ALU_SHL_HI;
4207 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4208 instr->alu.dst.n_bits = fdst->n_bits;
4209 instr->alu.dst.offset = fdst->offset / 8;
4210 instr->alu.src_val = src_val;
/* Translate "shr dst src" (shift right). Mirror of
 * instr_alu_shl_translate() with the SHR opcode family. */
4215 instr_alu_shr_translate(struct rte_swx_pipeline *p,
4216 struct action *action,
4219 struct instruction *instr,
4220 struct instruction_data *data __rte_unused)
4222 char *dst = tokens[1], *src = tokens[2];
4223 struct field *fdst, *fsrc;
4225 uint32_t dst_struct_id = 0, src_struct_id = 0;
4227 CHECK(n_tokens == 3, EINVAL);
4229 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
4230 CHECK(fdst, EINVAL);
4232 /* SHR, SHR_HM, SHR_MH, SHR_HH. */
4233 fsrc = struct_field_parse(p, action, src, &src_struct_id);
4235 instr->type = INSTR_ALU_SHR;
4236 if (dst[0] == 'h' && src[0] != 'h')
4237 instr->type = INSTR_ALU_SHR_HM;
4238 if (dst[0] != 'h' && src[0] == 'h')
4239 instr->type = INSTR_ALU_SHR_MH;
4240 if (dst[0] == 'h' && src[0] == 'h')
4241 instr->type = INSTR_ALU_SHR_HH;
4243 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4244 instr->alu.dst.n_bits = fdst->n_bits;
4245 instr->alu.dst.offset = fdst->offset / 8;
4246 instr->alu.src.struct_id = (uint8_t)src_struct_id;
4247 instr->alu.src.n_bits = fsrc->n_bits;
4248 instr->alu.src.offset = fsrc->offset / 8;
4252 /* SHR_MI, SHR_HI. */
4253 src_val = strtoull(src, &src, 0);
4254 CHECK(!src[0], EINVAL);
4256 instr->type = INSTR_ALU_SHR_MI;
4258 instr->type = INSTR_ALU_SHR_HI;
4260 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4261 instr->alu.dst.n_bits = fdst->n_bits;
4262 instr->alu.dst.offset = fdst->offset / 8;
4263 instr->alu.src_val = src_val;
/* Translate "and dst src" (bitwise AND). Field sources select
 * AND/AND_MH/AND_HM/AND_HH; an immediate source produces the single AND_I
 * opcode, with the immediate pre-swapped to network byte order for header
 * destinations (elided condition before line 4310). */
4268 instr_alu_and_translate(struct rte_swx_pipeline *p,
4269 struct action *action,
4272 struct instruction *instr,
4273 struct instruction_data *data __rte_unused)
4275 char *dst = tokens[1], *src = tokens[2];
4276 struct field *fdst, *fsrc;
4278 uint32_t dst_struct_id = 0, src_struct_id = 0;
4280 CHECK(n_tokens == 3, EINVAL);
4282 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
4283 CHECK(fdst, EINVAL);
4285 /* AND, AND_MH, AND_HM, AND_HH. */
4286 fsrc = struct_field_parse(p, action, src, &src_struct_id);
4288 instr->type = INSTR_ALU_AND;
4289 if (dst[0] != 'h' && src[0] == 'h')
4290 instr->type = INSTR_ALU_AND_MH;
4291 if (dst[0] == 'h' && src[0] != 'h')
4292 instr->type = INSTR_ALU_AND_HM;
4293 if (dst[0] == 'h' && src[0] == 'h')
4294 instr->type = INSTR_ALU_AND_HH;
4296 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4297 instr->alu.dst.n_bits = fdst->n_bits;
4298 instr->alu.dst.offset = fdst->offset / 8;
4299 instr->alu.src.struct_id = (uint8_t)src_struct_id;
4300 instr->alu.src.n_bits = fsrc->n_bits;
4301 instr->alu.src.offset = fsrc->offset / 8;
4306 src_val = strtoull(src, &src, 0);
4307 CHECK(!src[0], EINVAL);
4310 src_val = hton64(src_val) >> (64 - fdst->n_bits);
4312 instr->type = INSTR_ALU_AND_I;
4313 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4314 instr->alu.dst.n_bits = fdst->n_bits;
4315 instr->alu.dst.offset = fdst->offset / 8;
4316 instr->alu.src_val = src_val;
/* Translate "or dst src" (bitwise OR). Same structure as
 * instr_alu_and_translate() with the OR opcode family and OR_I. */
4321 instr_alu_or_translate(struct rte_swx_pipeline *p,
4322 struct action *action,
4325 struct instruction *instr,
4326 struct instruction_data *data __rte_unused)
4328 char *dst = tokens[1], *src = tokens[2];
4329 struct field *fdst, *fsrc;
4331 uint32_t dst_struct_id = 0, src_struct_id = 0;
4333 CHECK(n_tokens == 3, EINVAL);
4335 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
4336 CHECK(fdst, EINVAL);
4338 /* OR, OR_MH, OR_HM, OR_HH. */
4339 fsrc = struct_field_parse(p, action, src, &src_struct_id);
4341 instr->type = INSTR_ALU_OR;
4342 if (dst[0] != 'h' && src[0] == 'h')
4343 instr->type = INSTR_ALU_OR_MH;
4344 if (dst[0] == 'h' && src[0] != 'h')
4345 instr->type = INSTR_ALU_OR_HM;
4346 if (dst[0] == 'h' && src[0] == 'h')
4347 instr->type = INSTR_ALU_OR_HH;
4349 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4350 instr->alu.dst.n_bits = fdst->n_bits;
4351 instr->alu.dst.offset = fdst->offset / 8;
4352 instr->alu.src.struct_id = (uint8_t)src_struct_id;
4353 instr->alu.src.n_bits = fsrc->n_bits;
4354 instr->alu.src.offset = fsrc->offset / 8;
4359 src_val = strtoull(src, &src, 0);
4360 CHECK(!src[0], EINVAL);
4363 src_val = hton64(src_val) >> (64 - fdst->n_bits);
4365 instr->type = INSTR_ALU_OR_I;
4366 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4367 instr->alu.dst.n_bits = fdst->n_bits;
4368 instr->alu.dst.offset = fdst->offset / 8;
4369 instr->alu.src_val = src_val;
/* Translate "xor dst src" (bitwise XOR). Same structure as
 * instr_alu_and_translate() with the XOR opcode family and XOR_I. */
4374 instr_alu_xor_translate(struct rte_swx_pipeline *p,
4375 struct action *action,
4378 struct instruction *instr,
4379 struct instruction_data *data __rte_unused)
4381 char *dst = tokens[1], *src = tokens[2];
4382 struct field *fdst, *fsrc;
4384 uint32_t dst_struct_id = 0, src_struct_id = 0;
4386 CHECK(n_tokens == 3, EINVAL);
4388 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
4389 CHECK(fdst, EINVAL);
4391 /* XOR, XOR_MH, XOR_HM, XOR_HH. */
4392 fsrc = struct_field_parse(p, action, src, &src_struct_id);
4394 instr->type = INSTR_ALU_XOR;
4395 if (dst[0] != 'h' && src[0] == 'h')
4396 instr->type = INSTR_ALU_XOR_MH;
4397 if (dst[0] == 'h' && src[0] != 'h')
4398 instr->type = INSTR_ALU_XOR_HM;
4399 if (dst[0] == 'h' && src[0] == 'h')
4400 instr->type = INSTR_ALU_XOR_HH;
4402 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4403 instr->alu.dst.n_bits = fdst->n_bits;
4404 instr->alu.dst.offset = fdst->offset / 8;
4405 instr->alu.src.struct_id = (uint8_t)src_struct_id;
4406 instr->alu.src.n_bits = fsrc->n_bits;
4407 instr->alu.src.offset = fsrc->offset / 8;
4412 src_val = strtoull(src, &src, 0);
4413 CHECK(!src[0], EINVAL);
4416 src_val = hton64(src_val) >> (64 - fdst->n_bits);
4418 instr->type = INSTR_ALU_XOR_I;
4419 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4420 instr->alu.dst.n_bits = fdst->n_bits;
4421 instr->alu.dst.offset = fdst->offset / 8;
4422 instr->alu.src_val = src_val;
/* Execution handlers for the ALU opcode variants (add/sub/shl/shr/and/or/
 * xor, each with plain/mh/hm/hh/mi-or-i/hi flavors). The arithmetic itself
 * is done by ALU* macros whose invocations are mostly elided in this view;
 * the visible ALU_HM_FAST / ALU_HH_FAST lines show the fast-path macros
 * used when operand widths allow a direct operation without shifting. */
4427 instr_alu_add_exec(struct rte_swx_pipeline *p)
4429 struct thread *t = &p->threads[p->thread_id];
4430 struct instruction *ip = t->ip;
4432 TRACE("[Thread %2u] add\n", p->thread_id);
4442 instr_alu_add_mh_exec(struct rte_swx_pipeline *p)
4444 struct thread *t = &p->threads[p->thread_id];
4445 struct instruction *ip = t->ip;
4447 TRACE("[Thread %2u] add (mh)\n", p->thread_id);
4457 instr_alu_add_hm_exec(struct rte_swx_pipeline *p)
4459 struct thread *t = &p->threads[p->thread_id];
4460 struct instruction *ip = t->ip;
4462 TRACE("[Thread %2u] add (hm)\n", p->thread_id);
4472 instr_alu_add_hh_exec(struct rte_swx_pipeline *p)
4474 struct thread *t = &p->threads[p->thread_id];
4475 struct instruction *ip = t->ip;
4477 TRACE("[Thread %2u] add (hh)\n", p->thread_id);
4487 instr_alu_add_mi_exec(struct rte_swx_pipeline *p)
4489 struct thread *t = &p->threads[p->thread_id];
4490 struct instruction *ip = t->ip;
4492 TRACE("[Thread %2u] add (mi)\n", p->thread_id);
4502 instr_alu_add_hi_exec(struct rte_swx_pipeline *p)
4504 struct thread *t = &p->threads[p->thread_id];
4505 struct instruction *ip = t->ip;
4507 TRACE("[Thread %2u] add (hi)\n", p->thread_id);
4517 instr_alu_sub_exec(struct rte_swx_pipeline *p)
4519 struct thread *t = &p->threads[p->thread_id];
4520 struct instruction *ip = t->ip;
4522 TRACE("[Thread %2u] sub\n", p->thread_id);
4532 instr_alu_sub_mh_exec(struct rte_swx_pipeline *p)
4534 struct thread *t = &p->threads[p->thread_id];
4535 struct instruction *ip = t->ip;
4537 TRACE("[Thread %2u] sub (mh)\n", p->thread_id);
4547 instr_alu_sub_hm_exec(struct rte_swx_pipeline *p)
4549 struct thread *t = &p->threads[p->thread_id];
4550 struct instruction *ip = t->ip;
4552 TRACE("[Thread %2u] sub (hm)\n", p->thread_id);
4562 instr_alu_sub_hh_exec(struct rte_swx_pipeline *p)
4564 struct thread *t = &p->threads[p->thread_id];
4565 struct instruction *ip = t->ip;
4567 TRACE("[Thread %2u] sub (hh)\n", p->thread_id);
4577 instr_alu_sub_mi_exec(struct rte_swx_pipeline *p)
4579 struct thread *t = &p->threads[p->thread_id];
4580 struct instruction *ip = t->ip;
4582 TRACE("[Thread %2u] sub (mi)\n", p->thread_id);
4592 instr_alu_sub_hi_exec(struct rte_swx_pipeline *p)
4594 struct thread *t = &p->threads[p->thread_id];
4595 struct instruction *ip = t->ip;
4597 TRACE("[Thread %2u] sub (hi)\n", p->thread_id);
4607 instr_alu_shl_exec(struct rte_swx_pipeline *p)
4609 struct thread *t = &p->threads[p->thread_id];
4610 struct instruction *ip = t->ip;
4612 TRACE("[Thread %2u] shl\n", p->thread_id);
4622 instr_alu_shl_mh_exec(struct rte_swx_pipeline *p)
4624 struct thread *t = &p->threads[p->thread_id];
4625 struct instruction *ip = t->ip;
4627 TRACE("[Thread %2u] shl (mh)\n", p->thread_id);
4637 instr_alu_shl_hm_exec(struct rte_swx_pipeline *p)
4639 struct thread *t = &p->threads[p->thread_id];
4640 struct instruction *ip = t->ip;
4642 TRACE("[Thread %2u] shl (hm)\n", p->thread_id);
4652 instr_alu_shl_hh_exec(struct rte_swx_pipeline *p)
4654 struct thread *t = &p->threads[p->thread_id];
4655 struct instruction *ip = t->ip;
4657 TRACE("[Thread %2u] shl (hh)\n", p->thread_id);
4667 instr_alu_shl_mi_exec(struct rte_swx_pipeline *p)
4669 struct thread *t = &p->threads[p->thread_id];
4670 struct instruction *ip = t->ip;
4672 TRACE("[Thread %2u] shl (mi)\n", p->thread_id);
4682 instr_alu_shl_hi_exec(struct rte_swx_pipeline *p)
4684 struct thread *t = &p->threads[p->thread_id];
4685 struct instruction *ip = t->ip;
4687 TRACE("[Thread %2u] shl (hi)\n", p->thread_id);
4697 instr_alu_shr_exec(struct rte_swx_pipeline *p)
4699 struct thread *t = &p->threads[p->thread_id];
4700 struct instruction *ip = t->ip;
4702 TRACE("[Thread %2u] shr\n", p->thread_id);
4712 instr_alu_shr_mh_exec(struct rte_swx_pipeline *p)
4714 struct thread *t = &p->threads[p->thread_id];
4715 struct instruction *ip = t->ip;
4717 TRACE("[Thread %2u] shr (mh)\n", p->thread_id);
4727 instr_alu_shr_hm_exec(struct rte_swx_pipeline *p)
4729 struct thread *t = &p->threads[p->thread_id];
4730 struct instruction *ip = t->ip;
4732 TRACE("[Thread %2u] shr (hm)\n", p->thread_id);
4742 instr_alu_shr_hh_exec(struct rte_swx_pipeline *p)
4744 struct thread *t = &p->threads[p->thread_id];
4745 struct instruction *ip = t->ip;
4747 TRACE("[Thread %2u] shr (hh)\n", p->thread_id);
4757 instr_alu_shr_mi_exec(struct rte_swx_pipeline *p)
4759 struct thread *t = &p->threads[p->thread_id];
4760 struct instruction *ip = t->ip;
4762 TRACE("[Thread %2u] shr (mi)\n", p->thread_id);
4772 instr_alu_shr_hi_exec(struct rte_swx_pipeline *p)
4774 struct thread *t = &p->threads[p->thread_id];
4775 struct instruction *ip = t->ip;
4777 TRACE("[Thread %2u] shr (hi)\n", p->thread_id);
4787 instr_alu_and_exec(struct rte_swx_pipeline *p)
4789 struct thread *t = &p->threads[p->thread_id];
4790 struct instruction *ip = t->ip;
4792 TRACE("[Thread %2u] and\n", p->thread_id);
4802 instr_alu_and_mh_exec(struct rte_swx_pipeline *p)
4804 struct thread *t = &p->threads[p->thread_id];
4805 struct instruction *ip = t->ip;
4807 TRACE("[Thread %2u] and (mh)\n", p->thread_id);
4817 instr_alu_and_hm_exec(struct rte_swx_pipeline *p)
4819 struct thread *t = &p->threads[p->thread_id];
4820 struct instruction *ip = t->ip;
4822 TRACE("[Thread %2u] and (hm)\n", p->thread_id);
4825 ALU_HM_FAST(t, ip, &);
4832 instr_alu_and_hh_exec(struct rte_swx_pipeline *p)
4834 struct thread *t = &p->threads[p->thread_id];
4835 struct instruction *ip = t->ip;
4837 TRACE("[Thread %2u] and (hh)\n", p->thread_id);
4840 ALU_HH_FAST(t, ip, &);
4847 instr_alu_and_i_exec(struct rte_swx_pipeline *p)
4849 struct thread *t = &p->threads[p->thread_id];
4850 struct instruction *ip = t->ip;
4852 TRACE("[Thread %2u] and (i)\n", p->thread_id);
4862 instr_alu_or_exec(struct rte_swx_pipeline *p)
4864 struct thread *t = &p->threads[p->thread_id];
4865 struct instruction *ip = t->ip;
4867 TRACE("[Thread %2u] or\n", p->thread_id);
4877 instr_alu_or_mh_exec(struct rte_swx_pipeline *p)
4879 struct thread *t = &p->threads[p->thread_id];
4880 struct instruction *ip = t->ip;
4882 TRACE("[Thread %2u] or (mh)\n", p->thread_id);
4892 instr_alu_or_hm_exec(struct rte_swx_pipeline *p)
4894 struct thread *t = &p->threads[p->thread_id];
4895 struct instruction *ip = t->ip;
4897 TRACE("[Thread %2u] or (hm)\n", p->thread_id);
4900 ALU_HM_FAST(t, ip, |);
4907 instr_alu_or_hh_exec(struct rte_swx_pipeline *p)
4909 struct thread *t = &p->threads[p->thread_id];
4910 struct instruction *ip = t->ip;
4912 TRACE("[Thread %2u] or (hh)\n", p->thread_id);
4915 ALU_HH_FAST(t, ip, |);
4922 instr_alu_or_i_exec(struct rte_swx_pipeline *p)
4924 struct thread *t = &p->threads[p->thread_id];
4925 struct instruction *ip = t->ip;
4927 TRACE("[Thread %2u] or (i)\n", p->thread_id);
4937 instr_alu_xor_exec(struct rte_swx_pipeline *p)
4939 struct thread *t = &p->threads[p->thread_id];
4940 struct instruction *ip = t->ip;
4942 TRACE("[Thread %2u] xor\n", p->thread_id);
4952 instr_alu_xor_mh_exec(struct rte_swx_pipeline *p)
4954 struct thread *t = &p->threads[p->thread_id];
4955 struct instruction *ip = t->ip;
4957 TRACE("[Thread %2u] xor (mh)\n", p->thread_id);
4967 instr_alu_xor_hm_exec(struct rte_swx_pipeline *p)
4969 struct thread *t = &p->threads[p->thread_id];
4970 struct instruction *ip = t->ip;
4972 TRACE("[Thread %2u] xor (hm)\n", p->thread_id);
4975 ALU_HM_FAST(t, ip, ^);
4982 instr_alu_xor_hh_exec(struct rte_swx_pipeline *p)
4984 struct thread *t = &p->threads[p->thread_id];
4985 struct instruction *ip = t->ip;
4987 TRACE("[Thread %2u] xor (hh)\n", p->thread_id);
4990 ALU_HH_FAST(t, ip, ^);
4997 instr_alu_xor_i_exec(struct rte_swx_pipeline *p)
4999 struct thread *t = &p->threads[p->thread_id];
5000 struct instruction *ip = t->ip;
5002 TRACE("[Thread %2u] xor (i)\n", p->thread_id);
/* Execute ckadd (field form): fold a source header field into a 16-bit
 * 1's complement checksum. The source is read as a masked 64-bit value and
 * folded in stages; the inline comments prove that after three fold steps
 * no carry remains and the result fits in 16 bits.
 * NOTE(review): the initial read of src64 and of the current checksum into
 * r (plus the ~dst/ ~r complement steps typical of this algorithm) are
 * elided in this view — confirm against the full source. */
5012 instr_alu_ckadd_field_exec(struct rte_swx_pipeline *p)
5014 struct thread *t = &p->threads[p->thread_id];
5015 struct instruction *ip = t->ip;
5016 uint8_t *dst_struct, *src_struct;
5017 uint16_t *dst16_ptr, dst;
5018 uint64_t *src64_ptr, src64, src64_mask, src;
5021 TRACE("[Thread %2u] ckadd (field)\n", p->thread_id);
5024 dst_struct = t->structs[ip->alu.dst.struct_id];
5025 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
5028 src_struct = t->structs[ip->alu.src.struct_id];
5029 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
5031 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
5032 src = src64 & src64_mask;
5037 /* The first input (r) is a 16-bit number. The second and the third
5038 * inputs are 32-bit numbers. In the worst case scenario, the sum of the
5039 * three numbers (output r) is a 34-bit number.
5041 r += (src >> 32) + (src & 0xFFFFFFFF);
5043 /* The first input is a 16-bit number. The second input is an 18-bit
5044 * number. In the worst case scenario, the sum of the two numbers is a
5047 r = (r & 0xFFFF) + (r >> 16);
5049 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
5050 * a 3-bit number (0 .. 7). Their sum is a 17-bit number (0 .. 0x10006).
5052 r = (r & 0xFFFF) + (r >> 16);
5054 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
5055 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
5056 * 0x10006), the output r is (0 .. 7). So no carry bit can be generated,
5057 * therefore the output r is always a 16-bit number.
5059 r = (r & 0xFFFF) + (r >> 16);
5064 *dst16_ptr = (uint16_t)r;
/* Execute cksub (field form): remove a source header field's contribution
 * from a 16-bit 1's complement checksum. Underflow is avoided by first
 * adding 0xFFFF00000 (a multiple of the 0xFFFF modulus) to the minuend,
 * as the detailed inline proof explains; folding then reduces the result
 * back to 16 bits with no possible carry.
 * NOTE(review): the initial loads of src64 and of the current checksum
 * into r are elided in this view — confirm against the full source. */
5071 instr_alu_cksub_field_exec(struct rte_swx_pipeline *p)
5073 struct thread *t = &p->threads[p->thread_id];
5074 struct instruction *ip = t->ip;
5075 uint8_t *dst_struct, *src_struct;
5076 uint16_t *dst16_ptr, dst;
5077 uint64_t *src64_ptr, src64, src64_mask, src;
5080 TRACE("[Thread %2u] cksub (field)\n", p->thread_id);
5083 dst_struct = t->structs[ip->alu.dst.struct_id];
5084 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
5087 src_struct = t->structs[ip->alu.src.struct_id];
5088 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
5090 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
5091 src = src64 & src64_mask;
5096 /* Subtraction in 1's complement arithmetic (i.e. a '- b) is the same as
5097 * the following sequence of operations in 2's complement arithmetic:
5098 * a '- b = (a - b) % 0xFFFF.
5100 * In order to prevent an underflow for the below subtraction, in which
5101 * a 33-bit number (the subtrahend) is taken out of a 16-bit number (the
5102 * minuend), we first add a multiple of the 0xFFFF modulus to the
5103 * minuend. The number we add to the minuend needs to be a 34-bit number
5104 * or higher, so for readability reasons we picked the 36-bit multiple.
5105 * We are effectively turning the 16-bit minuend into a 36-bit number:
5106 * (a - b) % 0xFFFF = (a + 0xFFFF00000 - b) % 0xFFFF.
5108 r += 0xFFFF00000ULL; /* The output r is a 36-bit number. */
5110 /* A 33-bit number is subtracted from a 36-bit number (the input r). The
5111 * result (the output r) is a 36-bit number.
5113 r -= (src >> 32) + (src & 0xFFFFFFFF);
5115 /* The first input is a 16-bit number. The second input is a 20-bit
5116 * number. Their sum is a 21-bit number.
5118 r = (r & 0xFFFF) + (r >> 16);
5120 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
5121 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1001E).
5123 r = (r & 0xFFFF) + (r >> 16);
5125 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
5126 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
5127 * 0x1001E), the output r is (0 .. 31). So no carry bit can be
5128 * generated, therefore the output r is always a 16-bit number.
5130 r = (r & 0xFFFF) + (r >> 16);
5135 *dst16_ptr = (uint16_t)r;
5142 instr_alu_ckadd_struct20_exec(struct rte_swx_pipeline *p)
5144 struct thread *t = &p->threads[p->thread_id];
5145 struct instruction *ip = t->ip;
5146 uint8_t *dst_struct, *src_struct;
5147 uint16_t *dst16_ptr;
5148 uint32_t *src32_ptr;
5151 TRACE("[Thread %2u] ckadd (struct of 20 bytes)\n", p->thread_id);
5154 dst_struct = t->structs[ip->alu.dst.struct_id];
5155 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
5157 src_struct = t->structs[ip->alu.src.struct_id];
5158 src32_ptr = (uint32_t *)&src_struct[0];
5160 r0 = src32_ptr[0]; /* r0 is a 32-bit number. */
5161 r1 = src32_ptr[1]; /* r1 is a 32-bit number. */
5162 r0 += src32_ptr[2]; /* The output r0 is a 33-bit number. */
5163 r1 += src32_ptr[3]; /* The output r1 is a 33-bit number. */
5164 r0 += r1 + src32_ptr[4]; /* The output r0 is a 35-bit number. */
5166 /* The first input is a 16-bit number. The second input is a 19-bit
5167 * number. Their sum is a 20-bit number.
5169 r0 = (r0 & 0xFFFF) + (r0 >> 16);
5171 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
5172 * a 4-bit number (0 .. 15). The sum is a 17-bit number (0 .. 0x1000E).
5174 r0 = (r0 & 0xFFFF) + (r0 >> 16);
5176 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
5177 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
5178 * 0x1000E), the output r is (0 .. 15). So no carry bit can be
5179 * generated, therefore the output r is always a 16-bit number.
5181 r0 = (r0 & 0xFFFF) + (r0 >> 16);
5184 r0 = r0 ? r0 : 0xFFFF;
5186 *dst16_ptr = (uint16_t)r0;
5193 instr_alu_ckadd_struct_exec(struct rte_swx_pipeline *p)
5195 struct thread *t = &p->threads[p->thread_id];
5196 struct instruction *ip = t->ip;
5197 uint8_t *dst_struct, *src_struct;
5198 uint16_t *dst16_ptr;
5199 uint32_t *src32_ptr;
5203 TRACE("[Thread %2u] ckadd (struct)\n", p->thread_id);
5206 dst_struct = t->structs[ip->alu.dst.struct_id];
5207 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
5209 src_struct = t->structs[ip->alu.src.struct_id];
5210 src32_ptr = (uint32_t *)&src_struct[0];
5212 /* The max number of 32-bit words in a 256-byte header is 8 = 2^3.
5213 * Therefore, in the worst case scenario, a 35-bit number is added to a
5214 * 16-bit number (the input r), so the output r is 36-bit number.
5216 for (i = 0; i < ip->alu.src.n_bits / 32; i++, src32_ptr++)
5219 /* The first input is a 16-bit number. The second input is a 20-bit
5220 * number. Their sum is a 21-bit number.
5222 r = (r & 0xFFFF) + (r >> 16);
5224 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
5225 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1000E).
5227 r = (r & 0xFFFF) + (r >> 16);
5229 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
5230 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
5231 * 0x1001E), the output r is (0 .. 31). So no carry bit can be
5232 * generated, therefore the output r is always a 16-bit number.
5234 r = (r & 0xFFFF) + (r >> 16);
5239 *dst16_ptr = (uint16_t)r;
/* Forward declaration: lookup of a register array by name, defined later in
 * this file; needed by the register instruction translators below.
 */
static struct regarray *
regarray_find(struct rte_swx_pipeline *p, const char *name);
5252 instr_regprefetch_translate(struct rte_swx_pipeline *p,
5253 struct action *action,
5256 struct instruction *instr,
5257 struct instruction_data *data __rte_unused)
5259 char *regarray = tokens[1], *idx = tokens[2];
5262 uint32_t idx_struct_id, idx_val;
5264 CHECK(n_tokens == 3, EINVAL);
5266 r = regarray_find(p, regarray);
5269 /* REGPREFETCH_RH, REGPREFETCH_RM. */
5270 fidx = struct_field_parse(p, action, idx, &idx_struct_id);
5272 instr->type = INSTR_REGPREFETCH_RM;
5274 instr->type = INSTR_REGPREFETCH_RH;
5276 instr->regarray.regarray_id = r->id;
5277 instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
5278 instr->regarray.idx.n_bits = fidx->n_bits;
5279 instr->regarray.idx.offset = fidx->offset / 8;
5280 instr->regarray.dstsrc_val = 0; /* Unused. */
5284 /* REGPREFETCH_RI. */
5285 idx_val = strtoul(idx, &idx, 0);
5286 CHECK(!idx[0], EINVAL);
5288 instr->type = INSTR_REGPREFETCH_RI;
5289 instr->regarray.regarray_id = r->id;
5290 instr->regarray.idx_val = idx_val;
5291 instr->regarray.dstsrc_val = 0; /* Unused. */
5296 instr_regrd_translate(struct rte_swx_pipeline *p,
5297 struct action *action,
5300 struct instruction *instr,
5301 struct instruction_data *data __rte_unused)
5303 char *dst = tokens[1], *regarray = tokens[2], *idx = tokens[3];
5305 struct field *fdst, *fidx;
5306 uint32_t dst_struct_id, idx_struct_id, idx_val;
5308 CHECK(n_tokens == 4, EINVAL);
5310 r = regarray_find(p, regarray);
5313 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
5314 CHECK(fdst, EINVAL);
5316 /* REGRD_HRH, REGRD_HRM, REGRD_MRH, REGRD_MRM. */
5317 fidx = struct_field_parse(p, action, idx, &idx_struct_id);
5319 instr->type = INSTR_REGRD_MRM;
5320 if (dst[0] == 'h' && idx[0] != 'h')
5321 instr->type = INSTR_REGRD_HRM;
5322 if (dst[0] != 'h' && idx[0] == 'h')
5323 instr->type = INSTR_REGRD_MRH;
5324 if (dst[0] == 'h' && idx[0] == 'h')
5325 instr->type = INSTR_REGRD_HRH;
5327 instr->regarray.regarray_id = r->id;
5328 instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
5329 instr->regarray.idx.n_bits = fidx->n_bits;
5330 instr->regarray.idx.offset = fidx->offset / 8;
5331 instr->regarray.dstsrc.struct_id = (uint8_t)dst_struct_id;
5332 instr->regarray.dstsrc.n_bits = fdst->n_bits;
5333 instr->regarray.dstsrc.offset = fdst->offset / 8;
5337 /* REGRD_MRI, REGRD_HRI. */
5338 idx_val = strtoul(idx, &idx, 0);
5339 CHECK(!idx[0], EINVAL);
5341 instr->type = INSTR_REGRD_MRI;
5343 instr->type = INSTR_REGRD_HRI;
5345 instr->regarray.regarray_id = r->id;
5346 instr->regarray.idx_val = idx_val;
5347 instr->regarray.dstsrc.struct_id = (uint8_t)dst_struct_id;
5348 instr->regarray.dstsrc.n_bits = fdst->n_bits;
5349 instr->regarray.dstsrc.offset = fdst->offset / 8;
5354 instr_regwr_translate(struct rte_swx_pipeline *p,
5355 struct action *action,
5358 struct instruction *instr,
5359 struct instruction_data *data __rte_unused)
5361 char *regarray = tokens[1], *idx = tokens[2], *src = tokens[3];
5363 struct field *fidx, *fsrc;
5365 uint32_t idx_struct_id, idx_val, src_struct_id;
5367 CHECK(n_tokens == 4, EINVAL);
5369 r = regarray_find(p, regarray);
5372 /* REGWR_RHH, REGWR_RHM, REGWR_RMH, REGWR_RMM. */
5373 fidx = struct_field_parse(p, action, idx, &idx_struct_id);
5374 fsrc = struct_field_parse(p, action, src, &src_struct_id);
5376 instr->type = INSTR_REGWR_RMM;
5377 if (idx[0] == 'h' && src[0] != 'h')
5378 instr->type = INSTR_REGWR_RHM;
5379 if (idx[0] != 'h' && src[0] == 'h')
5380 instr->type = INSTR_REGWR_RMH;
5381 if (idx[0] == 'h' && src[0] == 'h')
5382 instr->type = INSTR_REGWR_RHH;
5384 instr->regarray.regarray_id = r->id;
5385 instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
5386 instr->regarray.idx.n_bits = fidx->n_bits;
5387 instr->regarray.idx.offset = fidx->offset / 8;
5388 instr->regarray.dstsrc.struct_id = (uint8_t)src_struct_id;
5389 instr->regarray.dstsrc.n_bits = fsrc->n_bits;
5390 instr->regarray.dstsrc.offset = fsrc->offset / 8;
5394 /* REGWR_RHI, REGWR_RMI. */
5395 if (fidx && !fsrc) {
5396 src_val = strtoull(src, &src, 0);
5397 CHECK(!src[0], EINVAL);
5399 instr->type = INSTR_REGWR_RMI;
5401 instr->type = INSTR_REGWR_RHI;
5403 instr->regarray.regarray_id = r->id;
5404 instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
5405 instr->regarray.idx.n_bits = fidx->n_bits;
5406 instr->regarray.idx.offset = fidx->offset / 8;
5407 instr->regarray.dstsrc_val = src_val;
5411 /* REGWR_RIH, REGWR_RIM. */
5412 if (!fidx && fsrc) {
5413 idx_val = strtoul(idx, &idx, 0);
5414 CHECK(!idx[0], EINVAL);
5416 instr->type = INSTR_REGWR_RIM;
5418 instr->type = INSTR_REGWR_RIH;
5420 instr->regarray.regarray_id = r->id;
5421 instr->regarray.idx_val = idx_val;
5422 instr->regarray.dstsrc.struct_id = (uint8_t)src_struct_id;
5423 instr->regarray.dstsrc.n_bits = fsrc->n_bits;
5424 instr->regarray.dstsrc.offset = fsrc->offset / 8;
5429 src_val = strtoull(src, &src, 0);
5430 CHECK(!src[0], EINVAL);
5432 idx_val = strtoul(idx, &idx, 0);
5433 CHECK(!idx[0], EINVAL);
5435 instr->type = INSTR_REGWR_RII;
5436 instr->regarray.idx_val = idx_val;
5437 instr->regarray.dstsrc_val = src_val;
5443 instr_regadd_translate(struct rte_swx_pipeline *p,
5444 struct action *action,
5447 struct instruction *instr,
5448 struct instruction_data *data __rte_unused)
5450 char *regarray = tokens[1], *idx = tokens[2], *src = tokens[3];
5452 struct field *fidx, *fsrc;
5454 uint32_t idx_struct_id, idx_val, src_struct_id;
5456 CHECK(n_tokens == 4, EINVAL);
5458 r = regarray_find(p, regarray);
5461 /* REGADD_RHH, REGADD_RHM, REGADD_RMH, REGADD_RMM. */
5462 fidx = struct_field_parse(p, action, idx, &idx_struct_id);
5463 fsrc = struct_field_parse(p, action, src, &src_struct_id);
5465 instr->type = INSTR_REGADD_RMM;
5466 if (idx[0] == 'h' && src[0] != 'h')
5467 instr->type = INSTR_REGADD_RHM;
5468 if (idx[0] != 'h' && src[0] == 'h')
5469 instr->type = INSTR_REGADD_RMH;
5470 if (idx[0] == 'h' && src[0] == 'h')
5471 instr->type = INSTR_REGADD_RHH;
5473 instr->regarray.regarray_id = r->id;
5474 instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
5475 instr->regarray.idx.n_bits = fidx->n_bits;
5476 instr->regarray.idx.offset = fidx->offset / 8;
5477 instr->regarray.dstsrc.struct_id = (uint8_t)src_struct_id;
5478 instr->regarray.dstsrc.n_bits = fsrc->n_bits;
5479 instr->regarray.dstsrc.offset = fsrc->offset / 8;
5483 /* REGADD_RHI, REGADD_RMI. */
5484 if (fidx && !fsrc) {
5485 src_val = strtoull(src, &src, 0);
5486 CHECK(!src[0], EINVAL);
5488 instr->type = INSTR_REGADD_RMI;
5490 instr->type = INSTR_REGADD_RHI;
5492 instr->regarray.regarray_id = r->id;
5493 instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
5494 instr->regarray.idx.n_bits = fidx->n_bits;
5495 instr->regarray.idx.offset = fidx->offset / 8;
5496 instr->regarray.dstsrc_val = src_val;
5500 /* REGADD_RIH, REGADD_RIM. */
5501 if (!fidx && fsrc) {
5502 idx_val = strtoul(idx, &idx, 0);
5503 CHECK(!idx[0], EINVAL);
5505 instr->type = INSTR_REGADD_RIM;
5507 instr->type = INSTR_REGADD_RIH;
5509 instr->regarray.regarray_id = r->id;
5510 instr->regarray.idx_val = idx_val;
5511 instr->regarray.dstsrc.struct_id = (uint8_t)src_struct_id;
5512 instr->regarray.dstsrc.n_bits = fsrc->n_bits;
5513 instr->regarray.dstsrc.offset = fsrc->offset / 8;
5518 src_val = strtoull(src, &src, 0);
5519 CHECK(!src[0], EINVAL);
5521 idx_val = strtoul(idx, &idx, 0);
5522 CHECK(!idx[0], EINVAL);
5524 instr->type = INSTR_REGADD_RII;
5525 instr->regarray.idx_val = idx_val;
5526 instr->regarray.dstsrc_val = src_val;
5530 static inline uint64_t *
5531 instr_regarray_regarray(struct rte_swx_pipeline *p, struct instruction *ip)
5533 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
5537 static inline uint64_t
5538 instr_regarray_idx_hbo(struct rte_swx_pipeline *p, struct thread *t, struct instruction *ip)
5540 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
5542 uint8_t *idx_struct = t->structs[ip->regarray.idx.struct_id];
5543 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->regarray.idx.offset];
5544 uint64_t idx64 = *idx64_ptr;
5545 uint64_t idx64_mask = UINT64_MAX >> (64 - ip->regarray.idx.n_bits);
5546 uint64_t idx = idx64 & idx64_mask & r->size_mask;
5551 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
5553 static inline uint64_t
5554 instr_regarray_idx_nbo(struct rte_swx_pipeline *p, struct thread *t, struct instruction *ip)
5556 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
5558 uint8_t *idx_struct = t->structs[ip->regarray.idx.struct_id];
5559 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->regarray.idx.offset];
5560 uint64_t idx64 = *idx64_ptr;
5561 uint64_t idx = (ntoh64(idx64) >> (64 - ip->regarray.idx.n_bits)) & r->size_mask;
5568 #define instr_regarray_idx_nbo instr_regarray_idx_hbo
5572 static inline uint64_t
5573 instr_regarray_idx_imm(struct rte_swx_pipeline *p, struct instruction *ip)
5575 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
5577 uint64_t idx = ip->regarray.idx_val & r->size_mask;
5582 static inline uint64_t
5583 instr_regarray_src_hbo(struct thread *t, struct instruction *ip)
5585 uint8_t *src_struct = t->structs[ip->regarray.dstsrc.struct_id];
5586 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->regarray.dstsrc.offset];
5587 uint64_t src64 = *src64_ptr;
5588 uint64_t src64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
5589 uint64_t src = src64 & src64_mask;
5594 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
5596 static inline uint64_t
5597 instr_regarray_src_nbo(struct thread *t, struct instruction *ip)
5599 uint8_t *src_struct = t->structs[ip->regarray.dstsrc.struct_id];
5600 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->regarray.dstsrc.offset];
5601 uint64_t src64 = *src64_ptr;
5602 uint64_t src = ntoh64(src64) >> (64 - ip->regarray.dstsrc.n_bits);
5609 #define instr_regarray_src_nbo instr_regarray_src_hbo
5614 instr_regarray_dst_hbo_src_hbo_set(struct thread *t, struct instruction *ip, uint64_t src)
5616 uint8_t *dst_struct = t->structs[ip->regarray.dstsrc.struct_id];
5617 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->regarray.dstsrc.offset];
5618 uint64_t dst64 = *dst64_ptr;
5619 uint64_t dst64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
5621 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
5625 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
5628 instr_regarray_dst_nbo_src_hbo_set(struct thread *t, struct instruction *ip, uint64_t src)
5630 uint8_t *dst_struct = t->structs[ip->regarray.dstsrc.struct_id];
5631 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->regarray.dstsrc.offset];
5632 uint64_t dst64 = *dst64_ptr;
5633 uint64_t dst64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
5635 src = hton64(src) >> (64 - ip->regarray.dstsrc.n_bits);
5636 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
5641 #define instr_regarray_dst_nbo_src_hbo_set instr_regarray_dst_hbo_src_hbo_set
5646 instr_regprefetch_rh_exec(struct rte_swx_pipeline *p)
5648 struct thread *t = &p->threads[p->thread_id];
5649 struct instruction *ip = t->ip;
5650 uint64_t *regarray, idx;
5652 TRACE("[Thread %2u] regprefetch (r[h])\n", p->thread_id);
5655 regarray = instr_regarray_regarray(p, ip);
5656 idx = instr_regarray_idx_nbo(p, t, ip);
5657 rte_prefetch0(®array[idx]);
5664 instr_regprefetch_rm_exec(struct rte_swx_pipeline *p)
5666 struct thread *t = &p->threads[p->thread_id];
5667 struct instruction *ip = t->ip;
5668 uint64_t *regarray, idx;
5670 TRACE("[Thread %2u] regprefetch (r[m])\n", p->thread_id);
5673 regarray = instr_regarray_regarray(p, ip);
5674 idx = instr_regarray_idx_hbo(p, t, ip);
5675 rte_prefetch0(®array[idx]);
5682 instr_regprefetch_ri_exec(struct rte_swx_pipeline *p)
5684 struct thread *t = &p->threads[p->thread_id];
5685 struct instruction *ip = t->ip;
5686 uint64_t *regarray, idx;
5688 TRACE("[Thread %2u] regprefetch (r[i])\n", p->thread_id);
5691 regarray = instr_regarray_regarray(p, ip);
5692 idx = instr_regarray_idx_imm(p, ip);
5693 rte_prefetch0(®array[idx]);
5700 instr_regrd_hrh_exec(struct rte_swx_pipeline *p)
5702 struct thread *t = &p->threads[p->thread_id];
5703 struct instruction *ip = t->ip;
5704 uint64_t *regarray, idx;
5706 TRACE("[Thread %2u] regrd (h = r[h])\n", p->thread_id);
5709 regarray = instr_regarray_regarray(p, ip);
5710 idx = instr_regarray_idx_nbo(p, t, ip);
5711 instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
5718 instr_regrd_hrm_exec(struct rte_swx_pipeline *p)
5720 struct thread *t = &p->threads[p->thread_id];
5721 struct instruction *ip = t->ip;
5722 uint64_t *regarray, idx;
5724 TRACE("[Thread %2u] regrd (h = r[m])\n", p->thread_id);
5727 regarray = instr_regarray_regarray(p, ip);
5728 idx = instr_regarray_idx_hbo(p, t, ip);
5729 instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
5736 instr_regrd_mrh_exec(struct rte_swx_pipeline *p)
5738 struct thread *t = &p->threads[p->thread_id];
5739 struct instruction *ip = t->ip;
5740 uint64_t *regarray, idx;
5742 TRACE("[Thread %2u] regrd (m = r[h])\n", p->thread_id);
5745 regarray = instr_regarray_regarray(p, ip);
5746 idx = instr_regarray_idx_nbo(p, t, ip);
5747 instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
5754 instr_regrd_mrm_exec(struct rte_swx_pipeline *p)
5756 struct thread *t = &p->threads[p->thread_id];
5757 struct instruction *ip = t->ip;
5758 uint64_t *regarray, idx;
5761 regarray = instr_regarray_regarray(p, ip);
5762 idx = instr_regarray_idx_hbo(p, t, ip);
5763 instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
5770 instr_regrd_hri_exec(struct rte_swx_pipeline *p)
5772 struct thread *t = &p->threads[p->thread_id];
5773 struct instruction *ip = t->ip;
5774 uint64_t *regarray, idx;
5776 TRACE("[Thread %2u] regrd (h = r[i])\n", p->thread_id);
5779 regarray = instr_regarray_regarray(p, ip);
5780 idx = instr_regarray_idx_imm(p, ip);
5781 instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
5788 instr_regrd_mri_exec(struct rte_swx_pipeline *p)
5790 struct thread *t = &p->threads[p->thread_id];
5791 struct instruction *ip = t->ip;
5792 uint64_t *regarray, idx;
5794 TRACE("[Thread %2u] regrd (m = r[i])\n", p->thread_id);
5797 regarray = instr_regarray_regarray(p, ip);
5798 idx = instr_regarray_idx_imm(p, ip);
5799 instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
5806 instr_regwr_rhh_exec(struct rte_swx_pipeline *p)
5808 struct thread *t = &p->threads[p->thread_id];
5809 struct instruction *ip = t->ip;
5810 uint64_t *regarray, idx, src;
5812 TRACE("[Thread %2u] regwr (r[h] = h)\n", p->thread_id);
5815 regarray = instr_regarray_regarray(p, ip);
5816 idx = instr_regarray_idx_nbo(p, t, ip);
5817 src = instr_regarray_src_nbo(t, ip);
5818 regarray[idx] = src;
5825 instr_regwr_rhm_exec(struct rte_swx_pipeline *p)
5827 struct thread *t = &p->threads[p->thread_id];
5828 struct instruction *ip = t->ip;
5829 uint64_t *regarray, idx, src;
5831 TRACE("[Thread %2u] regwr (r[h] = m)\n", p->thread_id);
5834 regarray = instr_regarray_regarray(p, ip);
5835 idx = instr_regarray_idx_nbo(p, t, ip);
5836 src = instr_regarray_src_hbo(t, ip);
5837 regarray[idx] = src;
5844 instr_regwr_rmh_exec(struct rte_swx_pipeline *p)
5846 struct thread *t = &p->threads[p->thread_id];
5847 struct instruction *ip = t->ip;
5848 uint64_t *regarray, idx, src;
5850 TRACE("[Thread %2u] regwr (r[m] = h)\n", p->thread_id);
5853 regarray = instr_regarray_regarray(p, ip);
5854 idx = instr_regarray_idx_hbo(p, t, ip);
5855 src = instr_regarray_src_nbo(t, ip);
5856 regarray[idx] = src;
5863 instr_regwr_rmm_exec(struct rte_swx_pipeline *p)
5865 struct thread *t = &p->threads[p->thread_id];
5866 struct instruction *ip = t->ip;
5867 uint64_t *regarray, idx, src;
5869 TRACE("[Thread %2u] regwr (r[m] = m)\n", p->thread_id);
5872 regarray = instr_regarray_regarray(p, ip);
5873 idx = instr_regarray_idx_hbo(p, t, ip);
5874 src = instr_regarray_src_hbo(t, ip);
5875 regarray[idx] = src;
5882 instr_regwr_rhi_exec(struct rte_swx_pipeline *p)
5884 struct thread *t = &p->threads[p->thread_id];
5885 struct instruction *ip = t->ip;
5886 uint64_t *regarray, idx, src;
5888 TRACE("[Thread %2u] regwr (r[h] = i)\n", p->thread_id);
5891 regarray = instr_regarray_regarray(p, ip);
5892 idx = instr_regarray_idx_nbo(p, t, ip);
5893 src = ip->regarray.dstsrc_val;
5894 regarray[idx] = src;
5901 instr_regwr_rmi_exec(struct rte_swx_pipeline *p)
5903 struct thread *t = &p->threads[p->thread_id];
5904 struct instruction *ip = t->ip;
5905 uint64_t *regarray, idx, src;
5907 TRACE("[Thread %2u] regwr (r[m] = i)\n", p->thread_id);
5910 regarray = instr_regarray_regarray(p, ip);
5911 idx = instr_regarray_idx_hbo(p, t, ip);
5912 src = ip->regarray.dstsrc_val;
5913 regarray[idx] = src;
5920 instr_regwr_rih_exec(struct rte_swx_pipeline *p)
5922 struct thread *t = &p->threads[p->thread_id];
5923 struct instruction *ip = t->ip;
5924 uint64_t *regarray, idx, src;
5926 TRACE("[Thread %2u] regwr (r[i] = h)\n", p->thread_id);
5929 regarray = instr_regarray_regarray(p, ip);
5930 idx = instr_regarray_idx_imm(p, ip);
5931 src = instr_regarray_src_nbo(t, ip);
5932 regarray[idx] = src;
5939 instr_regwr_rim_exec(struct rte_swx_pipeline *p)
5941 struct thread *t = &p->threads[p->thread_id];
5942 struct instruction *ip = t->ip;
5943 uint64_t *regarray, idx, src;
5945 TRACE("[Thread %2u] regwr (r[i] = m)\n", p->thread_id);
5948 regarray = instr_regarray_regarray(p, ip);
5949 idx = instr_regarray_idx_imm(p, ip);
5950 src = instr_regarray_src_hbo(t, ip);
5951 regarray[idx] = src;
5958 instr_regwr_rii_exec(struct rte_swx_pipeline *p)
5960 struct thread *t = &p->threads[p->thread_id];
5961 struct instruction *ip = t->ip;
5962 uint64_t *regarray, idx, src;
5964 TRACE("[Thread %2u] regwr (r[i] = i)\n", p->thread_id);
5967 regarray = instr_regarray_regarray(p, ip);
5968 idx = instr_regarray_idx_imm(p, ip);
5969 src = ip->regarray.dstsrc_val;
5970 regarray[idx] = src;
5977 instr_regadd_rhh_exec(struct rte_swx_pipeline *p)
5979 struct thread *t = &p->threads[p->thread_id];
5980 struct instruction *ip = t->ip;
5981 uint64_t *regarray, idx, src;
5983 TRACE("[Thread %2u] regadd (r[h] += h)\n", p->thread_id);
5986 regarray = instr_regarray_regarray(p, ip);
5987 idx = instr_regarray_idx_nbo(p, t, ip);
5988 src = instr_regarray_src_nbo(t, ip);
5989 regarray[idx] += src;
5996 instr_regadd_rhm_exec(struct rte_swx_pipeline *p)
5998 struct thread *t = &p->threads[p->thread_id];
5999 struct instruction *ip = t->ip;
6000 uint64_t *regarray, idx, src;
6002 TRACE("[Thread %2u] regadd (r[h] += m)\n", p->thread_id);
6005 regarray = instr_regarray_regarray(p, ip);
6006 idx = instr_regarray_idx_nbo(p, t, ip);
6007 src = instr_regarray_src_hbo(t, ip);
6008 regarray[idx] += src;
6015 instr_regadd_rmh_exec(struct rte_swx_pipeline *p)
6017 struct thread *t = &p->threads[p->thread_id];
6018 struct instruction *ip = t->ip;
6019 uint64_t *regarray, idx, src;
6021 TRACE("[Thread %2u] regadd (r[m] += h)\n", p->thread_id);
6024 regarray = instr_regarray_regarray(p, ip);
6025 idx = instr_regarray_idx_hbo(p, t, ip);
6026 src = instr_regarray_src_nbo(t, ip);
6027 regarray[idx] += src;
6034 instr_regadd_rmm_exec(struct rte_swx_pipeline *p)
6036 struct thread *t = &p->threads[p->thread_id];
6037 struct instruction *ip = t->ip;
6038 uint64_t *regarray, idx, src;
6040 TRACE("[Thread %2u] regadd (r[m] += m)\n", p->thread_id);
6043 regarray = instr_regarray_regarray(p, ip);
6044 idx = instr_regarray_idx_hbo(p, t, ip);
6045 src = instr_regarray_src_hbo(t, ip);
6046 regarray[idx] += src;
6053 instr_regadd_rhi_exec(struct rte_swx_pipeline *p)
6055 struct thread *t = &p->threads[p->thread_id];
6056 struct instruction *ip = t->ip;
6057 uint64_t *regarray, idx, src;
6059 TRACE("[Thread %2u] regadd (r[h] += i)\n", p->thread_id);
6062 regarray = instr_regarray_regarray(p, ip);
6063 idx = instr_regarray_idx_nbo(p, t, ip);
6064 src = ip->regarray.dstsrc_val;
6065 regarray[idx] += src;
6072 instr_regadd_rmi_exec(struct rte_swx_pipeline *p)
6074 struct thread *t = &p->threads[p->thread_id];
6075 struct instruction *ip = t->ip;
6076 uint64_t *regarray, idx, src;
6078 TRACE("[Thread %2u] regadd (r[m] += i)\n", p->thread_id);
6081 regarray = instr_regarray_regarray(p, ip);
6082 idx = instr_regarray_idx_hbo(p, t, ip);
6083 src = ip->regarray.dstsrc_val;
6084 regarray[idx] += src;
6091 instr_regadd_rih_exec(struct rte_swx_pipeline *p)
6093 struct thread *t = &p->threads[p->thread_id];
6094 struct instruction *ip = t->ip;
6095 uint64_t *regarray, idx, src;
6097 TRACE("[Thread %2u] regadd (r[i] += h)\n", p->thread_id);
6100 regarray = instr_regarray_regarray(p, ip);
6101 idx = instr_regarray_idx_imm(p, ip);
6102 src = instr_regarray_src_nbo(t, ip);
6103 regarray[idx] += src;
6110 instr_regadd_rim_exec(struct rte_swx_pipeline *p)
6112 struct thread *t = &p->threads[p->thread_id];
6113 struct instruction *ip = t->ip;
6114 uint64_t *regarray, idx, src;
6116 TRACE("[Thread %2u] regadd (r[i] += m)\n", p->thread_id);
6119 regarray = instr_regarray_regarray(p, ip);
6120 idx = instr_regarray_idx_imm(p, ip);
6121 src = instr_regarray_src_hbo(t, ip);
6122 regarray[idx] += src;
6129 instr_regadd_rii_exec(struct rte_swx_pipeline *p)
6131 struct thread *t = &p->threads[p->thread_id];
6132 struct instruction *ip = t->ip;
6133 uint64_t *regarray, idx, src;
6135 TRACE("[Thread %2u] regadd (r[i] += i)\n", p->thread_id);
6138 regarray = instr_regarray_regarray(p, ip);
6139 idx = instr_regarray_idx_imm(p, ip);
6140 src = ip->regarray.dstsrc_val;
6141 regarray[idx] += src;
/* Forward declaration: lookup of a meter array by name, defined later in this
 * file; needed by the meter instruction translators below.
 */
static struct metarray *
metarray_find(struct rte_swx_pipeline *p, const char *name);
6154 instr_metprefetch_translate(struct rte_swx_pipeline *p,
6155 struct action *action,
6158 struct instruction *instr,
6159 struct instruction_data *data __rte_unused)
6161 char *metarray = tokens[1], *idx = tokens[2];
6164 uint32_t idx_struct_id, idx_val;
6166 CHECK(n_tokens == 3, EINVAL);
6168 m = metarray_find(p, metarray);
6171 /* METPREFETCH_H, METPREFETCH_M. */
6172 fidx = struct_field_parse(p, action, idx, &idx_struct_id);
6174 instr->type = INSTR_METPREFETCH_M;
6176 instr->type = INSTR_METPREFETCH_H;
6178 instr->meter.metarray_id = m->id;
6179 instr->meter.idx.struct_id = (uint8_t)idx_struct_id;
6180 instr->meter.idx.n_bits = fidx->n_bits;
6181 instr->meter.idx.offset = fidx->offset / 8;
6185 /* METPREFETCH_I. */
6186 idx_val = strtoul(idx, &idx, 0);
6187 CHECK(!idx[0], EINVAL);
6189 instr->type = INSTR_METPREFETCH_I;
6190 instr->meter.metarray_id = m->id;
6191 instr->meter.idx_val = idx_val;
6196 instr_meter_translate(struct rte_swx_pipeline *p,
6197 struct action *action,
6200 struct instruction *instr,
6201 struct instruction_data *data __rte_unused)
6203 char *metarray = tokens[1], *idx = tokens[2], *length = tokens[3];
6204 char *color_in = tokens[4], *color_out = tokens[5];
6206 struct field *fidx, *flength, *fcin, *fcout;
6207 uint32_t idx_struct_id, length_struct_id;
6208 uint32_t color_in_struct_id, color_out_struct_id;
6210 CHECK(n_tokens == 6, EINVAL);
6212 m = metarray_find(p, metarray);
6215 fidx = struct_field_parse(p, action, idx, &idx_struct_id);
6217 flength = struct_field_parse(p, action, length, &length_struct_id);
6218 CHECK(flength, EINVAL);
6220 fcin = struct_field_parse(p, action, color_in, &color_in_struct_id);
6222 fcout = struct_field_parse(p, NULL, color_out, &color_out_struct_id);
6223 CHECK(fcout, EINVAL);
6225 /* index = HMEFT, length = HMEFT, color_in = MEFT, color_out = MEF. */
6227 instr->type = INSTR_METER_MMM;
6228 if (idx[0] == 'h' && length[0] == 'h')
6229 instr->type = INSTR_METER_HHM;
6230 if (idx[0] == 'h' && length[0] != 'h')
6231 instr->type = INSTR_METER_HMM;
6232 if (idx[0] != 'h' && length[0] == 'h')
6233 instr->type = INSTR_METER_MHM;
6235 instr->meter.metarray_id = m->id;
6237 instr->meter.idx.struct_id = (uint8_t)idx_struct_id;
6238 instr->meter.idx.n_bits = fidx->n_bits;
6239 instr->meter.idx.offset = fidx->offset / 8;
6241 instr->meter.length.struct_id = (uint8_t)length_struct_id;
6242 instr->meter.length.n_bits = flength->n_bits;
6243 instr->meter.length.offset = flength->offset / 8;
6245 instr->meter.color_in.struct_id = (uint8_t)color_in_struct_id;
6246 instr->meter.color_in.n_bits = fcin->n_bits;
6247 instr->meter.color_in.offset = fcin->offset / 8;
6249 instr->meter.color_out.struct_id = (uint8_t)color_out_struct_id;
6250 instr->meter.color_out.n_bits = fcout->n_bits;
6251 instr->meter.color_out.offset = fcout->offset / 8;
6256 /* index = HMEFT, length = HMEFT, color_in = I, color_out = MEF. */
6257 if (fidx && !fcin) {
6258 uint32_t color_in_val = strtoul(color_in, &color_in, 0);
6259 CHECK(!color_in[0], EINVAL);
6261 instr->type = INSTR_METER_MMI;
6262 if (idx[0] == 'h' && length[0] == 'h')
6263 instr->type = INSTR_METER_HHI;
6264 if (idx[0] == 'h' && length[0] != 'h')
6265 instr->type = INSTR_METER_HMI;
6266 if (idx[0] != 'h' && length[0] == 'h')
6267 instr->type = INSTR_METER_MHI;
6269 instr->meter.metarray_id = m->id;
6271 instr->meter.idx.struct_id = (uint8_t)idx_struct_id;
6272 instr->meter.idx.n_bits = fidx->n_bits;
6273 instr->meter.idx.offset = fidx->offset / 8;
6275 instr->meter.length.struct_id = (uint8_t)length_struct_id;
6276 instr->meter.length.n_bits = flength->n_bits;
6277 instr->meter.length.offset = flength->offset / 8;
6279 instr->meter.color_in_val = color_in_val;
6281 instr->meter.color_out.struct_id = (uint8_t)color_out_struct_id;
6282 instr->meter.color_out.n_bits = fcout->n_bits;
6283 instr->meter.color_out.offset = fcout->offset / 8;
6288 /* index = I, length = HMEFT, color_in = MEFT, color_out = MEF. */
6289 if (!fidx && fcin) {
6292 idx_val = strtoul(idx, &idx, 0);
6293 CHECK(!idx[0], EINVAL);
6295 instr->type = INSTR_METER_IMM;
6296 if (length[0] == 'h')
6297 instr->type = INSTR_METER_IHM;
6299 instr->meter.metarray_id = m->id;
6301 instr->meter.idx_val = idx_val;
6303 instr->meter.length.struct_id = (uint8_t)length_struct_id;
6304 instr->meter.length.n_bits = flength->n_bits;
6305 instr->meter.length.offset = flength->offset / 8;
6307 instr->meter.color_in.struct_id = (uint8_t)color_in_struct_id;
6308 instr->meter.color_in.n_bits = fcin->n_bits;
6309 instr->meter.color_in.offset = fcin->offset / 8;
6311 instr->meter.color_out.struct_id = (uint8_t)color_out_struct_id;
6312 instr->meter.color_out.n_bits = fcout->n_bits;
6313 instr->meter.color_out.offset = fcout->offset / 8;
6318 /* index = I, length = HMEFT, color_in = I, color_out = MEF. */
6319 if (!fidx && !fcin) {
6320 uint32_t idx_val, color_in_val;
6322 idx_val = strtoul(idx, &idx, 0);
6323 CHECK(!idx[0], EINVAL);
6325 color_in_val = strtoul(color_in, &color_in, 0);
6326 CHECK(!color_in[0], EINVAL);
6328 instr->type = INSTR_METER_IMI;
6329 if (length[0] == 'h')
6330 instr->type = INSTR_METER_IHI;
6332 instr->meter.metarray_id = m->id;
6334 instr->meter.idx_val = idx_val;
6336 instr->meter.length.struct_id = (uint8_t)length_struct_id;
6337 instr->meter.length.n_bits = flength->n_bits;
6338 instr->meter.length.offset = flength->offset / 8;
6340 instr->meter.color_in_val = color_in_val;
6342 instr->meter.color_out.struct_id = (uint8_t)color_out_struct_id;
6343 instr->meter.color_out.n_bits = fcout->n_bits;
6344 instr->meter.color_out.offset = fcout->offset / 8;
/* Resolve the meter array entry addressed by a host-byte-order field.
 * The field value is masked to its declared bit width, then clamped to the
 * array size with r->size_mask (size is a power of two).
 */
6352 static inline struct meter *
6353 instr_meter_idx_hbo(struct rte_swx_pipeline *p, struct thread *t, struct instruction *ip)
6355 struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
6357 uint8_t *idx_struct = t->structs[ip->meter.idx.struct_id];
6358 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->meter.idx.offset];
6359 uint64_t idx64 = *idx64_ptr;
6360 uint64_t idx64_mask = UINT64_MAX >> (64 - (ip)->meter.idx.n_bits);
6361 uint64_t idx = idx64 & idx64_mask & r->size_mask;
6363 return &r->metarray[idx];
/* Network-byte-order variant: only needed on little endian CPUs, where the
 * header field must be byte-swapped and right-aligned before use. On big
 * endian hosts the HBO routine already reads the correct value.
 */
6366 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
6368 static inline struct meter *
6369 instr_meter_idx_nbo(struct rte_swx_pipeline *p, struct thread *t, struct instruction *ip)
6371 struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
6373 uint8_t *idx_struct = t->structs[ip->meter.idx.struct_id];
6374 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->meter.idx.offset];
6375 uint64_t idx64 = *idx64_ptr;
6376 uint64_t idx = (ntoh64(idx64) >> (64 - ip->meter.idx.n_bits)) & r->size_mask;
6378 return &r->metarray[idx];
6383 #define instr_meter_idx_nbo instr_meter_idx_hbo
/* Immediate-index variant: index encoded in the instruction itself. */
6387 static inline struct meter *
6388 instr_meter_idx_imm(struct rte_swx_pipeline *p, struct instruction *ip)
6390 struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
6392 uint64_t idx = ip->meter.idx_val & r->size_mask;
6394 return &r->metarray[idx];
/* Read the packet length operand from a host-byte-order field, masked to
 * the field's declared bit width.
 */
6397 static inline uint32_t
6398 instr_meter_length_hbo(struct thread *t, struct instruction *ip)
6400 uint8_t *src_struct = t->structs[ip->meter.length.struct_id];
6401 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.length.offset];
6402 uint64_t src64 = *src64_ptr;
6403 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->meter.length.n_bits);
6404 uint64_t src = src64 & src64_mask;
6406 return (uint32_t)src;
/* Network-byte-order length read: byte-swap then right-align. Compiled only
 * on little endian; big endian aliases it to the HBO routine.
 */
6409 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
6411 static inline uint32_t
6412 instr_meter_length_nbo(struct thread *t, struct instruction *ip)
6414 uint8_t *src_struct = t->structs[ip->meter.length.struct_id];
6415 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.length.offset];
6416 uint64_t src64 = *src64_ptr;
6417 uint64_t src = ntoh64(src64) >> (64 - ip->meter.length.n_bits);
6419 return (uint32_t)src;
6424 #define instr_meter_length_nbo instr_meter_length_hbo
/* Read the input color operand from a host-byte-order field. */
6428 static inline enum rte_color
6429 instr_meter_color_in_hbo(struct thread *t, struct instruction *ip)
6431 uint8_t *src_struct = t->structs[ip->meter.color_in.struct_id];
6432 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.color_in.offset];
6433 uint64_t src64 = *src64_ptr;
6434 uint64_t src64_mask = UINT64_MAX >> (64 - ip->meter.color_in.n_bits);
6435 uint64_t src = src64 & src64_mask;
6437 return (enum rte_color)src;
/* Write the output color into its destination field via read-modify-write,
 * so the bits outside the field's width are preserved.
 */
6441 instr_meter_color_out_hbo_set(struct thread *t, struct instruction *ip, enum rte_color color_out)
6443 uint8_t *dst_struct = t->structs[ip->meter.color_out.struct_id];
6444 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->meter.color_out.offset];
6445 uint64_t dst64 = *dst64_ptr;
6446 uint64_t dst64_mask = UINT64_MAX >> (64 - ip->meter.color_out.n_bits);
6448 uint64_t src = (uint64_t)color_out;
6450 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
/* metprefetch: resolve the meter entry and issue a cache prefetch so the
 * later "meter" instruction hits warm cache. Variants differ only in how
 * the index is obtained: (h) NBO header field, (m) HBO field, (i) immediate.
 */
6454 instr_metprefetch_h_exec(struct rte_swx_pipeline *p)
6456 struct thread *t = &p->threads[p->thread_id];
6457 struct instruction *ip = t->ip;
6460 TRACE("[Thread %2u] metprefetch (h)\n", p->thread_id);
6463 m = instr_meter_idx_nbo(p, t, ip);
/* Index from a host-byte-order (meta/extern/table) field. */
6471 instr_metprefetch_m_exec(struct rte_swx_pipeline *p)
6473 struct thread *t = &p->threads[p->thread_id];
6474 struct instruction *ip = t->ip;
6477 TRACE("[Thread %2u] metprefetch (m)\n", p->thread_id);
6480 m = instr_meter_idx_hbo(p, t, ip);
/* Index from an immediate encoded in the instruction. */
6488 instr_metprefetch_i_exec(struct rte_swx_pipeline *p)
6490 struct thread *t = &p->threads[p->thread_id];
6491 struct instruction *ip = t->ip;
6494 TRACE("[Thread %2u] metprefetch (i)\n", p->thread_id);
6497 m = instr_meter_idx_imm(p, ip);
/* "meter" execution, header-field (NBO) index variants. Common flow:
 * resolve meter entry, prefetch its stats, run the color-aware trTCM check,
 * clamp the result with the per-meter color mask, update the per-color
 * packet/byte statistics and write the output color back.
 * Suffix: index/length/color_in sources (h = header, m = meta, i = imm).
 */
6505 instr_meter_hhm_exec(struct rte_swx_pipeline *p)
6507 struct thread *t = &p->threads[p->thread_id];
6508 struct instruction *ip = t->ip;
6510 uint64_t time, n_pkts, n_bytes;
6512 enum rte_color color_in, color_out;
6514 TRACE("[Thread %2u] meter (hhm)\n", p->thread_id);
6517 m = instr_meter_idx_nbo(p, t, ip);
6518 rte_prefetch0(m->n_pkts);
6519 time = rte_get_tsc_cycles();
6520 length = instr_meter_length_nbo(t, ip);
6521 color_in = instr_meter_color_in_hbo(t, ip);
6523 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6524 &m->profile->profile,
	/* color_mask forces color-blind behavior when configured that way. */
6529 color_out &= m->color_mask;
6531 n_pkts = m->n_pkts[color_out];
6532 n_bytes = m->n_bytes[color_out];
6534 instr_meter_color_out_hbo_set(t, ip, color_out);
6536 m->n_pkts[color_out] = n_pkts + 1;
6537 m->n_bytes[color_out] = n_bytes + length;
/* hhi: as hhm, but color_in is an instruction immediate. */
6544 instr_meter_hhi_exec(struct rte_swx_pipeline *p)
6546 struct thread *t = &p->threads[p->thread_id];
6547 struct instruction *ip = t->ip;
6549 uint64_t time, n_pkts, n_bytes;
6551 enum rte_color color_in, color_out;
6553 TRACE("[Thread %2u] meter (hhi)\n", p->thread_id);
6556 m = instr_meter_idx_nbo(p, t, ip);
6557 rte_prefetch0(m->n_pkts);
6558 time = rte_get_tsc_cycles();
6559 length = instr_meter_length_nbo(t, ip);
6560 color_in = (enum rte_color)ip->meter.color_in_val;
6562 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6563 &m->profile->profile,
6568 color_out &= m->color_mask;
6570 n_pkts = m->n_pkts[color_out];
6571 n_bytes = m->n_bytes[color_out];
6573 instr_meter_color_out_hbo_set(t, ip, color_out);
6575 m->n_pkts[color_out] = n_pkts + 1;
6576 m->n_bytes[color_out] = n_bytes + length;
/* hmm: header index, meta (HBO) length, meta color_in. */
6583 instr_meter_hmm_exec(struct rte_swx_pipeline *p)
6585 struct thread *t = &p->threads[p->thread_id];
6586 struct instruction *ip = t->ip;
6588 uint64_t time, n_pkts, n_bytes;
6590 enum rte_color color_in, color_out;
6592 TRACE("[Thread %2u] meter (hmm)\n", p->thread_id);
6595 m = instr_meter_idx_nbo(p, t, ip);
6596 rte_prefetch0(m->n_pkts);
6597 time = rte_get_tsc_cycles();
6598 length = instr_meter_length_hbo(t, ip);
6599 color_in = instr_meter_color_in_hbo(t, ip);
6601 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6602 &m->profile->profile,
6607 color_out &= m->color_mask;
6609 n_pkts = m->n_pkts[color_out];
6610 n_bytes = m->n_bytes[color_out];
6612 instr_meter_color_out_hbo_set(t, ip, color_out);
6614 m->n_pkts[color_out] = n_pkts + 1;
6615 m->n_bytes[color_out] = n_bytes + length;
/* hmi: header index, meta length, immediate color_in. */
6621 instr_meter_hmi_exec(struct rte_swx_pipeline *p)
6623 struct thread *t = &p->threads[p->thread_id];
6624 struct instruction *ip = t->ip;
6626 uint64_t time, n_pkts, n_bytes;
6628 enum rte_color color_in, color_out;
6630 TRACE("[Thread %2u] meter (hmi)\n", p->thread_id);
6633 m = instr_meter_idx_nbo(p, t, ip);
6634 rte_prefetch0(m->n_pkts);
6635 time = rte_get_tsc_cycles();
6636 length = instr_meter_length_hbo(t, ip);
6637 color_in = (enum rte_color)ip->meter.color_in_val;
6639 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6640 &m->profile->profile,
6645 color_out &= m->color_mask;
6647 n_pkts = m->n_pkts[color_out];
6648 n_bytes = m->n_bytes[color_out];
6650 instr_meter_color_out_hbo_set(t, ip, color_out);
6652 m->n_pkts[color_out] = n_pkts + 1;
6653 m->n_bytes[color_out] = n_bytes + length;
/* "meter" execution, meta-field (HBO) index variants. Same flow as the
 * header-index family above; only the operand sources differ.
 */
6660 instr_meter_mhm_exec(struct rte_swx_pipeline *p)
6662 struct thread *t = &p->threads[p->thread_id];
6663 struct instruction *ip = t->ip;
6665 uint64_t time, n_pkts, n_bytes;
6667 enum rte_color color_in, color_out;
6669 TRACE("[Thread %2u] meter (mhm)\n", p->thread_id);
6672 m = instr_meter_idx_hbo(p, t, ip);
6673 rte_prefetch0(m->n_pkts);
6674 time = rte_get_tsc_cycles();
6675 length = instr_meter_length_nbo(t, ip);
6676 color_in = instr_meter_color_in_hbo(t, ip);
6678 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6679 &m->profile->profile,
6684 color_out &= m->color_mask;
6686 n_pkts = m->n_pkts[color_out];
6687 n_bytes = m->n_bytes[color_out];
6689 instr_meter_color_out_hbo_set(t, ip, color_out);
6691 m->n_pkts[color_out] = n_pkts + 1;
6692 m->n_bytes[color_out] = n_bytes + length;
/* mhi: meta index, header length, immediate color_in. */
6699 instr_meter_mhi_exec(struct rte_swx_pipeline *p)
6701 struct thread *t = &p->threads[p->thread_id];
6702 struct instruction *ip = t->ip;
6704 uint64_t time, n_pkts, n_bytes;
6706 enum rte_color color_in, color_out;
6708 TRACE("[Thread %2u] meter (mhi)\n", p->thread_id);
6711 m = instr_meter_idx_hbo(p, t, ip);
6712 rte_prefetch0(m->n_pkts);
6713 time = rte_get_tsc_cycles();
6714 length = instr_meter_length_nbo(t, ip);
6715 color_in = (enum rte_color)ip->meter.color_in_val;
6717 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6718 &m->profile->profile,
6723 color_out &= m->color_mask;
6725 n_pkts = m->n_pkts[color_out];
6726 n_bytes = m->n_bytes[color_out];
6728 instr_meter_color_out_hbo_set(t, ip, color_out);
6730 m->n_pkts[color_out] = n_pkts + 1;
6731 m->n_bytes[color_out] = n_bytes + length;
/* mmm: meta index, meta length, meta color_in. */
6738 instr_meter_mmm_exec(struct rte_swx_pipeline *p)
6740 struct thread *t = &p->threads[p->thread_id];
6741 struct instruction *ip = t->ip;
6743 uint64_t time, n_pkts, n_bytes;
6745 enum rte_color color_in, color_out;
6747 TRACE("[Thread %2u] meter (mmm)\n", p->thread_id);
6750 m = instr_meter_idx_hbo(p, t, ip);
6751 rte_prefetch0(m->n_pkts);
6752 time = rte_get_tsc_cycles();
6753 length = instr_meter_length_hbo(t, ip);
6754 color_in = instr_meter_color_in_hbo(t, ip);
6756 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6757 &m->profile->profile,
6762 color_out &= m->color_mask;
6764 n_pkts = m->n_pkts[color_out];
6765 n_bytes = m->n_bytes[color_out];
6767 instr_meter_color_out_hbo_set(t, ip, color_out);
6769 m->n_pkts[color_out] = n_pkts + 1;
6770 m->n_bytes[color_out] = n_bytes + length;
/* mmi: meta index, meta length, immediate color_in. */
6777 instr_meter_mmi_exec(struct rte_swx_pipeline *p)
6779 struct thread *t = &p->threads[p->thread_id];
6780 struct instruction *ip = t->ip;
6782 uint64_t time, n_pkts, n_bytes;
6784 enum rte_color color_in, color_out;
6786 TRACE("[Thread %2u] meter (mmi)\n", p->thread_id);
6789 m = instr_meter_idx_hbo(p, t, ip);
6790 rte_prefetch0(m->n_pkts);
6791 time = rte_get_tsc_cycles();
6792 length = instr_meter_length_hbo(t, ip);
6793 color_in = (enum rte_color)ip->meter.color_in_val;
6795 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6796 &m->profile->profile,
6801 color_out &= m->color_mask;
6803 n_pkts = m->n_pkts[color_out];
6804 n_bytes = m->n_bytes[color_out];
6806 instr_meter_color_out_hbo_set(t, ip, color_out);
6808 m->n_pkts[color_out] = n_pkts + 1;
6809 m->n_bytes[color_out] = n_bytes + length;
/* "meter" execution, immediate-index variants. Same flow as the field-index
 * families; the meter entry is selected by ip->meter.idx_val.
 */
6816 instr_meter_ihm_exec(struct rte_swx_pipeline *p)
6818 struct thread *t = &p->threads[p->thread_id];
6819 struct instruction *ip = t->ip;
6821 uint64_t time, n_pkts, n_bytes;
6823 enum rte_color color_in, color_out;
6825 TRACE("[Thread %2u] meter (ihm)\n", p->thread_id);
6828 m = instr_meter_idx_imm(p, ip);
6829 rte_prefetch0(m->n_pkts);
6830 time = rte_get_tsc_cycles();
6831 length = instr_meter_length_nbo(t, ip);
6832 color_in = instr_meter_color_in_hbo(t, ip);
6834 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6835 &m->profile->profile,
6840 color_out &= m->color_mask;
6842 n_pkts = m->n_pkts[color_out];
6843 n_bytes = m->n_bytes[color_out];
6845 instr_meter_color_out_hbo_set(t, ip, color_out);
6847 m->n_pkts[color_out] = n_pkts + 1;
6848 m->n_bytes[color_out] = n_bytes + length;
/* ihi: immediate index, header length, immediate color_in. */
6855 instr_meter_ihi_exec(struct rte_swx_pipeline *p)
6857 struct thread *t = &p->threads[p->thread_id];
6858 struct instruction *ip = t->ip;
6860 uint64_t time, n_pkts, n_bytes;
6862 enum rte_color color_in, color_out;
6864 TRACE("[Thread %2u] meter (ihi)\n", p->thread_id);
6867 m = instr_meter_idx_imm(p, ip);
6868 rte_prefetch0(m->n_pkts);
6869 time = rte_get_tsc_cycles();
6870 length = instr_meter_length_nbo(t, ip);
6871 color_in = (enum rte_color)ip->meter.color_in_val;
6873 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6874 &m->profile->profile,
6879 color_out &= m->color_mask;
6881 n_pkts = m->n_pkts[color_out];
6882 n_bytes = m->n_bytes[color_out];
6884 instr_meter_color_out_hbo_set(t, ip, color_out);
6886 m->n_pkts[color_out] = n_pkts + 1;
6887 m->n_bytes[color_out] = n_bytes + length;
/* imm: immediate index, meta length, meta color_in. */
6894 instr_meter_imm_exec(struct rte_swx_pipeline *p)
6896 struct thread *t = &p->threads[p->thread_id];
6897 struct instruction *ip = t->ip;
6899 uint64_t time, n_pkts, n_bytes;
6901 enum rte_color color_in, color_out;
6903 TRACE("[Thread %2u] meter (imm)\n", p->thread_id);
6906 m = instr_meter_idx_imm(p, ip);
6907 rte_prefetch0(m->n_pkts);
6908 time = rte_get_tsc_cycles();
6909 length = instr_meter_length_hbo(t, ip);
6910 color_in = instr_meter_color_in_hbo(t, ip);
6912 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6913 &m->profile->profile,
6918 color_out &= m->color_mask;
6920 n_pkts = m->n_pkts[color_out];
6921 n_bytes = m->n_bytes[color_out];
6923 instr_meter_color_out_hbo_set(t, ip, color_out);
6925 m->n_pkts[color_out] = n_pkts + 1;
6926 m->n_bytes[color_out] = n_bytes + length;
/* imi: immediate index, meta length, immediate color_in. */
6932 instr_meter_imi_exec(struct rte_swx_pipeline *p)
6934 struct thread *t = &p->threads[p->thread_id];
6935 struct instruction *ip = t->ip;
6937 uint64_t time, n_pkts, n_bytes;
6939 enum rte_color color_in, color_out;
6941 TRACE("[Thread %2u] meter (imi)\n", p->thread_id);
6944 m = instr_meter_idx_imm(p, ip);
6945 rte_prefetch0(m->n_pkts);
6946 time = rte_get_tsc_cycles();
6947 length = instr_meter_length_hbo(t, ip);
6948 color_in = (enum rte_color)ip->meter.color_in_val;
6950 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6951 &m->profile->profile,
6956 color_out &= m->color_mask;
6958 n_pkts = m->n_pkts[color_out];
6959 n_bytes = m->n_bytes[color_out];
6961 instr_meter_color_out_hbo_set(t, ip, color_out);
6963 m->n_pkts[color_out] = n_pkts + 1;
6964 m->n_bytes[color_out] = n_bytes + length;
/* Forward declaration: action_find() is defined later in this file. */
6973 static struct action *
6974 action_find(struct rte_swx_pipeline *p, const char *name);
/* "jmp LABEL": unconditional jump. The label is recorded in the per-
 * instruction data and the target ip is resolved in a later pass
 * (instr_jmp_resolve).
 */
6977 instr_jmp_translate(struct rte_swx_pipeline *p __rte_unused,
6978 struct action *action __rte_unused,
6981 struct instruction *instr,
6982 struct instruction_data *data)
6984 CHECK(n_tokens == 2, EINVAL);
6986 strcpy(data->jmp_label, tokens[1]);
6988 instr->type = INSTR_JMP;
6989 instr->jmp.ip = NULL; /* Resolved later. */
/* "jmpv LABEL HEADER": jump if the given header is valid. */
6994 instr_jmp_valid_translate(struct rte_swx_pipeline *p,
6995 struct action *action __rte_unused,
6998 struct instruction *instr,
6999 struct instruction_data *data)
7003 CHECK(n_tokens == 3, EINVAL);
7005 strcpy(data->jmp_label, tokens[1]);
7007 h = header_parse(p, tokens[2]);
7010 instr->type = INSTR_JMP_VALID;
7011 instr->jmp.ip = NULL; /* Resolved later. */
7012 instr->jmp.header_id = h->id;
/* "jmpnv LABEL HEADER": jump if the given header is invalid. */
7017 instr_jmp_invalid_translate(struct rte_swx_pipeline *p,
7018 struct action *action __rte_unused,
7021 struct instruction *instr,
7022 struct instruction_data *data)
7026 CHECK(n_tokens == 3, EINVAL);
7028 strcpy(data->jmp_label, tokens[1]);
7030 h = header_parse(p, tokens[2]);
7033 instr->type = INSTR_JMP_INVALID;
7034 instr->jmp.ip = NULL; /* Resolved later. */
7035 instr->jmp.header_id = h->id;
/* "jmph LABEL": jump on table lookup hit. Not allowed inside actions
 * (CHECK(!action, ...)) because t->hit is only meaningful after a table op.
 */
7040 instr_jmp_hit_translate(struct rte_swx_pipeline *p __rte_unused,
7041 struct action *action,
7044 struct instruction *instr,
7045 struct instruction_data *data)
7047 CHECK(!action, EINVAL);
7048 CHECK(n_tokens == 2, EINVAL);
7050 strcpy(data->jmp_label, tokens[1]);
7052 instr->type = INSTR_JMP_HIT;
7053 instr->jmp.ip = NULL; /* Resolved later. */
/* "jmpnh LABEL": jump on table lookup miss. */
7058 instr_jmp_miss_translate(struct rte_swx_pipeline *p __rte_unused,
7059 struct action *action,
7062 struct instruction *instr,
7063 struct instruction_data *data)
7065 CHECK(!action, EINVAL);
7066 CHECK(n_tokens == 2, EINVAL);
7068 strcpy(data->jmp_label, tokens[1]);
7070 instr->type = INSTR_JMP_MISS;
7071 instr->jmp.ip = NULL; /* Resolved later. */
/* "jmpa LABEL ACTION": jump if the last table op selected this action. */
7076 instr_jmp_action_hit_translate(struct rte_swx_pipeline *p,
7077 struct action *action,
7080 struct instruction *instr,
7081 struct instruction_data *data)
7085 CHECK(!action, EINVAL);
7086 CHECK(n_tokens == 3, EINVAL);
7088 strcpy(data->jmp_label, tokens[1]);
7090 a = action_find(p, tokens[2]);
7093 instr->type = INSTR_JMP_ACTION_HIT;
7094 instr->jmp.ip = NULL; /* Resolved later. */
7095 instr->jmp.action_id = a->id;
/* "jmpna LABEL ACTION": jump if the selected action is NOT this one. */
7100 instr_jmp_action_miss_translate(struct rte_swx_pipeline *p,
7101 struct action *action,
7104 struct instruction *instr,
7105 struct instruction_data *data)
7109 CHECK(!action, EINVAL);
7110 CHECK(n_tokens == 3, EINVAL);
7112 strcpy(data->jmp_label, tokens[1]);
7114 a = action_find(p, tokens[2]);
7117 instr->type = INSTR_JMP_ACTION_MISS;
7118 instr->jmp.ip = NULL; /* Resolved later. */
7119 instr->jmp.action_id = a->id;
/* "jmpeq LABEL A B": jump if A == B. Operand A must be a struct field; B is
 * either a field (HH/HM/MH/MM variants, picked from the 'h' operand prefix)
 * or an immediate (the _I variant, converted to the field's byte order).
 */
7124 instr_jmp_eq_translate(struct rte_swx_pipeline *p,
7125 struct action *action,
7128 struct instruction *instr,
7129 struct instruction_data *data)
7131 char *a = tokens[2], *b = tokens[3];
7132 struct field *fa, *fb;
7134 uint32_t a_struct_id, b_struct_id;
7136 CHECK(n_tokens == 4, EINVAL);
7138 strcpy(data->jmp_label, tokens[1]);
7140 fa = struct_field_parse(p, action, a, &a_struct_id);
7143 /* JMP_EQ, JMP_EQ_MH, JMP_EQ_HM, JMP_EQ_HH. */
7144 fb = struct_field_parse(p, action, b, &b_struct_id);
7146 instr->type = INSTR_JMP_EQ;
7147 if (a[0] != 'h' && b[0] == 'h')
7148 instr->type = INSTR_JMP_EQ_MH;
7149 if (a[0] == 'h' && b[0] != 'h')
7150 instr->type = INSTR_JMP_EQ_HM;
7151 if (a[0] == 'h' && b[0] == 'h')
7152 instr->type = INSTR_JMP_EQ_HH;
7153 instr->jmp.ip = NULL; /* Resolved later. */
7155 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7156 instr->jmp.a.n_bits = fa->n_bits;
7157 instr->jmp.a.offset = fa->offset / 8;
7158 instr->jmp.b.struct_id = (uint8_t)b_struct_id;
7159 instr->jmp.b.n_bits = fb->n_bits;
7160 instr->jmp.b.offset = fb->offset / 8;
/* Immediate B operand: parse and, for a header A, pre-swap the immediate
 * into network byte order aligned to A's width.
 */
7165 b_val = strtoull(b, &b, 0);
7166 CHECK(!b[0], EINVAL);
7169 b_val = hton64(b_val) >> (64 - fa->n_bits);
7171 instr->type = INSTR_JMP_EQ_I;
7172 instr->jmp.ip = NULL; /* Resolved later. */
7173 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7174 instr->jmp.a.n_bits = fa->n_bits;
7175 instr->jmp.a.offset = fa->offset / 8;
7176 instr->jmp.b_val = b_val;
/* "jmpneq LABEL A B": jump if A != B. Mirror image of instr_jmp_eq_translate;
 * same operand rules and variant selection.
 */
7181 instr_jmp_neq_translate(struct rte_swx_pipeline *p,
7182 struct action *action,
7185 struct instruction *instr,
7186 struct instruction_data *data)
7188 char *a = tokens[2], *b = tokens[3];
7189 struct field *fa, *fb;
7191 uint32_t a_struct_id, b_struct_id;
7193 CHECK(n_tokens == 4, EINVAL);
7195 strcpy(data->jmp_label, tokens[1]);
7197 fa = struct_field_parse(p, action, a, &a_struct_id);
7200 /* JMP_NEQ, JMP_NEQ_MH, JMP_NEQ_HM, JMP_NEQ_HH. */
7201 fb = struct_field_parse(p, action, b, &b_struct_id);
7203 instr->type = INSTR_JMP_NEQ;
7204 if (a[0] != 'h' && b[0] == 'h')
7205 instr->type = INSTR_JMP_NEQ_MH;
7206 if (a[0] == 'h' && b[0] != 'h')
7207 instr->type = INSTR_JMP_NEQ_HM;
7208 if (a[0] == 'h' && b[0] == 'h')
7209 instr->type = INSTR_JMP_NEQ_HH;
7210 instr->jmp.ip = NULL; /* Resolved later. */
7212 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7213 instr->jmp.a.n_bits = fa->n_bits;
7214 instr->jmp.a.offset = fa->offset / 8;
7215 instr->jmp.b.struct_id = (uint8_t)b_struct_id;
7216 instr->jmp.b.n_bits = fb->n_bits;
7217 instr->jmp.b.offset = fb->offset / 8;
/* Immediate B operand, pre-swapped for header A (see jmpeq). */
7222 b_val = strtoull(b, &b, 0);
7223 CHECK(!b[0], EINVAL);
7226 b_val = hton64(b_val) >> (64 - fa->n_bits);
7228 instr->type = INSTR_JMP_NEQ_I;
7229 instr->jmp.ip = NULL; /* Resolved later. */
7230 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7231 instr->jmp.a.n_bits = fa->n_bits;
7232 instr->jmp.a.offset = fa->offset / 8;
7233 instr->jmp.b_val = b_val;
/* "jmplt LABEL A B": jump if A < B. Unlike eq/neq, the ordered compare keeps
 * separate MI/HI immediate variants instead of pre-swapping the immediate,
 * since the comparison is done on host-order values at run time.
 */
7238 instr_jmp_lt_translate(struct rte_swx_pipeline *p,
7239 struct action *action,
7242 struct instruction *instr,
7243 struct instruction_data *data)
7245 char *a = tokens[2], *b = tokens[3];
7246 struct field *fa, *fb;
7248 uint32_t a_struct_id, b_struct_id;
7250 CHECK(n_tokens == 4, EINVAL);
7252 strcpy(data->jmp_label, tokens[1]);
7254 fa = struct_field_parse(p, action, a, &a_struct_id);
7257 /* JMP_LT, JMP_LT_MH, JMP_LT_HM, JMP_LT_HH. */
7258 fb = struct_field_parse(p, action, b, &b_struct_id);
7260 instr->type = INSTR_JMP_LT;
7261 if (a[0] == 'h' && b[0] != 'h')
7262 instr->type = INSTR_JMP_LT_HM;
7263 if (a[0] != 'h' && b[0] == 'h')
7264 instr->type = INSTR_JMP_LT_MH;
7265 if (a[0] == 'h' && b[0] == 'h')
7266 instr->type = INSTR_JMP_LT_HH;
7267 instr->jmp.ip = NULL; /* Resolved later. */
7269 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7270 instr->jmp.a.n_bits = fa->n_bits;
7271 instr->jmp.a.offset = fa->offset / 8;
7272 instr->jmp.b.struct_id = (uint8_t)b_struct_id;
7273 instr->jmp.b.n_bits = fb->n_bits;
7274 instr->jmp.b.offset = fb->offset / 8;
7278 /* JMP_LT_MI, JMP_LT_HI. */
7279 b_val = strtoull(b, &b, 0);
7280 CHECK(!b[0], EINVAL);
7282 instr->type = INSTR_JMP_LT_MI;
7284 instr->type = INSTR_JMP_LT_HI;
7285 instr->jmp.ip = NULL; /* Resolved later. */
7287 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7288 instr->jmp.a.n_bits = fa->n_bits;
7289 instr->jmp.a.offset = fa->offset / 8;
7290 instr->jmp.b_val = b_val;
/* "jmpgt LABEL A B": jump if A > B. Mirror of instr_jmp_lt_translate. */
7295 instr_jmp_gt_translate(struct rte_swx_pipeline *p,
7296 struct action *action,
7299 struct instruction *instr,
7300 struct instruction_data *data)
7302 char *a = tokens[2], *b = tokens[3];
7303 struct field *fa, *fb;
7305 uint32_t a_struct_id, b_struct_id;
7307 CHECK(n_tokens == 4, EINVAL);
7309 strcpy(data->jmp_label, tokens[1]);
7311 fa = struct_field_parse(p, action, a, &a_struct_id);
7314 /* JMP_GT, JMP_GT_MH, JMP_GT_HM, JMP_GT_HH. */
7315 fb = struct_field_parse(p, action, b, &b_struct_id);
7317 instr->type = INSTR_JMP_GT;
7318 if (a[0] == 'h' && b[0] != 'h')
7319 instr->type = INSTR_JMP_GT_HM;
7320 if (a[0] != 'h' && b[0] == 'h')
7321 instr->type = INSTR_JMP_GT_MH;
7322 if (a[0] == 'h' && b[0] == 'h')
7323 instr->type = INSTR_JMP_GT_HH;
7324 instr->jmp.ip = NULL; /* Resolved later. */
7326 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7327 instr->jmp.a.n_bits = fa->n_bits;
7328 instr->jmp.a.offset = fa->offset / 8;
7329 instr->jmp.b.struct_id = (uint8_t)b_struct_id;
7330 instr->jmp.b.n_bits = fb->n_bits;
7331 instr->jmp.b.offset = fb->offset / 8;
7335 /* JMP_GT_MI, JMP_GT_HI. */
7336 b_val = strtoull(b, &b, 0);
7337 CHECK(!b[0], EINVAL);
7339 instr->type = INSTR_JMP_GT_MI;
7341 instr->type = INSTR_JMP_GT_HI;
7342 instr->jmp.ip = NULL; /* Resolved later. */
7344 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7345 instr->jmp.a.n_bits = fa->n_bits;
7346 instr->jmp.a.offset = fa->offset / 8;
7347 instr->jmp.b_val = b_val;
/* Unconditional jump: set the thread ip to the pre-resolved target. */
7352 instr_jmp_exec(struct rte_swx_pipeline *p)
7354 struct thread *t = &p->threads[p->thread_id];
7355 struct instruction *ip = t->ip;
7357 TRACE("[Thread %2u] jmp\n", p->thread_id);
7359 thread_ip_set(t, ip->jmp.ip);
/* jmpv: branch to target when the header is valid, else fall through. */
7363 instr_jmp_valid_exec(struct rte_swx_pipeline *p)
7365 struct thread *t = &p->threads[p->thread_id];
7366 struct instruction *ip = t->ip;
7367 uint32_t header_id = ip->jmp.header_id;
7369 TRACE("[Thread %2u] jmpv\n", p->thread_id);
7371 t->ip = HEADER_VALID(t, header_id) ? ip->jmp.ip : (t->ip + 1);
/* jmpnv: branch to target when the header is invalid. */
7375 instr_jmp_invalid_exec(struct rte_swx_pipeline *p)
7377 struct thread *t = &p->threads[p->thread_id];
7378 struct instruction *ip = t->ip;
7379 uint32_t header_id = ip->jmp.header_id;
7381 TRACE("[Thread %2u] jmpnv\n", p->thread_id);
7383 t->ip = HEADER_VALID(t, header_id) ? (t->ip + 1) : ip->jmp.ip;
/* jmph: branchless select on t->hit (0 = fall through, 1 = jump). */
7387 instr_jmp_hit_exec(struct rte_swx_pipeline *p)
7389 struct thread *t = &p->threads[p->thread_id];
7390 struct instruction *ip = t->ip;
7391 struct instruction *ip_next[] = {t->ip + 1, ip->jmp.ip};
7393 TRACE("[Thread %2u] jmph\n", p->thread_id);
7395 t->ip = ip_next[t->hit];
/* jmpnh: inverse select — jump on miss (t->hit == 0). */
7399 instr_jmp_miss_exec(struct rte_swx_pipeline *p)
7401 struct thread *t = &p->threads[p->thread_id];
7402 struct instruction *ip = t->ip;
7403 struct instruction *ip_next[] = {ip->jmp.ip, t->ip + 1};
7405 TRACE("[Thread %2u] jmpnh\n", p->thread_id);
7407 t->ip = ip_next[t->hit];
/* jmpa: jump when the last table op selected the referenced action. */
7411 instr_jmp_action_hit_exec(struct rte_swx_pipeline *p)
7413 struct thread *t = &p->threads[p->thread_id];
7414 struct instruction *ip = t->ip;
7416 TRACE("[Thread %2u] jmpa\n", p->thread_id);
7418 t->ip = (ip->jmp.action_id == t->action_id) ? ip->jmp.ip : (t->ip + 1);
/* jmpna: jump when a different action was selected. */
7422 instr_jmp_action_miss_exec(struct rte_swx_pipeline *p)
7424 struct thread *t = &p->threads[p->thread_id];
7425 struct instruction *ip = t->ip;
7427 TRACE("[Thread %2u] jmpna\n", p->thread_id);
7429 t->ip = (ip->jmp.action_id == t->action_id) ? (t->ip + 1) : ip->jmp.ip;
/* jmpeq execution variants. The JMP_CMP* macros read both operands in the
 * appropriate byte order, apply the == comparison and update t->ip.
 * Suffixes: mh/hm/hh = operand byte-order combos, i = immediate B.
 */
7433 instr_jmp_eq_exec(struct rte_swx_pipeline *p)
7435 struct thread *t = &p->threads[p->thread_id];
7436 struct instruction *ip = t->ip;
7438 TRACE("[Thread %2u] jmpeq\n", p->thread_id);
7444 instr_jmp_eq_mh_exec(struct rte_swx_pipeline *p)
7446 struct thread *t = &p->threads[p->thread_id];
7447 struct instruction *ip = t->ip;
7449 TRACE("[Thread %2u] jmpeq (mh)\n", p->thread_id);
7451 JMP_CMP_MH(t, ip, ==);
7455 instr_jmp_eq_hm_exec(struct rte_swx_pipeline *p)
7457 struct thread *t = &p->threads[p->thread_id];
7458 struct instruction *ip = t->ip;
7460 TRACE("[Thread %2u] jmpeq (hm)\n", p->thread_id);
7462 JMP_CMP_HM(t, ip, ==);
/* HH equality can use the FAST path: raw NBO values compare equal iff the
 * host-order values do, so no byte swap is needed.
 */
7466 instr_jmp_eq_hh_exec(struct rte_swx_pipeline *p)
7468 struct thread *t = &p->threads[p->thread_id];
7469 struct instruction *ip = t->ip;
7471 TRACE("[Thread %2u] jmpeq (hh)\n", p->thread_id);
7473 JMP_CMP_HH_FAST(t, ip, ==);
7477 instr_jmp_eq_i_exec(struct rte_swx_pipeline *p)
7479 struct thread *t = &p->threads[p->thread_id];
7480 struct instruction *ip = t->ip;
7482 TRACE("[Thread %2u] jmpeq (i)\n", p->thread_id);
7484 JMP_CMP_I(t, ip, ==);
/* jmpneq execution variants: same structure as jmpeq with != comparison. */
7488 instr_jmp_neq_exec(struct rte_swx_pipeline *p)
7490 struct thread *t = &p->threads[p->thread_id];
7491 struct instruction *ip = t->ip;
7493 TRACE("[Thread %2u] jmpneq\n", p->thread_id);
7499 instr_jmp_neq_mh_exec(struct rte_swx_pipeline *p)
7501 struct thread *t = &p->threads[p->thread_id];
7502 struct instruction *ip = t->ip;
7504 TRACE("[Thread %2u] jmpneq (mh)\n", p->thread_id);
7506 JMP_CMP_MH(t, ip, !=);
7510 instr_jmp_neq_hm_exec(struct rte_swx_pipeline *p)
7512 struct thread *t = &p->threads[p->thread_id];
7513 struct instruction *ip = t->ip;
7515 TRACE("[Thread %2u] jmpneq (hm)\n", p->thread_id);
7517 JMP_CMP_HM(t, ip, !=);
/* Inequality is also byte-order invariant, hence the FAST HH path. */
7521 instr_jmp_neq_hh_exec(struct rte_swx_pipeline *p)
7523 struct thread *t = &p->threads[p->thread_id];
7524 struct instruction *ip = t->ip;
7526 TRACE("[Thread %2u] jmpneq (hh)\n", p->thread_id);
7528 JMP_CMP_HH_FAST(t, ip, !=);
7532 instr_jmp_neq_i_exec(struct rte_swx_pipeline *p)
7534 struct thread *t = &p->threads[p->thread_id];
7535 struct instruction *ip = t->ip;
7537 TRACE("[Thread %2u] jmpneq (i)\n", p->thread_id);
7539 JMP_CMP_I(t, ip, !=);
/* jmplt execution variants. Ordered compares must convert to host order
 * first, so the HH variant uses JMP_CMP_HH (no FAST path); dedicated MI/HI
 * variants handle the immediate B operand.
 */
7543 instr_jmp_lt_exec(struct rte_swx_pipeline *p)
7545 struct thread *t = &p->threads[p->thread_id];
7546 struct instruction *ip = t->ip;
7548 TRACE("[Thread %2u] jmplt\n", p->thread_id);
7554 instr_jmp_lt_mh_exec(struct rte_swx_pipeline *p)
7556 struct thread *t = &p->threads[p->thread_id];
7557 struct instruction *ip = t->ip;
7559 TRACE("[Thread %2u] jmplt (mh)\n", p->thread_id);
7561 JMP_CMP_MH(t, ip, <);
7565 instr_jmp_lt_hm_exec(struct rte_swx_pipeline *p)
7567 struct thread *t = &p->threads[p->thread_id];
7568 struct instruction *ip = t->ip;
7570 TRACE("[Thread %2u] jmplt (hm)\n", p->thread_id);
7572 JMP_CMP_HM(t, ip, <);
7576 instr_jmp_lt_hh_exec(struct rte_swx_pipeline *p)
7578 struct thread *t = &p->threads[p->thread_id];
7579 struct instruction *ip = t->ip;
7581 TRACE("[Thread %2u] jmplt (hh)\n", p->thread_id);
7583 JMP_CMP_HH(t, ip, <);
7587 instr_jmp_lt_mi_exec(struct rte_swx_pipeline *p)
7589 struct thread *t = &p->threads[p->thread_id];
7590 struct instruction *ip = t->ip;
7592 TRACE("[Thread %2u] jmplt (mi)\n", p->thread_id);
7594 JMP_CMP_MI(t, ip, <);
7598 instr_jmp_lt_hi_exec(struct rte_swx_pipeline *p)
7600 struct thread *t = &p->threads[p->thread_id];
7601 struct instruction *ip = t->ip;
7603 TRACE("[Thread %2u] jmplt (hi)\n", p->thread_id);
7605 JMP_CMP_HI(t, ip, <);
/* jmpgt execution variants: mirror of jmplt with the > comparison. */
7609 instr_jmp_gt_exec(struct rte_swx_pipeline *p)
7611 struct thread *t = &p->threads[p->thread_id];
7612 struct instruction *ip = t->ip;
7614 TRACE("[Thread %2u] jmpgt\n", p->thread_id);
7620 instr_jmp_gt_mh_exec(struct rte_swx_pipeline *p)
7622 struct thread *t = &p->threads[p->thread_id];
7623 struct instruction *ip = t->ip;
7625 TRACE("[Thread %2u] jmpgt (mh)\n", p->thread_id);
7627 JMP_CMP_MH(t, ip, >);
7631 instr_jmp_gt_hm_exec(struct rte_swx_pipeline *p)
7633 struct thread *t = &p->threads[p->thread_id];
7634 struct instruction *ip = t->ip;
7636 TRACE("[Thread %2u] jmpgt (hm)\n", p->thread_id);
7638 JMP_CMP_HM(t, ip, >);
7642 instr_jmp_gt_hh_exec(struct rte_swx_pipeline *p)
7644 struct thread *t = &p->threads[p->thread_id];
7645 struct instruction *ip = t->ip;
7647 TRACE("[Thread %2u] jmpgt (hh)\n", p->thread_id);
7649 JMP_CMP_HH(t, ip, >);
7653 instr_jmp_gt_mi_exec(struct rte_swx_pipeline *p)
7655 struct thread *t = &p->threads[p->thread_id];
7656 struct instruction *ip = t->ip;
7658 TRACE("[Thread %2u] jmpgt (mi)\n", p->thread_id);
7660 JMP_CMP_MI(t, ip, >);
7664 instr_jmp_gt_hi_exec(struct rte_swx_pipeline *p)
7666 struct thread *t = &p->threads[p->thread_id];
7667 struct instruction *ip = t->ip;
7669 TRACE("[Thread %2u] jmpgt (hi)\n", p->thread_id);
7671 JMP_CMP_HI(t, ip, >);
/* "return": only legal inside an action (CHECK(action, ...)); takes no
 * operands.
 */
7678 instr_return_translate(struct rte_swx_pipeline *p __rte_unused,
7679 struct action *action,
7680 char **tokens __rte_unused,
7682 struct instruction *instr,
7683 struct instruction_data *data __rte_unused)
7685 CHECK(action, EINVAL);
7686 CHECK(n_tokens == 1, EINVAL);
7688 instr->type = INSTR_RETURN;
/* Execute "return": hand control back from the action to the caller. */
7693 instr_return_exec(struct rte_swx_pipeline *p)
7695 struct thread *t = &p->threads[p->thread_id];
7697 TRACE("[Thread %2u] return\n", p->thread_id);
/* Translate one instruction string into its struct instruction encoding.
 * Tokenizes the string, peels off an optional "LABEL :" prefix into *data,
 * then dispatches on the opcode token to the per-instruction translator.
 * Returns 0 on success or a negative errno via the CHECK macros.
 */
7703 instr_translate(struct rte_swx_pipeline *p,
7704 struct action *action,
7706 struct instruction *instr,
7707 struct instruction_data *data)
7709 char *tokens[RTE_SWX_INSTRUCTION_TOKENS_MAX];
7710 int n_tokens = 0, tpos = 0;
7712 /* Parse the instruction string into tokens. */
7716 token = strtok_r(string, " \t\v", &string);
7720 CHECK(n_tokens < RTE_SWX_INSTRUCTION_TOKENS_MAX, EINVAL);
7721 CHECK_NAME(token, EINVAL);
7723 tokens[n_tokens] = token;
7727 CHECK(n_tokens, EINVAL);
7729 /* Handle the optional instruction label. */
7730 if ((n_tokens >= 2) && !strcmp(tokens[1], ":")) {
7731 strcpy(data->label, tokens[0]);
	/* After consuming "LABEL :", at least one opcode token must remain. */
7734 CHECK(n_tokens - tpos, EINVAL);
7737 /* Identify the instruction type. */
7738 if (!strcmp(tokens[tpos], "rx"))
7739 return instr_rx_translate(p,
7746 if (!strcmp(tokens[tpos], "tx"))
7747 return instr_tx_translate(p,
7754 if (!strcmp(tokens[tpos], "drop"))
7755 return instr_drop_translate(p,
7762 if (!strcmp(tokens[tpos], "extract"))
7763 return instr_hdr_extract_translate(p,
7770 if (!strcmp(tokens[tpos], "emit"))
7771 return instr_hdr_emit_translate(p,
7778 if (!strcmp(tokens[tpos], "validate"))
7779 return instr_hdr_validate_translate(p,
7786 if (!strcmp(tokens[tpos], "invalidate"))
7787 return instr_hdr_invalidate_translate(p,
7794 if (!strcmp(tokens[tpos], "mov"))
7795 return instr_mov_translate(p,
7802 if (!strcmp(tokens[tpos], "add"))
7803 return instr_alu_add_translate(p,
7810 if (!strcmp(tokens[tpos], "sub"))
7811 return instr_alu_sub_translate(p,
7818 if (!strcmp(tokens[tpos], "ckadd"))
7819 return instr_alu_ckadd_translate(p,
7826 if (!strcmp(tokens[tpos], "cksub"))
7827 return instr_alu_cksub_translate(p,
7834 if (!strcmp(tokens[tpos], "and"))
7835 return instr_alu_and_translate(p,
7842 if (!strcmp(tokens[tpos], "or"))
7843 return instr_alu_or_translate(p,
7850 if (!strcmp(tokens[tpos], "xor"))
7851 return instr_alu_xor_translate(p,
7858 if (!strcmp(tokens[tpos], "shl"))
7859 return instr_alu_shl_translate(p,
7866 if (!strcmp(tokens[tpos], "shr"))
7867 return instr_alu_shr_translate(p,
7874 if (!strcmp(tokens[tpos], "regprefetch"))
7875 return instr_regprefetch_translate(p,
7882 if (!strcmp(tokens[tpos], "regrd"))
7883 return instr_regrd_translate(p,
7890 if (!strcmp(tokens[tpos], "regwr"))
7891 return instr_regwr_translate(p,
7898 if (!strcmp(tokens[tpos], "regadd"))
7899 return instr_regadd_translate(p,
7906 if (!strcmp(tokens[tpos], "metprefetch"))
7907 return instr_metprefetch_translate(p,
7914 if (!strcmp(tokens[tpos], "meter"))
7915 return instr_meter_translate(p,
7922 if (!strcmp(tokens[tpos], "table"))
7923 return instr_table_translate(p,
7930 if (!strcmp(tokens[tpos], "extern"))
7931 return instr_extern_translate(p,
7938 if (!strcmp(tokens[tpos], "jmp"))
7939 return instr_jmp_translate(p,
7946 if (!strcmp(tokens[tpos], "jmpv"))
7947 return instr_jmp_valid_translate(p,
7954 if (!strcmp(tokens[tpos], "jmpnv"))
7955 return instr_jmp_invalid_translate(p,
7962 if (!strcmp(tokens[tpos], "jmph"))
7963 return instr_jmp_hit_translate(p,
7970 if (!strcmp(tokens[tpos], "jmpnh"))
7971 return instr_jmp_miss_translate(p,
7978 if (!strcmp(tokens[tpos], "jmpa"))
7979 return instr_jmp_action_hit_translate(p,
7986 if (!strcmp(tokens[tpos], "jmpna"))
7987 return instr_jmp_action_miss_translate(p,
7994 if (!strcmp(tokens[tpos], "jmpeq"))
7995 return instr_jmp_eq_translate(p,
8002 if (!strcmp(tokens[tpos], "jmpneq"))
8003 return instr_jmp_neq_translate(p,
8010 if (!strcmp(tokens[tpos], "jmplt"))
8011 return instr_jmp_lt_translate(p,
8018 if (!strcmp(tokens[tpos], "jmpgt"))
8019 return instr_jmp_gt_translate(p,
8026 if (!strcmp(tokens[tpos], "return"))
8027 return instr_return_translate(p,
/* Label bookkeeping helpers. NOTE(review): this extract is elided — interior
 * lines (returns, closing braces) are missing; code bytes kept as-is. */
/* label_find: linear scan for the instruction_data entry whose label string
 * matches; presumably returns NULL when absent — TODO confirm elided tail. */
8037 static struct instruction_data *
8038 label_find(struct instruction_data *data, uint32_t n, const char *label)
8042 for (i = 0; i < n; i++)
8043 if (!strcmp(label, data[i].label))
/* label_is_used: counts how many instructions jump to the given label
 * (compares against each entry's jmp_label). */
8050 label_is_used(struct instruction_data *data, uint32_t n, const char *label)
8052 uint32_t count = 0, i;
8057 for (i = 0; i < n; i++)
8058 if (!strcmp(label, data[i].jmp_label))
/* instr_label_check: O(n^2) uniqueness check over all labels, then records
 * per-label user counts via label_is_used. */
8065 instr_label_check(struct instruction_data *instruction_data,
8066 uint32_t n_instructions)
8070 /* Check that all instruction labels are unique. */
8071 for (i = 0; i < n_instructions; i++) {
8072 struct instruction_data *data = &instruction_data[i];
8073 char *label = data->label;
/* Any duplicate label among the remaining entries is rejected with EINVAL. */
8079 for (j = i + 1; j < n_instructions; j++)
8080 CHECK(strcmp(label, data[j].label), EINVAL);
8083 /* Get users for each instruction label. */
8084 for (i = 0; i < n_instructions; i++) {
8085 struct instruction_data *data = &instruction_data[i];
8086 char *label = data->label;
8088 data->n_users = label_is_used(instruction_data,
/* Resolve every jump instruction's label to a concrete instruction pointer.
 * NOTE(review): interior lines elided in this extract; code bytes kept as-is. */
8097 instr_jmp_resolve(struct instruction *instructions,
8098 struct instruction_data *instruction_data,
8099 uint32_t n_instructions)
8103 for (i = 0; i < n_instructions; i++) {
8104 struct instruction *instr = &instructions[i];
8105 struct instruction_data *data = &instruction_data[i];
8106 struct instruction_data *found;
/* Non-jump instructions need no resolution. */
8108 if (!instruction_is_jmp(instr))
8111 found = label_find(instruction_data,
/* A jump to a label that does not exist is a hard EINVAL. */
8114 CHECK(found, EINVAL);
/* Pointer arithmetic: (found - instruction_data) is the target's index,
 * used to aim jmp.ip at the parallel instructions[] array. */
8116 instr->jmp.ip = &instructions[found - instruction_data];
/* Structural sanity checks on a translated instruction list. Two variants are
 * visible: one requiring rx-first + tx-present + tx/jmp-last (presumably the
 * pipeline path) and one requiring return-or-tx-present (presumably the action
 * path) — the branch selecting between them is elided; TODO confirm. */
8123 instr_verify(struct rte_swx_pipeline *p __rte_unused,
8125 struct instruction *instr,
8126 struct instruction_data *data __rte_unused,
8127 uint32_t n_instructions)
8130 enum instruction_type type;
8133 /* Check that the first instruction is rx. */
8134 CHECK(instr[0].type == INSTR_RX, EINVAL);
8136 /* Check that there is at least one tx instruction. */
8137 for (i = 0; i < n_instructions; i++) {
8138 type = instr[i].type;
8140 if (instruction_is_tx(type))
/* If the loop ran to completion, no tx was found. */
8143 CHECK(i < n_instructions, EINVAL);
8145 /* Check that the last instruction is either tx or unconditional
8148 type = instr[n_instructions - 1].type;
8149 CHECK(instruction_is_tx(type) || (type == INSTR_JMP), EINVAL);
8153 enum instruction_type type;
8156 /* Check that there is at least one return or tx instruction. */
8157 for (i = 0; i < n_instructions; i++) {
8158 type = instr[i].type;
8160 if ((type == INSTR_RETURN) || instruction_is_tx(type))
8163 CHECK(i < n_instructions, EINVAL);
/* Compact the instruction arrays in place by copying each still-valid entry
 * down over slots vacated by optimized-out (invalid) ones; presumably returns
 * the new count — the return is elided in this extract, TODO confirm. */
8170 instr_compact(struct instruction *instructions,
8171 struct instruction_data *instruction_data,
8172 uint32_t n_instructions)
8174 uint32_t i, pos = 0;
8176 /* Eliminate the invalid instructions that have been optimized out. */
8177 for (i = 0; i < n_instructions; i++) {
8178 struct instruction *instr = &instructions[i];
8179 struct instruction_data *data = &instruction_data[i];
/* Both parallel arrays are moved together to keep them index-aligned. */
8185 memcpy(&instructions[pos], instr, sizeof(*instr));
8186 memcpy(&instruction_data[pos], data, sizeof(*data));
/* Peephole optimization: fuse a run of consecutive HDR_EXTRACT instructions
 * into one multi-header extract. NOTE(review): interior lines elided in this
 * extract; code bytes kept as-is. */
/* _search: measure how long the run of fusable extracts is, bounded by the
 * header_id[] array capacity; a labeled (n_users) instruction after the first
 * breaks the run because jumps may land on it. */
8196 instr_pattern_extract_many_search(struct instruction *instr,
8197 struct instruction_data *data,
8199 uint32_t *n_pattern_instr)
8203 for (i = 0; i < n_instr; i++) {
8204 if (data[i].invalid)
8207 if (instr[i].type != INSTR_HDR_EXTRACT)
8210 if (i == RTE_DIM(instr->io.hdr.header_id))
8213 if (i && data[i].n_users)
8220 *n_pattern_instr = i;
/* _replace: fold instructions 1..n-1 into slot i of instruction 0's arrays
 * and mark them invalid for later compaction. */
8225 instr_pattern_extract_many_replace(struct instruction *instr,
8226 struct instruction_data *data,
8231 for (i = 1; i < n_instr; i++) {
8233 instr[0].io.hdr.header_id[i] = instr[i].io.hdr.header_id[0];
8234 instr[0].io.hdr.struct_id[i] = instr[i].io.hdr.struct_id[0];
8235 instr[0].io.hdr.n_bytes[i] = instr[i].io.hdr.n_bytes[0];
8237 data[i].invalid = 1;
/* _optimize: scan the whole list, applying search+replace at each start
 * position, then compact away the invalidated slots. */
8242 instr_pattern_extract_many_optimize(struct instruction *instructions,
8243 struct instruction_data *instruction_data,
8244 uint32_t n_instructions)
8248 for (i = 0; i < n_instructions; ) {
8249 struct instruction *instr = &instructions[i];
8250 struct instruction_data *data = &instruction_data[i];
8251 uint32_t n_instr = 0;
8255 detected = instr_pattern_extract_many_search(instr,
8260 instr_pattern_extract_many_replace(instr,
8267 /* No pattern starting at the current instruction. */
8271 /* Eliminate the invalid instructions that have been optimized out. */
8272 n_instructions = instr_compact(instructions,
8276 return n_instructions;
/* Peephole optimization: fuse consecutive HDR_EMIT instructions followed by a
 * TX into a single combined emit-many+tx instruction. NOTE(review): interior
 * lines elided in this extract; code bytes kept as-is. */
/* _search: count the emit run (same guards as the extract-many search), then
 * require the very next instruction to be an unlabeled tx. */
8280 instr_pattern_emit_many_tx_search(struct instruction *instr,
8281 struct instruction_data *data,
8283 uint32_t *n_pattern_instr)
8287 for (i = 0; i < n_instr; i++) {
8288 if (data[i].invalid)
8291 if (instr[i].type != INSTR_HDR_EMIT)
8294 if (i == RTE_DIM(instr->io.hdr.header_id))
8297 if (i && data[i].n_users)
/* The instruction terminating the emit run must be a tx... */
8304 if (!instruction_is_tx(instr[i].type))
/* ...and must not be a jump target, or fusing would break control flow. */
8307 if (data[i].n_users)
8312 *n_pattern_instr = i;
/* _replace: fold the extra emits into instruction 0, then absorb the final
 * tx's port-selection operands and invalidate the fused slots. */
8317 instr_pattern_emit_many_tx_replace(struct instruction *instr,
8318 struct instruction_data *data,
8323 /* Any emit instruction in addition to the first one. */
8324 for (i = 1; i < n_instr - 1; i++) {
8326 instr[0].io.hdr.header_id[i] = instr[i].io.hdr.header_id[0];
8327 instr[0].io.hdr.struct_id[i] = instr[i].io.hdr.struct_id[0];
8328 instr[0].io.hdr.n_bytes[i] = instr[i].io.hdr.n_bytes[0];
8330 data[i].invalid = 1;
8333 /* The TX instruction is the last one in the pattern. */
8335 instr[0].io.io.offset = instr[i].io.io.offset;
8336 instr[0].io.io.n_bits = instr[i].io.io.n_bits;
8337 data[i].invalid = 1;
/* _optimize: driver loop — detect, replace, then compact. */
8341 instr_pattern_emit_many_tx_optimize(struct instruction *instructions,
8342 struct instruction_data *instruction_data,
8343 uint32_t n_instructions)
8347 for (i = 0; i < n_instructions; ) {
8348 struct instruction *instr = &instructions[i];
8349 struct instruction_data *data = &instruction_data[i];
8350 uint32_t n_instr = 0;
8353 /* Emit many + TX. */
8354 detected = instr_pattern_emit_many_tx_search(instr,
8359 instr_pattern_emit_many_tx_replace(instr,
8366 /* No pattern starting at the current instruction. */
8370 /* Eliminate the invalid instructions that have been optimized out. */
8371 n_instructions = instr_compact(instructions,
8375 return n_instructions;
/* Forward declaration: counts mov instructions sourcing a given action arg
 * (definition appears later in the file). */
8379 action_arg_src_mov_count(struct action *a,
8381 struct instruction *instructions,
8382 struct instruction_data *instruction_data,
8383 uint32_t n_instructions);
/* Detect the "mov every header field from action args, then validate the
 * header" pattern, which can be fused into a single DMA_HT instruction.
 * NOTE(review): interior lines elided in this extract; code bytes kept as-is. */
8386 instr_pattern_mov_all_validate_search(struct rte_swx_pipeline *p,
8388 struct instruction *instr,
8389 struct instruction_data *data,
8391 struct instruction *instructions,
8392 struct instruction_data *instruction_data,
8393 uint32_t n_instructions,
8394 uint32_t *n_pattern_instr)
8397 uint32_t src_field_id, i, j;
8399 /* Prerequisites. */
8403 /* First instruction: MOV_HM. */
8404 if (data[0].invalid || (instr[0].type != INSTR_MOV_HM))
8407 h = header_find_by_struct_id(p, instr[0].mov.dst.struct_id);
/* Map the first mov's source byte offset back to an action-arg field id. */
8411 for (src_field_id = 0; src_field_id < a->st->n_fields; src_field_id++)
8412 if (instr[0].mov.src.offset == a->st->fields[src_field_id].offset / 8)
8415 if (src_field_id == a->st->n_fields)
/* First mov must target header field 0 exactly, with matching widths and a
 * metadata/action-arg source (struct_id 0). */
8418 if (instr[0].mov.dst.offset ||
8419 (instr[0].mov.dst.n_bits != h->st->fields[0].n_bits) ||
8420 instr[0].mov.src.struct_id ||
8421 (instr[0].mov.src.n_bits != a->st->fields[src_field_id].n_bits) ||
8422 (instr[0].mov.dst.n_bits != instr[0].mov.src.n_bits))
/* Enough instructions must remain for all header fields + the validate, and
 * enough action-arg fields to supply them. */
8425 if ((n_instr < h->st->n_fields + 1) ||
8426 (a->st->n_fields < src_field_id + h->st->n_fields + 1))
8429 /* Subsequent instructions: MOV_HM. */
8430 for (i = 1; i < h->st->n_fields; i++)
8431 if (data[i].invalid ||
8433 (instr[i].type != INSTR_MOV_HM) ||
8434 (instr[i].mov.dst.struct_id != h->struct_id) ||
8435 (instr[i].mov.dst.offset != h->st->fields[i].offset / 8) ||
8436 (instr[i].mov.dst.n_bits != h->st->fields[i].n_bits) ||
8437 instr[i].mov.src.struct_id ||
8438 (instr[i].mov.src.offset != a->st->fields[src_field_id + i].offset / 8) ||
8439 (instr[i].mov.src.n_bits != a->st->fields[src_field_id + i].n_bits) ||
8440 (instr[i].mov.dst.n_bits != instr[i].mov.src.n_bits))
8443 /* Last instruction: HDR_VALIDATE. */
8444 if ((instr[i].type != INSTR_HDR_VALIDATE) ||
8445 (instr[i].valid.header_id != h->id))
8448 /* Check that none of the action args that are used as source for this
8449 * DMA transfer are used as source in any other mov instruction.
8451 for (j = src_field_id; j < src_field_id + h->st->n_fields; j++) {
8454 n_users = action_arg_src_mov_count(a,
/* Pattern length = the header-field movs plus the trailing validate. */
8463 *n_pattern_instr = 1 + i;
/* Rewrite a detected mov-all+validate pattern as one DMA_HT instruction.
 * NOTE(review): interior lines elided in this extract; code bytes kept as-is. */
8468 instr_pattern_mov_all_validate_replace(struct rte_swx_pipeline *p,
8470 struct instruction *instr,
8471 struct instruction_data *data,
8475 uint32_t src_field_id, src_offset, i;
8477 /* Read from the instructions before they are modified. */
8478 h = header_find_by_struct_id(p, instr[0].mov.dst.struct_id);
/* Recompute the source action-arg field id, mirroring the search phase. */
8482 for (src_field_id = 0; src_field_id < a->st->n_fields; src_field_id++)
8483 if (instr[0].mov.src.offset == a->st->fields[src_field_id].offset / 8)
8486 if (src_field_id == a->st->n_fields)
8489 src_offset = instr[0].mov.src.offset;
8491 /* Modify the instructions. */
8492 instr[0].type = INSTR_DMA_HT;
8493 instr[0].dma.dst.header_id[0] = h->id;
8494 instr[0].dma.dst.struct_id[0] = h->struct_id;
8495 instr[0].dma.src.offset[0] = (uint8_t)src_offset;
8496 instr[0].dma.n_bytes[0] = h->st->n_bits / 8;
/* All instructions after the first are now redundant. */
8498 for (i = 1; i < n_instr; i++)
8499 data[i].invalid = 1;
8501 /* Update the endianness of the action arguments to header endianness. */
8502 for (i = 0; i < h->st->n_fields; i++)
8503 a->args_endianness[src_field_id + i] = 1;
/* Driver loop for the mov-all+validate optimization; presumably the early
 * "return n_instructions" at 8516 guards actions without args (elided
 * condition) — TODO confirm. */
8507 instr_pattern_mov_all_validate_optimize(struct rte_swx_pipeline *p,
8509 struct instruction *instructions,
8510 struct instruction_data *instruction_data,
8511 uint32_t n_instructions)
8516 return n_instructions;
8518 for (i = 0; i < n_instructions; ) {
8519 struct instruction *instr = &instructions[i];
8520 struct instruction_data *data = &instruction_data[i];
8521 uint32_t n_instr = 0;
8524 /* Mov all + validate. */
8525 detected = instr_pattern_mov_all_validate_search(p,
8535 instr_pattern_mov_all_validate_replace(p, a, instr, data, n_instr);
8540 /* No pattern starting at the current instruction. */
8544 /* Eliminate the invalid instructions that have been optimized out. */
8545 n_instructions = instr_compact(instructions,
8549 return n_instructions;
/* Peephole optimization: fuse consecutive DMA_HT instructions into one
 * multi-slot DMA. Same search/replace/optimize structure as the extract-many
 * pass. NOTE(review): interior lines elided; code bytes kept as-is. */
8553 instr_pattern_dma_many_search(struct instruction *instr,
8554 struct instruction_data *data,
8556 uint32_t *n_pattern_instr)
8560 for (i = 0; i < n_instr; i++) {
8561 if (data[i].invalid)
8564 if (instr[i].type != INSTR_DMA_HT)
/* Run length is capped by the fused instruction's array capacity. */
8567 if (i == RTE_DIM(instr->dma.dst.header_id))
/* A jump target after the first instruction breaks the run. */
8570 if (i && data[i].n_users)
8577 *n_pattern_instr = i;
/* Fold DMA slots 1..n-1 into instruction 0 and invalidate them. */
8582 instr_pattern_dma_many_replace(struct instruction *instr,
8583 struct instruction_data *data,
8588 for (i = 1; i < n_instr; i++) {
8590 instr[0].dma.dst.header_id[i] = instr[i].dma.dst.header_id[0];
8591 instr[0].dma.dst.struct_id[i] = instr[i].dma.dst.struct_id[0];
8592 instr[0].dma.src.offset[i] = instr[i].dma.src.offset[0];
8593 instr[0].dma.n_bytes[i] = instr[i].dma.n_bytes[0];
8595 data[i].invalid = 1;
/* Driver loop: detect, replace, then compact the invalidated slots. */
8600 instr_pattern_dma_many_optimize(struct instruction *instructions,
8601 struct instruction_data *instruction_data,
8602 uint32_t n_instructions)
8606 for (i = 0; i < n_instructions; ) {
8607 struct instruction *instr = &instructions[i];
8608 struct instruction_data *data = &instruction_data[i];
8609 uint32_t n_instr = 0;
8613 detected = instr_pattern_dma_many_search(instr,
8618 instr_pattern_dma_many_replace(instr, data, n_instr);
8623 /* No pattern starting at the current instruction. */
8627 /* Eliminate the invalid instructions that have been optimized out. */
8628 n_instructions = instr_compact(instructions,
8632 return n_instructions;
/* Run all peephole passes in sequence; each pass returns the (possibly
 * smaller) instruction count consumed by the next. Order matters: the
 * mov-all+validate pass creates DMA_HT instructions that the dma-many pass
 * can then fuse. NOTE(review): interior lines elided; code bytes kept as-is. */
8636 instr_optimize(struct rte_swx_pipeline *p,
8638 struct instruction *instructions,
8639 struct instruction_data *instruction_data,
8640 uint32_t n_instructions)
8643 n_instructions = instr_pattern_extract_many_optimize(instructions,
8647 /* Emit many + TX. */
8648 n_instructions = instr_pattern_emit_many_tx_optimize(instructions,
8652 /* Mov all + validate. */
8653 n_instructions = instr_pattern_mov_all_validate_optimize(p,
8660 n_instructions = instr_pattern_dma_many_optimize(instructions,
8664 return n_instructions;
/* Translate, verify, optimize and install an instruction list for either an
 * action (a != NULL branch at 8727) or the pipeline itself (8730). On success
 * the calloc'd arrays are handed over; error paths (elided here) presumably
 * free them — TODO confirm. NOTE(review): interior lines elided; code bytes
 * kept as-is. */
8668 instruction_config(struct rte_swx_pipeline *p,
8670 const char **instructions,
8671 uint32_t n_instructions)
8673 struct instruction *instr = NULL;
8674 struct instruction_data *data = NULL;
8678 CHECK(n_instructions, EINVAL);
8679 CHECK(instructions, EINVAL);
8680 for (i = 0; i < n_instructions; i++)
8681 CHECK_INSTRUCTION(instructions[i], EINVAL);
8683 /* Memory allocation. */
8684 instr = calloc(n_instructions, sizeof(struct instruction));
8690 data = calloc(n_instructions, sizeof(struct instruction_data));
8696 for (i = 0; i < n_instructions; i++) {
/* Work on a private copy: instr_translate tokenizes the string in place. */
8697 char *string = strdup(instructions[i]);
8703 err = instr_translate(p, a, string, &instr[i], &data[i]);
8712 err = instr_label_check(data, n_instructions);
8716 err = instr_verify(p, a, instr, data, n_instructions);
/* Optimization may shrink the list; jumps are resolved afterwards so label
 * targets point into the compacted array. */
8720 n_instructions = instr_optimize(p, a, instr, data, n_instructions);
8722 err = instr_jmp_resolve(instr, data, n_instructions);
8727 a->instructions = instr;
8728 a->n_instructions = n_instructions;
8730 p->instructions = instr;
8731 p->n_instructions = n_instructions;
/* Dispatch table: maps each instruction_type enum value to its executor.
 * Designated initializers keep entries in sync with the enum regardless of
 * declaration order; any enum value without an entry is a NULL pointer, so
 * instr_exec would crash on an unhandled type — the enum and this table must
 * be extended together. */
8743 typedef void (*instr_exec_t)(struct rte_swx_pipeline *);
8745 static instr_exec_t instruction_table[] = {
8746 [INSTR_RX] = instr_rx_exec,
8747 [INSTR_TX] = instr_tx_exec,
8748 [INSTR_TX_I] = instr_tx_i_exec,
8750 [INSTR_HDR_EXTRACT] = instr_hdr_extract_exec,
8751 [INSTR_HDR_EXTRACT2] = instr_hdr_extract2_exec,
8752 [INSTR_HDR_EXTRACT3] = instr_hdr_extract3_exec,
8753 [INSTR_HDR_EXTRACT4] = instr_hdr_extract4_exec,
8754 [INSTR_HDR_EXTRACT5] = instr_hdr_extract5_exec,
8755 [INSTR_HDR_EXTRACT6] = instr_hdr_extract6_exec,
8756 [INSTR_HDR_EXTRACT7] = instr_hdr_extract7_exec,
8757 [INSTR_HDR_EXTRACT8] = instr_hdr_extract8_exec,
8759 [INSTR_HDR_EMIT] = instr_hdr_emit_exec,
8760 [INSTR_HDR_EMIT_TX] = instr_hdr_emit_tx_exec,
8761 [INSTR_HDR_EMIT2_TX] = instr_hdr_emit2_tx_exec,
8762 [INSTR_HDR_EMIT3_TX] = instr_hdr_emit3_tx_exec,
8763 [INSTR_HDR_EMIT4_TX] = instr_hdr_emit4_tx_exec,
8764 [INSTR_HDR_EMIT5_TX] = instr_hdr_emit5_tx_exec,
8765 [INSTR_HDR_EMIT6_TX] = instr_hdr_emit6_tx_exec,
8766 [INSTR_HDR_EMIT7_TX] = instr_hdr_emit7_tx_exec,
8767 [INSTR_HDR_EMIT8_TX] = instr_hdr_emit8_tx_exec,
8769 [INSTR_HDR_VALIDATE] = instr_hdr_validate_exec,
8770 [INSTR_HDR_INVALIDATE] = instr_hdr_invalidate_exec,
8772 [INSTR_MOV] = instr_mov_exec,
8773 [INSTR_MOV_MH] = instr_mov_mh_exec,
8774 [INSTR_MOV_HM] = instr_mov_hm_exec,
8775 [INSTR_MOV_HH] = instr_mov_hh_exec,
8776 [INSTR_MOV_I] = instr_mov_i_exec,
8778 [INSTR_DMA_HT] = instr_dma_ht_exec,
8779 [INSTR_DMA_HT2] = instr_dma_ht2_exec,
8780 [INSTR_DMA_HT3] = instr_dma_ht3_exec,
8781 [INSTR_DMA_HT4] = instr_dma_ht4_exec,
8782 [INSTR_DMA_HT5] = instr_dma_ht5_exec,
8783 [INSTR_DMA_HT6] = instr_dma_ht6_exec,
8784 [INSTR_DMA_HT7] = instr_dma_ht7_exec,
8785 [INSTR_DMA_HT8] = instr_dma_ht8_exec,
8787 [INSTR_ALU_ADD] = instr_alu_add_exec,
8788 [INSTR_ALU_ADD_MH] = instr_alu_add_mh_exec,
8789 [INSTR_ALU_ADD_HM] = instr_alu_add_hm_exec,
8790 [INSTR_ALU_ADD_HH] = instr_alu_add_hh_exec,
8791 [INSTR_ALU_ADD_MI] = instr_alu_add_mi_exec,
8792 [INSTR_ALU_ADD_HI] = instr_alu_add_hi_exec,
8794 [INSTR_ALU_SUB] = instr_alu_sub_exec,
8795 [INSTR_ALU_SUB_MH] = instr_alu_sub_mh_exec,
8796 [INSTR_ALU_SUB_HM] = instr_alu_sub_hm_exec,
8797 [INSTR_ALU_SUB_HH] = instr_alu_sub_hh_exec,
8798 [INSTR_ALU_SUB_MI] = instr_alu_sub_mi_exec,
8799 [INSTR_ALU_SUB_HI] = instr_alu_sub_hi_exec,
8801 [INSTR_ALU_CKADD_FIELD] = instr_alu_ckadd_field_exec,
8802 [INSTR_ALU_CKADD_STRUCT] = instr_alu_ckadd_struct_exec,
8803 [INSTR_ALU_CKADD_STRUCT20] = instr_alu_ckadd_struct20_exec,
8804 [INSTR_ALU_CKSUB_FIELD] = instr_alu_cksub_field_exec,
8806 [INSTR_ALU_AND] = instr_alu_and_exec,
8807 [INSTR_ALU_AND_MH] = instr_alu_and_mh_exec,
8808 [INSTR_ALU_AND_HM] = instr_alu_and_hm_exec,
8809 [INSTR_ALU_AND_HH] = instr_alu_and_hh_exec,
8810 [INSTR_ALU_AND_I] = instr_alu_and_i_exec,
8812 [INSTR_ALU_OR] = instr_alu_or_exec,
8813 [INSTR_ALU_OR_MH] = instr_alu_or_mh_exec,
8814 [INSTR_ALU_OR_HM] = instr_alu_or_hm_exec,
8815 [INSTR_ALU_OR_HH] = instr_alu_or_hh_exec,
8816 [INSTR_ALU_OR_I] = instr_alu_or_i_exec,
8818 [INSTR_ALU_XOR] = instr_alu_xor_exec,
8819 [INSTR_ALU_XOR_MH] = instr_alu_xor_mh_exec,
8820 [INSTR_ALU_XOR_HM] = instr_alu_xor_hm_exec,
8821 [INSTR_ALU_XOR_HH] = instr_alu_xor_hh_exec,
8822 [INSTR_ALU_XOR_I] = instr_alu_xor_i_exec,
8824 [INSTR_ALU_SHL] = instr_alu_shl_exec,
8825 [INSTR_ALU_SHL_MH] = instr_alu_shl_mh_exec,
8826 [INSTR_ALU_SHL_HM] = instr_alu_shl_hm_exec,
8827 [INSTR_ALU_SHL_HH] = instr_alu_shl_hh_exec,
8828 [INSTR_ALU_SHL_MI] = instr_alu_shl_mi_exec,
8829 [INSTR_ALU_SHL_HI] = instr_alu_shl_hi_exec,
8831 [INSTR_ALU_SHR] = instr_alu_shr_exec,
8832 [INSTR_ALU_SHR_MH] = instr_alu_shr_mh_exec,
8833 [INSTR_ALU_SHR_HM] = instr_alu_shr_hm_exec,
8834 [INSTR_ALU_SHR_HH] = instr_alu_shr_hh_exec,
8835 [INSTR_ALU_SHR_MI] = instr_alu_shr_mi_exec,
8836 [INSTR_ALU_SHR_HI] = instr_alu_shr_hi_exec,
8838 [INSTR_REGPREFETCH_RH] = instr_regprefetch_rh_exec,
8839 [INSTR_REGPREFETCH_RM] = instr_regprefetch_rm_exec,
8840 [INSTR_REGPREFETCH_RI] = instr_regprefetch_ri_exec,
8842 [INSTR_REGRD_HRH] = instr_regrd_hrh_exec,
8843 [INSTR_REGRD_HRM] = instr_regrd_hrm_exec,
8844 [INSTR_REGRD_MRH] = instr_regrd_mrh_exec,
8845 [INSTR_REGRD_MRM] = instr_regrd_mrm_exec,
8846 [INSTR_REGRD_HRI] = instr_regrd_hri_exec,
8847 [INSTR_REGRD_MRI] = instr_regrd_mri_exec,
8849 [INSTR_REGWR_RHH] = instr_regwr_rhh_exec,
8850 [INSTR_REGWR_RHM] = instr_regwr_rhm_exec,
8851 [INSTR_REGWR_RMH] = instr_regwr_rmh_exec,
8852 [INSTR_REGWR_RMM] = instr_regwr_rmm_exec,
8853 [INSTR_REGWR_RHI] = instr_regwr_rhi_exec,
8854 [INSTR_REGWR_RMI] = instr_regwr_rmi_exec,
8855 [INSTR_REGWR_RIH] = instr_regwr_rih_exec,
8856 [INSTR_REGWR_RIM] = instr_regwr_rim_exec,
8857 [INSTR_REGWR_RII] = instr_regwr_rii_exec,
8859 [INSTR_REGADD_RHH] = instr_regadd_rhh_exec,
8860 [INSTR_REGADD_RHM] = instr_regadd_rhm_exec,
8861 [INSTR_REGADD_RMH] = instr_regadd_rmh_exec,
8862 [INSTR_REGADD_RMM] = instr_regadd_rmm_exec,
8863 [INSTR_REGADD_RHI] = instr_regadd_rhi_exec,
8864 [INSTR_REGADD_RMI] = instr_regadd_rmi_exec,
8865 [INSTR_REGADD_RIH] = instr_regadd_rih_exec,
8866 [INSTR_REGADD_RIM] = instr_regadd_rim_exec,
8867 [INSTR_REGADD_RII] = instr_regadd_rii_exec,
8869 [INSTR_METPREFETCH_H] = instr_metprefetch_h_exec,
8870 [INSTR_METPREFETCH_M] = instr_metprefetch_m_exec,
8871 [INSTR_METPREFETCH_I] = instr_metprefetch_i_exec,
8873 [INSTR_METER_HHM] = instr_meter_hhm_exec,
8874 [INSTR_METER_HHI] = instr_meter_hhi_exec,
8875 [INSTR_METER_HMM] = instr_meter_hmm_exec,
8876 [INSTR_METER_HMI] = instr_meter_hmi_exec,
8877 [INSTR_METER_MHM] = instr_meter_mhm_exec,
8878 [INSTR_METER_MHI] = instr_meter_mhi_exec,
8879 [INSTR_METER_MMM] = instr_meter_mmm_exec,
8880 [INSTR_METER_MMI] = instr_meter_mmi_exec,
8881 [INSTR_METER_IHM] = instr_meter_ihm_exec,
8882 [INSTR_METER_IHI] = instr_meter_ihi_exec,
8883 [INSTR_METER_IMM] = instr_meter_imm_exec,
8884 [INSTR_METER_IMI] = instr_meter_imi_exec,
8886 [INSTR_TABLE] = instr_table_exec,
8887 [INSTR_SELECTOR] = instr_selector_exec,
8888 [INSTR_EXTERN_OBJ] = instr_extern_obj_exec,
8889 [INSTR_EXTERN_FUNC] = instr_extern_func_exec,
8891 [INSTR_JMP] = instr_jmp_exec,
8892 [INSTR_JMP_VALID] = instr_jmp_valid_exec,
8893 [INSTR_JMP_INVALID] = instr_jmp_invalid_exec,
8894 [INSTR_JMP_HIT] = instr_jmp_hit_exec,
8895 [INSTR_JMP_MISS] = instr_jmp_miss_exec,
8896 [INSTR_JMP_ACTION_HIT] = instr_jmp_action_hit_exec,
8897 [INSTR_JMP_ACTION_MISS] = instr_jmp_action_miss_exec,
8899 [INSTR_JMP_EQ] = instr_jmp_eq_exec,
8900 [INSTR_JMP_EQ_MH] = instr_jmp_eq_mh_exec,
8901 [INSTR_JMP_EQ_HM] = instr_jmp_eq_hm_exec,
8902 [INSTR_JMP_EQ_HH] = instr_jmp_eq_hh_exec,
8903 [INSTR_JMP_EQ_I] = instr_jmp_eq_i_exec,
8905 [INSTR_JMP_NEQ] = instr_jmp_neq_exec,
8906 [INSTR_JMP_NEQ_MH] = instr_jmp_neq_mh_exec,
8907 [INSTR_JMP_NEQ_HM] = instr_jmp_neq_hm_exec,
8908 [INSTR_JMP_NEQ_HH] = instr_jmp_neq_hh_exec,
8909 [INSTR_JMP_NEQ_I] = instr_jmp_neq_i_exec,
8911 [INSTR_JMP_LT] = instr_jmp_lt_exec,
8912 [INSTR_JMP_LT_MH] = instr_jmp_lt_mh_exec,
8913 [INSTR_JMP_LT_HM] = instr_jmp_lt_hm_exec,
8914 [INSTR_JMP_LT_HH] = instr_jmp_lt_hh_exec,
8915 [INSTR_JMP_LT_MI] = instr_jmp_lt_mi_exec,
8916 [INSTR_JMP_LT_HI] = instr_jmp_lt_hi_exec,
8918 [INSTR_JMP_GT] = instr_jmp_gt_exec,
8919 [INSTR_JMP_GT_MH] = instr_jmp_gt_mh_exec,
8920 [INSTR_JMP_GT_HM] = instr_jmp_gt_hm_exec,
8921 [INSTR_JMP_GT_HH] = instr_jmp_gt_hh_exec,
8922 [INSTR_JMP_GT_MI] = instr_jmp_gt_mi_exec,
8923 [INSTR_JMP_GT_HI] = instr_jmp_gt_hi_exec,
8925 [INSTR_RETURN] = instr_return_exec,
/* Execute the current thread's instruction by indexing the dispatch table
 * with the instruction's type tag. NOTE(review): the call itself is elided
 * in this extract; code bytes kept as-is. */
8929 instr_exec(struct rte_swx_pipeline *p)
8931 struct thread *t = &p->threads[p->thread_id];
8932 struct instruction *ip = t->ip;
8933 instr_exec_t instr = instruction_table[ip->type];
/* Action lookup helpers. NOTE(review): interior lines (NULL guards, returns)
 * elided in this extract; code bytes kept as-is. */
/* action_find: lookup by name over the actions tailq. */
8941 static struct action *
8942 action_find(struct rte_swx_pipeline *p, const char *name)
8944 struct action *elem;
8949 TAILQ_FOREACH(elem, &p->actions, node)
8950 if (strcmp(elem->name, name) == 0)
/* action_find_by_id: lookup by numeric id over the same tailq. */
8956 static struct action *
8957 action_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
8959 struct action *action = NULL;
8961 TAILQ_FOREACH(action, &p->actions, node)
8962 if (action->id == id)
/* action_field_find: field lookup within the action's args struct type;
 * NULL when the action has no args struct. */
8968 static struct field *
8969 action_field_find(struct action *a, const char *name)
8971 return a->st ? struct_type_field_find(a->st, name) : NULL;
/* action_field_parse: accepts only names of the form "t.<field>" and strips
 * the "t." prefix before the field lookup. */
8974 static struct field *
8975 action_field_parse(struct action *action, const char *name)
8977 if (name[0] != 't' || name[1] != '.')
8980 return action_field_find(action, &name[2]);
/* Public API: register a named action with optional args struct type and an
 * instruction list. NOTE(review): interior lines (allocation-failure cleanup,
 * returns, n_actions increment) are elided in this extract; code bytes kept
 * as-is. */
8984 rte_swx_pipeline_action_config(struct rte_swx_pipeline *p,
8986 const char *args_struct_type_name,
8987 const char **instructions,
8988 uint32_t n_instructions)
8990 struct struct_type *args_struct_type;
8996 CHECK_NAME(name, EINVAL);
/* Duplicate action names are rejected with EEXIST. */
8997 CHECK(!action_find(p, name), EEXIST);
8999 if (args_struct_type_name) {
9000 CHECK_NAME(args_struct_type_name, EINVAL);
9001 args_struct_type = struct_type_find(p, args_struct_type_name);
9002 CHECK(args_struct_type, EINVAL);
9004 args_struct_type = NULL;
9007 /* Node allocation. */
9008 a = calloc(1, sizeof(struct action));
9010 if (args_struct_type) {
/* One endianness flag per action argument; set later by the optimizer. */
9011 a->args_endianness = calloc(args_struct_type->n_fields, sizeof(int));
9012 if (!a->args_endianness) {
9018 /* Node initialization. */
9019 strcpy(a->name, name);
9020 a->st = args_struct_type;
9021 a->id = p->n_actions;
9023 /* Instruction translation. */
9024 err = instruction_config(p, a, instructions, n_instructions);
9026 free(a->args_endianness);
9031 /* Node add to tailq. */
9032 TAILQ_INSERT_TAIL(&p->actions, a, node);
/* action_build: materialize a flat id-indexed array of instruction pointers
 * for fast per-action dispatch at run time. NOTE(review): interior lines
 * elided in this extract; code bytes kept as-is. */
9039 action_build(struct rte_swx_pipeline *p)
9041 struct action *action;
9043 p->action_instructions = calloc(p->n_actions,
9044 sizeof(struct instruction *));
9045 CHECK(p->action_instructions, ENOMEM);
9047 TAILQ_FOREACH(action, &p->actions, node)
9048 p->action_instructions[action->id] = action->instructions;
/* action_build_free: release the flat array; NULLing the pointer makes the
 * call idempotent. */
9054 action_build_free(struct rte_swx_pipeline *p)
9056 free(p->action_instructions);
9057 p->action_instructions = NULL;
/* action_free: tear down build artifacts, then drain the actions tailq,
 * freeing each node's instruction list (loop structure partially elided). */
9061 action_free(struct rte_swx_pipeline *p)
9063 action_build_free(p);
9066 struct action *action;
9068 action = TAILQ_FIRST(&p->actions);
9072 TAILQ_REMOVE(&p->actions, action, node);
9073 free(action->instructions);
/* Count the mov (MOV or MOV_HM) instructions that read the given action
 * argument, identified by its byte offset in the args struct. Used by the
 * mov-all+validate pattern search to make sure a fused DMA does not hide
 * another consumer. NOTE(review): interior lines elided; code bytes as-is. */
9079 action_arg_src_mov_count(struct action *a,
9081 struct instruction *instructions,
9082 struct instruction_data *instruction_data,
9083 uint32_t n_instructions)
9085 uint32_t offset, n_users = 0, i;
/* Defensive argument validation (guard expression partially elided). */
9088 (arg_id >= a->st->n_fields) ||
9090 !instruction_data ||
9094 offset = a->st->fields[arg_id].offset / 8;
9096 for (i = 0; i < n_instructions; i++) {
9097 struct instruction *instr = &instructions[i];
9098 struct instruction_data *data = &instruction_data[i];
/* Skip anything that is not a live mov sourcing this exact arg offset from
 * struct 0 (the metadata/action-args struct). */
9100 if (data->invalid ||
9101 ((instr->type != INSTR_MOV) && (instr->type != INSTR_MOV_HM)) ||
9102 instr->mov.src.struct_id ||
9103 (instr->mov.src.offset != offset))
/* Table and table-type lookup helpers. NOTE(review): interior lines (returns,
 * NULL results) elided in this extract; code bytes kept as-is. */
/* table_type_find: lookup a registered table type by name. */
9115 static struct table_type *
9116 table_type_find(struct rte_swx_pipeline *p, const char *name)
9118 struct table_type *elem;
9120 TAILQ_FOREACH(elem, &p->table_types, node)
9121 if (strcmp(elem->name, name) == 0)
/* table_type_resolve: prefer the recommended type when its match type fits,
 * otherwise fall back to the first registered type with the right match
 * type. */
9127 static struct table_type *
9128 table_type_resolve(struct rte_swx_pipeline *p,
9129 const char *recommended_type_name,
9130 enum rte_swx_table_match_type match_type)
9132 struct table_type *elem;
9134 /* Only consider the recommended type if the match type is correct. */
9135 if (recommended_type_name)
9136 TAILQ_FOREACH(elem, &p->table_types, node)
9137 if (!strcmp(elem->name, recommended_type_name) &&
9138 (elem->match_type == match_type))
9141 /* Ignore the recommended type and get the first element with this match
9144 TAILQ_FOREACH(elem, &p->table_types, node)
9145 if (elem->match_type == match_type)
/* table_find: lookup a configured table by name. */
9151 static struct table *
9152 table_find(struct rte_swx_pipeline *p, const char *name)
9156 TAILQ_FOREACH(elem, &p->tables, node)
9157 if (strcmp(elem->name, name) == 0)
/* table_find_by_id: lookup a configured table by numeric id. */
9163 static struct table *
9164 table_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
9166 struct table *table = NULL;
9168 TAILQ_FOREACH(table, &p->tables, node)
9169 if (table->id == id)
/* Public API: register a table implementation (ops vtable) under a name and
 * match type. The ops struct is copied by value, so the caller's copy need
 * not outlive this call. NOTE(review): interior lines elided; code bytes
 * kept as-is. */
9176 rte_swx_pipeline_table_type_register(struct rte_swx_pipeline *p,
9178 enum rte_swx_table_match_type match_type,
9179 struct rte_swx_table_ops *ops)
9181 struct table_type *elem;
9185 CHECK_NAME(name, EINVAL);
9186 CHECK(!table_type_find(p, name), EEXIST);
/* create/lkp/free are the mandatory ops; others may be optional. */
9189 CHECK(ops->create, EINVAL);
9190 CHECK(ops->lkp, EINVAL);
9191 CHECK(ops->free, EINVAL);
9193 /* Node allocation. */
9194 elem = calloc(1, sizeof(struct table_type));
9195 CHECK(elem, ENOMEM);
9197 /* Node initialization. */
9198 strcpy(elem->name, name);
9199 elem->match_type = match_type;
9200 memcpy(&elem->ops, ops, sizeof(*ops));
9202 /* Node add to tailq. */
9203 TAILQ_INSERT_TAIL(&p->table_types, elem, node);
/* Derive the overall table match type from the per-field match types:
 * all-exact => EXACT; otherwise WILDCARD; at most one LPM field is allowed,
 * and only if every other field is exact. NOTE(review): interior lines
 * elided; code bytes kept as-is. */
9209 table_match_type_resolve(struct rte_swx_match_field_params *fields,
9211 enum rte_swx_table_match_type *match_type)
9213 uint32_t n_fields_em = 0, n_fields_lpm = 0, i;
9215 for (i = 0; i < n_fields; i++) {
9216 struct rte_swx_match_field_params *f = &fields[i];
9218 if (f->match_type == RTE_SWX_TABLE_MATCH_EXACT)
9221 if (f->match_type == RTE_SWX_TABLE_MATCH_LPM)
/* Reject >1 LPM field, or an LPM mixed with any non-exact field. */
9225 if ((n_fields_lpm > 1) ||
9226 (n_fields_lpm && (n_fields_em != n_fields - 1)))
9229 *match_type = (n_fields_em == n_fields) ?
9230 RTE_SWX_TABLE_MATCH_EXACT :
9231 RTE_SWX_TABLE_MATCH_WILDCARD;
/* Validate a table's match-field list: every field must be either a field of
 * one single header or a metadata field (no mixing headers), and no field may
 * appear twice (checked by offset). On success *header presumably receives
 * the common header or NULL for metadata — the assignment is elided; TODO
 * confirm. NOTE(review): interior lines elided; code bytes kept as-is. */
9237 table_match_fields_check(struct rte_swx_pipeline *p,
9238 struct rte_swx_pipeline_table_params *params,
9239 struct header **header)
9241 struct header *h0 = NULL;
9242 struct field *hf, *mf;
9243 uint32_t *offset = NULL, i;
9246 /* Return if no match fields. */
9247 if (!params->n_fields) {
/* A non-NULL fields pointer with zero count is inconsistent input. */
9248 if (params->fields) {
9256 /* Memory allocation. */
9257 offset = calloc(params->n_fields, sizeof(uint32_t));
9263 /* Check that all the match fields belong to either the same header or
9266 hf = header_field_parse(p, params->fields[0].name, &h0);
9267 mf = metadata_field_parse(p, params->fields[0].name);
/* Field 0 decides header-mode (h0 != NULL) vs metadata-mode. */
9273 offset[0] = h0 ? hf->offset : mf->offset;
9275 for (i = 1; i < params->n_fields; i++)
9279 hf = header_field_parse(p, params->fields[i].name, &h);
/* In header-mode, every field must resolve inside the same header h0. */
9280 if (!hf || (h->id != h0->id)) {
9285 offset[i] = hf->offset;
9287 mf = metadata_field_parse(p, params->fields[i].name);
9293 offset[i] = mf->offset;
9296 /* Check that there are no duplicated match fields. */
9297 for (i = 0; i < params->n_fields; i++) {
9300 for (j = 0; j < i; j++)
9301 if (offset[j] == offset[i]) {
/* Public API: configure a match-action table — validate fields/actions,
 * resolve the table type, allocate and initialize the table node, append it
 * to the tables tailq. NOTE(review): interior lines (error cleanup, the
 * no-fields type branch, n_tables increment) are elided; code bytes kept
 * as-is. */
9317 rte_swx_pipeline_table_config(struct rte_swx_pipeline *p,
9319 struct rte_swx_pipeline_table_params *params,
9320 const char *recommended_table_type_name,
9324 struct table_type *type;
9326 struct action *default_action;
9327 struct header *header = NULL;
9328 uint32_t action_data_size_max = 0, i;
9333 CHECK_NAME(name, EINVAL);
/* Name must be unique across both tables and selectors. */
9334 CHECK(!table_find(p, name), EEXIST);
9335 CHECK(!selector_find(p, name), EEXIST);
9337 CHECK(params, EINVAL);
9340 status = table_match_fields_check(p, params, &header);
9344 /* Action checks. */
9345 CHECK(params->n_actions, EINVAL);
9346 CHECK(params->action_names, EINVAL);
9347 for (i = 0; i < params->n_actions; i++) {
9348 const char *action_name = params->action_names[i];
9350 uint32_t action_data_size;
9352 CHECK_NAME(action_name, EINVAL);
9354 a = action_find(p, action_name);
/* Track the largest per-action argument blob for later allocation. */
9357 action_data_size = a->st ? a->st->n_bits / 8 : 0;
9358 if (action_data_size > action_data_size_max)
9359 action_data_size_max = action_data_size;
9362 CHECK_NAME(params->default_action_name, EINVAL);
/* NOTE(review): loop bound is p->n_actions but it indexes
 * params->action_names[] (params->n_actions entries) and the success check
 * at 9367 compares against params->n_actions — possible out-of-bounds read
 * when p->n_actions > params->n_actions; verify against upstream fix. */
9363 for (i = 0; i < p->n_actions; i++)
9364 if (!strcmp(params->action_names[i],
9365 params->default_action_name))
9367 CHECK(i < params->n_actions, EINVAL);
9368 default_action = action_find(p, params->default_action_name);
/* Default action data is only meaningful when the action has args. */
9369 CHECK((default_action->st && params->default_action_data) ||
9370 !params->default_action_data, EINVAL);
9372 /* Table type checks. */
9373 if (recommended_table_type_name)
9374 CHECK_NAME(recommended_table_type_name, EINVAL);
9376 if (params->n_fields) {
9377 enum rte_swx_table_match_type match_type;
9379 status = table_match_type_resolve(params->fields, params->n_fields, &match_type);
9383 type = table_type_resolve(p, recommended_table_type_name, match_type);
9384 CHECK(type, EINVAL);
9389 /* Memory allocation. */
9390 t = calloc(1, sizeof(struct table));
9393 t->fields = calloc(params->n_fields, sizeof(struct match_field));
9399 t->actions = calloc(params->n_actions, sizeof(struct action *));
9406 if (action_data_size_max) {
9407 t->default_action_data = calloc(1, action_data_size_max);
9408 if (!t->default_action_data) {
9416 /* Node initialization. */
9417 strcpy(t->name, name);
9418 if (args && args[0])
9419 strcpy(t->args, args);
9422 for (i = 0; i < params->n_fields; i++) {
/* NOTE(review): "¶ms" below is mojibake for "&params" (encoding damage in
 * this extract); bytes deliberately preserved here. */
9423 struct rte_swx_match_field_params *field = ¶ms->fields[i];
9424 struct match_field *f = &t->fields[i];
9426 f->match_type = field->match_type;
9428 header_field_parse(p, field->name, NULL) :
9429 metadata_field_parse(p, field->name);
9431 t->n_fields = params->n_fields;
9434 for (i = 0; i < params->n_actions; i++)
9435 t->actions[i] = action_find(p, params->action_names[i]);
9436 t->default_action = default_action;
9437 if (default_action->st)
9438 memcpy(t->default_action_data,
9439 params->default_action_data,
9440 default_action->st->n_bits / 8);
9441 t->n_actions = params->n_actions;
9442 t->default_action_is_const = params->default_action_is_const;
9443 t->action_data_size_max = action_data_size_max;
9446 t->id = p->n_tables;
9448 /* Node add to tailq. */
9449 TAILQ_INSERT_TAIL(&p->tables, t, node);
/* Build the rte_swx_table_params handed to the table implementation: compute
 * the packed key window (offset/size spanning first..last match field), a
 * key mask with 0xFF over each field's bytes, and the max action data size.
 * NOTE(review): interior lines (NULL checks, first/last assignments, return)
 * elided in this extract; code bytes kept as-is. */
9455 static struct rte_swx_table_params *
9456 table_params_get(struct table *table)
9458 struct rte_swx_table_params *params;
9459 struct field *first, *last;
9461 uint32_t key_size, key_offset, action_data_size, i;
9463 /* Memory allocation. */
9464 params = calloc(1, sizeof(struct rte_swx_table_params));
9468 /* Find first (smallest offset) and last (biggest offset) match fields. */
9469 first = table->fields[0].field;
9470 last = table->fields[0].field;
9472 for (i = 0; i < table->n_fields; i++) {
9473 struct field *f = table->fields[i].field;
9475 if (f->offset < first->offset)
9478 if (f->offset > last->offset)
9482 /* Key offset and size. */
/* Offsets/widths are kept in bits; /8 converts to bytes for the key window. */
9483 key_offset = first->offset / 8;
9484 key_size = (last->offset + last->n_bits - first->offset) / 8;
9486 /* Memory allocation. */
9487 key_mask = calloc(1, key_size);
/* Only the bytes of actual match fields participate; gaps stay zero. */
9494 for (i = 0; i < table->n_fields; i++) {
9495 struct field *f = table->fields[i].field;
9496 uint32_t start = (f->offset - first->offset) / 8;
9497 size_t size = f->n_bits / 8;
9499 memset(&key_mask[start], 0xFF, size);
9502 /* Action data size. */
9503 action_data_size = 0;
9504 for (i = 0; i < table->n_actions; i++) {
9505 struct action *action = table->actions[i];
9506 uint32_t ads = action->st ? action->st->n_bits / 8 : 0;
9508 if (ads > action_data_size)
9509 action_data_size = ads;
9513 params->match_type = table->type->match_type;
9514 params->key_size = key_size;
9515 params->key_offset = key_offset;
/* Ownership of key_mask transfers to params (freed by table_params_free). */
9516 params->key_mask0 = key_mask;
9517 params->action_data_size = action_data_size;
9518 params->n_keys_max = table->size;
/* table_params_free: releases the mask and (presumably, elided) the params
 * struct itself. */
9524 table_params_free(struct rte_swx_table_params *params)
9529 free(params->key_mask0);
9534 table_stub_lkp(void *table __rte_unused,
9535 void *mailbox __rte_unused,
9536 uint8_t **key __rte_unused,
9537 uint64_t *action_id __rte_unused,
9538 uint8_t **action_data __rte_unused,
9542 return 1; /* DONE. */
9546 table_build(struct rte_swx_pipeline *p)
9550 /* Per pipeline: table statistics. */
9551 p->table_stats = calloc(p->n_tables, sizeof(struct table_statistics));
9552 CHECK(p->table_stats, ENOMEM);
9554 for (i = 0; i < p->n_tables; i++) {
9555 p->table_stats[i].n_pkts_action = calloc(p->n_actions, sizeof(uint64_t));
9556 CHECK(p->table_stats[i].n_pkts_action, ENOMEM);
9559 /* Per thread: table runt-time. */
9560 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
9561 struct thread *t = &p->threads[i];
9562 struct table *table;
9564 t->tables = calloc(p->n_tables, sizeof(struct table_runtime));
9565 CHECK(t->tables, ENOMEM);
9567 TAILQ_FOREACH(table, &p->tables, node) {
9568 struct table_runtime *r = &t->tables[table->id];
9573 size = table->type->ops.mailbox_size_get();
9576 r->func = table->type->ops.lkp;
9580 r->mailbox = calloc(1, size);
9581 CHECK(r->mailbox, ENOMEM);
9585 r->key = table->header ?
9586 &t->structs[table->header->struct_id] :
9587 &t->structs[p->metadata_struct_id];
9589 r->func = table_stub_lkp;
9598 table_build_free(struct rte_swx_pipeline *p)
9602 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
9603 struct thread *t = &p->threads[i];
9609 for (j = 0; j < p->n_tables; j++) {
9610 struct table_runtime *r = &t->tables[j];
9619 if (p->table_stats) {
9620 for (i = 0; i < p->n_tables; i++)
9621 free(p->table_stats[i].n_pkts_action);
9623 free(p->table_stats);
9628 table_free(struct rte_swx_pipeline *p)
9630 table_build_free(p);
9636 elem = TAILQ_FIRST(&p->tables);
9640 TAILQ_REMOVE(&p->tables, elem, node);
9642 free(elem->actions);
9643 free(elem->default_action_data);
9649 struct table_type *elem;
9651 elem = TAILQ_FIRST(&p->table_types);
9655 TAILQ_REMOVE(&p->table_types, elem, node);
/* Find a selector table by name by walking the selector tailq. */
9663 static struct selector *
9664 selector_find(struct rte_swx_pipeline *p, const char *name)
9668 TAILQ_FOREACH(s, &p->selectors, node)
9669 if (strcmp(s->name, name) == 0)
/* Find a selector table by ID. */
9675 static struct selector *
9676 selector_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
9678 struct selector *s = NULL;
9680 TAILQ_FOREACH(s, &p->selectors, node)
/*
 * Validate the selector field set: all fields must either belong to the
 * same header or all be meta-data fields, and no field name may appear
 * twice. On success *header is the common header, or NULL when the fields
 * are meta-data fields.
 */
9688 selector_fields_check(struct rte_swx_pipeline *p,
9689 struct rte_swx_pipeline_selector_params *params,
9690 struct header **header)
9692 struct header *h0 = NULL;
9693 struct field *hf, *mf;
9696 /* Return if no selector fields. */
9697 if (!params->n_selector_fields || !params->selector_field_names)
9700 /* Check that all the selector fields either belong to the same header
9701 * or are all meta-data fields.
/* Classify the first field, then require every other field to match it. */
9703 hf = header_field_parse(p, params->selector_field_names[0], &h0);
9704 mf = metadata_field_parse(p, params->selector_field_names[0]);
9708 for (i = 1; i < params->n_selector_fields; i++)
9712 hf = header_field_parse(p, params->selector_field_names[i], &h);
9713 if (!hf || (h->id != h0->id))
9716 mf = metadata_field_parse(p, params->selector_field_names[i]);
9721 /* Check that there are no duplicated match fields. */
9722 for (i = 0; i < params->n_selector_fields; i++) {
9723 const char *field_name = params->selector_field_names[i];
9726 for (j = i + 1; j < params->n_selector_fields; j++)
9727 if (!strcmp(params->selector_field_names[j], field_name))
/*
 * Public API: configure a selector table. Validates the group ID and
 * member ID meta-data fields and the selector field set, allocates the
 * selector node, fills it in and appends it to the pipeline selector
 * tailq. Name must be unique among both tables and selectors.
 */
9739 rte_swx_pipeline_selector_config(struct rte_swx_pipeline *p,
9741 struct rte_swx_pipeline_selector_params *params)
9744 struct header *selector_header = NULL;
9745 struct field *group_id_field, *member_id_field;
/* Argument checks: unique name, valid meta-data fields, non-zero limits. */
9751 CHECK_NAME(name, EINVAL);
9752 CHECK(!table_find(p, name), EEXIST);
9753 CHECK(!selector_find(p, name), EEXIST);
9755 CHECK(params, EINVAL);
9757 CHECK_NAME(params->group_id_field_name, EINVAL);
9758 group_id_field = metadata_field_parse(p, params->group_id_field_name);
9759 CHECK(group_id_field, EINVAL);
9761 for (i = 0; i < params->n_selector_fields; i++) {
9762 const char *field_name = params->selector_field_names[i];
9764 CHECK_NAME(field_name, EINVAL);
9766 status = selector_fields_check(p, params, &selector_header);
9770 CHECK_NAME(params->member_id_field_name, EINVAL);
9771 member_id_field = metadata_field_parse(p, params->member_id_field_name);
9772 CHECK(member_id_field, EINVAL);
9774 CHECK(params->n_groups_max, EINVAL);
9776 CHECK(params->n_members_per_group_max, EINVAL);
9778 /* Memory allocation. */
9779 s = calloc(1, sizeof(struct selector));
9785 s->selector_fields = calloc(params->n_selector_fields, sizeof(struct field *));
9786 if (!s->selector_fields) {
9791 /* Node initialization. */
9792 strcpy(s->name, name);
9794 s->group_id_field = group_id_field;
9796 for (i = 0; i < params->n_selector_fields; i++) {
9797 const char *field_name = params->selector_field_names[i];
/* Resolve each field name against the common header or the meta-data. */
9799 s->selector_fields[i] = selector_header ?
9800 header_field_parse(p, field_name, NULL) :
9801 metadata_field_parse(p, field_name);
9804 s->n_selector_fields = params->n_selector_fields;
9806 s->selector_header = selector_header;
9808 s->member_id_field = member_id_field;
9810 s->n_groups_max = params->n_groups_max;
9812 s->n_members_per_group_max = params->n_members_per_group_max;
9814 s->id = p->n_selectors;
9816 /* Node add to tailq. */
9817 TAILQ_INSERT_TAIL(&p->selectors, s, node);
/* Error path: release the partially built node. */
9826 free(s->selector_fields);
/* Free the selector table params returned by selector_table_params_get(). */
9834 selector_params_free(struct rte_swx_table_selector_params *params)
9839 free(params->selector_mask);
/*
 * Build the rte_swx_table_selector_params for a selector: byte offsets of
 * the group/member ID meta-data fields, selector offset/size from the
 * first/last selector fields, and a byte mask covering every selector
 * field.
 */
9844 static struct rte_swx_table_selector_params *
9845 selector_table_params_get(struct selector *s)
9847 struct rte_swx_table_selector_params *params = NULL;
9848 struct field *first, *last;
9851 /* Memory allocation. */
9852 params = calloc(1, sizeof(struct rte_swx_table_selector_params));
/* Group ID. */
9857 params->group_id_offset = s->group_id_field->offset / 8;
9859 /* Find first (smallest offset) and last (biggest offset) selector fields. */
9860 first = s->selector_fields[0];
9861 last = s->selector_fields[0];
9863 for (i = 0; i < s->n_selector_fields; i++) {
9864 struct field *f = s->selector_fields[i];
9866 if (f->offset < first->offset)
9869 if (f->offset > last->offset)
9873 /* Selector offset and size. */
9874 params->selector_offset = first->offset / 8;
9875 params->selector_size = (last->offset + last->n_bits - first->offset) / 8;
9877 /* Memory allocation. */
9878 params->selector_mask = calloc(1, params->selector_size);
9879 if (!params->selector_mask)
9882 /* Selector mask. */
9883 for (i = 0; i < s->n_selector_fields; i++) {
9884 struct field *f = s->selector_fields[i];
9885 uint32_t start = (f->offset - first->offset) / 8;
9886 size_t size = f->n_bits / 8;
/* NOTE(review): "¶ms" below is a mis-encoded "&params" — fix the
 * source encoding in the original file.
 */
9888 memset(¶ms->selector_mask[start], 0xFF, size);
/* Member ID. */
9892 params->member_id_offset = s->member_id_field->offset / 8;
9894 /* Maximum number of groups. */
9895 params->n_groups_max = s->n_groups_max;
9897 /* Maximum number of members per group. */
9898 params->n_members_per_group_max = s->n_members_per_group_max;
/* Error path. */
9903 selector_params_free(params);
/* Tear down the per-thread selector run-time and the selector stats. */
9908 selector_build_free(struct rte_swx_pipeline *p)
9912 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
9913 struct thread *t = &p->threads[i];
9919 for (j = 0; j < p->n_selectors; j++) {
9920 struct selector_runtime *r = &t->selectors[j];
9926 t->selectors = NULL;
9929 free(p->selector_stats);
9930 p->selector_stats = NULL;
/*
 * Build-time selector setup: per-pipeline statistics plus per-thread
 * run-time state (mailbox and group/selector/member buffer pointers into
 * the metadata struct or the selector header struct).
 */
9934 selector_build(struct rte_swx_pipeline *p)
9939 /* Per pipeline: selector statistics. */
9940 p->selector_stats = calloc(p->n_selectors, sizeof(struct selector_statistics));
9941 if (!p->selector_stats) {
9946 /* Per thread: selector run-time. */
9947 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
9948 struct thread *t = &p->threads[i];
9951 t->selectors = calloc(p->n_selectors, sizeof(struct selector_runtime));
9952 if (!t->selectors) {
9957 TAILQ_FOREACH(s, &p->selectors, node) {
9958 struct selector_runtime *r = &t->selectors[s->id];
/* r->mailbox. */
9962 size = rte_swx_table_selector_mailbox_size_get();
9964 r->mailbox = calloc(1, size);
9971 /* r->group_id_buffer. */
9972 r->group_id_buffer = &t->structs[p->metadata_struct_id];
9974 /* r->selector_buffer. */
9975 r->selector_buffer = s->selector_header ?
9976 &t->structs[s->selector_header->struct_id] :
9977 &t->structs[p->metadata_struct_id];
9979 /* r->member_id_buffer. */
9980 r->member_id_buffer = &t->structs[p->metadata_struct_id];
/* Error path. */
9987 selector_build_free(p);
/* Free all selector tables owned by the pipeline. */
9992 selector_free(struct rte_swx_pipeline *p)
9994 selector_build_free(p);
9996 /* Selector tables. */
9998 struct selector *elem;
10000 elem = TAILQ_FIRST(&p->selectors);
10004 TAILQ_REMOVE(&p->selectors, elem, node);
10005 free(elem->selector_fields);
/*
 * Build the pipeline table state array: one entry per table followed by
 * one entry per selector. Each regular table gets its underlying table
 * object and a private copy of the default action data; each selector
 * gets its selector table object.
 */
10014 table_state_build(struct rte_swx_pipeline *p)
10016 struct table *table;
10017 struct selector *s;
10019 p->table_state = calloc(p->n_tables + p->n_selectors,
10020 sizeof(struct rte_swx_table_state));
10021 CHECK(p->table_state, ENOMEM);
10023 TAILQ_FOREACH(table, &p->tables, node) {
10024 struct rte_swx_table_state *ts = &p->table_state[table->id];
10027 struct rte_swx_table_params *params;
/* ts->obj. */
10030 params = table_params_get(table);
10031 CHECK(params, ENOMEM);
10033 ts->obj = table->type->ops.create(params,
10038 table_params_free(params);
10039 CHECK(ts->obj, ENODEV);
10042 /* ts->default_action_data. */
10043 if (table->action_data_size_max) {
10044 ts->default_action_data =
10045 malloc(table->action_data_size_max);
10046 CHECK(ts->default_action_data, ENOMEM);
10048 memcpy(ts->default_action_data,
10049 table->default_action_data,
10050 table->action_data_size_max);
10053 /* ts->default_action_id. */
10054 ts->default_action_id = table->default_action->id;
/* Selector state entries follow the regular table entries. */
10057 TAILQ_FOREACH(s, &p->selectors, node) {
10058 struct rte_swx_table_state *ts = &p->table_state[p->n_tables + s->id];
10059 struct rte_swx_table_selector_params *params;
/* ts->obj. */
10062 params = selector_table_params_get(s);
10063 CHECK(params, ENOMEM);
10065 ts->obj = rte_swx_table_selector_create(params, NULL, p->numa_node);
10067 selector_params_free(params);
10068 CHECK(ts->obj, ENODEV);
/* Tear down everything allocated by table_state_build(). */
10075 table_state_build_free(struct rte_swx_pipeline *p)
10079 if (!p->table_state)
10082 for (i = 0; i < p->n_tables; i++) {
10083 struct rte_swx_table_state *ts = &p->table_state[i];
10084 struct table *table = table_find_by_id(p, i);
/* ts->obj. */
10087 if (table->type && ts->obj)
10088 table->type->ops.free(ts->obj);
10090 /* ts->default_action_data. */
10091 free(ts->default_action_data);
10094 for (i = 0; i < p->n_selectors; i++) {
10095 struct rte_swx_table_state *ts = &p->table_state[p->n_tables + i];
/* ts->obj. */
10099 rte_swx_table_selector_free(ts->obj);
10102 free(p->table_state);
10103 p->table_state = NULL;
/* Free the table state (delegates to the build-free path). */
10107 table_state_free(struct rte_swx_pipeline *p)
10109 table_state_build_free(p);
/* Find a register array by name. */
10115 static struct regarray *
10116 regarray_find(struct rte_swx_pipeline *p, const char *name)
10118 struct regarray *elem;
10120 TAILQ_FOREACH(elem, &p->regarrays, node)
10121 if (!strcmp(elem->name, name))
/* Find a register array by ID. */
10127 static struct regarray *
10128 regarray_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
10130 struct regarray *elem = NULL;
10132 TAILQ_FOREACH(elem, &p->regarrays, node)
10133 if (elem->id == id)
/*
 * Public API: configure a register array. The size is rounded up to the
 * next power of two so that run-time indexing can use a bit mask.
 */
10140 rte_swx_pipeline_regarray_config(struct rte_swx_pipeline *p,
10145 struct regarray *r;
10149 CHECK_NAME(name, EINVAL);
10150 CHECK(!regarray_find(p, name), EEXIST);
10152 CHECK(size, EINVAL);
10153 size = rte_align32pow2(size);
10155 /* Memory allocation. */
10156 r = calloc(1, sizeof(struct regarray));
10159 /* Node initialization. */
10160 strcpy(r->name, name);
10161 r->init_val = init_val;
10163 r->id = p->n_regarrays;
10165 /* Node add to tailq. */
10166 TAILQ_INSERT_TAIL(&p->regarrays, r, node);
/*
 * Build-time register array setup: allocate the run-time storage (NUMA
 * aware, cache-line aligned via env_malloc) and apply the configured
 * initial value to every element.
 */
10173 regarray_build(struct rte_swx_pipeline *p)
10175 struct regarray *regarray;
10177 if (!p->n_regarrays)
10180 p->regarray_runtime = calloc(p->n_regarrays, sizeof(struct regarray_runtime));
10181 CHECK(p->regarray_runtime, ENOMEM);
10183 TAILQ_FOREACH(regarray, &p->regarrays, node) {
10184 struct regarray_runtime *r = &p->regarray_runtime[regarray->id];
10187 r->regarray = env_malloc(regarray->size * sizeof(uint64_t),
10188 RTE_CACHE_LINE_SIZE,
10190 CHECK(r->regarray, ENOMEM);
10192 if (regarray->init_val)
10193 for (i = 0; i < regarray->size; i++)
10194 r->regarray[i] = regarray->init_val;
/* Size is a power of two, so the index mask is size - 1. */
10196 r->size_mask = regarray->size - 1;
/* Tear down everything allocated by regarray_build(). */
10203 regarray_build_free(struct rte_swx_pipeline *p)
10207 if (!p->regarray_runtime)
10210 for (i = 0; i < p->n_regarrays; i++) {
10211 struct regarray *regarray = regarray_find_by_id(p, i);
10212 struct regarray_runtime *r = &p->regarray_runtime[i];
10214 env_free(r->regarray, regarray->size * sizeof(uint64_t));
10217 free(p->regarray_runtime);
10218 p->regarray_runtime = NULL;
/* Free all register arrays owned by the pipeline. */
10222 regarray_free(struct rte_swx_pipeline *p)
10224 regarray_build_free(p);
10227 struct regarray *elem;
10229 elem = TAILQ_FIRST(&p->regarrays);
10233 TAILQ_REMOVE(&p->regarrays, elem, node);
/* Find a meter profile by name. */
10241 static struct meter_profile *
10242 meter_profile_find(struct rte_swx_pipeline *p, const char *name)
10244 struct meter_profile *elem;
10246 TAILQ_FOREACH(elem, &p->meter_profiles, node)
10247 if (!strcmp(elem->name, name))
/* Find a meter array by name. */
10253 static struct metarray *
10254 metarray_find(struct rte_swx_pipeline *p, const char *name)
10256 struct metarray *elem;
10258 TAILQ_FOREACH(elem, &p->metarrays, node)
10259 if (!strcmp(elem->name, name))
/* Find a meter array by ID. */
10265 static struct metarray *
10266 metarray_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
10268 struct metarray *elem = NULL;
10270 TAILQ_FOREACH(elem, &p->metarrays, node)
10271 if (elem->id == id)
/*
 * Public API: configure a meter array. The size is rounded up to the next
 * power of two so that run-time indexing can use a bit mask.
 */
10278 rte_swx_pipeline_metarray_config(struct rte_swx_pipeline *p,
10282 struct metarray *m;
10286 CHECK_NAME(name, EINVAL);
10287 CHECK(!metarray_find(p, name), EEXIST);
10289 CHECK(size, EINVAL);
10290 size = rte_align32pow2(size);
10292 /* Memory allocation. */
10293 m = calloc(1, sizeof(struct metarray));
10296 /* Node initialization. */
10297 strcpy(m->name, name);
10299 m->id = p->n_metarrays;
10301 /* Node add to tailq. */
10302 TAILQ_INSERT_TAIL(&p->metarrays, m, node);
/* Default trTCM meter profile assigned to every meter at init time. */
10308 struct meter_profile meter_profile_default = {
10317 .cir_bytes_per_period = 1,
10319 .pir_bytes_per_period = 1,
/*
 * Initialize one meter: zero it, configure it from the default profile,
 * set the color mask to GREEN and bump the default profile's user count.
 */
10326 meter_init(struct meter *m)
10328 memset(m, 0, sizeof(struct meter));
10329 rte_meter_trtcm_config(&m->m, &meter_profile_default.profile);
10330 m->profile = &meter_profile_default;
10331 m->color_mask = RTE_COLOR_GREEN;
10333 meter_profile_default.n_users++;
/*
 * Build-time meter array setup: allocate the run-time meters (NUMA aware,
 * cache-line aligned via env_malloc) and initialize each one to the
 * default profile.
 */
10337 metarray_build(struct rte_swx_pipeline *p)
10339 struct metarray *m;
10341 if (!p->n_metarrays)
10344 p->metarray_runtime = calloc(p->n_metarrays, sizeof(struct metarray_runtime));
10345 CHECK(p->metarray_runtime, ENOMEM);
10347 TAILQ_FOREACH(m, &p->metarrays, node) {
10348 struct metarray_runtime *r = &p->metarray_runtime[m->id];
10351 r->metarray = env_malloc(m->size * sizeof(struct meter),
10352 RTE_CACHE_LINE_SIZE,
10354 CHECK(r->metarray, ENOMEM);
10356 for (i = 0; i < m->size; i++)
10357 meter_init(&r->metarray[i]);
/* Size is a power of two, so the index mask is size - 1. */
10359 r->size_mask = m->size - 1;
/* Tear down everything allocated by metarray_build(). */
10366 metarray_build_free(struct rte_swx_pipeline *p)
10370 if (!p->metarray_runtime)
10373 for (i = 0; i < p->n_metarrays; i++) {
10374 struct metarray *m = metarray_find_by_id(p, i);
10375 struct metarray_runtime *r = &p->metarray_runtime[i];
10377 env_free(r->metarray, m->size * sizeof(struct meter));
10380 free(p->metarray_runtime);
10381 p->metarray_runtime = NULL;
/* Free all meter arrays and meter profiles owned by the pipeline. */
10385 metarray_free(struct rte_swx_pipeline *p)
10387 metarray_build_free(p);
10389 /* Meter arrays. */
10391 struct metarray *elem;
10393 elem = TAILQ_FIRST(&p->metarrays);
10397 TAILQ_REMOVE(&p->metarrays, elem, node);
10401 /* Meter profiles. */
10403 struct meter_profile *elem;
10405 elem = TAILQ_FIRST(&p->meter_profiles);
10409 TAILQ_REMOVE(&p->meter_profiles, elem, node);
/*
 * Public API: allocate and initialize a new pipeline for the given NUMA
 * node. All object tailqs start empty; struct ID 0 is reserved for the
 * action data struct.
 */
10418 rte_swx_pipeline_config(struct rte_swx_pipeline **p, int numa_node)
10420 struct rte_swx_pipeline *pipeline;
10422 /* Check input parameters. */
10425 /* Memory allocation. */
10426 pipeline = calloc(1, sizeof(struct rte_swx_pipeline));
10427 CHECK(pipeline, ENOMEM);
10429 /* Initialization. */
10430 TAILQ_INIT(&pipeline->struct_types);
10431 TAILQ_INIT(&pipeline->port_in_types);
10432 TAILQ_INIT(&pipeline->ports_in);
10433 TAILQ_INIT(&pipeline->port_out_types);
10434 TAILQ_INIT(&pipeline->ports_out);
10435 TAILQ_INIT(&pipeline->extern_types);
10436 TAILQ_INIT(&pipeline->extern_objs);
10437 TAILQ_INIT(&pipeline->extern_funcs);
10438 TAILQ_INIT(&pipeline->headers);
10439 TAILQ_INIT(&pipeline->actions);
10440 TAILQ_INIT(&pipeline->table_types);
10441 TAILQ_INIT(&pipeline->tables);
10442 TAILQ_INIT(&pipeline->selectors);
10443 TAILQ_INIT(&pipeline->regarrays);
10444 TAILQ_INIT(&pipeline->meter_profiles);
10445 TAILQ_INIT(&pipeline->metarrays);
10447 pipeline->n_structs = 1; /* Struct 0 is reserved for action_data. */
10448 pipeline->numa_node = numa_node;
/* Public API: free the pipeline and every object it owns. */
10455 rte_swx_pipeline_free(struct rte_swx_pipeline *p)
10460 free(p->instructions);
10464 table_state_free(p);
10470 extern_func_free(p);
10471 extern_obj_free(p);
/*
 * Public API: load the pipeline program and reset every thread's
 * instruction pointer to the start of the program.
 */
10480 rte_swx_pipeline_instructions_config(struct rte_swx_pipeline *p,
10481 const char **instructions,
10482 uint32_t n_instructions)
10487 err = instruction_config(p, NULL, instructions, n_instructions);
10491 /* Thread instruction pointer reset. */
10492 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
10493 struct thread *t = &p->threads[i];
10495 thread_ip_reset(p, t);
/*
 * Public API: build the pipeline. Runs every per-object build stage in
 * dependency order; on failure the error path unwinds all stages in
 * reverse order. Build may only run once per pipeline (EEXIST otherwise).
 */
10502 rte_swx_pipeline_build(struct rte_swx_pipeline *p)
10507 CHECK(p->build_done == 0, EEXIST);
10509 status = port_in_build(p);
10513 status = port_out_build(p);
10517 status = struct_build(p);
10521 status = extern_obj_build(p);
10525 status = extern_func_build(p);
10529 status = header_build(p);
10533 status = metadata_build(p);
10537 status = action_build(p);
10541 status = table_build(p);
10545 status = selector_build(p);
10549 status = table_state_build(p);
10553 status = regarray_build(p);
10557 status = metarray_build(p);
/* Error path: unwind all build stages in reverse order. */
10565 metarray_build_free(p);
10566 regarray_build_free(p);
10567 table_state_build_free(p);
10568 selector_build_free(p);
10569 table_build_free(p);
10570 action_build_free(p);
10571 metadata_build_free(p);
10572 header_build_free(p);
10573 extern_func_build_free(p);
10574 extern_obj_build_free(p);
10575 port_out_build_free(p);
10576 port_in_build_free(p);
10577 struct_build_free(p);
/* Public API: execute n_instructions pipeline instructions. */
10583 rte_swx_pipeline_run(struct rte_swx_pipeline *p, uint32_t n_instructions)
10587 for (i = 0; i < n_instructions; i++)
/* Public API: flush every output port that provides a flush callback. */
10592 rte_swx_pipeline_flush(struct rte_swx_pipeline *p)
10596 for (i = 0; i < p->n_ports_out; i++) {
10597 struct port_out_runtime *port = &p->out[i];
10600 port->flush(port->obj);
/*
 * Control API: report pipeline-level object counts. Action and table
 * counts are recomputed by walking the corresponding tailqs.
 */
10608 rte_swx_ctl_pipeline_info_get(struct rte_swx_pipeline *p,
10609 struct rte_swx_ctl_pipeline_info *pipeline)
10611 struct action *action;
10612 struct table *table;
10613 uint32_t n_actions = 0, n_tables = 0;
10615 if (!p || !pipeline)
10618 TAILQ_FOREACH(action, &p->actions, node)
10621 TAILQ_FOREACH(table, &p->tables, node)
10624 pipeline->n_ports_in = p->n_ports_in;
10625 pipeline->n_ports_out = p->n_ports_out;
10626 pipeline->n_actions = n_actions;
10627 pipeline->n_tables = n_tables;
10628 pipeline->n_selectors = p->n_selectors;
10629 pipeline->n_regarrays = p->n_regarrays;
10630 pipeline->n_metarrays = p->n_metarrays;
/* Control API: report the pipeline's NUMA node. */
10636 rte_swx_ctl_pipeline_numa_node_get(struct rte_swx_pipeline *p, int *numa_node)
10638 if (!p || !numa_node)
10641 *numa_node = p->numa_node;
/* Control API: report an action's name and argument count. */
10646 rte_swx_ctl_action_info_get(struct rte_swx_pipeline *p,
10647 uint32_t action_id,
10648 struct rte_swx_ctl_action_info *action)
10650 struct action *a = NULL;
10652 if (!p || (action_id >= p->n_actions) || !action)
10655 a = action_find_by_id(p, action_id);
10659 strcpy(action->name, a->name);
/* Actions without an argument struct have zero arguments. */
10660 action->n_args = a->st ? a->st->n_fields : 0;
/* Control API: report one action argument's name, size and endianness. */
10665 rte_swx_ctl_action_arg_info_get(struct rte_swx_pipeline *p,
10666 uint32_t action_id,
10667 uint32_t action_arg_id,
10668 struct rte_swx_ctl_action_arg_info *action_arg)
10670 struct action *a = NULL;
10671 struct field *arg = NULL;
10673 if (!p || (action_id >= p->n_actions) || !action_arg)
10676 a = action_find_by_id(p, action_id);
10677 if (!a || !a->st || (action_arg_id >= a->st->n_fields))
10680 arg = &a->st->fields[action_arg_id];
10681 strcpy(action_arg->name, arg->name);
10682 action_arg->n_bits = arg->n_bits;
10683 action_arg->is_network_byte_order = a->args_endianness[action_arg_id];
/* Control API: report a table's name, args and field/action counts. */
10689 rte_swx_ctl_table_info_get(struct rte_swx_pipeline *p,
10691 struct rte_swx_ctl_table_info *table)
10693 struct table *t = NULL;
10698 t = table_find_by_id(p, table_id);
10702 strcpy(table->name, t->name);
10703 strcpy(table->args, t->args);
10704 table->n_match_fields = t->n_fields;
10705 table->n_actions = t->n_actions;
10706 table->default_action_is_const = t->default_action_is_const;
10707 table->size = t->size;
/* Control API: report one table match field's type, location and size. */
10712 rte_swx_ctl_table_match_field_info_get(struct rte_swx_pipeline *p,
10714 uint32_t match_field_id,
10715 struct rte_swx_ctl_table_match_field_info *match_field)
10718 struct match_field *f;
10720 if (!p || (table_id >= p->n_tables) || !match_field)
10723 t = table_find_by_id(p, table_id);
10724 if (!t || (match_field_id >= t->n_fields))
10727 f = &t->fields[match_field_id];
10728 match_field->match_type = f->match_type;
10729 match_field->is_header = t->header ? 1 : 0;
10730 match_field->n_bits = f->field->n_bits;
10731 match_field->offset = f->field->offset;
/* Control API: report the action ID at a given table action slot. */
10737 rte_swx_ctl_table_action_info_get(struct rte_swx_pipeline *p,
10739 uint32_t table_action_id,
10740 struct rte_swx_ctl_table_action_info *table_action)
10744 if (!p || (table_id >= p->n_tables) || !table_action)
10747 t = table_find_by_id(p, table_id);
10748 if (!t || (table_action_id >= t->n_actions))
10751 table_action->action_id = t->actions[table_action_id]->id;
/* Control API: copy out a table type's ops vector. */
10757 rte_swx_ctl_table_ops_get(struct rte_swx_pipeline *p,
10759 struct rte_swx_table_ops *table_ops,
10764 if (!p || (table_id >= p->n_tables))
10767 t = table_find_by_id(p, table_id);
10773 memcpy(table_ops, &t->type->ops, sizeof(*table_ops));
/* Control API: report a selector's name, field count and group limits. */
10783 rte_swx_ctl_selector_info_get(struct rte_swx_pipeline *p,
10784 uint32_t selector_id,
10785 struct rte_swx_ctl_selector_info *selector)
10787 struct selector *s = NULL;
10789 if (!p || !selector)
10792 s = selector_find_by_id(p, selector_id);
10796 strcpy(selector->name, s->name);
10798 selector->n_selector_fields = s->n_selector_fields;
10799 selector->n_groups_max = s->n_groups_max;
10800 selector->n_members_per_group_max = s->n_members_per_group_max;
/* Control API: describe the selector's group ID meta-data field. */
10806 rte_swx_ctl_selector_group_id_field_info_get(struct rte_swx_pipeline *p,
10807 uint32_t selector_id,
10808 struct rte_swx_ctl_table_match_field_info *field)
10810 struct selector *s;
10812 if (!p || (selector_id >= p->n_selectors) || !field)
10815 s = selector_find_by_id(p, selector_id);
/* Group ID is always reported as an exact-match meta-data field. */
10819 field->match_type = RTE_SWX_TABLE_MATCH_EXACT;
10820 field->is_header = 0;
10821 field->n_bits = s->group_id_field->n_bits;
10822 field->offset = s->group_id_field->offset;
/* Control API: describe one selector field. */
10828 rte_swx_ctl_selector_field_info_get(struct rte_swx_pipeline *p,
10829 uint32_t selector_id,
10830 uint32_t selector_field_id,
10831 struct rte_swx_ctl_table_match_field_info *field)
10833 struct selector *s;
10836 if (!p || (selector_id >= p->n_selectors) || !field)
10839 s = selector_find_by_id(p, selector_id);
10840 if (!s || (selector_field_id >= s->n_selector_fields))
10843 f = s->selector_fields[selector_field_id];
10844 field->match_type = RTE_SWX_TABLE_MATCH_EXACT;
10845 field->is_header = s->selector_header ? 1 : 0;
10846 field->n_bits = f->n_bits;
10847 field->offset = f->offset;
/* Control API: describe the selector's member ID meta-data field. */
10853 rte_swx_ctl_selector_member_id_field_info_get(struct rte_swx_pipeline *p,
10854 uint32_t selector_id,
10855 struct rte_swx_ctl_table_match_field_info *field)
10857 struct selector *s;
10859 if (!p || (selector_id >= p->n_selectors) || !field)
10862 s = selector_find_by_id(p, selector_id);
10866 field->match_type = RTE_SWX_TABLE_MATCH_EXACT;
10867 field->is_header = 0;
10868 field->n_bits = s->member_id_field->n_bits;
10869 field->offset = s->member_id_field->offset;
/* Public API: expose the pipeline's table state array (build required). */
10875 rte_swx_pipeline_table_state_get(struct rte_swx_pipeline *p,
10876 struct rte_swx_table_state **table_state)
10878 if (!p || !table_state || !p->build_done)
10881 *table_state = p->table_state;
/* Public API: install a new table state array (build required). */
10886 rte_swx_pipeline_table_state_set(struct rte_swx_pipeline *p,
10887 struct rte_swx_table_state *table_state)
10889 if (!p || !table_state || !p->build_done)
10892 p->table_state = table_state;
/* Control API: read input port statistics via the port type's callback. */
10897 rte_swx_ctl_pipeline_port_in_stats_read(struct rte_swx_pipeline *p,
10899 struct rte_swx_port_in_stats *stats)
10901 struct port_in *port;
10906 port = port_in_find(p, port_id);
10910 port->type->ops.stats_read(port->obj, stats);
/* Control API: read output port statistics via the port type's callback. */
10915 rte_swx_ctl_pipeline_port_out_stats_read(struct rte_swx_pipeline *p,
10917 struct rte_swx_port_out_stats *stats)
10919 struct port_out *port;
10924 port = port_out_find(p, port_id);
10928 port->type->ops.stats_read(port->obj, stats);
/*
 * Control API: read a table's statistics by name. Copies the per-action
 * packet counters and the hit/miss counters into *stats. The caller must
 * supply stats->n_pkts_action storage for p->n_actions entries.
 */
10933 rte_swx_ctl_pipeline_table_stats_read(struct rte_swx_pipeline *p,
10934 const char *table_name,
10935 struct rte_swx_table_stats *stats)
10937 struct table *table;
10938 struct table_statistics *table_stats;
10940 if (!p || !table_name || !table_name[0] || !stats || !stats->n_pkts_action)
10943 table = table_find(p, table_name);
10947 table_stats = &p->table_stats[table->id];
10949 memcpy(stats->n_pkts_action,
10950 table_stats->n_pkts_action,
10951 p->n_actions * sizeof(uint64_t));
/* n_pkts_hit[1] counts hits, n_pkts_hit[0] counts misses. */
10953 stats->n_pkts_hit = table_stats->n_pkts_hit[1];
10954 stats->n_pkts_miss = table_stats->n_pkts_hit[0];
/* Control API: read a selector's packet counter by name. */
10960 rte_swx_ctl_pipeline_selector_stats_read(struct rte_swx_pipeline *p,
10961 const char *selector_name,
10962 struct rte_swx_pipeline_selector_stats *stats)
10964 struct selector *s;
10966 if (!p || !selector_name || !selector_name[0] || !stats)
10969 s = selector_find(p, selector_name);
10973 stats->n_pkts = p->selector_stats[s->id].n_pkts;
/* Control API: report a register array's name and size. */
10979 rte_swx_ctl_regarray_info_get(struct rte_swx_pipeline *p,
10980 uint32_t regarray_id,
10981 struct rte_swx_ctl_regarray_info *regarray)
10983 struct regarray *r;
10985 if (!p || !regarray)
10988 r = regarray_find_by_id(p, regarray_id);
10992 strcpy(regarray->name, r->name);
10993 regarray->size = r->size;
/* Control API: read one register array element by name and index. */
10998 rte_swx_ctl_pipeline_regarray_read(struct rte_swx_pipeline *p,
10999 const char *regarray_name,
11000 uint32_t regarray_index,
11003 struct regarray *regarray;
11004 struct regarray_runtime *r;
11006 if (!p || !regarray_name || !value)
11009 regarray = regarray_find(p, regarray_name);
11010 if (!regarray || (regarray_index >= regarray->size))
11013 r = &p->regarray_runtime[regarray->id];
11014 *value = r->regarray[regarray_index];
/* Control API: write one register array element by name and index. */
11019 rte_swx_ctl_pipeline_regarray_write(struct rte_swx_pipeline *p,
11020 const char *regarray_name,
11021 uint32_t regarray_index,
11024 struct regarray *regarray;
11025 struct regarray_runtime *r;
11027 if (!p || !regarray_name)
11030 regarray = regarray_find(p, regarray_name);
11031 if (!regarray || (regarray_index >= regarray->size))
11034 r = &p->regarray_runtime[regarray->id];
11035 r->regarray[regarray_index] = value;
/* Control API: report a meter array's name and size. */
11040 rte_swx_ctl_metarray_info_get(struct rte_swx_pipeline *p,
11041 uint32_t metarray_id,
11042 struct rte_swx_ctl_metarray_info *metarray)
11044 struct metarray *m;
11046 if (!p || !metarray)
11049 m = metarray_find_by_id(p, metarray_id);
11053 strcpy(metarray->name, m->name);
11054 metarray->size = m->size;
/*
 * Control API: register a new trTCM meter profile under a unique name.
 * The run-time profile is pre-computed once here so that meters can be
 * configured from it cheaply later.
 */
11059 rte_swx_ctl_meter_profile_add(struct rte_swx_pipeline *p,
11061 struct rte_meter_trtcm_params *params)
11063 struct meter_profile *mp;
11067 CHECK_NAME(name, EINVAL);
11068 CHECK(params, EINVAL);
11069 CHECK(!meter_profile_find(p, name), EEXIST);
11071 /* Node allocation. */
11072 mp = calloc(1, sizeof(struct meter_profile));
11075 /* Node initialization. */
11076 strcpy(mp->name, name);
11077 memcpy(&mp->params, params, sizeof(struct rte_meter_trtcm_params));
11078 status = rte_meter_trtcm_profile_config(&mp->profile, params);
11084 /* Node add to tailq. */
11085 TAILQ_INSERT_TAIL(&p->meter_profiles, mp, node);
/* Control API: delete an unused meter profile (EBUSY while referenced). */
11091 rte_swx_ctl_meter_profile_delete(struct rte_swx_pipeline *p,
11094 struct meter_profile *mp;
11097 CHECK_NAME(name, EINVAL);
11099 mp = meter_profile_find(p, name);
11101 CHECK(!mp->n_users, EBUSY);
11103 /* Remove node from tailq. */
11104 TAILQ_REMOVE(&p->meter_profiles, mp, node);
/* Control API: reset one meter back to the default profile. */
11111 rte_swx_ctl_meter_reset(struct rte_swx_pipeline *p,
11112 const char *metarray_name,
11113 uint32_t metarray_index)
11115 struct meter_profile *mp_old;
11116 struct metarray *metarray;
11117 struct metarray_runtime *metarray_runtime;
11121 CHECK_NAME(metarray_name, EINVAL);
11123 metarray = metarray_find(p, metarray_name);
11124 CHECK(metarray, EINVAL);
11125 CHECK(metarray_index < metarray->size, EINVAL);
11127 metarray_runtime = &p->metarray_runtime[metarray->id];
11128 m = &metarray_runtime->metarray[metarray_index];
/* Remember the old profile so its user count can be decremented. */
11129 mp_old = m->profile;
/* Control API: bind one meter to a named profile. */
11139 rte_swx_ctl_meter_set(struct rte_swx_pipeline *p,
11140 const char *metarray_name,
11141 uint32_t metarray_index,
11142 const char *profile_name)
11144 struct meter_profile *mp, *mp_old;
11145 struct metarray *metarray;
11146 struct metarray_runtime *metarray_runtime;
11150 CHECK_NAME(metarray_name, EINVAL);
11152 metarray = metarray_find(p, metarray_name);
11153 CHECK(metarray, EINVAL);
11154 CHECK(metarray_index < metarray->size, EINVAL);
11156 mp = meter_profile_find(p, profile_name);
11159 metarray_runtime = &p->metarray_runtime[metarray->id];
11160 m = &metarray_runtime->metarray[metarray_index];
11161 mp_old = m->profile;
/* Reconfigure the meter in place from the new profile. */
11163 memset(m, 0, sizeof(struct meter));
11164 rte_meter_trtcm_config(&m->m, &mp->profile);
/* All colors enabled. */
11166 m->color_mask = RTE_COLORS;
11175 rte_swx_ctl_meter_stats_read(struct rte_swx_pipeline *p,
11176 const char *metarray_name,
11177 uint32_t metarray_index,
11178 struct rte_swx_ctl_meter_stats *stats)
11180 struct metarray *metarray;
11181 struct metarray_runtime *metarray_runtime;
11185 CHECK_NAME(metarray_name, EINVAL);
11187 metarray = metarray_find(p, metarray_name);
11188 CHECK(metarray, EINVAL);
11189 CHECK(metarray_index < metarray->size, EINVAL);
11191 CHECK(stats, EINVAL);
11193 metarray_runtime = &p->metarray_runtime[metarray->id];
11194 m = &metarray_runtime->metarray[metarray_index];
11196 memcpy(stats->n_pkts, m->n_pkts, sizeof(m->n_pkts));
11197 memcpy(stats->n_bytes, m->n_bytes, sizeof(m->n_bytes));