1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Intel Corporation
10 #include <arpa/inet.h>
12 #include <rte_common.h>
13 #include <rte_prefetch.h>
14 #include <rte_byteorder.h>
15 #include <rte_cycles.h>
16 #include <rte_meter.h>
18 #include <rte_swx_table_selector.h>
20 #include "rte_swx_pipeline.h"
21 #include "rte_swx_ctl.h"
23 #define CHECK(condition, err_code) \
29 #define CHECK_NAME(name, err_code) \
32 (strnlen((name), RTE_SWX_NAME_SIZE) < RTE_SWX_NAME_SIZE), \
35 #define CHECK_INSTRUCTION(instr, err_code) \
38 (strnlen((instr), RTE_SWX_INSTRUCTION_SIZE) < \
39 RTE_SWX_INSTRUCTION_SIZE), \
/* Debug trace: forwards its arguments directly to printf.
 * NOTE(review): presumably guarded by a trace-enable conditional on the
 * (not visible) preceding lines — confirm before relying on it in fast path.
 */
47 #define TRACE(...) printf(__VA_ARGS__)
/* 64-bit byte-order conversion shorthands, wrapping the rte_byteorder.h
 * primitives: ntoh64() converts big-endian (network) to host order,
 * hton64() converts host order to big-endian (network).
 */
55 #define ntoh64(x) rte_be_to_cpu_64(x)
56 #define hton64(x) rte_cpu_to_be_64(x)
58 #ifndef RTE_SWX_PIPELINE_HUGE_PAGES_DISABLE
60 #include <rte_malloc.h>
63 env_malloc(size_t size, size_t alignment, int numa_node)
65 return rte_zmalloc_socket(NULL, size, alignment, numa_node);
69 env_free(void *start, size_t size __rte_unused)
79 env_malloc(size_t size, size_t alignment __rte_unused, int numa_node)
83 if (numa_available() == -1)
86 start = numa_alloc_onnode(size, numa_node);
90 memset(start, 0, size);
95 env_free(void *start, size_t size)
97 if (numa_available() == -1)
100 numa_free(start, size);
109 char name[RTE_SWX_NAME_SIZE];
116 TAILQ_ENTRY(struct_type) node;
117 char name[RTE_SWX_NAME_SIZE];
118 struct field *fields;
125 TAILQ_HEAD(struct_type_tailq, struct_type);
130 struct port_in_type {
131 TAILQ_ENTRY(port_in_type) node;
132 char name[RTE_SWX_NAME_SIZE];
133 struct rte_swx_port_in_ops ops;
136 TAILQ_HEAD(port_in_type_tailq, port_in_type);
139 TAILQ_ENTRY(port_in) node;
140 struct port_in_type *type;
145 TAILQ_HEAD(port_in_tailq, port_in);
147 struct port_in_runtime {
148 rte_swx_port_in_pkt_rx_t pkt_rx;
155 struct port_out_type {
156 TAILQ_ENTRY(port_out_type) node;
157 char name[RTE_SWX_NAME_SIZE];
158 struct rte_swx_port_out_ops ops;
161 TAILQ_HEAD(port_out_type_tailq, port_out_type);
164 TAILQ_ENTRY(port_out) node;
165 struct port_out_type *type;
170 TAILQ_HEAD(port_out_tailq, port_out);
172 struct port_out_runtime {
173 rte_swx_port_out_pkt_tx_t pkt_tx;
174 rte_swx_port_out_flush_t flush;
181 struct extern_type_member_func {
182 TAILQ_ENTRY(extern_type_member_func) node;
183 char name[RTE_SWX_NAME_SIZE];
184 rte_swx_extern_type_member_func_t func;
188 TAILQ_HEAD(extern_type_member_func_tailq, extern_type_member_func);
191 TAILQ_ENTRY(extern_type) node;
192 char name[RTE_SWX_NAME_SIZE];
193 struct struct_type *mailbox_struct_type;
194 rte_swx_extern_type_constructor_t constructor;
195 rte_swx_extern_type_destructor_t destructor;
196 struct extern_type_member_func_tailq funcs;
200 TAILQ_HEAD(extern_type_tailq, extern_type);
203 TAILQ_ENTRY(extern_obj) node;
204 char name[RTE_SWX_NAME_SIZE];
205 struct extern_type *type;
211 TAILQ_HEAD(extern_obj_tailq, extern_obj);
213 #ifndef RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX
214 #define RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX 8
217 struct extern_obj_runtime {
220 rte_swx_extern_type_member_func_t funcs[RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX];
227 TAILQ_ENTRY(extern_func) node;
228 char name[RTE_SWX_NAME_SIZE];
229 struct struct_type *mailbox_struct_type;
230 rte_swx_extern_func_t func;
235 TAILQ_HEAD(extern_func_tailq, extern_func);
237 struct extern_func_runtime {
239 rte_swx_extern_func_t func;
246 TAILQ_ENTRY(header) node;
247 char name[RTE_SWX_NAME_SIZE];
248 struct struct_type *st;
253 TAILQ_HEAD(header_tailq, header);
255 struct header_runtime {
260 struct header_out_runtime {
270 /* Packet headers are always in Network Byte Order (NBO), i.e. big endian.
271 * Packet meta-data fields are always assumed to be in Host Byte Order (HBO).
272 * Table entry fields can be in either NBO or HBO; they are assumed to be in HBO
273 * when transferred to packet meta-data and in NBO when transferred to packet
277 /* Notation conventions:
278 * -Header field: H = h.header.field (dst/src)
279 * -Meta-data field: M = m.field (dst/src)
280 * -Extern object mailbox field: E = e.field (dst/src)
281 * -Extern function mailbox field: F = f.field (dst/src)
282 * -Table action data field: T = t.field (src only)
283 * -Immediate value: I = 32-bit unsigned value (src only)
286 enum instruction_type {
293 INSTR_TX, /* port_out = M */
294 INSTR_TX_I, /* port_out = I */
296 /* extract h.header */
306 /* extract h.header m.last_field_size */
309 /* lookahead h.header */
323 /* validate h.header */
326 /* invalidate h.header */
327 INSTR_HDR_INVALIDATE,
331 * dst = HMEF, src = HMEFTI
333 INSTR_MOV, /* dst = MEF, src = MEFT */
334 INSTR_MOV_MH, /* dst = MEF, src = H */
335 INSTR_MOV_HM, /* dst = H, src = MEFT */
336 INSTR_MOV_HH, /* dst = H, src = H */
337 INSTR_MOV_I, /* dst = HMEF, src = I */
339 /* dma h.header t.field
340 * memcpy(h.header, t.field, sizeof(h.header))
353 * dst = HMEF, src = HMEFTI
355 INSTR_ALU_ADD, /* dst = MEF, src = MEF */
356 INSTR_ALU_ADD_MH, /* dst = MEF, src = H */
357 INSTR_ALU_ADD_HM, /* dst = H, src = MEF */
358 INSTR_ALU_ADD_HH, /* dst = H, src = H */
359 INSTR_ALU_ADD_MI, /* dst = MEF, src = I */
360 INSTR_ALU_ADD_HI, /* dst = H, src = I */
364 * dst = HMEF, src = HMEFTI
366 INSTR_ALU_SUB, /* dst = MEF, src = MEF */
367 INSTR_ALU_SUB_MH, /* dst = MEF, src = H */
368 INSTR_ALU_SUB_HM, /* dst = H, src = MEF */
369 INSTR_ALU_SUB_HH, /* dst = H, src = H */
370 INSTR_ALU_SUB_MI, /* dst = MEF, src = I */
371 INSTR_ALU_SUB_HI, /* dst = H, src = I */
374 * dst = dst '+ src[0:1] '+ src[2:3] + ...
375 * dst = H, src = {H, h.header}
377 INSTR_ALU_CKADD_FIELD, /* src = H */
378 INSTR_ALU_CKADD_STRUCT20, /* src = h.header, with sizeof(header) = 20 */
379 INSTR_ALU_CKADD_STRUCT, /* src = h.header, with any sizeof(header) */
385 INSTR_ALU_CKSUB_FIELD,
389 * dst = HMEF, src = HMEFTI
391 INSTR_ALU_AND, /* dst = MEF, src = MEFT */
392 INSTR_ALU_AND_MH, /* dst = MEF, src = H */
393 INSTR_ALU_AND_HM, /* dst = H, src = MEFT */
394 INSTR_ALU_AND_HH, /* dst = H, src = H */
395 INSTR_ALU_AND_I, /* dst = HMEF, src = I */
399 * dst = HMEF, src = HMEFTI
401 INSTR_ALU_OR, /* dst = MEF, src = MEFT */
402 INSTR_ALU_OR_MH, /* dst = MEF, src = H */
403 INSTR_ALU_OR_HM, /* dst = H, src = MEFT */
404 INSTR_ALU_OR_HH, /* dst = H, src = H */
405 INSTR_ALU_OR_I, /* dst = HMEF, src = I */
409 * dst = HMEF, src = HMEFTI
411 INSTR_ALU_XOR, /* dst = MEF, src = MEFT */
412 INSTR_ALU_XOR_MH, /* dst = MEF, src = H */
413 INSTR_ALU_XOR_HM, /* dst = H, src = MEFT */
414 INSTR_ALU_XOR_HH, /* dst = H, src = H */
415 INSTR_ALU_XOR_I, /* dst = HMEF, src = I */
419 * dst = HMEF, src = HMEFTI
421 INSTR_ALU_SHL, /* dst = MEF, src = MEF */
422 INSTR_ALU_SHL_MH, /* dst = MEF, src = H */
423 INSTR_ALU_SHL_HM, /* dst = H, src = MEF */
424 INSTR_ALU_SHL_HH, /* dst = H, src = H */
425 INSTR_ALU_SHL_MI, /* dst = MEF, src = I */
426 INSTR_ALU_SHL_HI, /* dst = H, src = I */
430 * dst = HMEF, src = HMEFTI
432 INSTR_ALU_SHR, /* dst = MEF, src = MEF */
433 INSTR_ALU_SHR_MH, /* dst = MEF, src = H */
434 INSTR_ALU_SHR_HM, /* dst = H, src = MEF */
435 INSTR_ALU_SHR_HH, /* dst = H, src = H */
436 INSTR_ALU_SHR_MI, /* dst = MEF, src = I */
437 INSTR_ALU_SHR_HI, /* dst = H, src = I */
439 /* regprefetch REGARRAY index
440 * prefetch REGARRAY[index]
443 INSTR_REGPREFETCH_RH, /* index = H */
444 INSTR_REGPREFETCH_RM, /* index = MEFT */
445 INSTR_REGPREFETCH_RI, /* index = I */
447 /* regrd dst REGARRAY index
448 * dst = REGARRAY[index]
449 * dst = HMEF, index = HMEFTI
451 INSTR_REGRD_HRH, /* dst = H, index = H */
452 INSTR_REGRD_HRM, /* dst = H, index = MEFT */
453 INSTR_REGRD_HRI, /* dst = H, index = I */
454 INSTR_REGRD_MRH, /* dst = MEF, index = H */
455 INSTR_REGRD_MRM, /* dst = MEF, index = MEFT */
456 INSTR_REGRD_MRI, /* dst = MEF, index = I */
458 /* regwr REGARRAY index src
459 * REGARRAY[index] = src
460 * index = HMEFTI, src = HMEFTI
462 INSTR_REGWR_RHH, /* index = H, src = H */
463 INSTR_REGWR_RHM, /* index = H, src = MEFT */
464 INSTR_REGWR_RHI, /* index = H, src = I */
465 INSTR_REGWR_RMH, /* index = MEFT, src = H */
466 INSTR_REGWR_RMM, /* index = MEFT, src = MEFT */
467 INSTR_REGWR_RMI, /* index = MEFT, src = I */
468 INSTR_REGWR_RIH, /* index = I, src = H */
469 INSTR_REGWR_RIM, /* index = I, src = MEFT */
470 INSTR_REGWR_RII, /* index = I, src = I */
472 /* regadd REGARRAY index src
473 * REGARRAY[index] += src
474 * index = HMEFTI, src = HMEFTI
476 INSTR_REGADD_RHH, /* index = H, src = H */
477 INSTR_REGADD_RHM, /* index = H, src = MEFT */
478 INSTR_REGADD_RHI, /* index = H, src = I */
479 INSTR_REGADD_RMH, /* index = MEFT, src = H */
480 INSTR_REGADD_RMM, /* index = MEFT, src = MEFT */
481 INSTR_REGADD_RMI, /* index = MEFT, src = I */
482 INSTR_REGADD_RIH, /* index = I, src = H */
483 INSTR_REGADD_RIM, /* index = I, src = MEFT */
484 INSTR_REGADD_RII, /* index = I, src = I */
486 /* metprefetch METARRAY index
487 * prefetch METARRAY[index]
490 INSTR_METPREFETCH_H, /* index = H */
491 INSTR_METPREFETCH_M, /* index = MEFT */
492 INSTR_METPREFETCH_I, /* index = I */
494 /* meter METARRAY index length color_in color_out
495 * color_out = meter(METARRAY[index], length, color_in)
496 * index = HMEFTI, length = HMEFT, color_in = MEFTI, color_out = MEF
498 INSTR_METER_HHM, /* index = H, length = H, color_in = MEFT */
499 INSTR_METER_HHI, /* index = H, length = H, color_in = I */
500 INSTR_METER_HMM, /* index = H, length = MEFT, color_in = MEFT */
501 INSTR_METER_HMI, /* index = H, length = MEFT, color_in = I */
502 INSTR_METER_MHM, /* index = MEFT, length = H, color_in = MEFT */
503 INSTR_METER_MHI, /* index = MEFT, length = H, color_in = I */
504 INSTR_METER_MMM, /* index = MEFT, length = MEFT, color_in = MEFT */
505 INSTR_METER_MMI, /* index = MEFT, length = MEFT, color_in = I */
506 INSTR_METER_IHM, /* index = I, length = H, color_in = MEFT */
507 INSTR_METER_IHI, /* index = I, length = H, color_in = I */
508 INSTR_METER_IMM, /* index = I, length = MEFT, color_in = MEFT */
509 INSTR_METER_IMI, /* index = I, length = MEFT, color_in = I */
515 /* extern e.obj.func */
526 /* jmpv LABEL h.header
527 * Jump if header is valid
531 /* jmpnv LABEL h.header
532 * Jump if header is invalid
537 * Jump if table lookup hit
542 * Jump if table lookup miss
549 INSTR_JMP_ACTION_HIT,
551 /* jmpna LABEL ACTION
552 * Jump if action not run
554 INSTR_JMP_ACTION_MISS,
557 * Jump if a is equal to b
558 * a = HMEFT, b = HMEFTI
560 INSTR_JMP_EQ, /* a = MEFT, b = MEFT */
561 INSTR_JMP_EQ_MH, /* a = MEFT, b = H */
562 INSTR_JMP_EQ_HM, /* a = H, b = MEFT */
563 INSTR_JMP_EQ_HH, /* a = H, b = H */
564 INSTR_JMP_EQ_I, /* (a, b) = (MEFT, I) or (a, b) = (H, I) */
567 * Jump if a is not equal to b
568 * a = HMEFT, b = HMEFTI
570 INSTR_JMP_NEQ, /* a = MEFT, b = MEFT */
571 INSTR_JMP_NEQ_MH, /* a = MEFT, b = H */
572 INSTR_JMP_NEQ_HM, /* a = H, b = MEFT */
573 INSTR_JMP_NEQ_HH, /* a = H, b = H */
574 INSTR_JMP_NEQ_I, /* (a, b) = (MEFT, I) or (a, b) = (H, I) */
577 * Jump if a is less than b
578 * a = HMEFT, b = HMEFTI
580 INSTR_JMP_LT, /* a = MEFT, b = MEFT */
581 INSTR_JMP_LT_MH, /* a = MEFT, b = H */
582 INSTR_JMP_LT_HM, /* a = H, b = MEFT */
583 INSTR_JMP_LT_HH, /* a = H, b = H */
584 INSTR_JMP_LT_MI, /* a = MEFT, b = I */
585 INSTR_JMP_LT_HI, /* a = H, b = I */
588 * Jump if a is greater than b
589 * a = HMEFT, b = HMEFTI
591 INSTR_JMP_GT, /* a = MEFT, b = MEFT */
592 INSTR_JMP_GT_MH, /* a = MEFT, b = H */
593 INSTR_JMP_GT_HM, /* a = H, b = MEFT */
594 INSTR_JMP_GT_HH, /* a = H, b = H */
595 INSTR_JMP_GT_MI, /* a = MEFT, b = I */
596 INSTR_JMP_GT_HI, /* a = H, b = I */
604 struct instr_operand {
625 uint8_t header_id[8];
626 uint8_t struct_id[8];
631 struct instr_hdr_validity {
639 struct instr_extern_obj {
644 struct instr_extern_func {
648 struct instr_dst_src {
649 struct instr_operand dst;
651 struct instr_operand src;
656 struct instr_regarray {
661 struct instr_operand idx;
666 struct instr_operand dstsrc;
676 struct instr_operand idx;
680 struct instr_operand length;
683 struct instr_operand color_in;
684 uint32_t color_in_val;
687 struct instr_operand color_out;
692 uint8_t header_id[8];
693 uint8_t struct_id[8];
704 struct instruction *ip;
707 struct instr_operand a;
713 struct instr_operand b;
719 enum instruction_type type;
722 struct instr_hdr_validity valid;
723 struct instr_dst_src mov;
724 struct instr_regarray regarray;
725 struct instr_meter meter;
726 struct instr_dma dma;
727 struct instr_dst_src alu;
728 struct instr_table table;
729 struct instr_extern_obj ext_obj;
730 struct instr_extern_func ext_func;
731 struct instr_jmp jmp;
735 struct instruction_data {
736 char label[RTE_SWX_NAME_SIZE];
737 char jmp_label[RTE_SWX_NAME_SIZE];
738 uint32_t n_users; /* user = jmp instruction to this instruction. */
746 TAILQ_ENTRY(action) node;
747 char name[RTE_SWX_NAME_SIZE];
748 struct struct_type *st;
749 int *args_endianness; /* 0 = Host Byte Order (HBO). */
750 struct instruction *instructions;
751 uint32_t n_instructions;
755 TAILQ_HEAD(action_tailq, action);
761 TAILQ_ENTRY(table_type) node;
762 char name[RTE_SWX_NAME_SIZE];
763 enum rte_swx_table_match_type match_type;
764 struct rte_swx_table_ops ops;
767 TAILQ_HEAD(table_type_tailq, table_type);
770 enum rte_swx_table_match_type match_type;
775 TAILQ_ENTRY(table) node;
776 char name[RTE_SWX_NAME_SIZE];
777 char args[RTE_SWX_NAME_SIZE];
778 struct table_type *type; /* NULL when n_fields == 0. */
781 struct match_field *fields;
783 struct header *header; /* Only valid when n_fields > 0. */
786 struct action **actions;
787 struct action *default_action;
788 uint8_t *default_action_data;
790 int default_action_is_const;
791 uint32_t action_data_size_max;
797 TAILQ_HEAD(table_tailq, table);
799 struct table_runtime {
800 rte_swx_table_lookup_t func;
805 struct table_statistics {
806 uint64_t n_pkts_hit[2]; /* 0 = Miss, 1 = Hit. */
807 uint64_t *n_pkts_action;
814 TAILQ_ENTRY(selector) node;
815 char name[RTE_SWX_NAME_SIZE];
817 struct field *group_id_field;
818 struct field **selector_fields;
819 uint32_t n_selector_fields;
820 struct header *selector_header;
821 struct field *member_id_field;
823 uint32_t n_groups_max;
824 uint32_t n_members_per_group_max;
829 TAILQ_HEAD(selector_tailq, selector);
831 struct selector_runtime {
833 uint8_t **group_id_buffer;
834 uint8_t **selector_buffer;
835 uint8_t **member_id_buffer;
838 struct selector_statistics {
846 TAILQ_ENTRY(regarray) node;
847 char name[RTE_SWX_NAME_SIZE];
853 TAILQ_HEAD(regarray_tailq, regarray);
855 struct regarray_runtime {
863 struct meter_profile {
864 TAILQ_ENTRY(meter_profile) node;
865 char name[RTE_SWX_NAME_SIZE];
866 struct rte_meter_trtcm_params params;
867 struct rte_meter_trtcm_profile profile;
871 TAILQ_HEAD(meter_profile_tailq, meter_profile);
874 TAILQ_ENTRY(metarray) node;
875 char name[RTE_SWX_NAME_SIZE];
880 TAILQ_HEAD(metarray_tailq, metarray);
883 struct rte_meter_trtcm m;
884 struct meter_profile *profile;
885 enum rte_color color_mask;
888 uint64_t n_pkts[RTE_COLORS];
889 uint64_t n_bytes[RTE_COLORS];
892 struct metarray_runtime {
893 struct meter *metarray;
902 struct rte_swx_pkt pkt;
908 /* Packet headers. */
909 struct header_runtime *headers; /* Extracted or generated headers. */
910 struct header_out_runtime *headers_out; /* Emitted headers. */
911 uint8_t *header_storage;
912 uint8_t *header_out_storage;
913 uint64_t valid_headers;
914 uint32_t n_headers_out;
916 /* Packet meta-data. */
920 struct table_runtime *tables;
921 struct selector_runtime *selectors;
922 struct rte_swx_table_state *table_state;
924 int hit; /* 0 = Miss, 1 = Hit. */
926 /* Extern objects and functions. */
927 struct extern_obj_runtime *extern_objs;
928 struct extern_func_runtime *extern_funcs;
931 struct instruction *ip;
932 struct instruction *ret;
/* 64-bit mask helpers. These are pure expression macros: they evaluate to
 * the tested/modified mask value and do NOT update *mask* in place.
 * GET is non-zero iff bit *pos* is set; SET/CLR return the mask with bit
 * *pos* set/cleared. *pos* must be < 64 (shift by >= 64 is undefined).
 */
935 #define MASK64_BIT_GET(mask, pos) ((mask) & (1LLU << (pos)))
936 #define MASK64_BIT_SET(mask, pos) ((mask) | (1LLU << (pos)))
937 #define MASK64_BIT_CLR(mask, pos) ((mask) & ~(1LLU << (pos)))
/* Non-zero iff header *header_id* is currently valid for *thread*, i.e.
 * bit *header_id* is set in the thread's valid_headers 64-bit bitmap.
 */
939 #define HEADER_VALID(thread, header_id) \
940 MASK64_BIT_GET((thread)->valid_headers, header_id)
942 #define ALU(thread, ip, operator) \
944 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
945 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
946 uint64_t dst64 = *dst64_ptr; \
947 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
948 uint64_t dst = dst64 & dst64_mask; \
950 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
951 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
952 uint64_t src64 = *src64_ptr; \
953 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
954 uint64_t src = src64 & src64_mask; \
956 uint64_t result = dst operator src; \
958 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
961 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
963 #define ALU_MH(thread, ip, operator) \
965 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
966 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
967 uint64_t dst64 = *dst64_ptr; \
968 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
969 uint64_t dst = dst64 & dst64_mask; \
971 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
972 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
973 uint64_t src64 = *src64_ptr; \
974 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
976 uint64_t result = dst operator src; \
978 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
981 #define ALU_HM(thread, ip, operator) \
983 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
984 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
985 uint64_t dst64 = *dst64_ptr; \
986 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
987 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
989 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
990 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
991 uint64_t src64 = *src64_ptr; \
992 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
993 uint64_t src = src64 & src64_mask; \
995 uint64_t result = dst operator src; \
996 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
998 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
1001 #define ALU_HM_FAST(thread, ip, operator) \
1003 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
1004 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
1005 uint64_t dst64 = *dst64_ptr; \
1006 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
1007 uint64_t dst = dst64 & dst64_mask; \
1009 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
1010 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
1011 uint64_t src64 = *src64_ptr; \
1012 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
1013 uint64_t src = hton64(src64 & src64_mask) >> (64 - (ip)->alu.dst.n_bits); \
1015 uint64_t result = dst operator src; \
1017 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
1020 #define ALU_HH(thread, ip, operator) \
1022 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
1023 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
1024 uint64_t dst64 = *dst64_ptr; \
1025 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
1026 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
1028 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
1029 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
1030 uint64_t src64 = *src64_ptr; \
1031 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
1033 uint64_t result = dst operator src; \
1034 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
1036 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
1039 #define ALU_HH_FAST(thread, ip, operator) \
1041 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
1042 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
1043 uint64_t dst64 = *dst64_ptr; \
1044 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
1045 uint64_t dst = dst64 & dst64_mask; \
1047 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
1048 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
1049 uint64_t src64 = *src64_ptr; \
1050 uint64_t src = (src64 << (64 - (ip)->alu.src.n_bits)) >> (64 - (ip)->alu.dst.n_bits); \
1052 uint64_t result = dst operator src; \
1054 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
/* NOTE(review): presumably the big-endian (#else) branch of the
 * RTE_BYTE_ORDER conditional opened at original line 961 — surrounding
 * lines not visible, confirm. On big-endian hosts, header (NBO) fields
 * are already in host byte order, so the generic ALU variant suffices
 * and no byte-swapping specialization is needed.
 */
1061 #define ALU_HM_FAST ALU
1063 #define ALU_HH_FAST ALU
1067 #define ALU_I(thread, ip, operator) \
1069 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
1070 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
1071 uint64_t dst64 = *dst64_ptr; \
1072 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
1073 uint64_t dst = dst64 & dst64_mask; \
1075 uint64_t src = (ip)->alu.src_val; \
1077 uint64_t result = dst operator src; \
1079 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
/* dst = MEF (host byte order), src = immediate: the plain immediate
 * variant applies unchanged, no byte-order handling required.
 */
1082 #define ALU_MI ALU_I
1084 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1086 #define ALU_HI(thread, ip, operator) \
1088 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
1089 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
1090 uint64_t dst64 = *dst64_ptr; \
1091 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
1092 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
1094 uint64_t src = (ip)->alu.src_val; \
1096 uint64_t result = dst operator src; \
1097 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
1099 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
/* NOTE(review): presumably the big-endian (#else) branch of the
 * RTE_BYTE_ORDER conditional opened at original line 1084 — confirm.
 * On big-endian hosts header fields need no swap, so the plain
 * immediate variant is reused for dst = H, src = I.
 */
1104 #define ALU_HI ALU_I
1108 #define MOV(thread, ip) \
1110 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
1111 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
1112 uint64_t dst64 = *dst64_ptr; \
1113 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
1115 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
1116 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
1117 uint64_t src64 = *src64_ptr; \
1118 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \
1119 uint64_t src = src64 & src64_mask; \
1121 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
1124 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1126 #define MOV_MH(thread, ip) \
1128 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
1129 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
1130 uint64_t dst64 = *dst64_ptr; \
1131 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
1133 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
1134 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
1135 uint64_t src64 = *src64_ptr; \
1136 uint64_t src = ntoh64(src64) >> (64 - (ip)->mov.src.n_bits); \
1138 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
1141 #define MOV_HM(thread, ip) \
1143 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
1144 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
1145 uint64_t dst64 = *dst64_ptr; \
1146 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
1148 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
1149 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
1150 uint64_t src64 = *src64_ptr; \
1151 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \
1152 uint64_t src = src64 & src64_mask; \
1154 src = hton64(src) >> (64 - (ip)->mov.dst.n_bits); \
1155 *dst64_ptr = (dst64 & ~dst64_mask) | src; \
1158 #define MOV_HH(thread, ip) \
1160 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
1161 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
1162 uint64_t dst64 = *dst64_ptr; \
1163 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
1165 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
1166 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
1167 uint64_t src64 = *src64_ptr; \
1169 uint64_t src = src64 << (64 - (ip)->mov.src.n_bits); \
1170 src = src >> (64 - (ip)->mov.dst.n_bits); \
1171 *dst64_ptr = (dst64 & ~dst64_mask) | src; \
1182 #define MOV_I(thread, ip) \
1184 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
1185 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
1186 uint64_t dst64 = *dst64_ptr; \
1187 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
1189 uint64_t src = (ip)->mov.src_val; \
1191 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
1194 #define JMP_CMP(thread, ip, operator) \
1196 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1197 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1198 uint64_t a64 = *a64_ptr; \
1199 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
1200 uint64_t a = a64 & a64_mask; \
1202 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1203 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1204 uint64_t b64 = *b64_ptr; \
1205 uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits); \
1206 uint64_t b = b64 & b64_mask; \
1208 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1211 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1213 #define JMP_CMP_MH(thread, ip, operator) \
1215 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1216 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1217 uint64_t a64 = *a64_ptr; \
1218 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
1219 uint64_t a = a64 & a64_mask; \
1221 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1222 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1223 uint64_t b64 = *b64_ptr; \
1224 uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits); \
1226 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1229 #define JMP_CMP_HM(thread, ip, operator) \
1231 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1232 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1233 uint64_t a64 = *a64_ptr; \
1234 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
1236 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1237 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1238 uint64_t b64 = *b64_ptr; \
1239 uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits); \
1240 uint64_t b = b64 & b64_mask; \
1242 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1245 #define JMP_CMP_HH(thread, ip, operator) \
1247 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1248 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1249 uint64_t a64 = *a64_ptr; \
1250 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
1252 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1253 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1254 uint64_t b64 = *b64_ptr; \
1255 uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits); \
1257 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1260 #define JMP_CMP_HH_FAST(thread, ip, operator) \
1262 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1263 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1264 uint64_t a64 = *a64_ptr; \
1265 uint64_t a = a64 << (64 - (ip)->jmp.a.n_bits); \
1267 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1268 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1269 uint64_t b64 = *b64_ptr; \
1270 uint64_t b = b64 << (64 - (ip)->jmp.b.n_bits); \
1272 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
/* NOTE(review): presumably the big-endian (#else) branch of the
 * RTE_BYTE_ORDER conditional opened at original line 1211 — confirm.
 * With header fields already in host order on big-endian hosts, all
 * mixed H/M comparison variants collapse to the generic JMP_CMP.
 */
1277 #define JMP_CMP_MH JMP_CMP
1278 #define JMP_CMP_HM JMP_CMP
1279 #define JMP_CMP_HH JMP_CMP
1280 #define JMP_CMP_HH_FAST JMP_CMP
1284 #define JMP_CMP_I(thread, ip, operator) \
1286 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1287 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1288 uint64_t a64 = *a64_ptr; \
1289 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
1290 uint64_t a = a64 & a64_mask; \
1292 uint64_t b = (ip)->jmp.b_val; \
1294 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
/* a = MEFT (host byte order), b = immediate: the plain immediate
 * comparison applies unchanged, no byte-order handling required.
 */
1297 #define JMP_CMP_MI JMP_CMP_I
1299 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1301 #define JMP_CMP_HI(thread, ip, operator) \
1303 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1304 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1305 uint64_t a64 = *a64_ptr; \
1306 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
1308 uint64_t b = (ip)->jmp.b_val; \
1310 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
/* NOTE(review): presumably the big-endian (#else) branch of the
 * RTE_BYTE_ORDER conditional opened at original line 1299 — confirm.
 * Header field needs no swap on big-endian hosts, so the immediate
 * comparison variant is reused for a = H, b = I.
 */
1315 #define JMP_CMP_HI JMP_CMP_I
1319 #define METADATA_READ(thread, offset, n_bits) \
1321 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
1322 uint64_t m64 = *m64_ptr; \
1323 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
1327 #define METADATA_WRITE(thread, offset, n_bits, value) \
1329 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
1330 uint64_t m64 = *m64_ptr; \
1331 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
1333 uint64_t m_new = value; \
1335 *m64_ptr = (m64 & ~m64_mask) | (m_new & m64_mask); \
1338 #ifndef RTE_SWX_PIPELINE_THREADS_MAX
1339 #define RTE_SWX_PIPELINE_THREADS_MAX 16
1342 struct rte_swx_pipeline {
1343 struct struct_type_tailq struct_types;
1344 struct port_in_type_tailq port_in_types;
1345 struct port_in_tailq ports_in;
1346 struct port_out_type_tailq port_out_types;
1347 struct port_out_tailq ports_out;
1348 struct extern_type_tailq extern_types;
1349 struct extern_obj_tailq extern_objs;
1350 struct extern_func_tailq extern_funcs;
1351 struct header_tailq headers;
1352 struct struct_type *metadata_st;
1353 uint32_t metadata_struct_id;
1354 struct action_tailq actions;
1355 struct table_type_tailq table_types;
1356 struct table_tailq tables;
1357 struct selector_tailq selectors;
1358 struct regarray_tailq regarrays;
1359 struct meter_profile_tailq meter_profiles;
1360 struct metarray_tailq metarrays;
1362 struct port_in_runtime *in;
1363 struct port_out_runtime *out;
1364 struct instruction **action_instructions;
1365 struct rte_swx_table_state *table_state;
1366 struct table_statistics *table_stats;
1367 struct selector_statistics *selector_stats;
1368 struct regarray_runtime *regarray_runtime;
1369 struct metarray_runtime *metarray_runtime;
1370 struct instruction *instructions;
1371 struct thread threads[RTE_SWX_PIPELINE_THREADS_MAX];
1374 uint32_t n_ports_in;
1375 uint32_t n_ports_out;
1376 uint32_t n_extern_objs;
1377 uint32_t n_extern_funcs;
1380 uint32_t n_selectors;
1381 uint32_t n_regarrays;
1382 uint32_t n_metarrays;
1386 uint32_t n_instructions;
1394 static struct struct_type *
1395 struct_type_find(struct rte_swx_pipeline *p, const char *name)
1397 struct struct_type *elem;
1399 TAILQ_FOREACH(elem, &p->struct_types, node)
1400 if (strcmp(elem->name, name) == 0)
1406 static struct field *
1407 struct_type_field_find(struct struct_type *st, const char *name)
1411 for (i = 0; i < st->n_fields; i++) {
1412 struct field *f = &st->fields[i];
1414 if (strcmp(f->name, name) == 0)
/*
 * Register a new struct type (used for headers, metadata, mailboxes).
 * Validates all fields, allocates a struct_type node, copies the field
 * layout while computing per-field bit offsets, and appends to
 * p->struct_types. Only the last field may have variable size.
 * Some error-path and cleanup lines are elided in this extract.
 */
1422 rte_swx_pipeline_struct_type_register(struct rte_swx_pipeline *p,
1424 struct rte_swx_field_params *fields,
1426 int last_field_has_variable_size)
1428 struct struct_type *st;
1432 CHECK_NAME(name, EINVAL);
1433 CHECK(fields, EINVAL);
1434 CHECK(n_fields, EINVAL);
/* Per-field validation. */
1436 for (i = 0; i < n_fields; i++) {
1437 struct rte_swx_field_params *f = &fields[i];
/* var_size is allowed only on the last field and only when requested. */
1438 int var_size = ((i == n_fields - 1) && last_field_has_variable_size) ? 1 : 0;
1441 CHECK_NAME(f->name, EINVAL);
1442 CHECK(f->n_bits, EINVAL);
/* Fixed-size fields are capped at 64 bits; byte-aligned sizes only. */
1443 CHECK((f->n_bits <= 64) || var_size, EINVAL);
1444 CHECK((f->n_bits & 7) == 0, EINVAL);
/* Reject duplicate field names. */
1446 for (j = 0; j < i; j++) {
1447 struct rte_swx_field_params *f_prev = &fields[j];
1449 CHECK(strcmp(f->name, f_prev->name), EINVAL);
1453 CHECK(!struct_type_find(p, name), EEXIST);
1455 /* Node allocation. */
1456 st = calloc(1, sizeof(struct struct_type));
1459 st->fields = calloc(n_fields, sizeof(struct field));
1465 /* Node initialization. */
1466 strcpy(st->name, name);
1467 for (i = 0; i < n_fields; i++) {
1468 struct field *dst = &st->fields[i];
1469 struct rte_swx_field_params *src = &fields[i];
1470 int var_size = ((i == n_fields - 1) && last_field_has_variable_size) ? 1 : 0;
1472 strcpy(dst->name, src->name);
1473 dst->n_bits = src->n_bits;
/* Field offset = running total of the preceding fields' bits. */
1474 dst->offset = st->n_bits;
1475 dst->var_size = var_size;
1477 st->n_bits += src->n_bits;
/* n_bits_min excludes the variable-size tail field. */
1478 st->n_bits_min += var_size ? 0 : src->n_bits;
1480 st->n_fields = n_fields;
1481 st->var_size = last_field_has_variable_size;
1483 /* Node add to tailq. */
1484 TAILQ_INSERT_TAIL(&p->struct_types, st, node);
/*
 * Build step: allocate the per-thread structs[] pointer array
 * (one slot per registered struct id).
 */
1490 struct_build(struct rte_swx_pipeline *p)
1494 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1495 struct thread *t = &p->threads[i];
1497 t->structs = calloc(p->n_structs, sizeof(uint8_t *));
1498 CHECK(t->structs, ENOMEM);
/* Undo struct_build: free each thread's structs[] array (body elided). */
1505 struct_build_free(struct rte_swx_pipeline *p)
1509 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1510 struct thread *t = &p->threads[i];
/* Free run-time state, then pop and free every struct_type node. */
1518 struct_free(struct rte_swx_pipeline *p)
1520 struct_build_free(p);
1524 struct struct_type *elem;
1526 elem = TAILQ_FIRST(&p->struct_types);
1530 TAILQ_REMOVE(&p->struct_types, elem, node);
/* Look up an input port type by name (NULL-return path elided). */
1539 static struct port_in_type *
1540 port_in_type_find(struct rte_swx_pipeline *p, const char *name)
1542 struct port_in_type *elem;
1547 TAILQ_FOREACH(elem, &p->port_in_types, node)
1548 if (strcmp(elem->name, name) == 0)
/*
 * Register an input port type: validates the ops vtable (create/free/
 * pkt_rx/stats_read all mandatory), allocates a node, copies name and ops,
 * and appends to p->port_in_types.
 */
1555 rte_swx_pipeline_port_in_type_register(struct rte_swx_pipeline *p,
1557 struct rte_swx_port_in_ops *ops)
1559 struct port_in_type *elem;
1562 CHECK_NAME(name, EINVAL);
1564 CHECK(ops->create, EINVAL);
1565 CHECK(ops->free, EINVAL);
1566 CHECK(ops->pkt_rx, EINVAL);
1567 CHECK(ops->stats_read, EINVAL);
1569 CHECK(!port_in_type_find(p, name), EEXIST);
1571 /* Node allocation. */
1572 elem = calloc(1, sizeof(struct port_in_type));
1573 CHECK(elem, ENOMEM);
1575 /* Node initialization. */
1576 strcpy(elem->name, name);
1577 memcpy(&elem->ops, ops, sizeof(*ops));
1579 /* Node add to tailq. */
1580 TAILQ_INSERT_TAIL(&p->port_in_types, elem, node);
/* Look up an input port instance by numeric id. */
1585 static struct port_in *
1586 port_in_find(struct rte_swx_pipeline *p, uint32_t port_id)
1588 struct port_in *port;
1590 TAILQ_FOREACH(port, &p->ports_in, node)
1591 if (port->id == port_id)
/*
 * Create an input port instance of a previously registered type: rejects a
 * duplicate port_id, resolves the type, invokes its create(args) callback,
 * then records the instance and grows n_ports_in to cover the new id.
 * Error-path cleanup lines are elided in this extract.
 */
1598 rte_swx_pipeline_port_in_config(struct rte_swx_pipeline *p,
1600 const char *port_type_name,
1603 struct port_in_type *type = NULL;
1604 struct port_in *port = NULL;
1609 CHECK(!port_in_find(p, port_id), EINVAL);
1611 CHECK_NAME(port_type_name, EINVAL);
1612 type = port_in_type_find(p, port_type_name);
1613 CHECK(type, EINVAL);
/* Driver-level object construction. */
1615 obj = type->ops.create(args);
1618 /* Node allocation. */
1619 port = calloc(1, sizeof(struct port_in));
1620 CHECK(port, ENOMEM);
1622 /* Node initialization. */
1627 /* Node add to tailq. */
1628 TAILQ_INSERT_TAIL(&p->ports_in, port, node);
/* n_ports_in tracks the highest configured id + 1, not a count. */
1629 if (p->n_ports_in < port_id + 1)
1630 p->n_ports_in = port_id + 1;
/*
 * Build step for input ports: requires a power-of-2 port count (enables the
 * mask-based round-robin in pipeline_port_inc) with no gaps in the id space,
 * then flattens the list into the p->in[] run-time array.
 */
1636 port_in_build(struct rte_swx_pipeline *p)
1638 struct port_in *port;
1641 CHECK(p->n_ports_in, EINVAL);
1642 CHECK(rte_is_power_of_2(p->n_ports_in), EINVAL);
/* Every id in [0, n_ports_in) must be configured. */
1644 for (i = 0; i < p->n_ports_in; i++)
1645 CHECK(port_in_find(p, i), EINVAL);
1647 p->in = calloc(p->n_ports_in, sizeof(struct port_in_runtime));
1648 CHECK(p->in, ENOMEM);
1650 TAILQ_FOREACH(port, &p->ports_in, node) {
1651 struct port_in_runtime *in = &p->in[port->id];
/* Cache the rx callback and driver object for the fast path. */
1653 in->pkt_rx = port->type->ops.pkt_rx;
1654 in->obj = port->obj;
/* Undo port_in_build (body elided in this extract). */
1661 port_in_build_free(struct rte_swx_pipeline *p)
/*
 * Free all input ports (calling each driver's free on its object),
 * then all input port types.
 */
1668 port_in_free(struct rte_swx_pipeline *p)
1670 port_in_build_free(p);
1674 struct port_in *port;
1676 port = TAILQ_FIRST(&p->ports_in);
1680 TAILQ_REMOVE(&p->ports_in, port, node);
1681 port->type->ops.free(port->obj);
1685 /* Input port types. */
1687 struct port_in_type *elem;
1689 elem = TAILQ_FIRST(&p->port_in_types);
1693 TAILQ_REMOVE(&p->port_in_types, elem, node);
/* Look up an output port type by name (NULL-return path elided). */
1701 static struct port_out_type *
1702 port_out_type_find(struct rte_swx_pipeline *p, const char *name)
1704 struct port_out_type *elem;
1709 TAILQ_FOREACH(elem, &p->port_out_types, node)
1710 if (!strcmp(elem->name, name))
/*
 * Register an output port type; mirrors the input-port registration:
 * create/free/pkt_tx/stats_read are mandatory ops.
 */
1717 rte_swx_pipeline_port_out_type_register(struct rte_swx_pipeline *p,
1719 struct rte_swx_port_out_ops *ops)
1721 struct port_out_type *elem;
1724 CHECK_NAME(name, EINVAL);
1726 CHECK(ops->create, EINVAL);
1727 CHECK(ops->free, EINVAL);
1728 CHECK(ops->pkt_tx, EINVAL);
1729 CHECK(ops->stats_read, EINVAL);
1731 CHECK(!port_out_type_find(p, name), EEXIST);
1733 /* Node allocation. */
1734 elem = calloc(1, sizeof(struct port_out_type));
1735 CHECK(elem, ENOMEM);
1737 /* Node initialization. */
1738 strcpy(elem->name, name);
1739 memcpy(&elem->ops, ops, sizeof(*ops));
1741 /* Node add to tailq. */
1742 TAILQ_INSERT_TAIL(&p->port_out_types, elem, node);
/* Look up an output port instance by numeric id. */
1747 static struct port_out *
1748 port_out_find(struct rte_swx_pipeline *p, uint32_t port_id)
1750 struct port_out *port;
1752 TAILQ_FOREACH(port, &p->ports_out, node)
1753 if (port->id == port_id)
/*
 * Create an output port instance of a registered type; mirrors
 * rte_swx_pipeline_port_in_config (duplicate-id check, type lookup,
 * driver create(args), node add, n_ports_out grows to id + 1).
 * Error-path cleanup lines are elided in this extract.
 */
1760 rte_swx_pipeline_port_out_config(struct rte_swx_pipeline *p,
1762 const char *port_type_name,
1765 struct port_out_type *type = NULL;
1766 struct port_out *port = NULL;
1771 CHECK(!port_out_find(p, port_id), EINVAL);
1773 CHECK_NAME(port_type_name, EINVAL);
1774 type = port_out_type_find(p, port_type_name);
1775 CHECK(type, EINVAL);
1777 obj = type->ops.create(args);
1780 /* Node allocation. */
1781 port = calloc(1, sizeof(struct port_out));
1782 CHECK(port, ENOMEM);
1784 /* Node initialization. */
1790 TAILQ_INSERT_TAIL(&p->ports_out, port, node);
/* n_ports_out tracks the highest configured id + 1. */
1791 if (p->n_ports_out < port_id + 1)
1792 p->n_ports_out = port_id + 1;
/*
 * Build step for output ports: id space must be gap-free (no power-of-2
 * requirement here, unlike input ports); flattens into p->out[].
 */
1798 port_out_build(struct rte_swx_pipeline *p)
1800 struct port_out *port;
1803 CHECK(p->n_ports_out, EINVAL);
1805 for (i = 0; i < p->n_ports_out; i++)
1806 CHECK(port_out_find(p, i), EINVAL);
1808 p->out = calloc(p->n_ports_out, sizeof(struct port_out_runtime));
1809 CHECK(p->out, ENOMEM);
1811 TAILQ_FOREACH(port, &p->ports_out, node) {
1812 struct port_out_runtime *out = &p->out[port->id];
/* Cache tx/flush callbacks and driver object for the fast path. */
1814 out->pkt_tx = port->type->ops.pkt_tx;
1815 out->flush = port->type->ops.flush;
1816 out->obj = port->obj;
/* Undo port_out_build (body elided in this extract). */
1823 port_out_build_free(struct rte_swx_pipeline *p)
/* Free all output ports (driver free per object), then their types. */
1830 port_out_free(struct rte_swx_pipeline *p)
1832 port_out_build_free(p);
1836 struct port_out *port;
1838 port = TAILQ_FIRST(&p->ports_out);
1842 TAILQ_REMOVE(&p->ports_out, port, node);
1843 port->type->ops.free(port->obj);
1847 /* Output port types. */
1849 struct port_out_type *elem;
1851 elem = TAILQ_FIRST(&p->port_out_types);
1855 TAILQ_REMOVE(&p->port_out_types, elem, node);
/* Look up an extern type by name (NULL-return path elided). */
1863 static struct extern_type *
1864 extern_type_find(struct rte_swx_pipeline *p, const char *name)
1866 struct extern_type *elem;
1868 TAILQ_FOREACH(elem, &p->extern_types, node)
1869 if (strcmp(elem->name, name) == 0)
/* Look up a member function by name within an extern type. */
1875 static struct extern_type_member_func *
1876 extern_type_member_func_find(struct extern_type *type, const char *name)
1878 struct extern_type_member_func *elem;
1880 TAILQ_FOREACH(elem, &type->funcs, node)
1881 if (strcmp(elem->name, name) == 0)
/* Look up an extern object instance by name. */
1887 static struct extern_obj *
1888 extern_obj_find(struct rte_swx_pipeline *p, const char *name)
1890 struct extern_obj *elem;
1892 TAILQ_FOREACH(elem, &p->extern_objs, node)
1893 if (strcmp(elem->name, name) == 0)
/*
 * Parse "e.<object>.<member_func>" into object + member function.
 * The strdup'd scratch string and its split/cleanup lines are elided;
 * on success *obj is set and the function node is returned.
 */
1899 static struct extern_type_member_func *
1900 extern_obj_member_func_parse(struct rte_swx_pipeline *p,
1902 struct extern_obj **obj)
1904 struct extern_obj *object;
1905 struct extern_type_member_func *func;
1906 char *object_name, *func_name;
/* Extern object references are spelled with the "e." prefix. */
1908 if (name[0] != 'e' || name[1] != '.')
1911 object_name = strdup(&name[2]);
1915 func_name = strchr(object_name, '.');
1924 object = extern_obj_find(p, object_name);
1930 func = extern_type_member_func_find(object->type, func_name);
/*
 * Parse "e.<object>.<mailbox_field>" into object + mailbox struct field.
 * Same strdup/split pattern as above; cleanup lines elided.
 */
1943 static struct field *
1944 extern_obj_mailbox_field_parse(struct rte_swx_pipeline *p,
1946 struct extern_obj **object)
1948 struct extern_obj *obj;
1950 char *obj_name, *field_name;
1952 if ((name[0] != 'e') || (name[1] != '.'))
1955 obj_name = strdup(&name[2]);
1959 field_name = strchr(obj_name, '.');
1968 obj = extern_obj_find(p, obj_name);
1974 f = struct_type_field_find(obj->type->mailbox_struct_type, field_name);
/*
 * Register an extern object type: resolves its mailbox struct type (must be
 * fixed size), requires both constructor and destructor callbacks, then
 * allocates, initializes (empty member-func list) and appends the node.
 */
1988 rte_swx_pipeline_extern_type_register(struct rte_swx_pipeline *p,
1990 const char *mailbox_struct_type_name,
1991 rte_swx_extern_type_constructor_t constructor,
1992 rte_swx_extern_type_destructor_t destructor)
1994 struct extern_type *elem;
1995 struct struct_type *mailbox_struct_type;
1999 CHECK_NAME(name, EINVAL);
2000 CHECK(!extern_type_find(p, name), EEXIST);
2002 CHECK_NAME(mailbox_struct_type_name, EINVAL);
2003 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
2004 CHECK(mailbox_struct_type, EINVAL);
/* Mailboxes are allocated with a fixed size; var-size structs rejected. */
2005 CHECK(!mailbox_struct_type->var_size, EINVAL);
2007 CHECK(constructor, EINVAL);
2008 CHECK(destructor, EINVAL);
2010 /* Node allocation. */
2011 elem = calloc(1, sizeof(struct extern_type));
2012 CHECK(elem, ENOMEM);
2014 /* Node initialization. */
2015 strcpy(elem->name, name);
2016 elem->mailbox_struct_type = mailbox_struct_type;
2017 elem->constructor = constructor;
2018 elem->destructor = destructor;
2019 TAILQ_INIT(&elem->funcs);
2021 /* Node add to tailq. */
2022 TAILQ_INSERT_TAIL(&p->extern_types, elem, node);
/*
 * Register a member function on an existing extern type. The per-type
 * function count is capped (ids index a fixed-size array in
 * extern_obj_runtime); the new function's id is the registration order.
 */
2028 rte_swx_pipeline_extern_type_member_func_register(struct rte_swx_pipeline *p,
2029 const char *extern_type_name,
2031 rte_swx_extern_type_member_func_t member_func)
2033 struct extern_type *type;
2034 struct extern_type_member_func *type_member;
2038 CHECK_NAME(extern_type_name, EINVAL);
2039 type = extern_type_find(p, extern_type_name);
2040 CHECK(type, EINVAL);
2041 CHECK(type->n_funcs < RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX, ENOSPC);
2043 CHECK_NAME(name, EINVAL);
2044 CHECK(!extern_type_member_func_find(type, name), EEXIST);
2046 CHECK(member_func, EINVAL);
2048 /* Node allocation. */
2049 type_member = calloc(1, sizeof(struct extern_type_member_func));
2050 CHECK(type_member, ENOMEM);
2052 /* Node initialization. */
2053 strcpy(type_member->name, name);
2054 type_member->func = member_func;
/* Id assigned in registration order; n_funcs increment elided here. */
2055 type_member->id = type->n_funcs;
2057 /* Node add to tailq. */
2058 TAILQ_INSERT_TAIL(&type->funcs, type_member, node);
/*
 * Instantiate an extern object of a registered type: runs the type's
 * constructor(args), then records the instance. The object claims the next
 * struct id (its mailbox becomes a slot in the per-thread structs[] array);
 * counter increments and error-path cleanup are elided in this extract.
 */
2065 rte_swx_pipeline_extern_object_config(struct rte_swx_pipeline *p,
2066 const char *extern_type_name,
2070 struct extern_type *type;
2071 struct extern_obj *obj;
2076 CHECK_NAME(extern_type_name, EINVAL);
2077 type = extern_type_find(p, extern_type_name);
2078 CHECK(type, EINVAL);
2080 CHECK_NAME(name, EINVAL);
2081 CHECK(!extern_obj_find(p, name), EEXIST);
2083 /* Node allocation. */
2084 obj = calloc(1, sizeof(struct extern_obj));
2087 /* Object construction. */
2088 obj_handle = type->constructor(args);
2094 /* Node initialization. */
2095 strcpy(obj->name, name);
2097 obj->obj = obj_handle;
/* Mailbox slot index and object id are the current counters. */
2098 obj->struct_id = p->n_structs;
2099 obj->id = p->n_extern_objs;
2101 /* Node add to tailq. */
2102 TAILQ_INSERT_TAIL(&p->extern_objs, obj, node);
/*
 * Build step for extern objects: per thread, allocate the runtime array and,
 * per object, a private mailbox sized from the mailbox struct type; cache
 * the member-func pointers by id and publish the mailbox into structs[].
 */
2110 extern_obj_build(struct rte_swx_pipeline *p)
2114 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2115 struct thread *t = &p->threads[i];
2116 struct extern_obj *obj;
2118 t->extern_objs = calloc(p->n_extern_objs,
2119 sizeof(struct extern_obj_runtime));
2120 CHECK(t->extern_objs, ENOMEM);
2122 TAILQ_FOREACH(obj, &p->extern_objs, node) {
2123 struct extern_obj_runtime *r =
2124 &t->extern_objs[obj->id];
2125 struct extern_type_member_func *func;
/* Mailbox size in bytes, from the fixed-size mailbox struct type. */
2126 uint32_t mailbox_size =
2127 obj->type->mailbox_struct_type->n_bits / 8;
2131 r->mailbox = calloc(1, mailbox_size);
2132 CHECK(r->mailbox, ENOMEM);
2134 TAILQ_FOREACH(func, &obj->type->funcs, node)
2135 r->funcs[func->id] = func->func;
/* Expose the mailbox at the object's struct id for instruction access. */
2137 t->structs[obj->struct_id] = r->mailbox;
/* Undo extern_obj_build: free per-thread mailboxes and runtime arrays. */
2145 extern_obj_build_free(struct rte_swx_pipeline *p)
2149 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2150 struct thread *t = &p->threads[i];
2153 if (!t->extern_objs)
2156 for (j = 0; j < p->n_extern_objs; j++) {
2157 struct extern_obj_runtime *r = &t->extern_objs[j];
2162 free(t->extern_objs);
2163 t->extern_objs = NULL;
/*
 * Free extern objects (running each type's destructor on the object handle),
 * then extern types together with their member-function nodes.
 */
2168 extern_obj_free(struct rte_swx_pipeline *p)
2170 extern_obj_build_free(p);
2172 /* Extern objects. */
2174 struct extern_obj *elem;
2176 elem = TAILQ_FIRST(&p->extern_objs);
2180 TAILQ_REMOVE(&p->extern_objs, elem, node);
2182 elem->type->destructor(elem->obj);
2188 struct extern_type *elem;
2190 elem = TAILQ_FIRST(&p->extern_types);
2194 TAILQ_REMOVE(&p->extern_types, elem, node);
2197 struct extern_type_member_func *func;
2199 func = TAILQ_FIRST(&elem->funcs);
2203 TAILQ_REMOVE(&elem->funcs, func, node);
/* Look up an extern function by name (NULL-return path elided). */
2214 static struct extern_func *
2215 extern_func_find(struct rte_swx_pipeline *p, const char *name)
2217 struct extern_func *elem;
2219 TAILQ_FOREACH(elem, &p->extern_funcs, node)
2220 if (strcmp(elem->name, name) == 0)
/* Parse "f.<func>"; extern function references use the "f." prefix. */
2226 static struct extern_func *
2227 extern_func_parse(struct rte_swx_pipeline *p,
2230 if (name[0] != 'f' || name[1] != '.')
2233 return extern_func_find(p, &name[2]);
/*
 * Parse "f.<func>.<mailbox_field>" into function + mailbox struct field.
 * strdup/split scratch handling and cleanup lines are elided.
 */
2236 static struct field *
2237 extern_func_mailbox_field_parse(struct rte_swx_pipeline *p,
2239 struct extern_func **function)
2241 struct extern_func *func;
2243 char *func_name, *field_name;
2245 if ((name[0] != 'f') || (name[1] != '.'))
2248 func_name = strdup(&name[2]);
2252 field_name = strchr(func_name, '.');
2261 func = extern_func_find(p, func_name);
2267 f = struct_type_field_find(func->mailbox_struct_type, field_name);
2281 rte_swx_pipeline_extern_func_register(struct rte_swx_pipeline *p,
2283 const char *mailbox_struct_type_name,
2284 rte_swx_extern_func_t func)
2286 struct extern_func *f;
2287 struct struct_type *mailbox_struct_type;
2291 CHECK_NAME(name, EINVAL);
2292 CHECK(!extern_func_find(p, name), EEXIST);
2294 CHECK_NAME(mailbox_struct_type_name, EINVAL);
2295 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
2296 CHECK(mailbox_struct_type, EINVAL);
2297 CHECK(!mailbox_struct_type->var_size, EINVAL);
2299 CHECK(func, EINVAL);
2301 /* Node allocation. */
2302 f = calloc(1, sizeof(struct extern_func));
2303 CHECK(func, ENOMEM);
2305 /* Node initialization. */
2306 strcpy(f->name, name);
2307 f->mailbox_struct_type = mailbox_struct_type;
2309 f->struct_id = p->n_structs;
2310 f->id = p->n_extern_funcs;
2312 /* Node add to tailq. */
2313 TAILQ_INSERT_TAIL(&p->extern_funcs, f, node);
2314 p->n_extern_funcs++;
/*
 * Build step for extern functions: per thread, allocate the runtime array
 * and a private mailbox per function; publish each mailbox into structs[].
 */
2321 extern_func_build(struct rte_swx_pipeline *p)
2325 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2326 struct thread *t = &p->threads[i];
2327 struct extern_func *func;
2329 /* Memory allocation. */
2330 t->extern_funcs = calloc(p->n_extern_funcs,
2331 sizeof(struct extern_func_runtime));
2332 CHECK(t->extern_funcs, ENOMEM);
2334 /* Extern function. */
2335 TAILQ_FOREACH(func, &p->extern_funcs, node) {
2336 struct extern_func_runtime *r =
2337 &t->extern_funcs[func->id];
/* Mailbox size in bytes, from the fixed-size mailbox struct type. */
2338 uint32_t mailbox_size =
2339 func->mailbox_struct_type->n_bits / 8;
2341 r->func = func->func;
2343 r->mailbox = calloc(1, mailbox_size);
2344 CHECK(r->mailbox, ENOMEM);
/* Expose the mailbox at the function's struct id. */
2346 t->structs[func->struct_id] = r->mailbox;
/* Undo extern_func_build: free per-thread mailboxes and runtime arrays. */
2354 extern_func_build_free(struct rte_swx_pipeline *p)
2358 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2359 struct thread *t = &p->threads[i];
2362 if (!t->extern_funcs)
2365 for (j = 0; j < p->n_extern_funcs; j++) {
2366 struct extern_func_runtime *r = &t->extern_funcs[j];
2371 free(t->extern_funcs);
2372 t->extern_funcs = NULL;
/* Free run-time state, then pop and free every extern_func node. */
2377 extern_func_free(struct rte_swx_pipeline *p)
2379 extern_func_build_free(p);
2382 struct extern_func *elem;
2384 elem = TAILQ_FIRST(&p->extern_funcs);
2388 TAILQ_REMOVE(&p->extern_funcs, elem, node);
/* Look up a header by name (NULL-return path elided). */
2396 static struct header *
2397 header_find(struct rte_swx_pipeline *p, const char *name)
2399 struct header *elem;
2401 TAILQ_FOREACH(elem, &p->headers, node)
2402 if (strcmp(elem->name, name) == 0)
/* Look up a header by its struct id. */
2408 static struct header *
2409 header_find_by_struct_id(struct rte_swx_pipeline *p, uint32_t struct_id)
2411 struct header *elem;
2413 TAILQ_FOREACH(elem, &p->headers, node)
2414 if (elem->struct_id == struct_id)
/* Parse "h.<header>"; header references use the "h." prefix. */
2420 static struct header *
2421 header_parse(struct rte_swx_pipeline *p,
2424 if (name[0] != 'h' || name[1] != '.')
2427 return header_find(p, &name[2]);
/*
 * Parse "h.<header>.<field>" into header + struct field.
 * strdup/split scratch handling and cleanup lines are elided.
 */
2430 static struct field *
2431 header_field_parse(struct rte_swx_pipeline *p,
2433 struct header **header)
2437 char *header_name, *field_name;
2439 if ((name[0] != 'h') || (name[1] != '.'))
2442 header_name = strdup(&name[2]);
2446 field_name = strchr(header_name, '.');
2455 h = header_find(p, header_name);
2461 f = struct_type_field_find(h->st, field_name);
/*
 * Register a packet header of a previously registered struct type. The
 * header count is bounded by the bit width of the thread's valid_headers
 * mask (one bit per header). The header claims the next struct id and
 * header id; the counter increments and return are elided in this extract.
 */
2475 rte_swx_pipeline_packet_header_register(struct rte_swx_pipeline *p,
2477 const char *struct_type_name)
2479 struct struct_type *st;
2481 size_t n_headers_max;
2484 CHECK_NAME(name, EINVAL);
2485 CHECK_NAME(struct_type_name, EINVAL);
2487 CHECK(!header_find(p, name), EEXIST);
2489 st = struct_type_find(p, struct_type_name);
/* One valid_headers bit per header caps the total header count. */
2492 n_headers_max = RTE_SIZEOF_FIELD(struct thread, valid_headers) * 8;
2493 CHECK(p->n_headers < n_headers_max, ENOSPC);
2495 /* Node allocation. */
2496 h = calloc(1, sizeof(struct header));
2499 /* Node initialization. */
2500 strcpy(h->name, name);
2502 h->struct_id = p->n_structs;
2503 h->id = p->n_headers;
2505 /* Node add to tailq. */
2506 TAILQ_INSERT_TAIL(&p->headers, h, node);
/*
 * Build step for headers: size one contiguous storage area (sum of all
 * header byte sizes), then per thread allocate the header runtime arrays,
 * the in/out storage areas, and carve per-header slices out of the storage,
 * publishing each slice into structs[].
 */
2514 header_build(struct rte_swx_pipeline *p)
2517 uint32_t n_bytes = 0, i;
2519 TAILQ_FOREACH(h, &p->headers, node) {
2520 n_bytes += h->st->n_bits / 8;
2523 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2524 struct thread *t = &p->threads[i];
2525 uint32_t offset = 0;
2527 t->headers = calloc(p->n_headers,
2528 sizeof(struct header_runtime));
2529 CHECK(t->headers, ENOMEM);
2531 t->headers_out = calloc(p->n_headers,
2532 sizeof(struct header_out_runtime));
2533 CHECK(t->headers_out, ENOMEM);
2535 t->header_storage = calloc(1, n_bytes);
2536 CHECK(t->header_storage, ENOMEM);
2538 t->header_out_storage = calloc(1, n_bytes);
2539 CHECK(t->header_out_storage, ENOMEM);
2541 TAILQ_FOREACH(h, &p->headers, node) {
2542 uint8_t *header_storage;
/* Inner n_bytes shadows the outer total: this header's size only. */
2543 uint32_t n_bytes = h->st->n_bits / 8;
2545 header_storage = &t->header_storage[offset];
/* ptr0 is the header's home slot in the contiguous storage area. */
2548 t->headers[h->id].ptr0 = header_storage;
2549 t->headers[h->id].n_bytes = n_bytes;
2551 t->structs[h->struct_id] = header_storage;
/* Undo header_build: free per-thread header arrays and storage. */
2559 header_build_free(struct rte_swx_pipeline *p)
2563 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2564 struct thread *t = &p->threads[i];
2566 free(t->headers_out);
2567 t->headers_out = NULL;
2572 free(t->header_out_storage);
2573 t->header_out_storage = NULL;
2575 free(t->header_storage);
2576 t->header_storage = NULL;
/* Free run-time state, then pop and free every header node. */
2581 header_free(struct rte_swx_pipeline *p)
2583 header_build_free(p);
2586 struct header *elem;
2588 elem = TAILQ_FIRST(&p->headers);
2592 TAILQ_REMOVE(&p->headers, elem, node);
/*
 * Parse "m.<field>" against the registered metadata struct type;
 * metadata references use the "m." prefix.
 */
2600 static struct field *
2601 metadata_field_parse(struct rte_swx_pipeline *p, const char *name)
2603 if (!p->metadata_st)
2606 if (name[0] != 'm' || name[1] != '.')
2609 return struct_type_field_find(p->metadata_st, &name[2]);
/*
 * Register the packet metadata struct type (exactly once per pipeline;
 * must be fixed size). Claims the next struct id for the metadata slot.
 */
2613 rte_swx_pipeline_packet_metadata_register(struct rte_swx_pipeline *p,
2614 const char *struct_type_name)
2616 struct struct_type *st = NULL;
2620 CHECK_NAME(struct_type_name, EINVAL);
2621 st = struct_type_find(p, struct_type_name);
2623 CHECK(!st->var_size, EINVAL);
/* Only one metadata struct type may be registered. */
2624 CHECK(!p->metadata_st, EINVAL);
2626 p->metadata_st = st;
2627 p->metadata_struct_id = p->n_structs;
/*
 * Build step for metadata: per thread, allocate one zeroed metadata buffer
 * and publish it at the metadata struct id in structs[].
 */
2635 metadata_build(struct rte_swx_pipeline *p)
2637 uint32_t n_bytes = p->metadata_st->n_bits / 8;
2640 /* Thread-level initialization. */
2641 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2642 struct thread *t = &p->threads[i];
2645 metadata = calloc(1, n_bytes);
2646 CHECK(metadata, ENOMEM);
2648 t->metadata = metadata;
2649 t->structs[p->metadata_struct_id] = metadata;
/* Undo metadata_build: free each thread's metadata buffer (body elided). */
2656 metadata_build_free(struct rte_swx_pipeline *p)
2660 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2661 struct thread *t = &p->threads[i];
/* Free metadata run-time state (remainder elided in this extract). */
2669 metadata_free(struct rte_swx_pipeline *p)
2671 metadata_build_free(p);
/* Predicate: is this instruction type a tx variant? (body elided). */
2678 instruction_is_tx(enum instruction_type type)
/*
 * Predicate: is this instruction any of the jump variants?
 * Covers unconditional, validity, table hit/miss, action hit/miss and all
 * typed comparison jumps (MH/HM/HH operand combinations, I = immediate).
 */
2691 instruction_is_jmp(struct instruction *instr)
2693 switch (instr->type) {
2695 case INSTR_JMP_VALID:
2696 case INSTR_JMP_INVALID:
2698 case INSTR_JMP_MISS:
2699 case INSTR_JMP_ACTION_HIT:
2700 case INSTR_JMP_ACTION_MISS:
2702 case INSTR_JMP_EQ_MH:
2703 case INSTR_JMP_EQ_HM:
2704 case INSTR_JMP_EQ_HH:
2705 case INSTR_JMP_EQ_I:
2707 case INSTR_JMP_NEQ_MH:
2708 case INSTR_JMP_NEQ_HM:
2709 case INSTR_JMP_NEQ_HH:
2710 case INSTR_JMP_NEQ_I:
2712 case INSTR_JMP_LT_MH:
2713 case INSTR_JMP_LT_HM:
2714 case INSTR_JMP_LT_HH:
2715 case INSTR_JMP_LT_MI:
2716 case INSTR_JMP_LT_HI:
2718 case INSTR_JMP_GT_MH:
2719 case INSTR_JMP_GT_HM:
2720 case INSTR_JMP_GT_HH:
2721 case INSTR_JMP_GT_MI:
2722 case INSTR_JMP_GT_HI:
/* Forward declaration: resolve a field within an action's arg struct. */
2730 static struct field *
2731 action_field_parse(struct action *action, const char *name);
/*
 * Resolve a dotted operand name to a struct field and its struct id, trying
 * each namespace in turn by prefix: "h." header field, "m." metadata field,
 * action argument (action context only, a case elided here), "e." extern
 * object mailbox, "f." extern function mailbox. Dispatch lines between the
 * cases are elided in this extract.
 */
2733 static struct field *
2734 struct_field_parse(struct rte_swx_pipeline *p,
2735 struct action *action,
2737 uint32_t *struct_id)
2744 struct header *header;
2746 f = header_field_parse(p, name, &header);
2750 *struct_id = header->struct_id;
2756 f = metadata_field_parse(p, name);
2760 *struct_id = p->metadata_struct_id;
2769 f = action_field_parse(action, name);
2779 struct extern_obj *obj;
2781 f = extern_obj_mailbox_field_parse(p, name, &obj);
2785 *struct_id = obj->struct_id;
2791 struct extern_func *func;
2793 f = extern_func_mailbox_field_parse(p, name, &func);
2797 *struct_id = func->struct_id;
/* Round-robin to the next input port; relies on n_ports_in being a power of 2. */
2807 pipeline_port_inc(struct rte_swx_pipeline *p)
2809 p->port_id = (p->port_id + 1) & (p->n_ports_in - 1);
/* Reset a thread's instruction pointer to the pipeline's first instruction. */
2813 thread_ip_reset(struct rte_swx_pipeline *p, struct thread *t)
2815 t->ip = p->instructions;
/* Set a thread's instruction pointer explicitly (body elided). */
2819 thread_ip_set(struct thread *t, struct instruction *ip)
/* Jump a thread into the given action's instruction stream. */
2825 thread_ip_action_call(struct rte_swx_pipeline *p,
2830 t->ip = p->action_instructions[action_id];
/* Advance the current thread's instruction pointer by one. */
2834 thread_ip_inc(struct rte_swx_pipeline *p);
2837 thread_ip_inc(struct rte_swx_pipeline *p)
2839 struct thread *t = &p->threads[p->thread_id];
/* Conditionally advance the IP (cond is 0 or 1; body elided). */
2845 thread_ip_inc_cond(struct thread *t, int cond)
/* Switch to the next thread; THREADS_MAX is a power of 2, so mask wraps. */
2851 thread_yield(struct rte_swx_pipeline *p)
2853 p->thread_id = (p->thread_id + 1) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
/* Conditionally switch thread: advances only when cond is non-zero. */
2857 thread_yield_cond(struct rte_swx_pipeline *p, int cond)
2859 p->thread_id = (p->thread_id + cond) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
/*
 * Translate "rx m.<field>": only valid outside actions; records the metadata
 * field (byte offset + bit width) that will receive the input port id.
 */
2866 instr_rx_translate(struct rte_swx_pipeline *p,
2867 struct action *action,
2870 struct instruction *instr,
2871 struct instruction_data *data __rte_unused)
2875 CHECK(!action, EINVAL);
2876 CHECK(n_tokens == 2, EINVAL);
2878 f = metadata_field_parse(p, tokens[1]);
2881 instr->type = INSTR_RX;
2882 instr->io.io.offset = f->offset / 8;
2883 instr->io.io.n_bits = f->n_bits;
/*
 * Execute rx: poll the current input port; on receive, reset per-packet
 * state (valid_headers, headers_out), store the port id into metadata,
 * snapshot the table state, then advance port round-robin and only advance
 * the IP when a packet actually arrived.
 */
2888 instr_rx_exec(struct rte_swx_pipeline *p);
2891 instr_rx_exec(struct rte_swx_pipeline *p)
2893 struct thread *t = &p->threads[p->thread_id];
2894 struct instruction *ip = t->ip;
2895 struct port_in_runtime *port = &p->in[p->port_id];
2896 struct rte_swx_pkt *pkt = &t->pkt;
2900 pkt_received = port->pkt_rx(port->obj, pkt);
2901 t->ptr = &pkt->pkt[pkt->offset];
2902 rte_prefetch0(t->ptr);
2904 TRACE("[Thread %2u] rx %s from port %u\n",
2906 pkt_received ? "1 pkt" : "0 pkts",
/* Fresh packet: no headers parsed or emitted yet. */
2910 t->valid_headers = 0;
2911 t->n_headers_out = 0;
2914 METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, p->port_id);
2917 t->table_state = p->table_state;
2920 pipeline_port_inc(p);
2921 thread_ip_inc_cond(t, pkt_received);
/*
 * Translate "tx <port>": if the operand parses as a metadata field, emit
 * INSTR_TX (port id read from metadata at run time); otherwise it must be
 * an integer literal, emitting INSTR_TX_I with an immediate port id.
 */
2929 instr_tx_translate(struct rte_swx_pipeline *p,
2930 struct action *action __rte_unused,
2933 struct instruction *instr,
2934 struct instruction_data *data __rte_unused)
2936 char *port = tokens[1];
2940 CHECK(n_tokens == 2, EINVAL);
2942 f = metadata_field_parse(p, port);
2944 instr->type = INSTR_TX;
2945 instr->io.io.offset = f->offset / 8;
2946 instr->io.io.n_bits = f->n_bits;
/* Immediate form: strtoul must consume the whole token. */
2951 port_val = strtoul(port, &port, 0);
2952 CHECK(!port[0], EINVAL);
2954 instr->type = INSTR_TX_I;
2955 instr->io.io.val = port_val;
/*
 * Translate "drop": emitted as an immediate tx to the last output port,
 * which by convention is the drop port.
 */
2960 instr_drop_translate(struct rte_swx_pipeline *p,
2961 struct action *action __rte_unused,
2962 char **tokens __rte_unused,
2964 struct instruction *instr,
2965 struct instruction_data *data __rte_unused)
2967 CHECK(n_tokens == 1, EINVAL);
2970 instr->type = INSTR_TX_I;
2971 instr->io.io.val = p->n_ports_out - 1;
/*
 * Finalize emitted headers into the packet buffer before tx. Fast paths:
 * (1) a single emitted header already contiguous with the payload (no
 * change / pure decap) — just widen the packet window; (2) exactly two
 * emitted headers where the second abuts the payload and the first sits in
 * its home storage (encap) — one memcpy. Otherwise fall back to gathering
 * all emitted headers into header_out_storage and copying them in front of
 * the payload. Early returns between the cases are elided in this extract.
 */
2976 emit_handler(struct thread *t)
2978 struct header_out_runtime *h0 = &t->headers_out[0];
2979 struct header_out_runtime *h1 = &t->headers_out[1];
2980 uint32_t offset = 0, i;
2982 /* No header change or header decapsulation. */
2983 if ((t->n_headers_out == 1) &&
2984 (h0->ptr + h0->n_bytes == t->ptr)) {
2985 TRACE("Emit handler: no header change or header decap.\n");
/* Widen the packet window to include the emitted header in place. */
2987 t->pkt.offset -= h0->n_bytes;
2988 t->pkt.length += h0->n_bytes;
2993 /* Header encapsulation (optionally, with prior header decasulation). */
2994 if ((t->n_headers_out == 2) &&
2995 (h1->ptr + h1->n_bytes == t->ptr) &&
2996 (h0->ptr == h0->ptr0)) {
2999 TRACE("Emit handler: header encapsulation.\n");
3001 offset = h0->n_bytes + h1->n_bytes;
/* Copy h0 directly in front of the already in-place h1 + payload. */
3002 memcpy(t->ptr - offset, h0->ptr, h0->n_bytes);
3003 t->pkt.offset -= offset;
3004 t->pkt.length += offset;
3009 /* Header insertion. */
3012 /* Header extraction. */
3015 /* For any other case. */
3016 TRACE("Emit handler: complex case.\n");
/* Gather all emitted headers into the scratch out-storage area... */
3018 for (i = 0; i < t->n_headers_out; i++) {
3019 struct header_out_runtime *h = &t->headers_out[i];
3021 memcpy(&t->header_out_storage[offset], h->ptr, h->n_bytes);
3022 offset += h->n_bytes;
/* ...then copy the whole run in front of the payload in one shot. */
3026 memcpy(t->ptr - offset, t->header_out_storage, offset);
3027 t->pkt.offset -= offset;
3028 t->pkt.length += offset;
/*
 * Execute tx (port id from metadata): finalize emitted headers, hand the
 * packet to the output port driver, then reset the IP for the next packet.
 * The emit_handler() call between trace and tx is elided in this extract.
 */
3033 instr_tx_exec(struct rte_swx_pipeline *p);
3036 instr_tx_exec(struct rte_swx_pipeline *p)
3038 struct thread *t = &p->threads[p->thread_id];
3039 struct instruction *ip = t->ip;
3040 uint64_t port_id = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
3041 struct port_out_runtime *port = &p->out[port_id];
3042 struct rte_swx_pkt *pkt = &t->pkt;
3044 TRACE("[Thread %2u]: tx 1 pkt to port %u\n",
3052 port->pkt_tx(port->obj, pkt);
3055 thread_ip_reset(p, t);
/* Execute tx with an immediate port id; otherwise identical to instr_tx_exec. */
3060 instr_tx_i_exec(struct rte_swx_pipeline *p)
3062 struct thread *t = &p->threads[p->thread_id];
3063 struct instruction *ip = t->ip;
3064 uint64_t port_id = ip->io.io.val;
3065 struct port_out_runtime *port = &p->out[port_id];
3066 struct rte_swx_pkt *pkt = &t->pkt;
3068 TRACE("[Thread %2u]: tx (i) 1 pkt to port %u\n",
3076 port->pkt_tx(port->obj, pkt);
3079 thread_ip_reset(p, t);
/*
 * Translate "extract h.<hdr> [m.<len_field>]": the 2-token form extracts a
 * fixed-size header; the 3-token form extracts a variable-size header whose
 * trailing length (in bytes) is read from a metadata field at run time.
 */
3087 instr_hdr_extract_translate(struct rte_swx_pipeline *p,
3088 struct action *action,
3091 struct instruction *instr,
3092 struct instruction_data *data __rte_unused)
3096 CHECK(!action, EINVAL);
3097 CHECK((n_tokens == 2) || (n_tokens == 3), EINVAL);
3099 h = header_parse(p, tokens[1]);
3102 if (n_tokens == 2) {
/* Fixed-size extract requires a fixed-size header type. */
3103 CHECK(!h->st->var_size, EINVAL);
3105 instr->type = INSTR_HDR_EXTRACT;
3106 instr->io.hdr.header_id[0] = h->id;
3107 instr->io.hdr.struct_id[0] = h->struct_id;
3108 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
/* 3-token form: header must be var-size, length field must be fixed. */
3112 CHECK(h->st->var_size, EINVAL);
3114 mf = metadata_field_parse(p, tokens[2]);
3116 CHECK(!mf->var_size, EINVAL);
3118 instr->type = INSTR_HDR_EXTRACT_M;
3119 instr->io.io.offset = mf->offset / 8;
3120 instr->io.io.n_bits = mf->n_bits;
3121 instr->io.hdr.header_id[0] = h->id;
3122 instr->io.hdr.struct_id[0] = h->struct_id;
/* Minimum (fixed) part only; the variable tail is added at run time. */
3123 instr->io.hdr.n_bytes[0] = h->st->n_bits_min / 8;
/*
 * Translate "lookahead h.<hdr>": maps the header onto the packet without
 * consuming bytes; only fixed-size headers allowed.
 */
3130 instr_hdr_lookahead_translate(struct rte_swx_pipeline *p,
3131 struct action *action,
3134 struct instruction *instr,
3135 struct instruction_data *data __rte_unused)
3139 CHECK(!action, EINVAL);
3140 CHECK(n_tokens == 2, EINVAL);
3142 h = header_parse(p, tokens[1]);
3144 CHECK(!h->st->var_size, EINVAL);
3146 instr->type = INSTR_HDR_LOOKAHEAD;
3147 instr->io.hdr.header_id[0] = h->id;
3148 instr->io.hdr.struct_id[0] = h->struct_id;
3149 instr->io.hdr.n_bytes[0] = 0; /* Unused. */
/*
 * Common extract executor for 1..n fused extract instructions: for each
 * header, point its struct slot at the current packet position, set its
 * valid bit, and advance the packet cursor (cursor-advance and state
 * write-back lines are partially elided in this extract).
 */
3155 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract);
3158 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract)
3160 struct thread *t = &p->threads[p->thread_id];
3161 struct instruction *ip = t->ip;
3162 uint64_t valid_headers = t->valid_headers;
3163 uint8_t *ptr = t->ptr;
3164 uint32_t offset = t->pkt.offset;
3165 uint32_t length = t->pkt.length;
3168 for (i = 0; i < n_extract; i++) {
3169 uint32_t header_id = ip->io.hdr.header_id[i];
3170 uint32_t struct_id = ip->io.hdr.struct_id[i];
3171 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
3173 TRACE("[Thread %2u]: extract header %u (%u bytes)\n",
/* Zero-copy: the header maps directly onto the packet buffer. */
3179 t->structs[struct_id] = ptr;
3180 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
/* Write back accumulated header/packet state. */
3189 t->valid_headers = valid_headers;
3192 t->pkt.offset = offset;
3193 t->pkt.length = length;
/* Single extract. */
3198 instr_hdr_extract_exec(struct rte_swx_pipeline *p)
3200 __instr_hdr_extract_exec(p, 1);
/* Fused variants: one exec call covers 2..8 consecutive extract instructions. */
3207 instr_hdr_extract2_exec(struct rte_swx_pipeline *p)
3209 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
3212 __instr_hdr_extract_exec(p, 2);
3219 instr_hdr_extract3_exec(struct rte_swx_pipeline *p)
3221 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
3224 __instr_hdr_extract_exec(p, 3);
3231 instr_hdr_extract4_exec(struct rte_swx_pipeline *p)
3233 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
3236 __instr_hdr_extract_exec(p, 4);
3243 instr_hdr_extract5_exec(struct rte_swx_pipeline *p)
3245 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
3248 __instr_hdr_extract_exec(p, 5);
3255 instr_hdr_extract6_exec(struct rte_swx_pipeline *p)
3257 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
3260 __instr_hdr_extract_exec(p, 6);
3267 instr_hdr_extract7_exec(struct rte_swx_pipeline *p)
3269 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
3272 __instr_hdr_extract_exec(p, 7);
3279 instr_hdr_extract8_exec(struct rte_swx_pipeline *p)
3281 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
3284 __instr_hdr_extract_exec(p, 8);
/*
 * Execute extract of a variable-size header: the trailing length (bytes) is
 * read from the metadata field recorded at translate time and added to the
 * header's fixed minimum size; the header maps zero-copy onto the packet
 * and the cursor advances by the total size.
 */
3291 instr_hdr_extract_m_exec(struct rte_swx_pipeline *p)
3293 struct thread *t = &p->threads[p->thread_id];
3294 struct instruction *ip = t->ip;
3296 uint64_t valid_headers = t->valid_headers;
3297 uint8_t *ptr = t->ptr;
3298 uint32_t offset = t->pkt.offset;
3299 uint32_t length = t->pkt.length;
/* Run-time variable tail length, read from metadata. */
3301 uint32_t n_bytes_last = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
3302 uint32_t header_id = ip->io.hdr.header_id[0];
3303 uint32_t struct_id = ip->io.hdr.struct_id[0];
3304 uint32_t n_bytes = ip->io.hdr.n_bytes[0];
3306 struct header_runtime *h = &t->headers[header_id];
3308 TRACE("[Thread %2u]: extract header %u (%u + %u bytes)\n",
/* Total size = fixed minimum + variable tail. */
3314 n_bytes += n_bytes_last;
3317 t->structs[struct_id] = ptr;
3318 t->valid_headers = MASK64_BIT_SET(valid_headers, header_id);
/* Record the actual size so emit copies the right number of bytes. */
3319 h->n_bytes = n_bytes;
3322 t->pkt.offset = offset + n_bytes;
3323 t->pkt.length = length - n_bytes;
3324 t->ptr = ptr + n_bytes;
/*
 * Execute lookahead: map the header onto the packet and mark it valid
 * WITHOUT advancing the packet cursor (non-consuming peek).
 */
3331 instr_hdr_lookahead_exec(struct rte_swx_pipeline *p)
3333 struct thread *t = &p->threads[p->thread_id];
3334 struct instruction *ip = t->ip;
3336 uint64_t valid_headers = t->valid_headers;
3337 uint8_t *ptr = t->ptr;
3339 uint32_t header_id = ip->io.hdr.header_id[0];
3340 uint32_t struct_id = ip->io.hdr.struct_id[0];
3342 TRACE("[Thread %2u]: lookahead header %u\n",
3347 t->structs[struct_id] = ptr;
3348 t->valid_headers = MASK64_BIT_SET(valid_headers, header_id);
3358 instr_hdr_emit_translate(struct rte_swx_pipeline *p,
3359 struct action *action __rte_unused,
3362 struct instruction *instr,
3363 struct instruction_data *data __rte_unused)
3367 CHECK(n_tokens == 2, EINVAL);
3369 h = header_parse(p, tokens[1]);
3372 instr->type = INSTR_HDR_EMIT;
3373 instr->io.hdr.header_id[0] = h->id;
3374 instr->io.hdr.struct_id[0] = h->struct_id;
3375 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
/* Common emit helper: append up to n_emit headers to the thread's output
 * header list (t->headers_out). Invalid headers are skipped; headers that
 * are contiguous in memory with the current output entry are coalesced into
 * it instead of starting a new entry. */
3380 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit);
3383 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit)
3385 struct thread *t = &p->threads[p->thread_id];
3386 struct instruction *ip = t->ip;
3387 uint64_t valid_headers = t->valid_headers;
3388 uint32_t n_headers_out = t->n_headers_out;
/* Current (last) output header entry; only meaningful when n_headers_out > 0. */
3389 struct header_out_runtime *ho = &t->headers_out[n_headers_out - 1];
3390 uint8_t *ho_ptr = NULL;
3391 uint32_t ho_nbytes = 0, first = 1, i;
3393 for (i = 0; i < n_emit; i++) {
3394 uint32_t header_id = ip->io.hdr.header_id[i];
3395 uint32_t struct_id = ip->io.hdr.struct_id[i];
3397 struct header_runtime *hi = &t->headers[header_id];
3398 uint8_t *hi_ptr0 = hi->ptr0;
3399 uint32_t n_bytes = hi->n_bytes;
3401 uint8_t *hi_ptr = t->structs[struct_id];
/* Skip headers that are not currently valid. */
3403 if (!MASK64_BIT_GET(valid_headers, header_id))
3406 TRACE("[Thread %2u]: emit header %u\n",
/* First output header: start the list at entry 0. */
3414 if (!t->n_headers_out) {
3415 ho = &t->headers_out[0];
3421 ho_nbytes = n_bytes;
3428 ho_nbytes = ho->n_bytes;
/* Contiguous with the current output entry: coalesce instead of
 * creating a new entry. */
3432 if (ho_ptr + ho_nbytes == hi_ptr) {
3433 ho_nbytes += n_bytes;
3435 ho->n_bytes = ho_nbytes;
3442 ho_nbytes = n_bytes;
3448 ho->n_bytes = ho_nbytes;
3449 t->n_headers_out = n_headers_out;
/* Emit a single header: thin wrapper over __instr_hdr_emit_exec(). */
3453 instr_hdr_emit_exec(struct rte_swx_pipeline *p)
3455 __instr_hdr_emit_exec(p, 1);
/* Fused emit + tx executors: each variant emits N headers (N = 1..8) via
 * __instr_hdr_emit_exec(p, N), covering N emit instructions plus the tx
 * instruction that follows them (hence "next N+1 instructions are fused"). */
3462 instr_hdr_emit_tx_exec(struct rte_swx_pipeline *p)
3464 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
3467 __instr_hdr_emit_exec(p, 1);
3472 instr_hdr_emit2_tx_exec(struct rte_swx_pipeline *p)
3474 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
3477 __instr_hdr_emit_exec(p, 2);
3482 instr_hdr_emit3_tx_exec(struct rte_swx_pipeline *p)
3484 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
3487 __instr_hdr_emit_exec(p, 3);
3492 instr_hdr_emit4_tx_exec(struct rte_swx_pipeline *p)
3494 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
3497 __instr_hdr_emit_exec(p, 4);
3502 instr_hdr_emit5_tx_exec(struct rte_swx_pipeline *p)
3504 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
3507 __instr_hdr_emit_exec(p, 5);
3512 instr_hdr_emit6_tx_exec(struct rte_swx_pipeline *p)
3514 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
3517 __instr_hdr_emit_exec(p, 6);
3522 instr_hdr_emit7_tx_exec(struct rte_swx_pipeline *p)
3524 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
3527 __instr_hdr_emit_exec(p, 7);
3532 instr_hdr_emit8_tx_exec(struct rte_swx_pipeline *p)
3534 TRACE("[Thread %2u] *** The next 9 instructions are fused. ***\n",
3537 __instr_hdr_emit_exec(p, 8);
/* Translate "validate h.header": resolve the header name and record its id
 * in an INSTR_HDR_VALIDATE instruction. */
3545 instr_hdr_validate_translate(struct rte_swx_pipeline *p,
3546 struct action *action __rte_unused,
3549 struct instruction *instr,
3550 struct instruction_data *data __rte_unused)
3554 CHECK(n_tokens == 2, EINVAL);
3556 h = header_parse(p, tokens[1]);
3559 instr->type = INSTR_HDR_VALIDATE;
3560 instr->valid.header_id = h->id;
/* Set the valid bit for the given header in the thread's valid_headers mask. */
3565 instr_hdr_validate_exec(struct rte_swx_pipeline *p)
3567 struct thread *t = &p->threads[p->thread_id];
3568 struct instruction *ip = t->ip;
3569 uint32_t header_id = ip->valid.header_id;
3571 TRACE("[Thread %2u] validate header %u\n", p->thread_id, header_id);
3574 t->valid_headers = MASK64_BIT_SET(t->valid_headers, header_id);
/* Translate "invalidate h.header": resolve the header name and record its
 * id in an INSTR_HDR_INVALIDATE instruction. */
3584 instr_hdr_invalidate_translate(struct rte_swx_pipeline *p,
3585 struct action *action __rte_unused,
3588 struct instruction *instr,
3589 struct instruction_data *data __rte_unused)
3593 CHECK(n_tokens == 2, EINVAL);
3595 h = header_parse(p, tokens[1]);
3598 instr->type = INSTR_HDR_INVALIDATE;
3599 instr->valid.header_id = h->id;
/* Clear the valid bit for the given header in the thread's valid_headers
 * mask (mirror of instr_hdr_validate_exec). */
3604 instr_hdr_invalidate_exec(struct rte_swx_pipeline *p)
3606 struct thread *t = &p->threads[p->thread_id];
3607 struct instruction *ip = t->ip;
3608 uint32_t header_id = ip->valid.header_id;
3610 TRACE("[Thread %2u] invalidate header %u\n", p->thread_id, header_id);
3613 t->valid_headers = MASK64_BIT_CLR(t->valid_headers, header_id);
/* Forward declarations for the name lookups used by the table translator. */
3622 static struct table *
3623 table_find(struct rte_swx_pipeline *p, const char *name);
3625 static struct selector *
3626 selector_find(struct rte_swx_pipeline *p, const char *name);
/* Translate "table NAME": only legal outside actions (CHECK(!action)).
 * The name is resolved first as a table (INSTR_TABLE), then as a selector
 * (INSTR_SELECTOR); both reuse instr->table.table_id for the object id. */
3629 instr_table_translate(struct rte_swx_pipeline *p,
3630 struct action *action,
3633 struct instruction *instr,
3634 struct instruction_data *data __rte_unused)
3639 CHECK(!action, EINVAL);
3640 CHECK(n_tokens == 2, EINVAL);
3642 t = table_find(p, tokens[1]);
3644 instr->type = INSTR_TABLE;
3645 instr->table.table_id = t->id;
/* Not a table: try resolving the same name as a selector. */
3649 s = selector_find(p, tokens[1]);
3651 instr->type = INSTR_SELECTOR;
3652 instr->table.table_id = s->id;
/* Table lookup executor: runs the table's lookup function; if not yet
 * finalized the thread yields, otherwise the hit/miss result selects either
 * the matched action or the table's default action, statistics are updated,
 * and control transfers into the selected action's instruction stream. */
3660 instr_table_exec(struct rte_swx_pipeline *p)
3662 struct thread *t = &p->threads[p->thread_id];
3663 struct instruction *ip = t->ip;
3664 uint32_t table_id = ip->table.table_id;
3665 struct rte_swx_table_state *ts = &t->table_state[table_id];
3666 struct table_runtime *table = &t->tables[table_id];
3667 struct table_statistics *stats = &p->table_stats[table_id];
3668 uint64_t action_id, n_pkts_hit, n_pkts_action;
3669 uint8_t *action_data;
3673 done = table->func(ts->obj,
/* Lookup not finalized yet: trace and (presumably) yield the thread. */
3681 TRACE("[Thread %2u] table %u (not finalized)\n",
/* On miss, fall back to the table's default action and data. */
3689 action_id = hit ? action_id : ts->default_action_id;
3690 action_data = hit ? action_data : ts->default_action_data;
3691 n_pkts_hit = stats->n_pkts_hit[hit];
3692 n_pkts_action = stats->n_pkts_action[action_id];
3694 TRACE("[Thread %2u] table %u (%s, action %u)\n",
3697 hit ? "hit" : "miss",
3698 (uint32_t)action_id);
3700 t->action_id = action_id;
/* Struct 0 is the action-data struct consumed by action instructions. */
3701 t->structs[0] = action_data;
3703 stats->n_pkts_hit[hit] = n_pkts_hit + 1;
3704 stats->n_pkts_action[action_id] = n_pkts_action + 1;
/* Jump into the selected action's code. */
3707 thread_ip_action_call(p, t, action_id);
/* Selector executor: runs the member selection for the selector table
 * (selector state lives after the regular tables in table_state, hence the
 * p->n_tables + selector_id index), yields if not finalized, and bumps the
 * per-selector packet counter. */
3711 instr_selector_exec(struct rte_swx_pipeline *p)
3713 struct thread *t = &p->threads[p->thread_id];
3714 struct instruction *ip = t->ip;
3715 uint32_t selector_id = ip->table.table_id;
3716 struct rte_swx_table_state *ts = &t->table_state[p->n_tables + selector_id];
3717 struct selector_runtime *selector = &t->selectors[selector_id];
3718 struct selector_statistics *stats = &p->selector_stats[selector_id];
3719 uint64_t n_pkts = stats->n_pkts;
3723 done = rte_swx_table_selector_select(ts->obj,
3725 selector->group_id_buffer,
3726 selector->selector_buffer,
3727 selector->member_id_buffer);
3730 TRACE("[Thread %2u] selector %u (not finalized)\n",
3739 TRACE("[Thread %2u] selector %u\n",
3743 stats->n_pkts = n_pkts + 1;
/* Translate "extern ..." instructions. The first character of the operand
 * discriminates the form: 'e' = extern object member function call
 * (obj.func), 'f' = free-standing extern function call. */
3753 instr_extern_translate(struct rte_swx_pipeline *p,
3754 struct action *action __rte_unused,
3757 struct instruction *instr,
3758 struct instruction_data *data __rte_unused)
3760 char *token = tokens[1];
3762 CHECK(n_tokens == 2, EINVAL);
3764 if (token[0] == 'e') {
3765 struct extern_obj *obj;
3766 struct extern_type_member_func *func;
3768 func = extern_obj_member_func_parse(p, token, &obj);
3769 CHECK(func, EINVAL);
3771 instr->type = INSTR_EXTERN_OBJ;
3772 instr->ext_obj.ext_obj_id = obj->id;
3773 instr->ext_obj.func_id = func->id;
3778 if (token[0] == 'f') {
3779 struct extern_func *func;
3781 func = extern_func_parse(p, token);
3782 CHECK(func, EINVAL);
3784 instr->type = INSTR_EXTERN_FUNC;
3785 instr->ext_func.ext_func_id = func->id;
/* Invoke an extern object member function. The function returns non-zero
 * when complete: the IP advances and the thread keeps running only on
 * completion (done), otherwise the thread yields (done ^ 1). */
3794 instr_extern_obj_exec(struct rte_swx_pipeline *p)
3796 struct thread *t = &p->threads[p->thread_id];
3797 struct instruction *ip = t->ip;
3798 uint32_t obj_id = ip->ext_obj.ext_obj_id;
3799 uint32_t func_id = ip->ext_obj.func_id;
3800 struct extern_obj_runtime *obj = &t->extern_objs[obj_id];
3801 rte_swx_extern_type_member_func_t func = obj->funcs[func_id];
3803 TRACE("[Thread %2u] extern obj %u member func %u\n",
3808 /* Extern object member function execute. */
3809 uint32_t done = func(obj->obj, obj->mailbox);
3812 thread_ip_inc_cond(t, done);
3813 thread_yield_cond(p, done ^ 1);
/* Invoke a free-standing extern function; same completion protocol as
 * instr_extern_obj_exec (advance IP on done, yield otherwise). */
3817 instr_extern_func_exec(struct rte_swx_pipeline *p)
3819 struct thread *t = &p->threads[p->thread_id];
3820 struct instruction *ip = t->ip;
3821 uint32_t ext_func_id = ip->ext_func.ext_func_id;
3822 struct extern_func_runtime *ext_func = &t->extern_funcs[ext_func_id];
3823 rte_swx_extern_func_t func = ext_func->func;
3825 TRACE("[Thread %2u] extern func %u\n",
3829 /* Extern function execute. */
3830 uint32_t done = func(ext_func->mailbox);
3833 thread_ip_inc_cond(t, done);
3834 thread_yield_cond(p, done ^ 1);
/* Translate "mov dst src". Two forms:
 *  - field source: variant (MOV / MOV_MH / MOV_HM / MOV_HH) is picked from
 *    whether dst/src are header fields ('h' prefix, big-endian) or
 *    meta-data/action fields (host byte order);
 *  - immediate source: MOV_I, with the constant pre-converted to the
 *    destination's network-byte-order layout via hton64() + right shift. */
3841 instr_mov_translate(struct rte_swx_pipeline *p,
3842 struct action *action,
3845 struct instruction *instr,
3846 struct instruction_data *data __rte_unused)
3848 char *dst = tokens[1], *src = tokens[2];
3849 struct field *fdst, *fsrc;
3851 uint32_t dst_struct_id = 0, src_struct_id = 0;
3853 CHECK(n_tokens == 3, EINVAL);
3855 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3856 CHECK(fdst, EINVAL);
3857 CHECK(!fdst->var_size, EINVAL);
3859 /* MOV, MOV_MH, MOV_HM or MOV_HH. */
3860 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3862 CHECK(!fsrc->var_size, EINVAL);
3864 instr->type = INSTR_MOV;
3865 if (dst[0] != 'h' && src[0] == 'h')
3866 instr->type = INSTR_MOV_MH;
3867 if (dst[0] == 'h' && src[0] != 'h')
3868 instr->type = INSTR_MOV_HM;
3869 if (dst[0] == 'h' && src[0] == 'h')
3870 instr->type = INSTR_MOV_HH;
3872 instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
3873 instr->mov.dst.n_bits = fdst->n_bits;
3874 instr->mov.dst.offset = fdst->offset / 8;
3875 instr->mov.src.struct_id = (uint8_t)src_struct_id;
3876 instr->mov.src.n_bits = fsrc->n_bits;
3877 instr->mov.src.offset = fsrc->offset / 8;
/* Immediate source: parse the constant; CHECK rejects trailing junk. */
3882 src_val = strtoull(src, &src, 0);
3883 CHECK(!src[0], EINVAL);
/* Pre-swap the immediate into the header field's big-endian layout. */
3886 src_val = hton64(src_val) >> (64 - fdst->n_bits);
3888 instr->type = INSTR_MOV_I;
3889 instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
3890 instr->mov.dst.n_bits = fdst->n_bits;
3891 instr->mov.dst.offset = fdst->offset / 8;
3892 instr->mov.src_val = src_val;
/* MOV executors, one per byte-order variant (plain, mh, hm, hh) plus the
 * immediate form; each traces then performs the copy (elided here). */
3897 instr_mov_exec(struct rte_swx_pipeline *p)
3899 struct thread *t = &p->threads[p->thread_id];
3900 struct instruction *ip = t->ip;
3902 TRACE("[Thread %2u] mov\n",
3912 instr_mov_mh_exec(struct rte_swx_pipeline *p)
3914 struct thread *t = &p->threads[p->thread_id];
3915 struct instruction *ip = t->ip;
3917 TRACE("[Thread %2u] mov (mh)\n",
3927 instr_mov_hm_exec(struct rte_swx_pipeline *p)
3929 struct thread *t = &p->threads[p->thread_id];
3930 struct instruction *ip = t->ip;
3932 TRACE("[Thread %2u] mov (hm)\n",
3942 instr_mov_hh_exec(struct rte_swx_pipeline *p)
3944 struct thread *t = &p->threads[p->thread_id];
3945 struct instruction *ip = t->ip;
3947 TRACE("[Thread %2u] mov (hh)\n",
3957 instr_mov_i_exec(struct rte_swx_pipeline *p)
3959 struct thread *t = &p->threads[p->thread_id];
3960 struct instruction *ip = t->ip;
3962 TRACE("[Thread %2u] mov m.f %" PRIx64 "\n",
/* Common DMA helper: copy n_dma chunks of action data (struct 0) into
 * header storage. For each entry, the destination is the header's live
 * packet location when the header is valid, otherwise its backing buffer
 * (h->ptr0, per the MASK64_BIT_GET selection below); the header is then
 * marked valid. */
3976 __instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma);
3979 __instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma)
3981 struct thread *t = &p->threads[p->thread_id];
3982 struct instruction *ip = t->ip;
/* Struct 0 holds the action data written by the last table lookup. */
3983 uint8_t *action_data = t->structs[0];
3984 uint64_t valid_headers = t->valid_headers;
3987 for (i = 0; i < n_dma; i++) {
3988 uint32_t header_id = ip->dma.dst.header_id[i];
3989 uint32_t struct_id = ip->dma.dst.struct_id[i];
3990 uint32_t offset = ip->dma.src.offset[i];
3991 uint32_t n_bytes = ip->dma.n_bytes[i];
3993 struct header_runtime *h = &t->headers[header_id];
3994 uint8_t *h_ptr0 = h->ptr0;
3995 uint8_t *h_ptr = t->structs[struct_id];
/* Valid header: write in place; invalid: write to the backing buffer. */
3997 void *dst = MASK64_BIT_GET(valid_headers, header_id) ?
3999 void *src = &action_data[offset];
4001 TRACE("[Thread %2u] dma h.s t.f\n", p->thread_id);
4004 memcpy(dst, src, n_bytes);
4005 t->structs[struct_id] = dst;
4006 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
4009 t->valid_headers = valid_headers;
/* Fused DMA executors: each variant handles N (1..8) consecutive dma
 * instructions in one call to __instr_dma_ht_exec(p, N). */
4013 instr_dma_ht_exec(struct rte_swx_pipeline *p)
4015 __instr_dma_ht_exec(p, 1);
4022 instr_dma_ht2_exec(struct rte_swx_pipeline *p)
4024 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
4027 __instr_dma_ht_exec(p, 2);
4034 instr_dma_ht3_exec(struct rte_swx_pipeline *p)
4036 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
4039 __instr_dma_ht_exec(p, 3);
4046 instr_dma_ht4_exec(struct rte_swx_pipeline *p)
4048 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
4051 __instr_dma_ht_exec(p, 4);
4058 instr_dma_ht5_exec(struct rte_swx_pipeline *p)
4060 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
4063 __instr_dma_ht_exec(p, 5);
4070 instr_dma_ht6_exec(struct rte_swx_pipeline *p)
4072 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
4075 __instr_dma_ht_exec(p, 6);
4082 instr_dma_ht7_exec(struct rte_swx_pipeline *p)
4084 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
4087 __instr_dma_ht_exec(p, 7);
4094 instr_dma_ht8_exec(struct rte_swx_pipeline *p)
4096 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
4099 __instr_dma_ht_exec(p, 8);
/* Translate "add dst src": field form selects ADD / ADD_HM / ADD_MH /
 * ADD_HH from the header-vs-metadata prefix of each operand; immediate
 * form selects ADD_MI / ADD_HI with the constant parsed via strtoull. */
4109 instr_alu_add_translate(struct rte_swx_pipeline *p,
4110 struct action *action,
4113 struct instruction *instr,
4114 struct instruction_data *data __rte_unused)
4116 char *dst = tokens[1], *src = tokens[2];
4117 struct field *fdst, *fsrc;
4119 uint32_t dst_struct_id = 0, src_struct_id = 0;
4121 CHECK(n_tokens == 3, EINVAL);
4123 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
4124 CHECK(fdst, EINVAL);
4125 CHECK(!fdst->var_size, EINVAL);
4127 /* ADD, ADD_HM, ADD_MH, ADD_HH. */
4128 fsrc = struct_field_parse(p, action, src, &src_struct_id);
4130 CHECK(!fsrc->var_size, EINVAL);
4132 instr->type = INSTR_ALU_ADD;
4133 if (dst[0] == 'h' && src[0] != 'h')
4134 instr->type = INSTR_ALU_ADD_HM;
4135 if (dst[0] != 'h' && src[0] == 'h')
4136 instr->type = INSTR_ALU_ADD_MH;
4137 if (dst[0] == 'h' && src[0] == 'h')
4138 instr->type = INSTR_ALU_ADD_HH;
4140 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4141 instr->alu.dst.n_bits = fdst->n_bits;
4142 instr->alu.dst.offset = fdst->offset / 8;
4143 instr->alu.src.struct_id = (uint8_t)src_struct_id;
4144 instr->alu.src.n_bits = fsrc->n_bits;
4145 instr->alu.src.offset = fsrc->offset / 8;
4149 /* ADD_MI, ADD_HI. */
4150 src_val = strtoull(src, &src, 0);
4151 CHECK(!src[0], EINVAL);
4153 instr->type = INSTR_ALU_ADD_MI;
4155 instr->type = INSTR_ALU_ADD_HI;
4157 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4158 instr->alu.dst.n_bits = fdst->n_bits;
4159 instr->alu.dst.offset = fdst->offset / 8;
4160 instr->alu.src_val = src_val;
/* Translate "sub dst src": structurally identical to the add translator,
 * producing SUB / SUB_HM / SUB_MH / SUB_HH or SUB_MI / SUB_HI. */
4165 instr_alu_sub_translate(struct rte_swx_pipeline *p,
4166 struct action *action,
4169 struct instruction *instr,
4170 struct instruction_data *data __rte_unused)
4172 char *dst = tokens[1], *src = tokens[2];
4173 struct field *fdst, *fsrc;
4175 uint32_t dst_struct_id = 0, src_struct_id = 0;
4177 CHECK(n_tokens == 3, EINVAL);
4179 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
4180 CHECK(fdst, EINVAL);
4181 CHECK(!fdst->var_size, EINVAL);
4183 /* SUB, SUB_HM, SUB_MH, SUB_HH. */
4184 fsrc = struct_field_parse(p, action, src, &src_struct_id);
4186 CHECK(!fsrc->var_size, EINVAL);
4188 instr->type = INSTR_ALU_SUB;
4189 if (dst[0] == 'h' && src[0] != 'h')
4190 instr->type = INSTR_ALU_SUB_HM;
4191 if (dst[0] != 'h' && src[0] == 'h')
4192 instr->type = INSTR_ALU_SUB_MH;
4193 if (dst[0] == 'h' && src[0] == 'h')
4194 instr->type = INSTR_ALU_SUB_HH;
4196 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4197 instr->alu.dst.n_bits = fdst->n_bits;
4198 instr->alu.dst.offset = fdst->offset / 8;
4199 instr->alu.src.struct_id = (uint8_t)src_struct_id;
4200 instr->alu.src.n_bits = fsrc->n_bits;
4201 instr->alu.src.offset = fsrc->offset / 8;
4205 /* SUB_MI, SUB_HI. */
4206 src_val = strtoull(src, &src, 0);
4207 CHECK(!src[0], EINVAL);
4209 instr->type = INSTR_ALU_SUB_MI;
4211 instr->type = INSTR_ALU_SUB_HI;
4213 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4214 instr->alu.dst.n_bits = fdst->n_bits;
4215 instr->alu.dst.offset = fdst->offset / 8;
4216 instr->alu.src_val = src_val;
/* Translate "ckadd dst src" (checksum add). The destination must be a
 * 16-bit header field. Source is either another header field
 * (CKADD_FIELD) or a whole header (CKADD_STRUCT, with the special
 * CKADD_STRUCT20 variant for 20-byte headers, e.g. IPv4 without options). */
4221 instr_alu_ckadd_translate(struct rte_swx_pipeline *p,
4222 struct action *action __rte_unused,
4225 struct instruction *instr,
4226 struct instruction_data *data __rte_unused)
4228 char *dst = tokens[1], *src = tokens[2];
4229 struct header *hdst, *hsrc;
4230 struct field *fdst, *fsrc;
4232 CHECK(n_tokens == 3, EINVAL);
/* Destination: 16-bit, fixed-size header field (checksum field). */
4234 fdst = header_field_parse(p, dst, &hdst);
4235 CHECK(fdst && (fdst->n_bits == 16), EINVAL);
4236 CHECK(!fdst->var_size, EINVAL);
4239 fsrc = header_field_parse(p, src, &hsrc);
4241 CHECK(!fsrc->var_size, EINVAL);
4243 instr->type = INSTR_ALU_CKADD_FIELD;
4244 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
4245 instr->alu.dst.n_bits = fdst->n_bits;
4246 instr->alu.dst.offset = fdst->offset / 8;
4247 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
4248 instr->alu.src.n_bits = fsrc->n_bits;
4249 instr->alu.src.offset = fsrc->offset / 8;
4253 /* CKADD_STRUCT, CKADD_STRUCT20. */
4254 hsrc = header_parse(p, src);
4255 CHECK(hsrc, EINVAL);
4256 CHECK(!hsrc->st->var_size, EINVAL);
4258 instr->type = INSTR_ALU_CKADD_STRUCT;
4259 if ((hsrc->st->n_bits / 8) == 20)
4260 instr->type = INSTR_ALU_CKADD_STRUCT20;
4262 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
4263 instr->alu.dst.n_bits = fdst->n_bits;
4264 instr->alu.dst.offset = fdst->offset / 8;
4265 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
4266 instr->alu.src.n_bits = hsrc->st->n_bits;
4267 instr->alu.src.offset = 0; /* Unused. */
/* Translate "cksub dst src" (checksum subtract): both operands must be
 * fixed-size header fields, destination 16 bits wide; only the field form
 * (CKSUB_FIELD) exists — there is no whole-struct variant here. */
4272 instr_alu_cksub_translate(struct rte_swx_pipeline *p,
4273 struct action *action __rte_unused,
4276 struct instruction *instr,
4277 struct instruction_data *data __rte_unused)
4279 char *dst = tokens[1], *src = tokens[2];
4280 struct header *hdst, *hsrc;
4281 struct field *fdst, *fsrc;
4283 CHECK(n_tokens == 3, EINVAL);
4285 fdst = header_field_parse(p, dst, &hdst);
4286 CHECK(fdst && (fdst->n_bits == 16), EINVAL);
4287 CHECK(!fdst->var_size, EINVAL);
4289 fsrc = header_field_parse(p, src, &hsrc);
4290 CHECK(fsrc, EINVAL);
4291 CHECK(!fsrc->var_size, EINVAL);
4293 instr->type = INSTR_ALU_CKSUB_FIELD;
4294 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
4295 instr->alu.dst.n_bits = fdst->n_bits;
4296 instr->alu.dst.offset = fdst->offset / 8;
4297 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
4298 instr->alu.src.n_bits = fsrc->n_bits;
4299 instr->alu.src.offset = fsrc->offset / 8;
/* Translate "shl dst src" (shift left): same shape as the add translator —
 * SHL / SHL_HM / SHL_MH / SHL_HH for field source, SHL_MI / SHL_HI for an
 * immediate shift amount. */
4304 instr_alu_shl_translate(struct rte_swx_pipeline *p,
4305 struct action *action,
4308 struct instruction *instr,
4309 struct instruction_data *data __rte_unused)
4311 char *dst = tokens[1], *src = tokens[2];
4312 struct field *fdst, *fsrc;
4314 uint32_t dst_struct_id = 0, src_struct_id = 0;
4316 CHECK(n_tokens == 3, EINVAL);
4318 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
4319 CHECK(fdst, EINVAL);
4320 CHECK(!fdst->var_size, EINVAL);
4322 /* SHL, SHL_HM, SHL_MH, SHL_HH. */
4323 fsrc = struct_field_parse(p, action, src, &src_struct_id);
4325 CHECK(!fsrc->var_size, EINVAL);
4327 instr->type = INSTR_ALU_SHL;
4328 if (dst[0] == 'h' && src[0] != 'h')
4329 instr->type = INSTR_ALU_SHL_HM;
4330 if (dst[0] != 'h' && src[0] == 'h')
4331 instr->type = INSTR_ALU_SHL_MH;
4332 if (dst[0] == 'h' && src[0] == 'h')
4333 instr->type = INSTR_ALU_SHL_HH;
4335 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4336 instr->alu.dst.n_bits = fdst->n_bits;
4337 instr->alu.dst.offset = fdst->offset / 8;
4338 instr->alu.src.struct_id = (uint8_t)src_struct_id;
4339 instr->alu.src.n_bits = fsrc->n_bits;
4340 instr->alu.src.offset = fsrc->offset / 8;
4344 /* SHL_MI, SHL_HI. */
4345 src_val = strtoull(src, &src, 0);
4346 CHECK(!src[0], EINVAL);
4348 instr->type = INSTR_ALU_SHL_MI;
4350 instr->type = INSTR_ALU_SHL_HI;
4352 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4353 instr->alu.dst.n_bits = fdst->n_bits;
4354 instr->alu.dst.offset = fdst->offset / 8;
4355 instr->alu.src_val = src_val;
/* Translate "shr dst src" (shift right): mirror of the shl translator —
 * SHR / SHR_HM / SHR_MH / SHR_HH or SHR_MI / SHR_HI. */
4360 instr_alu_shr_translate(struct rte_swx_pipeline *p,
4361 struct action *action,
4364 struct instruction *instr,
4365 struct instruction_data *data __rte_unused)
4367 char *dst = tokens[1], *src = tokens[2];
4368 struct field *fdst, *fsrc;
4370 uint32_t dst_struct_id = 0, src_struct_id = 0;
4372 CHECK(n_tokens == 3, EINVAL);
4374 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
4375 CHECK(fdst, EINVAL);
4376 CHECK(!fdst->var_size, EINVAL);
4378 /* SHR, SHR_HM, SHR_MH, SHR_HH. */
4379 fsrc = struct_field_parse(p, action, src, &src_struct_id);
4381 CHECK(!fsrc->var_size, EINVAL);
4383 instr->type = INSTR_ALU_SHR;
4384 if (dst[0] == 'h' && src[0] != 'h')
4385 instr->type = INSTR_ALU_SHR_HM;
4386 if (dst[0] != 'h' && src[0] == 'h')
4387 instr->type = INSTR_ALU_SHR_MH;
4388 if (dst[0] == 'h' && src[0] == 'h')
4389 instr->type = INSTR_ALU_SHR_HH;
4391 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4392 instr->alu.dst.n_bits = fdst->n_bits;
4393 instr->alu.dst.offset = fdst->offset / 8;
4394 instr->alu.src.struct_id = (uint8_t)src_struct_id;
4395 instr->alu.src.n_bits = fsrc->n_bits;
4396 instr->alu.src.offset = fsrc->offset / 8;
4400 /* SHR_MI, SHR_HI. */
4401 src_val = strtoull(src, &src, 0);
4402 CHECK(!src[0], EINVAL);
4404 instr->type = INSTR_ALU_SHR_MI;
4406 instr->type = INSTR_ALU_SHR_HI;
4408 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4409 instr->alu.dst.n_bits = fdst->n_bits;
4410 instr->alu.dst.offset = fdst->offset / 8;
4411 instr->alu.src_val = src_val;
/* Translate "and dst src": AND / AND_MH / AND_HM / AND_HH for field source;
 * AND_I for immediate, with the constant pre-swapped to the header field's
 * big-endian layout (hton64 + shift), same as MOV_I. */
4416 instr_alu_and_translate(struct rte_swx_pipeline *p,
4417 struct action *action,
4420 struct instruction *instr,
4421 struct instruction_data *data __rte_unused)
4423 char *dst = tokens[1], *src = tokens[2];
4424 struct field *fdst, *fsrc;
4426 uint32_t dst_struct_id = 0, src_struct_id = 0;
4428 CHECK(n_tokens == 3, EINVAL);
4430 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
4431 CHECK(fdst, EINVAL);
4432 CHECK(!fdst->var_size, EINVAL);
4434 /* AND, AND_MH, AND_HM, AND_HH. */
4435 fsrc = struct_field_parse(p, action, src, &src_struct_id);
4437 CHECK(!fsrc->var_size, EINVAL);
4439 instr->type = INSTR_ALU_AND;
4440 if (dst[0] != 'h' && src[0] == 'h')
4441 instr->type = INSTR_ALU_AND_MH;
4442 if (dst[0] == 'h' && src[0] != 'h')
4443 instr->type = INSTR_ALU_AND_HM;
4444 if (dst[0] == 'h' && src[0] == 'h')
4445 instr->type = INSTR_ALU_AND_HH;
4447 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4448 instr->alu.dst.n_bits = fdst->n_bits;
4449 instr->alu.dst.offset = fdst->offset / 8;
4450 instr->alu.src.struct_id = (uint8_t)src_struct_id;
4451 instr->alu.src.n_bits = fsrc->n_bits;
4452 instr->alu.src.offset = fsrc->offset / 8;
/* Immediate form: parse constant, reject trailing characters. */
4457 src_val = strtoull(src, &src, 0);
4458 CHECK(!src[0], EINVAL);
/* Pre-swap the immediate for big-endian header destinations. */
4461 src_val = hton64(src_val) >> (64 - fdst->n_bits);
4463 instr->type = INSTR_ALU_AND_I;
4464 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4465 instr->alu.dst.n_bits = fdst->n_bits;
4466 instr->alu.dst.offset = fdst->offset / 8;
4467 instr->alu.src_val = src_val;
/* Translate "or dst src": mirror of the and translator — OR / OR_MH /
 * OR_HM / OR_HH for field source, OR_I for a pre-swapped immediate. */
4472 instr_alu_or_translate(struct rte_swx_pipeline *p,
4473 struct action *action,
4476 struct instruction *instr,
4477 struct instruction_data *data __rte_unused)
4479 char *dst = tokens[1], *src = tokens[2];
4480 struct field *fdst, *fsrc;
4482 uint32_t dst_struct_id = 0, src_struct_id = 0;
4484 CHECK(n_tokens == 3, EINVAL);
4486 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
4487 CHECK(fdst, EINVAL);
4488 CHECK(!fdst->var_size, EINVAL);
4490 /* OR, OR_MH, OR_HM, OR_HH. */
4491 fsrc = struct_field_parse(p, action, src, &src_struct_id);
4493 CHECK(!fsrc->var_size, EINVAL);
4495 instr->type = INSTR_ALU_OR;
4496 if (dst[0] != 'h' && src[0] == 'h')
4497 instr->type = INSTR_ALU_OR_MH;
4498 if (dst[0] == 'h' && src[0] != 'h')
4499 instr->type = INSTR_ALU_OR_HM;
4500 if (dst[0] == 'h' && src[0] == 'h')
4501 instr->type = INSTR_ALU_OR_HH;
4503 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4504 instr->alu.dst.n_bits = fdst->n_bits;
4505 instr->alu.dst.offset = fdst->offset / 8;
4506 instr->alu.src.struct_id = (uint8_t)src_struct_id;
4507 instr->alu.src.n_bits = fsrc->n_bits;
4508 instr->alu.src.offset = fsrc->offset / 8;
4513 src_val = strtoull(src, &src, 0);
4514 CHECK(!src[0], EINVAL);
4517 src_val = hton64(src_val) >> (64 - fdst->n_bits);
4519 instr->type = INSTR_ALU_OR_I;
4520 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4521 instr->alu.dst.n_bits = fdst->n_bits;
4522 instr->alu.dst.offset = fdst->offset / 8;
4523 instr->alu.src_val = src_val;
/* Translate "xor dst src": mirror of the and/or translators — XOR /
 * XOR_MH / XOR_HM / XOR_HH for field source, XOR_I for a pre-swapped
 * immediate. */
4528 instr_alu_xor_translate(struct rte_swx_pipeline *p,
4529 struct action *action,
4532 struct instruction *instr,
4533 struct instruction_data *data __rte_unused)
4535 char *dst = tokens[1], *src = tokens[2];
4536 struct field *fdst, *fsrc;
4538 uint32_t dst_struct_id = 0, src_struct_id = 0;
4540 CHECK(n_tokens == 3, EINVAL);
4542 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
4543 CHECK(fdst, EINVAL);
4544 CHECK(!fdst->var_size, EINVAL);
4546 /* XOR, XOR_MH, XOR_HM, XOR_HH. */
4547 fsrc = struct_field_parse(p, action, src, &src_struct_id);
4549 CHECK(!fsrc->var_size, EINVAL);
4551 instr->type = INSTR_ALU_XOR;
4552 if (dst[0] != 'h' && src[0] == 'h')
4553 instr->type = INSTR_ALU_XOR_MH;
4554 if (dst[0] == 'h' && src[0] != 'h')
4555 instr->type = INSTR_ALU_XOR_HM;
4556 if (dst[0] == 'h' && src[0] == 'h')
4557 instr->type = INSTR_ALU_XOR_HH;
4559 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4560 instr->alu.dst.n_bits = fdst->n_bits;
4561 instr->alu.dst.offset = fdst->offset / 8;
4562 instr->alu.src.struct_id = (uint8_t)src_struct_id;
4563 instr->alu.src.n_bits = fsrc->n_bits;
4564 instr->alu.src.offset = fsrc->offset / 8;
4569 src_val = strtoull(src, &src, 0);
4570 CHECK(!src[0], EINVAL);
4573 src_val = hton64(src_val) >> (64 - fdst->n_bits);
4575 instr->type = INSTR_ALU_XOR_I;
4576 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4577 instr->alu.dst.n_bits = fdst->n_bits;
4578 instr->alu.dst.offset = fdst->offset / 8;
4579 instr->alu.src_val = src_val;
/* ADD executors, one per operand byte-order combination (plain, mh, hm,
 * hh) plus the two immediate variants (mi, hi); bodies elided here. */
4584 instr_alu_add_exec(struct rte_swx_pipeline *p)
4586 struct thread *t = &p->threads[p->thread_id];
4587 struct instruction *ip = t->ip;
4589 TRACE("[Thread %2u] add\n", p->thread_id);
4599 instr_alu_add_mh_exec(struct rte_swx_pipeline *p)
4601 struct thread *t = &p->threads[p->thread_id];
4602 struct instruction *ip = t->ip;
4604 TRACE("[Thread %2u] add (mh)\n", p->thread_id);
4614 instr_alu_add_hm_exec(struct rte_swx_pipeline *p)
4616 struct thread *t = &p->threads[p->thread_id];
4617 struct instruction *ip = t->ip;
4619 TRACE("[Thread %2u] add (hm)\n", p->thread_id);
4629 instr_alu_add_hh_exec(struct rte_swx_pipeline *p)
4631 struct thread *t = &p->threads[p->thread_id];
4632 struct instruction *ip = t->ip;
4634 TRACE("[Thread %2u] add (hh)\n", p->thread_id);
4644 instr_alu_add_mi_exec(struct rte_swx_pipeline *p)
4646 struct thread *t = &p->threads[p->thread_id];
4647 struct instruction *ip = t->ip;
4649 TRACE("[Thread %2u] add (mi)\n", p->thread_id);
4659 instr_alu_add_hi_exec(struct rte_swx_pipeline *p)
4661 struct thread *t = &p->threads[p->thread_id];
4662 struct instruction *ip = t->ip;
4664 TRACE("[Thread %2u] add (hi)\n", p->thread_id);
/* SUB executors: same six-variant layout as the ADD executors above. */
4674 instr_alu_sub_exec(struct rte_swx_pipeline *p)
4676 struct thread *t = &p->threads[p->thread_id];
4677 struct instruction *ip = t->ip;
4679 TRACE("[Thread %2u] sub\n", p->thread_id);
4689 instr_alu_sub_mh_exec(struct rte_swx_pipeline *p)
4691 struct thread *t = &p->threads[p->thread_id];
4692 struct instruction *ip = t->ip;
4694 TRACE("[Thread %2u] sub (mh)\n", p->thread_id);
4704 instr_alu_sub_hm_exec(struct rte_swx_pipeline *p)
4706 struct thread *t = &p->threads[p->thread_id];
4707 struct instruction *ip = t->ip;
4709 TRACE("[Thread %2u] sub (hm)\n", p->thread_id);
4719 instr_alu_sub_hh_exec(struct rte_swx_pipeline *p)
4721 struct thread *t = &p->threads[p->thread_id];
4722 struct instruction *ip = t->ip;
4724 TRACE("[Thread %2u] sub (hh)\n", p->thread_id);
4734 instr_alu_sub_mi_exec(struct rte_swx_pipeline *p)
4736 struct thread *t = &p->threads[p->thread_id];
4737 struct instruction *ip = t->ip;
4739 TRACE("[Thread %2u] sub (mi)\n", p->thread_id);
4749 instr_alu_sub_hi_exec(struct rte_swx_pipeline *p)
4751 struct thread *t = &p->threads[p->thread_id];
4752 struct instruction *ip = t->ip;
4754 TRACE("[Thread %2u] sub (hi)\n", p->thread_id);
/* SHL executors: same six-variant layout as the ADD executors above. */
4764 instr_alu_shl_exec(struct rte_swx_pipeline *p)
4766 struct thread *t = &p->threads[p->thread_id];
4767 struct instruction *ip = t->ip;
4769 TRACE("[Thread %2u] shl\n", p->thread_id);
4779 instr_alu_shl_mh_exec(struct rte_swx_pipeline *p)
4781 struct thread *t = &p->threads[p->thread_id];
4782 struct instruction *ip = t->ip;
4784 TRACE("[Thread %2u] shl (mh)\n", p->thread_id);
4794 instr_alu_shl_hm_exec(struct rte_swx_pipeline *p)
4796 struct thread *t = &p->threads[p->thread_id];
4797 struct instruction *ip = t->ip;
4799 TRACE("[Thread %2u] shl (hm)\n", p->thread_id);
4809 instr_alu_shl_hh_exec(struct rte_swx_pipeline *p)
4811 struct thread *t = &p->threads[p->thread_id];
4812 struct instruction *ip = t->ip;
4814 TRACE("[Thread %2u] shl (hh)\n", p->thread_id);
4824 instr_alu_shl_mi_exec(struct rte_swx_pipeline *p)
4826 struct thread *t = &p->threads[p->thread_id];
4827 struct instruction *ip = t->ip;
4829 TRACE("[Thread %2u] shl (mi)\n", p->thread_id);
4839 instr_alu_shl_hi_exec(struct rte_swx_pipeline *p)
4841 struct thread *t = &p->threads[p->thread_id];
4842 struct instruction *ip = t->ip;
4844 TRACE("[Thread %2u] shl (hi)\n", p->thread_id);
/* SHR executors: same six-variant layout as the ADD executors above. */
4854 instr_alu_shr_exec(struct rte_swx_pipeline *p)
4856 struct thread *t = &p->threads[p->thread_id];
4857 struct instruction *ip = t->ip;
4859 TRACE("[Thread %2u] shr\n", p->thread_id);
4869 instr_alu_shr_mh_exec(struct rte_swx_pipeline *p)
4871 struct thread *t = &p->threads[p->thread_id];
4872 struct instruction *ip = t->ip;
4874 TRACE("[Thread %2u] shr (mh)\n", p->thread_id);
4884 instr_alu_shr_hm_exec(struct rte_swx_pipeline *p)
4886 struct thread *t = &p->threads[p->thread_id];
4887 struct instruction *ip = t->ip;
4889 TRACE("[Thread %2u] shr (hm)\n", p->thread_id);
4899 instr_alu_shr_hh_exec(struct rte_swx_pipeline *p)
4901 struct thread *t = &p->threads[p->thread_id];
4902 struct instruction *ip = t->ip;
4904 TRACE("[Thread %2u] shr (hh)\n", p->thread_id);
4914 instr_alu_shr_mi_exec(struct rte_swx_pipeline *p)
4916 struct thread *t = &p->threads[p->thread_id];
4917 struct instruction *ip = t->ip;
4919 TRACE("[Thread %2u] shr (mi)\n", p->thread_id);
4929 instr_alu_shr_hi_exec(struct rte_swx_pipeline *p)
4931 struct thread *t = &p->threads[p->thread_id];
4932 struct instruction *ip = t->ip;
4934 TRACE("[Thread %2u] shr (hi)\n", p->thread_id);
/* AND executors: plain/mh variants plus hm/hh fast paths that use the
 * ALU_HM_FAST / ALU_HH_FAST macros with the & operator, and the immediate
 * variant (i). */
4944 instr_alu_and_exec(struct rte_swx_pipeline *p)
4946 struct thread *t = &p->threads[p->thread_id];
4947 struct instruction *ip = t->ip;
4949 TRACE("[Thread %2u] and\n", p->thread_id);
4959 instr_alu_and_mh_exec(struct rte_swx_pipeline *p)
4961 struct thread *t = &p->threads[p->thread_id];
4962 struct instruction *ip = t->ip;
4964 TRACE("[Thread %2u] and (mh)\n", p->thread_id);
4974 instr_alu_and_hm_exec(struct rte_swx_pipeline *p)
4976 struct thread *t = &p->threads[p->thread_id];
4977 struct instruction *ip = t->ip;
4979 TRACE("[Thread %2u] and (hm)\n", p->thread_id);
4982 ALU_HM_FAST(t, ip, &);
4989 instr_alu_and_hh_exec(struct rte_swx_pipeline *p)
4991 struct thread *t = &p->threads[p->thread_id];
4992 struct instruction *ip = t->ip;
4994 TRACE("[Thread %2u] and (hh)\n", p->thread_id);
4997 ALU_HH_FAST(t, ip, &);
5004 instr_alu_and_i_exec(struct rte_swx_pipeline *p)
5006 struct thread *t = &p->threads[p->thread_id];
5007 struct instruction *ip = t->ip;
5009 TRACE("[Thread %2u] and (i)\n", p->thread_id);
/* OR executors: same layout as the AND executors, with the | operator in
 * the ALU_HM_FAST / ALU_HH_FAST macros. */
5019 instr_alu_or_exec(struct rte_swx_pipeline *p)
5021 struct thread *t = &p->threads[p->thread_id];
5022 struct instruction *ip = t->ip;
5024 TRACE("[Thread %2u] or\n", p->thread_id);
5034 instr_alu_or_mh_exec(struct rte_swx_pipeline *p)
5036 struct thread *t = &p->threads[p->thread_id];
5037 struct instruction *ip = t->ip;
5039 TRACE("[Thread %2u] or (mh)\n", p->thread_id);
5049 instr_alu_or_hm_exec(struct rte_swx_pipeline *p)
5051 struct thread *t = &p->threads[p->thread_id];
5052 struct instruction *ip = t->ip;
5054 TRACE("[Thread %2u] or (hm)\n", p->thread_id);
5057 ALU_HM_FAST(t, ip, |);
5064 instr_alu_or_hh_exec(struct rte_swx_pipeline *p)
5066 struct thread *t = &p->threads[p->thread_id];
5067 struct instruction *ip = t->ip;
5069 TRACE("[Thread %2u] or (hh)\n", p->thread_id);
5072 ALU_HH_FAST(t, ip, |);
5079 instr_alu_or_i_exec(struct rte_swx_pipeline *p)
5081 struct thread *t = &p->threads[p->thread_id];
5082 struct instruction *ip = t->ip;
5084 TRACE("[Thread %2u] or (i)\n", p->thread_id);
/*
 * Executors for the "xor" (bitwise XOR) instruction variants; same operand
 * suffix convention as "and"/"or" above. Some lines of each function are
 * elided in this view.
 */
instr_alu_xor_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] xor\n", p->thread_id);

/* xor (mh): meta-data destination, header field source. */
instr_alu_xor_mh_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] xor (mh)\n", p->thread_id);

/* xor (hm): header field destination, meta-data source. */
instr_alu_xor_hm_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] xor (hm)\n", p->thread_id);

	/* Fast path: bitwise XOR is byte-order neutral. */
	ALU_HM_FAST(t, ip, ^);

/* xor (hh): both destination and source are header fields. */
instr_alu_xor_hh_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] xor (hh)\n", p->thread_id);

	ALU_HH_FAST(t, ip, ^);

/* xor (i): immediate source operand. */
instr_alu_xor_i_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] xor (i)\n", p->thread_id);
/*
 * ckadd (field): fold a source field (up to 64 bits wide) into a 16-bit
 * ones' complement checksum field (incremental checksum update, in the
 * style of RFC 1071/1624). Function braces, the initial load of the current
 * checksum into r, and the instruction pointer advance are elided in this
 * view.
 */
instr_alu_ckadd_field_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint8_t *dst_struct, *src_struct;
	uint16_t *dst16_ptr, dst;
	uint64_t *src64_ptr, src64, src64_mask, src;

	TRACE("[Thread %2u] ckadd (field)\n", p->thread_id);

	/* Locate the 16-bit checksum field (destination). */
	dst_struct = t->structs[ip->alu.dst.struct_id];
	dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];

	/* Read the source field and mask it down to its declared bit width. */
	src_struct = t->structs[ip->alu.src.struct_id];
	src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
	src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
	src = src64 & src64_mask;

	/* The first input (r) is a 16-bit number. The second and the third
	 * inputs are 32-bit numbers. In the worst case scenario, the sum of the
	 * three numbers (output r) is a 34-bit number.
	 */
	r += (src >> 32) + (src & 0xFFFFFFFF);

	/* The first input is a 16-bit number. The second input is an 18-bit
	 * number. In the worst case scenario, the sum of the two numbers is a
	 * 19-bit number.
	 */
	r = (r & 0xFFFF) + (r >> 16);

	/* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
	 * a 3-bit number (0 .. 7). Their sum is a 17-bit number (0 .. 0x10006).
	 */
	r = (r & 0xFFFF) + (r >> 16);

	/* When the input r is (0 .. 0xFFFF), the output r is equal to the input
	 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
	 * 0x10006), the output r is (0 .. 7). So no carry bit can be generated,
	 * therefore the output r is always a 16-bit number.
	 */
	r = (r & 0xFFFF) + (r >> 16);

	/* Store the folded 16-bit checksum back into the destination field. */
	*dst16_ptr = (uint16_t)r;
/*
 * cksub (field): subtract a source field (up to 64 bits wide) from a 16-bit
 * ones' complement checksum field (incremental checksum update when a field
 * is removed/changed, RFC 1624 style). Function braces, the initial load of
 * the current checksum into r, and the instruction pointer advance are
 * elided in this view.
 */
instr_alu_cksub_field_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint8_t *dst_struct, *src_struct;
	uint16_t *dst16_ptr, dst;
	uint64_t *src64_ptr, src64, src64_mask, src;

	TRACE("[Thread %2u] cksub (field)\n", p->thread_id);

	/* Locate the 16-bit checksum field (destination). */
	dst_struct = t->structs[ip->alu.dst.struct_id];
	dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];

	/* Read the source field and mask it down to its declared bit width. */
	src_struct = t->structs[ip->alu.src.struct_id];
	src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
	src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
	src = src64 & src64_mask;

	/* Subtraction in 1's complement arithmetic (i.e. a '- b) is the same as
	 * the following sequence of operations in 2's complement arithmetic:
	 * a '- b = (a - b) % 0xFFFF.
	 *
	 * In order to prevent an underflow for the below subtraction, in which
	 * a 33-bit number (the subtrahend) is taken out of a 16-bit number (the
	 * minuend), we first add a multiple of the 0xFFFF modulus to the
	 * minuend. The number we add to the minuend needs to be a 34-bit number
	 * or higher, so for readability reasons we picked the 36-bit multiple.
	 * We are effectively turning the 16-bit minuend into a 36-bit number:
	 * (a - b) % 0xFFFF = (a + 0xFFFF00000 - b) % 0xFFFF.
	 */
	r += 0xFFFF00000ULL; /* The output r is a 36-bit number. */

	/* A 33-bit number is subtracted from a 36-bit number (the input r). The
	 * result (the output r) is a 36-bit number.
	 */
	r -= (src >> 32) + (src & 0xFFFFFFFF);

	/* The first input is a 16-bit number. The second input is a 20-bit
	 * number. Their sum is a 21-bit number.
	 */
	r = (r & 0xFFFF) + (r >> 16);

	/* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
	 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1001E).
	 */
	r = (r & 0xFFFF) + (r >> 16);

	/* When the input r is (0 .. 0xFFFF), the output r is equal to the input
	 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
	 * 0x1001E), the output r is (0 .. 31). So no carry bit can be
	 * generated, therefore the output r is always a 16-bit number.
	 */
	r = (r & 0xFFFF) + (r >> 16);

	/* Store the folded 16-bit checksum back into the destination field. */
	*dst16_ptr = (uint16_t)r;
/*
 * ckadd (struct of 20 bytes): specialized checksum computation over a
 * 20-byte header, i.e. an IPv4 header without options (five 32-bit words),
 * with the summation fully unrolled. Function braces, the r0/r1 variable
 * declarations and the instruction pointer advance are elided in this view.
 */
instr_alu_ckadd_struct20_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint8_t *dst_struct, *src_struct;
	uint16_t *dst16_ptr;
	uint32_t *src32_ptr;

	TRACE("[Thread %2u] ckadd (struct of 20 bytes)\n", p->thread_id);

	/* Locate the 16-bit checksum field (destination). */
	dst_struct = t->structs[ip->alu.dst.struct_id];
	dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];

	/* The source header is read as five 32-bit words from offset 0. */
	src_struct = t->structs[ip->alu.src.struct_id];
	src32_ptr = (uint32_t *)&src_struct[0];

	/* Unrolled sum of the five words with bit-width tracking. */
	r0 = src32_ptr[0]; /* r0 is a 32-bit number. */
	r1 = src32_ptr[1]; /* r1 is a 32-bit number. */
	r0 += src32_ptr[2]; /* The output r0 is a 33-bit number. */
	r1 += src32_ptr[3]; /* The output r1 is a 33-bit number. */
	r0 += r1 + src32_ptr[4]; /* The output r0 is a 35-bit number. */

	/* The first input is a 16-bit number. The second input is a 19-bit
	 * number. Their sum is a 20-bit number.
	 */
	r0 = (r0 & 0xFFFF) + (r0 >> 16);

	/* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
	 * a 4-bit number (0 .. 15). The sum is a 17-bit number (0 .. 0x1000E).
	 */
	r0 = (r0 & 0xFFFF) + (r0 >> 16);

	/* When the input r is (0 .. 0xFFFF), the output r is equal to the input
	 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
	 * 0x1000E), the output r is (0 .. 15). So no carry bit can be
	 * generated, therefore the output r is always a 16-bit number.
	 */
	r0 = (r0 & 0xFFFF) + (r0 >> 16);

	/* A result of 0 is transmitted as 0xFFFF (ones' complement negative
	 * zero); the final ones' complement negation is elided in this view.
	 */
	r0 = r0 ? r0 : 0xFFFF;

	*dst16_ptr = (uint16_t)r0;
/*
 * ckadd (struct): generic checksum computation over a header whose size is
 * a multiple of 32 bits (ip->alu.src.n_bits / 32 words). Function braces,
 * the loop counter/accumulator declarations, the loop body (accumulation of
 * *src32_ptr into r) and the instruction pointer advance are elided in this
 * view.
 */
instr_alu_ckadd_struct_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint8_t *dst_struct, *src_struct;
	uint16_t *dst16_ptr;
	uint32_t *src32_ptr;

	TRACE("[Thread %2u] ckadd (struct)\n", p->thread_id);

	/* Locate the 16-bit checksum field (destination). */
	dst_struct = t->structs[ip->alu.dst.struct_id];
	dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];

	/* The source header is read as consecutive 32-bit words. */
	src_struct = t->structs[ip->alu.src.struct_id];
	src32_ptr = (uint32_t *)&src_struct[0];

	/* The max number of 32-bit words in a 256-byte header is 8 = 2^3.
	 * Therefore, in the worst case scenario, a 35-bit number is added to a
	 * 16-bit number (the input r), so the output r is 36-bit number.
	 *
	 * NOTE(review): 8 32-bit words correspond to a 32-byte header, not a
	 * 256-byte one (256 bytes would be 64 words) — confirm the intended
	 * header size bound; the folding below converges regardless.
	 */
	for (i = 0; i < ip->alu.src.n_bits / 32; i++, src32_ptr++)

	/* The first input is a 16-bit number. The second input is a 20-bit
	 * number. Their sum is a 21-bit number.
	 */
	r = (r & 0xFFFF) + (r >> 16);

	/* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
	 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1001E).
	 */
	r = (r & 0xFFFF) + (r >> 16);

	/* When the input r is (0 .. 0xFFFF), the output r is equal to the input
	 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
	 * 0x1001E), the output r is (0 .. 31). So no carry bit can be
	 * generated, therefore the output r is always a 16-bit number.
	 */
	r = (r & 0xFFFF) + (r >> 16);

	/* Store the folded 16-bit checksum into the destination field. */
	*dst16_ptr = (uint16_t)r;
5405 static struct regarray *
5406 regarray_find(struct rte_swx_pipeline *p, const char *name);
/*
 * Translate "regprefetch REGARRAY idx" into one of the INSTR_REGPREFETCH_*
 * opcodes: RH/RM when idx is a header/meta-data struct field, RI when idx
 * is an immediate. Some lines (braces, local declarations of r/fidx/tokens,
 * the CHECK on r, the conditional selecting RH vs RM, returns) are elided
 * in this view.
 */
instr_regprefetch_translate(struct rte_swx_pipeline *p,
			    struct action *action,
			    struct instruction *instr,
			    struct instruction_data *data __rte_unused)
	char *regarray = tokens[1], *idx = tokens[2];
	uint32_t idx_struct_id, idx_val;

	/* Exactly: regprefetch REGARRAY idx. */
	CHECK(n_tokens == 3, EINVAL);

	/* The register array must have been declared. */
	r = regarray_find(p, regarray);

	/* REGPREFETCH_RH, REGPREFETCH_RM: idx is a struct field. */
	fidx = struct_field_parse(p, action, idx, &idx_struct_id);
	/* Variable-size fields cannot be used as an index. */
	CHECK(!fidx->var_size, EINVAL);
	instr->type = INSTR_REGPREFETCH_RM;
	/* (The condition selecting RH for a header field is elided here.) */
	instr->type = INSTR_REGPREFETCH_RH;

	instr->regarray.regarray_id = r->id;
	instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
	instr->regarray.idx.n_bits = fidx->n_bits;
	instr->regarray.idx.offset = fidx->offset / 8; /* Bits to bytes. */
	instr->regarray.dstsrc_val = 0; /* Unused. */

	/* REGPREFETCH_RI: idx is an immediate. */
	idx_val = strtoul(idx, &idx, 0);
	CHECK(!idx[0], EINVAL); /* The whole token must parse as a number. */

	instr->type = INSTR_REGPREFETCH_RI;
	instr->regarray.regarray_id = r->id;
	instr->regarray.idx_val = idx_val;
	instr->regarray.dstsrc_val = 0; /* Unused. */
/*
 * Translate "regrd dst REGARRAY idx" into one of the INSTR_REGRD_* opcodes.
 * The two-letter suffix encodes the dst and idx operand kinds: H = header
 * field (network byte order), M = meta-data (host byte order), I = immediate
 * index. Some lines (braces, local declarations, CHECKs, returns) are
 * elided in this view.
 */
instr_regrd_translate(struct rte_swx_pipeline *p,
		      struct action *action,
		      struct instruction *instr,
		      struct instruction_data *data __rte_unused)
	char *dst = tokens[1], *regarray = tokens[2], *idx = tokens[3];
	struct field *fdst, *fidx;
	uint32_t dst_struct_id, idx_struct_id, idx_val;

	/* Exactly: regrd dst REGARRAY idx. */
	CHECK(n_tokens == 4, EINVAL);

	r = regarray_find(p, regarray);

	/* The destination must always be a struct field; action parameter
	 * lookup is disabled (NULL action) since dst must be writable.
	 */
	fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
	CHECK(fdst, EINVAL);
	CHECK(!fdst->var_size, EINVAL);

	/* REGRD_HRH, REGRD_HRM, REGRD_MRH, REGRD_MRM: idx is a struct field.
	 * The 'h' prefix of the dst/idx token names selects the header-field
	 * (network byte order) variants.
	 */
	fidx = struct_field_parse(p, action, idx, &idx_struct_id);
	CHECK(!fidx->var_size, EINVAL);

	instr->type = INSTR_REGRD_MRM;
	if (dst[0] == 'h' && idx[0] != 'h')
		instr->type = INSTR_REGRD_HRM;
	if (dst[0] != 'h' && idx[0] == 'h')
		instr->type = INSTR_REGRD_MRH;
	if (dst[0] == 'h' && idx[0] == 'h')
		instr->type = INSTR_REGRD_HRH;

	instr->regarray.regarray_id = r->id;
	instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
	instr->regarray.idx.n_bits = fidx->n_bits;
	instr->regarray.idx.offset = fidx->offset / 8; /* Bits to bytes. */
	instr->regarray.dstsrc.struct_id = (uint8_t)dst_struct_id;
	instr->regarray.dstsrc.n_bits = fdst->n_bits;
	instr->regarray.dstsrc.offset = fdst->offset / 8;

	/* REGRD_MRI, REGRD_HRI: idx is an immediate. */
	idx_val = strtoul(idx, &idx, 0);
	CHECK(!idx[0], EINVAL);

	instr->type = INSTR_REGRD_MRI;
	/* (The condition selecting HRI for a header-field dst is elided.) */
	instr->type = INSTR_REGRD_HRI;

	instr->regarray.regarray_id = r->id;
	instr->regarray.idx_val = idx_val;
	instr->regarray.dstsrc.struct_id = (uint8_t)dst_struct_id;
	instr->regarray.dstsrc.n_bits = fdst->n_bits;
	instr->regarray.dstsrc.offset = fdst->offset / 8;
/*
 * Translate "regwr REGARRAY idx src" into one of the INSTR_REGWR_R* opcodes.
 * The suffix letters encode the idx and src operand kinds: H = header field,
 * M = meta-data, I = immediate; e.g. RMI = field index, immediate source.
 * Some lines (braces, local declarations, CHECKs, returns) are elided in
 * this view.
 */
instr_regwr_translate(struct rte_swx_pipeline *p,
		      struct action *action,
		      struct instruction *instr,
		      struct instruction_data *data __rte_unused)
	char *regarray = tokens[1], *idx = tokens[2], *src = tokens[3];
	struct field *fidx, *fsrc;
	uint32_t idx_struct_id, idx_val, src_struct_id;

	/* Exactly: regwr REGARRAY idx src. */
	CHECK(n_tokens == 4, EINVAL);

	r = regarray_find(p, regarray);

	/* REGWR_RHH, REGWR_RHM, REGWR_RMH, REGWR_RMM: both idx and src are
	 * struct fields; the 'h' prefix of the token names selects the
	 * header-field (network byte order) variants.
	 */
	fidx = struct_field_parse(p, action, idx, &idx_struct_id);
	fsrc = struct_field_parse(p, action, src, &src_struct_id);
	CHECK(!fidx->var_size, EINVAL);
	CHECK(!fsrc->var_size, EINVAL);

	instr->type = INSTR_REGWR_RMM;
	if (idx[0] == 'h' && src[0] != 'h')
		instr->type = INSTR_REGWR_RHM;
	if (idx[0] != 'h' && src[0] == 'h')
		instr->type = INSTR_REGWR_RMH;
	if (idx[0] == 'h' && src[0] == 'h')
		instr->type = INSTR_REGWR_RHH;

	instr->regarray.regarray_id = r->id;
	instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
	instr->regarray.idx.n_bits = fidx->n_bits;
	instr->regarray.idx.offset = fidx->offset / 8; /* Bits to bytes. */
	instr->regarray.dstsrc.struct_id = (uint8_t)src_struct_id;
	instr->regarray.dstsrc.n_bits = fsrc->n_bits;
	instr->regarray.dstsrc.offset = fsrc->offset / 8;

	/* REGWR_RHI, REGWR_RMI: field index, immediate source. */
	if (fidx && !fsrc) {
		CHECK(!fidx->var_size, EINVAL);

		src_val = strtoull(src, &src, 0);
		CHECK(!src[0], EINVAL);

		instr->type = INSTR_REGWR_RMI;
		/* (Condition selecting RHI for a header-field idx elided.) */
		instr->type = INSTR_REGWR_RHI;

		instr->regarray.regarray_id = r->id;
		instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
		instr->regarray.idx.n_bits = fidx->n_bits;
		instr->regarray.idx.offset = fidx->offset / 8;
		instr->regarray.dstsrc_val = src_val;

	/* REGWR_RIH, REGWR_RIM: immediate index, field source. */
	if (!fidx && fsrc) {
		idx_val = strtoul(idx, &idx, 0);
		CHECK(!idx[0], EINVAL);

		CHECK(!fsrc->var_size, EINVAL);

		instr->type = INSTR_REGWR_RIM;
		/* (Condition selecting RIH for a header-field src elided.) */
		instr->type = INSTR_REGWR_RIH;

		instr->regarray.regarray_id = r->id;
		instr->regarray.idx_val = idx_val;
		instr->regarray.dstsrc.struct_id = (uint8_t)src_struct_id;
		instr->regarray.dstsrc.n_bits = fsrc->n_bits;
		instr->regarray.dstsrc.offset = fsrc->offset / 8;

	/* REGWR_RII: both idx and src are immediates. */
	src_val = strtoull(src, &src, 0);
	CHECK(!src[0], EINVAL);

	idx_val = strtoul(idx, &idx, 0);
	CHECK(!idx[0], EINVAL);

	instr->type = INSTR_REGWR_RII;
	/* NOTE(review): unlike every other branch, regarray.regarray_id is not
	 * visibly assigned r->id for the RII case — verify it is set (possibly
	 * on an elided line), otherwise the executor reads an unset id.
	 */
	instr->regarray.idx_val = idx_val;
	instr->regarray.dstsrc_val = src_val;
/*
 * Translate "regadd REGARRAY idx src" into one of the INSTR_REGADD_R*
 * opcodes (read-modify-write add on a register array entry). Same operand
 * suffix convention as regwr: H = header field, M = meta-data, I =
 * immediate. Some lines (braces, local declarations, CHECKs, returns) are
 * elided in this view.
 */
instr_regadd_translate(struct rte_swx_pipeline *p,
		       struct action *action,
		       struct instruction *instr,
		       struct instruction_data *data __rte_unused)
	char *regarray = tokens[1], *idx = tokens[2], *src = tokens[3];
	struct field *fidx, *fsrc;
	uint32_t idx_struct_id, idx_val, src_struct_id;

	/* Exactly: regadd REGARRAY idx src. */
	CHECK(n_tokens == 4, EINVAL);

	r = regarray_find(p, regarray);

	/* REGADD_RHH, REGADD_RHM, REGADD_RMH, REGADD_RMM: both idx and src
	 * are struct fields; the 'h' token prefix selects the header-field
	 * (network byte order) variants.
	 */
	fidx = struct_field_parse(p, action, idx, &idx_struct_id);
	fsrc = struct_field_parse(p, action, src, &src_struct_id);
	CHECK(!fidx->var_size, EINVAL);
	CHECK(!fsrc->var_size, EINVAL);

	instr->type = INSTR_REGADD_RMM;
	if (idx[0] == 'h' && src[0] != 'h')
		instr->type = INSTR_REGADD_RHM;
	if (idx[0] != 'h' && src[0] == 'h')
		instr->type = INSTR_REGADD_RMH;
	if (idx[0] == 'h' && src[0] == 'h')
		instr->type = INSTR_REGADD_RHH;

	instr->regarray.regarray_id = r->id;
	instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
	instr->regarray.idx.n_bits = fidx->n_bits;
	instr->regarray.idx.offset = fidx->offset / 8; /* Bits to bytes. */
	instr->regarray.dstsrc.struct_id = (uint8_t)src_struct_id;
	instr->regarray.dstsrc.n_bits = fsrc->n_bits;
	instr->regarray.dstsrc.offset = fsrc->offset / 8;

	/* REGADD_RHI, REGADD_RMI: field index, immediate source. */
	if (fidx && !fsrc) {
		CHECK(!fidx->var_size, EINVAL);

		src_val = strtoull(src, &src, 0);
		CHECK(!src[0], EINVAL);

		instr->type = INSTR_REGADD_RMI;
		/* (Condition selecting RHI for a header-field idx elided.) */
		instr->type = INSTR_REGADD_RHI;

		instr->regarray.regarray_id = r->id;
		instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
		instr->regarray.idx.n_bits = fidx->n_bits;
		instr->regarray.idx.offset = fidx->offset / 8;
		instr->regarray.dstsrc_val = src_val;

	/* REGADD_RIH, REGADD_RIM: immediate index, field source. */
	if (!fidx && fsrc) {
		idx_val = strtoul(idx, &idx, 0);
		CHECK(!idx[0], EINVAL);

		CHECK(!fsrc->var_size, EINVAL);

		instr->type = INSTR_REGADD_RIM;
		/* (Condition selecting RIH for a header-field src elided.) */
		instr->type = INSTR_REGADD_RIH;

		instr->regarray.regarray_id = r->id;
		instr->regarray.idx_val = idx_val;
		instr->regarray.dstsrc.struct_id = (uint8_t)src_struct_id;
		instr->regarray.dstsrc.n_bits = fsrc->n_bits;
		instr->regarray.dstsrc.offset = fsrc->offset / 8;

	/* REGADD_RII: both idx and src are immediates. */
	src_val = strtoull(src, &src, 0);
	CHECK(!src[0], EINVAL);

	idx_val = strtoul(idx, &idx, 0);
	CHECK(!idx[0], EINVAL);

	instr->type = INSTR_REGADD_RII;
	/* NOTE(review): as in instr_regwr_translate, regarray.regarray_id is
	 * not visibly assigned for the RII case — verify it is set.
	 */
	instr->regarray.idx_val = idx_val;
	instr->regarray.dstsrc_val = src_val;
/* Return the register array referenced by the current instruction (the
 * "return r->regarray;"-style tail of this helper is elided in this view).
 */
static inline uint64_t *
instr_regarray_regarray(struct rte_swx_pipeline *p, struct instruction *ip)
	struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];

/* Read the register array index from a meta-data field (host byte order):
 * mask the raw 64-bit load down to the declared field width, then wrap it
 * into the array bounds with r->size_mask.
 */
static inline uint64_t
instr_regarray_idx_hbo(struct rte_swx_pipeline *p, struct thread *t, struct instruction *ip)
	struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
	uint8_t *idx_struct = t->structs[ip->regarray.idx.struct_id];
	uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->regarray.idx.offset];
	uint64_t idx64 = *idx64_ptr;
	uint64_t idx64_mask = UINT64_MAX >> (64 - ip->regarray.idx.n_bits);
	uint64_t idx = idx64 & idx64_mask & r->size_mask;

#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/* Read the register array index from a header field (network byte order):
 * byte-swap, right-align to the declared field width, then wrap with
 * r->size_mask.
 */
static inline uint64_t
instr_regarray_idx_nbo(struct rte_swx_pipeline *p, struct thread *t, struct instruction *ip)
	struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
	uint8_t *idx_struct = t->structs[ip->regarray.idx.struct_id];
	uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->regarray.idx.offset];
	uint64_t idx64 = *idx64_ptr;
	uint64_t idx = (ntoh64(idx64) >> (64 - ip->regarray.idx.n_bits)) & r->size_mask;

/* Big endian CPUs (#else branch, elided in this view): header fields are
 * already in host byte order, so the HBO reader is reused.
 */
#define instr_regarray_idx_nbo instr_regarray_idx_hbo

/* Register array index supplied as an immediate in the instruction, wrapped
 * into the array bounds with r->size_mask.
 */
static inline uint64_t
instr_regarray_idx_imm(struct rte_swx_pipeline *p, struct instruction *ip)
	struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
	uint64_t idx = ip->regarray.idx_val & r->size_mask;
/* Read the source operand from a meta-data field (host byte order): mask
 * the raw 64-bit load down to the declared field width.
 */
static inline uint64_t
instr_regarray_src_hbo(struct thread *t, struct instruction *ip)
	uint8_t *src_struct = t->structs[ip->regarray.dstsrc.struct_id];
	uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->regarray.dstsrc.offset];
	uint64_t src64 = *src64_ptr;
	uint64_t src64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
	uint64_t src = src64 & src64_mask;

#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/* Read the source operand from a header field (network byte order):
 * byte-swap, then right-align to the declared field width.
 */
static inline uint64_t
instr_regarray_src_nbo(struct thread *t, struct instruction *ip)
	uint8_t *src_struct = t->structs[ip->regarray.dstsrc.struct_id];
	uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->regarray.dstsrc.offset];
	uint64_t src64 = *src64_ptr;
	uint64_t src = ntoh64(src64) >> (64 - ip->regarray.dstsrc.n_bits);

/* Big endian CPUs (#else branch, elided in this view): reuse the HBO reader. */
#define instr_regarray_src_nbo instr_regarray_src_hbo
/* Write a host-byte-order value into a meta-data destination field: only
 * the field's n_bits low bits are replaced, the rest of the 64-bit word is
 * preserved via the mask.
 */
instr_regarray_dst_hbo_src_hbo_set(struct thread *t, struct instruction *ip, uint64_t src)
	uint8_t *dst_struct = t->structs[ip->regarray.dstsrc.struct_id];
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->regarray.dstsrc.offset];
	uint64_t dst64 = *dst64_ptr;
	uint64_t dst64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);

	*dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);

#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/* Write a host-byte-order value into a header (network byte order)
 * destination field: byte-swap and right-align the value first, then do the
 * same masked read-modify-write as the HBO variant.
 */
instr_regarray_dst_nbo_src_hbo_set(struct thread *t, struct instruction *ip, uint64_t src)
	uint8_t *dst_struct = t->structs[ip->regarray.dstsrc.struct_id];
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->regarray.dstsrc.offset];
	uint64_t dst64 = *dst64_ptr;
	uint64_t dst64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);

	src = hton64(src) >> (64 - ip->regarray.dstsrc.n_bits);
	*dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);

/* Big endian CPUs (#else branch, elided in this view): reuse the HBO writer. */
#define instr_regarray_dst_nbo_src_hbo_set instr_regarray_dst_hbo_src_hbo_set
/*
 * Executors for the "regprefetch" instruction: prefetch one register array
 * entry into cache ahead of a later regrd/regwr/regadd. The variants differ
 * only in how the index is obtained: r[h] = header field (network byte
 * order), r[m] = meta-data field (host byte order), r[i] = immediate.
 * Function braces and the instruction pointer advance are elided in this
 * view. (The "&regarray" in the prefetch calls was mis-decoded as "(R)" by
 * extraction and has been restored.)
 */
instr_regprefetch_rh_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint64_t *regarray, idx;

	TRACE("[Thread %2u] regprefetch (r[h])\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_nbo(p, t, ip);
	rte_prefetch0(&regarray[idx]);

/* regprefetch (r[m]): meta-data field index. */
instr_regprefetch_rm_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint64_t *regarray, idx;

	TRACE("[Thread %2u] regprefetch (r[m])\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_hbo(p, t, ip);
	rte_prefetch0(&regarray[idx]);

/* regprefetch (r[i]): immediate index. */
instr_regprefetch_ri_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint64_t *regarray, idx;

	TRACE("[Thread %2u] regprefetch (r[i])\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_imm(p, ip);
	rte_prefetch0(&regarray[idx]);
/*
 * Executors for the "regrd" instruction: read one 64-bit register array
 * entry into a destination field. Suffix: first letter = dst kind (h =
 * header field / network order, m = meta-data / host order), then the index
 * kind inside r[...] (h/m/i). Function braces and the instruction pointer
 * advance are elided in this view.
 */
instr_regrd_hrh_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint64_t *regarray, idx;

	TRACE("[Thread %2u] regrd (h = r[h])\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_nbo(p, t, ip);
	instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);

/* regrd (h = r[m]). */
instr_regrd_hrm_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint64_t *regarray, idx;

	TRACE("[Thread %2u] regrd (h = r[m])\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_hbo(p, t, ip);
	instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);

/* regrd (m = r[h]). */
instr_regrd_mrh_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint64_t *regarray, idx;

	TRACE("[Thread %2u] regrd (m = r[h])\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_nbo(p, t, ip);
	instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);

/* regrd (m = r[m]).
 * NOTE(review): unlike every sibling variant, no TRACE call is visible here
 * — confirm whether it is missing or merely elided from this view.
 */
instr_regrd_mrm_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint64_t *regarray, idx;

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_hbo(p, t, ip);
	instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);

/* regrd (h = r[i]): immediate index. */
instr_regrd_hri_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint64_t *regarray, idx;

	TRACE("[Thread %2u] regrd (h = r[i])\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_imm(p, ip);
	instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);

/* regrd (m = r[i]): immediate index. */
instr_regrd_mri_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint64_t *regarray, idx;

	TRACE("[Thread %2u] regrd (m = r[i])\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_imm(p, ip);
	instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
/*
 * Executors for the "regwr" instruction: overwrite one 64-bit register
 * array entry. Suffix letters inside the TRACE strings: index kind in
 * r[...] (h = header field / network order, m = meta-data / host order,
 * i = immediate), then the source kind on the right-hand side. Function
 * braces and the instruction pointer advance are elided in this view.
 */
instr_regwr_rhh_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regwr (r[h] = h)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_nbo(p, t, ip);
	src = instr_regarray_src_nbo(t, ip);
	regarray[idx] = src;

/* regwr (r[h] = m). */
instr_regwr_rhm_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regwr (r[h] = m)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_nbo(p, t, ip);
	src = instr_regarray_src_hbo(t, ip);
	regarray[idx] = src;

/* regwr (r[m] = h). */
instr_regwr_rmh_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regwr (r[m] = h)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_hbo(p, t, ip);
	src = instr_regarray_src_nbo(t, ip);
	regarray[idx] = src;

/* regwr (r[m] = m). */
instr_regwr_rmm_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regwr (r[m] = m)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_hbo(p, t, ip);
	src = instr_regarray_src_hbo(t, ip);
	regarray[idx] = src;

/* regwr (r[h] = i): immediate source. */
instr_regwr_rhi_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regwr (r[h] = i)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_nbo(p, t, ip);
	src = ip->regarray.dstsrc_val;
	regarray[idx] = src;

/* regwr (r[m] = i): immediate source. */
instr_regwr_rmi_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regwr (r[m] = i)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_hbo(p, t, ip);
	src = ip->regarray.dstsrc_val;
	regarray[idx] = src;

/* regwr (r[i] = h): immediate index, header field source. */
instr_regwr_rih_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regwr (r[i] = h)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_imm(p, ip);
	src = instr_regarray_src_nbo(t, ip);
	regarray[idx] = src;

/* regwr (r[i] = m): immediate index, meta-data source. */
instr_regwr_rim_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regwr (r[i] = m)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_imm(p, ip);
	src = instr_regarray_src_hbo(t, ip);
	regarray[idx] = src;

/* regwr (r[i] = i): both index and source are immediates. */
instr_regwr_rii_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regwr (r[i] = i)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_imm(p, ip);
	src = ip->regarray.dstsrc_val;
	regarray[idx] = src;
/*
 * Executors for the "regadd" instruction: add a source value to one 64-bit
 * register array entry (read-modify-write). Same suffix convention as
 * regwr: index kind in r[...] (h/m/i), then the source kind. Function
 * braces and the instruction pointer advance are elided in this view.
 * NOTE(review): the += on a shared register array is not atomic — safe only
 * under the pipeline's single-writer threading model; confirm against the
 * surrounding run-time design.
 */
instr_regadd_rhh_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regadd (r[h] += h)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_nbo(p, t, ip);
	src = instr_regarray_src_nbo(t, ip);
	regarray[idx] += src;

/* regadd (r[h] += m). */
instr_regadd_rhm_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regadd (r[h] += m)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_nbo(p, t, ip);
	src = instr_regarray_src_hbo(t, ip);
	regarray[idx] += src;

/* regadd (r[m] += h). */
instr_regadd_rmh_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regadd (r[m] += h)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_hbo(p, t, ip);
	src = instr_regarray_src_nbo(t, ip);
	regarray[idx] += src;

/* regadd (r[m] += m). */
instr_regadd_rmm_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regadd (r[m] += m)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_hbo(p, t, ip);
	src = instr_regarray_src_hbo(t, ip);
	regarray[idx] += src;

/* regadd (r[h] += i): immediate source. */
instr_regadd_rhi_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regadd (r[h] += i)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_nbo(p, t, ip);
	src = ip->regarray.dstsrc_val;
	regarray[idx] += src;

/* regadd (r[m] += i): immediate source. */
instr_regadd_rmi_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regadd (r[m] += i)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_hbo(p, t, ip);
	src = ip->regarray.dstsrc_val;
	regarray[idx] += src;

/* regadd (r[i] += h): immediate index, header field source. */
instr_regadd_rih_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regadd (r[i] += h)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_imm(p, ip);
	src = instr_regarray_src_nbo(t, ip);
	regarray[idx] += src;

/* regadd (r[i] += m): immediate index, meta-data source. */
instr_regadd_rim_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regadd (r[i] += m)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_imm(p, ip);
	src = instr_regarray_src_hbo(t, ip);
	regarray[idx] += src;

/* regadd (r[i] += i): both index and source are immediates. */
instr_regadd_rii_exec(struct rte_swx_pipeline *p)
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regadd (r[i] += i)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_imm(p, ip);
	src = ip->regarray.dstsrc_val;
	regarray[idx] += src;
6326 static struct metarray *
6327 metarray_find(struct rte_swx_pipeline *p, const char *name);
/*
 * Translate "metprefetch METARRAY idx" into one of the INSTR_METPREFETCH_*
 * opcodes: H/M when idx is a header/meta-data struct field, I when idx is
 * an immediate. Some lines (braces, local declarations of m/fidx/tokens,
 * the CHECK on m, the H-vs-M conditional, returns) are elided in this view.
 */
instr_metprefetch_translate(struct rte_swx_pipeline *p,
			    struct action *action,
			    struct instruction *instr,
			    struct instruction_data *data __rte_unused)
	char *metarray = tokens[1], *idx = tokens[2];
	uint32_t idx_struct_id, idx_val;

	/* Exactly: metprefetch METARRAY idx. */
	CHECK(n_tokens == 3, EINVAL);

	/* The meter array must have been declared. */
	m = metarray_find(p, metarray);

	/* METPREFETCH_H, METPREFETCH_M: idx is a struct field. */
	fidx = struct_field_parse(p, action, idx, &idx_struct_id);
	/* Variable-size fields cannot be used as an index. */
	CHECK(!fidx->var_size, EINVAL);
	instr->type = INSTR_METPREFETCH_M;
	/* (The condition selecting METPREFETCH_H for a header field is elided.) */
	instr->type = INSTR_METPREFETCH_H;

	instr->meter.metarray_id = m->id;
	instr->meter.idx.struct_id = (uint8_t)idx_struct_id;
	instr->meter.idx.n_bits = fidx->n_bits;
	instr->meter.idx.offset = fidx->offset / 8; /* Bits to bytes. */

	/* METPREFETCH_I: idx is an immediate. */
	idx_val = strtoul(idx, &idx, 0);
	CHECK(!idx[0], EINVAL); /* The whole token must parse as a number. */

	instr->type = INSTR_METPREFETCH_I;
	instr->meter.metarray_id = m->id;
	instr->meter.idx_val = idx_val;
/*
 * Translate "meter METARRAY INDEX LENGTH COLOR_IN COLOR_OUT".
 * The instruction type is picked from the operand kinds:
 *   INDEX: header field (H), metadata/extern/action field (M) or
 *          immediate (I); LENGTH: H or M; COLOR_IN: field (M) or
 *          immediate (I) — yielding INSTR_METER_{H|M|I}{H|M}{M|I}.
 * COLOR_OUT must always be a writable (non-action) field.
 * Returns 0 on success, -EINVAL via CHECK() on malformed input.
 * NOTE(review): the tokens/n_tokens parameters, some local declarations,
 * NULL checks and return statements are elided in this view.
 */
6374 instr_meter_translate(struct rte_swx_pipeline *p,
6375 struct action *action,
6378 struct instruction *instr,
6379 struct instruction_data *data __rte_unused)
6381 char *metarray = tokens[1], *idx = tokens[2], *length = tokens[3];
6382 char *color_in = tokens[4], *color_out = tokens[5];
6384 struct field *fidx, *flength, *fcin, *fcout;
6385 uint32_t idx_struct_id, length_struct_id;
6386 uint32_t color_in_struct_id, color_out_struct_id;
6388 CHECK(n_tokens == 6, EINVAL);
6390 m = metarray_find(p, metarray);
/* Each operand is resolved against header/metadata/extern/action structs. */
6393 fidx = struct_field_parse(p, action, idx, &idx_struct_id);
6395 flength = struct_field_parse(p, action, length, &length_struct_id);
6396 CHECK(flength, EINVAL);
6397 CHECK(!flength->var_size, EINVAL);
6399 fcin = struct_field_parse(p, action, color_in, &color_in_struct_id);
/* COLOR_OUT is parsed with action == NULL: action-argument fields are
 * read-only, so they are not acceptable as an output operand. */
6401 fcout = struct_field_parse(p, NULL, color_out, &color_out_struct_id);
6402 CHECK(fcout, EINVAL);
6403 CHECK(!fcout->var_size, EINVAL);
6405 /* index = HMEFT, length = HMEFT, color_in = MEFT, color_out = MEF. */
6407 CHECK(!fidx->var_size, EINVAL);
6408 CHECK(!fcin->var_size, EINVAL);
/* Default, then refine by the 'h' (header) prefix of each operand. */
6410 instr->type = INSTR_METER_MMM;
6411 if (idx[0] == 'h' && length[0] == 'h')
6412 instr->type = INSTR_METER_HHM;
6413 if (idx[0] == 'h' && length[0] != 'h')
6414 instr->type = INSTR_METER_HMM;
6415 if (idx[0] != 'h' && length[0] == 'h')
6416 instr->type = INSTR_METER_MHM;
6418 instr->meter.metarray_id = m->id;
6420 instr->meter.idx.struct_id = (uint8_t)idx_struct_id;
6421 instr->meter.idx.n_bits = fidx->n_bits;
/* Bit offsets converted to byte offsets for the operand descriptors. */
6422 instr->meter.idx.offset = fidx->offset / 8;
6424 instr->meter.length.struct_id = (uint8_t)length_struct_id;
6425 instr->meter.length.n_bits = flength->n_bits;
6426 instr->meter.length.offset = flength->offset / 8;
6428 instr->meter.color_in.struct_id = (uint8_t)color_in_struct_id;
6429 instr->meter.color_in.n_bits = fcin->n_bits;
6430 instr->meter.color_in.offset = fcin->offset / 8;
6432 instr->meter.color_out.struct_id = (uint8_t)color_out_struct_id;
6433 instr->meter.color_out.n_bits = fcout->n_bits;
6434 instr->meter.color_out.offset = fcout->offset / 8;
6439 /* index = HMEFT, length = HMEFT, color_in = I, color_out = MEF. */
6440 if (fidx && !fcin) {
6441 uint32_t color_in_val;
6443 CHECK(!fidx->var_size, EINVAL);
/* COLOR_IN did not resolve to a field: parse it as an immediate. */
6445 color_in_val = strtoul(color_in, &color_in, 0);
6446 CHECK(!color_in[0], EINVAL);
6448 instr->type = INSTR_METER_MMI;
6449 if (idx[0] == 'h' && length[0] == 'h')
6450 instr->type = INSTR_METER_HHI;
6451 if (idx[0] == 'h' && length[0] != 'h')
6452 instr->type = INSTR_METER_HMI;
6453 if (idx[0] != 'h' && length[0] == 'h')
6454 instr->type = INSTR_METER_MHI;
6456 instr->meter.metarray_id = m->id;
6458 instr->meter.idx.struct_id = (uint8_t)idx_struct_id;
6459 instr->meter.idx.n_bits = fidx->n_bits;
6460 instr->meter.idx.offset = fidx->offset / 8;
6462 instr->meter.length.struct_id = (uint8_t)length_struct_id;
6463 instr->meter.length.n_bits = flength->n_bits;
6464 instr->meter.length.offset = flength->offset / 8;
6466 instr->meter.color_in_val = color_in_val;
6468 instr->meter.color_out.struct_id = (uint8_t)color_out_struct_id;
6469 instr->meter.color_out.n_bits = fcout->n_bits;
6470 instr->meter.color_out.offset = fcout->offset / 8;
6475 /* index = I, length = HMEFT, color_in = MEFT, color_out = MEF. */
6476 if (!fidx && fcin) {
/* INDEX did not resolve to a field: parse it as an immediate. */
6479 idx_val = strtoul(idx, &idx, 0);
6480 CHECK(!idx[0], EINVAL);
6482 CHECK(!fcin->var_size, EINVAL);
6484 instr->type = INSTR_METER_IMM;
6485 if (length[0] == 'h')
6486 instr->type = INSTR_METER_IHM;
6488 instr->meter.metarray_id = m->id;
6490 instr->meter.idx_val = idx_val;
6492 instr->meter.length.struct_id = (uint8_t)length_struct_id;
6493 instr->meter.length.n_bits = flength->n_bits;
6494 instr->meter.length.offset = flength->offset / 8;
6496 instr->meter.color_in.struct_id = (uint8_t)color_in_struct_id;
6497 instr->meter.color_in.n_bits = fcin->n_bits;
6498 instr->meter.color_in.offset = fcin->offset / 8;
6500 instr->meter.color_out.struct_id = (uint8_t)color_out_struct_id;
6501 instr->meter.color_out.n_bits = fcout->n_bits;
6502 instr->meter.color_out.offset = fcout->offset / 8;
6507 /* index = I, length = HMEFT, color_in = I, color_out = MEF. */
6508 if (!fidx && !fcin) {
6509 uint32_t idx_val, color_in_val;
/* Both INDEX and COLOR_IN are immediates. */
6511 idx_val = strtoul(idx, &idx, 0);
6512 CHECK(!idx[0], EINVAL);
6514 color_in_val = strtoul(color_in, &color_in, 0);
6515 CHECK(!color_in[0], EINVAL);
6517 instr->type = INSTR_METER_IMI;
6518 if (length[0] == 'h')
6519 instr->type = INSTR_METER_IHI;
6521 instr->meter.metarray_id = m->id;
6523 instr->meter.idx_val = idx_val;
6525 instr->meter.length.struct_id = (uint8_t)length_struct_id;
6526 instr->meter.length.n_bits = flength->n_bits;
6527 instr->meter.length.offset = flength->offset / 8;
6529 instr->meter.color_in_val = color_in_val;
6531 instr->meter.color_out.struct_id = (uint8_t)color_out_struct_id;
6532 instr->meter.color_out.n_bits = fcout->n_bits;
6533 instr->meter.color_out.offset = fcout->offset / 8;
/*
 * Resolve the meter array entry addressed by the instruction's index
 * operand, read as a host-byte-order (metadata) field. The index is
 * masked to the field width and wrapped to the array size (size_mask),
 * so out-of-range indices cannot overrun the array.
 */
6541 static inline struct meter *
6542 instr_meter_idx_hbo(struct rte_swx_pipeline *p, struct thread *t, struct instruction *ip)
6544 struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
6546 uint8_t *idx_struct = t->structs[ip->meter.idx.struct_id];
6547 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->meter.idx.offset];
6548 uint64_t idx64 = *idx64_ptr;
/* Keep only the field's n_bits low bits of the 64-bit load. */
6549 uint64_t idx64_mask = UINT64_MAX >> (64 - (ip)->meter.idx.n_bits);
6550 uint64_t idx = idx64 & idx64_mask & r->size_mask;
6552 return &r->metarray[idx];
/* On little-endian hosts, header fields are stored in network byte order
 * and need a byte swap; on big-endian hosts the HBO variant suffices. */
6555 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
6557 static inline struct meter *
6558 instr_meter_idx_nbo(struct rte_swx_pipeline *p, struct thread *t, struct instruction *ip)
6560 struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
6562 uint8_t *idx_struct = t->structs[ip->meter.idx.struct_id];
6563 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->meter.idx.offset];
6564 uint64_t idx64 = *idx64_ptr;
/* Swap to host order, then shift the value down from the top bits. */
6565 uint64_t idx = (ntoh64(idx64) >> (64 - ip->meter.idx.n_bits)) & r->size_mask;
6567 return &r->metarray[idx];
/* Big-endian: NBO and HBO field layouts coincide. */
6572 #define instr_meter_idx_nbo instr_meter_idx_hbo
/* Immediate-index variant: the index is baked into the instruction. */
6576 static inline struct meter *
6577 instr_meter_idx_imm(struct rte_swx_pipeline *p, struct instruction *ip)
6579 struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
6581 uint64_t idx = ip->meter.idx_val & r->size_mask;
6583 return &r->metarray[idx];
/*
 * Read the packet length operand from a host-byte-order (metadata)
 * field: 64-bit load, masked down to the field width.
 */
6586 static inline uint32_t
6587 instr_meter_length_hbo(struct thread *t, struct instruction *ip)
6589 uint8_t *src_struct = t->structs[ip->meter.length.struct_id];
6590 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.length.offset];
6591 uint64_t src64 = *src64_ptr;
6592 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->meter.length.n_bits);
6593 uint64_t src = src64 & src64_mask;
6595 return (uint32_t)src;
/* Little-endian hosts need a byte swap for header (NBO) fields. */
6598 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
6600 static inline uint32_t
6601 instr_meter_length_nbo(struct thread *t, struct instruction *ip)
6603 uint8_t *src_struct = t->structs[ip->meter.length.struct_id];
6604 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.length.offset];
6605 uint64_t src64 = *src64_ptr;
/* Swap to host order; the value occupies the top n_bits after the swap. */
6606 uint64_t src = ntoh64(src64) >> (64 - ip->meter.length.n_bits);
6608 return (uint32_t)src;
/* Big-endian: NBO and HBO reads are identical. */
6613 #define instr_meter_length_nbo instr_meter_length_hbo
/* Read the input color operand from a host-byte-order field. */
6617 static inline enum rte_color
6618 instr_meter_color_in_hbo(struct thread *t, struct instruction *ip)
6620 uint8_t *src_struct = t->structs[ip->meter.color_in.struct_id];
6621 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.color_in.offset];
6622 uint64_t src64 = *src64_ptr;
6623 uint64_t src64_mask = UINT64_MAX >> (64 - ip->meter.color_in.n_bits);
6624 uint64_t src = src64 & src64_mask;
6626 return (enum rte_color)src;
/* Write the output color into its host-byte-order destination field,
 * preserving the destination's bits outside the field width. */
6630 instr_meter_color_out_hbo_set(struct thread *t, struct instruction *ip, enum rte_color color_out)
6632 uint8_t *dst_struct = t->structs[ip->meter.color_out.struct_id];
6633 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->meter.color_out.offset];
6634 uint64_t dst64 = *dst64_ptr;
6635 uint64_t dst64_mask = UINT64_MAX >> (64 - ip->meter.color_out.n_bits);
6637 uint64_t src = (uint64_t)color_out;
/* Read-modify-write: merge the new color under the field mask. */
6639 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
/*
 * metprefetch executors: resolve the meter entry and (on elided lines,
 * presumably via rte_prefetch0 — confirm against full source) warm the
 * cache ahead of the subsequent meter instruction.
 * Variants differ only in where the index comes from:
 *   h = header field (NBO), m = metadata field (HBO), i = immediate.
 */
6643 instr_metprefetch_h_exec(struct rte_swx_pipeline *p)
6645 struct thread *t = &p->threads[p->thread_id];
6646 struct instruction *ip = t->ip;
6649 TRACE("[Thread %2u] metprefetch (h)\n", p->thread_id);
6652 m = instr_meter_idx_nbo(p, t, ip);
/* Metadata-field (host-byte-order) index variant. */
6660 instr_metprefetch_m_exec(struct rte_swx_pipeline *p)
6662 struct thread *t = &p->threads[p->thread_id];
6663 struct instruction *ip = t->ip;
6666 TRACE("[Thread %2u] metprefetch (m)\n", p->thread_id);
6669 m = instr_meter_idx_hbo(p, t, ip);
/* Immediate index variant. */
6677 instr_metprefetch_i_exec(struct rte_swx_pipeline *p)
6679 struct thread *t = &p->threads[p->thread_id];
6680 struct instruction *ip = t->ip;
6683 TRACE("[Thread %2u] metprefetch (i)\n", p->thread_id);
6686 m = instr_meter_idx_imm(p, ip);
/*
 * meter instruction executors, suffix XYZ encodes operand locations:
 *   X = index:    h (header/NBO field), m (metadata/HBO field), i (imm)
 *   Y = length:   h or m
 *   Z = color_in: m (field) or i (immediate)
 * Each variant: resolves the meter entry, prefetches its stats, runs the
 * color-aware trTCM check (timestamped with the TSC), masks the result
 * with the meter's color mask, writes color_out and bumps the per-color
 * packet/byte counters. NOTE(review): the rte_meter_trtcm_color_aware_check
 * call's time/length/color_in arguments are on elided lines — confirm
 * against full source.
 */
6694 instr_meter_hhm_exec(struct rte_swx_pipeline *p)
6696 struct thread *t = &p->threads[p->thread_id];
6697 struct instruction *ip = t->ip;
6699 uint64_t time, n_pkts, n_bytes;
6701 enum rte_color color_in, color_out;
6703 TRACE("[Thread %2u] meter (hhm)\n", p->thread_id);
6706 m = instr_meter_idx_nbo(p, t, ip);
/* Prefetch the stats counters before the trTCM check computes. */
6707 rte_prefetch0(m->n_pkts);
6708 time = rte_get_tsc_cycles();
6709 length = instr_meter_length_nbo(t, ip);
6710 color_in = instr_meter_color_in_hbo(t, ip);
6712 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6713 &m->profile->profile,
/* Clamp the result to the colors this meter is allowed to produce. */
6718 color_out &= m->color_mask;
6720 n_pkts = m->n_pkts[color_out];
6721 n_bytes = m->n_bytes[color_out];
6723 instr_meter_color_out_hbo_set(t, ip, color_out);
6725 m->n_pkts[color_out] = n_pkts + 1;
6726 m->n_bytes[color_out] = n_bytes + length;
/* Header index, header length, immediate color_in. */
6733 instr_meter_hhi_exec(struct rte_swx_pipeline *p)
6735 struct thread *t = &p->threads[p->thread_id];
6736 struct instruction *ip = t->ip;
6738 uint64_t time, n_pkts, n_bytes;
6740 enum rte_color color_in, color_out;
6742 TRACE("[Thread %2u] meter (hhi)\n", p->thread_id);
6745 m = instr_meter_idx_nbo(p, t, ip);
6746 rte_prefetch0(m->n_pkts);
6747 time = rte_get_tsc_cycles();
6748 length = instr_meter_length_nbo(t, ip);
6749 color_in = (enum rte_color)ip->meter.color_in_val;
6751 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6752 &m->profile->profile,
6757 color_out &= m->color_mask;
6759 n_pkts = m->n_pkts[color_out];
6760 n_bytes = m->n_bytes[color_out];
6762 instr_meter_color_out_hbo_set(t, ip, color_out);
6764 m->n_pkts[color_out] = n_pkts + 1;
6765 m->n_bytes[color_out] = n_bytes + length;
/* Header index, metadata length, field color_in. */
6772 instr_meter_hmm_exec(struct rte_swx_pipeline *p)
6774 struct thread *t = &p->threads[p->thread_id];
6775 struct instruction *ip = t->ip;
6777 uint64_t time, n_pkts, n_bytes;
6779 enum rte_color color_in, color_out;
6781 TRACE("[Thread %2u] meter (hmm)\n", p->thread_id);
6784 m = instr_meter_idx_nbo(p, t, ip);
6785 rte_prefetch0(m->n_pkts);
6786 time = rte_get_tsc_cycles();
6787 length = instr_meter_length_hbo(t, ip);
6788 color_in = instr_meter_color_in_hbo(t, ip);
6790 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6791 &m->profile->profile,
6796 color_out &= m->color_mask;
6798 n_pkts = m->n_pkts[color_out];
6799 n_bytes = m->n_bytes[color_out];
6801 instr_meter_color_out_hbo_set(t, ip, color_out);
6803 m->n_pkts[color_out] = n_pkts + 1;
6804 m->n_bytes[color_out] = n_bytes + length;
/* Header index, metadata length, immediate color_in. */
6810 instr_meter_hmi_exec(struct rte_swx_pipeline *p)
6812 struct thread *t = &p->threads[p->thread_id];
6813 struct instruction *ip = t->ip;
6815 uint64_t time, n_pkts, n_bytes;
6817 enum rte_color color_in, color_out;
6819 TRACE("[Thread %2u] meter (hmi)\n", p->thread_id);
6822 m = instr_meter_idx_nbo(p, t, ip);
6823 rte_prefetch0(m->n_pkts);
6824 time = rte_get_tsc_cycles();
6825 length = instr_meter_length_hbo(t, ip);
6826 color_in = (enum rte_color)ip->meter.color_in_val;
6828 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6829 &m->profile->profile,
6834 color_out &= m->color_mask;
6836 n_pkts = m->n_pkts[color_out];
6837 n_bytes = m->n_bytes[color_out];
6839 instr_meter_color_out_hbo_set(t, ip, color_out);
6841 m->n_pkts[color_out] = n_pkts + 1;
6842 m->n_bytes[color_out] = n_bytes + length;
/* Metadata index, header length, field color_in. */
6849 instr_meter_mhm_exec(struct rte_swx_pipeline *p)
6851 struct thread *t = &p->threads[p->thread_id];
6852 struct instruction *ip = t->ip;
6854 uint64_t time, n_pkts, n_bytes;
6856 enum rte_color color_in, color_out;
6858 TRACE("[Thread %2u] meter (mhm)\n", p->thread_id);
6861 m = instr_meter_idx_hbo(p, t, ip);
6862 rte_prefetch0(m->n_pkts);
6863 time = rte_get_tsc_cycles();
6864 length = instr_meter_length_nbo(t, ip);
6865 color_in = instr_meter_color_in_hbo(t, ip);
6867 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6868 &m->profile->profile,
6873 color_out &= m->color_mask;
6875 n_pkts = m->n_pkts[color_out];
6876 n_bytes = m->n_bytes[color_out];
6878 instr_meter_color_out_hbo_set(t, ip, color_out);
6880 m->n_pkts[color_out] = n_pkts + 1;
6881 m->n_bytes[color_out] = n_bytes + length;
/* Metadata index, header length, immediate color_in. */
6888 instr_meter_mhi_exec(struct rte_swx_pipeline *p)
6890 struct thread *t = &p->threads[p->thread_id];
6891 struct instruction *ip = t->ip;
6893 uint64_t time, n_pkts, n_bytes;
6895 enum rte_color color_in, color_out;
6897 TRACE("[Thread %2u] meter (mhi)\n", p->thread_id);
6900 m = instr_meter_idx_hbo(p, t, ip);
6901 rte_prefetch0(m->n_pkts);
6902 time = rte_get_tsc_cycles();
6903 length = instr_meter_length_nbo(t, ip);
6904 color_in = (enum rte_color)ip->meter.color_in_val;
6906 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6907 &m->profile->profile,
6912 color_out &= m->color_mask;
6914 n_pkts = m->n_pkts[color_out];
6915 n_bytes = m->n_bytes[color_out];
6917 instr_meter_color_out_hbo_set(t, ip, color_out);
6919 m->n_pkts[color_out] = n_pkts + 1;
6920 m->n_bytes[color_out] = n_bytes + length;
/*
 * Remaining meter executors (same structure as the h* variants): suffix
 * XYZ = index (m = metadata field, i = immediate) / length (h = header,
 * m = metadata) / color_in (m = field, i = immediate). Each resolves the
 * meter entry, prefetches its stats, runs the color-aware trTCM check,
 * masks with the meter's color mask, writes color_out and updates the
 * per-color packet/byte counters.
 */
6927 instr_meter_mmm_exec(struct rte_swx_pipeline *p)
6929 struct thread *t = &p->threads[p->thread_id];
6930 struct instruction *ip = t->ip;
6932 uint64_t time, n_pkts, n_bytes;
6934 enum rte_color color_in, color_out;
6936 TRACE("[Thread %2u] meter (mmm)\n", p->thread_id);
6939 m = instr_meter_idx_hbo(p, t, ip);
6940 rte_prefetch0(m->n_pkts);
6941 time = rte_get_tsc_cycles();
6942 length = instr_meter_length_hbo(t, ip);
6943 color_in = instr_meter_color_in_hbo(t, ip);
6945 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6946 &m->profile->profile,
6951 color_out &= m->color_mask;
6953 n_pkts = m->n_pkts[color_out];
6954 n_bytes = m->n_bytes[color_out];
6956 instr_meter_color_out_hbo_set(t, ip, color_out);
6958 m->n_pkts[color_out] = n_pkts + 1;
6959 m->n_bytes[color_out] = n_bytes + length;
/* Metadata index, metadata length, immediate color_in. */
6966 instr_meter_mmi_exec(struct rte_swx_pipeline *p)
6968 struct thread *t = &p->threads[p->thread_id];
6969 struct instruction *ip = t->ip;
6971 uint64_t time, n_pkts, n_bytes;
6973 enum rte_color color_in, color_out;
6975 TRACE("[Thread %2u] meter (mmi)\n", p->thread_id);
6978 m = instr_meter_idx_hbo(p, t, ip);
6979 rte_prefetch0(m->n_pkts);
6980 time = rte_get_tsc_cycles();
6981 length = instr_meter_length_hbo(t, ip);
6982 color_in = (enum rte_color)ip->meter.color_in_val;
6984 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6985 &m->profile->profile,
6990 color_out &= m->color_mask;
6992 n_pkts = m->n_pkts[color_out];
6993 n_bytes = m->n_bytes[color_out];
6995 instr_meter_color_out_hbo_set(t, ip, color_out);
6997 m->n_pkts[color_out] = n_pkts + 1;
6998 m->n_bytes[color_out] = n_bytes + length;
/* Immediate index, header length, field color_in. */
7005 instr_meter_ihm_exec(struct rte_swx_pipeline *p)
7007 struct thread *t = &p->threads[p->thread_id];
7008 struct instruction *ip = t->ip;
7010 uint64_t time, n_pkts, n_bytes;
7012 enum rte_color color_in, color_out;
7014 TRACE("[Thread %2u] meter (ihm)\n", p->thread_id);
7017 m = instr_meter_idx_imm(p, ip);
7018 rte_prefetch0(m->n_pkts);
7019 time = rte_get_tsc_cycles();
7020 length = instr_meter_length_nbo(t, ip);
7021 color_in = instr_meter_color_in_hbo(t, ip);
7023 color_out = rte_meter_trtcm_color_aware_check(&m->m,
7024 &m->profile->profile,
7029 color_out &= m->color_mask;
7031 n_pkts = m->n_pkts[color_out];
7032 n_bytes = m->n_bytes[color_out];
7034 instr_meter_color_out_hbo_set(t, ip, color_out);
7036 m->n_pkts[color_out] = n_pkts + 1;
7037 m->n_bytes[color_out] = n_bytes + length;
/* Immediate index, header length, immediate color_in. */
7044 instr_meter_ihi_exec(struct rte_swx_pipeline *p)
7046 struct thread *t = &p->threads[p->thread_id];
7047 struct instruction *ip = t->ip;
7049 uint64_t time, n_pkts, n_bytes;
7051 enum rte_color color_in, color_out;
7053 TRACE("[Thread %2u] meter (ihi)\n", p->thread_id);
7056 m = instr_meter_idx_imm(p, ip);
7057 rte_prefetch0(m->n_pkts);
7058 time = rte_get_tsc_cycles();
7059 length = instr_meter_length_nbo(t, ip);
7060 color_in = (enum rte_color)ip->meter.color_in_val;
7062 color_out = rte_meter_trtcm_color_aware_check(&m->m,
7063 &m->profile->profile,
7068 color_out &= m->color_mask;
7070 n_pkts = m->n_pkts[color_out];
7071 n_bytes = m->n_bytes[color_out];
7073 instr_meter_color_out_hbo_set(t, ip, color_out);
7075 m->n_pkts[color_out] = n_pkts + 1;
7076 m->n_bytes[color_out] = n_bytes + length;
/* Immediate index, metadata length, field color_in. */
7083 instr_meter_imm_exec(struct rte_swx_pipeline *p)
7085 struct thread *t = &p->threads[p->thread_id];
7086 struct instruction *ip = t->ip;
7088 uint64_t time, n_pkts, n_bytes;
7090 enum rte_color color_in, color_out;
7092 TRACE("[Thread %2u] meter (imm)\n", p->thread_id);
7095 m = instr_meter_idx_imm(p, ip);
7096 rte_prefetch0(m->n_pkts);
7097 time = rte_get_tsc_cycles();
7098 length = instr_meter_length_hbo(t, ip);
7099 color_in = instr_meter_color_in_hbo(t, ip);
7101 color_out = rte_meter_trtcm_color_aware_check(&m->m,
7102 &m->profile->profile,
7107 color_out &= m->color_mask;
7109 n_pkts = m->n_pkts[color_out];
7110 n_bytes = m->n_bytes[color_out];
7112 instr_meter_color_out_hbo_set(t, ip, color_out);
7114 m->n_pkts[color_out] = n_pkts + 1;
7115 m->n_bytes[color_out] = n_bytes + length;
/* Immediate index, metadata length, immediate color_in. */
7121 instr_meter_imi_exec(struct rte_swx_pipeline *p)
7123 struct thread *t = &p->threads[p->thread_id];
7124 struct instruction *ip = t->ip;
7126 uint64_t time, n_pkts, n_bytes;
7128 enum rte_color color_in, color_out;
7130 TRACE("[Thread %2u] meter (imi)\n", p->thread_id);
7133 m = instr_meter_idx_imm(p, ip);
7134 rte_prefetch0(m->n_pkts);
7135 time = rte_get_tsc_cycles();
7136 length = instr_meter_length_hbo(t, ip);
7137 color_in = (enum rte_color)ip->meter.color_in_val;
7139 color_out = rte_meter_trtcm_color_aware_check(&m->m,
7140 &m->profile->profile,
7145 color_out &= m->color_mask;
7147 n_pkts = m->n_pkts[color_out];
7148 n_bytes = m->n_bytes[color_out];
7150 instr_meter_color_out_hbo_set(t, ip, color_out);
7152 m->n_pkts[color_out] = n_pkts + 1;
7153 m->n_bytes[color_out] = n_bytes + length;
/* Forward declaration: look up an action by name within pipeline p. */
7162 static struct action *
7163 action_find(struct rte_swx_pipeline *p, const char *name);
/*
 * Translate "jmp LABEL": unconditional jump. The label is recorded in the
 * per-instruction data and the target ip is resolved in a later pass.
 */
7166 instr_jmp_translate(struct rte_swx_pipeline *p __rte_unused,
7167 struct action *action __rte_unused,
7170 struct instruction *instr,
7171 struct instruction_data *data)
7173 CHECK(n_tokens == 2, EINVAL);
7175 strcpy(data->jmp_label, tokens[1]);
7177 instr->type = INSTR_JMP;
7178 instr->jmp.ip = NULL; /* Resolved later. */
/* Translate "jmpv LABEL HEADER": jump when HEADER is valid. */
7183 instr_jmp_valid_translate(struct rte_swx_pipeline *p,
7184 struct action *action __rte_unused,
7187 struct instruction *instr,
7188 struct instruction_data *data)
7192 CHECK(n_tokens == 3, EINVAL);
7194 strcpy(data->jmp_label, tokens[1]);
7196 h = header_parse(p, tokens[2]);
7199 instr->type = INSTR_JMP_VALID;
7200 instr->jmp.ip = NULL; /* Resolved later. */
7201 instr->jmp.header_id = h->id;
/* Translate "jmpnv LABEL HEADER": jump when HEADER is invalid. */
7206 instr_jmp_invalid_translate(struct rte_swx_pipeline *p,
7207 struct action *action __rte_unused,
7210 struct instruction *instr,
7211 struct instruction_data *data)
7215 CHECK(n_tokens == 3, EINVAL);
7217 strcpy(data->jmp_label, tokens[1]);
7219 h = header_parse(p, tokens[2]);
7222 instr->type = INSTR_JMP_INVALID;
7223 instr->jmp.ip = NULL; /* Resolved later. */
7224 instr->jmp.header_id = h->id;
/* Translate "jmph LABEL": jump on table hit. Only legal outside actions
 * (CHECK(!action)), since the hit flag belongs to the apply block. */
7229 instr_jmp_hit_translate(struct rte_swx_pipeline *p __rte_unused,
7230 struct action *action,
7233 struct instruction *instr,
7234 struct instruction_data *data)
7236 CHECK(!action, EINVAL);
7237 CHECK(n_tokens == 2, EINVAL);
7239 strcpy(data->jmp_label, tokens[1]);
7241 instr->type = INSTR_JMP_HIT;
7242 instr->jmp.ip = NULL; /* Resolved later. */
/* Translate "jmpnh LABEL": jump on table miss. Only legal outside actions. */
7247 instr_jmp_miss_translate(struct rte_swx_pipeline *p __rte_unused,
7248 struct action *action,
7251 struct instruction *instr,
7252 struct instruction_data *data)
7254 CHECK(!action, EINVAL);
7255 CHECK(n_tokens == 2, EINVAL);
7257 strcpy(data->jmp_label, tokens[1]);
7259 instr->type = INSTR_JMP_MISS;
7260 instr->jmp.ip = NULL; /* Resolved later. */
/* Translate "jmpa LABEL ACTION": jump when the last table lookup selected
 * ACTION. Only legal outside actions. */
7265 instr_jmp_action_hit_translate(struct rte_swx_pipeline *p,
7266 struct action *action,
7269 struct instruction *instr,
7270 struct instruction_data *data)
7274 CHECK(!action, EINVAL);
7275 CHECK(n_tokens == 3, EINVAL);
7277 strcpy(data->jmp_label, tokens[1]);
7279 a = action_find(p, tokens[2]);
7282 instr->type = INSTR_JMP_ACTION_HIT;
7283 instr->jmp.ip = NULL; /* Resolved later. */
7284 instr->jmp.action_id = a->id;
/* Translate "jmpna LABEL ACTION": jump when the last table lookup did NOT
 * select ACTION. Only legal outside actions. */
7289 instr_jmp_action_miss_translate(struct rte_swx_pipeline *p,
7290 struct action *action,
7293 struct instruction *instr,
7294 struct instruction_data *data)
7298 CHECK(!action, EINVAL);
7299 CHECK(n_tokens == 3, EINVAL);
7301 strcpy(data->jmp_label, tokens[1]);
7303 a = action_find(p, tokens[2]);
7306 instr->type = INSTR_JMP_ACTION_MISS;
7307 instr->jmp.ip = NULL; /* Resolved later. */
7308 instr->jmp.action_id = a->id;
/*
 * Translate "jmpeq LABEL A B": jump when A == B. Operand A must be a
 * field; B is either a field (variant picked from the h/m prefixes of A
 * and B) or an immediate (INSTR_JMP_EQ_I). Immediates compared against a
 * header field are pre-converted to network byte order at translation
 * time so the runtime comparison is a plain 64-bit compare.
 */
7313 instr_jmp_eq_translate(struct rte_swx_pipeline *p,
7314 struct action *action,
7317 struct instruction *instr,
7318 struct instruction_data *data)
7320 char *a = tokens[2], *b = tokens[3];
7321 struct field *fa, *fb;
7323 uint32_t a_struct_id, b_struct_id;
7325 CHECK(n_tokens == 4, EINVAL);
7327 strcpy(data->jmp_label, tokens[1]);
7329 fa = struct_field_parse(p, action, a, &a_struct_id);
7331 CHECK(!fa->var_size, EINVAL);
7333 /* JMP_EQ, JMP_EQ_MH, JMP_EQ_HM, JMP_EQ_HH. */
7334 fb = struct_field_parse(p, action, b, &b_struct_id);
7336 CHECK(!fb->var_size, EINVAL);
7338 instr->type = INSTR_JMP_EQ;
7339 if (a[0] != 'h' && b[0] == 'h')
7340 instr->type = INSTR_JMP_EQ_MH;
7341 if (a[0] == 'h' && b[0] != 'h')
7342 instr->type = INSTR_JMP_EQ_HM;
7343 if (a[0] == 'h' && b[0] == 'h')
7344 instr->type = INSTR_JMP_EQ_HH;
7345 instr->jmp.ip = NULL; /* Resolved later. */
7347 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7348 instr->jmp.a.n_bits = fa->n_bits;
7349 instr->jmp.a.offset = fa->offset / 8;
7350 instr->jmp.b.struct_id = (uint8_t)b_struct_id;
7351 instr->jmp.b.n_bits = fb->n_bits;
7352 instr->jmp.b.offset = fb->offset / 8;
/* JMP_EQ_I: B is an immediate; must be fully consumed by strtoull. */
7357 b_val = strtoull(b, &b, 0);
7358 CHECK(!b[0], EINVAL);
/* When A is a header field, store the immediate already byte-swapped
 * and aligned to A's width (presumably guarded by a[0] == 'h' on an
 * elided line — confirm against full source). */
7361 b_val = hton64(b_val) >> (64 - fa->n_bits);
7363 instr->type = INSTR_JMP_EQ_I;
7364 instr->jmp.ip = NULL; /* Resolved later. */
7365 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7366 instr->jmp.a.n_bits = fa->n_bits;
7367 instr->jmp.a.offset = fa->offset / 8;
7368 instr->jmp.b_val = b_val;
/*
 * Translate "jmpneq LABEL A B": jump when A != B. Same operand handling
 * as jmpeq, with the NEQ instruction variants.
 */
7373 instr_jmp_neq_translate(struct rte_swx_pipeline *p,
7374 struct action *action,
7377 struct instruction *instr,
7378 struct instruction_data *data)
7380 char *a = tokens[2], *b = tokens[3];
7381 struct field *fa, *fb;
7383 uint32_t a_struct_id, b_struct_id;
7385 CHECK(n_tokens == 4, EINVAL);
7387 strcpy(data->jmp_label, tokens[1]);
7389 fa = struct_field_parse(p, action, a, &a_struct_id);
7391 CHECK(!fa->var_size, EINVAL);
7393 /* JMP_NEQ, JMP_NEQ_MH, JMP_NEQ_HM, JMP_NEQ_HH. */
7394 fb = struct_field_parse(p, action, b, &b_struct_id);
7396 CHECK(!fb->var_size, EINVAL);
7398 instr->type = INSTR_JMP_NEQ;
7399 if (a[0] != 'h' && b[0] == 'h')
7400 instr->type = INSTR_JMP_NEQ_MH;
7401 if (a[0] == 'h' && b[0] != 'h')
7402 instr->type = INSTR_JMP_NEQ_HM;
7403 if (a[0] == 'h' && b[0] == 'h')
7404 instr->type = INSTR_JMP_NEQ_HH;
7405 instr->jmp.ip = NULL; /* Resolved later. */
7407 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7408 instr->jmp.a.n_bits = fa->n_bits;
7409 instr->jmp.a.offset = fa->offset / 8;
7410 instr->jmp.b.struct_id = (uint8_t)b_struct_id;
7411 instr->jmp.b.n_bits = fb->n_bits;
7412 instr->jmp.b.offset = fb->offset / 8;
/* JMP_NEQ_I: immediate B, pre-swapped for header-field A as in jmpeq. */
7417 b_val = strtoull(b, &b, 0);
7418 CHECK(!b[0], EINVAL);
7421 b_val = hton64(b_val) >> (64 - fa->n_bits);
7423 instr->type = INSTR_JMP_NEQ_I;
7424 instr->jmp.ip = NULL; /* Resolved later. */
7425 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7426 instr->jmp.a.n_bits = fa->n_bits;
7427 instr->jmp.a.offset = fa->offset / 8;
7428 instr->jmp.b_val = b_val;
/*
 * Translate "jmplt LABEL A B": jump when A < B. A must be a field; B is a
 * field (variant from the h/m prefixes) or an immediate (JMP_LT_MI when A
 * is metadata, JMP_LT_HI when A is a header field). Unlike eq/neq, the
 * ordered comparison cannot use a pre-swapped immediate, hence the
 * distinct MI/HI variants.
 */
7433 instr_jmp_lt_translate(struct rte_swx_pipeline *p,
7434 struct action *action,
7437 struct instruction *instr,
7438 struct instruction_data *data)
7440 char *a = tokens[2], *b = tokens[3];
7441 struct field *fa, *fb;
7443 uint32_t a_struct_id, b_struct_id;
7445 CHECK(n_tokens == 4, EINVAL);
7447 strcpy(data->jmp_label, tokens[1]);
7449 fa = struct_field_parse(p, action, a, &a_struct_id);
7451 CHECK(!fa->var_size, EINVAL);
7453 /* JMP_LT, JMP_LT_MH, JMP_LT_HM, JMP_LT_HH. */
7454 fb = struct_field_parse(p, action, b, &b_struct_id);
7456 CHECK(!fb->var_size, EINVAL);
7458 instr->type = INSTR_JMP_LT;
7459 if (a[0] == 'h' && b[0] != 'h')
7460 instr->type = INSTR_JMP_LT_HM;
7461 if (a[0] != 'h' && b[0] == 'h')
7462 instr->type = INSTR_JMP_LT_MH;
7463 if (a[0] == 'h' && b[0] == 'h')
7464 instr->type = INSTR_JMP_LT_HH;
7465 instr->jmp.ip = NULL; /* Resolved later. */
7467 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7468 instr->jmp.a.n_bits = fa->n_bits;
7469 instr->jmp.a.offset = fa->offset / 8;
7470 instr->jmp.b.struct_id = (uint8_t)b_struct_id;
7471 instr->jmp.b.n_bits = fb->n_bits;
7472 instr->jmp.b.offset = fb->offset / 8;
7476 /* JMP_LT_MI, JMP_LT_HI. */
7477 b_val = strtoull(b, &b, 0);
7478 CHECK(!b[0], EINVAL);
7480 instr->type = INSTR_JMP_LT_MI;
/* HI variant selected when A is a header field (guard presumably on an
 * elided line — confirm against full source). */
7482 instr->type = INSTR_JMP_LT_HI;
7483 instr->jmp.ip = NULL; /* Resolved later. */
7485 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7486 instr->jmp.a.n_bits = fa->n_bits;
7487 instr->jmp.a.offset = fa->offset / 8;
7488 instr->jmp.b_val = b_val;
/*
 * Translate "jmpgt LABEL A B": jump when A > B. Mirror image of jmplt
 * with the GT instruction variants.
 */
7493 instr_jmp_gt_translate(struct rte_swx_pipeline *p,
7494 struct action *action,
7497 struct instruction *instr,
7498 struct instruction_data *data)
7500 char *a = tokens[2], *b = tokens[3];
7501 struct field *fa, *fb;
7503 uint32_t a_struct_id, b_struct_id;
7505 CHECK(n_tokens == 4, EINVAL);
7507 strcpy(data->jmp_label, tokens[1]);
7509 fa = struct_field_parse(p, action, a, &a_struct_id);
7511 CHECK(!fa->var_size, EINVAL);
7513 /* JMP_GT, JMP_GT_MH, JMP_GT_HM, JMP_GT_HH. */
7514 fb = struct_field_parse(p, action, b, &b_struct_id);
7516 CHECK(!fb->var_size, EINVAL);
7518 instr->type = INSTR_JMP_GT;
7519 if (a[0] == 'h' && b[0] != 'h')
7520 instr->type = INSTR_JMP_GT_HM;
7521 if (a[0] != 'h' && b[0] == 'h')
7522 instr->type = INSTR_JMP_GT_MH;
7523 if (a[0] == 'h' && b[0] == 'h')
7524 instr->type = INSTR_JMP_GT_HH;
7525 instr->jmp.ip = NULL; /* Resolved later. */
7527 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7528 instr->jmp.a.n_bits = fa->n_bits;
7529 instr->jmp.a.offset = fa->offset / 8;
7530 instr->jmp.b.struct_id = (uint8_t)b_struct_id;
7531 instr->jmp.b.n_bits = fb->n_bits;
7532 instr->jmp.b.offset = fb->offset / 8;
7536 /* JMP_GT_MI, JMP_GT_HI. */
7537 b_val = strtoull(b, &b, 0);
7538 CHECK(!b[0], EINVAL);
7540 instr->type = INSTR_JMP_GT_MI;
7542 instr->type = INSTR_JMP_GT_HI;
7543 instr->jmp.ip = NULL; /* Resolved later. */
7545 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7546 instr->jmp.a.n_bits = fa->n_bits;
7547 instr->jmp.a.offset = fa->offset / 8;
7548 instr->jmp.b_val = b_val;
/* Unconditional jump: set the thread's ip to the pre-resolved target. */
7553 instr_jmp_exec(struct rte_swx_pipeline *p)
7555 struct thread *t = &p->threads[p->thread_id];
7556 struct instruction *ip = t->ip;
7558 TRACE("[Thread %2u] jmp\n", p->thread_id);
7560 thread_ip_set(t, ip->jmp.ip);
/* jmpv: branch to target when the header is valid, else fall through. */
7564 instr_jmp_valid_exec(struct rte_swx_pipeline *p)
7566 struct thread *t = &p->threads[p->thread_id];
7567 struct instruction *ip = t->ip;
7568 uint32_t header_id = ip->jmp.header_id;
7570 TRACE("[Thread %2u] jmpv\n", p->thread_id);
7572 t->ip = HEADER_VALID(t, header_id) ? ip->jmp.ip : (t->ip + 1);
/* jmpnv: branch to target when the header is invalid, else fall through. */
7576 instr_jmp_invalid_exec(struct rte_swx_pipeline *p)
7578 struct thread *t = &p->threads[p->thread_id];
7579 struct instruction *ip = t->ip;
7580 uint32_t header_id = ip->jmp.header_id;
7582 TRACE("[Thread %2u] jmpnv\n", p->thread_id);
7584 t->ip = HEADER_VALID(t, header_id) ? (t->ip + 1) : ip->jmp.ip;
/* jmph: branchless select on the hit flag (t->hit indexes ip_next:
 * 0 = miss -> fall through, 1 = hit -> jump target). */
7588 instr_jmp_hit_exec(struct rte_swx_pipeline *p)
7590 struct thread *t = &p->threads[p->thread_id];
7591 struct instruction *ip = t->ip;
7592 struct instruction *ip_next[] = {t->ip + 1, ip->jmp.ip};
7594 TRACE("[Thread %2u] jmph\n", p->thread_id);
7596 t->ip = ip_next[t->hit];
/* jmpnh: same branchless select with the table entries swapped, so a
 * miss (t->hit == 0) takes the jump target. */
7600 instr_jmp_miss_exec(struct rte_swx_pipeline *p)
7602 struct thread *t = &p->threads[p->thread_id];
7603 struct instruction *ip = t->ip;
7604 struct instruction *ip_next[] = {ip->jmp.ip, t->ip + 1};
7606 TRACE("[Thread %2u] jmpnh\n", p->thread_id);
7608 t->ip = ip_next[t->hit];
/* jmpa: branch when the last lookup selected the given action. */
7612 instr_jmp_action_hit_exec(struct rte_swx_pipeline *p)
7614 struct thread *t = &p->threads[p->thread_id];
7615 struct instruction *ip = t->ip;
7617 TRACE("[Thread %2u] jmpa\n", p->thread_id);
7619 t->ip = (ip->jmp.action_id == t->action_id) ? ip->jmp.ip : (t->ip + 1);
/* jmpna: branch when the last lookup selected a different action. */
7623 instr_jmp_action_miss_exec(struct rte_swx_pipeline *p)
7625 struct thread *t = &p->threads[p->thread_id];
7626 struct instruction *ip = t->ip;
7628 TRACE("[Thread %2u] jmpna\n", p->thread_id);
7630 t->ip = (ip->jmp.action_id == t->action_id) ? (t->ip + 1) : ip->jmp.ip;
/*
 * jmpeq/jmpneq executors. Each variant delegates the operand fetch and
 * the conditional ip update to a JMP_CMP* helper macro (defined earlier
 * in the file, outside this view): plain = both operands HBO, MH/HM/HH =
 * per-operand byte-order handling, I = field vs pre-converted immediate.
 */
7634 instr_jmp_eq_exec(struct rte_swx_pipeline *p)
7636 struct thread *t = &p->threads[p->thread_id];
7637 struct instruction *ip = t->ip;
7639 TRACE("[Thread %2u] jmpeq\n", p->thread_id);
/* Metadata A, header B. */
7645 instr_jmp_eq_mh_exec(struct rte_swx_pipeline *p)
7647 struct thread *t = &p->threads[p->thread_id];
7648 struct instruction *ip = t->ip;
7650 TRACE("[Thread %2u] jmpeq (mh)\n", p->thread_id);
7652 JMP_CMP_MH(t, ip, ==);
/* Header A, metadata B. */
7656 instr_jmp_eq_hm_exec(struct rte_swx_pipeline *p)
7658 struct thread *t = &p->threads[p->thread_id];
7659 struct instruction *ip = t->ip;
7661 TRACE("[Thread %2u] jmpeq (hm)\n", p->thread_id);
7663 JMP_CMP_HM(t, ip, ==);
/* Both header fields: equality can compare raw NBO values directly,
 * hence the FAST macro variant. */
7667 instr_jmp_eq_hh_exec(struct rte_swx_pipeline *p)
7669 struct thread *t = &p->threads[p->thread_id];
7670 struct instruction *ip = t->ip;
7672 TRACE("[Thread %2u] jmpeq (hh)\n", p->thread_id);
7674 JMP_CMP_HH_FAST(t, ip, ==);
/* Field A vs immediate B (immediate pre-converted at translation). */
7678 instr_jmp_eq_i_exec(struct rte_swx_pipeline *p)
7680 struct thread *t = &p->threads[p->thread_id];
7681 struct instruction *ip = t->ip;
7683 TRACE("[Thread %2u] jmpeq (i)\n", p->thread_id);
7685 JMP_CMP_I(t, ip, ==);
/* jmpneq variants: identical structure with the != operator. */
7689 instr_jmp_neq_exec(struct rte_swx_pipeline *p)
7691 struct thread *t = &p->threads[p->thread_id];
7692 struct instruction *ip = t->ip;
7694 TRACE("[Thread %2u] jmpneq\n", p->thread_id);
7700 instr_jmp_neq_mh_exec(struct rte_swx_pipeline *p)
7702 struct thread *t = &p->threads[p->thread_id];
7703 struct instruction *ip = t->ip;
7705 TRACE("[Thread %2u] jmpneq (mh)\n", p->thread_id);
7707 JMP_CMP_MH(t, ip, !=);
7711 instr_jmp_neq_hm_exec(struct rte_swx_pipeline *p)
7713 struct thread *t = &p->threads[p->thread_id];
7714 struct instruction *ip = t->ip;
7716 TRACE("[Thread %2u] jmpneq (hm)\n", p->thread_id);
7718 JMP_CMP_HM(t, ip, !=);
7722 instr_jmp_neq_hh_exec(struct rte_swx_pipeline *p)
7724 struct thread *t = &p->threads[p->thread_id];
7725 struct instruction *ip = t->ip;
7727 TRACE("[Thread %2u] jmpneq (hh)\n", p->thread_id);
7729 JMP_CMP_HH_FAST(t, ip, !=);
7733 instr_jmp_neq_i_exec(struct rte_swx_pipeline *p)
7735 struct thread *t = &p->threads[p->thread_id];
7736 struct instruction *ip = t->ip;
7738 TRACE("[Thread %2u] jmpneq (i)\n", p->thread_id);
7740 JMP_CMP_I(t, ip, !=);
7744 instr_jmp_lt_exec(struct rte_swx_pipeline *p)
7746 struct thread *t = &p->threads[p->thread_id];
7747 struct instruction *ip = t->ip;
7749 TRACE("[Thread %2u] jmplt\n", p->thread_id);
7755 instr_jmp_lt_mh_exec(struct rte_swx_pipeline *p)
7757 struct thread *t = &p->threads[p->thread_id];
7758 struct instruction *ip = t->ip;
7760 TRACE("[Thread %2u] jmplt (mh)\n", p->thread_id);
7762 JMP_CMP_MH(t, ip, <);
7766 instr_jmp_lt_hm_exec(struct rte_swx_pipeline *p)
7768 struct thread *t = &p->threads[p->thread_id];
7769 struct instruction *ip = t->ip;
7771 TRACE("[Thread %2u] jmplt (hm)\n", p->thread_id);
7773 JMP_CMP_HM(t, ip, <);
7777 instr_jmp_lt_hh_exec(struct rte_swx_pipeline *p)
7779 struct thread *t = &p->threads[p->thread_id];
7780 struct instruction *ip = t->ip;
7782 TRACE("[Thread %2u] jmplt (hh)\n", p->thread_id);
7784 JMP_CMP_HH(t, ip, <);
7788 instr_jmp_lt_mi_exec(struct rte_swx_pipeline *p)
7790 struct thread *t = &p->threads[p->thread_id];
7791 struct instruction *ip = t->ip;
7793 TRACE("[Thread %2u] jmplt (mi)\n", p->thread_id);
7795 JMP_CMP_MI(t, ip, <);
/* jmplt (hi): header field < immediate value. */
7799 instr_jmp_lt_hi_exec(struct rte_swx_pipeline *p)
7801 struct thread *t = &p->threads[p->thread_id];
7802 struct instruction *ip = t->ip;
7804 TRACE("[Thread %2u] jmplt (hi)\n", p->thread_id);
7806 JMP_CMP_HI(t, ip, <);
/* jmpgt: generic greater-than conditional jump (both operands in metadata). */
7810 instr_jmp_gt_exec(struct rte_swx_pipeline *p)
7812 struct thread *t = &p->threads[p->thread_id];
7813 struct instruction *ip = t->ip;
7815 TRACE("[Thread %2u] jmpgt\n", p->thread_id);
/* jmpgt (mh): metadata field > header field. */
7821 instr_jmp_gt_mh_exec(struct rte_swx_pipeline *p)
7823 struct thread *t = &p->threads[p->thread_id];
7824 struct instruction *ip = t->ip;
7826 TRACE("[Thread %2u] jmpgt (mh)\n", p->thread_id);
7828 JMP_CMP_MH(t, ip, >);
/* jmpgt (hm): header field > metadata field. */
7832 instr_jmp_gt_hm_exec(struct rte_swx_pipeline *p)
7834 struct thread *t = &p->threads[p->thread_id];
7835 struct instruction *ip = t->ip;
7837 TRACE("[Thread %2u] jmpgt (hm)\n", p->thread_id);
7839 JMP_CMP_HM(t, ip, >);
/* jmpgt (hh): header field > header field (ordering needs full byte-order
 * aware comparison, hence no _FAST variant).
 */
7843 instr_jmp_gt_hh_exec(struct rte_swx_pipeline *p)
7845 struct thread *t = &p->threads[p->thread_id];
7846 struct instruction *ip = t->ip;
7848 TRACE("[Thread %2u] jmpgt (hh)\n", p->thread_id);
7850 JMP_CMP_HH(t, ip, >);
/* jmpgt (mi): metadata field > immediate value. */
7854 instr_jmp_gt_mi_exec(struct rte_swx_pipeline *p)
7856 struct thread *t = &p->threads[p->thread_id];
7857 struct instruction *ip = t->ip;
7859 TRACE("[Thread %2u] jmpgt (mi)\n", p->thread_id);
7861 JMP_CMP_MI(t, ip, >);
/* jmpgt (hi): header field > immediate value. */
7865 instr_jmp_gt_hi_exec(struct rte_swx_pipeline *p)
7867 struct thread *t = &p->threads[p->thread_id];
7868 struct instruction *ip = t->ip;
7870 TRACE("[Thread %2u] jmpgt (hi)\n", p->thread_id);
7872 JMP_CMP_HI(t, ip, >);
/* Translate the "return" instruction. Only valid inside an action (the
 * action pointer must be non-NULL) and takes no operands (exactly one token).
 */
7879 instr_return_translate(struct rte_swx_pipeline *p __rte_unused,
7880 struct action *action,
7881 char **tokens __rte_unused,
7883 struct instruction *instr,
7884 struct instruction_data *data __rte_unused)
7886 CHECK(action, EINVAL);
7887 CHECK(n_tokens == 1, EINVAL);
7889 instr->type = INSTR_RETURN;
/* Execute "return": transfer control back from the current action. */
7894 instr_return_exec(struct rte_swx_pipeline *p)
7896 struct thread *t = &p->threads[p->thread_id];
7898 TRACE("[Thread %2u] return\n", p->thread_id);
/* Translate one instruction string into its internal representation.
 *
 * Steps: tokenize the string, strip an optional leading "label :" prefix
 * (recorded in data->label), then dispatch on the first remaining token to
 * the per-opcode translate function. Returns 0 on success or -errno.
 * Note: "action" is NULL when translating pipeline-level instructions;
 * the per-opcode translators enforce action-only restrictions themselves.
 */
7904 instr_translate(struct rte_swx_pipeline *p,
7905 struct action *action,
7907 struct instruction *instr,
7908 struct instruction_data *data)
7910 char *tokens[RTE_SWX_INSTRUCTION_TOKENS_MAX];
7911 int n_tokens = 0, tpos = 0;
7913 /* Parse the instruction string into tokens. */
7917 token = strtok_r(string, " \t\v", &string);
7921 CHECK(n_tokens < RTE_SWX_INSTRUCTION_TOKENS_MAX, EINVAL);
7922 CHECK_NAME(token, EINVAL);
7924 tokens[n_tokens] = token;
7928 CHECK(n_tokens, EINVAL);
7930 /* Handle the optional instruction label. */
7931 if ((n_tokens >= 2) && !strcmp(tokens[1], ":")) {
7932 strcpy(data->label, tokens[0]);
7935 CHECK(n_tokens - tpos, EINVAL);
7938 /* Identify the instruction type. */
7939 if (!strcmp(tokens[tpos], "rx"))
7940 return instr_rx_translate(p,
7947 if (!strcmp(tokens[tpos], "tx"))
7948 return instr_tx_translate(p,
7955 if (!strcmp(tokens[tpos], "drop"))
7956 return instr_drop_translate(p,
7963 if (!strcmp(tokens[tpos], "extract"))
7964 return instr_hdr_extract_translate(p,
7971 if (!strcmp(tokens[tpos], "lookahead"))
7972 return instr_hdr_lookahead_translate(p,
7979 if (!strcmp(tokens[tpos], "emit"))
7980 return instr_hdr_emit_translate(p,
7987 if (!strcmp(tokens[tpos], "validate"))
7988 return instr_hdr_validate_translate(p,
7995 if (!strcmp(tokens[tpos], "invalidate"))
7996 return instr_hdr_invalidate_translate(p,
8003 if (!strcmp(tokens[tpos], "mov"))
8004 return instr_mov_translate(p,
8011 if (!strcmp(tokens[tpos], "add"))
8012 return instr_alu_add_translate(p,
8019 if (!strcmp(tokens[tpos], "sub"))
8020 return instr_alu_sub_translate(p,
8027 if (!strcmp(tokens[tpos], "ckadd"))
8028 return instr_alu_ckadd_translate(p,
8035 if (!strcmp(tokens[tpos], "cksub"))
8036 return instr_alu_cksub_translate(p,
8043 if (!strcmp(tokens[tpos], "and"))
8044 return instr_alu_and_translate(p,
8051 if (!strcmp(tokens[tpos], "or"))
8052 return instr_alu_or_translate(p,
8059 if (!strcmp(tokens[tpos], "xor"))
8060 return instr_alu_xor_translate(p,
8067 if (!strcmp(tokens[tpos], "shl"))
8068 return instr_alu_shl_translate(p,
8075 if (!strcmp(tokens[tpos], "shr"))
8076 return instr_alu_shr_translate(p,
8083 if (!strcmp(tokens[tpos], "regprefetch"))
8084 return instr_regprefetch_translate(p,
8091 if (!strcmp(tokens[tpos], "regrd"))
8092 return instr_regrd_translate(p,
8099 if (!strcmp(tokens[tpos], "regwr"))
8100 return instr_regwr_translate(p,
8107 if (!strcmp(tokens[tpos], "regadd"))
8108 return instr_regadd_translate(p,
8115 if (!strcmp(tokens[tpos], "metprefetch"))
8116 return instr_metprefetch_translate(p,
8123 if (!strcmp(tokens[tpos], "meter"))
8124 return instr_meter_translate(p,
8131 if (!strcmp(tokens[tpos], "table"))
8132 return instr_table_translate(p,
8139 if (!strcmp(tokens[tpos], "extern"))
8140 return instr_extern_translate(p,
8147 if (!strcmp(tokens[tpos], "jmp"))
8148 return instr_jmp_translate(p,
8155 if (!strcmp(tokens[tpos], "jmpv"))
8156 return instr_jmp_valid_translate(p,
8163 if (!strcmp(tokens[tpos], "jmpnv"))
8164 return instr_jmp_invalid_translate(p,
8171 if (!strcmp(tokens[tpos], "jmph"))
8172 return instr_jmp_hit_translate(p,
8179 if (!strcmp(tokens[tpos], "jmpnh"))
8180 return instr_jmp_miss_translate(p,
8187 if (!strcmp(tokens[tpos], "jmpa"))
8188 return instr_jmp_action_hit_translate(p,
8195 if (!strcmp(tokens[tpos], "jmpna"))
8196 return instr_jmp_action_miss_translate(p,
8203 if (!strcmp(tokens[tpos], "jmpeq"))
8204 return instr_jmp_eq_translate(p,
8211 if (!strcmp(tokens[tpos], "jmpneq"))
8212 return instr_jmp_neq_translate(p,
8219 if (!strcmp(tokens[tpos], "jmplt"))
8220 return instr_jmp_lt_translate(p,
8227 if (!strcmp(tokens[tpos], "jmpgt"))
8228 return instr_jmp_gt_translate(p,
8235 if (!strcmp(tokens[tpos], "return"))
8236 return instr_return_translate(p,
/* Linear search for the instruction carrying the given label; returns the
 * matching instruction_data entry or NULL if the label is not defined.
 */
8246 static struct instruction_data *
8247 label_find(struct instruction_data *data, uint32_t n, const char *label)
8251 for (i = 0; i < n; i++)
8252 if (!strcmp(label, data[i].label))
/* Count how many instructions jump to the given label (i.e. how many entries
 * have a matching jmp_label).
 */
8259 label_is_used(struct instruction_data *data, uint32_t n, const char *label)
8261 uint32_t count = 0, i;
8266 for (i = 0; i < n; i++)
8267 if (!strcmp(label, data[i].jmp_label))
/* Validate labels across the instruction list: every defined label must be
 * unique (O(n^2) pairwise check), then record the per-label user count in
 * data->n_users so the optimizer knows which instructions are jump targets.
 */
8274 instr_label_check(struct instruction_data *instruction_data,
8275 uint32_t n_instructions)
8279 /* Check that all instruction labels are unique. */
8280 for (i = 0; i < n_instructions; i++) {
8281 struct instruction_data *data = &instruction_data[i];
8282 char *label = data->label;
8288 for (j = i + 1; j < n_instructions; j++)
8289 CHECK(strcmp(label, data[j].label), EINVAL);
8292 /* Get users for each instruction label. */
8293 for (i = 0; i < n_instructions; i++) {
8294 struct instruction_data *data = &instruction_data[i];
8295 char *label = data->label;
8297 data->n_users = label_is_used(instruction_data,
/* Resolve every jump instruction's target label into a direct instruction
 * pointer (instr->jmp.ip). Fails with EINVAL if a jump references an
 * undefined label.
 */
8306 instr_jmp_resolve(struct instruction *instructions,
8307 struct instruction_data *instruction_data,
8308 uint32_t n_instructions)
8312 for (i = 0; i < n_instructions; i++) {
8313 struct instruction *instr = &instructions[i];
8314 struct instruction_data *data = &instruction_data[i];
8315 struct instruction_data *found;
8317 if (!instruction_is_jmp(instr))
8320 found = label_find(instruction_data,
8323 CHECK(found, EINVAL);
8325 /* Convert the label's array index into an instruction pointer. */
8325 instr->jmp.ip = &instructions[found - instruction_data];
/* Structural sanity checks on a translated instruction list.
 *
 * Pipeline-level code (action == NULL): must start with rx, contain at
 * least one tx, and end with tx or an unconditional jmp.
 * Action code: must contain at least one return or tx instruction.
 */
8332 instr_verify(struct rte_swx_pipeline *p __rte_unused,
8334 struct instruction *instr,
8335 struct instruction_data *data __rte_unused,
8336 uint32_t n_instructions)
8339 enum instruction_type type;
8342 /* Check that the first instruction is rx. */
8343 CHECK(instr[0].type == INSTR_RX, EINVAL);
8345 /* Check that there is at least one tx instruction. */
8346 for (i = 0; i < n_instructions; i++) {
8347 type = instr[i].type;
8349 if (instruction_is_tx(type))
8352 CHECK(i < n_instructions, EINVAL);
8354 /* Check that the last instruction is either tx or unconditional
8357 type = instr[n_instructions - 1].type;
8358 CHECK(instruction_is_tx(type) || (type == INSTR_JMP), EINVAL);
8362 enum instruction_type type;
8365 /* Check that there is at least one return or tx instruction. */
8366 for (i = 0; i < n_instructions; i++) {
8367 type = instr[i].type;
8369 if ((type == INSTR_RETURN) || instruction_is_tx(type))
8372 CHECK(i < n_instructions, EINVAL);
/* Compact the instruction list in place by dropping entries whose
 * instruction_data is marked invalid (optimized out). Returns the new
 * instruction count.
 */
8379 instr_compact(struct instruction *instructions,
8380 struct instruction_data *instruction_data,
8381 uint32_t n_instructions)
8383 uint32_t i, pos = 0;
8385 /* Eliminate the invalid instructions that have been optimized out. */
8386 for (i = 0; i < n_instructions; i++) {
8387 struct instruction *instr = &instructions[i];
8388 struct instruction_data *data = &instruction_data[i];
8394 memcpy(&instructions[pos], instr, sizeof(*instr));
8395 memcpy(&instruction_data[pos], data, sizeof(*data));
/* Detect a run of consecutive HDR_EXTRACT instructions that can be fused
 * into a single multi-header extract. The run is capped by the capacity of
 * the io.hdr.header_id array and must not cross a jump target (n_users) on
 * any instruction other than the first. On match, *n_pattern_instr gets the
 * run length.
 */
8405 instr_pattern_extract_many_search(struct instruction *instr,
8406 struct instruction_data *data,
8408 uint32_t *n_pattern_instr)
8412 for (i = 0; i < n_instr; i++) {
8413 if (data[i].invalid)
8416 if (instr[i].type != INSTR_HDR_EXTRACT)
8419 if (i == RTE_DIM(instr->io.hdr.header_id))
8422 if (i && data[i].n_users)
8429 *n_pattern_instr = i;
/* Fuse the detected extract run: fold each follow-up instruction's header
 * into slot i of the first instruction, then mark the follow-ups invalid so
 * instr_compact() can remove them.
 */
8434 instr_pattern_extract_many_replace(struct instruction *instr,
8435 struct instruction_data *data,
8440 for (i = 1; i < n_instr; i++) {
8442 instr[0].io.hdr.header_id[i] = instr[i].io.hdr.header_id[0];
8443 instr[0].io.hdr.struct_id[i] = instr[i].io.hdr.struct_id[0];
8444 instr[0].io.hdr.n_bytes[i] = instr[i].io.hdr.n_bytes[0];
8446 data[i].invalid = 1;
/* Driver for the extract-many optimization: scan the whole list, fuse every
 * matching run, then compact. Returns the new instruction count.
 */
8451 instr_pattern_extract_many_optimize(struct instruction *instructions,
8452 struct instruction_data *instruction_data,
8453 uint32_t n_instructions)
8457 for (i = 0; i < n_instructions; ) {
8458 struct instruction *instr = &instructions[i];
8459 struct instruction_data *data = &instruction_data[i];
8460 uint32_t n_instr = 0;
8464 detected = instr_pattern_extract_many_search(instr,
8469 instr_pattern_extract_many_replace(instr,
8476 /* No pattern starting at the current instruction. */
8480 /* Eliminate the invalid instructions that have been optimized out. */
8481 n_instructions = instr_compact(instructions,
8485 return n_instructions;
/* Detect a run of HDR_EMIT instructions immediately followed by a TX that
 * can be fused into one emit-many+tx instruction. No instruction in the
 * pattern other than the first may be a jump target (n_users != 0).
 * On match, *n_pattern_instr gets the run length including the TX.
 */
8489 instr_pattern_emit_many_tx_search(struct instruction *instr,
8490 struct instruction_data *data,
8492 uint32_t *n_pattern_instr)
8496 for (i = 0; i < n_instr; i++) {
8497 if (data[i].invalid)
8500 if (instr[i].type != INSTR_HDR_EMIT)
8503 if (i == RTE_DIM(instr->io.hdr.header_id))
8506 if (i && data[i].n_users)
8513 if (!instruction_is_tx(instr[i].type))
8516 if (data[i].n_users)
8521 *n_pattern_instr = i;
/* Fuse the detected emit-many+tx pattern into the first instruction:
 * follow-up emits fill the header slots, the trailing TX contributes its
 * port operand, and all follow-ups are marked invalid for compaction.
 */
8526 instr_pattern_emit_many_tx_replace(struct instruction *instr,
8527 struct instruction_data *data,
8532 /* Any emit instruction in addition to the first one. */
8533 for (i = 1; i < n_instr - 1; i++) {
8535 instr[0].io.hdr.header_id[i] = instr[i].io.hdr.header_id[0];
8536 instr[0].io.hdr.struct_id[i] = instr[i].io.hdr.struct_id[0];
8537 instr[0].io.hdr.n_bytes[i] = instr[i].io.hdr.n_bytes[0];
8539 data[i].invalid = 1;
8542 /* The TX instruction is the last one in the pattern. */
8544 instr[0].io.io.offset = instr[i].io.io.offset;
8545 instr[0].io.io.n_bits = instr[i].io.io.n_bits;
8546 data[i].invalid = 1;
/* Driver for the emit-many+tx optimization: scan, fuse each match, compact.
 * Returns the new instruction count.
 */
8550 instr_pattern_emit_many_tx_optimize(struct instruction *instructions,
8551 struct instruction_data *instruction_data,
8552 uint32_t n_instructions)
8556 for (i = 0; i < n_instructions; ) {
8557 struct instruction *instr = &instructions[i];
8558 struct instruction_data *data = &instruction_data[i];
8559 uint32_t n_instr = 0;
8562 /* Emit many + TX. */
8563 detected = instr_pattern_emit_many_tx_search(instr,
8568 instr_pattern_emit_many_tx_replace(instr,
8575 /* No pattern starting at the current instruction. */
8579 /* Eliminate the invalid instructions that have been optimized out. */
8580 n_instructions = instr_compact(instructions,
8584 return n_instructions;
/* Forward declaration: defined later in this file; counts mov instructions
 * that read a given action argument (needed by the mov-all+validate search
 * below, which precedes the definition).
 */
8588 action_arg_src_mov_count(struct action *a,
8590 struct instruction *instructions,
8591 struct instruction_data *instruction_data,
8592 uint32_t n_instructions);
/* Detect the "mov all header fields from action args + validate header"
 * pattern, which can be replaced by a single DMA_HT instruction.
 *
 * Requirements: the first MOV_HM writes header field 0 from some action arg;
 * subsequent MOV_HM instructions copy each following header field from the
 * consecutive action args (matching offsets and bit widths); the pattern
 * ends with HDR_VALIDATE on the same header. Fixed-size headers only.
 */
8595 instr_pattern_mov_all_validate_search(struct rte_swx_pipeline *p,
8597 struct instruction *instr,
8598 struct instruction_data *data,
8600 struct instruction *instructions,
8601 struct instruction_data *instruction_data,
8602 uint32_t n_instructions,
8603 uint32_t *n_pattern_instr)
8606 uint32_t src_field_id, i, j;
8608 /* Prerequisites. */
8612 /* First instruction: MOV_HM. */
8613 if (data[0].invalid || (instr[0].type != INSTR_MOV_HM))
8616 h = header_find_by_struct_id(p, instr[0].mov.dst.struct_id);
8617 if (!h || h->st->var_size)
8620 for (src_field_id = 0; src_field_id < a->st->n_fields; src_field_id++)
8621 if (instr[0].mov.src.offset == a->st->fields[src_field_id].offset / 8)
8624 if (src_field_id == a->st->n_fields)
8627 if (instr[0].mov.dst.offset ||
8628 (instr[0].mov.dst.n_bits != h->st->fields[0].n_bits) ||
8629 instr[0].mov.src.struct_id ||
8630 (instr[0].mov.src.n_bits != a->st->fields[src_field_id].n_bits) ||
8631 (instr[0].mov.dst.n_bits != instr[0].mov.src.n_bits))
8634 if ((n_instr < h->st->n_fields + 1) ||
8635 (a->st->n_fields < src_field_id + h->st->n_fields + 1))
8638 /* Subsequent instructions: MOV_HM. */
8639 for (i = 1; i < h->st->n_fields; i++)
8640 if (data[i].invalid ||
8642 (instr[i].type != INSTR_MOV_HM) ||
8643 (instr[i].mov.dst.struct_id != h->struct_id) ||
8644 (instr[i].mov.dst.offset != h->st->fields[i].offset / 8) ||
8645 (instr[i].mov.dst.n_bits != h->st->fields[i].n_bits) ||
8646 instr[i].mov.src.struct_id ||
8647 (instr[i].mov.src.offset != a->st->fields[src_field_id + i].offset / 8) ||
8648 (instr[i].mov.src.n_bits != a->st->fields[src_field_id + i].n_bits) ||
8649 (instr[i].mov.dst.n_bits != instr[i].mov.src.n_bits))
8652 /* Last instruction: HDR_VALIDATE. */
8653 if ((instr[i].type != INSTR_HDR_VALIDATE) ||
8654 (instr[i].valid.header_id != h->id))
8657 /* Check that none of the action args used as source for this DMA
 * transfer is also used as source by any other mov instruction.
 */
8660 for (j = src_field_id; j < src_field_id + h->st->n_fields; j++) {
8663 n_users = action_arg_src_mov_count(a,
8672 *n_pattern_instr = 1 + i;
/* Replace the detected mov-all+validate pattern with one DMA_HT instruction
 * that copies the whole header from the action argument area, then mark the
 * remaining instructions invalid. Also flags the affected action args as
 * header (network) endianness so the table layer stores them pre-swapped.
 */
8677 instr_pattern_mov_all_validate_replace(struct rte_swx_pipeline *p,
8679 struct instruction *instr,
8680 struct instruction_data *data,
8684 uint32_t src_field_id, src_offset, i;
8686 /* Read from the instructions before they are modified. */
8687 h = header_find_by_struct_id(p, instr[0].mov.dst.struct_id);
8691 for (src_field_id = 0; src_field_id < a->st->n_fields; src_field_id++)
8692 if (instr[0].mov.src.offset == a->st->fields[src_field_id].offset / 8)
8695 if (src_field_id == a->st->n_fields)
8698 src_offset = instr[0].mov.src.offset;
8700 /* Modify the instructions. */
8701 instr[0].type = INSTR_DMA_HT;
8702 instr[0].dma.dst.header_id[0] = h->id;
8703 instr[0].dma.dst.struct_id[0] = h->struct_id;
8704 instr[0].dma.src.offset[0] = (uint8_t)src_offset;
8705 instr[0].dma.n_bytes[0] = h->st->n_bits / 8;
8707 for (i = 1; i < n_instr; i++)
8708 data[i].invalid = 1;
8710 /* Update the endianness of the action arguments to header endianness. */
8711 for (i = 0; i < h->st->n_fields; i++)
8712 a->args_endianness[src_field_id + i] = 1;
/* Driver for the mov-all+validate optimization (action code only — returns
 * early otherwise): scan, replace each match with DMA_HT, compact.
 */
8716 instr_pattern_mov_all_validate_optimize(struct rte_swx_pipeline *p,
8718 struct instruction *instructions,
8719 struct instruction_data *instruction_data,
8720 uint32_t n_instructions)
8725 return n_instructions;
8727 for (i = 0; i < n_instructions; ) {
8728 struct instruction *instr = &instructions[i];
8729 struct instruction_data *data = &instruction_data[i];
8730 uint32_t n_instr = 0;
8733 /* Mov all + validate. */
8734 detected = instr_pattern_mov_all_validate_search(p,
8744 instr_pattern_mov_all_validate_replace(p, a, instr, data, n_instr);
8749 /* No pattern starting at the current instruction. */
8753 /* Eliminate the invalid instructions that have been optimized out. */
8754 n_instructions = instr_compact(instructions,
8758 return n_instructions;
/* Detect a run of consecutive DMA_HT instructions fusable into one
 * multi-slot DMA, capped by the dma.dst.header_id array size; no
 * instruction after the first may be a jump target.
 */
8762 instr_pattern_dma_many_search(struct instruction *instr,
8763 struct instruction_data *data,
8765 uint32_t *n_pattern_instr)
8769 for (i = 0; i < n_instr; i++) {
8770 if (data[i].invalid)
8773 if (instr[i].type != INSTR_DMA_HT)
8776 if (i == RTE_DIM(instr->dma.dst.header_id))
8779 if (i && data[i].n_users)
8786 *n_pattern_instr = i;
/* Fuse the detected DMA run: copy each follow-up's slot-0 operands into
 * slot i of the first instruction and mark the follow-ups invalid.
 */
8791 instr_pattern_dma_many_replace(struct instruction *instr,
8792 struct instruction_data *data,
8797 for (i = 1; i < n_instr; i++) {
8799 instr[0].dma.dst.header_id[i] = instr[i].dma.dst.header_id[0];
8800 instr[0].dma.dst.struct_id[i] = instr[i].dma.dst.struct_id[0];
8801 instr[0].dma.src.offset[i] = instr[i].dma.src.offset[0];
8802 instr[0].dma.n_bytes[i] = instr[i].dma.n_bytes[0];
8804 data[i].invalid = 1;
/* Driver for the DMA-many optimization: scan, fuse each match, compact.
 * Returns the new instruction count.
 */
8809 instr_pattern_dma_many_optimize(struct instruction *instructions,
8810 struct instruction_data *instruction_data,
8811 uint32_t n_instructions)
8815 for (i = 0; i < n_instructions; ) {
8816 struct instruction *instr = &instructions[i];
8817 struct instruction_data *data = &instruction_data[i];
8818 uint32_t n_instr = 0;
8822 detected = instr_pattern_dma_many_search(instr,
8827 instr_pattern_dma_many_replace(instr, data, n_instr);
8832 /* No pattern starting at the current instruction. */
8836 /* Eliminate the invalid instructions that have been optimized out. */
8837 n_instructions = instr_compact(instructions,
8841 return n_instructions;
/* Run all peephole optimization passes in order: extract-many,
 * emit-many+tx, mov-all+validate (which creates DMA_HT), then dma-many
 * (which fuses them). Order matters: the DMA pass consumes the output of
 * the mov-all+validate pass. Returns the final instruction count.
 */
8845 instr_optimize(struct rte_swx_pipeline *p,
8847 struct instruction *instructions,
8848 struct instruction_data *instruction_data,
8849 uint32_t n_instructions)
8852 n_instructions = instr_pattern_extract_many_optimize(instructions,
8856 /* Emit many + TX. */
8857 n_instructions = instr_pattern_emit_many_tx_optimize(instructions,
8861 /* Mov all + validate. */
8862 n_instructions = instr_pattern_mov_all_validate_optimize(p,
8869 n_instructions = instr_pattern_dma_many_optimize(instructions,
8873 return n_instructions;
/* Translate, verify, optimize and install an instruction list for either an
 * action (a != NULL: stored in a->instructions) or the pipeline itself.
 * Each input string is strdup'ed before tokenization because strtok_r
 * mutates its argument.
 */
8877 instruction_config(struct rte_swx_pipeline *p,
8879 const char **instructions,
8880 uint32_t n_instructions)
8882 struct instruction *instr = NULL;
8883 struct instruction_data *data = NULL;
8887 CHECK(n_instructions, EINVAL);
8888 CHECK(instructions, EINVAL);
8889 for (i = 0; i < n_instructions; i++)
8890 CHECK_INSTRUCTION(instructions[i], EINVAL);
8892 /* Memory allocation. */
8893 instr = calloc(n_instructions, sizeof(struct instruction));
8899 data = calloc(n_instructions, sizeof(struct instruction_data));
8905 for (i = 0; i < n_instructions; i++) {
8906 char *string = strdup(instructions[i]);
8912 err = instr_translate(p, a, string, &instr[i], &data[i]);
8921 err = instr_label_check(data, n_instructions);
8925 err = instr_verify(p, a, instr, data, n_instructions);
8929 n_instructions = instr_optimize(p, a, instr, data, n_instructions);
8931 err = instr_jmp_resolve(instr, data, n_instructions);
8936 a->instructions = instr;
8937 a->n_instructions = n_instructions;
8939 p->instructions = instr;
8940 p->n_instructions = n_instructions;
/* Dispatch table: maps every enum instruction_type value to its executor.
 * Indexed directly by ip->type in instr_exec() below, so every reachable
 * instruction type must have a non-NULL entry here.
 */
8952 typedef void (*instr_exec_t)(struct rte_swx_pipeline *);
8954 static instr_exec_t instruction_table[] = {
8955 [INSTR_RX] = instr_rx_exec,
8956 [INSTR_TX] = instr_tx_exec,
8957 [INSTR_TX_I] = instr_tx_i_exec,
8959 [INSTR_HDR_EXTRACT] = instr_hdr_extract_exec,
8960 [INSTR_HDR_EXTRACT2] = instr_hdr_extract2_exec,
8961 [INSTR_HDR_EXTRACT3] = instr_hdr_extract3_exec,
8962 [INSTR_HDR_EXTRACT4] = instr_hdr_extract4_exec,
8963 [INSTR_HDR_EXTRACT5] = instr_hdr_extract5_exec,
8964 [INSTR_HDR_EXTRACT6] = instr_hdr_extract6_exec,
8965 [INSTR_HDR_EXTRACT7] = instr_hdr_extract7_exec,
8966 [INSTR_HDR_EXTRACT8] = instr_hdr_extract8_exec,
8967 [INSTR_HDR_EXTRACT_M] = instr_hdr_extract_m_exec,
8968 [INSTR_HDR_LOOKAHEAD] = instr_hdr_lookahead_exec,
8970 [INSTR_HDR_EMIT] = instr_hdr_emit_exec,
8971 [INSTR_HDR_EMIT_TX] = instr_hdr_emit_tx_exec,
8972 [INSTR_HDR_EMIT2_TX] = instr_hdr_emit2_tx_exec,
8973 [INSTR_HDR_EMIT3_TX] = instr_hdr_emit3_tx_exec,
8974 [INSTR_HDR_EMIT4_TX] = instr_hdr_emit4_tx_exec,
8975 [INSTR_HDR_EMIT5_TX] = instr_hdr_emit5_tx_exec,
8976 [INSTR_HDR_EMIT6_TX] = instr_hdr_emit6_tx_exec,
8977 [INSTR_HDR_EMIT7_TX] = instr_hdr_emit7_tx_exec,
8978 [INSTR_HDR_EMIT8_TX] = instr_hdr_emit8_tx_exec,
8980 [INSTR_HDR_VALIDATE] = instr_hdr_validate_exec,
8981 [INSTR_HDR_INVALIDATE] = instr_hdr_invalidate_exec,
8983 [INSTR_MOV] = instr_mov_exec,
8984 [INSTR_MOV_MH] = instr_mov_mh_exec,
8985 [INSTR_MOV_HM] = instr_mov_hm_exec,
8986 [INSTR_MOV_HH] = instr_mov_hh_exec,
8987 [INSTR_MOV_I] = instr_mov_i_exec,
8989 [INSTR_DMA_HT] = instr_dma_ht_exec,
8990 [INSTR_DMA_HT2] = instr_dma_ht2_exec,
8991 [INSTR_DMA_HT3] = instr_dma_ht3_exec,
8992 [INSTR_DMA_HT4] = instr_dma_ht4_exec,
8993 [INSTR_DMA_HT5] = instr_dma_ht5_exec,
8994 [INSTR_DMA_HT6] = instr_dma_ht6_exec,
8995 [INSTR_DMA_HT7] = instr_dma_ht7_exec,
8996 [INSTR_DMA_HT8] = instr_dma_ht8_exec,
8998 [INSTR_ALU_ADD] = instr_alu_add_exec,
8999 [INSTR_ALU_ADD_MH] = instr_alu_add_mh_exec,
9000 [INSTR_ALU_ADD_HM] = instr_alu_add_hm_exec,
9001 [INSTR_ALU_ADD_HH] = instr_alu_add_hh_exec,
9002 [INSTR_ALU_ADD_MI] = instr_alu_add_mi_exec,
9003 [INSTR_ALU_ADD_HI] = instr_alu_add_hi_exec,
9005 [INSTR_ALU_SUB] = instr_alu_sub_exec,
9006 [INSTR_ALU_SUB_MH] = instr_alu_sub_mh_exec,
9007 [INSTR_ALU_SUB_HM] = instr_alu_sub_hm_exec,
9008 [INSTR_ALU_SUB_HH] = instr_alu_sub_hh_exec,
9009 [INSTR_ALU_SUB_MI] = instr_alu_sub_mi_exec,
9010 [INSTR_ALU_SUB_HI] = instr_alu_sub_hi_exec,
9012 [INSTR_ALU_CKADD_FIELD] = instr_alu_ckadd_field_exec,
9013 [INSTR_ALU_CKADD_STRUCT] = instr_alu_ckadd_struct_exec,
9014 [INSTR_ALU_CKADD_STRUCT20] = instr_alu_ckadd_struct20_exec,
9015 [INSTR_ALU_CKSUB_FIELD] = instr_alu_cksub_field_exec,
9017 [INSTR_ALU_AND] = instr_alu_and_exec,
9018 [INSTR_ALU_AND_MH] = instr_alu_and_mh_exec,
9019 [INSTR_ALU_AND_HM] = instr_alu_and_hm_exec,
9020 [INSTR_ALU_AND_HH] = instr_alu_and_hh_exec,
9021 [INSTR_ALU_AND_I] = instr_alu_and_i_exec,
9023 [INSTR_ALU_OR] = instr_alu_or_exec,
9024 [INSTR_ALU_OR_MH] = instr_alu_or_mh_exec,
9025 [INSTR_ALU_OR_HM] = instr_alu_or_hm_exec,
9026 [INSTR_ALU_OR_HH] = instr_alu_or_hh_exec,
9027 [INSTR_ALU_OR_I] = instr_alu_or_i_exec,
9029 [INSTR_ALU_XOR] = instr_alu_xor_exec,
9030 [INSTR_ALU_XOR_MH] = instr_alu_xor_mh_exec,
9031 [INSTR_ALU_XOR_HM] = instr_alu_xor_hm_exec,
9032 [INSTR_ALU_XOR_HH] = instr_alu_xor_hh_exec,
9033 [INSTR_ALU_XOR_I] = instr_alu_xor_i_exec,
9035 [INSTR_ALU_SHL] = instr_alu_shl_exec,
9036 [INSTR_ALU_SHL_MH] = instr_alu_shl_mh_exec,
9037 [INSTR_ALU_SHL_HM] = instr_alu_shl_hm_exec,
9038 [INSTR_ALU_SHL_HH] = instr_alu_shl_hh_exec,
9039 [INSTR_ALU_SHL_MI] = instr_alu_shl_mi_exec,
9040 [INSTR_ALU_SHL_HI] = instr_alu_shl_hi_exec,
9042 [INSTR_ALU_SHR] = instr_alu_shr_exec,
9043 [INSTR_ALU_SHR_MH] = instr_alu_shr_mh_exec,
9044 [INSTR_ALU_SHR_HM] = instr_alu_shr_hm_exec,
9045 [INSTR_ALU_SHR_HH] = instr_alu_shr_hh_exec,
9046 [INSTR_ALU_SHR_MI] = instr_alu_shr_mi_exec,
9047 [INSTR_ALU_SHR_HI] = instr_alu_shr_hi_exec,
9049 [INSTR_REGPREFETCH_RH] = instr_regprefetch_rh_exec,
9050 [INSTR_REGPREFETCH_RM] = instr_regprefetch_rm_exec,
9051 [INSTR_REGPREFETCH_RI] = instr_regprefetch_ri_exec,
9053 [INSTR_REGRD_HRH] = instr_regrd_hrh_exec,
9054 [INSTR_REGRD_HRM] = instr_regrd_hrm_exec,
9055 [INSTR_REGRD_MRH] = instr_regrd_mrh_exec,
9056 [INSTR_REGRD_MRM] = instr_regrd_mrm_exec,
9057 [INSTR_REGRD_HRI] = instr_regrd_hri_exec,
9058 [INSTR_REGRD_MRI] = instr_regrd_mri_exec,
9060 [INSTR_REGWR_RHH] = instr_regwr_rhh_exec,
9061 [INSTR_REGWR_RHM] = instr_regwr_rhm_exec,
9062 [INSTR_REGWR_RMH] = instr_regwr_rmh_exec,
9063 [INSTR_REGWR_RMM] = instr_regwr_rmm_exec,
9064 [INSTR_REGWR_RHI] = instr_regwr_rhi_exec,
9065 [INSTR_REGWR_RMI] = instr_regwr_rmi_exec,
9066 [INSTR_REGWR_RIH] = instr_regwr_rih_exec,
9067 [INSTR_REGWR_RIM] = instr_regwr_rim_exec,
9068 [INSTR_REGWR_RII] = instr_regwr_rii_exec,
9070 [INSTR_REGADD_RHH] = instr_regadd_rhh_exec,
9071 [INSTR_REGADD_RHM] = instr_regadd_rhm_exec,
9072 [INSTR_REGADD_RMH] = instr_regadd_rmh_exec,
9073 [INSTR_REGADD_RMM] = instr_regadd_rmm_exec,
9074 [INSTR_REGADD_RHI] = instr_regadd_rhi_exec,
9075 [INSTR_REGADD_RMI] = instr_regadd_rmi_exec,
9076 [INSTR_REGADD_RIH] = instr_regadd_rih_exec,
9077 [INSTR_REGADD_RIM] = instr_regadd_rim_exec,
9078 [INSTR_REGADD_RII] = instr_regadd_rii_exec,
9080 [INSTR_METPREFETCH_H] = instr_metprefetch_h_exec,
9081 [INSTR_METPREFETCH_M] = instr_metprefetch_m_exec,
9082 [INSTR_METPREFETCH_I] = instr_metprefetch_i_exec,
9084 [INSTR_METER_HHM] = instr_meter_hhm_exec,
9085 [INSTR_METER_HHI] = instr_meter_hhi_exec,
9086 [INSTR_METER_HMM] = instr_meter_hmm_exec,
9087 [INSTR_METER_HMI] = instr_meter_hmi_exec,
9088 [INSTR_METER_MHM] = instr_meter_mhm_exec,
9089 [INSTR_METER_MHI] = instr_meter_mhi_exec,
9090 [INSTR_METER_MMM] = instr_meter_mmm_exec,
9091 [INSTR_METER_MMI] = instr_meter_mmi_exec,
9092 [INSTR_METER_IHM] = instr_meter_ihm_exec,
9093 [INSTR_METER_IHI] = instr_meter_ihi_exec,
9094 [INSTR_METER_IMM] = instr_meter_imm_exec,
9095 [INSTR_METER_IMI] = instr_meter_imi_exec,
9097 [INSTR_TABLE] = instr_table_exec,
9098 [INSTR_SELECTOR] = instr_selector_exec,
9099 [INSTR_EXTERN_OBJ] = instr_extern_obj_exec,
9100 [INSTR_EXTERN_FUNC] = instr_extern_func_exec,
9102 [INSTR_JMP] = instr_jmp_exec,
9103 [INSTR_JMP_VALID] = instr_jmp_valid_exec,
9104 [INSTR_JMP_INVALID] = instr_jmp_invalid_exec,
9105 [INSTR_JMP_HIT] = instr_jmp_hit_exec,
9106 [INSTR_JMP_MISS] = instr_jmp_miss_exec,
9107 [INSTR_JMP_ACTION_HIT] = instr_jmp_action_hit_exec,
9108 [INSTR_JMP_ACTION_MISS] = instr_jmp_action_miss_exec,
9110 [INSTR_JMP_EQ] = instr_jmp_eq_exec,
9111 [INSTR_JMP_EQ_MH] = instr_jmp_eq_mh_exec,
9112 [INSTR_JMP_EQ_HM] = instr_jmp_eq_hm_exec,
9113 [INSTR_JMP_EQ_HH] = instr_jmp_eq_hh_exec,
9114 [INSTR_JMP_EQ_I] = instr_jmp_eq_i_exec,
9116 [INSTR_JMP_NEQ] = instr_jmp_neq_exec,
9117 [INSTR_JMP_NEQ_MH] = instr_jmp_neq_mh_exec,
9118 [INSTR_JMP_NEQ_HM] = instr_jmp_neq_hm_exec,
9119 [INSTR_JMP_NEQ_HH] = instr_jmp_neq_hh_exec,
9120 [INSTR_JMP_NEQ_I] = instr_jmp_neq_i_exec,
9122 [INSTR_JMP_LT] = instr_jmp_lt_exec,
9123 [INSTR_JMP_LT_MH] = instr_jmp_lt_mh_exec,
9124 [INSTR_JMP_LT_HM] = instr_jmp_lt_hm_exec,
9125 [INSTR_JMP_LT_HH] = instr_jmp_lt_hh_exec,
9126 [INSTR_JMP_LT_MI] = instr_jmp_lt_mi_exec,
9127 [INSTR_JMP_LT_HI] = instr_jmp_lt_hi_exec,
9129 [INSTR_JMP_GT] = instr_jmp_gt_exec,
9130 [INSTR_JMP_GT_MH] = instr_jmp_gt_mh_exec,
9131 [INSTR_JMP_GT_HM] = instr_jmp_gt_hm_exec,
9132 [INSTR_JMP_GT_HH] = instr_jmp_gt_hh_exec,
9133 [INSTR_JMP_GT_MI] = instr_jmp_gt_mi_exec,
9134 [INSTR_JMP_GT_HI] = instr_jmp_gt_hi_exec,
9136 [INSTR_RETURN] = instr_return_exec,
/* Fetch the current thread's instruction and dispatch it through
 * instruction_table. No bounds/NULL check: the table is assumed complete
 * for all types produced by translation.
 */
9140 instr_exec(struct rte_swx_pipeline *p)
9142 struct thread *t = &p->threads[p->thread_id];
9143 struct instruction *ip = t->ip;
9144 instr_exec_t instr = instruction_table[ip->type];
/* Look up an action by name; returns NULL when not found. */
9152 static struct action *
9153 action_find(struct rte_swx_pipeline *p, const char *name)
9155 struct action *elem;
9160 TAILQ_FOREACH(elem, &p->actions, node)
9161 if (strcmp(elem->name, name) == 0)
/* Look up an action by numeric ID; returns NULL when not found. */
9167 static struct action *
9168 action_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
9170 struct action *action = NULL;
9172 TAILQ_FOREACH(action, &p->actions, node)
9173 if (action->id == id)
/* Find a field of the action's argument struct; NULL if the action takes no
 * arguments (a->st == NULL) or the field does not exist.
 */
9179 static struct field *
9180 action_field_find(struct action *a, const char *name)
9182 return a->st ? struct_type_field_find(a->st, name) : NULL;
/* Parse an action-argument reference of the form "t.<field>"; returns NULL
 * if the "t." prefix is missing or the field is unknown.
 */
9185 static struct field *
9186 action_field_parse(struct action *action, const char *name)
9188 if (name[0] != 't' || name[1] != '.')
9191 return action_field_find(action, &name[2]);
/* Public API: register a new action with the pipeline.
 *
 * Validates the (unique) name and the optional argument struct type
 * (must exist and be fixed-size), allocates the action node plus its
 * per-argument endianness array, translates the instruction strings, and
 * appends the action to the pipeline's action list. Returns 0 or -errno.
 */
9195 rte_swx_pipeline_action_config(struct rte_swx_pipeline *p,
9197 const char *args_struct_type_name,
9198 const char **instructions,
9199 uint32_t n_instructions)
9201 struct struct_type *args_struct_type = NULL;
9207 CHECK_NAME(name, EINVAL);
9208 CHECK(!action_find(p, name), EEXIST);
9210 if (args_struct_type_name) {
9211 CHECK_NAME(args_struct_type_name, EINVAL);
9212 args_struct_type = struct_type_find(p, args_struct_type_name);
9213 CHECK(args_struct_type, EINVAL);
9214 CHECK(!args_struct_type->var_size, EINVAL);
9217 /* Node allocation. */
9218 a = calloc(1, sizeof(struct action));
9220 if (args_struct_type) {
9221 a->args_endianness = calloc(args_struct_type->n_fields, sizeof(int));
9222 if (!a->args_endianness) {
9228 /* Node initialization. */
9229 strcpy(a->name, name);
9230 a->st = args_struct_type;
9231 a->id = p->n_actions;
9233 /* Instruction translation. */
9234 err = instruction_config(p, a, instructions, n_instructions);
9236 free(a->args_endianness);
9241 /* Node add to tailq. */
9242 TAILQ_INSERT_TAIL(&p->actions, a, node);
/* Build step: create the ID-indexed array of per-action instruction
 * pointers used at run time for fast dispatch.
 */
9249 action_build(struct rte_swx_pipeline *p)
9251 struct action *action;
9253 p->action_instructions = calloc(p->n_actions,
9254 sizeof(struct instruction *));
9255 CHECK(p->action_instructions, ENOMEM);
9257 TAILQ_FOREACH(action, &p->actions, node)
9258 p->action_instructions[action->id] = action->instructions;
/* Undo action_build(): release the dispatch array and reset the pointer. */
9264 action_build_free(struct rte_swx_pipeline *p)
9266 free(p->action_instructions);
9267 p->action_instructions = NULL;
/* Free all actions: first the build-time dispatch array, then each action
 * node (and its translated instructions) popped off the tailq.
 */
9271 action_free(struct rte_swx_pipeline *p)
9273 action_build_free(p);
9276 struct action *action;
9278 action = TAILQ_FIRST(&p->actions);
9282 TAILQ_REMOVE(&p->actions, action, node);
9283 free(action->instructions);
/* Count the valid MOV/MOV_HM instructions that read the given action
 * argument (matched by its byte offset in the action args struct) as their
 * source operand. Used by the mov-all+validate optimizer to ensure an
 * argument is consumed by exactly one mov before folding it into a DMA.
 */
9289 action_arg_src_mov_count(struct action *a,
9291 struct instruction *instructions,
9292 struct instruction_data *instruction_data,
9293 uint32_t n_instructions)
9295 uint32_t offset, n_users = 0, i;
9298 (arg_id >= a->st->n_fields) ||
9300 !instruction_data ||
9304 offset = a->st->fields[arg_id].offset / 8;
9306 for (i = 0; i < n_instructions; i++) {
9307 struct instruction *instr = &instructions[i];
9308 struct instruction_data *data = &instruction_data[i];
9310 if (data->invalid ||
9311 ((instr->type != INSTR_MOV) && (instr->type != INSTR_MOV_HM)) ||
9312 instr->mov.src.struct_id ||
9313 (instr->mov.src.offset != offset))
/* Look up a registered table type by name; returns NULL when not found. */
9325 static struct table_type *
9326 table_type_find(struct rte_swx_pipeline *p, const char *name)
9328 struct table_type *elem;
9330 TAILQ_FOREACH(elem, &p->table_types, node)
9331 if (strcmp(elem->name, name) == 0)
/* Pick the table type implementation for a given match type: prefer the
 * caller's recommended type (only if its match type agrees), otherwise fall
 * back to the first registered type with the requested match type.
 */
9337 static struct table_type *
9338 table_type_resolve(struct rte_swx_pipeline *p,
9339 const char *recommended_type_name,
9340 enum rte_swx_table_match_type match_type)
9342 struct table_type *elem;
9344 /* Only consider the recommended type if the match type is correct. */
9345 if (recommended_type_name)
9346 TAILQ_FOREACH(elem, &p->table_types, node)
9347 if (!strcmp(elem->name, recommended_type_name) &&
9348 (elem->match_type == match_type))
9351 /* Ignore the recommended type and get the first element with this match
9354 TAILQ_FOREACH(elem, &p->table_types, node)
9355 if (elem->match_type == match_type)
/* Look up a table by name; returns NULL when not found. */
9361 static struct table *
9362 table_find(struct rte_swx_pipeline *p, const char *name)
9366 TAILQ_FOREACH(elem, &p->tables, node)
9367 if (strcmp(elem->name, name) == 0)
/* Look up a table by numeric ID; returns NULL when not found. */
9373 static struct table *
9374 table_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
9376 struct table *table = NULL;
9378 TAILQ_FOREACH(table, &p->tables, node)
9379 if (table->id == id)
/* Public API: register a table type (name + match type + ops vtable).
 * The name must be unique and the mandatory ops (create, lkp, free) must be
 * provided; the ops struct is copied into the new node by value.
 */
9386 rte_swx_pipeline_table_type_register(struct rte_swx_pipeline *p,
9388 enum rte_swx_table_match_type match_type,
9389 struct rte_swx_table_ops *ops)
9391 struct table_type *elem;
9395 CHECK_NAME(name, EINVAL);
9396 CHECK(!table_type_find(p, name), EEXIST);
9399 CHECK(ops->create, EINVAL);
9400 CHECK(ops->lkp, EINVAL);
9401 CHECK(ops->free, EINVAL);
9403 /* Node allocation. */
9404 elem = calloc(1, sizeof(struct table_type));
9405 CHECK(elem, ENOMEM);
9407 /* Node initialization. */
9408 strcpy(elem->name, name);
9409 elem->match_type = match_type;
9410 memcpy(&elem->ops, ops, sizeof(*ops));
9412 /* Node add to tailq. */
9413 TAILQ_INSERT_TAIL(&p->table_types, elem, node);
/* Derive the overall table match type from the per-field match types:
 * all-exact fields -> EXACT; at most one LPM field is allowed and, when
 * present, every other field must be exact; anything else -> WILDCARD.
 */
9419 table_match_type_resolve(struct rte_swx_match_field_params *fields,
9421 enum rte_swx_table_match_type *match_type)
9423 uint32_t n_fields_em = 0, n_fields_lpm = 0, i;
9425 for (i = 0; i < n_fields; i++) {
9426 struct rte_swx_match_field_params *f = &fields[i];
9428 if (f->match_type == RTE_SWX_TABLE_MATCH_EXACT)
9431 if (f->match_type == RTE_SWX_TABLE_MATCH_LPM)
9435 if ((n_fields_lpm > 1) ||
9436 (n_fields_lpm && (n_fields_em != n_fields - 1)))
9439 *match_type = (n_fields_em == n_fields) ?
9440 RTE_SWX_TABLE_MATCH_EXACT :
9441 RTE_SWX_TABLE_MATCH_WILDCARD;
/* Validate a table's match fields: all fields must come from the same
 * single header or all from metadata (never mixed), none may be
 * variable-size, and no two fields may share the same offset (duplicates).
 * On success, *header is set to the common header (or stays NULL for
 * metadata-only keys).
 */
9447 table_match_fields_check(struct rte_swx_pipeline *p,
9448 struct rte_swx_pipeline_table_params *params,
9449 struct header **header)
9451 struct header *h0 = NULL;
9452 struct field *hf, *mf;
9453 uint32_t *offset = NULL, i;
9456 /* Return if no match fields. */
9457 if (!params->n_fields) {
9458 if (params->fields) {
9469 /* Memory allocation. */
9470 offset = calloc(params->n_fields, sizeof(uint32_t));
9476 /* Check that all the match fields belong to either the same header or
9479 hf = header_field_parse(p, params->fields[0].name, &h0);
9480 mf = metadata_field_parse(p, params->fields[0].name);
9481 if ((!hf && !mf) || (hf && hf->var_size)) {
9486 offset[0] = h0 ? hf->offset : mf->offset;
9488 for (i = 1; i < params->n_fields; i++)
9492 hf = header_field_parse(p, params->fields[i].name, &h);
9493 if (!hf || (h->id != h0->id) || hf->var_size) {
9498 offset[i] = hf->offset;
9500 mf = metadata_field_parse(p, params->fields[i].name);
9506 offset[i] = mf->offset;
9509 /* Check that there are no duplicated match fields. */
9510 for (i = 0; i < params->n_fields; i++) {
9513 for (j = 0; j < i; j++)
9514 if (offset[j] == offset[i]) {
/*
 * Create a pipeline table: validate name/params, check that every listed
 * action exists and that the default action is one of them, resolve the
 * table type from the field match types, then allocate and initialize the
 * table node and append it to the tables tailq. Returns 0 or -errno.
 */
rte_swx_pipeline_table_config(struct rte_swx_pipeline *p,
			      struct rte_swx_pipeline_table_params *params,
			      const char *recommended_table_type_name,
	struct table_type *type;
	struct action *default_action;
	struct header *header = NULL;
	uint32_t action_data_size_max = 0, i;

	CHECK_NAME(name, EINVAL);
	/* Table and selector names share one namespace. */
	CHECK(!table_find(p, name), EEXIST);
	CHECK(!selector_find(p, name), EEXIST);

	CHECK(params, EINVAL);

	status = table_match_fields_check(p, params, &header);

	/* Action checks. */
	CHECK(params->n_actions, EINVAL);
	CHECK(params->action_names, EINVAL);
	for (i = 0; i < params->n_actions; i++) {
		const char *action_name = params->action_names[i];
		uint32_t action_data_size;

		CHECK_NAME(action_name, EINVAL);

		a = action_find(p, action_name);

		/* Track the largest action data size over all table actions. */
		action_data_size = a->st ? a->st->n_bits / 8 : 0;
		if (action_data_size > action_data_size_max)
			action_data_size_max = action_data_size;

	CHECK_NAME(params->default_action_name, EINVAL);
	/* Default action must be one of this table's actions. */
	for (i = 0; i < p->n_actions; i++)
		if (!strcmp(params->action_names[i],
			    params->default_action_name))
	CHECK(i < params->n_actions, EINVAL);
	default_action = action_find(p, params->default_action_name);
	/* Default action data required iff the default action has arguments. */
	CHECK((default_action->st && params->default_action_data) ||
	      !params->default_action_data, EINVAL);

	/* Table type checks. */
	if (recommended_table_type_name)
		CHECK_NAME(recommended_table_type_name, EINVAL);

	if (params->n_fields) {
		enum rte_swx_table_match_type match_type;

		status = table_match_type_resolve(params->fields, params->n_fields, &match_type);

		type = table_type_resolve(p, recommended_table_type_name, match_type);
		CHECK(type, EINVAL);

	/* Memory allocation. */
	t = calloc(1, sizeof(struct table));

	t->fields = calloc(params->n_fields, sizeof(struct match_field));

	t->actions = calloc(params->n_actions, sizeof(struct action *));

	if (action_data_size_max) {
		t->default_action_data = calloc(1, action_data_size_max);
		if (!t->default_action_data) {

	/* Node initialization. */
	strcpy(t->name, name);
	if (args && args[0])
		strcpy(t->args, args);

	for (i = 0; i < params->n_fields; i++) {
		/* NOTE(review): "¶ms" below is mojibake for "&params" —
		 * restore "&params->fields[i]" when fixing the encoding.
		 */
		struct rte_swx_match_field_params *field = ¶ms->fields[i];
		struct match_field *f = &t->fields[i];

		f->match_type = field->match_type;
			header_field_parse(p, field->name, NULL) :
			metadata_field_parse(p, field->name);
	t->n_fields = params->n_fields;

	for (i = 0; i < params->n_actions; i++)
		t->actions[i] = action_find(p, params->action_names[i]);
	t->default_action = default_action;
	if (default_action->st)
		memcpy(t->default_action_data,
		       params->default_action_data,
		       default_action->st->n_bits / 8);
	t->n_actions = params->n_actions;
	t->default_action_is_const = params->default_action_is_const;
	t->action_data_size_max = action_data_size_max;

	t->id = p->n_tables;

	/* Node add to tailq. */
	TAILQ_INSERT_TAIL(&p->tables, t, node);
/*
 * Build the rte_swx_table_params handed to the table type's create() op:
 * key offset/size spanning from the lowest-offset to the highest-offset
 * match field, a byte mask covering exactly the match fields, the largest
 * action data size, and the table size. Caller frees via table_params_free().
 */
static struct rte_swx_table_params *
table_params_get(struct table *table)
	struct rte_swx_table_params *params;
	struct field *first, *last;
	uint32_t key_size, key_offset, action_data_size, i;

	/* Memory allocation. */
	params = calloc(1, sizeof(struct rte_swx_table_params));

	/* Find first (smallest offset) and last (biggest offset) match fields. */
	first = table->fields[0].field;
	last = table->fields[0].field;

	for (i = 0; i < table->n_fields; i++) {
		struct field *f = table->fields[i].field;

		if (f->offset < first->offset)

		if (f->offset > last->offset)

	/* Key offset and size (bit offsets converted to bytes). */
	key_offset = first->offset / 8;
	key_size = (last->offset + last->n_bits - first->offset) / 8;

	/* Memory allocation. */
	key_mask = calloc(1, key_size);

	/* Set mask bytes only where an actual match field lives; gaps between
	 * fields stay zero (don't-care).
	 */
	for (i = 0; i < table->n_fields; i++) {
		struct field *f = table->fields[i].field;
		uint32_t start = (f->offset - first->offset) / 8;
		size_t size = f->n_bits / 8;

		memset(&key_mask[start], 0xFF, size);

	/* Action data size: maximum over all table actions. */
	action_data_size = 0;
	for (i = 0; i < table->n_actions; i++) {
		struct action *action = table->actions[i];
		uint32_t ads = action->st ? action->st->n_bits / 8 : 0;

		if (ads > action_data_size)
			action_data_size = ads;

	params->match_type = table->type->match_type;
	params->key_size = key_size;
	params->key_offset = key_offset;
	params->key_mask0 = key_mask;
	params->action_data_size = action_data_size;
	params->n_keys_max = table->size;
/* Free a params object built by table_params_get(), including its key mask. */
table_params_free(struct rte_swx_table_params *params)
	free(params->key_mask0);
/*
 * Lookup callback for keyless (stub) tables: no match is ever attempted;
 * always reports lookup complete (return 1 == DONE) with a miss.
 */
table_stub_lkp(void *table __rte_unused,
	       void *mailbox __rte_unused,
	       uint8_t **key __rte_unused,
	       uint64_t *action_id __rte_unused,
	       uint8_t **action_data __rte_unused,
	return 1; /* DONE. */
/*
 * Build-time table setup: allocate per-pipeline table statistics (one
 * per-action packet counter array per table) and per-thread table runtime
 * (lookup function, mailbox, key pointer). Stub tables (no type) get
 * table_stub_lkp and no mailbox.
 */
table_build(struct rte_swx_pipeline *p)
	/* Per pipeline: table statistics. */
	p->table_stats = calloc(p->n_tables, sizeof(struct table_statistics));
	CHECK(p->table_stats, ENOMEM);

	for (i = 0; i < p->n_tables; i++) {
		p->table_stats[i].n_pkts_action = calloc(p->n_actions, sizeof(uint64_t));
		CHECK(p->table_stats[i].n_pkts_action, ENOMEM);

	/* Per thread: table run-time. */
	for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
		struct thread *t = &p->threads[i];
		struct table *table;

		t->tables = calloc(p->n_tables, sizeof(struct table_runtime));
		CHECK(t->tables, ENOMEM);

		TAILQ_FOREACH(table, &p->tables, node) {
			struct table_runtime *r = &t->tables[table->id];

				size = table->type->ops.mailbox_size_get();

				r->func = table->type->ops.lkp;

				r->mailbox = calloc(1, size);
				CHECK(r->mailbox, ENOMEM);

				/* Key lives in the header struct for header-based
				 * tables, else in the meta-data struct.
				 */
				r->key = table->header ?
					&t->structs[table->header->struct_id] :
					&t->structs[p->metadata_struct_id];

				r->func = table_stub_lkp;
/* Undo table_build(): free per-thread table runtime and per-pipeline stats. */
table_build_free(struct rte_swx_pipeline *p)
	for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
		struct thread *t = &p->threads[i];

		for (j = 0; j < p->n_tables; j++) {
			struct table_runtime *r = &t->tables[j];

	if (p->table_stats) {
		for (i = 0; i < p->n_tables; i++)
			free(p->table_stats[i].n_pkts_action);

		free(p->table_stats);
/* Free all table nodes and all registered table types (full teardown). */
table_free(struct rte_swx_pipeline *p)
	table_build_free(p);

		elem = TAILQ_FIRST(&p->tables);

		TAILQ_REMOVE(&p->tables, elem, node);

		free(elem->actions);
		free(elem->default_action_data);

		/* Table types. */
		struct table_type *elem;

		elem = TAILQ_FIRST(&p->table_types);

		TAILQ_REMOVE(&p->table_types, elem, node);
/* Look up a selector table by name; linear scan of the selectors tailq. */
static struct selector *
selector_find(struct rte_swx_pipeline *p, const char *name)
	TAILQ_FOREACH(s, &p->selectors, node)
		if (strcmp(s->name, name) == 0)
/* Look up a selector by its numeric ID; returns NULL if not found. */
static struct selector *
selector_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
	struct selector *s = NULL;

	TAILQ_FOREACH(s, &p->selectors, node)
/*
 * Validate selector fields: all must belong to the same header or all be
 * meta-data fields (mirrors table_match_fields_check), and no field name
 * may be listed twice. On success *header is the common header or NULL.
 */
selector_fields_check(struct rte_swx_pipeline *p,
		      struct rte_swx_pipeline_selector_params *params,
		      struct header **header)
	struct header *h0 = NULL;
	struct field *hf, *mf;

	/* Return if no selector fields. */
	if (!params->n_selector_fields || !params->selector_field_names)

	/* Check that all the selector fields either belong to the same header
	 * or are all meta-data fields.
	 */
	hf = header_field_parse(p, params->selector_field_names[0], &h0);
	mf = metadata_field_parse(p, params->selector_field_names[0]);

	for (i = 1; i < params->n_selector_fields; i++)

			hf = header_field_parse(p, params->selector_field_names[i], &h);
			if (!hf || (h->id != h0->id))

			mf = metadata_field_parse(p, params->selector_field_names[i]);

	/* Check that there are no duplicated match fields (by name here,
	 * unlike the offset-based check used for regular tables).
	 */
	for (i = 0; i < params->n_selector_fields; i++) {
		const char *field_name = params->selector_field_names[i];

		for (j = i + 1; j < params->n_selector_fields; j++)
			if (!strcmp(params->selector_field_names[j], field_name))
/*
 * Create a selector table: validate the group_id/member_id meta-data
 * fields, the selector fields and the group limits, then allocate and
 * initialize the selector node and append it to the selectors tailq.
 * Returns 0 or -errno.
 */
rte_swx_pipeline_selector_config(struct rte_swx_pipeline *p,
				 struct rte_swx_pipeline_selector_params *params)
	struct header *selector_header = NULL;
	struct field *group_id_field, *member_id_field;

	CHECK_NAME(name, EINVAL);
	/* Table and selector names share one namespace. */
	CHECK(!table_find(p, name), EEXIST);
	CHECK(!selector_find(p, name), EEXIST);

	CHECK(params, EINVAL);

	/* group_id must be a meta-data field. */
	CHECK_NAME(params->group_id_field_name, EINVAL);
	group_id_field = metadata_field_parse(p, params->group_id_field_name);
	CHECK(group_id_field, EINVAL);

	for (i = 0; i < params->n_selector_fields; i++) {
		const char *field_name = params->selector_field_names[i];

		CHECK_NAME(field_name, EINVAL);

	status = selector_fields_check(p, params, &selector_header);

	/* member_id must be a meta-data field. */
	CHECK_NAME(params->member_id_field_name, EINVAL);
	member_id_field = metadata_field_parse(p, params->member_id_field_name);
	CHECK(member_id_field, EINVAL);

	CHECK(params->n_groups_max, EINVAL);

	CHECK(params->n_members_per_group_max, EINVAL);

	/* Memory allocation. */
	s = calloc(1, sizeof(struct selector));

	s->selector_fields = calloc(params->n_selector_fields, sizeof(struct field *));
	if (!s->selector_fields) {

	/* Node initialization. */
	strcpy(s->name, name);

	s->group_id_field = group_id_field;

	for (i = 0; i < params->n_selector_fields; i++) {
		const char *field_name = params->selector_field_names[i];

		s->selector_fields[i] = selector_header ?
			header_field_parse(p, field_name, NULL) :
			metadata_field_parse(p, field_name);

	s->n_selector_fields = params->n_selector_fields;

	s->selector_header = selector_header;

	s->member_id_field = member_id_field;

	s->n_groups_max = params->n_groups_max;

	s->n_members_per_group_max = params->n_members_per_group_max;

	s->id = p->n_selectors;

	/* Node add to tailq. */
	TAILQ_INSERT_TAIL(&p->selectors, s, node);

	/* Error path cleanup. */
	free(s->selector_fields);
/* Free a params object built by selector_table_params_get(), incl. its mask. */
selector_params_free(struct rte_swx_table_selector_params *params)
	free(params->selector_mask);
10057 static struct rte_swx_table_selector_params *
10058 selector_table_params_get(struct selector *s)
10060 struct rte_swx_table_selector_params *params = NULL;
10061 struct field *first, *last;
10064 /* Memory allocation. */
10065 params = calloc(1, sizeof(struct rte_swx_table_selector_params));
10070 params->group_id_offset = s->group_id_field->offset / 8;
10072 /* Find first (smallest offset) and last (biggest offset) selector fields. */
10073 first = s->selector_fields[0];
10074 last = s->selector_fields[0];
10076 for (i = 0; i < s->n_selector_fields; i++) {
10077 struct field *f = s->selector_fields[i];
10079 if (f->offset < first->offset)
10082 if (f->offset > last->offset)
10086 /* Selector offset and size. */
10087 params->selector_offset = first->offset / 8;
10088 params->selector_size = (last->offset + last->n_bits - first->offset) / 8;
10090 /* Memory allocation. */
10091 params->selector_mask = calloc(1, params->selector_size);
10092 if (!params->selector_mask)
10095 /* Selector mask. */
10096 for (i = 0; i < s->n_selector_fields; i++) {
10097 struct field *f = s->selector_fields[i];
10098 uint32_t start = (f->offset - first->offset) / 8;
10099 size_t size = f->n_bits / 8;
10101 memset(¶ms->selector_mask[start], 0xFF, size);
10105 params->member_id_offset = s->member_id_field->offset / 8;
10107 /* Maximum number of groups. */
10108 params->n_groups_max = s->n_groups_max;
10110 /* Maximum number of members per group. */
10111 params->n_members_per_group_max = s->n_members_per_group_max;
10116 selector_params_free(params);
/* Undo selector_build(): free per-thread selector runtime and stats. */
selector_build_free(struct rte_swx_pipeline *p)
	for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
		struct thread *t = &p->threads[i];

		for (j = 0; j < p->n_selectors; j++) {
			struct selector_runtime *r = &t->selectors[j];

		free(t->selectors);
		t->selectors = NULL;

	free(p->selector_stats);
	p->selector_stats = NULL;
/*
 * Build-time selector setup: allocate per-pipeline selector statistics and
 * per-thread selector runtime (mailbox plus pointers to the group_id,
 * selector and member_id buffers). On any failure, tears down via
 * selector_build_free().
 */
selector_build(struct rte_swx_pipeline *p)
	/* Per pipeline: selector statistics. */
	p->selector_stats = calloc(p->n_selectors, sizeof(struct selector_statistics));
	if (!p->selector_stats) {

	/* Per thread: selector run-time. */
	for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
		struct thread *t = &p->threads[i];
		struct selector *s;

		t->selectors = calloc(p->n_selectors, sizeof(struct selector_runtime));
		if (!t->selectors) {

		TAILQ_FOREACH(s, &p->selectors, node) {
			struct selector_runtime *r = &t->selectors[s->id];

			size = rte_swx_table_selector_mailbox_size_get();

			r->mailbox = calloc(1, size);

			/* r->group_id_buffer: group_id is a meta-data field. */
			r->group_id_buffer = &t->structs[p->metadata_struct_id];

			/* r->selector_buffer: header struct for header-based
			 * selectors, else the meta-data struct.
			 */
			r->selector_buffer = s->selector_header ?
				&t->structs[s->selector_header->struct_id] :
				&t->structs[p->metadata_struct_id];

			/* r->member_id_buffer: member_id is a meta-data field. */
			r->member_id_buffer = &t->structs[p->metadata_struct_id];

	/* Error path cleanup. */
	selector_build_free(p);
/* Free all selector nodes (full teardown of the selectors tailq). */
selector_free(struct rte_swx_pipeline *p)
	selector_build_free(p);

	/* Selector tables. */
		struct selector *elem;

		elem = TAILQ_FIRST(&p->selectors);

		TAILQ_REMOVE(&p->selectors, elem, node);
		free(elem->selector_fields);
/*
 * Build the table state array: one rte_swx_table_state per regular table
 * followed by one per selector (selector state at index n_tables + id).
 * For typed tables, creates the underlying table object and copies the
 * default action data/ID; stub tables get no object.
 */
table_state_build(struct rte_swx_pipeline *p)
	struct table *table;
	struct selector *s;

	p->table_state = calloc(p->n_tables + p->n_selectors,
				sizeof(struct rte_swx_table_state));
	CHECK(p->table_state, ENOMEM);

	TAILQ_FOREACH(table, &p->tables, node) {
		struct rte_swx_table_state *ts = &p->table_state[table->id];

			struct rte_swx_table_params *params;

			/* ts->obj. */
			params = table_params_get(table);
			CHECK(params, ENOMEM);

			ts->obj = table->type->ops.create(params,

			table_params_free(params);
			CHECK(ts->obj, ENODEV);

		/* ts->default_action_data. */
		if (table->action_data_size_max) {
			ts->default_action_data =
				malloc(table->action_data_size_max);
			CHECK(ts->default_action_data, ENOMEM);

			memcpy(ts->default_action_data,
			       table->default_action_data,
			       table->action_data_size_max);

		/* ts->default_action_id. */
		ts->default_action_id = table->default_action->id;

	TAILQ_FOREACH(s, &p->selectors, node) {
		/* Selector state lives after the regular table states. */
		struct rte_swx_table_state *ts = &p->table_state[p->n_tables + s->id];
		struct rte_swx_table_selector_params *params;

		/* ts->obj. */
		params = selector_table_params_get(s);
		CHECK(params, ENOMEM);

		ts->obj = rte_swx_table_selector_create(params, NULL, p->numa_node);

		selector_params_free(params);
		CHECK(ts->obj, ENODEV);
/* Free the table state array: per-table objects + default action data,
 * then per-selector objects, then the array itself. Safe to call when the
 * array was never built.
 */
table_state_build_free(struct rte_swx_pipeline *p)
	if (!p->table_state)

	for (i = 0; i < p->n_tables; i++) {
		struct rte_swx_table_state *ts = &p->table_state[i];
		struct table *table = table_find_by_id(p, i);

		/* Only typed tables own an underlying object. */
		if (table->type && ts->obj)
			table->type->ops.free(ts->obj);

		/* ts->default_action_data. */
		free(ts->default_action_data);

	for (i = 0; i < p->n_selectors; i++) {
		struct rte_swx_table_state *ts = &p->table_state[p->n_tables + i];

			rte_swx_table_selector_free(ts->obj);

	free(p->table_state);
	p->table_state = NULL;
/* Thin wrapper: table state teardown is entirely the build-free path. */
table_state_free(struct rte_swx_pipeline *p)
	table_state_build_free(p);
/* Look up a register array by name; linear scan of the regarrays tailq. */
static struct regarray *
regarray_find(struct rte_swx_pipeline *p, const char *name)
	struct regarray *elem;

	TAILQ_FOREACH(elem, &p->regarrays, node)
		if (!strcmp(elem->name, name))
/* Look up a register array by its numeric ID; returns NULL if not found. */
static struct regarray *
regarray_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
	struct regarray *elem = NULL;

	TAILQ_FOREACH(elem, &p->regarrays, node)
		if (elem->id == id)
/*
 * Register a register array with the pipeline. The requested size is
 * rounded up to the next power of two (enables mask-based indexing at run
 * time). Returns 0 or -errno via CHECK.
 */
rte_swx_pipeline_regarray_config(struct rte_swx_pipeline *p,
	struct regarray *r;

	CHECK_NAME(name, EINVAL);
	CHECK(!regarray_find(p, name), EEXIST);

	CHECK(size, EINVAL);
	/* Power-of-two size so the runtime can index with size_mask. */
	size = rte_align32pow2(size);

	/* Memory allocation. */
	r = calloc(1, sizeof(struct regarray));

	/* Node initialization. strcpy is safe: CHECK_NAME bounded the length. */
	strcpy(r->name, name);
	r->init_val = init_val;

	r->id = p->n_regarrays;

	/* Node add to tailq. */
	TAILQ_INSERT_TAIL(&p->regarrays, r, node);
/*
 * Build-time register array setup: allocate the runtime array (one entry
 * per regarray) and, per array, a cache-aligned uint64_t buffer on the
 * pipeline's NUMA node, optionally filled with the configured init value.
 */
regarray_build(struct rte_swx_pipeline *p)
	struct regarray *regarray;

	if (!p->n_regarrays)

	p->regarray_runtime = calloc(p->n_regarrays, sizeof(struct regarray_runtime));
	CHECK(p->regarray_runtime, ENOMEM);

	TAILQ_FOREACH(regarray, &p->regarrays, node) {
		struct regarray_runtime *r = &p->regarray_runtime[regarray->id];

		r->regarray = env_malloc(regarray->size * sizeof(uint64_t),
					 RTE_CACHE_LINE_SIZE,
		CHECK(r->regarray, ENOMEM);

		/* env_malloc zero-fills, so only non-zero init values need a loop. */
		if (regarray->init_val)
			for (i = 0; i < regarray->size; i++)
				r->regarray[i] = regarray->init_val;

		/* Valid because size was rounded up to a power of two. */
		r->size_mask = regarray->size - 1;
/* Undo regarray_build(): free each per-array buffer, then the runtime array. */
regarray_build_free(struct rte_swx_pipeline *p)
	if (!p->regarray_runtime)

	for (i = 0; i < p->n_regarrays; i++) {
		struct regarray *regarray = regarray_find_by_id(p, i);
		struct regarray_runtime *r = &p->regarray_runtime[i];

		env_free(r->regarray, regarray->size * sizeof(uint64_t));

	free(p->regarray_runtime);
	p->regarray_runtime = NULL;
/* Free all register array nodes (full teardown of the regarrays tailq). */
regarray_free(struct rte_swx_pipeline *p)
	regarray_build_free(p);

		struct regarray *elem;

		elem = TAILQ_FIRST(&p->regarrays);

		TAILQ_REMOVE(&p->regarrays, elem, node);
/* Look up a meter profile by name; linear scan of the meter_profiles tailq. */
static struct meter_profile *
meter_profile_find(struct rte_swx_pipeline *p, const char *name)
	struct meter_profile *elem;

	TAILQ_FOREACH(elem, &p->meter_profiles, node)
		if (!strcmp(elem->name, name))
/* Look up a meter array by name; linear scan of the metarrays tailq. */
static struct metarray *
metarray_find(struct rte_swx_pipeline *p, const char *name)
	struct metarray *elem;

	TAILQ_FOREACH(elem, &p->metarrays, node)
		if (!strcmp(elem->name, name))
/* Look up a meter array by its numeric ID; returns NULL if not found. */
static struct metarray *
metarray_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
	struct metarray *elem = NULL;

	TAILQ_FOREACH(elem, &p->metarrays, node)
		if (elem->id == id)
/*
 * Register a meter array with the pipeline. As with register arrays, the
 * size is rounded up to the next power of two for mask-based indexing.
 * Returns 0 or -errno via CHECK.
 */
rte_swx_pipeline_metarray_config(struct rte_swx_pipeline *p,
	struct metarray *m;

	CHECK_NAME(name, EINVAL);
	CHECK(!metarray_find(p, name), EEXIST);

	CHECK(size, EINVAL);
	/* Power-of-two size so the runtime can index with size_mask. */
	size = rte_align32pow2(size);

	/* Memory allocation. */
	m = calloc(1, sizeof(struct metarray));

	/* Node initialization. strcpy is safe: CHECK_NAME bounded the length. */
	strcpy(m->name, name);

	m->id = p->n_metarrays;

	/* Node add to tailq. */
	TAILQ_INSERT_TAIL(&p->metarrays, m, node);
/* Built-in fallback trTCM profile every meter starts out attached to
 * (minimal 1-byte-per-period rates; real profiles are added via the ctl API).
 */
struct meter_profile meter_profile_default = {
		.cir_bytes_per_period = 1,
		.pir_bytes_per_period = 1,
/* Reset a meter to the default profile with color mask GREEN, and bump the
 * default profile's user refcount.
 */
meter_init(struct meter *m)
	memset(m, 0, sizeof(struct meter));
	rte_meter_trtcm_config(&m->m, &meter_profile_default.profile);
	m->profile = &meter_profile_default;
	m->color_mask = RTE_COLOR_GREEN;

	meter_profile_default.n_users++;
/*
 * Build-time meter array setup: allocate the runtime array and, per meter
 * array, a cache-aligned buffer of struct meter on the pipeline's NUMA
 * node, initializing every meter to the default profile.
 */
metarray_build(struct rte_swx_pipeline *p)
	struct metarray *m;

	if (!p->n_metarrays)

	p->metarray_runtime = calloc(p->n_metarrays, sizeof(struct metarray_runtime));
	CHECK(p->metarray_runtime, ENOMEM);

	TAILQ_FOREACH(m, &p->metarrays, node) {
		struct metarray_runtime *r = &p->metarray_runtime[m->id];

		r->metarray = env_malloc(m->size * sizeof(struct meter),
					 RTE_CACHE_LINE_SIZE,
		CHECK(r->metarray, ENOMEM);

		for (i = 0; i < m->size; i++)
			meter_init(&r->metarray[i]);

		/* Valid because size was rounded up to a power of two. */
		r->size_mask = m->size - 1;
/* Undo metarray_build(): free each per-array buffer, then the runtime array. */
metarray_build_free(struct rte_swx_pipeline *p)
	if (!p->metarray_runtime)

	for (i = 0; i < p->n_metarrays; i++) {
		struct metarray *m = metarray_find_by_id(p, i);
		struct metarray_runtime *r = &p->metarray_runtime[i];

		env_free(r->metarray, m->size * sizeof(struct meter));

	free(p->metarray_runtime);
	p->metarray_runtime = NULL;
/* Free all meter array nodes and all meter profiles (full teardown). */
metarray_free(struct rte_swx_pipeline *p)
	metarray_build_free(p);

	/* Meter arrays. */
		struct metarray *elem;

		elem = TAILQ_FIRST(&p->metarrays);

		TAILQ_REMOVE(&p->metarrays, elem, node);

	/* Meter profiles. */
		struct meter_profile *elem;

		elem = TAILQ_FIRST(&p->meter_profiles);

		TAILQ_REMOVE(&p->meter_profiles, elem, node);
/*
 * Allocate and initialize an empty pipeline object: zeroed, with every
 * object tailq initialized, struct ID 0 reserved for action data, and the
 * NUMA node recorded for later build-time allocations.
 */
rte_swx_pipeline_config(struct rte_swx_pipeline **p, int numa_node)
	struct rte_swx_pipeline *pipeline;

	/* Check input parameters. */

	/* Memory allocation. */
	pipeline = calloc(1, sizeof(struct rte_swx_pipeline));
	CHECK(pipeline, ENOMEM);

	/* Initialization. */
	TAILQ_INIT(&pipeline->struct_types);
	TAILQ_INIT(&pipeline->port_in_types);
	TAILQ_INIT(&pipeline->ports_in);
	TAILQ_INIT(&pipeline->port_out_types);
	TAILQ_INIT(&pipeline->ports_out);
	TAILQ_INIT(&pipeline->extern_types);
	TAILQ_INIT(&pipeline->extern_objs);
	TAILQ_INIT(&pipeline->extern_funcs);
	TAILQ_INIT(&pipeline->headers);
	TAILQ_INIT(&pipeline->actions);
	TAILQ_INIT(&pipeline->table_types);
	TAILQ_INIT(&pipeline->tables);
	TAILQ_INIT(&pipeline->selectors);
	TAILQ_INIT(&pipeline->regarrays);
	TAILQ_INIT(&pipeline->meter_profiles);
	TAILQ_INIT(&pipeline->metarrays);

	pipeline->n_structs = 1; /* Struct 0 is reserved for action_data. */
	pipeline->numa_node = numa_node;
/* Free a pipeline and everything it owns (instructions, table state,
 * externs, ...; several free calls elided in this listing).
 */
rte_swx_pipeline_free(struct rte_swx_pipeline *p)
	free(p->instructions);

	table_state_free(p);

	extern_func_free(p);
	extern_obj_free(p);
/*
 * Translate the pipeline-level instruction list (NULL action context) and
 * reset every thread's instruction pointer to the start of the program.
 */
rte_swx_pipeline_instructions_config(struct rte_swx_pipeline *p,
				     const char **instructions,
				     uint32_t n_instructions)
	err = instruction_config(p, NULL, instructions, n_instructions);

	/* Thread instruction pointer reset. */
	for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
		struct thread *t = &p->threads[i];

		thread_ip_reset(p, t);
/*
 * One-shot pipeline build: run every per-subsystem build step in dependency
 * order; on any failure, unwind by calling the *_build_free functions in
 * reverse order. May only be called once (build_done guard).
 */
rte_swx_pipeline_build(struct rte_swx_pipeline *p)
	CHECK(p->build_done == 0, EEXIST);

	status = port_in_build(p);

	status = port_out_build(p);

	status = struct_build(p);

	status = extern_obj_build(p);

	status = extern_func_build(p);

	status = header_build(p);

	status = metadata_build(p);

	status = action_build(p);

	status = table_build(p);

	status = selector_build(p);

	status = table_state_build(p);

	status = regarray_build(p);

	status = metarray_build(p);

	/* Error path: unwind in reverse build order. */
	metarray_build_free(p);
	regarray_build_free(p);
	table_state_build_free(p);
	selector_build_free(p);
	table_build_free(p);
	action_build_free(p);
	metadata_build_free(p);
	header_build_free(p);
	extern_func_build_free(p);
	extern_obj_build_free(p);
	port_out_build_free(p);
	port_in_build_free(p);
	struct_build_free(p);
/* Data-plane entry point: execute n_instructions steps of the pipeline. */
rte_swx_pipeline_run(struct rte_swx_pipeline *p, uint32_t n_instructions)
	for (i = 0; i < n_instructions; i++)
/* Flush every output port that provides a flush callback. */
rte_swx_pipeline_flush(struct rte_swx_pipeline *p)
	for (i = 0; i < p->n_ports_out; i++) {
		struct port_out_runtime *port = &p->out[i];

			port->flush(port->obj);
/*
 * Ctl API: fill in pipeline-level counts (ports, actions, tables,
 * selectors, regarrays, metarrays). Actions and tables are counted by
 * walking their tailqs.
 */
rte_swx_ctl_pipeline_info_get(struct rte_swx_pipeline *p,
			      struct rte_swx_ctl_pipeline_info *pipeline)
	struct action *action;
	struct table *table;
	uint32_t n_actions = 0, n_tables = 0;

	if (!p || !pipeline)

	TAILQ_FOREACH(action, &p->actions, node)

	TAILQ_FOREACH(table, &p->tables, node)

	pipeline->n_ports_in = p->n_ports_in;
	pipeline->n_ports_out = p->n_ports_out;
	pipeline->n_actions = n_actions;
	pipeline->n_tables = n_tables;
	pipeline->n_selectors = p->n_selectors;
	pipeline->n_regarrays = p->n_regarrays;
	pipeline->n_metarrays = p->n_metarrays;
/* Ctl API: report the NUMA node the pipeline was configured on. */
rte_swx_ctl_pipeline_numa_node_get(struct rte_swx_pipeline *p, int *numa_node)
	if (!p || !numa_node)

	*numa_node = p->numa_node;
/* Ctl API: name and argument count of the action with the given ID.
 * n_args is 0 for actions without an argument struct.
 */
rte_swx_ctl_action_info_get(struct rte_swx_pipeline *p,
			    uint32_t action_id,
			    struct rte_swx_ctl_action_info *action)
	struct action *a = NULL;

	if (!p || (action_id >= p->n_actions) || !action)

	a = action_find_by_id(p, action_id);

	strcpy(action->name, a->name);
	action->n_args = a->st ? a->st->n_fields : 0;
/* Ctl API: name, bit width and endianness of one action argument. */
rte_swx_ctl_action_arg_info_get(struct rte_swx_pipeline *p,
				uint32_t action_id,
				uint32_t action_arg_id,
				struct rte_swx_ctl_action_arg_info *action_arg)
	struct action *a = NULL;
	struct field *arg = NULL;

	if (!p || (action_id >= p->n_actions) || !action_arg)

	a = action_find_by_id(p, action_id);
	/* Actions without an argument struct have no arguments. */
	if (!a || !a->st || (action_arg_id >= a->st->n_fields))

	arg = &a->st->fields[action_arg_id];
	strcpy(action_arg->name, arg->name);
	action_arg->n_bits = arg->n_bits;
	action_arg->is_network_byte_order = a->args_endianness[action_arg_id];
/* Ctl API: descriptive info (name, args, field/action counts, size) for one table. */
rte_swx_ctl_table_info_get(struct rte_swx_pipeline *p,
			   struct rte_swx_ctl_table_info *table)
	struct table *t = NULL;

	t = table_find_by_id(p, table_id);

	strcpy(table->name, t->name);
	strcpy(table->args, t->args);
	table->n_match_fields = t->n_fields;
	table->n_actions = t->n_actions;
	table->default_action_is_const = t->default_action_is_const;
	table->size = t->size;
/* Ctl API: match type, header/meta-data location, bit width and offset of
 * one table match field.
 */
rte_swx_ctl_table_match_field_info_get(struct rte_swx_pipeline *p,
				       uint32_t match_field_id,
				       struct rte_swx_ctl_table_match_field_info *match_field)
	struct match_field *f;

	if (!p || (table_id >= p->n_tables) || !match_field)

	t = table_find_by_id(p, table_id);
	if (!t || (match_field_id >= t->n_fields))

	f = &t->fields[match_field_id];
	match_field->match_type = f->match_type;
	/* All fields share the table's header/meta-data category. */
	match_field->is_header = t->header ? 1 : 0;
	match_field->n_bits = f->field->n_bits;
	match_field->offset = f->field->offset;
/* Ctl API: translate a table-local action index into the global action ID. */
rte_swx_ctl_table_action_info_get(struct rte_swx_pipeline *p,
				  uint32_t table_action_id,
				  struct rte_swx_ctl_table_action_info *table_action)
	if (!p || (table_id >= p->n_tables) || !table_action)

	t = table_find_by_id(p, table_id);
	if (!t || (table_action_id >= t->n_actions))

	table_action->action_id = t->actions[table_action_id]->id;
/* Ctl API: copy out the ops of the table's underlying table type. */
rte_swx_ctl_table_ops_get(struct rte_swx_pipeline *p,
			  struct rte_swx_table_ops *table_ops,
	if (!p || (table_id >= p->n_tables))

	t = table_find_by_id(p, table_id);

		memcpy(table_ops, &t->type->ops, sizeof(*table_ops));
/* Ctl API: descriptive info (name, field count, group limits) for one selector. */
rte_swx_ctl_selector_info_get(struct rte_swx_pipeline *p,
			      uint32_t selector_id,
			      struct rte_swx_ctl_selector_info *selector)
	struct selector *s = NULL;

	if (!p || !selector)

	s = selector_find_by_id(p, selector_id);

	strcpy(selector->name, s->name);

	selector->n_selector_fields = s->n_selector_fields;
	selector->n_groups_max = s->n_groups_max;
	selector->n_members_per_group_max = s->n_members_per_group_max;
/* Ctl API: describe the selector's group_id field. Reported as an EXACT
 * meta-data field (is_header = 0; group_id is always meta-data).
 */
rte_swx_ctl_selector_group_id_field_info_get(struct rte_swx_pipeline *p,
					     uint32_t selector_id,
					     struct rte_swx_ctl_table_match_field_info *field)
	struct selector *s;

	if (!p || (selector_id >= p->n_selectors) || !field)

	s = selector_find_by_id(p, selector_id);

	field->match_type = RTE_SWX_TABLE_MATCH_EXACT;
	field->is_header = 0;
	field->n_bits = s->group_id_field->n_bits;
	field->offset = s->group_id_field->offset;
/* Ctl API: describe one selector field (reported with EXACT match type). */
rte_swx_ctl_selector_field_info_get(struct rte_swx_pipeline *p,
				    uint32_t selector_id,
				    uint32_t selector_field_id,
				    struct rte_swx_ctl_table_match_field_info *field)
	struct selector *s;

	if (!p || (selector_id >= p->n_selectors) || !field)

	s = selector_find_by_id(p, selector_id);
	if (!s || (selector_field_id >= s->n_selector_fields))

	f = s->selector_fields[selector_field_id];
	field->match_type = RTE_SWX_TABLE_MATCH_EXACT;
	field->is_header = s->selector_header ? 1 : 0;
	field->n_bits = f->n_bits;
	field->offset = f->offset;
/* Ctl API: describe the selector's member_id field. Reported as an EXACT
 * meta-data field (is_header = 0; member_id is always meta-data).
 */
rte_swx_ctl_selector_member_id_field_info_get(struct rte_swx_pipeline *p,
					      uint32_t selector_id,
					      struct rte_swx_ctl_table_match_field_info *field)
	struct selector *s;

	if (!p || (selector_id >= p->n_selectors) || !field)

	s = selector_find_by_id(p, selector_id);

	field->match_type = RTE_SWX_TABLE_MATCH_EXACT;
	field->is_header = 0;
	field->n_bits = s->member_id_field->n_bits;
	field->offset = s->member_id_field->offset;
/* Expose the table state array to the caller; only valid after build. */
rte_swx_pipeline_table_state_get(struct rte_swx_pipeline *p,
				 struct rte_swx_table_state **table_state)
	if (!p || !table_state || !p->build_done)

	*table_state = p->table_state;
/* Install a caller-provided table state array; only valid after build.
 * Ownership of the previous array is the caller's concern (pointer swap only).
 */
rte_swx_pipeline_table_state_set(struct rte_swx_pipeline *p,
				 struct rte_swx_table_state *table_state)
	if (!p || !table_state || !p->build_done)

	p->table_state = table_state;
/* Ctl API: read input port stats via the port type's stats_read callback. */
rte_swx_ctl_pipeline_port_in_stats_read(struct rte_swx_pipeline *p,
					struct rte_swx_port_in_stats *stats)
	struct port_in *port;

	port = port_in_find(p, port_id);

	port->type->ops.stats_read(port->obj, stats);
/* Ctl API: read output port stats via the port type's stats_read callback. */
rte_swx_ctl_pipeline_port_out_stats_read(struct rte_swx_pipeline *p,
					 struct rte_swx_port_out_stats *stats)
	struct port_out *port;

	port = port_out_find(p, port_id);

	port->type->ops.stats_read(port->obj, stats);
/*
 * Ctl API: copy one table's statistics into the caller's buffer. The caller
 * must pre-allocate stats->n_pkts_action with room for p->n_actions counters.
 */
rte_swx_ctl_pipeline_table_stats_read(struct rte_swx_pipeline *p,
				      const char *table_name,
				      struct rte_swx_table_stats *stats)
	struct table *table;
	struct table_statistics *table_stats;

	if (!p || !table_name || !table_name[0] || !stats || !stats->n_pkts_action)

	table = table_find(p, table_name);

	table_stats = &p->table_stats[table->id];

	memcpy(stats->n_pkts_action,
	       table_stats->n_pkts_action,
	       p->n_actions * sizeof(uint64_t));

	/* n_pkts_hit is a 2-entry array indexed by hit flag: [1]=hit, [0]=miss. */
	stats->n_pkts_hit = table_stats->n_pkts_hit[1];
	stats->n_pkts_miss = table_stats->n_pkts_hit[0];
/* Ctl API: copy one selector's packet counter into the caller's buffer. */
rte_swx_ctl_pipeline_selector_stats_read(struct rte_swx_pipeline *p,
					 const char *selector_name,
					 struct rte_swx_pipeline_selector_stats *stats)
	struct selector *s;

	if (!p || !selector_name || !selector_name[0] || !stats)

	s = selector_find(p, selector_name);

	stats->n_pkts = p->selector_stats[s->id].n_pkts;
/* Ctl API: name and size of the register array with the given ID. */
rte_swx_ctl_regarray_info_get(struct rte_swx_pipeline *p,
			      uint32_t regarray_id,
			      struct rte_swx_ctl_regarray_info *regarray)
	struct regarray *r;

	if (!p || !regarray)

	r = regarray_find_by_id(p, regarray_id);

	strcpy(regarray->name, r->name);
	regarray->size = r->size;
/* Ctl API: read one register (by array name and bounds-checked index). */
rte_swx_ctl_pipeline_regarray_read(struct rte_swx_pipeline *p,
				   const char *regarray_name,
				   uint32_t regarray_index,
	struct regarray *regarray;
	struct regarray_runtime *r;

	if (!p || !regarray_name || !value)

	regarray = regarray_find(p, regarray_name);
	if (!regarray || (regarray_index >= regarray->size))

	r = &p->regarray_runtime[regarray->id];
	*value = r->regarray[regarray_index];
/*
 * Write one element of a register array by name and index.
 * Mirrors the read variant above; value is passed by value, so no NULL
 * check is needed for it.
 * NOTE(review): error-return lines are elided from this view. No locking
 * is visible here — presumably concurrent data-path access is the
 * caller's concern; confirm against the library's threading model.
 */
11232 rte_swx_ctl_pipeline_regarray_write(struct rte_swx_pipeline *p,
11233 const char *regarray_name,
11234 uint32_t regarray_index,
11237 struct regarray *regarray;
11238 struct regarray_runtime *r;
11240 if (!p || !regarray_name)
11243 regarray = regarray_find(p, regarray_name);
/* Bounds-check the index against the declared array size. */
11244 if (!regarray || (regarray_index >= regarray->size))
11247 r = &p->regarray_runtime[regarray->id];
11248 r->regarray[regarray_index] = value;
/*
 * Fill *metarray with the name and size of the meter array identified
 * by metarray_id. Structure parallels rte_swx_ctl_regarray_info_get().
 * NOTE(review): error-return lines are elided from this view.
 */
11253 rte_swx_ctl_metarray_info_get(struct rte_swx_pipeline *p,
11254 uint32_t metarray_id,
11255 struct rte_swx_ctl_metarray_info *metarray)
11257 struct metarray *m;
11259 if (!p || !metarray)
11262 m = metarray_find_by_id(p, metarray_id);
11266 strcpy(metarray->name, m->name);
11267 metarray->size = m->size;
/*
 * Create a new trTCM meter profile under a unique name and register it
 * with the pipeline.
 * Returns 0 on success; EINVAL/EEXIST-style errors via CHECK() macros.
 * NOTE(review): the calloc NULL check and the error path following
 * rte_meter_trtcm_profile_config() are elided from this view —
 * presumably the failure path frees mp before returning.
 */
11272 rte_swx_ctl_meter_profile_add(struct rte_swx_pipeline *p,
11274 struct rte_meter_trtcm_params *params)
11276 struct meter_profile *mp;
/* Name must be non-empty and shorter than RTE_SWX_NAME_SIZE. */
11280 CHECK_NAME(name, EINVAL);
11281 CHECK(params, EINVAL);
/* Profile names must be unique within the pipeline. */
11282 CHECK(!meter_profile_find(p, name), EEXIST);
11284 /* Node allocation. */
11285 mp = calloc(1, sizeof(struct meter_profile));
11288 /* Node initialization. */
/* Length was validated by CHECK_NAME, so strcpy cannot overflow. */
11289 strcpy(mp->name, name);
/* Keep a copy of the raw params alongside the compiled profile. */
11290 memcpy(&mp->params, params, sizeof(struct rte_meter_trtcm_params));
/* Pre-compute the runtime trTCM profile from the user parameters. */
11291 status = rte_meter_trtcm_profile_config(&mp->profile, params);
11297 /* Node add to tailq. */
11298 TAILQ_INSERT_TAIL(&p->meter_profiles, mp, node);
/*
 * Remove a meter profile by name. A profile still referenced by any
 * meter (n_users != 0) cannot be deleted (EBUSY).
 * NOTE(review): the free() of the removed node and the return statements
 * are elided from this view.
 */
11304 rte_swx_ctl_meter_profile_delete(struct rte_swx_pipeline *p,
11307 struct meter_profile *mp;
11310 CHECK_NAME(name, EINVAL);
11312 mp = meter_profile_find(p, name);
/* Refuse to delete a profile that meters are still using. */
11314 CHECK(!mp->n_users, EBUSY);
11316 /* Remove node from tailq. */
11317 TAILQ_REMOVE(&p->meter_profiles, mp, node);
/*
 * Reset one meter in a meter array back to its default (unconfigured)
 * state, detaching it from its current profile.
 * NOTE(review): the lines that clear the meter and decrement the old
 * profile's n_users refcount are elided from this view — mp_old is
 * captured here presumably for that purpose; confirm against upstream.
 */
11324 rte_swx_ctl_meter_reset(struct rte_swx_pipeline *p,
11325 const char *metarray_name,
11326 uint32_t metarray_index)
11328 struct meter_profile *mp_old;
11329 struct metarray *metarray;
11330 struct metarray_runtime *metarray_runtime;
11334 CHECK_NAME(metarray_name, EINVAL);
11336 metarray = metarray_find(p, metarray_name);
11337 CHECK(metarray, EINVAL);
/* Bounds-check the index against the declared array size. */
11338 CHECK(metarray_index < metarray->size, EINVAL);
11340 metarray_runtime = &p->metarray_runtime[metarray->id];
11341 m = &metarray_runtime->metarray[metarray_index];
/* Remember the profile currently attached to this meter. */
11342 mp_old = m->profile;
/*
 * (Re)configure one meter in a meter array to use the named profile.
 * The meter state is zeroed and rebuilt from the profile's pre-computed
 * trTCM configuration.
 * NOTE(review): the NULL check on mp after meter_profile_find(), the
 * n_users refcount updates for mp/mp_old, and the assignment of
 * m->profile are elided from this view — mp_old is captured here
 * presumably for the refcount bookkeeping; confirm against upstream.
 */
11352 rte_swx_ctl_meter_set(struct rte_swx_pipeline *p,
11353 const char *metarray_name,
11354 uint32_t metarray_index,
11355 const char *profile_name)
11357 struct meter_profile *mp, *mp_old;
11358 struct metarray *metarray;
11359 struct metarray_runtime *metarray_runtime;
11363 CHECK_NAME(metarray_name, EINVAL);
11365 metarray = metarray_find(p, metarray_name);
11366 CHECK(metarray, EINVAL);
/* Bounds-check the index against the declared array size. */
11367 CHECK(metarray_index < metarray->size, EINVAL);
11369 mp = meter_profile_find(p, profile_name);
11372 metarray_runtime = &p->metarray_runtime[metarray->id];
11373 m = &metarray_runtime->metarray[metarray_index];
/* Remember the previously attached profile before overwriting the meter. */
11374 mp_old = m->profile;
/* Wipe all meter state (counters, runtime trTCM state, profile link). */
11376 memset(m, 0, sizeof(struct meter));
/* Rebuild the runtime meter from the profile's compiled configuration. */
11377 rte_meter_trtcm_config(&m->m, &mp->profile);
/* RTE_COLORS as a mask value — presumably "all colors enabled"; confirm. */
11379 m->color_mask = RTE_COLORS;
/*
 * Copy the per-color packet and byte counters of one meter into *stats.
 * NOTE(review): the function tail (return statement, closing brace) runs
 * past the end of this view; comments cover only the visible lines.
 */
11388 rte_swx_ctl_meter_stats_read(struct rte_swx_pipeline *p,
11389 const char *metarray_name,
11390 uint32_t metarray_index,
11391 struct rte_swx_ctl_meter_stats *stats)
11393 struct metarray *metarray;
11394 struct metarray_runtime *metarray_runtime;
11398 CHECK_NAME(metarray_name, EINVAL);
11400 metarray = metarray_find(p, metarray_name);
11401 CHECK(metarray, EINVAL);
/* Bounds-check the index against the declared array size. */
11402 CHECK(metarray_index < metarray->size, EINVAL);
11404 CHECK(stats, EINVAL);
11406 metarray_runtime = &p->metarray_runtime[metarray->id];
11407 m = &metarray_runtime->metarray[metarray_index];
/* Copy the whole fixed-size counter arrays (one slot per meter color). */
11409 memcpy(stats->n_pkts, m->n_pkts, sizeof(m->n_pkts));
11410 memcpy(stats->n_bytes, m->n_bytes, sizeof(m->n_bytes));