1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Intel Corporation
 */
10 #include <arpa/inet.h>
12 #include <rte_common.h>
13 #include <rte_prefetch.h>
14 #include <rte_byteorder.h>
15 #include <rte_cycles.h>
16 #include <rte_meter.h>
18 #include "rte_swx_pipeline.h"
19 #include "rte_swx_ctl.h"
21 #define CHECK(condition, err_code) \
27 #define CHECK_NAME(name, err_code) \
30 (strnlen((name), RTE_SWX_NAME_SIZE) < RTE_SWX_NAME_SIZE), \
33 #define CHECK_INSTRUCTION(instr, err_code) \
36 (strnlen((instr), RTE_SWX_INSTRUCTION_SIZE) < \
37 RTE_SWX_INSTRUCTION_SIZE), \
45 #define TRACE(...) printf(__VA_ARGS__)
53 #define ntoh64(x) rte_be_to_cpu_64(x)
54 #define hton64(x) rte_cpu_to_be_64(x)
56 #ifndef RTE_SWX_PIPELINE_HUGE_PAGES_DISABLE
58 #include <rte_malloc.h>
61 env_malloc(size_t size, size_t alignment, int numa_node)
63 return rte_zmalloc_socket(NULL, size, alignment, numa_node);
67 env_free(void *start, size_t size __rte_unused)
77 env_malloc(size_t size, size_t alignment __rte_unused, int numa_node)
81 if (numa_available() == -1)
84 start = numa_alloc_onnode(size, numa_node);
88 memset(start, 0, size);
93 env_free(void *start, size_t size)
95 if (numa_available() == -1)
98 numa_free(start, size);
107 char name[RTE_SWX_NAME_SIZE];
113 TAILQ_ENTRY(struct_type) node;
114 char name[RTE_SWX_NAME_SIZE];
115 struct field *fields;
120 TAILQ_HEAD(struct_type_tailq, struct_type);
125 struct port_in_type {
126 TAILQ_ENTRY(port_in_type) node;
127 char name[RTE_SWX_NAME_SIZE];
128 struct rte_swx_port_in_ops ops;
131 TAILQ_HEAD(port_in_type_tailq, port_in_type);
134 TAILQ_ENTRY(port_in) node;
135 struct port_in_type *type;
140 TAILQ_HEAD(port_in_tailq, port_in);
142 struct port_in_runtime {
143 rte_swx_port_in_pkt_rx_t pkt_rx;
150 struct port_out_type {
151 TAILQ_ENTRY(port_out_type) node;
152 char name[RTE_SWX_NAME_SIZE];
153 struct rte_swx_port_out_ops ops;
156 TAILQ_HEAD(port_out_type_tailq, port_out_type);
159 TAILQ_ENTRY(port_out) node;
160 struct port_out_type *type;
165 TAILQ_HEAD(port_out_tailq, port_out);
167 struct port_out_runtime {
168 rte_swx_port_out_pkt_tx_t pkt_tx;
169 rte_swx_port_out_flush_t flush;
176 struct extern_type_member_func {
177 TAILQ_ENTRY(extern_type_member_func) node;
178 char name[RTE_SWX_NAME_SIZE];
179 rte_swx_extern_type_member_func_t func;
183 TAILQ_HEAD(extern_type_member_func_tailq, extern_type_member_func);
186 TAILQ_ENTRY(extern_type) node;
187 char name[RTE_SWX_NAME_SIZE];
188 struct struct_type *mailbox_struct_type;
189 rte_swx_extern_type_constructor_t constructor;
190 rte_swx_extern_type_destructor_t destructor;
191 struct extern_type_member_func_tailq funcs;
195 TAILQ_HEAD(extern_type_tailq, extern_type);
198 TAILQ_ENTRY(extern_obj) node;
199 char name[RTE_SWX_NAME_SIZE];
200 struct extern_type *type;
206 TAILQ_HEAD(extern_obj_tailq, extern_obj);
208 #ifndef RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX
209 #define RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX 8
212 struct extern_obj_runtime {
215 rte_swx_extern_type_member_func_t funcs[RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX];
222 TAILQ_ENTRY(extern_func) node;
223 char name[RTE_SWX_NAME_SIZE];
224 struct struct_type *mailbox_struct_type;
225 rte_swx_extern_func_t func;
230 TAILQ_HEAD(extern_func_tailq, extern_func);
232 struct extern_func_runtime {
234 rte_swx_extern_func_t func;
241 TAILQ_ENTRY(header) node;
242 char name[RTE_SWX_NAME_SIZE];
243 struct struct_type *st;
248 TAILQ_HEAD(header_tailq, header);
250 struct header_runtime {
254 struct header_out_runtime {
264 /* Packet headers are always in Network Byte Order (NBO), i.e. big endian.
265 * Packet meta-data fields are always assumed to be in Host Byte Order (HBO).
266 * Table entry fields can be in either NBO or HBO; they are assumed to be in HBO
267 * when transferred to packet meta-data and in NBO when transferred to packet
271 /* Notation conventions:
272 * -Header field: H = h.header.field (dst/src)
273 * -Meta-data field: M = m.field (dst/src)
274 * -Extern object mailbox field: E = e.field (dst/src)
275 * -Extern function mailbox field: F = f.field (dst/src)
276 * -Table action data field: T = t.field (src only)
277 * -Immediate value: I = 32-bit unsigned value (src only)
280 enum instruction_type {
287 INSTR_TX, /* port_out = M */
288 INSTR_TX_I, /* port_out = I */
290 /* extract h.header */
311 /* validate h.header */
314 /* invalidate h.header */
315 INSTR_HDR_INVALIDATE,
319 * dst = HMEF, src = HMEFTI
321 INSTR_MOV, /* dst = MEF, src = MEFT */
322 INSTR_MOV_MH, /* dst = MEF, src = H */
323 INSTR_MOV_HM, /* dst = H, src = MEFT */
324 INSTR_MOV_HH, /* dst = H, src = H */
325 INSTR_MOV_I, /* dst = HMEF, src = I */
327 /* dma h.header t.field
328 * memcpy(h.header, t.field, sizeof(h.header))
341 * dst = HMEF, src = HMEFTI
343 INSTR_ALU_ADD, /* dst = MEF, src = MEF */
344 INSTR_ALU_ADD_MH, /* dst = MEF, src = H */
345 INSTR_ALU_ADD_HM, /* dst = H, src = MEF */
346 INSTR_ALU_ADD_HH, /* dst = H, src = H */
347 INSTR_ALU_ADD_MI, /* dst = MEF, src = I */
348 INSTR_ALU_ADD_HI, /* dst = H, src = I */
352 * dst = HMEF, src = HMEFTI
354 INSTR_ALU_SUB, /* dst = MEF, src = MEF */
355 INSTR_ALU_SUB_MH, /* dst = MEF, src = H */
356 INSTR_ALU_SUB_HM, /* dst = H, src = MEF */
357 INSTR_ALU_SUB_HH, /* dst = H, src = H */
358 INSTR_ALU_SUB_MI, /* dst = MEF, src = I */
359 INSTR_ALU_SUB_HI, /* dst = H, src = I */
362 * dst = dst '+ src[0:1] '+ src[2:3] + ...
363 * dst = H, src = {H, h.header}
365 INSTR_ALU_CKADD_FIELD, /* src = H */
366 INSTR_ALU_CKADD_STRUCT20, /* src = h.header, with sizeof(header) = 20 */
367 INSTR_ALU_CKADD_STRUCT, /* src = h.header, with any sizeof(header) */
373 INSTR_ALU_CKSUB_FIELD,
377 * dst = HMEF, src = HMEFTI
379 INSTR_ALU_AND, /* dst = MEF, src = MEFT */
380 INSTR_ALU_AND_MH, /* dst = MEF, src = H */
381 INSTR_ALU_AND_HM, /* dst = H, src = MEFT */
382 INSTR_ALU_AND_HH, /* dst = H, src = H */
383 INSTR_ALU_AND_I, /* dst = HMEF, src = I */
387 * dst = HMEF, src = HMEFTI
389 INSTR_ALU_OR, /* dst = MEF, src = MEFT */
390 INSTR_ALU_OR_MH, /* dst = MEF, src = H */
391 INSTR_ALU_OR_HM, /* dst = H, src = MEFT */
392 INSTR_ALU_OR_HH, /* dst = H, src = H */
393 INSTR_ALU_OR_I, /* dst = HMEF, src = I */
397 * dst = HMEF, src = HMEFTI
399 INSTR_ALU_XOR, /* dst = MEF, src = MEFT */
400 INSTR_ALU_XOR_MH, /* dst = MEF, src = H */
401 INSTR_ALU_XOR_HM, /* dst = H, src = MEFT */
402 INSTR_ALU_XOR_HH, /* dst = H, src = H */
403 INSTR_ALU_XOR_I, /* dst = HMEF, src = I */
407 * dst = HMEF, src = HMEFTI
409 INSTR_ALU_SHL, /* dst = MEF, src = MEF */
410 INSTR_ALU_SHL_MH, /* dst = MEF, src = H */
411 INSTR_ALU_SHL_HM, /* dst = H, src = MEF */
412 INSTR_ALU_SHL_HH, /* dst = H, src = H */
413 INSTR_ALU_SHL_MI, /* dst = MEF, src = I */
414 INSTR_ALU_SHL_HI, /* dst = H, src = I */
418 * dst = HMEF, src = HMEFTI
420 INSTR_ALU_SHR, /* dst = MEF, src = MEF */
421 INSTR_ALU_SHR_MH, /* dst = MEF, src = H */
422 INSTR_ALU_SHR_HM, /* dst = H, src = MEF */
423 INSTR_ALU_SHR_HH, /* dst = H, src = H */
424 INSTR_ALU_SHR_MI, /* dst = MEF, src = I */
425 INSTR_ALU_SHR_HI, /* dst = H, src = I */
427 /* regprefetch REGARRAY index
428 * prefetch REGARRAY[index]
431 INSTR_REGPREFETCH_RH, /* index = H */
432 INSTR_REGPREFETCH_RM, /* index = MEFT */
433 INSTR_REGPREFETCH_RI, /* index = I */
435 /* regrd dst REGARRAY index
436 * dst = REGARRAY[index]
437 * dst = HMEF, index = HMEFTI
439 INSTR_REGRD_HRH, /* dst = H, index = H */
440 INSTR_REGRD_HRM, /* dst = H, index = MEFT */
441 INSTR_REGRD_HRI, /* dst = H, index = I */
442 INSTR_REGRD_MRH, /* dst = MEF, index = H */
443 INSTR_REGRD_MRM, /* dst = MEF, index = MEFT */
444 INSTR_REGRD_MRI, /* dst = MEF, index = I */
446 /* regwr REGARRAY index src
447 * REGARRAY[index] = src
448 * index = HMEFTI, src = HMEFTI
450 INSTR_REGWR_RHH, /* index = H, src = H */
451 INSTR_REGWR_RHM, /* index = H, src = MEFT */
452 INSTR_REGWR_RHI, /* index = H, src = I */
453 INSTR_REGWR_RMH, /* index = MEFT, src = H */
454 INSTR_REGWR_RMM, /* index = MEFT, src = MEFT */
455 INSTR_REGWR_RMI, /* index = MEFT, src = I */
456 INSTR_REGWR_RIH, /* index = I, src = H */
457 INSTR_REGWR_RIM, /* index = I, src = MEFT */
458 INSTR_REGWR_RII, /* index = I, src = I */
460 /* regadd REGARRAY index src
461 * REGARRAY[index] += src
462 * index = HMEFTI, src = HMEFTI
464 INSTR_REGADD_RHH, /* index = H, src = H */
465 INSTR_REGADD_RHM, /* index = H, src = MEFT */
466 INSTR_REGADD_RHI, /* index = H, src = I */
467 INSTR_REGADD_RMH, /* index = MEFT, src = H */
468 INSTR_REGADD_RMM, /* index = MEFT, src = MEFT */
469 INSTR_REGADD_RMI, /* index = MEFT, src = I */
470 INSTR_REGADD_RIH, /* index = I, src = H */
471 INSTR_REGADD_RIM, /* index = I, src = MEFT */
472 INSTR_REGADD_RII, /* index = I, src = I */
474 /* metprefetch METARRAY index
475 * prefetch METARRAY[index]
478 INSTR_METPREFETCH_H, /* index = H */
479 INSTR_METPREFETCH_M, /* index = MEFT */
480 INSTR_METPREFETCH_I, /* index = I */
482 /* meter METARRAY index length color_in color_out
483 * color_out = meter(METARRAY[index], length, color_in)
484 * index = HMEFTI, length = HMEFT, color_in = MEFTI, color_out = MEF
486 INSTR_METER_HHM, /* index = H, length = H, color_in = MEFT */
487 INSTR_METER_HHI, /* index = H, length = H, color_in = I */
488 INSTR_METER_HMM, /* index = H, length = MEFT, color_in = MEFT */
489 INSTR_METER_HMI, /* index = H, length = MEFT, color_in = I */
490 INSTR_METER_MHM, /* index = MEFT, length = H, color_in = MEFT */
491 INSTR_METER_MHI, /* index = MEFT, length = H, color_in = I */
492 INSTR_METER_MMM, /* index = MEFT, length = MEFT, color_in = MEFT */
493 INSTR_METER_MMI, /* index = MEFT, length = MEFT, color_in = I */
494 INSTR_METER_IHM, /* index = I, length = H, color_in = MEFT */
495 INSTR_METER_IHI, /* index = I, length = H, color_in = I */
496 INSTR_METER_IMM, /* index = I, length = MEFT, color_in = MEFT */
497 INSTR_METER_IMI, /* index = I, length = MEFT, color_in = I */
502 /* extern e.obj.func */
513 /* jmpv LABEL h.header
514 * Jump if header is valid
518 /* jmpnv LABEL h.header
519 * Jump if header is invalid
524 * Jump if table lookup hit
529 * Jump if table lookup miss
536 INSTR_JMP_ACTION_HIT,
538 /* jmpna LABEL ACTION
539 * Jump if action not run
541 INSTR_JMP_ACTION_MISS,
544 * Jump if a is equal to b
545 * a = HMEFT, b = HMEFTI
547 INSTR_JMP_EQ, /* a = MEFT, b = MEFT */
548 INSTR_JMP_EQ_MH, /* a = MEFT, b = H */
549 INSTR_JMP_EQ_HM, /* a = H, b = MEFT */
550 INSTR_JMP_EQ_HH, /* a = H, b = H */
551 INSTR_JMP_EQ_I, /* (a, b) = (MEFT, I) or (a, b) = (H, I) */
554 * Jump if a is not equal to b
555 * a = HMEFT, b = HMEFTI
557 INSTR_JMP_NEQ, /* a = MEFT, b = MEFT */
558 INSTR_JMP_NEQ_MH, /* a = MEFT, b = H */
559 INSTR_JMP_NEQ_HM, /* a = H, b = MEFT */
560 INSTR_JMP_NEQ_HH, /* a = H, b = H */
561 INSTR_JMP_NEQ_I, /* (a, b) = (MEFT, I) or (a, b) = (H, I) */
564 * Jump if a is less than b
565 * a = HMEFT, b = HMEFTI
567 INSTR_JMP_LT, /* a = MEFT, b = MEFT */
568 INSTR_JMP_LT_MH, /* a = MEFT, b = H */
569 INSTR_JMP_LT_HM, /* a = H, b = MEFT */
570 INSTR_JMP_LT_HH, /* a = H, b = H */
571 INSTR_JMP_LT_MI, /* a = MEFT, b = I */
572 INSTR_JMP_LT_HI, /* a = H, b = I */
575 * Jump if a is greater than b
576 * a = HMEFT, b = HMEFTI
578 INSTR_JMP_GT, /* a = MEFT, b = MEFT */
579 INSTR_JMP_GT_MH, /* a = MEFT, b = H */
580 INSTR_JMP_GT_HM, /* a = H, b = MEFT */
581 INSTR_JMP_GT_HH, /* a = H, b = H */
582 INSTR_JMP_GT_MI, /* a = MEFT, b = I */
583 INSTR_JMP_GT_HI, /* a = H, b = I */
591 struct instr_operand {
612 uint8_t header_id[8];
613 uint8_t struct_id[8];
618 struct instr_hdr_validity {
626 struct instr_extern_obj {
631 struct instr_extern_func {
635 struct instr_dst_src {
636 struct instr_operand dst;
638 struct instr_operand src;
643 struct instr_regarray {
648 struct instr_operand idx;
653 struct instr_operand dstsrc;
663 struct instr_operand idx;
667 struct instr_operand length;
670 struct instr_operand color_in;
671 uint32_t color_in_val;
674 struct instr_operand color_out;
679 uint8_t header_id[8];
680 uint8_t struct_id[8];
691 struct instruction *ip;
694 struct instr_operand a;
700 struct instr_operand b;
706 enum instruction_type type;
709 struct instr_hdr_validity valid;
710 struct instr_dst_src mov;
711 struct instr_regarray regarray;
712 struct instr_meter meter;
713 struct instr_dma dma;
714 struct instr_dst_src alu;
715 struct instr_table table;
716 struct instr_extern_obj ext_obj;
717 struct instr_extern_func ext_func;
718 struct instr_jmp jmp;
722 struct instruction_data {
723 char label[RTE_SWX_NAME_SIZE];
724 char jmp_label[RTE_SWX_NAME_SIZE];
725 uint32_t n_users; /* user = jmp instruction to this instruction. */
733 TAILQ_ENTRY(action) node;
734 char name[RTE_SWX_NAME_SIZE];
735 struct struct_type *st;
736 int *args_endianness; /* 0 = Host Byte Order (HBO). */
737 struct instruction *instructions;
738 uint32_t n_instructions;
742 TAILQ_HEAD(action_tailq, action);
748 TAILQ_ENTRY(table_type) node;
749 char name[RTE_SWX_NAME_SIZE];
750 enum rte_swx_table_match_type match_type;
751 struct rte_swx_table_ops ops;
754 TAILQ_HEAD(table_type_tailq, table_type);
757 enum rte_swx_table_match_type match_type;
762 TAILQ_ENTRY(table) node;
763 char name[RTE_SWX_NAME_SIZE];
764 char args[RTE_SWX_NAME_SIZE];
765 struct table_type *type; /* NULL when n_fields == 0. */
768 struct match_field *fields;
770 struct header *header; /* Only valid when n_fields > 0. */
773 struct action **actions;
774 struct action *default_action;
775 uint8_t *default_action_data;
777 int default_action_is_const;
778 uint32_t action_data_size_max;
784 TAILQ_HEAD(table_tailq, table);
786 struct table_runtime {
787 rte_swx_table_lookup_t func;
792 struct table_statistics {
793 uint64_t n_pkts_hit[2]; /* 0 = Miss, 1 = Hit. */
794 uint64_t *n_pkts_action;
801 TAILQ_ENTRY(regarray) node;
802 char name[RTE_SWX_NAME_SIZE];
808 TAILQ_HEAD(regarray_tailq, regarray);
810 struct regarray_runtime {
818 struct meter_profile {
819 TAILQ_ENTRY(meter_profile) node;
820 char name[RTE_SWX_NAME_SIZE];
821 struct rte_meter_trtcm_params params;
822 struct rte_meter_trtcm_profile profile;
826 TAILQ_HEAD(meter_profile_tailq, meter_profile);
829 TAILQ_ENTRY(metarray) node;
830 char name[RTE_SWX_NAME_SIZE];
835 TAILQ_HEAD(metarray_tailq, metarray);
838 struct rte_meter_trtcm m;
839 struct meter_profile *profile;
840 enum rte_color color_mask;
843 uint64_t n_pkts[RTE_COLORS];
844 uint64_t n_bytes[RTE_COLORS];
847 struct metarray_runtime {
848 struct meter *metarray;
857 struct rte_swx_pkt pkt;
863 /* Packet headers. */
864 struct header_runtime *headers; /* Extracted or generated headers. */
865 struct header_out_runtime *headers_out; /* Emitted headers. */
866 uint8_t *header_storage;
867 uint8_t *header_out_storage;
868 uint64_t valid_headers;
869 uint32_t n_headers_out;
871 /* Packet meta-data. */
875 struct table_runtime *tables;
876 struct rte_swx_table_state *table_state;
878 int hit; /* 0 = Miss, 1 = Hit. */
880 /* Extern objects and functions. */
881 struct extern_obj_runtime *extern_objs;
882 struct extern_func_runtime *extern_funcs;
885 struct instruction *ip;
886 struct instruction *ret;
889 #define MASK64_BIT_GET(mask, pos) ((mask) & (1LLU << (pos)))
890 #define MASK64_BIT_SET(mask, pos) ((mask) | (1LLU << (pos)))
891 #define MASK64_BIT_CLR(mask, pos) ((mask) & ~(1LLU << (pos)))
893 #define HEADER_VALID(thread, header_id) \
894 MASK64_BIT_GET((thread)->valid_headers, header_id)
896 #define ALU(thread, ip, operator) \
898 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
899 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
900 uint64_t dst64 = *dst64_ptr; \
901 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
902 uint64_t dst = dst64 & dst64_mask; \
904 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
905 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
906 uint64_t src64 = *src64_ptr; \
907 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
908 uint64_t src = src64 & src64_mask; \
910 uint64_t result = dst operator src; \
912 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
915 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
917 #define ALU_MH(thread, ip, operator) \
919 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
920 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
921 uint64_t dst64 = *dst64_ptr; \
922 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
923 uint64_t dst = dst64 & dst64_mask; \
925 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
926 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
927 uint64_t src64 = *src64_ptr; \
928 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
930 uint64_t result = dst operator src; \
932 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
935 #define ALU_HM(thread, ip, operator) \
937 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
938 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
939 uint64_t dst64 = *dst64_ptr; \
940 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
941 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
943 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
944 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
945 uint64_t src64 = *src64_ptr; \
946 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
947 uint64_t src = src64 & src64_mask; \
949 uint64_t result = dst operator src; \
950 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
952 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
955 #define ALU_HM_FAST(thread, ip, operator) \
957 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
958 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
959 uint64_t dst64 = *dst64_ptr; \
960 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
961 uint64_t dst = dst64 & dst64_mask; \
963 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
964 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
965 uint64_t src64 = *src64_ptr; \
966 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
967 uint64_t src = hton64(src64 & src64_mask) >> (64 - (ip)->alu.dst.n_bits); \
969 uint64_t result = dst operator src; \
971 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
974 #define ALU_HH(thread, ip, operator) \
976 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
977 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
978 uint64_t dst64 = *dst64_ptr; \
979 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
980 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
982 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
983 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
984 uint64_t src64 = *src64_ptr; \
985 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
987 uint64_t result = dst operator src; \
988 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
990 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
993 #define ALU_HH_FAST(thread, ip, operator) \
995 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
996 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
997 uint64_t dst64 = *dst64_ptr; \
998 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
999 uint64_t dst = dst64 & dst64_mask; \
1001 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
1002 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
1003 uint64_t src64 = *src64_ptr; \
1004 uint64_t src = (src64 << (64 - (ip)->alu.src.n_bits)) >> (64 - (ip)->alu.dst.n_bits); \
1006 uint64_t result = dst operator src; \
1008 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
1015 #define ALU_HM_FAST ALU
1017 #define ALU_HH_FAST ALU
1021 #define ALU_I(thread, ip, operator) \
1023 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
1024 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
1025 uint64_t dst64 = *dst64_ptr; \
1026 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
1027 uint64_t dst = dst64 & dst64_mask; \
1029 uint64_t src = (ip)->alu.src_val; \
1031 uint64_t result = dst operator src; \
1033 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
1036 #define ALU_MI ALU_I
1038 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1040 #define ALU_HI(thread, ip, operator) \
1042 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
1043 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
1044 uint64_t dst64 = *dst64_ptr; \
1045 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
1046 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
1048 uint64_t src = (ip)->alu.src_val; \
1050 uint64_t result = dst operator src; \
1051 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
1053 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
1058 #define ALU_HI ALU_I
1062 #define MOV(thread, ip) \
1064 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
1065 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
1066 uint64_t dst64 = *dst64_ptr; \
1067 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
1069 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
1070 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
1071 uint64_t src64 = *src64_ptr; \
1072 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \
1073 uint64_t src = src64 & src64_mask; \
1075 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
1078 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1080 #define MOV_MH(thread, ip) \
1082 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
1083 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
1084 uint64_t dst64 = *dst64_ptr; \
1085 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
1087 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
1088 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
1089 uint64_t src64 = *src64_ptr; \
1090 uint64_t src = ntoh64(src64) >> (64 - (ip)->mov.src.n_bits); \
1092 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
1095 #define MOV_HM(thread, ip) \
1097 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
1098 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
1099 uint64_t dst64 = *dst64_ptr; \
1100 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
1102 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
1103 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
1104 uint64_t src64 = *src64_ptr; \
1105 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \
1106 uint64_t src = src64 & src64_mask; \
1108 src = hton64(src) >> (64 - (ip)->mov.dst.n_bits); \
1109 *dst64_ptr = (dst64 & ~dst64_mask) | src; \
1112 #define MOV_HH(thread, ip) \
1114 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
1115 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
1116 uint64_t dst64 = *dst64_ptr; \
1117 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
1119 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
1120 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
1121 uint64_t src64 = *src64_ptr; \
1123 uint64_t src = src64 << (64 - (ip)->mov.src.n_bits); \
1124 src = src >> (64 - (ip)->mov.dst.n_bits); \
1125 *dst64_ptr = (dst64 & ~dst64_mask) | src; \
1136 #define MOV_I(thread, ip) \
1138 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
1139 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
1140 uint64_t dst64 = *dst64_ptr; \
1141 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
1143 uint64_t src = (ip)->mov.src_val; \
1145 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
1148 #define JMP_CMP(thread, ip, operator) \
1150 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1151 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1152 uint64_t a64 = *a64_ptr; \
1153 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
1154 uint64_t a = a64 & a64_mask; \
1156 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1157 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1158 uint64_t b64 = *b64_ptr; \
1159 uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits); \
1160 uint64_t b = b64 & b64_mask; \
1162 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1165 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1167 #define JMP_CMP_MH(thread, ip, operator) \
1169 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1170 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1171 uint64_t a64 = *a64_ptr; \
1172 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
1173 uint64_t a = a64 & a64_mask; \
1175 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1176 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1177 uint64_t b64 = *b64_ptr; \
1178 uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits); \
1180 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1183 #define JMP_CMP_HM(thread, ip, operator) \
1185 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1186 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1187 uint64_t a64 = *a64_ptr; \
1188 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
1190 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1191 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1192 uint64_t b64 = *b64_ptr; \
1193 uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits); \
1194 uint64_t b = b64 & b64_mask; \
1196 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1199 #define JMP_CMP_HH(thread, ip, operator) \
1201 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1202 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1203 uint64_t a64 = *a64_ptr; \
1204 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
1206 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1207 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1208 uint64_t b64 = *b64_ptr; \
1209 uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits); \
1211 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1214 #define JMP_CMP_HH_FAST(thread, ip, operator) \
1216 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1217 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1218 uint64_t a64 = *a64_ptr; \
1219 uint64_t a = a64 << (64 - (ip)->jmp.a.n_bits); \
1221 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1222 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1223 uint64_t b64 = *b64_ptr; \
1224 uint64_t b = b64 << (64 - (ip)->jmp.b.n_bits); \
1226 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1231 #define JMP_CMP_MH JMP_CMP
1232 #define JMP_CMP_HM JMP_CMP
1233 #define JMP_CMP_HH JMP_CMP
1234 #define JMP_CMP_HH_FAST JMP_CMP
1238 #define JMP_CMP_I(thread, ip, operator) \
1240 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1241 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1242 uint64_t a64 = *a64_ptr; \
1243 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
1244 uint64_t a = a64 & a64_mask; \
1246 uint64_t b = (ip)->jmp.b_val; \
1248 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1251 #define JMP_CMP_MI JMP_CMP_I
1253 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1255 #define JMP_CMP_HI(thread, ip, operator) \
1257 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1258 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1259 uint64_t a64 = *a64_ptr; \
1260 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
1262 uint64_t b = (ip)->jmp.b_val; \
1264 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1269 #define JMP_CMP_HI JMP_CMP_I
1273 #define METADATA_READ(thread, offset, n_bits) \
1275 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
1276 uint64_t m64 = *m64_ptr; \
1277 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
1281 #define METADATA_WRITE(thread, offset, n_bits, value) \
1283 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
1284 uint64_t m64 = *m64_ptr; \
1285 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
1287 uint64_t m_new = value; \
1289 *m64_ptr = (m64 & ~m64_mask) | (m_new & m64_mask); \
1292 #ifndef RTE_SWX_PIPELINE_THREADS_MAX
1293 #define RTE_SWX_PIPELINE_THREADS_MAX 16
1296 struct rte_swx_pipeline {
1297 struct struct_type_tailq struct_types;
1298 struct port_in_type_tailq port_in_types;
1299 struct port_in_tailq ports_in;
1300 struct port_out_type_tailq port_out_types;
1301 struct port_out_tailq ports_out;
1302 struct extern_type_tailq extern_types;
1303 struct extern_obj_tailq extern_objs;
1304 struct extern_func_tailq extern_funcs;
1305 struct header_tailq headers;
1306 struct struct_type *metadata_st;
1307 uint32_t metadata_struct_id;
1308 struct action_tailq actions;
1309 struct table_type_tailq table_types;
1310 struct table_tailq tables;
1311 struct regarray_tailq regarrays;
1312 struct meter_profile_tailq meter_profiles;
1313 struct metarray_tailq metarrays;
1315 struct port_in_runtime *in;
1316 struct port_out_runtime *out;
1317 struct instruction **action_instructions;
1318 struct rte_swx_table_state *table_state;
1319 struct table_statistics *table_stats;
1320 struct regarray_runtime *regarray_runtime;
1321 struct metarray_runtime *metarray_runtime;
1322 struct instruction *instructions;
1323 struct thread threads[RTE_SWX_PIPELINE_THREADS_MAX];
1326 uint32_t n_ports_in;
1327 uint32_t n_ports_out;
1328 uint32_t n_extern_objs;
1329 uint32_t n_extern_funcs;
1332 uint32_t n_regarrays;
1333 uint32_t n_metarrays;
1337 uint32_t n_instructions;
1345 static struct struct_type *
1346 struct_type_find(struct rte_swx_pipeline *p, const char *name)
1348 struct struct_type *elem;
1350 TAILQ_FOREACH(elem, &p->struct_types, node)
1351 if (strcmp(elem->name, name) == 0)
1357 static struct field *
1358 struct_type_field_find(struct struct_type *st, const char *name)
1362 for (i = 0; i < st->n_fields; i++) {
1363 struct field *f = &st->fields[i];
1365 if (strcmp(f->name, name) == 0)
1373 rte_swx_pipeline_struct_type_register(struct rte_swx_pipeline *p,
1375 struct rte_swx_field_params *fields,
1378 struct struct_type *st;
1382 CHECK_NAME(name, EINVAL);
1383 CHECK(fields, EINVAL);
1384 CHECK(n_fields, EINVAL);
1386 for (i = 0; i < n_fields; i++) {
1387 struct rte_swx_field_params *f = &fields[i];
1390 CHECK_NAME(f->name, EINVAL);
1391 CHECK(f->n_bits, EINVAL);
1392 CHECK(f->n_bits <= 64, EINVAL);
1393 CHECK((f->n_bits & 7) == 0, EINVAL);
1395 for (j = 0; j < i; j++) {
1396 struct rte_swx_field_params *f_prev = &fields[j];
1398 CHECK(strcmp(f->name, f_prev->name), EINVAL);
1402 CHECK(!struct_type_find(p, name), EEXIST);
1404 /* Node allocation. */
1405 st = calloc(1, sizeof(struct struct_type));
1408 st->fields = calloc(n_fields, sizeof(struct field));
1414 /* Node initialization. */
1415 strcpy(st->name, name);
1416 for (i = 0; i < n_fields; i++) {
1417 struct field *dst = &st->fields[i];
1418 struct rte_swx_field_params *src = &fields[i];
1420 strcpy(dst->name, src->name);
1421 dst->n_bits = src->n_bits;
1422 dst->offset = st->n_bits;
1424 st->n_bits += src->n_bits;
1426 st->n_fields = n_fields;
1428 /* Node add to tailq. */
1429 TAILQ_INSERT_TAIL(&p->struct_types, st, node);
/* Build phase: allocate the per-thread array of struct pointers
 * (one slot per registered struct id) for every pipeline thread. */
1435 struct_build(struct rte_swx_pipeline *p)
1439 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1440 struct thread *t = &p->threads[i];
1442 t->structs = calloc(p->n_structs, sizeof(uint8_t *));
1443 CHECK(t->structs, ENOMEM);
/* Release the per-thread struct pointer arrays created by struct_build(). */
1450 struct_build_free(struct rte_swx_pipeline *p)
1454 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1455 struct thread *t = &p->threads[i];
/* Free build-time state, then drain and free every node of p->struct_types. */
1463 struct_free(struct rte_swx_pipeline *p)
1465 struct_build_free(p);
1469 struct struct_type *elem;
1471 elem = TAILQ_FIRST(&p->struct_types);
1475 TAILQ_REMOVE(&p->struct_types, elem, node);
/* Look up a registered input port type by name (linear scan). */
1484 static struct port_in_type *
1485 port_in_type_find(struct rte_swx_pipeline *p, const char *name)
1487 struct port_in_type *elem;
1492 TAILQ_FOREACH(elem, &p->port_in_types, node)
1493 if (strcmp(elem->name, name) == 0)
/* Register an input port type: validates that all mandatory ops
 * (create/free/pkt_rx/stats_read) are provided, rejects duplicate
 * names, copies the ops table into a new node, and appends it to
 * p->port_in_types. */
1500 rte_swx_pipeline_port_in_type_register(struct rte_swx_pipeline *p,
1502 struct rte_swx_port_in_ops *ops)
1504 struct port_in_type *elem;
1507 CHECK_NAME(name, EINVAL);
1509 CHECK(ops->create, EINVAL);
1510 CHECK(ops->free, EINVAL);
1511 CHECK(ops->pkt_rx, EINVAL);
1512 CHECK(ops->stats_read, EINVAL);
1514 CHECK(!port_in_type_find(p, name), EEXIST);
1516 /* Node allocation. */
1517 elem = calloc(1, sizeof(struct port_in_type));
1518 CHECK(elem, ENOMEM);
1520 /* Node initialization. */
1521 strcpy(elem->name, name);
1522 memcpy(&elem->ops, ops, sizeof(*ops));
1524 /* Node add to tailq. */
1525 TAILQ_INSERT_TAIL(&p->port_in_types, elem, node);
/* Look up an input port instance by numeric port id (linear scan). */
1530 static struct port_in *
1531 port_in_find(struct rte_swx_pipeline *p, uint32_t port_id)
1533 struct port_in *port;
1535 TAILQ_FOREACH(port, &p->ports_in, node)
1536 if (port->id == port_id)
/* Create an input port instance of a previously registered type.
 * The port id must be unused; the type's create() op builds the
 * underlying object from 'args'. n_ports_in tracks the highest
 * configured id + 1 (ids need not be added in order). */
1543 rte_swx_pipeline_port_in_config(struct rte_swx_pipeline *p,
1545 const char *port_type_name,
1548 struct port_in_type *type = NULL;
1549 struct port_in *port = NULL;
1554 CHECK(!port_in_find(p, port_id), EINVAL);
1556 CHECK_NAME(port_type_name, EINVAL);
1557 type = port_in_type_find(p, port_type_name);
1558 CHECK(type, EINVAL);
1560 obj = type->ops.create(args);
1563 /* Node allocation. */
1564 port = calloc(1, sizeof(struct port_in));
1565 CHECK(port, ENOMEM);
1567 /* Node initialization. */
1572 /* Node add to tailq. */
1573 TAILQ_INSERT_TAIL(&p->ports_in, port, node);
1574 if (p->n_ports_in < port_id + 1)
1575 p->n_ports_in = port_id + 1;
/* Build phase for input ports: require at least one port, a power-of-2
 * count (pipeline_port_inc() uses a bitmask for round-robin), and a
 * dense id space (every id 0..n-1 configured). Then flatten the port
 * list into the p->in[] runtime array indexed by port id. */
1581 port_in_build(struct rte_swx_pipeline *p)
1583 struct port_in *port;
1586 CHECK(p->n_ports_in, EINVAL);
1587 CHECK(rte_is_power_of_2(p->n_ports_in), EINVAL);
1589 for (i = 0; i < p->n_ports_in; i++)
1590 CHECK(port_in_find(p, i), EINVAL);
1592 p->in = calloc(p->n_ports_in, sizeof(struct port_in_runtime));
1593 CHECK(p->in, ENOMEM);
1595 TAILQ_FOREACH(port, &p->ports_in, node) {
1596 struct port_in_runtime *in = &p->in[port->id];
1598 in->pkt_rx = port->type->ops.pkt_rx;
1599 in->obj = port->obj;
/* Release input-port build-time state (body elided in this excerpt). */
1606 port_in_build_free(struct rte_swx_pipeline *p)
/* Tear down input ports: free build state, destroy each port object via
 * its type's free() op, then drain the input port type list. */
1613 port_in_free(struct rte_swx_pipeline *p)
1615 port_in_build_free(p);
1619 struct port_in *port;
1621 port = TAILQ_FIRST(&p->ports_in);
1625 TAILQ_REMOVE(&p->ports_in, port, node);
1626 port->type->ops.free(port->obj);
1630 /* Input port types. */
1632 struct port_in_type *elem;
1634 elem = TAILQ_FIRST(&p->port_in_types);
1638 TAILQ_REMOVE(&p->port_in_types, elem, node);
/* Look up a registered output port type by name (linear scan). */
1646 static struct port_out_type *
1647 port_out_type_find(struct rte_swx_pipeline *p, const char *name)
1649 struct port_out_type *elem;
1654 TAILQ_FOREACH(elem, &p->port_out_types, node)
1655 if (!strcmp(elem->name, name))
/* Register an output port type: mirrors the input-port registration —
 * mandatory ops are create/free/pkt_tx/stats_read, duplicate names are
 * rejected, and the ops table is copied into a new tailq node. */
1662 rte_swx_pipeline_port_out_type_register(struct rte_swx_pipeline *p,
1664 struct rte_swx_port_out_ops *ops)
1666 struct port_out_type *elem;
1669 CHECK_NAME(name, EINVAL);
1671 CHECK(ops->create, EINVAL);
1672 CHECK(ops->free, EINVAL);
1673 CHECK(ops->pkt_tx, EINVAL);
1674 CHECK(ops->stats_read, EINVAL);
1676 CHECK(!port_out_type_find(p, name), EEXIST);
1678 /* Node allocation. */
1679 elem = calloc(1, sizeof(struct port_out_type));
1680 CHECK(elem, ENOMEM);
1682 /* Node initialization. */
1683 strcpy(elem->name, name);
1684 memcpy(&elem->ops, ops, sizeof(*ops));
1686 /* Node add to tailq. */
1687 TAILQ_INSERT_TAIL(&p->port_out_types, elem, node);
/* Look up an output port instance by numeric port id (linear scan). */
1692 static struct port_out *
1693 port_out_find(struct rte_swx_pipeline *p, uint32_t port_id)
1695 struct port_out *port;
1697 TAILQ_FOREACH(port, &p->ports_out, node)
1698 if (port->id == port_id)
/* Create an output port instance of a registered type; mirrors
 * rte_swx_pipeline_port_in_config(). n_ports_out tracks highest id + 1. */
1705 rte_swx_pipeline_port_out_config(struct rte_swx_pipeline *p,
1707 const char *port_type_name,
1710 struct port_out_type *type = NULL;
1711 struct port_out *port = NULL;
1716 CHECK(!port_out_find(p, port_id), EINVAL);
1718 CHECK_NAME(port_type_name, EINVAL);
1719 type = port_out_type_find(p, port_type_name);
1720 CHECK(type, EINVAL);
1722 obj = type->ops.create(args);
1725 /* Node allocation. */
1726 port = calloc(1, sizeof(struct port_out));
1727 CHECK(port, ENOMEM);
1729 /* Node initialization. */
1734 /* Node add to tailq. */
1735 TAILQ_INSERT_TAIL(&p->ports_out, port, node);
1736 if (p->n_ports_out < port_id + 1)
1737 p->n_ports_out = port_id + 1;
/* Build phase for output ports: require at least one port and a dense
 * id space, then flatten into the p->out[] runtime array. Unlike the
 * input side, the count need not be a power of 2. The optional flush
 * op is copied as-is (may be NULL per the ops contract). */
1743 port_out_build(struct rte_swx_pipeline *p)
1745 struct port_out *port;
1748 CHECK(p->n_ports_out, EINVAL);
1750 for (i = 0; i < p->n_ports_out; i++)
1751 CHECK(port_out_find(p, i), EINVAL);
1753 p->out = calloc(p->n_ports_out, sizeof(struct port_out_runtime));
1754 CHECK(p->out, ENOMEM);
1756 TAILQ_FOREACH(port, &p->ports_out, node) {
1757 struct port_out_runtime *out = &p->out[port->id];
1759 out->pkt_tx = port->type->ops.pkt_tx;
1760 out->flush = port->type->ops.flush;
1761 out->obj = port->obj;
/* Release output-port build-time state (body elided in this excerpt). */
1768 port_out_build_free(struct rte_swx_pipeline *p)
/* Tear down output ports: free build state, destroy each port object via
 * its type's free() op, then drain the output port type list. */
1775 port_out_free(struct rte_swx_pipeline *p)
1777 port_out_build_free(p);
1781 struct port_out *port;
1783 port = TAILQ_FIRST(&p->ports_out);
1787 TAILQ_REMOVE(&p->ports_out, port, node);
1788 port->type->ops.free(port->obj);
1792 /* Output port types. */
1794 struct port_out_type *elem;
1796 elem = TAILQ_FIRST(&p->port_out_types);
1800 TAILQ_REMOVE(&p->port_out_types, elem, node);
/* Look up a registered extern type by name (linear scan). */
1808 static struct extern_type *
1809 extern_type_find(struct rte_swx_pipeline *p, const char *name)
1811 struct extern_type *elem;
1813 TAILQ_FOREACH(elem, &p->extern_types, node)
1814 if (strcmp(elem->name, name) == 0)
/* Look up a member function of an extern type by name (linear scan). */
1820 static struct extern_type_member_func *
1821 extern_type_member_func_find(struct extern_type *type, const char *name)
1823 struct extern_type_member_func *elem;
1825 TAILQ_FOREACH(elem, &type->funcs, node)
1826 if (strcmp(elem->name, name) == 0)
/* Look up an extern object instance by name (linear scan). */
1832 static struct extern_obj *
1833 extern_obj_find(struct rte_swx_pipeline *p, const char *name)
1835 struct extern_obj *elem;
1837 TAILQ_FOREACH(elem, &p->extern_objs, node)
1838 if (strcmp(elem->name, name) == 0)
/* Parse an "e.<object>.<func>" reference: strips the "e." prefix,
 * splits object and function names at the '.', resolves the object and
 * then the member function on its type. On success *obj is also set.
 * NOTE(review): the strdup'd copy is modified/freed on the elided lines. */
1844 static struct extern_type_member_func *
1845 extern_obj_member_func_parse(struct rte_swx_pipeline *p,
1847 struct extern_obj **obj)
1849 struct extern_obj *object;
1850 struct extern_type_member_func *func;
1851 char *object_name, *func_name;
/* Reference must start with the "e." prefix for extern objects. */
1853 if (name[0] != 'e' || name[1] != '.')
1856 object_name = strdup(&name[2]);
1860 func_name = strchr(object_name, '.');
1869 object = extern_obj_find(p, object_name);
1875 func = extern_type_member_func_find(object->type, func_name);
/* Parse an "e.<object>.<field>" reference into a mailbox struct field:
 * resolves the object, then looks the field up in the object's type
 * mailbox struct type. On success *object is also set. */
1888 static struct field *
1889 extern_obj_mailbox_field_parse(struct rte_swx_pipeline *p,
1891 struct extern_obj **object)
1893 struct extern_obj *obj;
1895 char *obj_name, *field_name;
1897 if ((name[0] != 'e') || (name[1] != '.'))
1900 obj_name = strdup(&name[2]);
1904 field_name = strchr(obj_name, '.');
1913 obj = extern_obj_find(p, obj_name);
1919 f = struct_type_field_find(obj->type->mailbox_struct_type, field_name);
/* Register an extern type: requires a unique name, an already-registered
 * mailbox struct type, and non-NULL constructor/destructor callbacks.
 * The member-function list starts empty and is populated later via
 * rte_swx_pipeline_extern_type_member_func_register(). */
1933 rte_swx_pipeline_extern_type_register(struct rte_swx_pipeline *p,
1935 const char *mailbox_struct_type_name,
1936 rte_swx_extern_type_constructor_t constructor,
1937 rte_swx_extern_type_destructor_t destructor)
1939 struct extern_type *elem;
1940 struct struct_type *mailbox_struct_type;
1944 CHECK_NAME(name, EINVAL);
1945 CHECK(!extern_type_find(p, name), EEXIST);
1947 CHECK_NAME(mailbox_struct_type_name, EINVAL);
1948 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
1949 CHECK(mailbox_struct_type, EINVAL);
1951 CHECK(constructor, EINVAL);
1952 CHECK(destructor, EINVAL);
1954 /* Node allocation. */
1955 elem = calloc(1, sizeof(struct extern_type));
1956 CHECK(elem, ENOMEM);
1958 /* Node initialization. */
1959 strcpy(elem->name, name);
1960 elem->mailbox_struct_type = mailbox_struct_type;
1961 elem->constructor = constructor;
1962 elem->destructor = destructor;
1963 TAILQ_INIT(&elem->funcs);
1965 /* Node add to tailq. */
1966 TAILQ_INSERT_TAIL(&p->extern_types, elem, node);
/* Register a member function on an existing extern type. Enforces the
 * per-type function cap (RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX), unique
 * function names within the type, and a non-NULL callback. The new
 * function's id is its registration order (type->n_funcs at insert time). */
1972 rte_swx_pipeline_extern_type_member_func_register(struct rte_swx_pipeline *p,
1973 const char *extern_type_name,
1975 rte_swx_extern_type_member_func_t member_func)
1977 struct extern_type *type;
1978 struct extern_type_member_func *type_member;
1982 CHECK_NAME(extern_type_name, EINVAL);
1983 type = extern_type_find(p, extern_type_name);
1984 CHECK(type, EINVAL);
1985 CHECK(type->n_funcs < RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX, ENOSPC);
1987 CHECK_NAME(name, EINVAL);
1988 CHECK(!extern_type_member_func_find(type, name), EEXIST);
1990 CHECK(member_func, EINVAL);
1992 /* Node allocation. */
1993 type_member = calloc(1, sizeof(struct extern_type_member_func));
1994 CHECK(type_member, ENOMEM);
1996 /* Node initialization. */
1997 strcpy(type_member->name, name);
1998 type_member->func = member_func;
1999 type_member->id = type->n_funcs;
2001 /* Node add to tailq. */
2002 TAILQ_INSERT_TAIL(&type->funcs, type_member, node);
/* Instantiate an extern object of a registered extern type. The type's
 * constructor builds the underlying object from 'args'. The object is
 * assigned the next struct id (its mailbox slot in t->structs[]) and
 * the next extern-object id, then appended to p->extern_objs. */
2009 rte_swx_pipeline_extern_object_config(struct rte_swx_pipeline *p,
2010 const char *extern_type_name,
2014 struct extern_type *type;
2015 struct extern_obj *obj;
2020 CHECK_NAME(extern_type_name, EINVAL);
2021 type = extern_type_find(p, extern_type_name);
2022 CHECK(type, EINVAL);
2024 CHECK_NAME(name, EINVAL);
2025 CHECK(!extern_obj_find(p, name), EEXIST);
2027 /* Node allocation. */
2028 obj = calloc(1, sizeof(struct extern_obj));
2031 /* Object construction. */
2032 obj_handle = type->constructor(args);
2038 /* Node initialization. */
2039 strcpy(obj->name, name);
2041 obj->obj = obj_handle;
2042 obj->struct_id = p->n_structs;
2043 obj->id = p->n_extern_objs;
2045 /* Node add to tailq. */
2046 TAILQ_INSERT_TAIL(&p->extern_objs, obj, node);
/* Build phase for extern objects: for each thread, allocate the runtime
 * array and, per object, a zeroed mailbox sized from the mailbox struct
 * type's bit width (n_bits / 8). The member-function table is copied
 * into r->funcs[] by id, and the mailbox is published into t->structs[]
 * at the object's struct id so instructions can address its fields. */
2054 extern_obj_build(struct rte_swx_pipeline *p)
2058 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2059 struct thread *t = &p->threads[i];
2060 struct extern_obj *obj;
2062 t->extern_objs = calloc(p->n_extern_objs,
2063 sizeof(struct extern_obj_runtime));
2064 CHECK(t->extern_objs, ENOMEM);
2066 TAILQ_FOREACH(obj, &p->extern_objs, node) {
2067 struct extern_obj_runtime *r =
2068 &t->extern_objs[obj->id];
2069 struct extern_type_member_func *func;
2070 uint32_t mailbox_size =
2071 obj->type->mailbox_struct_type->n_bits / 8;
2075 r->mailbox = calloc(1, mailbox_size);
2076 CHECK(r->mailbox, ENOMEM);
2078 TAILQ_FOREACH(func, &obj->type->funcs, node)
2079 r->funcs[func->id] = func->func;
2081 t->structs[obj->struct_id] = r->mailbox;
/* Release per-thread extern object runtime state (mailboxes freed on the
 * elided lines inside the inner loop, then the runtime array itself). */
2089 extern_obj_build_free(struct rte_swx_pipeline *p)
2093 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2094 struct thread *t = &p->threads[i];
2097 if (!t->extern_objs)
2100 for (j = 0; j < p->n_extern_objs; j++) {
2101 struct extern_obj_runtime *r = &t->extern_objs[j];
2106 free(t->extern_objs);
2107 t->extern_objs = NULL;
/* Tear down extern objects: free build state, destroy each object via
 * its type's destructor, then drain the extern type list including each
 * type's member-function tailq. */
2112 extern_obj_free(struct rte_swx_pipeline *p)
2114 extern_obj_build_free(p);
2116 /* Extern objects. */
2118 struct extern_obj *elem;
2120 elem = TAILQ_FIRST(&p->extern_objs);
2124 TAILQ_REMOVE(&p->extern_objs, elem, node);
2126 elem->type->destructor(elem->obj);
2132 struct extern_type *elem;
2134 elem = TAILQ_FIRST(&p->extern_types);
2138 TAILQ_REMOVE(&p->extern_types, elem, node);
2141 struct extern_type_member_func *func;
2143 func = TAILQ_FIRST(&elem->funcs);
2147 TAILQ_REMOVE(&elem->funcs, func, node);
/* Look up a registered extern function by name (linear scan). */
2158 static struct extern_func *
2159 extern_func_find(struct rte_swx_pipeline *p, const char *name)
2161 struct extern_func *elem;
2163 TAILQ_FOREACH(elem, &p->extern_funcs, node)
2164 if (strcmp(elem->name, name) == 0)
/* Parse an "f.<name>" extern function reference: validate the "f."
 * prefix, then resolve the remainder as the function name. */
2170 static struct extern_func *
2171 extern_func_parse(struct rte_swx_pipeline *p,
2174 if (name[0] != 'f' || name[1] != '.')
2177 return extern_func_find(p, &name[2]);
/* Parse an "f.<func>.<field>" reference into a mailbox struct field:
 * resolves the extern function, then looks the field up in its mailbox
 * struct type. On success *function is also set. */
2180 static struct field *
2181 extern_func_mailbox_field_parse(struct rte_swx_pipeline *p,
2183 struct extern_func **function)
2185 struct extern_func *func;
2187 char *func_name, *field_name;
2189 if ((name[0] != 'f') || (name[1] != '.'))
2192 func_name = strdup(&name[2]);
2196 field_name = strchr(func_name, '.');
2205 func = extern_func_find(p, func_name);
2211 f = struct_type_field_find(func->mailbox_struct_type, field_name);
/* Register an extern function with the pipeline. Requires a unique name,
 * an already-registered mailbox struct type and a non-NULL callback; the
 * node gets the next struct id (mailbox slot) and extern-func id, and is
 * appended to p->extern_funcs.
 *
 * Fix: the allocation check must test 'f' (the calloc result), not
 * 'func' (the caller's callback pointer, already validated EINVAL a few
 * lines above). With the original CHECK(func, ENOMEM), a failed calloc
 * went undetected and strcpy(f->name, ...) dereferenced NULL. */
2225 rte_swx_pipeline_extern_func_register(struct rte_swx_pipeline *p,
2227 const char *mailbox_struct_type_name,
2228 rte_swx_extern_func_t func)
2230 struct extern_func *f;
2231 struct struct_type *mailbox_struct_type;
2235 CHECK_NAME(name, EINVAL);
2236 CHECK(!extern_func_find(p, name), EEXIST);
2238 CHECK_NAME(mailbox_struct_type_name, EINVAL);
2239 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
2240 CHECK(mailbox_struct_type, EINVAL);
2242 CHECK(func, EINVAL);
2244 /* Node allocation. */
2245 f = calloc(1, sizeof(struct extern_func));
2246 CHECK(f, ENOMEM);
2248 /* Node initialization. */
2249 strcpy(f->name, name);
2250 f->mailbox_struct_type = mailbox_struct_type;
2252 f->struct_id = p->n_structs;
2253 f->id = p->n_extern_funcs;
2255 /* Node add to tailq. */
2256 TAILQ_INSERT_TAIL(&p->extern_funcs, f, node);
2257 p->n_extern_funcs++;
/* Build phase for extern functions: per thread, allocate the runtime
 * array and a zeroed mailbox per function (mailbox struct n_bits / 8
 * bytes), then publish each mailbox into t->structs[] at the function's
 * struct id. */
2264 extern_func_build(struct rte_swx_pipeline *p)
2268 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2269 struct thread *t = &p->threads[i];
2270 struct extern_func *func;
2272 /* Memory allocation. */
2273 t->extern_funcs = calloc(p->n_extern_funcs,
2274 sizeof(struct extern_func_runtime));
2275 CHECK(t->extern_funcs, ENOMEM);
2277 /* Extern function. */
2278 TAILQ_FOREACH(func, &p->extern_funcs, node) {
2279 struct extern_func_runtime *r =
2280 &t->extern_funcs[func->id];
2281 uint32_t mailbox_size =
2282 func->mailbox_struct_type->n_bits / 8;
2284 r->func = func->func;
2286 r->mailbox = calloc(1, mailbox_size);
2287 CHECK(r->mailbox, ENOMEM);
2289 t->structs[func->struct_id] = r->mailbox;
/* Release per-thread extern function runtime state (mailboxes freed on
 * the elided lines inside the inner loop, then the array itself). */
2297 extern_func_build_free(struct rte_swx_pipeline *p)
2301 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2302 struct thread *t = &p->threads[i];
2305 if (!t->extern_funcs)
2308 for (j = 0; j < p->n_extern_funcs; j++) {
2309 struct extern_func_runtime *r = &t->extern_funcs[j];
2314 free(t->extern_funcs);
2315 t->extern_funcs = NULL;
/* Tear down extern functions: free build state, then drain the list. */
2320 extern_func_free(struct rte_swx_pipeline *p)
2322 extern_func_build_free(p);
2325 struct extern_func *elem;
2327 elem = TAILQ_FIRST(&p->extern_funcs);
2331 TAILQ_REMOVE(&p->extern_funcs, elem, node);
/* Look up a registered packet header by name (linear scan). */
2339 static struct header *
2340 header_find(struct rte_swx_pipeline *p, const char *name)
2342 struct header *elem;
2344 TAILQ_FOREACH(elem, &p->headers, node)
2345 if (strcmp(elem->name, name) == 0)
/* Look up a registered packet header by its struct id (linear scan). */
2351 static struct header *
2352 header_find_by_struct_id(struct rte_swx_pipeline *p, uint32_t struct_id)
2354 struct header *elem;
2356 TAILQ_FOREACH(elem, &p->headers, node)
2357 if (elem->struct_id == struct_id)
/* Parse an "h.<name>" header reference: validate the "h." prefix, then
 * resolve the remainder as the header name. */
2363 static struct header *
2364 header_parse(struct rte_swx_pipeline *p,
2367 if (name[0] != 'h' || name[1] != '.')
2370 return header_find(p, &name[2]);
/* Parse an "h.<header>.<field>" reference: resolves the header, then
 * the field within the header's struct type. On success *header is set. */
2373 static struct field *
2374 header_field_parse(struct rte_swx_pipeline *p,
2376 struct header **header)
2380 char *header_name, *field_name;
2382 if ((name[0] != 'h') || (name[1] != '.'))
2385 header_name = strdup(&name[2]);
2389 field_name = strchr(header_name, '.');
2398 h = header_find(p, header_name);
2404 f = struct_type_field_find(h->st, field_name);
/* Register a packet header of a previously registered struct type.
 * The header count is capped by the bit width of the per-thread
 * valid_headers mask (one validity bit per header). The header gets the
 * next struct id and header id, then is appended to p->headers. */
2418 rte_swx_pipeline_packet_header_register(struct rte_swx_pipeline *p,
2420 const char *struct_type_name)
2422 struct struct_type *st;
2424 size_t n_headers_max;
2427 CHECK_NAME(name, EINVAL);
2428 CHECK_NAME(struct_type_name, EINVAL);
2430 CHECK(!header_find(p, name), EEXIST);
2432 st = struct_type_find(p, struct_type_name);
/* Cap = number of bits in thread.valid_headers (one bit per header). */
2435 n_headers_max = RTE_SIZEOF_FIELD(struct thread, valid_headers) * 8;
2436 CHECK(p->n_headers < n_headers_max, ENOSPC);
2438 /* Node allocation. */
2439 h = calloc(1, sizeof(struct header));
2442 /* Node initialization. */
2443 strcpy(h->name, name);
2445 h->struct_id = p->n_structs;
2446 h->id = p->n_headers;
2448 /* Node add to tailq. */
2449 TAILQ_INSERT_TAIL(&p->headers, h, node);
/* Build phase for headers: sum the byte size of all header struct types,
 * then per thread allocate the header runtime arrays plus two flat
 * backing buffers (header_storage for parsed headers, header_out_storage
 * for the emit path). Each header's ptr0 points at its fixed slice of
 * header_storage, which is also published into t->structs[]. */
2457 header_build(struct rte_swx_pipeline *p)
2460 uint32_t n_bytes = 0, i;
2462 TAILQ_FOREACH(h, &p->headers, node) {
2463 n_bytes += h->st->n_bits / 8;
2466 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2467 struct thread *t = &p->threads[i];
2468 uint32_t offset = 0;
2470 t->headers = calloc(p->n_headers,
2471 sizeof(struct header_runtime));
2472 CHECK(t->headers, ENOMEM);
2474 t->headers_out = calloc(p->n_headers,
2475 sizeof(struct header_out_runtime));
2476 CHECK(t->headers_out, ENOMEM);
2478 t->header_storage = calloc(1, n_bytes);
2479 CHECK(t->header_storage, ENOMEM);
2481 t->header_out_storage = calloc(1, n_bytes);
2482 CHECK(t->header_out_storage, ENOMEM);
2484 TAILQ_FOREACH(h, &p->headers, node) {
2485 uint8_t *header_storage;
2487 header_storage = &t->header_storage[offset];
2488 offset += h->st->n_bits / 8;
2490 t->headers[h->id].ptr0 = header_storage;
2491 t->structs[h->struct_id] = header_storage;
/* Release per-thread header runtime arrays and backing storage.
 * free(NULL) is a no-op, so no guards are needed on partially built state. */
2499 header_build_free(struct rte_swx_pipeline *p)
2503 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2504 struct thread *t = &p->threads[i];
2506 free(t->headers_out);
2507 t->headers_out = NULL;
2512 free(t->header_out_storage);
2513 t->header_out_storage = NULL;
2515 free(t->header_storage);
2516 t->header_storage = NULL;
/* Tear down headers: free build state, then drain the header list. */
2521 header_free(struct rte_swx_pipeline *p)
2523 header_build_free(p);
2526 struct header *elem;
2528 elem = TAILQ_FIRST(&p->headers);
2532 TAILQ_REMOVE(&p->headers, elem, node);
/* Parse an "m.<field>" metadata reference: requires the metadata struct
 * type to be registered and the "m." prefix, then resolves the field. */
2540 static struct field *
2541 metadata_field_parse(struct rte_swx_pipeline *p, const char *name)
2543 if (!p->metadata_st)
2546 if (name[0] != 'm' || name[1] != '.')
2549 return struct_type_field_find(p->metadata_st, &name[2]);
/* Select the packet metadata struct type. May only be done once
 * (p->metadata_st must still be NULL); the metadata gets the next
 * struct id for its per-thread t->structs[] slot. */
2553 rte_swx_pipeline_packet_metadata_register(struct rte_swx_pipeline *p,
2554 const char *struct_type_name)
2556 struct struct_type *st = NULL;
2560 CHECK_NAME(struct_type_name, EINVAL);
2561 st = struct_type_find(p, struct_type_name);
2563 CHECK(!p->metadata_st, EINVAL);
2565 p->metadata_st = st;
2566 p->metadata_struct_id = p->n_structs;
/* Build phase for metadata: per thread, allocate one zeroed metadata
 * buffer (metadata struct n_bits / 8 bytes) and publish it into
 * t->structs[] at the metadata struct id. */
2574 metadata_build(struct rte_swx_pipeline *p)
2576 uint32_t n_bytes = p->metadata_st->n_bits / 8;
2579 /* Thread-level initialization. */
2580 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2581 struct thread *t = &p->threads[i];
2584 metadata = calloc(1, n_bytes);
2585 CHECK(metadata, ENOMEM);
2587 t->metadata = metadata;
2588 t->structs[p->metadata_struct_id] = metadata;
/* Release the per-thread metadata buffers (freed on the elided lines). */
2595 metadata_build_free(struct rte_swx_pipeline *p)
2599 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2600 struct thread *t = &p->threads[i];
/* Tear down metadata state; delegates to metadata_build_free(). */
2608 metadata_free(struct rte_swx_pipeline *p)
2610 metadata_build_free(p);
/* Predicate: is this instruction type a TX variant? (body elided here). */
2617 instruction_is_tx(enum instruction_type type)
/* Predicate: is this instruction any of the jump variants (unconditional,
 * header valid/invalid, table hit/miss, action hit/miss, or the relational
 * EQ/NEQ/LT/GT families across their operand-type specializations)?
 * The M/H/I suffixes denote metadata/header/immediate operand forms. */
2630 instruction_is_jmp(struct instruction *instr)
2632 switch (instr->type) {
2634 case INSTR_JMP_VALID:
2635 case INSTR_JMP_INVALID:
2637 case INSTR_JMP_MISS:
2638 case INSTR_JMP_ACTION_HIT:
2639 case INSTR_JMP_ACTION_MISS:
2641 case INSTR_JMP_EQ_MH:
2642 case INSTR_JMP_EQ_HM:
2643 case INSTR_JMP_EQ_HH:
2644 case INSTR_JMP_EQ_I:
2646 case INSTR_JMP_NEQ_MH:
2647 case INSTR_JMP_NEQ_HM:
2648 case INSTR_JMP_NEQ_HH:
2649 case INSTR_JMP_NEQ_I:
2651 case INSTR_JMP_LT_MH:
2652 case INSTR_JMP_LT_HM:
2653 case INSTR_JMP_LT_HH:
2654 case INSTR_JMP_LT_MI:
2655 case INSTR_JMP_LT_HI:
2657 case INSTR_JMP_GT_MH:
2658 case INSTR_JMP_GT_HM:
2659 case INSTR_JMP_GT_HH:
2660 case INSTR_JMP_GT_MI:
2661 case INSTR_JMP_GT_HI:
2669 static struct field *
2670 action_field_parse(struct action *action, const char *name);
/* Resolve a field reference of any kind by its prefix: "h." header field,
 * "m." metadata field, action argument, "e." extern object mailbox field,
 * or "f." extern function mailbox field. On success *struct_id receives
 * the t->structs[] index the field lives in. */
2672 static struct field *
2673 struct_field_parse(struct rte_swx_pipeline *p,
2674 struct action *action,
2676 uint32_t *struct_id)
2683 struct header *header;
2685 f = header_field_parse(p, name, &header);
2689 *struct_id = header->struct_id;
2695 f = metadata_field_parse(p, name);
2699 *struct_id = p->metadata_struct_id;
2708 f = action_field_parse(action, name);
2718 struct extern_obj *obj;
2720 f = extern_obj_mailbox_field_parse(p, name, &obj);
2724 *struct_id = obj->struct_id;
2730 struct extern_func *func;
2732 f = extern_func_mailbox_field_parse(p, name, &func);
2736 *struct_id = func->struct_id;
/* Advance the RX port round-robin cursor; the bitmask wrap relies on
 * n_ports_in being a power of 2 (enforced in port_in_build()). */
2746 pipeline_port_inc(struct rte_swx_pipeline *p)
2748 p->port_id = (p->port_id + 1) & (p->n_ports_in - 1);
/* Reset the thread's instruction pointer to the start of the pipeline program. */
2752 thread_ip_reset(struct rte_swx_pipeline *p, struct thread *t)
2754 t->ip = p->instructions;
/* Set the thread's instruction pointer to an explicit target (body elided). */
2758 thread_ip_set(struct thread *t, struct instruction *ip)
/* Jump the thread into the instruction stream of the given action id. */
2764 thread_ip_action_call(struct rte_swx_pipeline *p,
2769 t->ip = p->action_instructions[action_id];
2773 thread_ip_inc(struct rte_swx_pipeline *p);
/* Advance the current thread's instruction pointer by one instruction. */
2776 thread_ip_inc(struct rte_swx_pipeline *p)
2778 struct thread *t = &p->threads[p->thread_id];
/* Branchlessly advance the IP by 'cond' instructions (0 or 1); body elided. */
2784 thread_ip_inc_cond(struct thread *t, int cond)
/* Switch to the next pipeline thread; mask wrap relies on
 * RTE_SWX_PIPELINE_THREADS_MAX being a power of 2. */
2790 thread_yield(struct rte_swx_pipeline *p)
2792 p->thread_id = (p->thread_id + 1) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
/* Branchless conditional yield: advances thread_id by 'cond' (0 = stay,
 * 1 = next thread), wrapping with the power-of-2 thread-count mask. */
2796 thread_yield_cond(struct rte_swx_pipeline *p, int cond)
2798 p->thread_id = (p->thread_id + cond) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
/* Translate "rx m.<field>": only valid outside actions, takes exactly one
 * operand that must be a metadata field; stores the field's byte offset
 * and bit width so instr_rx_exec() can record the input port id there. */
2805 instr_rx_translate(struct rte_swx_pipeline *p,
2806 struct action *action,
2809 struct instruction *instr,
2810 struct instruction_data *data __rte_unused)
2814 CHECK(!action, EINVAL);
2815 CHECK(n_tokens == 2, EINVAL);
2817 f = metadata_field_parse(p, tokens[1]);
2820 instr->type = INSTR_RX;
2821 instr->io.io.offset = f->offset / 8;
2822 instr->io.io.n_bits = f->n_bits;
2827 instr_rx_exec(struct rte_swx_pipeline *p);
/* Execute rx: poll the current round-robin input port, point t->ptr at
 * the packet payload and prefetch it, clear header state, write the port
 * id into the designated metadata field, snapshot the table state, then
 * advance the port cursor. The IP only advances if a packet arrived, so
 * an empty poll retries rx on the next port. */
2830 instr_rx_exec(struct rte_swx_pipeline *p)
2832 struct thread *t = &p->threads[p->thread_id];
2833 struct instruction *ip = t->ip;
2834 struct port_in_runtime *port = &p->in[p->port_id];
2835 struct rte_swx_pkt *pkt = &t->pkt;
2839 pkt_received = port->pkt_rx(port->obj, pkt);
2840 t->ptr = &pkt->pkt[pkt->offset];
2841 rte_prefetch0(t->ptr);
2843 TRACE("[Thread %2u] rx %s from port %u\n",
2845 pkt_received ? "1 pkt" : "0 pkts",
2849 t->valid_headers = 0;
2850 t->n_headers_out = 0;
2853 METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, p->port_id);
2856 t->table_state = p->table_state;
2859 pipeline_port_inc(p);
2860 thread_ip_inc_cond(t, pkt_received);
/* Translate "tx <port>": the operand is either a metadata field holding
 * the output port id (INSTR_TX) or, failing that, a literal port number
 * parsed with strtoul (INSTR_TX_I with an immediate value). */
2868 instr_tx_translate(struct rte_swx_pipeline *p,
2869 struct action *action __rte_unused,
2872 struct instruction *instr,
2873 struct instruction_data *data __rte_unused)
2875 char *port = tokens[1];
2879 CHECK(n_tokens == 2, EINVAL);
2881 f = metadata_field_parse(p, port);
2883 instr->type = INSTR_TX;
2884 instr->io.io.offset = f->offset / 8;
2885 instr->io.io.n_bits = f->n_bits;
/* Not a metadata field: parse as an immediate port id; the whole
 * token must be consumed (port[0] == '\0' after strtoul). */
2890 port_val = strtoul(port, &port, 0);
2891 CHECK(!port[0], EINVAL);
2893 instr->type = INSTR_TX_I;
2894 instr->io.io.val = port_val;
/* Translate "drop": no operands; encoded as an immediate tx to the last
 * output port (n_ports_out - 1), which by convention is the drop port. */
2899 instr_drop_translate(struct rte_swx_pipeline *p,
2900 struct action *action __rte_unused,
2901 char **tokens __rte_unused,
2903 struct instruction *instr,
2904 struct instruction_data *data __rte_unused)
2906 CHECK(n_tokens == 1, EINVAL);
2909 instr->type = INSTR_TX_I;
2910 instr->io.io.val = p->n_ports_out - 1;
/* Reconstruct the outgoing packet from the emitted header list.
 * Fast path 1: a single emitted header that already sits contiguously in
 * front of the payload — just grow the packet window backwards (covers
 * both "no change" and decapsulation).
 * Fast path 2: exactly two emitted headers where the second is contiguous
 * with the payload and the first is unmodified in its home storage —
 * header encapsulation (optionally after a decap); copy the first header
 * in front of the second.
 * Fallback: gather all emitted headers into header_out_storage, then copy
 * the whole blob in front of the payload, adjusting offset/length. */
2915 emit_handler(struct thread *t)
2917 struct header_out_runtime *h0 = &t->headers_out[0];
2918 struct header_out_runtime *h1 = &t->headers_out[1];
2919 uint32_t offset = 0, i;
2921 /* No header change or header decapsulation. */
2922 if ((t->n_headers_out == 1) &&
2923 (h0->ptr + h0->n_bytes == t->ptr)) {
2924 TRACE("Emit handler: no header change or header decap.\n");
2926 t->pkt.offset -= h0->n_bytes;
2927 t->pkt.length += h0->n_bytes;
2932 /* Header encapsulation (optionally, with prior header decasulation). */
2933 if ((t->n_headers_out == 2) &&
2934 (h1->ptr + h1->n_bytes == t->ptr) &&
2935 (h0->ptr == h0->ptr0)) {
2938 TRACE("Emit handler: header encapsulation.\n");
2940 offset = h0->n_bytes + h1->n_bytes;
2941 memcpy(t->ptr - offset, h0->ptr, h0->n_bytes);
2942 t->pkt.offset -= offset;
2943 t->pkt.length += offset;
2948 /* Header insertion. */
2951 /* Header extraction. */
2954 /* For any other case. */
2955 TRACE("Emit handler: complex case.\n");
2957 for (i = 0; i < t->n_headers_out; i++) {
2958 struct header_out_runtime *h = &t->headers_out[i];
2960 memcpy(&t->header_out_storage[offset], h->ptr, h->n_bytes);
2961 offset += h->n_bytes;
2965 memcpy(t->ptr - offset, t->header_out_storage, offset);
2966 t->pkt.offset -= offset;
2967 t->pkt.length += offset;
2972 instr_tx_exec(struct rte_swx_pipeline *p);
/* Execute tx: read the output port id from the metadata field named at
 * translation time, transmit the packet (emit_handler runs on the elided
 * lines before pkt_tx), and restart the thread's program at rx. */
2975 instr_tx_exec(struct rte_swx_pipeline *p)
2977 struct thread *t = &p->threads[p->thread_id];
2978 struct instruction *ip = t->ip;
2979 uint64_t port_id = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
2980 struct port_out_runtime *port = &p->out[port_id];
2981 struct rte_swx_pkt *pkt = &t->pkt;
2983 TRACE("[Thread %2u]: tx 1 pkt to port %u\n",
2991 port->pkt_tx(port->obj, pkt);
2994 thread_ip_reset(p, t);
/* Execute tx with an immediate port id baked into the instruction
 * (also used for "drop"); otherwise identical to instr_tx_exec(). */
2999 instr_tx_i_exec(struct rte_swx_pipeline *p)
3001 struct thread *t = &p->threads[p->thread_id];
3002 struct instruction *ip = t->ip;
3003 uint64_t port_id = ip->io.io.val;
3004 struct port_out_runtime *port = &p->out[port_id];
3005 struct rte_swx_pkt *pkt = &t->pkt;
3007 TRACE("[Thread %2u]: tx (i) 1 pkt to port %u\n",
3015 port->pkt_tx(port->obj, pkt);
3018 thread_ip_reset(p, t);
/* Translate "extract h.<header>": only valid outside actions; records the
 * header/struct ids and byte size in slot 0 (adjacent extracts may later
 * be fused into multi-header variants by the optimizer). */
3026 instr_hdr_extract_translate(struct rte_swx_pipeline *p,
3027 struct action *action,
3030 struct instruction *instr,
3031 struct instruction_data *data __rte_unused)
3035 CHECK(!action, EINVAL);
3036 CHECK(n_tokens == 2, EINVAL);
3038 h = header_parse(p, tokens[1]);
3041 instr->type = INSTR_HDR_EXTRACT;
3042 instr->io.hdr.header_id[0] = h->id;
3043 instr->io.hdr.struct_id[0] = h->struct_id;
3044 instr->io.hdr.n_bytes[0] = h->st->n_bytes / 8;
3049 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract);
/* Common extract engine for the fused variants: for each of n_extract
 * headers, point the header's t->structs[] slot directly into the packet
 * (zero-copy parse), set its validity bit, and advance past it (pointer
 * advance and offset/length bookkeeping on the elided lines). The updated
 * valid-header mask and packet window are written back at the end. */
3052 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract)
3054 struct thread *t = &p->threads[p->thread_id];
3055 struct instruction *ip = t->ip;
3056 uint64_t valid_headers = t->valid_headers;
3057 uint8_t *ptr = t->ptr;
3058 uint32_t offset = t->pkt.offset;
3059 uint32_t length = t->pkt.length;
3062 for (i = 0; i < n_extract; i++) {
3063 uint32_t header_id = ip->io.hdr.header_id[i];
3064 uint32_t struct_id = ip->io.hdr.struct_id[i];
3065 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
3067 TRACE("[Thread %2u]: extract header %u (%u bytes)\n",
3073 t->structs[struct_id] = ptr;
3074 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
3083 t->valid_headers = valid_headers;
3086 t->pkt.offset = offset;
3087 t->pkt.length = length;
/* Single-header extract: thin wrapper over the common extract engine. */
3092 instr_hdr_extract_exec(struct rte_swx_pipeline *p)
3094 __instr_hdr_extract_exec(p, 1);
/* Fused extract of 2 consecutive headers in a single dispatch. */
3101 instr_hdr_extract2_exec(struct rte_swx_pipeline *p)
3103 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
3106 __instr_hdr_extract_exec(p, 2);
/* Fused extract of 3 consecutive headers in a single dispatch. */
3113 instr_hdr_extract3_exec(struct rte_swx_pipeline *p)
3115 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
3118 __instr_hdr_extract_exec(p, 3);
/* Fused extract of 4 consecutive headers in a single dispatch. */
3125 instr_hdr_extract4_exec(struct rte_swx_pipeline *p)
3127 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
3130 __instr_hdr_extract_exec(p, 4);
/* Fused extract of 5 consecutive headers in a single dispatch. */
3137 instr_hdr_extract5_exec(struct rte_swx_pipeline *p)
3139 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
3142 __instr_hdr_extract_exec(p, 5);
/* Fused extract of 6 consecutive headers in a single dispatch. */
3149 instr_hdr_extract6_exec(struct rte_swx_pipeline *p)
3151 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
3154 __instr_hdr_extract_exec(p, 6);
/* Fused extract of 7 consecutive headers in a single dispatch. */
3161 instr_hdr_extract7_exec(struct rte_swx_pipeline *p)
3163 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
3166 __instr_hdr_extract_exec(p, 7);
/* Fused extract of 8 consecutive headers in a single dispatch. */
3173 instr_hdr_extract8_exec(struct rte_swx_pipeline *p)
3175 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
3178 __instr_hdr_extract_exec(p, 8);
/* Translate "emit h.<header>": records the header/struct ids and byte
 * size in slot 0; valid both inside and outside actions. */
3188 instr_hdr_emit_translate(struct rte_swx_pipeline *p,
3189 struct action *action __rte_unused,
3192 struct instruction *instr,
3193 struct instruction_data *data __rte_unused)
3197 CHECK(n_tokens == 2, EINVAL);
3199 h = header_parse(p, tokens[1]);
3202 instr->type = INSTR_HDR_EMIT;
3203 instr->io.hdr.header_id[0] = h->id;
3204 instr->io.hdr.struct_id[0] = h->struct_id;
3205 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
3210 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit);
/* Common emit engine for the fused variants: append each valid header to
 * the headers_out list, coalescing a header that is memory-contiguous
 * with the tail of the current out entry (ho_ptr + ho_nbytes == hi_ptr)
 * into that entry instead of opening a new one. Invalid headers are
 * skipped. Final tail byte count and out-list length are written back.
 * NOTE(review): 'ho' initially points at headers_out[n_headers_out - 1],
 * i.e. one before the array when the list is empty; it is only
 * dereferenced after the !t->n_headers_out branch reseats it — relies on
 * the address computation alone being harmless. */
3213 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit)
3215 struct thread *t = &p->threads[p->thread_id];
3216 struct instruction *ip = t->ip;
3217 uint64_t valid_headers = t->valid_headers;
3218 uint32_t n_headers_out = t->n_headers_out;
3219 struct header_out_runtime *ho = &t->headers_out[n_headers_out - 1];
3220 uint8_t *ho_ptr = NULL;
3221 uint32_t ho_nbytes = 0, first = 1, i;
3223 for (i = 0; i < n_emit; i++) {
3224 uint32_t header_id = ip->io.hdr.header_id[i];
3225 uint32_t struct_id = ip->io.hdr.struct_id[i];
3226 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
3228 struct header_runtime *hi = &t->headers[header_id];
3229 uint8_t *hi_ptr = t->structs[struct_id];
/* Only valid headers are emitted; invalid ones are silently skipped. */
3231 if (!MASK64_BIT_GET(valid_headers, header_id))
3234 TRACE("[Thread %2u]: emit header %u\n",
3242 if (!t->n_headers_out) {
3243 ho = &t->headers_out[0];
3245 ho->ptr0 = hi->ptr0;
3249 ho_nbytes = n_bytes;
3256 ho_nbytes = ho->n_bytes;
/* Contiguous with the current out entry: extend it in place. */
3260 if (ho_ptr + ho_nbytes == hi_ptr) {
3261 ho_nbytes += n_bytes;
3263 ho->n_bytes = ho_nbytes;
3266 ho->ptr0 = hi->ptr0;
3270 ho_nbytes = n_bytes;
3276 ho->n_bytes = ho_nbytes;
3277 t->n_headers_out = n_headers_out;
/* Single-header emit: thin wrapper over the common emit engine. */
3281 instr_hdr_emit_exec(struct rte_swx_pipeline *p)
3283 __instr_hdr_emit_exec(p, 1);
/* Fused "emit + tx": emits 1 header, then transmits (tx on elided lines). */
3290 instr_hdr_emit_tx_exec(struct rte_swx_pipeline *p)
3292 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
3295 __instr_hdr_emit_exec(p, 1);
/* Fused "2x emit + tx": emits 2 headers, then transmits. */
3300 instr_hdr_emit2_tx_exec(struct rte_swx_pipeline *p)
3302 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
3305 __instr_hdr_emit_exec(p, 2);
/* Fused "3x emit + tx": emits 3 headers, then transmits. */
3310 instr_hdr_emit3_tx_exec(struct rte_swx_pipeline *p)
3312 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
3315 __instr_hdr_emit_exec(p, 3);
/* Fused "4x emit + tx": emits 4 headers, then transmits. */
3320 instr_hdr_emit4_tx_exec(struct rte_swx_pipeline *p)
3322 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
3325 __instr_hdr_emit_exec(p, 4);
/* Fused "5x emit + tx": emits 5 headers, then transmits. */
3330 instr_hdr_emit5_tx_exec(struct rte_swx_pipeline *p)
3332 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
3335 __instr_hdr_emit_exec(p, 5);
/* Fused "6x emit + tx": emits 6 headers, then transmits. */
3340 instr_hdr_emit6_tx_exec(struct rte_swx_pipeline *p)
3342 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
3345 __instr_hdr_emit_exec(p, 6);
/* Fused "7x emit + tx": emits 7 headers, then transmits. */
3350 instr_hdr_emit7_tx_exec(struct rte_swx_pipeline *p)
3352 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
3355 __instr_hdr_emit_exec(p, 7);
/* Fused "8x emit + tx": emits 8 headers, then transmits. */
3360 instr_hdr_emit8_tx_exec(struct rte_swx_pipeline *p)
3362 TRACE("[Thread %2u] *** The next 9 instructions are fused. ***\n",
3365 __instr_hdr_emit_exec(p, 8);
/* Translate "validate h.<header>": one operand, records the header id. */
3373 instr_hdr_validate_translate(struct rte_swx_pipeline *p,
3374 struct action *action __rte_unused,
3377 struct instruction *instr,
3378 struct instruction_data *data __rte_unused)
3382 CHECK(n_tokens == 2, EINVAL);
3384 h = header_parse(p, tokens[1]);
3387 instr->type = INSTR_HDR_VALIDATE;
3388 instr->valid.header_id = h->id;
/* Execute validate: set the header's bit in the thread's valid mask. */
3393 instr_hdr_validate_exec(struct rte_swx_pipeline *p)
3395 struct thread *t = &p->threads[p->thread_id];
3396 struct instruction *ip = t->ip;
3397 uint32_t header_id = ip->valid.header_id;
3399 TRACE("[Thread %2u] validate header %u\n", p->thread_id, header_id);
3402 t->valid_headers = MASK64_BIT_SET(t->valid_headers, header_id);
/* Translate "invalidate h.<header>": one operand, records the header id. */
3412 instr_hdr_invalidate_translate(struct rte_swx_pipeline *p,
3413 struct action *action __rte_unused,
3416 struct instruction *instr,
3417 struct instruction_data *data __rte_unused)
3421 CHECK(n_tokens == 2, EINVAL);
3423 h = header_parse(p, tokens[1]);
3426 instr->type = INSTR_HDR_INVALIDATE;
3427 instr->valid.header_id = h->id;
/* Execute invalidate: clear the header's bit in the thread's valid mask. */
3432 instr_hdr_invalidate_exec(struct rte_swx_pipeline *p)
3434 struct thread *t = &p->threads[p->thread_id];
3435 struct instruction *ip = t->ip;
3436 uint32_t header_id = ip->valid.header_id;
3438 TRACE("[Thread %2u] invalidate header %u\n", p->thread_id, header_id);
3441 t->valid_headers = MASK64_BIT_CLR(t->valid_headers, header_id);
/* Forward declaration: table lookup by name (defined later in this file). */
3450 static struct table *
3451 table_find(struct rte_swx_pipeline *p, const char *name);
/* Translate the "table TABLE_NAME" instruction. A table lookup is only legal
 * at pipeline scope, i.e. outside of any action (hence CHECK(!action)).
 * Encodes INSTR_TABLE with the resolved table id.
 */
3454 instr_table_translate(struct rte_swx_pipeline *p,
3455 struct action *action,
3458 struct instruction *instr,
3459 struct instruction_data *data __rte_unused)
3463 CHECK(!action, EINVAL);
3464 CHECK(n_tokens == 2, EINVAL);
3466 t = table_find(p, tokens[1]);
3469 instr->type = INSTR_TABLE;
3470 instr->table.table_id = t->id;
/* Execute the table lookup instruction: run the table's lookup function,
 * and when it reports completion ("done"), select either the matched action
 * (hit) or the table's default action (miss), bump the hit/miss and
 * per-action statistics counters, and transfer control to the action code.
 */
3475 instr_table_exec(struct rte_swx_pipeline *p)
3477 struct thread *t = &p->threads[p->thread_id];
3478 struct instruction *ip = t->ip;
3479 uint32_t table_id = ip->table.table_id;
3480 struct rte_swx_table_state *ts = &t->table_state[table_id];
3481 struct table_runtime *table = &t->tables[table_id];
3482 struct table_statistics *stats = &p->table_stats[table_id];
3483 uint64_t action_id, n_pkts_hit, n_pkts_action;
3484 uint8_t *action_data;
/* Multi-step lookup: the table func may need to be re-invoked before the
 * result is final (not-finalized path below).
 */
3488 done = table->func(ts->obj,
3496 TRACE("[Thread %2u] table %u (not finalized)\n",
/* Lookup is final: fall back to the default action/data on a miss. */
3504 action_id = hit ? action_id : ts->default_action_id;
3505 action_data = hit ? action_data : ts->default_action_data;
3506 n_pkts_hit = stats->n_pkts_hit[hit];
3507 n_pkts_action = stats->n_pkts_action[action_id];
3509 TRACE("[Thread %2u] table %u (%s, action %u)\n",
3512 hit ? "hit" : "miss",
3513 (uint32_t)action_id);
/* structs[0] is the action-data struct seen by the action instructions. */
3515 t->action_id = action_id;
3516 t->structs[0] = action_data;
3518 stats->n_pkts_hit[hit] = n_pkts_hit + 1;
3519 stats->n_pkts_action[action_id] = n_pkts_action + 1;
/* Jump into the selected action's instruction stream. */
3522 thread_ip_action_call(p, t, action_id);
/* Translate the "extern" instruction. The single operand token is inspected
 * by its first character: 'e' selects an extern object member function
 * (OBJ.FUNC form), 'f' selects a plain extern function. The corresponding
 * ids are encoded into the instruction.
 */
3529 instr_extern_translate(struct rte_swx_pipeline *p,
3530 struct action *action __rte_unused,
3533 struct instruction *instr,
3534 struct instruction_data *data __rte_unused)
3536 char *token = tokens[1];
3538 CHECK(n_tokens == 2, EINVAL);
/* Extern object member function call. */
3540 if (token[0] == 'e') {
3541 struct extern_obj *obj;
3542 struct extern_type_member_func *func;
3544 func = extern_obj_member_func_parse(p, token, &obj);
3545 CHECK(func, EINVAL);
3547 instr->type = INSTR_EXTERN_OBJ;
3548 instr->ext_obj.ext_obj_id = obj->id;
3549 instr->ext_obj.func_id = func->id;
/* Extern function call. */
3554 if (token[0] == 'f') {
3555 struct extern_func *func;
3557 func = extern_func_parse(p, token);
3558 CHECK(func, EINVAL);
3560 instr->type = INSTR_EXTERN_FUNC;
3561 instr->ext_func.ext_func_id = func->id;
/* Execute an extern object member function call. The function returns a
 * "done" flag: when not done, the thread IP is not advanced and the thread
 * yields so the call can be resumed later.
 */
3570 instr_extern_obj_exec(struct rte_swx_pipeline *p)
3572 struct thread *t = &p->threads[p->thread_id];
3573 struct instruction *ip = t->ip;
3574 uint32_t obj_id = ip->ext_obj.ext_obj_id;
3575 uint32_t func_id = ip->ext_obj.func_id;
3576 struct extern_obj_runtime *obj = &t->extern_objs[obj_id];
3577 rte_swx_extern_type_member_func_t func = obj->funcs[func_id];
3579 TRACE("[Thread %2u] extern obj %u member func %u\n",
3584 /* Extern object member function execute. */
3585 uint32_t done = func(obj->obj, obj->mailbox);
/* Advance IP only when done; yield the thread otherwise (done ^ 1). */
3588 thread_ip_inc_cond(t, done);
3589 thread_yield_cond(p, done ^ 1);
/* Execute an extern (free-standing) function call. Same resumable-call
 * protocol as instr_extern_obj_exec(): advance IP on done, yield otherwise.
 */
3593 instr_extern_func_exec(struct rte_swx_pipeline *p)
3595 struct thread *t = &p->threads[p->thread_id];
3596 struct instruction *ip = t->ip;
3597 uint32_t ext_func_id = ip->ext_func.ext_func_id;
3598 struct extern_func_runtime *ext_func = &t->extern_funcs[ext_func_id];
3599 rte_swx_extern_func_t func = ext_func->func;
3601 TRACE("[Thread %2u] extern func %u\n",
3605 /* Extern function execute. */
3606 uint32_t done = func(ext_func->mailbox);
3609 thread_ip_inc_cond(t, done);
3610 thread_yield_cond(p, done ^ 1);
/* Translate the "mov dst src" instruction. When src parses as a struct
 * field, one of four field-to-field variants is selected based on whether
 * dst/src live in a header ('h' prefix) or in metadata; otherwise src is
 * parsed as an integer immediate (MOV_I).
 */
3617 instr_mov_translate(struct rte_swx_pipeline *p,
3618 struct action *action,
3621 struct instruction *instr,
3622 struct instruction_data *data __rte_unused)
3624 char *dst = tokens[1], *src = tokens[2];
3625 struct field *fdst, *fsrc;
3627 uint32_t dst_struct_id = 0, src_struct_id = 0;
3629 CHECK(n_tokens == 3, EINVAL);
3631 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3632 CHECK(fdst, EINVAL);
3634 /* MOV, MOV_MH, MOV_HM or MOV_HH. */
3635 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3637 instr->type = INSTR_MOV;
3638 if (dst[0] != 'h' && src[0] == 'h')
3639 instr->type = INSTR_MOV_MH;
3640 if (dst[0] == 'h' && src[0] != 'h')
3641 instr->type = INSTR_MOV_HM;
3642 if (dst[0] == 'h' && src[0] == 'h')
3643 instr->type = INSTR_MOV_HH;
/* Field offsets are stored in bytes (offset is in bits here). */
3645 instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
3646 instr->mov.dst.n_bits = fdst->n_bits;
3647 instr->mov.dst.offset = fdst->offset / 8;
3648 instr->mov.src.struct_id = (uint8_t)src_struct_id;
3649 instr->mov.src.n_bits = fsrc->n_bits;
3650 instr->mov.src.offset = fsrc->offset / 8;
/* MOV_I: src must be fully consumed by strtoull (no trailing chars). */
3655 src_val = strtoull(src, &src, 0);
3656 CHECK(!src[0], EINVAL);
/* Byte-swap the immediate and right-align it to the destination width;
 * NOTE(review): presumably applied only for header destinations — the
 * enclosing condition is not visible in this listing, confirm in full file.
 */
3659 src_val = hton64(src_val) >> (64 - fdst->n_bits);
3661 instr->type = INSTR_MOV_I;
3662 instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
3663 instr->mov.dst.n_bits = fdst->n_bits;
3664 instr->mov.dst.offset = fdst->offset / 8;
3665 instr->mov.src_val = src_val;
/* Execute handlers for the five mov variants. The suffix names the
 * endianness conversion pair: (m)etadata vs (h)eader on each side,
 * and (i) for an immediate source.
 */
3670 instr_mov_exec(struct rte_swx_pipeline *p)
3672 struct thread *t = &p->threads[p->thread_id];
3673 struct instruction *ip = t->ip;
3675 TRACE("[Thread %2u] mov\n",
/* mov with header source, metadata destination. */
3685 instr_mov_mh_exec(struct rte_swx_pipeline *p)
3687 struct thread *t = &p->threads[p->thread_id];
3688 struct instruction *ip = t->ip;
3690 TRACE("[Thread %2u] mov (mh)\n",
/* mov with metadata source, header destination. */
3700 instr_mov_hm_exec(struct rte_swx_pipeline *p)
3702 struct thread *t = &p->threads[p->thread_id];
3703 struct instruction *ip = t->ip;
3705 TRACE("[Thread %2u] mov (hm)\n",
/* mov with header source and header destination. */
3715 instr_mov_hh_exec(struct rte_swx_pipeline *p)
3717 struct thread *t = &p->threads[p->thread_id];
3718 struct instruction *ip = t->ip;
3720 TRACE("[Thread %2u] mov (hh)\n",
/* mov with an immediate source value. */
3730 instr_mov_i_exec(struct rte_swx_pipeline *p)
3732 struct thread *t = &p->threads[p->thread_id];
3733 struct instruction *ip = t->ip;
3735 TRACE("[Thread %2u] mov m.f %" PRIx64 "\n",
/* Common worker for the fused "dma h.s t.f" instructions: copy n_dma blocks
 * of action data (structs[0]) into the corresponding headers, marking each
 * destination header valid as it is written.
 */
3749 __instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma);
3752 __instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma)
3754 struct thread *t = &p->threads[p->thread_id];
3755 struct instruction *ip = t->ip;
3756 uint8_t *action_data = t->structs[0];
3757 uint64_t valid_headers = t->valid_headers;
3760 for (i = 0; i < n_dma; i++) {
3761 uint32_t header_id = ip->dma.dst.header_id[i];
3762 uint32_t struct_id = ip->dma.dst.struct_id[i];
3763 uint32_t offset = ip->dma.src.offset[i];
3764 uint32_t n_bytes = ip->dma.n_bytes[i];
/* Headers. */
3766 struct header_runtime *h = &t->headers[header_id];
3767 uint8_t *h_ptr0 = h->ptr0;
3768 uint8_t *h_ptr = t->structs[struct_id];
/* Destination: current header pointer if the header is already valid;
 * NOTE(review): the alternative branch (h_ptr0 for a not-yet-valid
 * header) is on an elided line — confirm in the full file.
 */
3770 void *dst = MASK64_BIT_GET(valid_headers, header_id) ?
3772 void *src = &action_data[offset];
3774 TRACE("[Thread %2u] dma h.s t.f\n", p->thread_id);
3777 memcpy(dst, src, n_bytes);
3778 t->structs[struct_id] = dst;
3779 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
/* Commit the updated valid-headers mask once, after the loop. */
3782 t->valid_headers = valid_headers;
/* Fused dma instruction wrappers: dispatch to __instr_dma_ht_exec() with
 * the number of fused dma operations (1..8); the N>1 variants log that N
 * consecutive instructions were fused.
 */
3786 instr_dma_ht_exec(struct rte_swx_pipeline *p)
3788 __instr_dma_ht_exec(p, 1);
3795 instr_dma_ht2_exec(struct rte_swx_pipeline *p)
3797 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
3800 __instr_dma_ht_exec(p, 2);
3807 instr_dma_ht3_exec(struct rte_swx_pipeline *p)
3809 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
3812 __instr_dma_ht_exec(p, 3);
3819 instr_dma_ht4_exec(struct rte_swx_pipeline *p)
3821 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
3824 __instr_dma_ht_exec(p, 4);
3831 instr_dma_ht5_exec(struct rte_swx_pipeline *p)
3833 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
3836 __instr_dma_ht_exec(p, 5);
3843 instr_dma_ht6_exec(struct rte_swx_pipeline *p)
3845 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
3848 __instr_dma_ht_exec(p, 6);
3855 instr_dma_ht7_exec(struct rte_swx_pipeline *p)
3857 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
3860 __instr_dma_ht_exec(p, 7);
3867 instr_dma_ht8_exec(struct rte_swx_pipeline *p)
3869 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
3872 __instr_dma_ht_exec(p, 8);
/* Translate "add dst src". Field source: select the HM/MH/HH variant from
 * the header ('h') vs metadata prefix of each operand. Immediate source:
 * parse an integer and encode ADD_MI or ADD_HI.
 */
3882 instr_alu_add_translate(struct rte_swx_pipeline *p,
3883 struct action *action,
3886 struct instruction *instr,
3887 struct instruction_data *data __rte_unused)
3889 char *dst = tokens[1], *src = tokens[2];
3890 struct field *fdst, *fsrc;
3892 uint32_t dst_struct_id = 0, src_struct_id = 0;
3894 CHECK(n_tokens == 3, EINVAL);
3896 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3897 CHECK(fdst, EINVAL);
3899 /* ADD, ADD_HM, ADD_MH, ADD_HH. */
3900 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3902 instr->type = INSTR_ALU_ADD;
3903 if (dst[0] == 'h' && src[0] != 'h')
3904 instr->type = INSTR_ALU_ADD_HM;
3905 if (dst[0] != 'h' && src[0] == 'h')
3906 instr->type = INSTR_ALU_ADD_MH;
3907 if (dst[0] == 'h' && src[0] == 'h')
3908 instr->type = INSTR_ALU_ADD_HH;
/* Offsets are converted from bits to bytes. */
3910 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3911 instr->alu.dst.n_bits = fdst->n_bits;
3912 instr->alu.dst.offset = fdst->offset / 8;
3913 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3914 instr->alu.src.n_bits = fsrc->n_bits;
3915 instr->alu.src.offset = fsrc->offset / 8;
3919 /* ADD_MI, ADD_HI. */
3920 src_val = strtoull(src, &src, 0);
3921 CHECK(!src[0], EINVAL);
3923 instr->type = INSTR_ALU_ADD_MI;
3925 instr->type = INSTR_ALU_ADD_HI;
3927 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3928 instr->alu.dst.n_bits = fdst->n_bits;
3929 instr->alu.dst.offset = fdst->offset / 8;
3930 instr->alu.src_val = src_val;
/* Translate "sub dst src". Same variant-selection scheme as
 * instr_alu_add_translate(): HM/MH/HH for field sources, MI/HI for an
 * integer immediate source.
 */
3935 instr_alu_sub_translate(struct rte_swx_pipeline *p,
3936 struct action *action,
3939 struct instruction *instr,
3940 struct instruction_data *data __rte_unused)
3942 char *dst = tokens[1], *src = tokens[2];
3943 struct field *fdst, *fsrc;
3945 uint32_t dst_struct_id = 0, src_struct_id = 0;
3947 CHECK(n_tokens == 3, EINVAL);
3949 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3950 CHECK(fdst, EINVAL);
3952 /* SUB, SUB_HM, SUB_MH, SUB_HH. */
3953 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3955 instr->type = INSTR_ALU_SUB;
3956 if (dst[0] == 'h' && src[0] != 'h')
3957 instr->type = INSTR_ALU_SUB_HM;
3958 if (dst[0] != 'h' && src[0] == 'h')
3959 instr->type = INSTR_ALU_SUB_MH;
3960 if (dst[0] == 'h' && src[0] == 'h')
3961 instr->type = INSTR_ALU_SUB_HH;
3963 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3964 instr->alu.dst.n_bits = fdst->n_bits;
3965 instr->alu.dst.offset = fdst->offset / 8;
3966 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3967 instr->alu.src.n_bits = fsrc->n_bits;
3968 instr->alu.src.offset = fsrc->offset / 8;
3972 /* SUB_MI, SUB_HI. */
3973 src_val = strtoull(src, &src, 0);
3974 CHECK(!src[0], EINVAL);
3976 instr->type = INSTR_ALU_SUB_MI;
3978 instr->type = INSTR_ALU_SUB_HI;
3980 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3981 instr->alu.dst.n_bits = fdst->n_bits;
3982 instr->alu.dst.offset = fdst->offset / 8;
3983 instr->alu.src_val = src_val;
/* Translate "ckadd dst src" (checksum add). The destination must be a
 * 16-bit header field (the checksum). Source is either another header
 * field (CKADD_FIELD) or an entire header (CKADD_STRUCT), with a
 * specialization for 20-byte headers (CKADD_STRUCT20, e.g. IPv4 without
 * options).
 */
3988 instr_alu_ckadd_translate(struct rte_swx_pipeline *p,
3989 struct action *action __rte_unused,
3992 struct instruction *instr,
3993 struct instruction_data *data __rte_unused)
3995 char *dst = tokens[1], *src = tokens[2];
3996 struct header *hdst, *hsrc;
3997 struct field *fdst, *fsrc;
3999 CHECK(n_tokens == 3, EINVAL);
/* Destination: must exist and be exactly 16 bits wide. */
4001 fdst = header_field_parse(p, dst, &hdst);
4002 CHECK(fdst && (fdst->n_bits == 16), EINVAL);
/* CKADD_FIELD. */
4005 fsrc = header_field_parse(p, src, &hsrc);
4007 instr->type = INSTR_ALU_CKADD_FIELD;
4008 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
4009 instr->alu.dst.n_bits = fdst->n_bits;
4010 instr->alu.dst.offset = fdst->offset / 8;
4011 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
4012 instr->alu.src.n_bits = fsrc->n_bits;
4013 instr->alu.src.offset = fsrc->offset / 8;
4017 /* CKADD_STRUCT, CKADD_STRUCT20. */
4018 hsrc = header_parse(p, src);
4019 CHECK(hsrc, EINVAL);
4021 instr->type = INSTR_ALU_CKADD_STRUCT;
4022 if ((hsrc->st->n_bits / 8) == 20)
4023 instr->type = INSTR_ALU_CKADD_STRUCT20;
/* For the struct forms, src.n_bits holds the full header size. */
4025 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
4026 instr->alu.dst.n_bits = fdst->n_bits;
4027 instr->alu.dst.offset = fdst->offset / 8;
4028 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
4029 instr->alu.src.n_bits = hsrc->st->n_bits;
4030 instr->alu.src.offset = 0; /* Unused. */
/* Translate "cksub dst src" (checksum subtract). Both operands must be
 * header fields; destination must be a 16-bit checksum field. Unlike
 * ckadd, there is no whole-struct form.
 */
4035 instr_alu_cksub_translate(struct rte_swx_pipeline *p,
4036 struct action *action __rte_unused,
4039 struct instruction *instr,
4040 struct instruction_data *data __rte_unused)
4042 char *dst = tokens[1], *src = tokens[2];
4043 struct header *hdst, *hsrc;
4044 struct field *fdst, *fsrc;
4046 CHECK(n_tokens == 3, EINVAL);
4048 fdst = header_field_parse(p, dst, &hdst);
4049 CHECK(fdst && (fdst->n_bits == 16), EINVAL);
4051 fsrc = header_field_parse(p, src, &hsrc);
4052 CHECK(fsrc, EINVAL);
4054 instr->type = INSTR_ALU_CKSUB_FIELD;
4055 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
4056 instr->alu.dst.n_bits = fdst->n_bits;
4057 instr->alu.dst.offset = fdst->offset / 8;
4058 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
4059 instr->alu.src.n_bits = fsrc->n_bits;
4060 instr->alu.src.offset = fsrc->offset / 8;
/* Translate "shl dst src" (shift left). Same operand scheme as the other
 * two-operand ALU instructions: HM/MH/HH field variants, MI/HI immediate
 * variants.
 */
4065 instr_alu_shl_translate(struct rte_swx_pipeline *p,
4066 struct action *action,
4069 struct instruction *instr,
4070 struct instruction_data *data __rte_unused)
4072 char *dst = tokens[1], *src = tokens[2];
4073 struct field *fdst, *fsrc;
4075 uint32_t dst_struct_id = 0, src_struct_id = 0;
4077 CHECK(n_tokens == 3, EINVAL);
4079 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
4080 CHECK(fdst, EINVAL);
4082 /* SHL, SHL_HM, SHL_MH, SHL_HH. */
4083 fsrc = struct_field_parse(p, action, src, &src_struct_id);
4085 instr->type = INSTR_ALU_SHL;
4086 if (dst[0] == 'h' && src[0] != 'h')
4087 instr->type = INSTR_ALU_SHL_HM;
4088 if (dst[0] != 'h' && src[0] == 'h')
4089 instr->type = INSTR_ALU_SHL_MH;
4090 if (dst[0] == 'h' && src[0] == 'h')
4091 instr->type = INSTR_ALU_SHL_HH;
4093 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4094 instr->alu.dst.n_bits = fdst->n_bits;
4095 instr->alu.dst.offset = fdst->offset / 8;
4096 instr->alu.src.struct_id = (uint8_t)src_struct_id;
4097 instr->alu.src.n_bits = fsrc->n_bits;
4098 instr->alu.src.offset = fsrc->offset / 8;
4102 /* SHL_MI, SHL_HI. */
4103 src_val = strtoull(src, &src, 0);
4104 CHECK(!src[0], EINVAL);
4106 instr->type = INSTR_ALU_SHL_MI;
4108 instr->type = INSTR_ALU_SHL_HI;
4110 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4111 instr->alu.dst.n_bits = fdst->n_bits;
4112 instr->alu.dst.offset = fdst->offset / 8;
4113 instr->alu.src_val = src_val;
/* Translate "shr dst src" (shift right). Mirror of
 * instr_alu_shl_translate().
 */
4118 instr_alu_shr_translate(struct rte_swx_pipeline *p,
4119 struct action *action,
4122 struct instruction *instr,
4123 struct instruction_data *data __rte_unused)
4125 char *dst = tokens[1], *src = tokens[2];
4126 struct field *fdst, *fsrc;
4128 uint32_t dst_struct_id = 0, src_struct_id = 0;
4130 CHECK(n_tokens == 3, EINVAL);
4132 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
4133 CHECK(fdst, EINVAL);
4135 /* SHR, SHR_HM, SHR_MH, SHR_HH. */
4136 fsrc = struct_field_parse(p, action, src, &src_struct_id);
4138 instr->type = INSTR_ALU_SHR;
4139 if (dst[0] == 'h' && src[0] != 'h')
4140 instr->type = INSTR_ALU_SHR_HM;
4141 if (dst[0] != 'h' && src[0] == 'h')
4142 instr->type = INSTR_ALU_SHR_MH;
4143 if (dst[0] == 'h' && src[0] == 'h')
4144 instr->type = INSTR_ALU_SHR_HH;
4146 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4147 instr->alu.dst.n_bits = fdst->n_bits;
4148 instr->alu.dst.offset = fdst->offset / 8;
4149 instr->alu.src.struct_id = (uint8_t)src_struct_id;
4150 instr->alu.src.n_bits = fsrc->n_bits;
4151 instr->alu.src.offset = fsrc->offset / 8;
4155 /* SHR_MI, SHR_HI. */
4156 src_val = strtoull(src, &src, 0);
4157 CHECK(!src[0], EINVAL);
4159 instr->type = INSTR_ALU_SHR_MI;
4161 instr->type = INSTR_ALU_SHR_HI;
4163 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4164 instr->alu.dst.n_bits = fdst->n_bits;
4165 instr->alu.dst.offset = fdst->offset / 8;
4166 instr->alu.src_val = src_val;
/* Translate "and dst src" (bitwise AND). Field variants MH/HM/HH as for
 * add/sub. The immediate form (AND_I) byte-swaps the constant and
 * right-aligns it to the destination width at translation time, so the
 * exec handler can apply it directly.
 */
4171 instr_alu_and_translate(struct rte_swx_pipeline *p,
4172 struct action *action,
4175 struct instruction *instr,
4176 struct instruction_data *data __rte_unused)
4178 char *dst = tokens[1], *src = tokens[2];
4179 struct field *fdst, *fsrc;
4181 uint32_t dst_struct_id = 0, src_struct_id = 0;
4183 CHECK(n_tokens == 3, EINVAL);
4185 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
4186 CHECK(fdst, EINVAL);
4188 /* AND, AND_MH, AND_HM, AND_HH. */
4189 fsrc = struct_field_parse(p, action, src, &src_struct_id);
4191 instr->type = INSTR_ALU_AND;
4192 if (dst[0] != 'h' && src[0] == 'h')
4193 instr->type = INSTR_ALU_AND_MH;
4194 if (dst[0] == 'h' && src[0] != 'h')
4195 instr->type = INSTR_ALU_AND_HM;
4196 if (dst[0] == 'h' && src[0] == 'h')
4197 instr->type = INSTR_ALU_AND_HH;
4199 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4200 instr->alu.dst.n_bits = fdst->n_bits;
4201 instr->alu.dst.offset = fdst->offset / 8;
4202 instr->alu.src.struct_id = (uint8_t)src_struct_id;
4203 instr->alu.src.n_bits = fsrc->n_bits;
4204 instr->alu.src.offset = fsrc->offset / 8;
/* AND_I: immediate must be fully consumed by strtoull. */
4209 src_val = strtoull(src, &src, 0);
4210 CHECK(!src[0], EINVAL);
/* NOTE(review): presumably applied only for header destinations — the
 * enclosing condition is on an elided line; confirm in the full file.
 */
4213 src_val = hton64(src_val) >> (64 - fdst->n_bits);
4215 instr->type = INSTR_ALU_AND_I;
4216 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4217 instr->alu.dst.n_bits = fdst->n_bits;
4218 instr->alu.dst.offset = fdst->offset / 8;
4219 instr->alu.src_val = src_val;
/* Translate "or dst src" (bitwise OR). Same structure as
 * instr_alu_and_translate(), producing OR/OR_MH/OR_HM/OR_HH or OR_I.
 */
4224 instr_alu_or_translate(struct rte_swx_pipeline *p,
4225 struct action *action,
4228 struct instruction *instr,
4229 struct instruction_data *data __rte_unused)
4231 char *dst = tokens[1], *src = tokens[2];
4232 struct field *fdst, *fsrc;
4234 uint32_t dst_struct_id = 0, src_struct_id = 0;
4236 CHECK(n_tokens == 3, EINVAL);
4238 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
4239 CHECK(fdst, EINVAL);
4241 /* OR, OR_MH, OR_HM, OR_HH. */
4242 fsrc = struct_field_parse(p, action, src, &src_struct_id);
4244 instr->type = INSTR_ALU_OR;
4245 if (dst[0] != 'h' && src[0] == 'h')
4246 instr->type = INSTR_ALU_OR_MH;
4247 if (dst[0] == 'h' && src[0] != 'h')
4248 instr->type = INSTR_ALU_OR_HM;
4249 if (dst[0] == 'h' && src[0] == 'h')
4250 instr->type = INSTR_ALU_OR_HH;
4252 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4253 instr->alu.dst.n_bits = fdst->n_bits;
4254 instr->alu.dst.offset = fdst->offset / 8;
4255 instr->alu.src.struct_id = (uint8_t)src_struct_id;
4256 instr->alu.src.n_bits = fsrc->n_bits;
4257 instr->alu.src.offset = fsrc->offset / 8;
/* OR_I: immediate byte-swapped/right-aligned at translation time. */
4262 src_val = strtoull(src, &src, 0);
4263 CHECK(!src[0], EINVAL);
4266 src_val = hton64(src_val) >> (64 - fdst->n_bits);
4268 instr->type = INSTR_ALU_OR_I;
4269 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4270 instr->alu.dst.n_bits = fdst->n_bits;
4271 instr->alu.dst.offset = fdst->offset / 8;
4272 instr->alu.src_val = src_val;
/* Translate "xor dst src" (bitwise XOR). Same structure as
 * instr_alu_and_translate(), producing XOR/XOR_MH/XOR_HM/XOR_HH or XOR_I.
 */
4277 instr_alu_xor_translate(struct rte_swx_pipeline *p,
4278 struct action *action,
4281 struct instruction *instr,
4282 struct instruction_data *data __rte_unused)
4284 char *dst = tokens[1], *src = tokens[2];
4285 struct field *fdst, *fsrc;
4287 uint32_t dst_struct_id = 0, src_struct_id = 0;
4289 CHECK(n_tokens == 3, EINVAL);
4291 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
4292 CHECK(fdst, EINVAL);
4294 /* XOR, XOR_MH, XOR_HM, XOR_HH. */
4295 fsrc = struct_field_parse(p, action, src, &src_struct_id);
4297 instr->type = INSTR_ALU_XOR;
4298 if (dst[0] != 'h' && src[0] == 'h')
4299 instr->type = INSTR_ALU_XOR_MH;
4300 if (dst[0] == 'h' && src[0] != 'h')
4301 instr->type = INSTR_ALU_XOR_HM;
4302 if (dst[0] == 'h' && src[0] == 'h')
4303 instr->type = INSTR_ALU_XOR_HH;
4305 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4306 instr->alu.dst.n_bits = fdst->n_bits;
4307 instr->alu.dst.offset = fdst->offset / 8;
4308 instr->alu.src.struct_id = (uint8_t)src_struct_id;
4309 instr->alu.src.n_bits = fsrc->n_bits;
4310 instr->alu.src.offset = fsrc->offset / 8;
/* XOR_I: immediate byte-swapped/right-aligned at translation time. */
4315 src_val = strtoull(src, &src, 0);
4316 CHECK(!src[0], EINVAL);
4319 src_val = hton64(src_val) >> (64 - fdst->n_bits);
4321 instr->type = INSTR_ALU_XOR_I;
4322 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4323 instr->alu.dst.n_bits = fdst->n_bits;
4324 instr->alu.dst.offset = fdst->offset / 8;
4325 instr->alu.src_val = src_val;
/* Execute handlers for the six "add" variants. Suffixes name the operand
 * placement: no suffix = metadata/metadata, mh/hm/hh = header vs metadata
 * combinations, mi/hi = immediate source.
 */
4330 instr_alu_add_exec(struct rte_swx_pipeline *p)
4332 struct thread *t = &p->threads[p->thread_id];
4333 struct instruction *ip = t->ip;
4335 TRACE("[Thread %2u] add\n", p->thread_id);
4345 instr_alu_add_mh_exec(struct rte_swx_pipeline *p)
4347 struct thread *t = &p->threads[p->thread_id];
4348 struct instruction *ip = t->ip;
4350 TRACE("[Thread %2u] add (mh)\n", p->thread_id);
4360 instr_alu_add_hm_exec(struct rte_swx_pipeline *p)
4362 struct thread *t = &p->threads[p->thread_id];
4363 struct instruction *ip = t->ip;
4365 TRACE("[Thread %2u] add (hm)\n", p->thread_id);
4375 instr_alu_add_hh_exec(struct rte_swx_pipeline *p)
4377 struct thread *t = &p->threads[p->thread_id];
4378 struct instruction *ip = t->ip;
4380 TRACE("[Thread %2u] add (hh)\n", p->thread_id);
4390 instr_alu_add_mi_exec(struct rte_swx_pipeline *p)
4392 struct thread *t = &p->threads[p->thread_id];
4393 struct instruction *ip = t->ip;
4395 TRACE("[Thread %2u] add (mi)\n", p->thread_id);
4405 instr_alu_add_hi_exec(struct rte_swx_pipeline *p)
4407 struct thread *t = &p->threads[p->thread_id];
4408 struct instruction *ip = t->ip;
4410 TRACE("[Thread %2u] add (hi)\n", p->thread_id);
/* Execute handlers for the six "sub" variants; same suffix scheme as the
 * add handlers above.
 */
4420 instr_alu_sub_exec(struct rte_swx_pipeline *p)
4422 struct thread *t = &p->threads[p->thread_id];
4423 struct instruction *ip = t->ip;
4425 TRACE("[Thread %2u] sub\n", p->thread_id);
4435 instr_alu_sub_mh_exec(struct rte_swx_pipeline *p)
4437 struct thread *t = &p->threads[p->thread_id];
4438 struct instruction *ip = t->ip;
4440 TRACE("[Thread %2u] sub (mh)\n", p->thread_id);
4450 instr_alu_sub_hm_exec(struct rte_swx_pipeline *p)
4452 struct thread *t = &p->threads[p->thread_id];
4453 struct instruction *ip = t->ip;
4455 TRACE("[Thread %2u] sub (hm)\n", p->thread_id);
4465 instr_alu_sub_hh_exec(struct rte_swx_pipeline *p)
4467 struct thread *t = &p->threads[p->thread_id];
4468 struct instruction *ip = t->ip;
4470 TRACE("[Thread %2u] sub (hh)\n", p->thread_id);
4480 instr_alu_sub_mi_exec(struct rte_swx_pipeline *p)
4482 struct thread *t = &p->threads[p->thread_id];
4483 struct instruction *ip = t->ip;
4485 TRACE("[Thread %2u] sub (mi)\n", p->thread_id);
4495 instr_alu_sub_hi_exec(struct rte_swx_pipeline *p)
4497 struct thread *t = &p->threads[p->thread_id];
4498 struct instruction *ip = t->ip;
4500 TRACE("[Thread %2u] sub (hi)\n", p->thread_id);
/* Execute handlers for the six "shl" (shift left) variants; same suffix
 * scheme as the add handlers.
 */
4510 instr_alu_shl_exec(struct rte_swx_pipeline *p)
4512 struct thread *t = &p->threads[p->thread_id];
4513 struct instruction *ip = t->ip;
4515 TRACE("[Thread %2u] shl\n", p->thread_id);
4525 instr_alu_shl_mh_exec(struct rte_swx_pipeline *p)
4527 struct thread *t = &p->threads[p->thread_id];
4528 struct instruction *ip = t->ip;
4530 TRACE("[Thread %2u] shl (mh)\n", p->thread_id);
4540 instr_alu_shl_hm_exec(struct rte_swx_pipeline *p)
4542 struct thread *t = &p->threads[p->thread_id];
4543 struct instruction *ip = t->ip;
4545 TRACE("[Thread %2u] shl (hm)\n", p->thread_id);
4555 instr_alu_shl_hh_exec(struct rte_swx_pipeline *p)
4557 struct thread *t = &p->threads[p->thread_id];
4558 struct instruction *ip = t->ip;
4560 TRACE("[Thread %2u] shl (hh)\n", p->thread_id);
4570 instr_alu_shl_mi_exec(struct rte_swx_pipeline *p)
4572 struct thread *t = &p->threads[p->thread_id];
4573 struct instruction *ip = t->ip;
4575 TRACE("[Thread %2u] shl (mi)\n", p->thread_id);
4585 instr_alu_shl_hi_exec(struct rte_swx_pipeline *p)
4587 struct thread *t = &p->threads[p->thread_id];
4588 struct instruction *ip = t->ip;
4590 TRACE("[Thread %2u] shl (hi)\n", p->thread_id);
/* Execute handlers for the six "shr" (shift right) variants; same suffix
 * scheme as the add handlers.
 */
4600 instr_alu_shr_exec(struct rte_swx_pipeline *p)
4602 struct thread *t = &p->threads[p->thread_id];
4603 struct instruction *ip = t->ip;
4605 TRACE("[Thread %2u] shr\n", p->thread_id);
4615 instr_alu_shr_mh_exec(struct rte_swx_pipeline *p)
4617 struct thread *t = &p->threads[p->thread_id];
4618 struct instruction *ip = t->ip;
4620 TRACE("[Thread %2u] shr (mh)\n", p->thread_id);
4630 instr_alu_shr_hm_exec(struct rte_swx_pipeline *p)
4632 struct thread *t = &p->threads[p->thread_id];
4633 struct instruction *ip = t->ip;
4635 TRACE("[Thread %2u] shr (hm)\n", p->thread_id);
4645 instr_alu_shr_hh_exec(struct rte_swx_pipeline *p)
4647 struct thread *t = &p->threads[p->thread_id];
4648 struct instruction *ip = t->ip;
4650 TRACE("[Thread %2u] shr (hh)\n", p->thread_id);
4660 instr_alu_shr_mi_exec(struct rte_swx_pipeline *p)
4662 struct thread *t = &p->threads[p->thread_id];
4663 struct instruction *ip = t->ip;
4665 TRACE("[Thread %2u] shr (mi)\n", p->thread_id);
4675 instr_alu_shr_hi_exec(struct rte_swx_pipeline *p)
4677 struct thread *t = &p->threads[p->thread_id];
4678 struct instruction *ip = t->ip;
4680 TRACE("[Thread %2u] shr (hi)\n", p->thread_id);
/* Execute handlers for the "and" variants. The hm/hh forms use the
 * ALU_HM_FAST/ALU_HH_FAST macros with the '&' operator; bitwise ops need
 * no carry handling, so the fast same-width path applies.
 */
4690 instr_alu_and_exec(struct rte_swx_pipeline *p)
4692 struct thread *t = &p->threads[p->thread_id];
4693 struct instruction *ip = t->ip;
4695 TRACE("[Thread %2u] and\n", p->thread_id);
4705 instr_alu_and_mh_exec(struct rte_swx_pipeline *p)
4707 struct thread *t = &p->threads[p->thread_id];
4708 struct instruction *ip = t->ip;
4710 TRACE("[Thread %2u] and (mh)\n", p->thread_id);
4720 instr_alu_and_hm_exec(struct rte_swx_pipeline *p)
4722 struct thread *t = &p->threads[p->thread_id];
4723 struct instruction *ip = t->ip;
4725 TRACE("[Thread %2u] and (hm)\n", p->thread_id);
4728 ALU_HM_FAST(t, ip, &);
4735 instr_alu_and_hh_exec(struct rte_swx_pipeline *p)
4737 struct thread *t = &p->threads[p->thread_id];
4738 struct instruction *ip = t->ip;
4740 TRACE("[Thread %2u] and (hh)\n", p->thread_id);
4743 ALU_HH_FAST(t, ip, &);
4750 instr_alu_and_i_exec(struct rte_swx_pipeline *p)
4752 struct thread *t = &p->threads[p->thread_id];
4753 struct instruction *ip = t->ip;
4755 TRACE("[Thread %2u] and (i)\n", p->thread_id);
/* Execute handlers for the "or" variants; same structure as the "and"
 * handlers, with the '|' operator in the fast macros.
 */
4765 instr_alu_or_exec(struct rte_swx_pipeline *p)
4767 struct thread *t = &p->threads[p->thread_id];
4768 struct instruction *ip = t->ip;
4770 TRACE("[Thread %2u] or\n", p->thread_id);
4780 instr_alu_or_mh_exec(struct rte_swx_pipeline *p)
4782 struct thread *t = &p->threads[p->thread_id];
4783 struct instruction *ip = t->ip;
4785 TRACE("[Thread %2u] or (mh)\n", p->thread_id);
4795 instr_alu_or_hm_exec(struct rte_swx_pipeline *p)
4797 struct thread *t = &p->threads[p->thread_id];
4798 struct instruction *ip = t->ip;
4800 TRACE("[Thread %2u] or (hm)\n", p->thread_id);
4803 ALU_HM_FAST(t, ip, |);
4810 instr_alu_or_hh_exec(struct rte_swx_pipeline *p)
4812 struct thread *t = &p->threads[p->thread_id];
4813 struct instruction *ip = t->ip;
4815 TRACE("[Thread %2u] or (hh)\n", p->thread_id);
4818 ALU_HH_FAST(t, ip, |);
4825 instr_alu_or_i_exec(struct rte_swx_pipeline *p)
4827 struct thread *t = &p->threads[p->thread_id];
4828 struct instruction *ip = t->ip;
4830 TRACE("[Thread %2u] or (i)\n", p->thread_id);
/* Execute handlers for the "xor" variants; same structure as the "and"
 * handlers, with the '^' operator in the fast macros.
 */
4840 instr_alu_xor_exec(struct rte_swx_pipeline *p)
4842 struct thread *t = &p->threads[p->thread_id];
4843 struct instruction *ip = t->ip;
4845 TRACE("[Thread %2u] xor\n", p->thread_id);
4855 instr_alu_xor_mh_exec(struct rte_swx_pipeline *p)
4857 struct thread *t = &p->threads[p->thread_id];
4858 struct instruction *ip = t->ip;
4860 TRACE("[Thread %2u] xor (mh)\n", p->thread_id);
4870 instr_alu_xor_hm_exec(struct rte_swx_pipeline *p)
4872 struct thread *t = &p->threads[p->thread_id];
4873 struct instruction *ip = t->ip;
4875 TRACE("[Thread %2u] xor (hm)\n", p->thread_id);
4878 ALU_HM_FAST(t, ip, ^);
4885 instr_alu_xor_hh_exec(struct rte_swx_pipeline *p)
4887 struct thread *t = &p->threads[p->thread_id];
4888 struct instruction *ip = t->ip;
4890 TRACE("[Thread %2u] xor (hh)\n", p->thread_id);
4893 ALU_HH_FAST(t, ip, ^);
4900 instr_alu_xor_i_exec(struct rte_swx_pipeline *p)
4902 struct thread *t = &p->threads[p->thread_id];
4903 struct instruction *ip = t->ip;
4905 TRACE("[Thread %2u] xor (i)\n", p->thread_id);
/* Execute "ckadd dst src" with a field source: fold the (up to 64-bit)
 * source value into the 16-bit one's-complement checksum at dst using the
 * standard fold-the-carries technique (RFC 1071 arithmetic).
 */
4915 instr_alu_ckadd_field_exec(struct rte_swx_pipeline *p)
4917 struct thread *t = &p->threads[p->thread_id];
4918 struct instruction *ip = t->ip;
4919 uint8_t *dst_struct, *src_struct;
4920 uint16_t *dst16_ptr, dst;
4921 uint64_t *src64_ptr, src64, src64_mask, src;
4924 TRACE("[Thread %2u] ckadd (field)\n", p->thread_id);
/* Structs. */
4927 dst_struct = t->structs[ip->alu.dst.struct_id];
4928 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
4931 src_struct = t->structs[ip->alu.src.struct_id];
4932 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
/* Mask off bits beyond the source field width. */
4934 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
4935 src = src64 & src64_mask;
4940 /* The first input (r) is a 16-bit number. The second and the third
4941 * inputs are 32-bit numbers. In the worst case scenario, the sum of the
4942 * three numbers (output r) is a 34-bit number.
4944 r += (src >> 32) + (src & 0xFFFFFFFF);
4946 /* The first input is a 16-bit number. The second input is an 18-bit
4947 * number. In the worst case scenario, the sum of the two numbers is a
4950 r = (r & 0xFFFF) + (r >> 16);
4952 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
4953 * a 3-bit number (0 .. 7). Their sum is a 17-bit number (0 .. 0x10006).
4955 r = (r & 0xFFFF) + (r >> 16);
4957 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
4958 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
4959 * 0x10006), the output r is (0 .. 7). So no carry bit can be generated,
4960 * therefore the output r is always a 16-bit number.
4962 r = (r & 0xFFFF) + (r >> 16);
4967 *dst16_ptr = (uint16_t)r;
/* Execute "cksub dst src" with a field source: remove the source value's
 * contribution from the 16-bit one's-complement checksum at dst. A large
 * multiple of the 0xFFFF modulus is first added to the minuend to avoid
 * underflow (see the detailed derivation in the inline comments).
 */
4974 instr_alu_cksub_field_exec(struct rte_swx_pipeline *p)
4976 struct thread *t = &p->threads[p->thread_id];
4977 struct instruction *ip = t->ip;
4978 uint8_t *dst_struct, *src_struct;
4979 uint16_t *dst16_ptr, dst;
4980 uint64_t *src64_ptr, src64, src64_mask, src;
4983 TRACE("[Thread %2u] cksub (field)\n", p->thread_id);
/* Structs. */
4986 dst_struct = t->structs[ip->alu.dst.struct_id];
4987 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
4990 src_struct = t->structs[ip->alu.src.struct_id];
4991 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
4993 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
4994 src = src64 & src64_mask;
4999 /* Subtraction in 1's complement arithmetic (i.e. a '- b) is the same as
5000 * the following sequence of operations in 2's complement arithmetic:
5001 * a '- b = (a - b) % 0xFFFF.
5003 * In order to prevent an underflow for the below subtraction, in which
5004 * a 33-bit number (the subtrahend) is taken out of a 16-bit number (the
5005 * minuend), we first add a multiple of the 0xFFFF modulus to the
5006 * minuend. The number we add to the minuend needs to be a 34-bit number
5007 * or higher, so for readability reasons we picked the 36-bit multiple.
5008 * We are effectively turning the 16-bit minuend into a 36-bit number:
5009 * (a - b) % 0xFFFF = (a + 0xFFFF00000 - b) % 0xFFFF.
5011 r += 0xFFFF00000ULL; /* The output r is a 36-bit number. */
5013 /* A 33-bit number is subtracted from a 36-bit number (the input r). The
5014 * result (the output r) is a 36-bit number.
5016 r -= (src >> 32) + (src & 0xFFFFFFFF);
5018 /* The first input is a 16-bit number. The second input is a 20-bit
5019 * number. Their sum is a 21-bit number.
5021 r = (r & 0xFFFF) + (r >> 16);
5023 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
5024 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1001E).
5026 r = (r & 0xFFFF) + (r >> 16);
5028 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
5029 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
5030 * 0x1001E), the output r is (0 .. 31). So no carry bit can be
5031 * generated, therefore the output r is always a 16-bit number.
5033 r = (r & 0xFFFF) + (r >> 16);
5038 *dst16_ptr = (uint16_t)r;
/* Execute "ckadd" over a fixed 20-byte header (e.g. an IPv4 header without
 * options): sum the five 32-bit words with two accumulators, fold the
 * carries down to 16 bits, and store the result (0 mapped to 0xFFFF, per
 * one's-complement checksum convention).
 */
5045 instr_alu_ckadd_struct20_exec(struct rte_swx_pipeline *p)
5047 struct thread *t = &p->threads[p->thread_id];
5048 struct instruction *ip = t->ip;
5049 uint8_t *dst_struct, *src_struct;
5050 uint16_t *dst16_ptr;
5051 uint32_t *src32_ptr;
5054 TRACE("[Thread %2u] ckadd (struct of 20 bytes)\n", p->thread_id);
/* Structs. */
5057 dst_struct = t->structs[ip->alu.dst.struct_id];
5058 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
5060 src_struct = t->structs[ip->alu.src.struct_id];
5061 src32_ptr = (uint32_t *)&src_struct[0];
/* Two-accumulator sum of the 5 words keeps intermediate widths small. */
5063 r0 = src32_ptr[0]; /* r0 is a 32-bit number. */
5064 r1 = src32_ptr[1]; /* r1 is a 32-bit number. */
5065 r0 += src32_ptr[2]; /* The output r0 is a 33-bit number. */
5066 r1 += src32_ptr[3]; /* The output r1 is a 33-bit number. */
5067 r0 += r1 + src32_ptr[4]; /* The output r0 is a 35-bit number. */
5069 /* The first input is a 16-bit number. The second input is a 19-bit
5070 * number. Their sum is a 20-bit number.
5072 r0 = (r0 & 0xFFFF) + (r0 >> 16);
5074 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
5075 * a 4-bit number (0 .. 15). The sum is a 17-bit number (0 .. 0x1000E).
5077 r0 = (r0 & 0xFFFF) + (r0 >> 16);
5079 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
5080 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
5081 * 0x1000E), the output r is (0 .. 15). So no carry bit can be
5082 * generated, therefore the output r is always a 16-bit number.
5084 r0 = (r0 & 0xFFFF) + (r0 >> 16);
/* A zero checksum is represented as 0xFFFF in one's complement. */
5087 r0 = r0 ? r0 : 0xFFFF;
5089 *dst16_ptr = (uint16_t)r0;
/* ckadd over a variable-size source struct: one's complement sum of all its
 * 32-bit words (n_bits / 32 of them), folded down to 16 bits and written to
 * the 16-bit destination field. NOTE(review): the accumulator r declaration
 * and the function tail are elided by extraction.
 */
5096 instr_alu_ckadd_struct_exec(struct rte_swx_pipeline *p)
5098 struct thread *t = &p->threads[p->thread_id];
5099 struct instruction *ip = t->ip;
5100 uint8_t *dst_struct, *src_struct;
5101 uint16_t *dst16_ptr;
5102 uint32_t *src32_ptr;
5106 TRACE("[Thread %2u] ckadd (struct)\n", p->thread_id);
5109 dst_struct = t->structs[ip->alu.dst.struct_id];
5110 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
5112 src_struct = t->structs[ip->alu.src.struct_id];
5113 src32_ptr = (uint32_t *)&src_struct[0];
5115 /* The max number of 32-bit words in a 256-byte header is 8 = 2^3.
5116 * Therefore, in the worst case scenario, a 35-bit number is added to a
5117 * 16-bit number (the input r), so the output r is 36-bit number.
/* NOTE(review): 256 bytes / 4 = 64 words (2^6), not 8 — the stated bit
 * bounds above look understated; confirm the intended max header size. */
5119 for (i = 0; i < ip->alu.src.n_bits / 32; i++, src32_ptr++)
5122 /* The first input is a 16-bit number. The second input is a 20-bit
5123 * number. Their sum is a 21-bit number.
5125 r = (r & 0xFFFF) + (r >> 16);
5127 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
5128 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1001E).
5130 r = (r & 0xFFFF) + (r >> 16);
5132 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
5133 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
5134 * 0x1001E), the output r is (0 .. 31). So no carry bit can be
5135 * generated, therefore the output r is always a 16-bit number.
5137 r = (r & 0xFFFF) + (r >> 16);
5142 *dst16_ptr = (uint16_t)r;
5151 static struct regarray *
5152 regarray_find(struct rte_swx_pipeline *p, const char *name);
/* Translate "regprefetch REGARRAY idx" into INSTR_REGPREFETCH_RH (idx is a
 * header field), _RM (idx is a metadata/extern/table field) or _RI (idx is an
 * immediate). NOTE(review): the CHECK(r, ...) after regarray_find and the
 * "if (fidx)" branch structure are elided by extraction.
 */
5155 instr_regprefetch_translate(struct rte_swx_pipeline *p,
5156 struct action *action,
5159 struct instruction *instr,
5160 struct instruction_data *data __rte_unused)
5162 char *regarray = tokens[1], *idx = tokens[2];
5165 uint32_t idx_struct_id, idx_val;
5167 CHECK(n_tokens == 3, EINVAL);
5169 r = regarray_find(p, regarray);
5172 /* REGPREFETCH_RH, REGPREFETCH_RM. */
5173 fidx = struct_field_parse(p, action, idx, &idx_struct_id);
5175 instr->type = INSTR_REGPREFETCH_RM;
5177 instr->type = INSTR_REGPREFETCH_RH;
5179 instr->regarray.regarray_id = r->id;
5180 instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
5181 instr->regarray.idx.n_bits = fidx->n_bits;
5182 instr->regarray.idx.offset = fidx->offset / 8;
5183 instr->regarray.dstsrc_val = 0; /* Unused. */
5187 /* REGPREFETCH_RI. */
/* idx is not a struct field: parse it as an immediate index. */
5188 idx_val = strtoul(idx, &idx, 0);
5189 CHECK(!idx[0], EINVAL);
5191 instr->type = INSTR_REGPREFETCH_RI;
5192 instr->regarray.regarray_id = r->id;
5193 instr->regarray.idx_val = idx_val;
5194 instr->regarray.dstsrc_val = 0; /* Unused. */
/* Translate "regrd DST REGARRAY idx" into one of INSTR_REGRD_{H,M}R{H,M,I}:
 * first letter = destination kind (Header field / Metadata et al.), second =
 * index kind (header / meta / immediate). The destination is parsed with a
 * NULL action so that read-only action arguments cannot be used as dst.
 */
5199 instr_regrd_translate(struct rte_swx_pipeline *p,
5200 struct action *action,
5203 struct instruction *instr,
5204 struct instruction_data *data __rte_unused)
5206 char *dst = tokens[1], *regarray = tokens[2], *idx = tokens[3];
5208 struct field *fdst, *fidx;
5209 uint32_t dst_struct_id, idx_struct_id, idx_val;
5211 CHECK(n_tokens == 4, EINVAL);
5213 r = regarray_find(p, regarray);
5216 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
5217 CHECK(fdst, EINVAL);
5219 /* REGRD_HRH, REGRD_HRM, REGRD_MRH, REGRD_MRM. */
5220 fidx = struct_field_parse(p, action, idx, &idx_struct_id);
/* Opcode selection keys off the 'h.' prefix of each operand token. */
5222 instr->type = INSTR_REGRD_MRM;
5223 if (dst[0] == 'h' && idx[0] != 'h')
5224 instr->type = INSTR_REGRD_HRM;
5225 if (dst[0] != 'h' && idx[0] == 'h')
5226 instr->type = INSTR_REGRD_MRH;
5227 if (dst[0] == 'h' && idx[0] == 'h')
5228 instr->type = INSTR_REGRD_HRH;
5230 instr->regarray.regarray_id = r->id;
5231 instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
5232 instr->regarray.idx.n_bits = fidx->n_bits;
5233 instr->regarray.idx.offset = fidx->offset / 8;
5234 instr->regarray.dstsrc.struct_id = (uint8_t)dst_struct_id;
5235 instr->regarray.dstsrc.n_bits = fdst->n_bits;
5236 instr->regarray.dstsrc.offset = fdst->offset / 8;
5240 /* REGRD_MRI, REGRD_HRI. */
/* idx is not a struct field: parse it as an immediate index. */
5241 idx_val = strtoul(idx, &idx, 0);
5242 CHECK(!idx[0], EINVAL);
5244 instr->type = INSTR_REGRD_MRI;
5246 instr->type = INSTR_REGRD_HRI;
5248 instr->regarray.regarray_id = r->id;
5249 instr->regarray.idx_val = idx_val;
5250 instr->regarray.dstsrc.struct_id = (uint8_t)dst_struct_id;
5251 instr->regarray.dstsrc.n_bits = fdst->n_bits;
5252 instr->regarray.dstsrc.offset = fdst->offset / 8;
/* Translate "regwr REGARRAY idx src" into one of INSTR_REGWR_R{H,M,I}{H,M,I}:
 * first letter = index kind, second = source kind (Header field, Metadata et
 * al., or Immediate). Each of the four field/immediate combinations fills in
 * the matching instruction operand layout.
 */
5257 instr_regwr_translate(struct rte_swx_pipeline *p,
5258 struct action *action,
5261 struct instruction *instr,
5262 struct instruction_data *data __rte_unused)
5264 char *regarray = tokens[1], *idx = tokens[2], *src = tokens[3];
5266 struct field *fidx, *fsrc;
5268 uint32_t idx_struct_id, idx_val, src_struct_id;
5270 CHECK(n_tokens == 4, EINVAL);
5272 r = regarray_find(p, regarray);
5275 /* REGWR_RHH, REGWR_RHM, REGWR_RMH, REGWR_RMM. */
5276 fidx = struct_field_parse(p, action, idx, &idx_struct_id);
5277 fsrc = struct_field_parse(p, action, src, &src_struct_id);
/* Both operands are struct fields: pick opcode by 'h.' prefix of each. */
5279 instr->type = INSTR_REGWR_RMM;
5280 if (idx[0] == 'h' && src[0] != 'h')
5281 instr->type = INSTR_REGWR_RHM;
5282 if (idx[0] != 'h' && src[0] == 'h')
5283 instr->type = INSTR_REGWR_RMH;
5284 if (idx[0] == 'h' && src[0] == 'h')
5285 instr->type = INSTR_REGWR_RHH;
5287 instr->regarray.regarray_id = r->id;
5288 instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
5289 instr->regarray.idx.n_bits = fidx->n_bits;
5290 instr->regarray.idx.offset = fidx->offset / 8;
5291 instr->regarray.dstsrc.struct_id = (uint8_t)src_struct_id;
5292 instr->regarray.dstsrc.n_bits = fsrc->n_bits;
5293 instr->regarray.dstsrc.offset = fsrc->offset / 8;
5297 /* REGWR_RHI, REGWR_RMI. */
5298 if (fidx && !fsrc) {
5299 src_val = strtoull(src, &src, 0);
5300 CHECK(!src[0], EINVAL);
5302 instr->type = INSTR_REGWR_RMI;
5304 instr->type = INSTR_REGWR_RHI;
5306 instr->regarray.regarray_id = r->id;
5307 instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
5308 instr->regarray.idx.n_bits = fidx->n_bits;
5309 instr->regarray.idx.offset = fidx->offset / 8;
5310 instr->regarray.dstsrc_val = src_val;
5314 /* REGWR_RIH, REGWR_RIM. */
5315 if (!fidx && fsrc) {
5316 idx_val = strtoul(idx, &idx, 0);
5317 CHECK(!idx[0], EINVAL);
5319 instr->type = INSTR_REGWR_RIM;
5321 instr->type = INSTR_REGWR_RIH;
5323 instr->regarray.regarray_id = r->id;
5324 instr->regarray.idx_val = idx_val;
5325 instr->regarray.dstsrc.struct_id = (uint8_t)src_struct_id;
5326 instr->regarray.dstsrc.n_bits = fsrc->n_bits;
5327 instr->regarray.dstsrc.offset = fsrc->offset / 8;
/* REGWR_RII: both index and source are immediates. */
5332 src_val = strtoull(src, &src, 0);
5333 CHECK(!src[0], EINVAL);
5335 idx_val = strtoul(idx, &idx, 0);
5336 CHECK(!idx[0], EINVAL);
5338 instr->type = INSTR_REGWR_RII;
5339 instr->regarray.idx_val = idx_val;
5340 instr->regarray.dstsrc_val = src_val;
/* Translate "regadd REGARRAY idx src" into one of INSTR_REGADD_R{H,M,I}{H,M,I}
 * (r[idx] += src). Same operand-kind dispatch as instr_regwr_translate:
 * first letter = index kind, second = source kind.
 */
5346 instr_regadd_translate(struct rte_swx_pipeline *p,
5347 struct action *action,
5350 struct instruction *instr,
5351 struct instruction_data *data __rte_unused)
5353 char *regarray = tokens[1], *idx = tokens[2], *src = tokens[3];
5355 struct field *fidx, *fsrc;
5357 uint32_t idx_struct_id, idx_val, src_struct_id;
5359 CHECK(n_tokens == 4, EINVAL);
5361 r = regarray_find(p, regarray);
5364 /* REGADD_RHH, REGADD_RHM, REGADD_RMH, REGADD_RMM. */
5365 fidx = struct_field_parse(p, action, idx, &idx_struct_id);
5366 fsrc = struct_field_parse(p, action, src, &src_struct_id);
/* Both operands are struct fields: pick opcode by 'h.' prefix of each. */
5368 instr->type = INSTR_REGADD_RMM;
5369 if (idx[0] == 'h' && src[0] != 'h')
5370 instr->type = INSTR_REGADD_RHM;
5371 if (idx[0] != 'h' && src[0] == 'h')
5372 instr->type = INSTR_REGADD_RMH;
5373 if (idx[0] == 'h' && src[0] == 'h')
5374 instr->type = INSTR_REGADD_RHH;
5376 instr->regarray.regarray_id = r->id;
5377 instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
5378 instr->regarray.idx.n_bits = fidx->n_bits;
5379 instr->regarray.idx.offset = fidx->offset / 8;
5380 instr->regarray.dstsrc.struct_id = (uint8_t)src_struct_id;
5381 instr->regarray.dstsrc.n_bits = fsrc->n_bits;
5382 instr->regarray.dstsrc.offset = fsrc->offset / 8;
5386 /* REGADD_RHI, REGADD_RMI. */
5387 if (fidx && !fsrc) {
5388 src_val = strtoull(src, &src, 0);
5389 CHECK(!src[0], EINVAL);
5391 instr->type = INSTR_REGADD_RMI;
5393 instr->type = INSTR_REGADD_RHI;
5395 instr->regarray.regarray_id = r->id;
5396 instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
5397 instr->regarray.idx.n_bits = fidx->n_bits;
5398 instr->regarray.idx.offset = fidx->offset / 8;
5399 instr->regarray.dstsrc_val = src_val;
5403 /* REGADD_RIH, REGADD_RIM. */
5404 if (!fidx && fsrc) {
5405 idx_val = strtoul(idx, &idx, 0);
5406 CHECK(!idx[0], EINVAL);
5408 instr->type = INSTR_REGADD_RIM;
5410 instr->type = INSTR_REGADD_RIH;
5412 instr->regarray.regarray_id = r->id;
5413 instr->regarray.idx_val = idx_val;
5414 instr->regarray.dstsrc.struct_id = (uint8_t)src_struct_id;
5415 instr->regarray.dstsrc.n_bits = fsrc->n_bits;
5416 instr->regarray.dstsrc.offset = fsrc->offset / 8;
/* REGADD_RII: both index and source are immediates. */
5421 src_val = strtoull(src, &src, 0);
5422 CHECK(!src[0], EINVAL);
5424 idx_val = strtoul(idx, &idx, 0);
5425 CHECK(!idx[0], EINVAL);
5427 instr->type = INSTR_REGADD_RII;
5428 instr->regarray.idx_val = idx_val;
5429 instr->regarray.dstsrc_val = src_val;
/* Return the base pointer of the register array selected by the instruction.
 * NOTE(review): the return statement is elided by extraction. */
5433 static inline uint64_t *
5434 instr_regarray_regarray(struct rte_swx_pipeline *p, struct instruction *ip)
5436 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
/* Read the register array index from a host-byte-order struct field: mask to
 * the field width, then clamp with the array's power-of-two size mask. */
5440 static inline uint64_t
5441 instr_regarray_idx_hbo(struct rte_swx_pipeline *p, struct thread *t, struct instruction *ip)
5443 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
5445 uint8_t *idx_struct = t->structs[ip->regarray.idx.struct_id];
5446 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->regarray.idx.offset];
5447 uint64_t idx64 = *idx64_ptr;
5448 uint64_t idx64_mask = UINT64_MAX >> (64 - ip->regarray.idx.n_bits);
5449 uint64_t idx = idx64 & idx64_mask & r->size_mask;
/* Network-byte-order variant of the index read: on little-endian hosts the
 * field is byte-swapped then right-aligned; on big-endian hosts NBO == HBO,
 * so the HBO reader is reused via the #define below. */
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
5456 static inline uint64_t
5457 instr_regarray_idx_nbo(struct rte_swx_pipeline *p, struct thread *t, struct instruction *ip)
5459 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
5461 uint8_t *idx_struct = t->structs[ip->regarray.idx.struct_id];
5462 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->regarray.idx.offset];
5463 uint64_t idx64 = *idx64_ptr;
5464 uint64_t idx = (ntoh64(idx64) >> (64 - ip->regarray.idx.n_bits)) & r->size_mask;
5471 #define instr_regarray_idx_nbo instr_regarray_idx_hbo
/* Immediate register array index, clamped with the array's size mask. */
5475 static inline uint64_t
5476 instr_regarray_idx_imm(struct rte_swx_pipeline *p, struct instruction *ip)
5478 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
5480 uint64_t idx = ip->regarray.idx_val & r->size_mask;
/* Read the source operand from a host-byte-order struct field, masked to the
 * field width. */
5485 static inline uint64_t
5486 instr_regarray_src_hbo(struct thread *t, struct instruction *ip)
5488 uint8_t *src_struct = t->structs[ip->regarray.dstsrc.struct_id];
5489 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->regarray.dstsrc.offset];
5490 uint64_t src64 = *src64_ptr;
5491 uint64_t src64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
5492 uint64_t src = src64 & src64_mask;
/* Network-byte-order source operand read; aliases the HBO reader on
 * big-endian hosts via the #define below. */
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
5499 static inline uint64_t
5500 instr_regarray_src_nbo(struct thread *t, struct instruction *ip)
5502 uint8_t *src_struct = t->structs[ip->regarray.dstsrc.struct_id];
5503 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->regarray.dstsrc.offset];
5504 uint64_t src64 = *src64_ptr;
5505 uint64_t src = ntoh64(src64) >> (64 - ip->regarray.dstsrc.n_bits);
5512 #define instr_regarray_src_nbo instr_regarray_src_hbo
/* Write src into a host-byte-order destination field: read-modify-write of
 * the containing 64-bit word, touching only the field's bits. */
5517 instr_regarray_dst_hbo_src_hbo_set(struct thread *t, struct instruction *ip, uint64_t src)
5519 uint8_t *dst_struct = t->structs[ip->regarray.dstsrc.struct_id];
5520 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->regarray.dstsrc.offset];
5521 uint64_t dst64 = *dst64_ptr;
5522 uint64_t dst64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
5524 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
/* Write an HBO src value into a network-byte-order destination field: swap
 * and right-align first, then masked read-modify-write. Aliases the HBO
 * setter on big-endian hosts via the #define below. */
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
5531 instr_regarray_dst_nbo_src_hbo_set(struct thread *t, struct instruction *ip, uint64_t src)
5533 uint8_t *dst_struct = t->structs[ip->regarray.dstsrc.struct_id];
5534 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->regarray.dstsrc.offset];
5535 uint64_t dst64 = *dst64_ptr;
5536 uint64_t dst64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
5538 src = hton64(src) >> (64 - ip->regarray.dstsrc.n_bits);
5539 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
5544 #define instr_regarray_dst_nbo_src_hbo_set instr_regarray_dst_hbo_src_hbo_set
/* regprefetch r[h]: prefetch the register array entry whose index comes from
 * a header field (network byte order).
 * Fix: "®array" was mojibake — the byte sequence "&reg" of "&regarray" was
 * HTML-entity-decoded to U+00AE; restored to &regarray[idx]. */
5549 instr_regprefetch_rh_exec(struct rte_swx_pipeline *p)
5551 struct thread *t = &p->threads[p->thread_id];
5552 struct instruction *ip = t->ip;
5553 uint64_t *regarray, idx;
5555 TRACE("[Thread %2u] regprefetch (r[h])\n", p->thread_id);
5558 regarray = instr_regarray_regarray(p, ip);
5559 idx = instr_regarray_idx_nbo(p, t, ip);
5560 rte_prefetch0(&regarray[idx]);
/* regprefetch r[m]: prefetch the register array entry whose index comes from
 * a metadata (host-byte-order) field.
 * Fix: restored "&regarray[idx]" from the "®array" HTML-entity mojibake. */
5567 instr_regprefetch_rm_exec(struct rte_swx_pipeline *p)
5569 struct thread *t = &p->threads[p->thread_id];
5570 struct instruction *ip = t->ip;
5571 uint64_t *regarray, idx;
5573 TRACE("[Thread %2u] regprefetch (r[m])\n", p->thread_id);
5576 regarray = instr_regarray_regarray(p, ip);
5577 idx = instr_regarray_idx_hbo(p, t, ip);
5578 rte_prefetch0(&regarray[idx]);
/* regprefetch r[i]: prefetch the register array entry at an immediate index.
 * Fix: restored "&regarray[idx]" from the "®array" HTML-entity mojibake. */
5585 instr_regprefetch_ri_exec(struct rte_swx_pipeline *p)
5587 struct thread *t = &p->threads[p->thread_id];
5588 struct instruction *ip = t->ip;
5589 uint64_t *regarray, idx;
5591 TRACE("[Thread %2u] regprefetch (r[i])\n", p->thread_id);
5594 regarray = instr_regarray_regarray(p, ip);
5595 idx = instr_regarray_idx_imm(p, ip);
5596 rte_prefetch0(&regarray[idx]);
/* regrd h = r[h]: read register at header-field (NBO) index into a header
 * (NBO) destination field. */
5603 instr_regrd_hrh_exec(struct rte_swx_pipeline *p)
5605 struct thread *t = &p->threads[p->thread_id];
5606 struct instruction *ip = t->ip;
5607 uint64_t *regarray, idx;
5609 TRACE("[Thread %2u] regrd (h = r[h])\n", p->thread_id);
5612 regarray = instr_regarray_regarray(p, ip);
5613 idx = instr_regarray_idx_nbo(p, t, ip);
5614 instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
/* regrd h = r[m]: read register at metadata (HBO) index into a header (NBO)
 * destination field. */
5621 instr_regrd_hrm_exec(struct rte_swx_pipeline *p)
5623 struct thread *t = &p->threads[p->thread_id];
5624 struct instruction *ip = t->ip;
5625 uint64_t *regarray, idx;
5627 TRACE("[Thread %2u] regrd (h = r[m])\n", p->thread_id);
5630 regarray = instr_regarray_regarray(p, ip);
5631 idx = instr_regarray_idx_hbo(p, t, ip);
5632 instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
/* regrd m = r[h]: read register at header (NBO) index into a metadata (HBO)
 * destination field. */
5639 instr_regrd_mrh_exec(struct rte_swx_pipeline *p)
5641 struct thread *t = &p->threads[p->thread_id];
5642 struct instruction *ip = t->ip;
5643 uint64_t *regarray, idx;
5645 TRACE("[Thread %2u] regrd (m = r[h])\n", p->thread_id);
5648 regarray = instr_regarray_regarray(p, ip);
5649 idx = instr_regarray_idx_nbo(p, t, ip);
5650 instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
/* regrd m = r[m]: read register at metadata (HBO) index into a metadata (HBO)
 * destination field. NOTE(review): the TRACE line present in the sibling
 * variants appears elided here by extraction. */
5657 instr_regrd_mrm_exec(struct rte_swx_pipeline *p)
5659 struct thread *t = &p->threads[p->thread_id];
5660 struct instruction *ip = t->ip;
5661 uint64_t *regarray, idx;
5664 regarray = instr_regarray_regarray(p, ip);
5665 idx = instr_regarray_idx_hbo(p, t, ip);
5666 instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
/* regrd h = r[i]: read register at immediate index into a header (NBO)
 * destination field. */
5673 instr_regrd_hri_exec(struct rte_swx_pipeline *p)
5675 struct thread *t = &p->threads[p->thread_id];
5676 struct instruction *ip = t->ip;
5677 uint64_t *regarray, idx;
5679 TRACE("[Thread %2u] regrd (h = r[i])\n", p->thread_id);
5682 regarray = instr_regarray_regarray(p, ip);
5683 idx = instr_regarray_idx_imm(p, ip);
5684 instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
/* regrd m = r[i]: read register at immediate index into a metadata (HBO)
 * destination field. */
5691 instr_regrd_mri_exec(struct rte_swx_pipeline *p)
5693 struct thread *t = &p->threads[p->thread_id];
5694 struct instruction *ip = t->ip;
5695 uint64_t *regarray, idx;
5697 TRACE("[Thread %2u] regrd (m = r[i])\n", p->thread_id);
5700 regarray = instr_regarray_regarray(p, ip);
5701 idx = instr_regarray_idx_imm(p, ip);
5702 instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
/* regwr r[h] = h: index from header field (NBO), source from header field
 * (NBO). */
5709 instr_regwr_rhh_exec(struct rte_swx_pipeline *p)
5711 struct thread *t = &p->threads[p->thread_id];
5712 struct instruction *ip = t->ip;
5713 uint64_t *regarray, idx, src;
5715 TRACE("[Thread %2u] regwr (r[h] = h)\n", p->thread_id);
5718 regarray = instr_regarray_regarray(p, ip);
5719 idx = instr_regarray_idx_nbo(p, t, ip);
5720 src = instr_regarray_src_nbo(t, ip);
5721 regarray[idx] = src;
/* regwr r[h] = m: index from header field (NBO), source from metadata (HBO). */
5728 instr_regwr_rhm_exec(struct rte_swx_pipeline *p)
5730 struct thread *t = &p->threads[p->thread_id];
5731 struct instruction *ip = t->ip;
5732 uint64_t *regarray, idx, src;
5734 TRACE("[Thread %2u] regwr (r[h] = m)\n", p->thread_id);
5737 regarray = instr_regarray_regarray(p, ip);
5738 idx = instr_regarray_idx_nbo(p, t, ip);
5739 src = instr_regarray_src_hbo(t, ip);
5740 regarray[idx] = src;
/* regwr r[m] = h: index from metadata (HBO), source from header field (NBO). */
5747 instr_regwr_rmh_exec(struct rte_swx_pipeline *p)
5749 struct thread *t = &p->threads[p->thread_id];
5750 struct instruction *ip = t->ip;
5751 uint64_t *regarray, idx, src;
5753 TRACE("[Thread %2u] regwr (r[m] = h)\n", p->thread_id);
5756 regarray = instr_regarray_regarray(p, ip);
5757 idx = instr_regarray_idx_hbo(p, t, ip);
5758 src = instr_regarray_src_nbo(t, ip);
5759 regarray[idx] = src;
/* regwr r[m] = m: index and source both from metadata (HBO). */
5766 instr_regwr_rmm_exec(struct rte_swx_pipeline *p)
5768 struct thread *t = &p->threads[p->thread_id];
5769 struct instruction *ip = t->ip;
5770 uint64_t *regarray, idx, src;
5772 TRACE("[Thread %2u] regwr (r[m] = m)\n", p->thread_id);
5775 regarray = instr_regarray_regarray(p, ip);
5776 idx = instr_regarray_idx_hbo(p, t, ip);
5777 src = instr_regarray_src_hbo(t, ip);
5778 regarray[idx] = src;
/* regwr r[h] = i: index from header field (NBO), source is an immediate. */
5785 instr_regwr_rhi_exec(struct rte_swx_pipeline *p)
5787 struct thread *t = &p->threads[p->thread_id];
5788 struct instruction *ip = t->ip;
5789 uint64_t *regarray, idx, src;
5791 TRACE("[Thread %2u] regwr (r[h] = i)\n", p->thread_id);
5794 regarray = instr_regarray_regarray(p, ip);
5795 idx = instr_regarray_idx_nbo(p, t, ip);
5796 src = ip->regarray.dstsrc_val;
5797 regarray[idx] = src;
/* regwr r[m] = i: index from metadata (HBO), source is an immediate. */
5804 instr_regwr_rmi_exec(struct rte_swx_pipeline *p)
5806 struct thread *t = &p->threads[p->thread_id];
5807 struct instruction *ip = t->ip;
5808 uint64_t *regarray, idx, src;
5810 TRACE("[Thread %2u] regwr (r[m] = i)\n", p->thread_id);
5813 regarray = instr_regarray_regarray(p, ip);
5814 idx = instr_regarray_idx_hbo(p, t, ip);
5815 src = ip->regarray.dstsrc_val;
5816 regarray[idx] = src;
/* regwr r[i] = h: immediate index, source from header field (NBO). */
5823 instr_regwr_rih_exec(struct rte_swx_pipeline *p)
5825 struct thread *t = &p->threads[p->thread_id];
5826 struct instruction *ip = t->ip;
5827 uint64_t *regarray, idx, src;
5829 TRACE("[Thread %2u] regwr (r[i] = h)\n", p->thread_id);
5832 regarray = instr_regarray_regarray(p, ip);
5833 idx = instr_regarray_idx_imm(p, ip);
5834 src = instr_regarray_src_nbo(t, ip);
5835 regarray[idx] = src;
/* regwr r[i] = m: immediate index, source from metadata (HBO). */
5842 instr_regwr_rim_exec(struct rte_swx_pipeline *p)
5844 struct thread *t = &p->threads[p->thread_id];
5845 struct instruction *ip = t->ip;
5846 uint64_t *regarray, idx, src;
5848 TRACE("[Thread %2u] regwr (r[i] = m)\n", p->thread_id);
5851 regarray = instr_regarray_regarray(p, ip);
5852 idx = instr_regarray_idx_imm(p, ip);
5853 src = instr_regarray_src_hbo(t, ip);
5854 regarray[idx] = src;
/* regwr r[i] = i: immediate index, immediate source. */
5861 instr_regwr_rii_exec(struct rte_swx_pipeline *p)
5863 struct thread *t = &p->threads[p->thread_id];
5864 struct instruction *ip = t->ip;
5865 uint64_t *regarray, idx, src;
5867 TRACE("[Thread %2u] regwr (r[i] = i)\n", p->thread_id);
5870 regarray = instr_regarray_regarray(p, ip);
5871 idx = instr_regarray_idx_imm(p, ip);
5872 src = ip->regarray.dstsrc_val;
5873 regarray[idx] = src;
/* regadd r[h] += h: index from header field (NBO), addend from header field
 * (NBO). */
5880 instr_regadd_rhh_exec(struct rte_swx_pipeline *p)
5882 struct thread *t = &p->threads[p->thread_id];
5883 struct instruction *ip = t->ip;
5884 uint64_t *regarray, idx, src;
5886 TRACE("[Thread %2u] regadd (r[h] += h)\n", p->thread_id);
5889 regarray = instr_regarray_regarray(p, ip);
5890 idx = instr_regarray_idx_nbo(p, t, ip);
5891 src = instr_regarray_src_nbo(t, ip);
5892 regarray[idx] += src;
/* regadd r[h] += m: index from header field (NBO), addend from metadata
 * (HBO). */
5899 instr_regadd_rhm_exec(struct rte_swx_pipeline *p)
5901 struct thread *t = &p->threads[p->thread_id];
5902 struct instruction *ip = t->ip;
5903 uint64_t *regarray, idx, src;
5905 TRACE("[Thread %2u] regadd (r[h] += m)\n", p->thread_id);
5908 regarray = instr_regarray_regarray(p, ip);
5909 idx = instr_regarray_idx_nbo(p, t, ip);
5910 src = instr_regarray_src_hbo(t, ip);
5911 regarray[idx] += src;
/* regadd r[m] += h: index from metadata (HBO), addend from header field
 * (NBO). */
5918 instr_regadd_rmh_exec(struct rte_swx_pipeline *p)
5920 struct thread *t = &p->threads[p->thread_id];
5921 struct instruction *ip = t->ip;
5922 uint64_t *regarray, idx, src;
5924 TRACE("[Thread %2u] regadd (r[m] += h)\n", p->thread_id);
5927 regarray = instr_regarray_regarray(p, ip);
5928 idx = instr_regarray_idx_hbo(p, t, ip);
5929 src = instr_regarray_src_nbo(t, ip);
5930 regarray[idx] += src;
/* regadd r[m] += m: index and addend both from metadata (HBO). */
5937 instr_regadd_rmm_exec(struct rte_swx_pipeline *p)
5939 struct thread *t = &p->threads[p->thread_id];
5940 struct instruction *ip = t->ip;
5941 uint64_t *regarray, idx, src;
5943 TRACE("[Thread %2u] regadd (r[m] += m)\n", p->thread_id);
5946 regarray = instr_regarray_regarray(p, ip);
5947 idx = instr_regarray_idx_hbo(p, t, ip);
5948 src = instr_regarray_src_hbo(t, ip);
5949 regarray[idx] += src;
/* regadd r[h] += i: index from header field (NBO), immediate addend. */
5956 instr_regadd_rhi_exec(struct rte_swx_pipeline *p)
5958 struct thread *t = &p->threads[p->thread_id];
5959 struct instruction *ip = t->ip;
5960 uint64_t *regarray, idx, src;
5962 TRACE("[Thread %2u] regadd (r[h] += i)\n", p->thread_id);
5965 regarray = instr_regarray_regarray(p, ip);
5966 idx = instr_regarray_idx_nbo(p, t, ip);
5967 src = ip->regarray.dstsrc_val;
5968 regarray[idx] += src;
/* regadd r[m] += i: index from metadata (HBO), immediate addend. */
5975 instr_regadd_rmi_exec(struct rte_swx_pipeline *p)
5977 struct thread *t = &p->threads[p->thread_id];
5978 struct instruction *ip = t->ip;
5979 uint64_t *regarray, idx, src;
5981 TRACE("[Thread %2u] regadd (r[m] += i)\n", p->thread_id);
5984 regarray = instr_regarray_regarray(p, ip);
5985 idx = instr_regarray_idx_hbo(p, t, ip);
5986 src = ip->regarray.dstsrc_val;
5987 regarray[idx] += src;
/* regadd r[i] += h: immediate index, addend from header field (NBO). */
5994 instr_regadd_rih_exec(struct rte_swx_pipeline *p)
5996 struct thread *t = &p->threads[p->thread_id];
5997 struct instruction *ip = t->ip;
5998 uint64_t *regarray, idx, src;
6000 TRACE("[Thread %2u] regadd (r[i] += h)\n", p->thread_id);
6003 regarray = instr_regarray_regarray(p, ip);
6004 idx = instr_regarray_idx_imm(p, ip);
6005 src = instr_regarray_src_nbo(t, ip);
6006 regarray[idx] += src;
/* regadd r[i] += m: immediate index, addend from metadata (HBO). */
6013 instr_regadd_rim_exec(struct rte_swx_pipeline *p)
6015 struct thread *t = &p->threads[p->thread_id];
6016 struct instruction *ip = t->ip;
6017 uint64_t *regarray, idx, src;
6019 TRACE("[Thread %2u] regadd (r[i] += m)\n", p->thread_id);
6022 regarray = instr_regarray_regarray(p, ip);
6023 idx = instr_regarray_idx_imm(p, ip);
6024 src = instr_regarray_src_hbo(t, ip);
6025 regarray[idx] += src;
/* regadd r[i] += i: immediate index, immediate addend. */
6032 instr_regadd_rii_exec(struct rte_swx_pipeline *p)
6034 struct thread *t = &p->threads[p->thread_id];
6035 struct instruction *ip = t->ip;
6036 uint64_t *regarray, idx, src;
6038 TRACE("[Thread %2u] regadd (r[i] += i)\n", p->thread_id);
6041 regarray = instr_regarray_regarray(p, ip);
6042 idx = instr_regarray_idx_imm(p, ip);
6043 src = ip->regarray.dstsrc_val;
6044 regarray[idx] += src;
6053 static struct metarray *
6054 metarray_find(struct rte_swx_pipeline *p, const char *name);
/* Translate "metprefetch METARRAY idx" into INSTR_METPREFETCH_H (idx is a
 * header field), _M (metadata et al. field) or _I (immediate). NOTE(review):
 * the CHECK after metarray_find and the "if (fidx)" branch structure are
 * elided by extraction.
 */
6057 instr_metprefetch_translate(struct rte_swx_pipeline *p,
6058 struct action *action,
6061 struct instruction *instr,
6062 struct instruction_data *data __rte_unused)
6064 char *metarray = tokens[1], *idx = tokens[2];
6067 uint32_t idx_struct_id, idx_val;
6069 CHECK(n_tokens == 3, EINVAL);
6071 m = metarray_find(p, metarray);
6074 /* METPREFETCH_H, METPREFETCH_M. */
6075 fidx = struct_field_parse(p, action, idx, &idx_struct_id);
6077 instr->type = INSTR_METPREFETCH_M;
6079 instr->type = INSTR_METPREFETCH_H;
6081 instr->meter.metarray_id = m->id;
6082 instr->meter.idx.struct_id = (uint8_t)idx_struct_id;
6083 instr->meter.idx.n_bits = fidx->n_bits;
6084 instr->meter.idx.offset = fidx->offset / 8;
6088 /* METPREFETCH_I. */
/* idx is not a struct field: parse it as an immediate index. */
6089 idx_val = strtoul(idx, &idx, 0);
6090 CHECK(!idx[0], EINVAL);
6092 instr->type = INSTR_METPREFETCH_I;
6093 instr->meter.metarray_id = m->id;
6094 instr->meter.idx_val = idx_val;
/* Translate "meter METARRAY idx length color_in color_out" into one of the
 * INSTR_METER_* opcodes. Opcode letters encode, in order: index kind
 * (H/M/I), length kind (H/M) and color_in kind (M/I); color_out must always
 * be a writable field, hence it is parsed with a NULL action so read-only
 * action arguments are rejected.
 */
6099 instr_meter_translate(struct rte_swx_pipeline *p,
6100 struct action *action,
6103 struct instruction *instr,
6104 struct instruction_data *data __rte_unused)
6106 char *metarray = tokens[1], *idx = tokens[2], *length = tokens[3];
6107 char *color_in = tokens[4], *color_out = tokens[5];
6109 struct field *fidx, *flength, *fcin, *fcout;
6110 uint32_t idx_struct_id, length_struct_id;
6111 uint32_t color_in_struct_id, color_out_struct_id;
6113 CHECK(n_tokens == 6, EINVAL);
6115 m = metarray_find(p, metarray);
6118 fidx = struct_field_parse(p, action, idx, &idx_struct_id);
6120 flength = struct_field_parse(p, action, length, &length_struct_id);
6121 CHECK(flength, EINVAL);
6123 fcin = struct_field_parse(p, action, color_in, &color_in_struct_id);
6125 fcout = struct_field_parse(p, NULL, color_out, &color_out_struct_id);
6126 CHECK(fcout, EINVAL);
6128 /* index = HMEFT, length = HMEFT, color_in = MEFT, color_out = MEF. */
6130 instr->type = INSTR_METER_MMM;
6131 if (idx[0] == 'h' && length[0] == 'h')
6132 instr->type = INSTR_METER_HHM;
6133 if (idx[0] == 'h' && length[0] != 'h')
6134 instr->type = INSTR_METER_HMM;
6135 if (idx[0] != 'h' && length[0] == 'h')
6136 instr->type = INSTR_METER_MHM;
6138 instr->meter.metarray_id = m->id;
6140 instr->meter.idx.struct_id = (uint8_t)idx_struct_id;
6141 instr->meter.idx.n_bits = fidx->n_bits;
6142 instr->meter.idx.offset = fidx->offset / 8;
6144 instr->meter.length.struct_id = (uint8_t)length_struct_id;
6145 instr->meter.length.n_bits = flength->n_bits;
6146 instr->meter.length.offset = flength->offset / 8;
6148 instr->meter.color_in.struct_id = (uint8_t)color_in_struct_id;
6149 instr->meter.color_in.n_bits = fcin->n_bits;
6150 instr->meter.color_in.offset = fcin->offset / 8;
6152 instr->meter.color_out.struct_id = (uint8_t)color_out_struct_id;
6153 instr->meter.color_out.n_bits = fcout->n_bits;
6154 instr->meter.color_out.offset = fcout->offset / 8;
6159 /* index = HMEFT, length = HMEFT, color_in = I, color_out = MEF. */
6160 if (fidx && !fcin) {
6161 uint32_t color_in_val = strtoul(color_in, &color_in, 0);
6162 CHECK(!color_in[0], EINVAL);
6164 instr->type = INSTR_METER_MMI;
6165 if (idx[0] == 'h' && length[0] == 'h')
6166 instr->type = INSTR_METER_HHI;
6167 if (idx[0] == 'h' && length[0] != 'h')
6168 instr->type = INSTR_METER_HMI;
6169 if (idx[0] != 'h' && length[0] == 'h')
6170 instr->type = INSTR_METER_MHI;
6172 instr->meter.metarray_id = m->id;
6174 instr->meter.idx.struct_id = (uint8_t)idx_struct_id;
6175 instr->meter.idx.n_bits = fidx->n_bits;
6176 instr->meter.idx.offset = fidx->offset / 8;
6178 instr->meter.length.struct_id = (uint8_t)length_struct_id;
6179 instr->meter.length.n_bits = flength->n_bits;
6180 instr->meter.length.offset = flength->offset / 8;
6182 instr->meter.color_in_val = color_in_val;
6184 instr->meter.color_out.struct_id = (uint8_t)color_out_struct_id;
6185 instr->meter.color_out.n_bits = fcout->n_bits;
6186 instr->meter.color_out.offset = fcout->offset / 8;
6191 /* index = I, length = HMEFT, color_in = MEFT, color_out = MEF. */
6192 if (!fidx && fcin) {
6195 idx_val = strtoul(idx, &idx, 0);
6196 CHECK(!idx[0], EINVAL);
6198 instr->type = INSTR_METER_IMM;
6199 if (length[0] == 'h')
6200 instr->type = INSTR_METER_IHM;
6202 instr->meter.metarray_id = m->id;
6204 instr->meter.idx_val = idx_val;
6206 instr->meter.length.struct_id = (uint8_t)length_struct_id;
6207 instr->meter.length.n_bits = flength->n_bits;
6208 instr->meter.length.offset = flength->offset / 8;
6210 instr->meter.color_in.struct_id = (uint8_t)color_in_struct_id;
6211 instr->meter.color_in.n_bits = fcin->n_bits;
6212 instr->meter.color_in.offset = fcin->offset / 8;
6214 instr->meter.color_out.struct_id = (uint8_t)color_out_struct_id;
6215 instr->meter.color_out.n_bits = fcout->n_bits;
6216 instr->meter.color_out.offset = fcout->offset / 8;
6221 /* index = I, length = HMEFT, color_in = I, color_out = MEF. */
6222 if (!fidx && !fcin) {
6223 uint32_t idx_val, color_in_val;
6225 idx_val = strtoul(idx, &idx, 0);
6226 CHECK(!idx[0], EINVAL);
6228 color_in_val = strtoul(color_in, &color_in, 0);
6229 CHECK(!color_in[0], EINVAL);
6231 instr->type = INSTR_METER_IMI;
6232 if (length[0] == 'h')
6233 instr->type = INSTR_METER_IHI;
6235 instr->meter.metarray_id = m->id;
6237 instr->meter.idx_val = idx_val;
6239 instr->meter.length.struct_id = (uint8_t)length_struct_id;
6240 instr->meter.length.n_bits = flength->n_bits;
6241 instr->meter.length.offset = flength->offset / 8;
6243 instr->meter.color_in_val = color_in_val;
6245 instr->meter.color_out.struct_id = (uint8_t)color_out_struct_id;
6246 instr->meter.color_out.n_bits = fcout->n_bits;
6247 instr->meter.color_out.offset = fcout->offset / 8;
/* Resolve the meter selected by a host-byte-order index field: mask to the
 * field width, clamp with the metarray size mask, return the meter entry. */
6255 static inline struct meter *
6256 instr_meter_idx_hbo(struct rte_swx_pipeline *p, struct thread *t, struct instruction *ip)
6258 struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
6260 uint8_t *idx_struct = t->structs[ip->meter.idx.struct_id];
6261 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->meter.idx.offset];
6262 uint64_t idx64 = *idx64_ptr;
6263 uint64_t idx64_mask = UINT64_MAX >> (64 - (ip)->meter.idx.n_bits);
6264 uint64_t idx = idx64 & idx64_mask & r->size_mask;
6266 return &r->metarray[idx];
/* Network-byte-order variant of the meter index lookup; aliases the HBO
 * variant on big-endian hosts via the #define below. */
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
6271 static inline struct meter *
6272 instr_meter_idx_nbo(struct rte_swx_pipeline *p, struct thread *t, struct instruction *ip)
6274 struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
6276 uint8_t *idx_struct = t->structs[ip->meter.idx.struct_id];
6277 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->meter.idx.offset];
6278 uint64_t idx64 = *idx64_ptr;
6279 uint64_t idx = (ntoh64(idx64) >> (64 - ip->meter.idx.n_bits)) & r->size_mask;
6281 return &r->metarray[idx];
6286 #define instr_meter_idx_nbo instr_meter_idx_hbo
/* Resolve the meter at an immediate index, clamped with the size mask. */
6290 static inline struct meter *
6291 instr_meter_idx_imm(struct rte_swx_pipeline *p, struct instruction *ip)
6293 struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
6295 uint64_t idx = ip->meter.idx_val & r->size_mask;
6297 return &r->metarray[idx];
/* Read the packet length operand from a host-byte-order field, masked to the
 * field width and truncated to 32 bits. */
6300 static inline uint32_t
6301 instr_meter_length_hbo(struct thread *t, struct instruction *ip)
6303 uint8_t *src_struct = t->structs[ip->meter.length.struct_id];
6304 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.length.offset];
6305 uint64_t src64 = *src64_ptr;
6306 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->meter.length.n_bits);
6307 uint64_t src = src64 & src64_mask;
6309 return (uint32_t)src;
/* Network-byte-order variant of the length read; aliases the HBO variant on
 * big-endian hosts via the #define below. */
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
6314 static inline uint32_t
6315 instr_meter_length_nbo(struct thread *t, struct instruction *ip)
6317 uint8_t *src_struct = t->structs[ip->meter.length.struct_id];
6318 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.length.offset];
6319 uint64_t src64 = *src64_ptr;
6320 uint64_t src = ntoh64(src64) >> (64 - ip->meter.length.n_bits);
6322 return (uint32_t)src;
6327 #define instr_meter_length_nbo instr_meter_length_hbo
6331 static inline enum rte_color
6332 instr_meter_color_in_hbo(struct thread *t, struct instruction *ip)
6334 uint8_t *src_struct = t->structs[ip->meter.color_in.struct_id];
6335 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.color_in.offset];
6336 uint64_t src64 = *src64_ptr;
6337 uint64_t src64_mask = UINT64_MAX >> (64 - ip->meter.color_in.n_bits);
6338 uint64_t src = src64 & src64_mask;
6340 return (enum rte_color)src;
6344 instr_meter_color_out_hbo_set(struct thread *t, struct instruction *ip, enum rte_color color_out)
6346 uint8_t *dst_struct = t->structs[ip->meter.color_out.struct_id];
6347 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->meter.color_out.offset];
6348 uint64_t dst64 = *dst64_ptr;
6349 uint64_t dst64_mask = UINT64_MAX >> (64 - ip->meter.color_out.n_bits);
6351 uint64_t src = (uint64_t)color_out;
6353 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
6357 instr_metprefetch_h_exec(struct rte_swx_pipeline *p)
6359 struct thread *t = &p->threads[p->thread_id];
6360 struct instruction *ip = t->ip;
6363 TRACE("[Thread %2u] metprefetch (h)\n", p->thread_id);
6366 m = instr_meter_idx_nbo(p, t, ip);
6374 instr_metprefetch_m_exec(struct rte_swx_pipeline *p)
6376 struct thread *t = &p->threads[p->thread_id];
6377 struct instruction *ip = t->ip;
6380 TRACE("[Thread %2u] metprefetch (m)\n", p->thread_id);
6383 m = instr_meter_idx_hbo(p, t, ip);
6391 instr_metprefetch_i_exec(struct rte_swx_pipeline *p)
6393 struct thread *t = &p->threads[p->thread_id];
6394 struct instruction *ip = t->ip;
6397 TRACE("[Thread %2u] metprefetch (i)\n", p->thread_id);
6400 m = instr_meter_idx_imm(p, ip);
6408 instr_meter_hhm_exec(struct rte_swx_pipeline *p)
6410 struct thread *t = &p->threads[p->thread_id];
6411 struct instruction *ip = t->ip;
6413 uint64_t time, n_pkts, n_bytes;
6415 enum rte_color color_in, color_out;
6417 TRACE("[Thread %2u] meter (hhm)\n", p->thread_id);
6420 m = instr_meter_idx_nbo(p, t, ip);
6421 rte_prefetch0(m->n_pkts);
6422 time = rte_get_tsc_cycles();
6423 length = instr_meter_length_nbo(t, ip);
6424 color_in = instr_meter_color_in_hbo(t, ip);
6426 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6427 &m->profile->profile,
6432 color_out &= m->color_mask;
6434 n_pkts = m->n_pkts[color_out];
6435 n_bytes = m->n_bytes[color_out];
6437 instr_meter_color_out_hbo_set(t, ip, color_out);
6439 m->n_pkts[color_out] = n_pkts + 1;
6440 m->n_bytes[color_out] = n_bytes + length;
6447 instr_meter_hhi_exec(struct rte_swx_pipeline *p)
6449 struct thread *t = &p->threads[p->thread_id];
6450 struct instruction *ip = t->ip;
6452 uint64_t time, n_pkts, n_bytes;
6454 enum rte_color color_in, color_out;
6456 TRACE("[Thread %2u] meter (hhi)\n", p->thread_id);
6459 m = instr_meter_idx_nbo(p, t, ip);
6460 rte_prefetch0(m->n_pkts);
6461 time = rte_get_tsc_cycles();
6462 length = instr_meter_length_nbo(t, ip);
6463 color_in = (enum rte_color)ip->meter.color_in_val;
6465 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6466 &m->profile->profile,
6471 color_out &= m->color_mask;
6473 n_pkts = m->n_pkts[color_out];
6474 n_bytes = m->n_bytes[color_out];
6476 instr_meter_color_out_hbo_set(t, ip, color_out);
6478 m->n_pkts[color_out] = n_pkts + 1;
6479 m->n_bytes[color_out] = n_bytes + length;
6486 instr_meter_hmm_exec(struct rte_swx_pipeline *p)
6488 struct thread *t = &p->threads[p->thread_id];
6489 struct instruction *ip = t->ip;
6491 uint64_t time, n_pkts, n_bytes;
6493 enum rte_color color_in, color_out;
6495 TRACE("[Thread %2u] meter (hmm)\n", p->thread_id);
6498 m = instr_meter_idx_nbo(p, t, ip);
6499 rte_prefetch0(m->n_pkts);
6500 time = rte_get_tsc_cycles();
6501 length = instr_meter_length_hbo(t, ip);
6502 color_in = instr_meter_color_in_hbo(t, ip);
6504 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6505 &m->profile->profile,
6510 color_out &= m->color_mask;
6512 n_pkts = m->n_pkts[color_out];
6513 n_bytes = m->n_bytes[color_out];
6515 instr_meter_color_out_hbo_set(t, ip, color_out);
6517 m->n_pkts[color_out] = n_pkts + 1;
6518 m->n_bytes[color_out] = n_bytes + length;
6524 instr_meter_hmi_exec(struct rte_swx_pipeline *p)
6526 struct thread *t = &p->threads[p->thread_id];
6527 struct instruction *ip = t->ip;
6529 uint64_t time, n_pkts, n_bytes;
6531 enum rte_color color_in, color_out;
6533 TRACE("[Thread %2u] meter (hmi)\n", p->thread_id);
6536 m = instr_meter_idx_nbo(p, t, ip);
6537 rte_prefetch0(m->n_pkts);
6538 time = rte_get_tsc_cycles();
6539 length = instr_meter_length_hbo(t, ip);
6540 color_in = (enum rte_color)ip->meter.color_in_val;
6542 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6543 &m->profile->profile,
6548 color_out &= m->color_mask;
6550 n_pkts = m->n_pkts[color_out];
6551 n_bytes = m->n_bytes[color_out];
6553 instr_meter_color_out_hbo_set(t, ip, color_out);
6555 m->n_pkts[color_out] = n_pkts + 1;
6556 m->n_bytes[color_out] = n_bytes + length;
6563 instr_meter_mhm_exec(struct rte_swx_pipeline *p)
6565 struct thread *t = &p->threads[p->thread_id];
6566 struct instruction *ip = t->ip;
6568 uint64_t time, n_pkts, n_bytes;
6570 enum rte_color color_in, color_out;
6572 TRACE("[Thread %2u] meter (mhm)\n", p->thread_id);
6575 m = instr_meter_idx_hbo(p, t, ip);
6576 rte_prefetch0(m->n_pkts);
6577 time = rte_get_tsc_cycles();
6578 length = instr_meter_length_nbo(t, ip);
6579 color_in = instr_meter_color_in_hbo(t, ip);
6581 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6582 &m->profile->profile,
6587 color_out &= m->color_mask;
6589 n_pkts = m->n_pkts[color_out];
6590 n_bytes = m->n_bytes[color_out];
6592 instr_meter_color_out_hbo_set(t, ip, color_out);
6594 m->n_pkts[color_out] = n_pkts + 1;
6595 m->n_bytes[color_out] = n_bytes + length;
6602 instr_meter_mhi_exec(struct rte_swx_pipeline *p)
6604 struct thread *t = &p->threads[p->thread_id];
6605 struct instruction *ip = t->ip;
6607 uint64_t time, n_pkts, n_bytes;
6609 enum rte_color color_in, color_out;
6611 TRACE("[Thread %2u] meter (mhi)\n", p->thread_id);
6614 m = instr_meter_idx_hbo(p, t, ip);
6615 rte_prefetch0(m->n_pkts);
6616 time = rte_get_tsc_cycles();
6617 length = instr_meter_length_nbo(t, ip);
6618 color_in = (enum rte_color)ip->meter.color_in_val;
6620 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6621 &m->profile->profile,
6626 color_out &= m->color_mask;
6628 n_pkts = m->n_pkts[color_out];
6629 n_bytes = m->n_bytes[color_out];
6631 instr_meter_color_out_hbo_set(t, ip, color_out);
6633 m->n_pkts[color_out] = n_pkts + 1;
6634 m->n_bytes[color_out] = n_bytes + length;
6641 instr_meter_mmm_exec(struct rte_swx_pipeline *p)
6643 struct thread *t = &p->threads[p->thread_id];
6644 struct instruction *ip = t->ip;
6646 uint64_t time, n_pkts, n_bytes;
6648 enum rte_color color_in, color_out;
6650 TRACE("[Thread %2u] meter (mmm)\n", p->thread_id);
6653 m = instr_meter_idx_hbo(p, t, ip);
6654 rte_prefetch0(m->n_pkts);
6655 time = rte_get_tsc_cycles();
6656 length = instr_meter_length_hbo(t, ip);
6657 color_in = instr_meter_color_in_hbo(t, ip);
6659 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6660 &m->profile->profile,
6665 color_out &= m->color_mask;
6667 n_pkts = m->n_pkts[color_out];
6668 n_bytes = m->n_bytes[color_out];
6670 instr_meter_color_out_hbo_set(t, ip, color_out);
6672 m->n_pkts[color_out] = n_pkts + 1;
6673 m->n_bytes[color_out] = n_bytes + length;
6680 instr_meter_mmi_exec(struct rte_swx_pipeline *p)
6682 struct thread *t = &p->threads[p->thread_id];
6683 struct instruction *ip = t->ip;
6685 uint64_t time, n_pkts, n_bytes;
6687 enum rte_color color_in, color_out;
6689 TRACE("[Thread %2u] meter (mmi)\n", p->thread_id);
6692 m = instr_meter_idx_hbo(p, t, ip);
6693 rte_prefetch0(m->n_pkts);
6694 time = rte_get_tsc_cycles();
6695 length = instr_meter_length_hbo(t, ip);
6696 color_in = (enum rte_color)ip->meter.color_in_val;
6698 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6699 &m->profile->profile,
6704 color_out &= m->color_mask;
6706 n_pkts = m->n_pkts[color_out];
6707 n_bytes = m->n_bytes[color_out];
6709 instr_meter_color_out_hbo_set(t, ip, color_out);
6711 m->n_pkts[color_out] = n_pkts + 1;
6712 m->n_bytes[color_out] = n_bytes + length;
6719 instr_meter_ihm_exec(struct rte_swx_pipeline *p)
6721 struct thread *t = &p->threads[p->thread_id];
6722 struct instruction *ip = t->ip;
6724 uint64_t time, n_pkts, n_bytes;
6726 enum rte_color color_in, color_out;
6728 TRACE("[Thread %2u] meter (ihm)\n", p->thread_id);
6731 m = instr_meter_idx_imm(p, ip);
6732 rte_prefetch0(m->n_pkts);
6733 time = rte_get_tsc_cycles();
6734 length = instr_meter_length_nbo(t, ip);
6735 color_in = instr_meter_color_in_hbo(t, ip);
6737 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6738 &m->profile->profile,
6743 color_out &= m->color_mask;
6745 n_pkts = m->n_pkts[color_out];
6746 n_bytes = m->n_bytes[color_out];
6748 instr_meter_color_out_hbo_set(t, ip, color_out);
6750 m->n_pkts[color_out] = n_pkts + 1;
6751 m->n_bytes[color_out] = n_bytes + length;
6758 instr_meter_ihi_exec(struct rte_swx_pipeline *p)
6760 struct thread *t = &p->threads[p->thread_id];
6761 struct instruction *ip = t->ip;
6763 uint64_t time, n_pkts, n_bytes;
6765 enum rte_color color_in, color_out;
6767 TRACE("[Thread %2u] meter (ihi)\n", p->thread_id);
6770 m = instr_meter_idx_imm(p, ip);
6771 rte_prefetch0(m->n_pkts);
6772 time = rte_get_tsc_cycles();
6773 length = instr_meter_length_nbo(t, ip);
6774 color_in = (enum rte_color)ip->meter.color_in_val;
6776 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6777 &m->profile->profile,
6782 color_out &= m->color_mask;
6784 n_pkts = m->n_pkts[color_out];
6785 n_bytes = m->n_bytes[color_out];
6787 instr_meter_color_out_hbo_set(t, ip, color_out);
6789 m->n_pkts[color_out] = n_pkts + 1;
6790 m->n_bytes[color_out] = n_bytes + length;
6797 instr_meter_imm_exec(struct rte_swx_pipeline *p)
6799 struct thread *t = &p->threads[p->thread_id];
6800 struct instruction *ip = t->ip;
6802 uint64_t time, n_pkts, n_bytes;
6804 enum rte_color color_in, color_out;
6806 TRACE("[Thread %2u] meter (imm)\n", p->thread_id);
6809 m = instr_meter_idx_imm(p, ip);
6810 rte_prefetch0(m->n_pkts);
6811 time = rte_get_tsc_cycles();
6812 length = instr_meter_length_hbo(t, ip);
6813 color_in = instr_meter_color_in_hbo(t, ip);
6815 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6816 &m->profile->profile,
6821 color_out &= m->color_mask;
6823 n_pkts = m->n_pkts[color_out];
6824 n_bytes = m->n_bytes[color_out];
6826 instr_meter_color_out_hbo_set(t, ip, color_out);
6828 m->n_pkts[color_out] = n_pkts + 1;
6829 m->n_bytes[color_out] = n_bytes + length;
6835 instr_meter_imi_exec(struct rte_swx_pipeline *p)
6837 struct thread *t = &p->threads[p->thread_id];
6838 struct instruction *ip = t->ip;
6840 uint64_t time, n_pkts, n_bytes;
6842 enum rte_color color_in, color_out;
6844 TRACE("[Thread %2u] meter (imi)\n", p->thread_id);
6847 m = instr_meter_idx_imm(p, ip);
6848 rte_prefetch0(m->n_pkts);
6849 time = rte_get_tsc_cycles();
6850 length = instr_meter_length_hbo(t, ip);
6851 color_in = (enum rte_color)ip->meter.color_in_val;
6853 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6854 &m->profile->profile,
6859 color_out &= m->color_mask;
6861 n_pkts = m->n_pkts[color_out];
6862 n_bytes = m->n_bytes[color_out];
6864 instr_meter_color_out_hbo_set(t, ip, color_out);
6866 m->n_pkts[color_out] = n_pkts + 1;
6867 m->n_bytes[color_out] = n_bytes + length;
6876 static struct action *
6877 action_find(struct rte_swx_pipeline *p, const char *name);
6880 instr_jmp_translate(struct rte_swx_pipeline *p __rte_unused,
6881 struct action *action __rte_unused,
6884 struct instruction *instr,
6885 struct instruction_data *data)
6887 CHECK(n_tokens == 2, EINVAL);
6889 strcpy(data->jmp_label, tokens[1]);
6891 instr->type = INSTR_JMP;
6892 instr->jmp.ip = NULL; /* Resolved later. */
6897 instr_jmp_valid_translate(struct rte_swx_pipeline *p,
6898 struct action *action __rte_unused,
6901 struct instruction *instr,
6902 struct instruction_data *data)
6906 CHECK(n_tokens == 3, EINVAL);
6908 strcpy(data->jmp_label, tokens[1]);
6910 h = header_parse(p, tokens[2]);
6913 instr->type = INSTR_JMP_VALID;
6914 instr->jmp.ip = NULL; /* Resolved later. */
6915 instr->jmp.header_id = h->id;
6920 instr_jmp_invalid_translate(struct rte_swx_pipeline *p,
6921 struct action *action __rte_unused,
6924 struct instruction *instr,
6925 struct instruction_data *data)
6929 CHECK(n_tokens == 3, EINVAL);
6931 strcpy(data->jmp_label, tokens[1]);
6933 h = header_parse(p, tokens[2]);
6936 instr->type = INSTR_JMP_INVALID;
6937 instr->jmp.ip = NULL; /* Resolved later. */
6938 instr->jmp.header_id = h->id;
6943 instr_jmp_hit_translate(struct rte_swx_pipeline *p __rte_unused,
6944 struct action *action,
6947 struct instruction *instr,
6948 struct instruction_data *data)
6950 CHECK(!action, EINVAL);
6951 CHECK(n_tokens == 2, EINVAL);
6953 strcpy(data->jmp_label, tokens[1]);
6955 instr->type = INSTR_JMP_HIT;
6956 instr->jmp.ip = NULL; /* Resolved later. */
6961 instr_jmp_miss_translate(struct rte_swx_pipeline *p __rte_unused,
6962 struct action *action,
6965 struct instruction *instr,
6966 struct instruction_data *data)
6968 CHECK(!action, EINVAL);
6969 CHECK(n_tokens == 2, EINVAL);
6971 strcpy(data->jmp_label, tokens[1]);
6973 instr->type = INSTR_JMP_MISS;
6974 instr->jmp.ip = NULL; /* Resolved later. */
6979 instr_jmp_action_hit_translate(struct rte_swx_pipeline *p,
6980 struct action *action,
6983 struct instruction *instr,
6984 struct instruction_data *data)
6988 CHECK(!action, EINVAL);
6989 CHECK(n_tokens == 3, EINVAL);
6991 strcpy(data->jmp_label, tokens[1]);
6993 a = action_find(p, tokens[2]);
6996 instr->type = INSTR_JMP_ACTION_HIT;
6997 instr->jmp.ip = NULL; /* Resolved later. */
6998 instr->jmp.action_id = a->id;
7003 instr_jmp_action_miss_translate(struct rte_swx_pipeline *p,
7004 struct action *action,
7007 struct instruction *instr,
7008 struct instruction_data *data)
7012 CHECK(!action, EINVAL);
7013 CHECK(n_tokens == 3, EINVAL);
7015 strcpy(data->jmp_label, tokens[1]);
7017 a = action_find(p, tokens[2]);
7020 instr->type = INSTR_JMP_ACTION_MISS;
7021 instr->jmp.ip = NULL; /* Resolved later. */
7022 instr->jmp.action_id = a->id;
7027 instr_jmp_eq_translate(struct rte_swx_pipeline *p,
7028 struct action *action,
7031 struct instruction *instr,
7032 struct instruction_data *data)
7034 char *a = tokens[2], *b = tokens[3];
7035 struct field *fa, *fb;
7037 uint32_t a_struct_id, b_struct_id;
7039 CHECK(n_tokens == 4, EINVAL);
7041 strcpy(data->jmp_label, tokens[1]);
7043 fa = struct_field_parse(p, action, a, &a_struct_id);
7046 /* JMP_EQ, JMP_EQ_MH, JMP_EQ_HM, JMP_EQ_HH. */
7047 fb = struct_field_parse(p, action, b, &b_struct_id);
7049 instr->type = INSTR_JMP_EQ;
7050 if (a[0] != 'h' && b[0] == 'h')
7051 instr->type = INSTR_JMP_EQ_MH;
7052 if (a[0] == 'h' && b[0] != 'h')
7053 instr->type = INSTR_JMP_EQ_HM;
7054 if (a[0] == 'h' && b[0] == 'h')
7055 instr->type = INSTR_JMP_EQ_HH;
7056 instr->jmp.ip = NULL; /* Resolved later. */
7058 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7059 instr->jmp.a.n_bits = fa->n_bits;
7060 instr->jmp.a.offset = fa->offset / 8;
7061 instr->jmp.b.struct_id = (uint8_t)b_struct_id;
7062 instr->jmp.b.n_bits = fb->n_bits;
7063 instr->jmp.b.offset = fb->offset / 8;
7068 b_val = strtoull(b, &b, 0);
7069 CHECK(!b[0], EINVAL);
7072 b_val = hton64(b_val) >> (64 - fa->n_bits);
7074 instr->type = INSTR_JMP_EQ_I;
7075 instr->jmp.ip = NULL; /* Resolved later. */
7076 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7077 instr->jmp.a.n_bits = fa->n_bits;
7078 instr->jmp.a.offset = fa->offset / 8;
7079 instr->jmp.b_val = b_val;
7084 instr_jmp_neq_translate(struct rte_swx_pipeline *p,
7085 struct action *action,
7088 struct instruction *instr,
7089 struct instruction_data *data)
7091 char *a = tokens[2], *b = tokens[3];
7092 struct field *fa, *fb;
7094 uint32_t a_struct_id, b_struct_id;
7096 CHECK(n_tokens == 4, EINVAL);
7098 strcpy(data->jmp_label, tokens[1]);
7100 fa = struct_field_parse(p, action, a, &a_struct_id);
7103 /* JMP_NEQ, JMP_NEQ_MH, JMP_NEQ_HM, JMP_NEQ_HH. */
7104 fb = struct_field_parse(p, action, b, &b_struct_id);
7106 instr->type = INSTR_JMP_NEQ;
7107 if (a[0] != 'h' && b[0] == 'h')
7108 instr->type = INSTR_JMP_NEQ_MH;
7109 if (a[0] == 'h' && b[0] != 'h')
7110 instr->type = INSTR_JMP_NEQ_HM;
7111 if (a[0] == 'h' && b[0] == 'h')
7112 instr->type = INSTR_JMP_NEQ_HH;
7113 instr->jmp.ip = NULL; /* Resolved later. */
7115 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7116 instr->jmp.a.n_bits = fa->n_bits;
7117 instr->jmp.a.offset = fa->offset / 8;
7118 instr->jmp.b.struct_id = (uint8_t)b_struct_id;
7119 instr->jmp.b.n_bits = fb->n_bits;
7120 instr->jmp.b.offset = fb->offset / 8;
7125 b_val = strtoull(b, &b, 0);
7126 CHECK(!b[0], EINVAL);
7129 b_val = hton64(b_val) >> (64 - fa->n_bits);
7131 instr->type = INSTR_JMP_NEQ_I;
7132 instr->jmp.ip = NULL; /* Resolved later. */
7133 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7134 instr->jmp.a.n_bits = fa->n_bits;
7135 instr->jmp.a.offset = fa->offset / 8;
7136 instr->jmp.b_val = b_val;
7141 instr_jmp_lt_translate(struct rte_swx_pipeline *p,
7142 struct action *action,
7145 struct instruction *instr,
7146 struct instruction_data *data)
7148 char *a = tokens[2], *b = tokens[3];
7149 struct field *fa, *fb;
7151 uint32_t a_struct_id, b_struct_id;
7153 CHECK(n_tokens == 4, EINVAL);
7155 strcpy(data->jmp_label, tokens[1]);
7157 fa = struct_field_parse(p, action, a, &a_struct_id);
7160 /* JMP_LT, JMP_LT_MH, JMP_LT_HM, JMP_LT_HH. */
7161 fb = struct_field_parse(p, action, b, &b_struct_id);
7163 instr->type = INSTR_JMP_LT;
7164 if (a[0] == 'h' && b[0] != 'h')
7165 instr->type = INSTR_JMP_LT_HM;
7166 if (a[0] != 'h' && b[0] == 'h')
7167 instr->type = INSTR_JMP_LT_MH;
7168 if (a[0] == 'h' && b[0] == 'h')
7169 instr->type = INSTR_JMP_LT_HH;
7170 instr->jmp.ip = NULL; /* Resolved later. */
7172 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7173 instr->jmp.a.n_bits = fa->n_bits;
7174 instr->jmp.a.offset = fa->offset / 8;
7175 instr->jmp.b.struct_id = (uint8_t)b_struct_id;
7176 instr->jmp.b.n_bits = fb->n_bits;
7177 instr->jmp.b.offset = fb->offset / 8;
7181 /* JMP_LT_MI, JMP_LT_HI. */
7182 b_val = strtoull(b, &b, 0);
7183 CHECK(!b[0], EINVAL);
7185 instr->type = INSTR_JMP_LT_MI;
7187 instr->type = INSTR_JMP_LT_HI;
7188 instr->jmp.ip = NULL; /* Resolved later. */
7190 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7191 instr->jmp.a.n_bits = fa->n_bits;
7192 instr->jmp.a.offset = fa->offset / 8;
7193 instr->jmp.b_val = b_val;
7198 instr_jmp_gt_translate(struct rte_swx_pipeline *p,
7199 struct action *action,
7202 struct instruction *instr,
7203 struct instruction_data *data)
7205 char *a = tokens[2], *b = tokens[3];
7206 struct field *fa, *fb;
7208 uint32_t a_struct_id, b_struct_id;
7210 CHECK(n_tokens == 4, EINVAL);
7212 strcpy(data->jmp_label, tokens[1]);
7214 fa = struct_field_parse(p, action, a, &a_struct_id);
7217 /* JMP_GT, JMP_GT_MH, JMP_GT_HM, JMP_GT_HH. */
7218 fb = struct_field_parse(p, action, b, &b_struct_id);
7220 instr->type = INSTR_JMP_GT;
7221 if (a[0] == 'h' && b[0] != 'h')
7222 instr->type = INSTR_JMP_GT_HM;
7223 if (a[0] != 'h' && b[0] == 'h')
7224 instr->type = INSTR_JMP_GT_MH;
7225 if (a[0] == 'h' && b[0] == 'h')
7226 instr->type = INSTR_JMP_GT_HH;
7227 instr->jmp.ip = NULL; /* Resolved later. */
7229 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7230 instr->jmp.a.n_bits = fa->n_bits;
7231 instr->jmp.a.offset = fa->offset / 8;
7232 instr->jmp.b.struct_id = (uint8_t)b_struct_id;
7233 instr->jmp.b.n_bits = fb->n_bits;
7234 instr->jmp.b.offset = fb->offset / 8;
7238 /* JMP_GT_MI, JMP_GT_HI. */
7239 b_val = strtoull(b, &b, 0);
7240 CHECK(!b[0], EINVAL);
7242 instr->type = INSTR_JMP_GT_MI;
7244 instr->type = INSTR_JMP_GT_HI;
7245 instr->jmp.ip = NULL; /* Resolved later. */
7247 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7248 instr->jmp.a.n_bits = fa->n_bits;
7249 instr->jmp.a.offset = fa->offset / 8;
7250 instr->jmp.b_val = b_val;
7255 instr_jmp_exec(struct rte_swx_pipeline *p)
7257 struct thread *t = &p->threads[p->thread_id];
7258 struct instruction *ip = t->ip;
7260 TRACE("[Thread %2u] jmp\n", p->thread_id);
7262 thread_ip_set(t, ip->jmp.ip);
7266 instr_jmp_valid_exec(struct rte_swx_pipeline *p)
7268 struct thread *t = &p->threads[p->thread_id];
7269 struct instruction *ip = t->ip;
7270 uint32_t header_id = ip->jmp.header_id;
7272 TRACE("[Thread %2u] jmpv\n", p->thread_id);
7274 t->ip = HEADER_VALID(t, header_id) ? ip->jmp.ip : (t->ip + 1);
7278 instr_jmp_invalid_exec(struct rte_swx_pipeline *p)
7280 struct thread *t = &p->threads[p->thread_id];
7281 struct instruction *ip = t->ip;
7282 uint32_t header_id = ip->jmp.header_id;
7284 TRACE("[Thread %2u] jmpnv\n", p->thread_id);
7286 t->ip = HEADER_VALID(t, header_id) ? (t->ip + 1) : ip->jmp.ip;
7290 instr_jmp_hit_exec(struct rte_swx_pipeline *p)
7292 struct thread *t = &p->threads[p->thread_id];
7293 struct instruction *ip = t->ip;
7294 struct instruction *ip_next[] = {t->ip + 1, ip->jmp.ip};
7296 TRACE("[Thread %2u] jmph\n", p->thread_id);
7298 t->ip = ip_next[t->hit];
7302 instr_jmp_miss_exec(struct rte_swx_pipeline *p)
7304 struct thread *t = &p->threads[p->thread_id];
7305 struct instruction *ip = t->ip;
7306 struct instruction *ip_next[] = {ip->jmp.ip, t->ip + 1};
7308 TRACE("[Thread %2u] jmpnh\n", p->thread_id);
7310 t->ip = ip_next[t->hit];
7314 instr_jmp_action_hit_exec(struct rte_swx_pipeline *p)
7316 struct thread *t = &p->threads[p->thread_id];
7317 struct instruction *ip = t->ip;
7319 TRACE("[Thread %2u] jmpa\n", p->thread_id);
7321 t->ip = (ip->jmp.action_id == t->action_id) ? ip->jmp.ip : (t->ip + 1);
7325 instr_jmp_action_miss_exec(struct rte_swx_pipeline *p)
7327 struct thread *t = &p->threads[p->thread_id];
7328 struct instruction *ip = t->ip;
7330 TRACE("[Thread %2u] jmpna\n", p->thread_id);
7332 t->ip = (ip->jmp.action_id == t->action_id) ? (t->ip + 1) : ip->jmp.ip;
7336 instr_jmp_eq_exec(struct rte_swx_pipeline *p)
7338 struct thread *t = &p->threads[p->thread_id];
7339 struct instruction *ip = t->ip;
7341 TRACE("[Thread %2u] jmpeq\n", p->thread_id);
7347 instr_jmp_eq_mh_exec(struct rte_swx_pipeline *p)
7349 struct thread *t = &p->threads[p->thread_id];
7350 struct instruction *ip = t->ip;
7352 TRACE("[Thread %2u] jmpeq (mh)\n", p->thread_id);
7354 JMP_CMP_MH(t, ip, ==);
7358 instr_jmp_eq_hm_exec(struct rte_swx_pipeline *p)
7360 struct thread *t = &p->threads[p->thread_id];
7361 struct instruction *ip = t->ip;
7363 TRACE("[Thread %2u] jmpeq (hm)\n", p->thread_id);
7365 JMP_CMP_HM(t, ip, ==);
7369 instr_jmp_eq_hh_exec(struct rte_swx_pipeline *p)
7371 struct thread *t = &p->threads[p->thread_id];
7372 struct instruction *ip = t->ip;
7374 TRACE("[Thread %2u] jmpeq (hh)\n", p->thread_id);
7376 JMP_CMP_HH_FAST(t, ip, ==);
7380 instr_jmp_eq_i_exec(struct rte_swx_pipeline *p)
7382 struct thread *t = &p->threads[p->thread_id];
7383 struct instruction *ip = t->ip;
7385 TRACE("[Thread %2u] jmpeq (i)\n", p->thread_id);
7387 JMP_CMP_I(t, ip, ==);
7391 instr_jmp_neq_exec(struct rte_swx_pipeline *p)
7393 struct thread *t = &p->threads[p->thread_id];
7394 struct instruction *ip = t->ip;
7396 TRACE("[Thread %2u] jmpneq\n", p->thread_id);
7402 instr_jmp_neq_mh_exec(struct rte_swx_pipeline *p)
7404 struct thread *t = &p->threads[p->thread_id];
7405 struct instruction *ip = t->ip;
7407 TRACE("[Thread %2u] jmpneq (mh)\n", p->thread_id);
7409 JMP_CMP_MH(t, ip, !=);
7413 instr_jmp_neq_hm_exec(struct rte_swx_pipeline *p)
7415 struct thread *t = &p->threads[p->thread_id];
7416 struct instruction *ip = t->ip;
7418 TRACE("[Thread %2u] jmpneq (hm)\n", p->thread_id);
7420 JMP_CMP_HM(t, ip, !=);
7424 instr_jmp_neq_hh_exec(struct rte_swx_pipeline *p)
7426 struct thread *t = &p->threads[p->thread_id];
7427 struct instruction *ip = t->ip;
7429 TRACE("[Thread %2u] jmpneq (hh)\n", p->thread_id);
7431 JMP_CMP_HH_FAST(t, ip, !=);
7435 instr_jmp_neq_i_exec(struct rte_swx_pipeline *p)
7437 struct thread *t = &p->threads[p->thread_id];
7438 struct instruction *ip = t->ip;
7440 TRACE("[Thread %2u] jmpneq (i)\n", p->thread_id);
7442 JMP_CMP_I(t, ip, !=);
7446 instr_jmp_lt_exec(struct rte_swx_pipeline *p)
7448 struct thread *t = &p->threads[p->thread_id];
7449 struct instruction *ip = t->ip;
7451 TRACE("[Thread %2u] jmplt\n", p->thread_id);
7457 instr_jmp_lt_mh_exec(struct rte_swx_pipeline *p)
7459 struct thread *t = &p->threads[p->thread_id];
7460 struct instruction *ip = t->ip;
7462 TRACE("[Thread %2u] jmplt (mh)\n", p->thread_id);
7464 JMP_CMP_MH(t, ip, <);
7468 instr_jmp_lt_hm_exec(struct rte_swx_pipeline *p)
7470 struct thread *t = &p->threads[p->thread_id];
7471 struct instruction *ip = t->ip;
7473 TRACE("[Thread %2u] jmplt (hm)\n", p->thread_id);
7475 JMP_CMP_HM(t, ip, <);
7479 instr_jmp_lt_hh_exec(struct rte_swx_pipeline *p)
7481 struct thread *t = &p->threads[p->thread_id];
7482 struct instruction *ip = t->ip;
7484 TRACE("[Thread %2u] jmplt (hh)\n", p->thread_id);
7486 JMP_CMP_HH(t, ip, <);
7490 instr_jmp_lt_mi_exec(struct rte_swx_pipeline *p)
7492 struct thread *t = &p->threads[p->thread_id];
7493 struct instruction *ip = t->ip;
7495 TRACE("[Thread %2u] jmplt (mi)\n", p->thread_id);
7497 JMP_CMP_MI(t, ip, <);
7501 instr_jmp_lt_hi_exec(struct rte_swx_pipeline *p)
7503 struct thread *t = &p->threads[p->thread_id];
7504 struct instruction *ip = t->ip;
7506 TRACE("[Thread %2u] jmplt (hi)\n", p->thread_id);
7508 JMP_CMP_HI(t, ip, <);
7512 instr_jmp_gt_exec(struct rte_swx_pipeline *p)
7514 struct thread *t = &p->threads[p->thread_id];
7515 struct instruction *ip = t->ip;
7517 TRACE("[Thread %2u] jmpgt\n", p->thread_id);
7523 instr_jmp_gt_mh_exec(struct rte_swx_pipeline *p)
7525 struct thread *t = &p->threads[p->thread_id];
7526 struct instruction *ip = t->ip;
7528 TRACE("[Thread %2u] jmpgt (mh)\n", p->thread_id);
7530 JMP_CMP_MH(t, ip, >);
7534 instr_jmp_gt_hm_exec(struct rte_swx_pipeline *p)
7536 struct thread *t = &p->threads[p->thread_id];
7537 struct instruction *ip = t->ip;
7539 TRACE("[Thread %2u] jmpgt (hm)\n", p->thread_id);
7541 JMP_CMP_HM(t, ip, >);
7545 instr_jmp_gt_hh_exec(struct rte_swx_pipeline *p)
7547 struct thread *t = &p->threads[p->thread_id];
7548 struct instruction *ip = t->ip;
7550 TRACE("[Thread %2u] jmpgt (hh)\n", p->thread_id);
7552 JMP_CMP_HH(t, ip, >);
7556 instr_jmp_gt_mi_exec(struct rte_swx_pipeline *p)
7558 struct thread *t = &p->threads[p->thread_id];
7559 struct instruction *ip = t->ip;
7561 TRACE("[Thread %2u] jmpgt (mi)\n", p->thread_id);
7563 JMP_CMP_MI(t, ip, >);
7567 instr_jmp_gt_hi_exec(struct rte_swx_pipeline *p)
7569 struct thread *t = &p->threads[p->thread_id];
7570 struct instruction *ip = t->ip;
7572 TRACE("[Thread %2u] jmpgt (hi)\n", p->thread_id);
7574 JMP_CMP_HI(t, ip, >);
7581 instr_return_translate(struct rte_swx_pipeline *p __rte_unused,
7582 struct action *action,
7583 char **tokens __rte_unused,
7585 struct instruction *instr,
7586 struct instruction_data *data __rte_unused)
7588 CHECK(action, EINVAL);
7589 CHECK(n_tokens == 1, EINVAL);
7591 instr->type = INSTR_RETURN;
7596 instr_return_exec(struct rte_swx_pipeline *p)
7598 struct thread *t = &p->threads[p->thread_id];
7600 TRACE("[Thread %2u] return\n", p->thread_id);
7606 instr_translate(struct rte_swx_pipeline *p,
7607 struct action *action,
7609 struct instruction *instr,
7610 struct instruction_data *data)
7612 char *tokens[RTE_SWX_INSTRUCTION_TOKENS_MAX];
7613 int n_tokens = 0, tpos = 0;
7615 /* Parse the instruction string into tokens. */
7619 token = strtok_r(string, " \t\v", &string);
7623 CHECK(n_tokens < RTE_SWX_INSTRUCTION_TOKENS_MAX, EINVAL);
7624 CHECK_NAME(token, EINVAL);
7626 tokens[n_tokens] = token;
7630 CHECK(n_tokens, EINVAL);
7632 /* Handle the optional instruction label. */
7633 if ((n_tokens >= 2) && !strcmp(tokens[1], ":")) {
7634 strcpy(data->label, tokens[0]);
7637 CHECK(n_tokens - tpos, EINVAL);
7640 /* Identify the instruction type. */
7641 if (!strcmp(tokens[tpos], "rx"))
7642 return instr_rx_translate(p,
7649 if (!strcmp(tokens[tpos], "tx"))
7650 return instr_tx_translate(p,
7657 if (!strcmp(tokens[tpos], "drop"))
7658 return instr_drop_translate(p,
7665 if (!strcmp(tokens[tpos], "extract"))
7666 return instr_hdr_extract_translate(p,
7673 if (!strcmp(tokens[tpos], "emit"))
7674 return instr_hdr_emit_translate(p,
7681 if (!strcmp(tokens[tpos], "validate"))
7682 return instr_hdr_validate_translate(p,
7689 if (!strcmp(tokens[tpos], "invalidate"))
7690 return instr_hdr_invalidate_translate(p,
7697 if (!strcmp(tokens[tpos], "mov"))
7698 return instr_mov_translate(p,
7705 if (!strcmp(tokens[tpos], "add"))
7706 return instr_alu_add_translate(p,
7713 if (!strcmp(tokens[tpos], "sub"))
7714 return instr_alu_sub_translate(p,
7721 if (!strcmp(tokens[tpos], "ckadd"))
7722 return instr_alu_ckadd_translate(p,
7729 if (!strcmp(tokens[tpos], "cksub"))
7730 return instr_alu_cksub_translate(p,
7737 if (!strcmp(tokens[tpos], "and"))
7738 return instr_alu_and_translate(p,
7745 if (!strcmp(tokens[tpos], "or"))
7746 return instr_alu_or_translate(p,
7753 if (!strcmp(tokens[tpos], "xor"))
7754 return instr_alu_xor_translate(p,
7761 if (!strcmp(tokens[tpos], "shl"))
7762 return instr_alu_shl_translate(p,
7769 if (!strcmp(tokens[tpos], "shr"))
7770 return instr_alu_shr_translate(p,
7777 if (!strcmp(tokens[tpos], "regprefetch"))
7778 return instr_regprefetch_translate(p,
7785 if (!strcmp(tokens[tpos], "regrd"))
7786 return instr_regrd_translate(p,
7793 if (!strcmp(tokens[tpos], "regwr"))
7794 return instr_regwr_translate(p,
7801 if (!strcmp(tokens[tpos], "regadd"))
7802 return instr_regadd_translate(p,
7809 if (!strcmp(tokens[tpos], "metprefetch"))
7810 return instr_metprefetch_translate(p,
7817 if (!strcmp(tokens[tpos], "meter"))
7818 return instr_meter_translate(p,
7825 if (!strcmp(tokens[tpos], "table"))
7826 return instr_table_translate(p,
7833 if (!strcmp(tokens[tpos], "extern"))
7834 return instr_extern_translate(p,
7841 if (!strcmp(tokens[tpos], "jmp"))
7842 return instr_jmp_translate(p,
7849 if (!strcmp(tokens[tpos], "jmpv"))
7850 return instr_jmp_valid_translate(p,
7857 if (!strcmp(tokens[tpos], "jmpnv"))
7858 return instr_jmp_invalid_translate(p,
7865 if (!strcmp(tokens[tpos], "jmph"))
7866 return instr_jmp_hit_translate(p,
7873 if (!strcmp(tokens[tpos], "jmpnh"))
7874 return instr_jmp_miss_translate(p,
7881 if (!strcmp(tokens[tpos], "jmpa"))
7882 return instr_jmp_action_hit_translate(p,
7889 if (!strcmp(tokens[tpos], "jmpna"))
7890 return instr_jmp_action_miss_translate(p,
7897 if (!strcmp(tokens[tpos], "jmpeq"))
7898 return instr_jmp_eq_translate(p,
7905 if (!strcmp(tokens[tpos], "jmpneq"))
7906 return instr_jmp_neq_translate(p,
7913 if (!strcmp(tokens[tpos], "jmplt"))
7914 return instr_jmp_lt_translate(p,
7921 if (!strcmp(tokens[tpos], "jmpgt"))
7922 return instr_jmp_gt_translate(p,
7929 if (!strcmp(tokens[tpos], "return"))
7930 return instr_return_translate(p,
7940 static struct instruction_data *
7941 label_find(struct instruction_data *data, uint32_t n, const char *label)
7945 for (i = 0; i < n; i++)
7946 if (!strcmp(label, data[i].label))
7953 label_is_used(struct instruction_data *data, uint32_t n, const char *label)
7955 uint32_t count = 0, i;
7960 for (i = 0; i < n; i++)
7961 if (!strcmp(label, data[i].jmp_label))
/* Validate instruction labels: every non-empty label must be unique, then
 * record the number of jump users per label in data->n_users.
 */
7968 instr_label_check(struct instruction_data *instruction_data,
7969 		  uint32_t n_instructions)
7973 	/* Check that all instruction labels are unique. */
7974 	for (i = 0; i < n_instructions; i++) {
7975 		struct instruction_data *data = &instruction_data[i];
7976 		char *label = data->label;
7982 		for (j = i + 1; j < n_instructions; j++)
7983 			CHECK(strcmp(label, data[j].label), EINVAL);
7986 	/* Get users for each instruction label. */
7987 	for (i = 0; i < n_instructions; i++) {
7988 		struct instruction_data *data = &instruction_data[i];
7989 		char *label = data->label;
7991 		data->n_users = label_is_used(instruction_data,
/* Resolve every jump instruction's target: find the labeled instruction and
 * patch instr->jmp.ip to point at the corresponding slot in instructions[].
 * Fails with EINVAL when a jump references a label that does not exist.
 */
8000 instr_jmp_resolve(struct instruction *instructions,
8001 		  struct instruction_data *instruction_data,
8002 		  uint32_t n_instructions)
8006 	for (i = 0; i < n_instructions; i++) {
8007 		struct instruction *instr = &instructions[i];
8008 		struct instruction_data *data = &instruction_data[i];
8009 		struct instruction_data *found;
8011 		if (!instruction_is_jmp(instr))
8014 		found = label_find(instruction_data,
8017 		CHECK(found, EINVAL);
8019 		instr->jmp.ip = &instructions[found - instruction_data];
/* Structural sanity checks on a translated instruction sequence. For the
 * pipeline-level program: first instruction must be rx, at least one tx must
 * exist, and the last instruction must be tx or an unconditional jump. For
 * the action path (second scope below), at least one return or tx is
 * required. Branch structure between the two scopes is elided in this
 * listing.
 */
8026 instr_verify(struct rte_swx_pipeline *p __rte_unused,
8028 	     struct instruction *instr,
8029 	     struct instruction_data *data __rte_unused,
8030 	     uint32_t n_instructions)
8033 		enum instruction_type type;
8036 		/* Check that the first instruction is rx. */
8037 		CHECK(instr[0].type == INSTR_RX, EINVAL);
8039 		/* Check that there is at least one tx instruction. */
8040 		for (i = 0; i < n_instructions; i++) {
8041 			type = instr[i].type;
8043 			if (instruction_is_tx(type))
8046 		CHECK(i < n_instructions, EINVAL);
8048 		/* Check that the last instruction is either tx or unconditional
8051 		type = instr[n_instructions - 1].type;
8052 		CHECK(instruction_is_tx(type) || (type == INSTR_JMP), EINVAL);
8056 		enum instruction_type type;
8059 		/* Check that there is at least one return or tx instruction. */
8060 		for (i = 0; i < n_instructions; i++) {
8061 			type = instr[i].type;
8063 			if ((type == INSTR_RETURN) || instruction_is_tx(type))
8066 		CHECK(i < n_instructions, EINVAL);
/* Compact the instruction array in place by dropping entries whose
 * instruction_data is marked invalid (set by the pattern optimizers).
 * Valid entries are copied down to position pos; returns the new count
 * (return line elided in this listing).
 */
8073 instr_compact(struct instruction *instructions,
8074 	      struct instruction_data *instruction_data,
8075 	      uint32_t n_instructions)
8077 	uint32_t i, pos = 0;
8079 	/* Eliminate the invalid instructions that have been optimized out. */
8080 	for (i = 0; i < n_instructions; i++) {
8081 		struct instruction *instr = &instructions[i];
8082 		struct instruction_data *data = &instruction_data[i];
8088 		memcpy(&instructions[pos], instr, sizeof(*instr));
8089 		memcpy(&instruction_data[pos], data, sizeof(*data));
/* Detect a run of consecutive, still-valid HDR_EXTRACT instructions that can
 * be fused into a single multi-header extract. The run length is capped by
 * the header_id[] array capacity, and any instruction after the first that
 * is a jump target (n_users != 0) terminates the run. Writes the run length
 * to *n_pattern_instr.
 */
8099 instr_pattern_extract_many_search(struct instruction *instr,
8100 				  struct instruction_data *data,
8102 				  uint32_t *n_pattern_instr)
8106 	for (i = 0; i < n_instr; i++) {
8107 		if (data[i].invalid)
8110 		if (instr[i].type != INSTR_HDR_EXTRACT)
8113 		if (i == RTE_DIM(instr->io.hdr.header_id))
8116 		if (i && data[i].n_users)
8123 	*n_pattern_instr = i;
/* Fuse a detected extract run: fold each subsequent extract's header/struct
 * id and byte count into slot i of the first instruction, then mark the
 * folded instructions invalid so instr_compact() removes them.
 */
8128 instr_pattern_extract_many_replace(struct instruction *instr,
8129 				   struct instruction_data *data,
8134 	for (i = 1; i < n_instr; i++) {
8136 		instr[0].io.hdr.header_id[i] = instr[i].io.hdr.header_id[0];
8137 		instr[0].io.hdr.struct_id[i] = instr[i].io.hdr.struct_id[0];
8138 		instr[0].io.hdr.n_bytes[i] = instr[i].io.hdr.n_bytes[0];
8140 		data[i].invalid = 1;
/* Driver for the extract-many optimization: scan the program, replace each
 * detected run, then compact out the invalidated instructions. Returns the
 * new instruction count.
 */
8145 instr_pattern_extract_many_optimize(struct instruction *instructions,
8146 				    struct instruction_data *instruction_data,
8147 				    uint32_t n_instructions)
8151 	for (i = 0; i < n_instructions; ) {
8152 		struct instruction *instr = &instructions[i];
8153 		struct instruction_data *data = &instruction_data[i];
8154 		uint32_t n_instr = 0;
8158 		detected = instr_pattern_extract_many_search(instr,
8163 			instr_pattern_extract_many_replace(instr,
8170 		/* No pattern starting at the current instruction. */
8174 	/* Eliminate the invalid instructions that have been optimized out. */
8175 	n_instructions = instr_compact(instructions,
8179 	return n_instructions;
/* Detect a run of valid HDR_EMIT instructions immediately followed by a tx
 * instruction. The emit run is capped by header_id[] capacity and broken by
 * jump targets; the trailing tx itself must not be a jump target either.
 * Writes the total pattern length (emits + tx) to *n_pattern_instr.
 */
8183 instr_pattern_emit_many_tx_search(struct instruction *instr,
8184 				  struct instruction_data *data,
8186 				  uint32_t *n_pattern_instr)
8190 	for (i = 0; i < n_instr; i++) {
8191 		if (data[i].invalid)
8194 		if (instr[i].type != INSTR_HDR_EMIT)
8197 		if (i == RTE_DIM(instr->io.hdr.header_id))
8200 		if (i && data[i].n_users)
8207 	if (!instruction_is_tx(instr[i].type))
8210 	if (data[i].n_users)
8215 	*n_pattern_instr = i;
/* Fuse an emit-run + tx pattern into the first instruction: fold each extra
 * emit's header/struct id and byte count, then absorb the tx operands
 * (offset, n_bits) from the final instruction. All folded instructions are
 * marked invalid for later compaction.
 */
8220 instr_pattern_emit_many_tx_replace(struct instruction *instr,
8221 				   struct instruction_data *data,
8226 	/* Any emit instruction in addition to the first one. */
8227 	for (i = 1; i < n_instr - 1; i++) {
8229 		instr[0].io.hdr.header_id[i] = instr[i].io.hdr.header_id[0];
8230 		instr[0].io.hdr.struct_id[i] = instr[i].io.hdr.struct_id[0];
8231 		instr[0].io.hdr.n_bytes[i] = instr[i].io.hdr.n_bytes[0];
8233 		data[i].invalid = 1;
8236 	/* The TX instruction is the last one in the pattern. */
8238 	instr[0].io.io.offset = instr[i].io.io.offset;
8239 	instr[0].io.io.n_bits = instr[i].io.io.n_bits;
8240 	data[i].invalid = 1;
/* Driver for the emit-many + tx optimization: scan, replace each detected
 * pattern, then compact out invalidated instructions. Returns the new
 * instruction count.
 */
8244 instr_pattern_emit_many_tx_optimize(struct instruction *instructions,
8245 				    struct instruction_data *instruction_data,
8246 				    uint32_t n_instructions)
8250 	for (i = 0; i < n_instructions; ) {
8251 		struct instruction *instr = &instructions[i];
8252 		struct instruction_data *data = &instruction_data[i];
8253 		uint32_t n_instr = 0;
8256 		/* Emit many + TX. */
8257 		detected = instr_pattern_emit_many_tx_search(instr,
8262 			instr_pattern_emit_many_tx_replace(instr,
8269 		/* No pattern starting at the current instruction. */
8273 	/* Eliminate the invalid instructions that have been optimized out. */
8274 	n_instructions = instr_compact(instructions,
8278 	return n_instructions;
/* Forward declaration: counts mov instructions sourcing a given action
 * argument (defined further below); needed here by the mov-all-validate
 * pattern search.
 */
8282 action_arg_src_mov_count(struct action *a,
8284 			 struct instruction *instructions,
8285 			 struct instruction_data *instruction_data,
8286 			 uint32_t n_instructions);
/* Detect the "mov all header fields from action args + validate header"
 * pattern, which can be fused into a single DMA_HT instruction. Conditions
 * checked below: the first MOV_HM targets field 0 of a header at offset 0
 * with matching bit widths; each subsequent MOV_HM copies consecutive
 * header/action-arg field pairs; the run ends with HDR_VALIDATE on the same
 * header; and none of the consumed action args is sourced by any other mov
 * (so changing their stored endianness later is safe). Writes the pattern
 * length to *n_pattern_instr.
 */
8289 instr_pattern_mov_all_validate_search(struct rte_swx_pipeline *p,
8291 				      struct instruction *instr,
8292 				      struct instruction_data *data,
8294 				      struct instruction *instructions,
8295 				      struct instruction_data *instruction_data,
8296 				      uint32_t n_instructions,
8297 				      uint32_t *n_pattern_instr)
8300 	uint32_t src_field_id, i, j;
8302 	/* Prerequisites. */
8306 	/* First instruction: MOV_HM. */
8307 	if (data[0].invalid || (instr[0].type != INSTR_MOV_HM))
8310 	h = header_find_by_struct_id(p, instr[0].mov.dst.struct_id);
8314 	for (src_field_id = 0; src_field_id < a->st->n_fields; src_field_id++)
8315 		if (instr[0].mov.src.offset == a->st->fields[src_field_id].offset / 8)
8318 	if (src_field_id == a->st->n_fields)
8321 	if (instr[0].mov.dst.offset ||
8322 	    (instr[0].mov.dst.n_bits != h->st->fields[0].n_bits) ||
8323 	    instr[0].mov.src.struct_id ||
8324 	    (instr[0].mov.src.n_bits != a->st->fields[src_field_id].n_bits) ||
8325 	    (instr[0].mov.dst.n_bits != instr[0].mov.src.n_bits))
8328 	if ((n_instr < h->st->n_fields + 1) ||
8329 	    (a->st->n_fields < src_field_id + h->st->n_fields + 1))
8332 	/* Subsequent instructions: MOV_HM. */
8333 	for (i = 1; i < h->st->n_fields; i++)
8334 		if (data[i].invalid ||
8336 		    (instr[i].type != INSTR_MOV_HM) ||
8337 		    (instr[i].mov.dst.struct_id != h->struct_id) ||
8338 		    (instr[i].mov.dst.offset != h->st->fields[i].offset / 8) ||
8339 		    (instr[i].mov.dst.n_bits != h->st->fields[i].n_bits) ||
8340 		    instr[i].mov.src.struct_id ||
8341 		    (instr[i].mov.src.offset != a->st->fields[src_field_id + i].offset / 8) ||
8342 		    (instr[i].mov.src.n_bits != a->st->fields[src_field_id + i].n_bits) ||
8343 		    (instr[i].mov.dst.n_bits != instr[i].mov.src.n_bits))
8346 	/* Last instruction: HDR_VALIDATE. */
8347 	if ((instr[i].type != INSTR_HDR_VALIDATE) ||
8348 	    (instr[i].valid.header_id != h->id))
8351 	/* Check that none of the action args that are used as source for this
8352 	 * DMA transfer are not used as source in any other mov instruction.
8354 	for (j = src_field_id; j < src_field_id + h->st->n_fields; j++) {
8357 		n_users = action_arg_src_mov_count(a,
8366 	*n_pattern_instr = 1 + i;
/* Rewrite a detected mov-all + validate pattern as one DMA_HT instruction:
 * the first instruction becomes the DMA (whole header, h->st->n_bits / 8
 * bytes, from the action-arg source offset), the rest are invalidated, and
 * the consumed action args are flagged to be stored in header (network)
 * endianness so the raw DMA copy is correct.
 */
8371 instr_pattern_mov_all_validate_replace(struct rte_swx_pipeline *p,
8373 				       struct instruction *instr,
8374 				       struct instruction_data *data,
8378 	uint32_t src_field_id, src_offset, i;
8380 	/* Read from the instructions before they are modified. */
8381 	h = header_find_by_struct_id(p, instr[0].mov.dst.struct_id);
8385 	for (src_field_id = 0; src_field_id < a->st->n_fields; src_field_id++)
8386 		if (instr[0].mov.src.offset == a->st->fields[src_field_id].offset / 8)
8389 	if (src_field_id == a->st->n_fields)
8392 	src_offset = instr[0].mov.src.offset;
8394 	/* Modify the instructions. */
8395 	instr[0].type = INSTR_DMA_HT;
8396 	instr[0].dma.dst.header_id[0] = h->id;
8397 	instr[0].dma.dst.struct_id[0] = h->struct_id;
8398 	instr[0].dma.src.offset[0] = (uint8_t)src_offset;
8399 	instr[0].dma.n_bytes[0] = h->st->n_bits / 8;
8401 	for (i = 1; i < n_instr; i++)
8402 		data[i].invalid = 1;
8404 	/* Update the endianness of the action arguments to header endianness. */
8405 	for (i = 0; i < h->st->n_fields; i++)
8406 		a->args_endianness[src_field_id + i] = 1;
/* Driver for the mov-all + validate optimization. The early return (guard
 * elided in this listing) skips the pass when prerequisites are not met;
 * otherwise scan, replace, and compact. Returns the new instruction count.
 */
8410 instr_pattern_mov_all_validate_optimize(struct rte_swx_pipeline *p,
8412 					struct instruction *instructions,
8413 					struct instruction_data *instruction_data,
8414 					uint32_t n_instructions)
8419 		return n_instructions;
8421 	for (i = 0; i < n_instructions; ) {
8422 		struct instruction *instr = &instructions[i];
8423 		struct instruction_data *data = &instruction_data[i];
8424 		uint32_t n_instr = 0;
8427 		/* Mov all + validate. */
8428 		detected = instr_pattern_mov_all_validate_search(p,
8438 			instr_pattern_mov_all_validate_replace(p, a, instr, data, n_instr);
8443 		/* No pattern starting at the current instruction. */
8447 	/* Eliminate the invalid instructions that have been optimized out. */
8448 	n_instructions = instr_compact(instructions,
8452 	return n_instructions;
/* Detect a run of consecutive, valid DMA_HT instructions that can be fused
 * into one multi-transfer DMA. Capped by dst.header_id[] capacity; broken by
 * jump targets after the first instruction. Writes run length to
 * *n_pattern_instr.
 */
8456 instr_pattern_dma_many_search(struct instruction *instr,
8457 			      struct instruction_data *data,
8459 			      uint32_t *n_pattern_instr)
8463 	for (i = 0; i < n_instr; i++) {
8464 		if (data[i].invalid)
8467 		if (instr[i].type != INSTR_DMA_HT)
8470 		if (i == RTE_DIM(instr->dma.dst.header_id))
8473 		if (i && data[i].n_users)
8480 	*n_pattern_instr = i;
/* Fuse a DMA run: fold each subsequent transfer's destination ids, source
 * offset, and byte count into slot i of the first instruction; invalidate
 * the folded instructions for later compaction.
 */
8485 instr_pattern_dma_many_replace(struct instruction *instr,
8486 			       struct instruction_data *data,
8491 	for (i = 1; i < n_instr; i++) {
8493 		instr[0].dma.dst.header_id[i] = instr[i].dma.dst.header_id[0];
8494 		instr[0].dma.dst.struct_id[i] = instr[i].dma.dst.struct_id[0];
8495 		instr[0].dma.src.offset[i] = instr[i].dma.src.offset[0];
8496 		instr[0].dma.n_bytes[i] = instr[i].dma.n_bytes[0];
8498 		data[i].invalid = 1;
/* Driver for the DMA-many optimization: scan, replace detected runs, then
 * compact out invalidated instructions. Returns the new instruction count.
 */
8503 instr_pattern_dma_many_optimize(struct instruction *instructions,
8504 				struct instruction_data *instruction_data,
8505 				uint32_t n_instructions)
8509 	for (i = 0; i < n_instructions; ) {
8510 		struct instruction *instr = &instructions[i];
8511 		struct instruction_data *data = &instruction_data[i];
8512 		uint32_t n_instr = 0;
8516 		detected = instr_pattern_dma_many_search(instr,
8521 			instr_pattern_dma_many_replace(instr, data, n_instr);
8526 		/* No pattern starting at the current instruction. */
8530 	/* Eliminate the invalid instructions that have been optimized out. */
8531 	n_instructions = instr_compact(instructions,
8535 	return n_instructions;
/* Run all peephole optimization passes in sequence: extract-many,
 * emit-many + tx, mov-all + validate, and dma-many. Each pass returns the
 * (possibly reduced) instruction count fed to the next.
 */
8539 instr_optimize(struct rte_swx_pipeline *p,
8541 	       struct instruction *instructions,
8542 	       struct instruction_data *instruction_data,
8543 	       uint32_t n_instructions)
8546 	n_instructions = instr_pattern_extract_many_optimize(instructions,
8550 	/* Emit many + TX. */
8551 	n_instructions = instr_pattern_emit_many_tx_optimize(instructions,
8555 	/* Mov all + validate. */
8556 	n_instructions = instr_pattern_mov_all_validate_optimize(p,
8563 	n_instructions = instr_pattern_dma_many_optimize(instructions,
8567 	return n_instructions;
/* Translate an array of instruction strings into executable form and attach
 * the result to either the action a (when non-NULL) or the pipeline p.
 * Steps: validate inputs, allocate instr/data arrays, translate each string
 * (strdup'd so the tokenizer can mutate it), check labels, verify structure,
 * optimize, and resolve jumps. Error-path cleanup is elided in this listing;
 * per CHECK_INSTRUCTION each string must be non-NULL and shorter than
 * RTE_SWX_INSTRUCTION_SIZE.
 */
8571 instruction_config(struct rte_swx_pipeline *p,
8573 		   const char **instructions,
8574 		   uint32_t n_instructions)
8576 	struct instruction *instr = NULL;
8577 	struct instruction_data *data = NULL;
8581 	CHECK(n_instructions, EINVAL);
8582 	CHECK(instructions, EINVAL);
8583 	for (i = 0; i < n_instructions; i++)
8584 		CHECK_INSTRUCTION(instructions[i], EINVAL);
8586 	/* Memory allocation. */
8587 	instr = calloc(n_instructions, sizeof(struct instruction));
8593 	data = calloc(n_instructions, sizeof(struct instruction_data));
8599 	for (i = 0; i < n_instructions; i++) {
8600 		char *string = strdup(instructions[i]);
8606 		err = instr_translate(p, a, string, &instr[i], &data[i]);
8615 	err = instr_label_check(data, n_instructions);
8619 	err = instr_verify(p, a, instr, data, n_instructions);
8623 	n_instructions = instr_optimize(p, a, instr, data, n_instructions);
8625 	err = instr_jmp_resolve(instr, data, n_instructions);
8630 		a->instructions = instr;
8631 		a->n_instructions = n_instructions;
8633 		p->instructions = instr;
8634 		p->n_instructions = n_instructions;
/* Instruction dispatch: one executor function per instruction_type, indexed
 * directly by ip->type (designated initializers keep the mapping explicit).
 * Groups: rx/tx, header extract/emit (fused variants 2..8), validate,
 * mov, DMA, ALU (add/sub/checksum/bitwise/shift), register and meter ops,
 * table/extern calls, jumps (plain, header/table state, comparisons), and
 * return. Suffixes encode operand locations (H = header, M = metadata,
 * I = immediate, R = register) — presumed from the naming scheme; confirm
 * against the instruction definitions earlier in the file.
 */
8646 typedef void (*instr_exec_t)(struct rte_swx_pipeline *);
8648 static instr_exec_t instruction_table[] = {
8649 	[INSTR_RX] = instr_rx_exec,
8650 	[INSTR_TX] = instr_tx_exec,
8651 	[INSTR_TX_I] = instr_tx_i_exec,
8653 	[INSTR_HDR_EXTRACT] = instr_hdr_extract_exec,
8654 	[INSTR_HDR_EXTRACT2] = instr_hdr_extract2_exec,
8655 	[INSTR_HDR_EXTRACT3] = instr_hdr_extract3_exec,
8656 	[INSTR_HDR_EXTRACT4] = instr_hdr_extract4_exec,
8657 	[INSTR_HDR_EXTRACT5] = instr_hdr_extract5_exec,
8658 	[INSTR_HDR_EXTRACT6] = instr_hdr_extract6_exec,
8659 	[INSTR_HDR_EXTRACT7] = instr_hdr_extract7_exec,
8660 	[INSTR_HDR_EXTRACT8] = instr_hdr_extract8_exec,
8662 	[INSTR_HDR_EMIT] = instr_hdr_emit_exec,
8663 	[INSTR_HDR_EMIT_TX] = instr_hdr_emit_tx_exec,
8664 	[INSTR_HDR_EMIT2_TX] = instr_hdr_emit2_tx_exec,
8665 	[INSTR_HDR_EMIT3_TX] = instr_hdr_emit3_tx_exec,
8666 	[INSTR_HDR_EMIT4_TX] = instr_hdr_emit4_tx_exec,
8667 	[INSTR_HDR_EMIT5_TX] = instr_hdr_emit5_tx_exec,
8668 	[INSTR_HDR_EMIT6_TX] = instr_hdr_emit6_tx_exec,
8669 	[INSTR_HDR_EMIT7_TX] = instr_hdr_emit7_tx_exec,
8670 	[INSTR_HDR_EMIT8_TX] = instr_hdr_emit8_tx_exec,
8672 	[INSTR_HDR_VALIDATE] = instr_hdr_validate_exec,
8673 	[INSTR_HDR_INVALIDATE] = instr_hdr_invalidate_exec,
8675 	[INSTR_MOV] = instr_mov_exec,
8676 	[INSTR_MOV_MH] = instr_mov_mh_exec,
8677 	[INSTR_MOV_HM] = instr_mov_hm_exec,
8678 	[INSTR_MOV_HH] = instr_mov_hh_exec,
8679 	[INSTR_MOV_I] = instr_mov_i_exec,
8681 	[INSTR_DMA_HT] = instr_dma_ht_exec,
8682 	[INSTR_DMA_HT2] = instr_dma_ht2_exec,
8683 	[INSTR_DMA_HT3] = instr_dma_ht3_exec,
8684 	[INSTR_DMA_HT4] = instr_dma_ht4_exec,
8685 	[INSTR_DMA_HT5] = instr_dma_ht5_exec,
8686 	[INSTR_DMA_HT6] = instr_dma_ht6_exec,
8687 	[INSTR_DMA_HT7] = instr_dma_ht7_exec,
8688 	[INSTR_DMA_HT8] = instr_dma_ht8_exec,
8690 	[INSTR_ALU_ADD] = instr_alu_add_exec,
8691 	[INSTR_ALU_ADD_MH] = instr_alu_add_mh_exec,
8692 	[INSTR_ALU_ADD_HM] = instr_alu_add_hm_exec,
8693 	[INSTR_ALU_ADD_HH] = instr_alu_add_hh_exec,
8694 	[INSTR_ALU_ADD_MI] = instr_alu_add_mi_exec,
8695 	[INSTR_ALU_ADD_HI] = instr_alu_add_hi_exec,
8697 	[INSTR_ALU_SUB] = instr_alu_sub_exec,
8698 	[INSTR_ALU_SUB_MH] = instr_alu_sub_mh_exec,
8699 	[INSTR_ALU_SUB_HM] = instr_alu_sub_hm_exec,
8700 	[INSTR_ALU_SUB_HH] = instr_alu_sub_hh_exec,
8701 	[INSTR_ALU_SUB_MI] = instr_alu_sub_mi_exec,
8702 	[INSTR_ALU_SUB_HI] = instr_alu_sub_hi_exec,
8704 	[INSTR_ALU_CKADD_FIELD] = instr_alu_ckadd_field_exec,
8705 	[INSTR_ALU_CKADD_STRUCT] = instr_alu_ckadd_struct_exec,
8706 	[INSTR_ALU_CKADD_STRUCT20] = instr_alu_ckadd_struct20_exec,
8707 	[INSTR_ALU_CKSUB_FIELD] = instr_alu_cksub_field_exec,
8709 	[INSTR_ALU_AND] = instr_alu_and_exec,
8710 	[INSTR_ALU_AND_MH] = instr_alu_and_mh_exec,
8711 	[INSTR_ALU_AND_HM] = instr_alu_and_hm_exec,
8712 	[INSTR_ALU_AND_HH] = instr_alu_and_hh_exec,
8713 	[INSTR_ALU_AND_I] = instr_alu_and_i_exec,
8715 	[INSTR_ALU_OR] = instr_alu_or_exec,
8716 	[INSTR_ALU_OR_MH] = instr_alu_or_mh_exec,
8717 	[INSTR_ALU_OR_HM] = instr_alu_or_hm_exec,
8718 	[INSTR_ALU_OR_HH] = instr_alu_or_hh_exec,
8719 	[INSTR_ALU_OR_I] = instr_alu_or_i_exec,
8721 	[INSTR_ALU_XOR] = instr_alu_xor_exec,
8722 	[INSTR_ALU_XOR_MH] = instr_alu_xor_mh_exec,
8723 	[INSTR_ALU_XOR_HM] = instr_alu_xor_hm_exec,
8724 	[INSTR_ALU_XOR_HH] = instr_alu_xor_hh_exec,
8725 	[INSTR_ALU_XOR_I] = instr_alu_xor_i_exec,
8727 	[INSTR_ALU_SHL] = instr_alu_shl_exec,
8728 	[INSTR_ALU_SHL_MH] = instr_alu_shl_mh_exec,
8729 	[INSTR_ALU_SHL_HM] = instr_alu_shl_hm_exec,
8730 	[INSTR_ALU_SHL_HH] = instr_alu_shl_hh_exec,
8731 	[INSTR_ALU_SHL_MI] = instr_alu_shl_mi_exec,
8732 	[INSTR_ALU_SHL_HI] = instr_alu_shl_hi_exec,
8734 	[INSTR_ALU_SHR] = instr_alu_shr_exec,
8735 	[INSTR_ALU_SHR_MH] = instr_alu_shr_mh_exec,
8736 	[INSTR_ALU_SHR_HM] = instr_alu_shr_hm_exec,
8737 	[INSTR_ALU_SHR_HH] = instr_alu_shr_hh_exec,
8738 	[INSTR_ALU_SHR_MI] = instr_alu_shr_mi_exec,
8739 	[INSTR_ALU_SHR_HI] = instr_alu_shr_hi_exec,
8741 	[INSTR_REGPREFETCH_RH] = instr_regprefetch_rh_exec,
8742 	[INSTR_REGPREFETCH_RM] = instr_regprefetch_rm_exec,
8743 	[INSTR_REGPREFETCH_RI] = instr_regprefetch_ri_exec,
8745 	[INSTR_REGRD_HRH] = instr_regrd_hrh_exec,
8746 	[INSTR_REGRD_HRM] = instr_regrd_hrm_exec,
8747 	[INSTR_REGRD_MRH] = instr_regrd_mrh_exec,
8748 	[INSTR_REGRD_MRM] = instr_regrd_mrm_exec,
8749 	[INSTR_REGRD_HRI] = instr_regrd_hri_exec,
8750 	[INSTR_REGRD_MRI] = instr_regrd_mri_exec,
8752 	[INSTR_REGWR_RHH] = instr_regwr_rhh_exec,
8753 	[INSTR_REGWR_RHM] = instr_regwr_rhm_exec,
8754 	[INSTR_REGWR_RMH] = instr_regwr_rmh_exec,
8755 	[INSTR_REGWR_RMM] = instr_regwr_rmm_exec,
8756 	[INSTR_REGWR_RHI] = instr_regwr_rhi_exec,
8757 	[INSTR_REGWR_RMI] = instr_regwr_rmi_exec,
8758 	[INSTR_REGWR_RIH] = instr_regwr_rih_exec,
8759 	[INSTR_REGWR_RIM] = instr_regwr_rim_exec,
8760 	[INSTR_REGWR_RII] = instr_regwr_rii_exec,
8762 	[INSTR_REGADD_RHH] = instr_regadd_rhh_exec,
8763 	[INSTR_REGADD_RHM] = instr_regadd_rhm_exec,
8764 	[INSTR_REGADD_RMH] = instr_regadd_rmh_exec,
8765 	[INSTR_REGADD_RMM] = instr_regadd_rmm_exec,
8766 	[INSTR_REGADD_RHI] = instr_regadd_rhi_exec,
8767 	[INSTR_REGADD_RMI] = instr_regadd_rmi_exec,
8768 	[INSTR_REGADD_RIH] = instr_regadd_rih_exec,
8769 	[INSTR_REGADD_RIM] = instr_regadd_rim_exec,
8770 	[INSTR_REGADD_RII] = instr_regadd_rii_exec,
8772 	[INSTR_METPREFETCH_H] = instr_metprefetch_h_exec,
8773 	[INSTR_METPREFETCH_M] = instr_metprefetch_m_exec,
8774 	[INSTR_METPREFETCH_I] = instr_metprefetch_i_exec,
8776 	[INSTR_METER_HHM] = instr_meter_hhm_exec,
8777 	[INSTR_METER_HHI] = instr_meter_hhi_exec,
8778 	[INSTR_METER_HMM] = instr_meter_hmm_exec,
8779 	[INSTR_METER_HMI] = instr_meter_hmi_exec,
8780 	[INSTR_METER_MHM] = instr_meter_mhm_exec,
8781 	[INSTR_METER_MHI] = instr_meter_mhi_exec,
8782 	[INSTR_METER_MMM] = instr_meter_mmm_exec,
8783 	[INSTR_METER_MMI] = instr_meter_mmi_exec,
8784 	[INSTR_METER_IHM] = instr_meter_ihm_exec,
8785 	[INSTR_METER_IHI] = instr_meter_ihi_exec,
8786 	[INSTR_METER_IMM] = instr_meter_imm_exec,
8787 	[INSTR_METER_IMI] = instr_meter_imi_exec,
8789 	[INSTR_TABLE] = instr_table_exec,
8790 	[INSTR_EXTERN_OBJ] = instr_extern_obj_exec,
8791 	[INSTR_EXTERN_FUNC] = instr_extern_func_exec,
8793 	[INSTR_JMP] = instr_jmp_exec,
8794 	[INSTR_JMP_VALID] = instr_jmp_valid_exec,
8795 	[INSTR_JMP_INVALID] = instr_jmp_invalid_exec,
8796 	[INSTR_JMP_HIT] = instr_jmp_hit_exec,
8797 	[INSTR_JMP_MISS] = instr_jmp_miss_exec,
8798 	[INSTR_JMP_ACTION_HIT] = instr_jmp_action_hit_exec,
8799 	[INSTR_JMP_ACTION_MISS] = instr_jmp_action_miss_exec,
8801 	[INSTR_JMP_EQ] = instr_jmp_eq_exec,
8802 	[INSTR_JMP_EQ_MH] = instr_jmp_eq_mh_exec,
8803 	[INSTR_JMP_EQ_HM] = instr_jmp_eq_hm_exec,
8804 	[INSTR_JMP_EQ_HH] = instr_jmp_eq_hh_exec,
8805 	[INSTR_JMP_EQ_I] = instr_jmp_eq_i_exec,
8807 	[INSTR_JMP_NEQ] = instr_jmp_neq_exec,
8808 	[INSTR_JMP_NEQ_MH] = instr_jmp_neq_mh_exec,
8809 	[INSTR_JMP_NEQ_HM] = instr_jmp_neq_hm_exec,
8810 	[INSTR_JMP_NEQ_HH] = instr_jmp_neq_hh_exec,
8811 	[INSTR_JMP_NEQ_I] = instr_jmp_neq_i_exec,
8813 	[INSTR_JMP_LT] = instr_jmp_lt_exec,
8814 	[INSTR_JMP_LT_MH] = instr_jmp_lt_mh_exec,
8815 	[INSTR_JMP_LT_HM] = instr_jmp_lt_hm_exec,
8816 	[INSTR_JMP_LT_HH] = instr_jmp_lt_hh_exec,
8817 	[INSTR_JMP_LT_MI] = instr_jmp_lt_mi_exec,
8818 	[INSTR_JMP_LT_HI] = instr_jmp_lt_hi_exec,
8820 	[INSTR_JMP_GT] = instr_jmp_gt_exec,
8821 	[INSTR_JMP_GT_MH] = instr_jmp_gt_mh_exec,
8822 	[INSTR_JMP_GT_HM] = instr_jmp_gt_hm_exec,
8823 	[INSTR_JMP_GT_HH] = instr_jmp_gt_hh_exec,
8824 	[INSTR_JMP_GT_MI] = instr_jmp_gt_mi_exec,
8825 	[INSTR_JMP_GT_HI] = instr_jmp_gt_hi_exec,
8827 	[INSTR_RETURN] = instr_return_exec,
/* Execute the current thread's instruction: fetch ip from the active thread
 * and dispatch through instruction_table by instruction type.
 */
8831 instr_exec(struct rte_swx_pipeline *p)
8833 	struct thread *t = &p->threads[p->thread_id];
8834 	struct instruction *ip = t->ip;
8835 	instr_exec_t instr = instruction_table[ip->type];
/* Find an action by name in the pipeline's action tailq; NULL handling and
 * return lines are elided in this listing.
 */
8843 static struct action *
8844 action_find(struct rte_swx_pipeline *p, const char *name)
8846 	struct action *elem;
8851 	TAILQ_FOREACH(elem, &p->actions, node)
8852 		if (strcmp(elem->name, name) == 0)
/* Find an action by numeric id (linear scan of the action tailq). */
8858 static struct action *
8859 action_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
8861 	struct action *action = NULL;
8863 	TAILQ_FOREACH(action, &p->actions, node)
8864 		if (action->id == id)
/* Look up a field in the action's argument struct type; NULL when the
 * action takes no arguments (a->st == NULL).
 */
8870 static struct field *
8871 action_field_find(struct action *a, const char *name)
8873 	return a->st ? struct_type_field_find(a->st, name) : NULL;
/* Parse an action-argument reference of the form "t.<field>": require the
 * "t." prefix, then resolve the remainder as an action field name.
 */
8876 static struct field *
8877 action_field_parse(struct action *action, const char *name)
8879 	if (name[0] != 't' || name[1] != '.')
8882 	return action_field_find(action, &name[2]);
/* Public API: register a new action with the pipeline. Validates the name
 * (unique, bounded length) and optional argument struct type, allocates the
 * action node plus its per-argument endianness array, translates the
 * instruction strings via instruction_config(), and appends the node to
 * p->actions. Error unwinding between the elided lines frees partial
 * allocations.
 */
8886 rte_swx_pipeline_action_config(struct rte_swx_pipeline *p,
8888 			       const char *args_struct_type_name,
8889 			       const char **instructions,
8890 			       uint32_t n_instructions)
8892 	struct struct_type *args_struct_type;
8898 	CHECK_NAME(name, EINVAL);
8899 	CHECK(!action_find(p, name), EEXIST);
8901 	if (args_struct_type_name) {
8902 		CHECK_NAME(args_struct_type_name, EINVAL);
8903 		args_struct_type = struct_type_find(p, args_struct_type_name);
8904 		CHECK(args_struct_type, EINVAL);
8906 		args_struct_type = NULL;
8909 	/* Node allocation. */
8910 	a = calloc(1, sizeof(struct action));
8912 	if (args_struct_type) {
8913 		a->args_endianness = calloc(args_struct_type->n_fields, sizeof(int));
8914 		if (!a->args_endianness) {
8920 	/* Node initialization. */
8921 	strcpy(a->name, name);
8922 	a->st = args_struct_type;
8923 	a->id = p->n_actions;
8925 	/* Instruction translation. */
8926 	err = instruction_config(p, a, instructions, n_instructions);
8928 		free(a->args_endianness);
8933 	/* Node add to tailq. */
8934 	TAILQ_INSERT_TAIL(&p->actions, a, node);
/* Build step: create the id-indexed p->action_instructions lookup array so
 * the data path can dispatch by action id without walking the tailq.
 */
8941 action_build(struct rte_swx_pipeline *p)
8943 	struct action *action;
8945 	p->action_instructions = calloc(p->n_actions,
8946 					sizeof(struct instruction *));
8947 	CHECK(p->action_instructions, ENOMEM);
8949 	TAILQ_FOREACH(action, &p->actions, node)
8950 		p->action_instructions[action->id] = action->instructions;
/* Undo action_build(): release the id-indexed lookup array. */
8956 action_build_free(struct rte_swx_pipeline *p)
8958 	free(p->action_instructions);
8959 	p->action_instructions = NULL;
/* Free all action nodes: first the build artifacts, then pop each action
 * off the tailq and release its instruction array (remaining per-node frees
 * are elided in this listing).
 */
8963 action_free(struct rte_swx_pipeline *p)
8965 	action_build_free(p);
8968 		struct action *action;
8970 		action = TAILQ_FIRST(&p->actions);
8974 		TAILQ_REMOVE(&p->actions, action, node);
8975 		free(action->instructions);
/* Count mov instructions (plain MOV or MOV_HM) whose source is the action
 * argument arg_id, identified by matching byte offset within the action's
 * argument struct. Used by the mov-all-validate optimizer to ensure an
 * argument's stored endianness can be changed safely. Guard clauses (elided
 * continuation) return 0 for invalid inputs.
 */
8981 action_arg_src_mov_count(struct action *a,
8983 			 struct instruction *instructions,
8984 			 struct instruction_data *instruction_data,
8985 			 uint32_t n_instructions)
8987 	uint32_t offset, n_users = 0, i;
8990 	    (arg_id >= a->st->n_fields) ||
8992 	    !instruction_data ||
8996 	offset = a->st->fields[arg_id].offset / 8;
8998 	for (i = 0; i < n_instructions; i++) {
8999 		struct instruction *instr = &instructions[i];
9000 		struct instruction_data *data = &instruction_data[i];
9002 		if (data->invalid ||
9003 		    ((instr->type != INSTR_MOV) && (instr->type != INSTR_MOV_HM)) ||
9004 		    instr->mov.src.struct_id ||
9005 		    (instr->mov.src.offset != offset))
/* Find a registered table type by name (linear scan of p->table_types). */
9017 static struct table_type *
9018 table_type_find(struct rte_swx_pipeline *p, const char *name)
9020 	struct table_type *elem;
9022 	TAILQ_FOREACH(elem, &p->table_types, node)
9023 		if (strcmp(elem->name, name) == 0)
/* Pick a table type implementation: prefer the recommended type when its
 * registered match type agrees with the requested one; otherwise fall back
 * to the first registered type with the requested match type.
 */
9029 static struct table_type *
9030 table_type_resolve(struct rte_swx_pipeline *p,
9031 		   const char *recommended_type_name,
9032 		   enum rte_swx_table_match_type match_type)
9034 	struct table_type *elem;
9036 	/* Only consider the recommended type if the match type is correct. */
9037 	if (recommended_type_name)
9038 		TAILQ_FOREACH(elem, &p->table_types, node)
9039 			if (!strcmp(elem->name, recommended_type_name) &&
9040 			    (elem->match_type == match_type))
9043 	/* Ignore the recommended type and get the first element with this match
9046 	TAILQ_FOREACH(elem, &p->table_types, node)
9047 		if (elem->match_type == match_type)
/* Find a table by name (linear scan of p->tables). */
9053 static struct table *
9054 table_find(struct rte_swx_pipeline *p, const char *name)
9058 	TAILQ_FOREACH(elem, &p->tables, node)
9059 		if (strcmp(elem->name, name) == 0)
/* Find a table by numeric id (linear scan of p->tables). */
9065 static struct table *
9066 table_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
9068 	struct table *table = NULL;
9070 	TAILQ_FOREACH(table, &p->tables, node)
9071 		if (table->id == id)
/* Public API: register a table type implementation under a unique name.
 * Requires at least the create, lkp, and free callbacks; copies the full
 * ops vtable into the node so the caller's struct need not outlive the call.
 */
9078 rte_swx_pipeline_table_type_register(struct rte_swx_pipeline *p,
9080 				     enum rte_swx_table_match_type match_type,
9081 				     struct rte_swx_table_ops *ops)
9083 	struct table_type *elem;
9087 	CHECK_NAME(name, EINVAL);
9088 	CHECK(!table_type_find(p, name), EEXIST);
9091 	CHECK(ops->create, EINVAL);
9092 	CHECK(ops->lkp, EINVAL);
9093 	CHECK(ops->free, EINVAL);
9095 	/* Node allocation. */
9096 	elem = calloc(1, sizeof(struct table_type));
9097 	CHECK(elem, ENOMEM);
9099 	/* Node initialization. */
9100 	strcpy(elem->name, name);
9101 	elem->match_type = match_type;
9102 	memcpy(&elem->ops, ops, sizeof(*ops));
9104 	/* Node add to tailq. */
9105 	TAILQ_INSERT_TAIL(&p->table_types, elem, node);
/* Classify the overall table match type from its field list: all-exact ->
 * EXACT; all-exact except an LPM field at the maximum offset -> LPM;
 * anything else -> WILDCARD.
 */
9110 static enum rte_swx_table_match_type
9111 table_match_type_resolve(struct rte_swx_match_field_params *fields,
9113 			 uint32_t max_offset_field_id)
9115 	uint32_t n_fields_em = 0, i;
9117 	for (i = 0; i < n_fields; i++)
9118 		if (fields[i].match_type == RTE_SWX_TABLE_MATCH_EXACT)
9121 	if (n_fields_em == n_fields)
9122 		return RTE_SWX_TABLE_MATCH_EXACT;
9124 	if ((n_fields_em == n_fields - 1) &&
9125 	    (fields[max_offset_field_id].match_type == RTE_SWX_TABLE_MATCH_LPM))
9126 		return RTE_SWX_TABLE_MATCH_LPM;
9128 	return RTE_SWX_TABLE_MATCH_WILDCARD;
/* Validate a table's match field list: every field must resolve either as a
 * field of one common header (h0) or as a metadata field — mixing headers,
 * or header + metadata, is rejected; duplicate offsets are rejected. On
 * success reports the enclosing header (if any) and the indices of the
 * fields with the smallest and largest offsets. The offset[] scratch array
 * is freed on all paths (cleanup lines elided in this listing).
 */
9132 table_match_fields_check(struct rte_swx_pipeline *p,
9133 			 struct rte_swx_pipeline_table_params *params,
9134 			 struct header **header,
9135 			 uint32_t *min_offset_field_id,
9136 			 uint32_t *max_offset_field_id)
9138 	struct header *h0 = NULL;
9139 	struct field *hf, *mf;
9140 	uint32_t *offset = NULL, min_offset, max_offset, min_offset_pos, max_offset_pos, i;
9143 	/* Return if no match fields. */
9144 	if (!params->n_fields) {
9145 		if (params->fields) {
9153 	/* Memory allocation. */
9154 	offset = calloc(params->n_fields, sizeof(uint32_t));
9160 	/* Check that all the match fields belong to either the same header or
9163 	hf = header_field_parse(p, params->fields[0].name, &h0);
9164 	mf = metadata_field_parse(p, params->fields[0].name);
9170 	offset[0] = h0 ? hf->offset : mf->offset;
9172 	for (i = 1; i < params->n_fields; i++)
9176 			hf = header_field_parse(p, params->fields[i].name, &h);
9177 			if (!hf || (h->id != h0->id)) {
9182 			offset[i] = hf->offset;
9184 			mf = metadata_field_parse(p, params->fields[i].name);
9190 			offset[i] = mf->offset;
9193 	/* Check that there are no duplicated match fields. */
9194 	for (i = 0; i < params->n_fields; i++) {
9197 		for (j = 0; j < i; j++)
9198 			if (offset[j] == offset[i]) {
9204 	/* Find the min and max offset fields. */
9205 	min_offset = offset[0];
9206 	max_offset = offset[0];
9210 	for (i = 1; i < params->n_fields; i++) {
9211 		if (offset[i] < min_offset) {
9212 			min_offset = offset[i];
9216 		if (offset[i] > max_offset) {
9217 			max_offset = offset[i];
9226 	if (min_offset_field_id)
9227 		*min_offset_field_id = min_offset_pos;
9229 	if (max_offset_field_id)
9230 		*max_offset_field_id = max_offset_pos;
/* Public API: create a table. Validates the name and params, checks the
 * match fields, verifies every referenced action exists (tracking the
 * largest action data size), validates the default action, resolves the
 * table type from the match fields, then allocates and initializes the
 * table node and appends it to p->tables. Error unwinding between the
 * elided lines frees partial allocations.
 */
9238 rte_swx_pipeline_table_config(struct rte_swx_pipeline *p,
9240 			      struct rte_swx_pipeline_table_params *params,
9241 			      const char *recommended_table_type_name,
9245 	struct table_type *type;
9247 	struct action *default_action;
9248 	struct header *header = NULL;
9249 	uint32_t action_data_size_max = 0, min_offset_field_id = 0, max_offset_field_id = 0, i;
9254 	CHECK_NAME(name, EINVAL);
9255 	CHECK(!table_find(p, name), EEXIST);
9257 	CHECK(params, EINVAL);
9260 	status = table_match_fields_check(p,
9263 					  &min_offset_field_id,
9264 					  &max_offset_field_id);
9268 	/* Action checks. */
9269 	CHECK(params->n_actions, EINVAL);
9270 	CHECK(params->action_names, EINVAL);
9271 	for (i = 0; i < params->n_actions; i++) {
9272 		const char *action_name = params->action_names[i];
9274 		uint32_t action_data_size;
9276 		CHECK_NAME(action_name, EINVAL);
9278 		a = action_find(p, action_name);
9281 		action_data_size = a->st ? a->st->n_bits / 8 : 0;
9282 		if (action_data_size > action_data_size_max)
9283 			action_data_size_max = action_data_size;
9286 	CHECK_NAME(params->default_action_name, EINVAL);
/* NOTE(review): loop below is bounded by p->n_actions but indexes
 * params->action_names[] — should likely be params->n_actions (potential
 * out-of-bounds read when the pipeline has more actions than this table);
 * confirm against upstream DPDK.
 */
9287 	for (i = 0; i < p->n_actions; i++)
9288 		if (!strcmp(params->action_names[i],
9289 			    params->default_action_name))
9291 	CHECK(i < params->n_actions, EINVAL);
9292 	default_action = action_find(p, params->default_action_name);
9293 	CHECK((default_action->st && params->default_action_data) ||
9294 	      !params->default_action_data, EINVAL);
9296 	/* Table type checks. */
9297 	if (recommended_table_type_name)
9298 		CHECK_NAME(recommended_table_type_name, EINVAL);
9300 	if (params->n_fields) {
9301 		enum rte_swx_table_match_type match_type;
9303 		match_type = table_match_type_resolve(params->fields,
9305 						      max_offset_field_id);
9306 		type = table_type_resolve(p,
9307 					  recommended_table_type_name,
9309 		CHECK(type, EINVAL);
9314 	/* Memory allocation. */
9315 	t = calloc(1, sizeof(struct table));
9318 	t->fields = calloc(params->n_fields, sizeof(struct match_field));
9324 	t->actions = calloc(params->n_actions, sizeof(struct action *));
9331 	if (action_data_size_max) {
9332 		t->default_action_data = calloc(1, action_data_size_max);
9333 		if (!t->default_action_data) {
9341 	/* Node initialization. */
9342 	strcpy(t->name, name);
9343 	if (args && args[0])
9344 		strcpy(t->args, args);
9347 	for (i = 0; i < params->n_fields; i++) {
/* NOTE(review): '¶ms' below is a mangled '&params' (HTML entity
 * corruption in this listing) — restore the ampersand in the real source.
 */
9348 		struct rte_swx_match_field_params *field = ¶ms->fields[i];
9349 		struct match_field *f = &t->fields[i];
9351 		f->match_type = field->match_type;
9353 			header_field_parse(p, field->name, NULL) :
9354 			metadata_field_parse(p, field->name);
9356 	t->n_fields = params->n_fields;
9359 	for (i = 0; i < params->n_actions; i++)
9360 		t->actions[i] = action_find(p, params->action_names[i]);
9361 	t->default_action = default_action;
9362 	if (default_action->st)
9363 		memcpy(t->default_action_data,
9364 		       params->default_action_data,
9365 		       default_action->st->n_bits / 8);
9366 	t->n_actions = params->n_actions;
9367 	t->default_action_is_const = params->default_action_is_const;
9368 	t->action_data_size_max = action_data_size_max;
9371 	t->id = p->n_tables;
9373 	/* Node add to tailq. */
9374 	TAILQ_INSERT_TAIL(&p->tables, t, node);
/* Build the low-level rte_swx_table_params for a table: compute the key
 * window (byte offset of the first field through the end of the last field),
 * a key mask with 0xFF over each field's bytes, the largest per-action data
 * size, and the table capacity. Returns a heap-allocated params struct that
 * the caller releases with table_params_free().
 */
9380 static struct rte_swx_table_params *
9381 table_params_get(struct table *table)
9383 	struct rte_swx_table_params *params;
9384 	struct field *first, *last;
9386 	uint32_t key_size, key_offset, action_data_size, i;
9388 	/* Memory allocation. */
9389 	params = calloc(1, sizeof(struct rte_swx_table_params));
9393 	/* Find first (smallest offset) and last (biggest offset) match fields. */
9394 	first = table->fields[0].field;
9395 	last = table->fields[0].field;
9397 	for (i = 0; i < table->n_fields; i++) {
9398 		struct field *f = table->fields[i].field;
9400 		if (f->offset < first->offset)
9403 		if (f->offset > last->offset)
9407 	/* Key offset and size. */
9408 	key_offset = first->offset / 8;
9409 	key_size = (last->offset + last->n_bits - first->offset) / 8;
9411 	/* Memory allocation. */
9412 	key_mask = calloc(1, key_size);
9419 	for (i = 0; i < table->n_fields; i++) {
9420 		struct field *f = table->fields[i].field;
9421 		uint32_t start = (f->offset - first->offset) / 8;
9422 		size_t size = f->n_bits / 8;
9424 		memset(&key_mask[start], 0xFF, size);
9427 	/* Action data size. */
9428 	action_data_size = 0;
9429 	for (i = 0; i < table->n_actions; i++) {
9430 		struct action *action = table->actions[i];
9431 		uint32_t ads = action->st ? action->st->n_bits / 8 : 0;
9433 		if (ads > action_data_size)
9434 			action_data_size = ads;
9438 	params->match_type = table->type->match_type;
9439 	params->key_size = key_size;
9440 	params->key_offset = key_offset;
9441 	params->key_mask0 = key_mask;
9442 	params->action_data_size = action_data_size;
9443 	params->n_keys_max = table->size;
/* Release a params struct built by table_params_get(), including its
 * owned key mask buffer.
 */
9449 table_params_free(struct rte_swx_table_params *params)
9454 	free(params->key_mask0);
/* Build the per-table runtime state array: for each table, create the
 * underlying table object via its type's create() callback (skipped for
 * typeless/stub tables per the elided guard), copy in the default action
 * data, and record the default action id. The temporary params are freed
 * whether or not create() succeeds.
 */
9459 table_state_build(struct rte_swx_pipeline *p)
9461 	struct table *table;
9463 	p->table_state = calloc(p->n_tables,
9464 				sizeof(struct rte_swx_table_state));
9465 	CHECK(p->table_state, ENOMEM);
9467 	TAILQ_FOREACH(table, &p->tables, node) {
9468 		struct rte_swx_table_state *ts = &p->table_state[table->id];
9471 			struct rte_swx_table_params *params;
9474 			params = table_params_get(table);
9475 			CHECK(params, ENOMEM);
9477 			ts->obj = table->type->ops.create(params,
9482 			table_params_free(params);
9483 			CHECK(ts->obj, ENODEV);
9486 		/* ts->default_action_data. */
9487 		if (table->action_data_size_max) {
9488 			ts->default_action_data =
9489 				malloc(table->action_data_size_max);
9490 			CHECK(ts->default_action_data, ENOMEM);
9492 			memcpy(ts->default_action_data,
9493 			       table->default_action_data,
9494 			       table->action_data_size_max);
9497 		/* ts->default_action_id. */
9498 		ts->default_action_id = table->default_action->id;
/* Tear down the per-table runtime state: destroy each table object via its
 * type ops (when present), free the default action data copies, then the
 * state array itself. Safe to call when nothing was built. */
9505 table_state_build_free(struct rte_swx_pipeline *p)
9509 if (!p->table_state)
9512 for (i = 0; i < p->n_tables; i++) {
9513 struct rte_swx_table_state *ts = &p->table_state[i];
9514 struct table *table = table_find_by_id(p, i);
/* Stub tables (no type) have no object to destroy. */
9517 if (table->type && ts->obj)
9518 table->type->ops.free(ts->obj);
9520 /* ts->default_action_data. */
9521 free(ts->default_action_data);
9524 free(p->table_state);
9525 p->table_state = NULL;
/* Public-facing free path simply delegates to the build-free routine. */
9529 table_state_free(struct rte_swx_pipeline *p)
9531 table_state_build_free(p);
/* Lookup function installed for tables without a type (stub tables): always
 * reports lookup complete with a miss; all parameters are unused. */
9535 table_stub_lkp(void *table __rte_unused,
9536 void *mailbox __rte_unused,
9537 uint8_t **key __rte_unused,
9538 uint64_t *action_id __rte_unused,
9539 uint8_t **action_data __rte_unused,
9543 return 1; /* DONE. */
/* Build the table data plane structures: pipeline-level per-table/per-action
 * statistics counters, and for every thread a table_runtime array holding the
 * lookup function, mailbox and key pointer for each table. Stub tables get
 * table_stub_lkp instead of a type-provided lookup. */
9547 table_build(struct rte_swx_pipeline *p)
9551 /* Per pipeline: table statistics. */
9552 p->table_stats = calloc(p->n_tables, sizeof(struct table_statistics));
9553 CHECK(p->table_stats, ENOMEM);
9555 for (i = 0; i < p->n_tables; i++) {
9556 p->table_stats[i].n_pkts_action = calloc(p->n_actions, sizeof(uint64_t));
9557 CHECK(p->table_stats[i].n_pkts_action, ENOMEM);
9560 /* Per thread: table runt-time. */
9561 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
9562 struct thread *t = &p->threads[i];
9563 struct table *table;
9565 t->tables = calloc(p->n_tables, sizeof(struct table_runtime));
9566 CHECK(t->tables, ENOMEM);
9568 TAILQ_FOREACH(table, &p->tables, node) {
9569 struct table_runtime *r = &t->tables[table->id];
/* Typed table: per-thread mailbox sized by the table type. */
9574 size = table->type->ops.mailbox_size_get();
9577 r->func = table->type->ops.lkp;
9581 r->mailbox = calloc(1, size);
9582 CHECK(r->mailbox, ENOMEM);
/* Key points into the matched header struct, or metadata when the
 * table matches on metadata fields only. */
9586 r->key = table->header ?
9587 &t->structs[table->header->struct_id] :
9588 &t->structs[p->metadata_struct_id];
/* Stub table: always-miss lookup, no mailbox/key needed. */
9590 r->func = table_stub_lkp;
/* Undo table_build(): free every thread's per-table runtime (mailboxes) and
 * the pipeline's per-table statistics arrays. */
9599 table_build_free(struct rte_swx_pipeline *p)
9603 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
9604 struct thread *t = &p->threads[i];
9610 for (j = 0; j < p->n_tables; j++) {
9611 struct table_runtime *r = &t->tables[j];
/* Per-action packet counters were allocated per table in table_build(). */
9620 if (p->table_stats) {
9621 for (i = 0; i < p->n_tables; i++)
9622 free(p->table_stats[i].n_pkts_action);
9624 free(p->table_stats);
/* Free all table definitions: first the build-time structures, then drain the
 * tables tailq (each node owns its actions array and default action data),
 * and finally drain the registered table types tailq. */
9629 table_free(struct rte_swx_pipeline *p)
9631 table_build_free(p);
9637 elem = TAILQ_FIRST(&p->tables);
9641 TAILQ_REMOVE(&p->tables, elem, node);
9643 free(elem->actions);
9644 free(elem->default_action_data);
/* Table types. */
9650 struct table_type *elem;
9652 elem = TAILQ_FIRST(&p->table_types);
9656 TAILQ_REMOVE(&p->table_types, elem, node);
/* Look up a register array by name; linear scan of the regarrays tailq. */
9664 static struct regarray *
9665 regarray_find(struct rte_swx_pipeline *p, const char *name)
9667 struct regarray *elem;
9669 TAILQ_FOREACH(elem, &p->regarrays, node)
9670 if (!strcmp(elem->name, name))
/* Look up a register array by its numeric id. */
9676 static struct regarray *
9677 regarray_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
9679 struct regarray *elem = NULL;
9681 TAILQ_FOREACH(elem, &p->regarrays, node)
/* Public API: register a new register array with the pipeline. The name must
 * be unique; size is rounded up to the next power of two (enables mask-based
 * indexing at run time); init_val seeds every element at build time. */
9689 rte_swx_pipeline_regarray_config(struct rte_swx_pipeline *p,
9698 CHECK_NAME(name, EINVAL);
9699 CHECK(!regarray_find(p, name), EEXIST);
9701 CHECK(size, EINVAL);
9702 size = rte_align32pow2(size);
9704 /* Memory allocation. */
9705 r = calloc(1, sizeof(struct regarray));
9708 /* Node initialization. */
9709 strcpy(r->name, name);
9710 r->init_val = init_val;
/* Id assigned sequentially in registration order. */
9712 r->id = p->n_regarrays;
9714 /* Node add to tailq. */
9715 TAILQ_INSERT_TAIL(&p->regarrays, r, node);
/* Build the register array runtime: allocate a cache-aligned uint64_t array
 * per regarray on the pipeline's NUMA node, apply the initial value, and
 * precompute the power-of-two index mask. No-op when none are configured. */
9722 regarray_build(struct rte_swx_pipeline *p)
9724 struct regarray *regarray;
9726 if (!p->n_regarrays)
9729 p->regarray_runtime = calloc(p->n_regarrays, sizeof(struct regarray_runtime));
9730 CHECK(p->regarray_runtime, ENOMEM);
9732 TAILQ_FOREACH(regarray, &p->regarrays, node) {
9733 struct regarray_runtime *r = &p->regarray_runtime[regarray->id];
9736 r->regarray = env_malloc(regarray->size * sizeof(uint64_t),
9737 RTE_CACHE_LINE_SIZE,
9739 CHECK(r->regarray, ENOMEM);
/* env_malloc zero-fills, so only non-zero init values need a pass. */
9741 if (regarray->init_val)
9742 for (i = 0; i < regarray->size; i++)
9743 r->regarray[i] = regarray->init_val;
/* size is a power of two, so size - 1 is a valid index mask. */
9745 r->size_mask = regarray->size - 1;
/* Undo regarray_build(): release each env-allocated data array (env_free
 * needs the original size) and the runtime descriptor array. */
9752 regarray_build_free(struct rte_swx_pipeline *p)
9756 if (!p->regarray_runtime)
9759 for (i = 0; i < p->n_regarrays; i++) {
9760 struct regarray *regarray = regarray_find_by_id(p, i);
9761 struct regarray_runtime *r = &p->regarray_runtime[i];
9763 env_free(r->regarray, regarray->size * sizeof(uint64_t));
9766 free(p->regarray_runtime);
9767 p->regarray_runtime = NULL;
/* Free the runtime, then drain the regarray definitions tailq. */
9771 regarray_free(struct rte_swx_pipeline *p)
9773 regarray_build_free(p);
9776 struct regarray *elem;
9778 elem = TAILQ_FIRST(&p->regarrays);
9782 TAILQ_REMOVE(&p->regarrays, elem, node);
/* Look up a meter profile by name; linear scan of the meter_profiles tailq. */
9790 static struct meter_profile *
9791 meter_profile_find(struct rte_swx_pipeline *p, const char *name)
9793 struct meter_profile *elem;
9795 TAILQ_FOREACH(elem, &p->meter_profiles, node)
9796 if (!strcmp(elem->name, name))
/* Look up a meter array by name. */
9802 static struct metarray *
9803 metarray_find(struct rte_swx_pipeline *p, const char *name)
9805 struct metarray *elem;
9807 TAILQ_FOREACH(elem, &p->metarrays, node)
9808 if (!strcmp(elem->name, name))
/* Look up a meter array by its numeric id. */
9814 static struct metarray *
9815 metarray_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
9817 struct metarray *elem = NULL;
9819 TAILQ_FOREACH(elem, &p->metarrays, node)
/* Public API: register a new meter array with the pipeline. Mirrors
 * regarray_config: unique name, size rounded up to a power of two, sequential
 * id, appended to the metarrays tailq. */
9827 rte_swx_pipeline_metarray_config(struct rte_swx_pipeline *p,
9835 CHECK_NAME(name, EINVAL);
9836 CHECK(!metarray_find(p, name), EEXIST);
9838 CHECK(size, EINVAL);
9839 size = rte_align32pow2(size);
9841 /* Memory allocation. */
9842 m = calloc(1, sizeof(struct metarray));
9845 /* Node initialization. */
9846 strcpy(m->name, name);
9848 m->id = p->n_metarrays;
9850 /* Node add to tailq. */
9851 TAILQ_INSERT_TAIL(&p->metarrays, m, node);
/* Built-in default trTCM profile assigned to every meter at init time; the
 * per-period byte budgets are set to the minimum of 1. */
9857 struct meter_profile meter_profile_default = {
9866 .cir_bytes_per_period = 1,
9868 .pir_bytes_per_period = 1,
/* Reset a meter to a clean state bound to the default profile, with only
 * GREEN enabled in the color mask, and account the profile reference. */
9875 meter_init(struct meter *m)
9877 memset(m, 0, sizeof(struct meter));
9878 rte_meter_trtcm_config(&m->m, &meter_profile_default.profile);
9879 m->profile = &meter_profile_default;
9880 m->color_mask = RTE_COLOR_GREEN;
/* Track users so the default profile is never deleted while in use. */
9882 meter_profile_default.n_users++;
/* Build the meter array runtime: one cache-aligned meter array per metarray
 * on the pipeline's NUMA node, each meter initialized to the default profile,
 * plus the power-of-two index mask. No-op when none are configured. */
9886 metarray_build(struct rte_swx_pipeline *p)
9890 if (!p->n_metarrays)
9893 p->metarray_runtime = calloc(p->n_metarrays, sizeof(struct metarray_runtime));
9894 CHECK(p->metarray_runtime, ENOMEM);
9896 TAILQ_FOREACH(m, &p->metarrays, node) {
9897 struct metarray_runtime *r = &p->metarray_runtime[m->id];
9900 r->metarray = env_malloc(m->size * sizeof(struct meter),
9901 RTE_CACHE_LINE_SIZE,
9903 CHECK(r->metarray, ENOMEM);
9905 for (i = 0; i < m->size; i++)
9906 meter_init(&r->metarray[i]);
/* size is a power of two, so size - 1 is a valid index mask. */
9908 r->size_mask = m->size - 1;
/* Undo metarray_build(): release each env-allocated meter array (env_free
 * needs the original size) and the runtime descriptor array. */
9915 metarray_build_free(struct rte_swx_pipeline *p)
9919 if (!p->metarray_runtime)
9922 for (i = 0; i < p->n_metarrays; i++) {
9923 struct metarray *m = metarray_find_by_id(p, i);
9924 struct metarray_runtime *r = &p->metarray_runtime[i];
9926 env_free(r->metarray, m->size * sizeof(struct meter));
9929 free(p->metarray_runtime);
9930 p->metarray_runtime = NULL;
/* Free the runtime, then drain the metarray definitions and the registered
 * meter profiles tailqs. */
9934 metarray_free(struct rte_swx_pipeline *p)
9936 metarray_build_free(p);
9940 struct metarray *elem;
9942 elem = TAILQ_FIRST(&p->metarrays);
9946 TAILQ_REMOVE(&p->metarrays, elem, node);
9950 /* Meter profiles. */
9952 struct meter_profile *elem;
9954 elem = TAILQ_FIRST(&p->meter_profiles);
9958 TAILQ_REMOVE(&p->meter_profiles, elem, node);
/* Public API: allocate and zero-initialize a pipeline object, initialize all
 * of its object tailqs, and record the target NUMA node. On success the new
 * pipeline is returned through *p. */
9967 rte_swx_pipeline_config(struct rte_swx_pipeline **p, int numa_node)
9969 struct rte_swx_pipeline *pipeline;
9971 /* Check input parameters. */
9974 /* Memory allocation. */
9975 pipeline = calloc(1, sizeof(struct rte_swx_pipeline));
9976 CHECK(pipeline, ENOMEM);
9978 /* Initialization. */
9979 TAILQ_INIT(&pipeline->struct_types);
9980 TAILQ_INIT(&pipeline->port_in_types);
9981 TAILQ_INIT(&pipeline->ports_in);
9982 TAILQ_INIT(&pipeline->port_out_types);
9983 TAILQ_INIT(&pipeline->ports_out);
9984 TAILQ_INIT(&pipeline->extern_types);
9985 TAILQ_INIT(&pipeline->extern_objs);
9986 TAILQ_INIT(&pipeline->extern_funcs);
9987 TAILQ_INIT(&pipeline->headers);
9988 TAILQ_INIT(&pipeline->actions);
9989 TAILQ_INIT(&pipeline->table_types);
9990 TAILQ_INIT(&pipeline->tables);
9991 TAILQ_INIT(&pipeline->regarrays);
9992 TAILQ_INIT(&pipeline->meter_profiles);
9993 TAILQ_INIT(&pipeline->metarrays);
9995 pipeline->n_structs = 1; /* Struct 0 is reserved for action_data. */
9996 pipeline->numa_node = numa_node;
/* Public API: destroy a pipeline, releasing the instruction array and the
 * per-category objects (table state, extern funcs/objs shown here; other
 * free calls are not visible in this extraction). */
10003 rte_swx_pipeline_free(struct rte_swx_pipeline *p)
10008 free(p->instructions);
10012 table_state_free(p);
10017 extern_func_free(p);
10018 extern_obj_free(p);
/* Public API: translate the textual instruction list into the pipeline's
 * internal instruction array, then reset every thread's instruction pointer
 * so execution starts from the first instruction. */
10027 rte_swx_pipeline_instructions_config(struct rte_swx_pipeline *p,
10028 const char **instructions,
10029 uint32_t n_instructions)
10034 err = instruction_config(p, NULL, instructions, n_instructions);
10038 /* Thread instruction pointer reset. */
10039 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
10040 struct thread *t = &p->threads[i];
10042 thread_ip_reset(p, t);
/* Public API: one-shot build of all pipeline data plane structures, in
 * dependency order (ports, structs, externs, headers, metadata, actions,
 * tables, table state, regarrays, metarrays). Can only be invoked once per
 * pipeline (build_done guard). On any failure, the error path below unwinds
 * every build stage in reverse order. */
10049 rte_swx_pipeline_build(struct rte_swx_pipeline *p)
10054 CHECK(p->build_done == 0, EEXIST);
10056 status = port_in_build(p);
10060 status = port_out_build(p);
10064 status = struct_build(p);
10068 status = extern_obj_build(p);
10072 status = extern_func_build(p);
10076 status = header_build(p);
10080 status = metadata_build(p);
10084 status = action_build(p);
10088 status = table_build(p);
10092 status = table_state_build(p);
10096 status = regarray_build(p);
10100 status = metarray_build(p);
/* Error path: tear down in strict reverse order of the build calls. */
10108 metarray_build_free(p);
10109 regarray_build_free(p);
10110 table_state_build_free(p);
10111 table_build_free(p);
10112 action_build_free(p);
10113 metadata_build_free(p);
10114 header_build_free(p);
10115 extern_func_build_free(p);
10116 extern_obj_build_free(p);
10117 port_out_build_free(p);
10118 port_in_build_free(p);
10119 struct_build_free(p);
/* Public API: execute n_instructions steps of the pipeline. */
10125 rte_swx_pipeline_run(struct rte_swx_pipeline *p, uint32_t n_instructions)
10129 for (i = 0; i < n_instructions; i++)
/* Public API: flush every output port that provides a flush callback,
 * pushing out any internally buffered packets. */
10134 rte_swx_pipeline_flush(struct rte_swx_pipeline *p)
10138 for (i = 0; i < p->n_ports_out; i++) {
10139 struct port_out_runtime *port = &p->out[i];
10142 port->flush(port->obj);
/* Control API: report pipeline-wide object counts. Actions and tables are
 * counted by walking their tailqs; the rest come from cached counters. */
10150 rte_swx_ctl_pipeline_info_get(struct rte_swx_pipeline *p,
10151 struct rte_swx_ctl_pipeline_info *pipeline)
10153 struct action *action;
10154 struct table *table;
10155 uint32_t n_actions = 0, n_tables = 0;
10157 if (!p || !pipeline)
10160 TAILQ_FOREACH(action, &p->actions, node)
10163 TAILQ_FOREACH(table, &p->tables, node)
10166 pipeline->n_ports_in = p->n_ports_in;
10167 pipeline->n_ports_out = p->n_ports_out;
10168 pipeline->n_actions = n_actions;
10169 pipeline->n_tables = n_tables;
10170 pipeline->n_regarrays = p->n_regarrays;
10171 pipeline->n_metarrays = p->n_metarrays;
/* Control API: report the NUMA node the pipeline was configured on. */
10177 rte_swx_ctl_pipeline_numa_node_get(struct rte_swx_pipeline *p, int *numa_node)
10179 if (!p || !numa_node)
10182 *numa_node = p->numa_node;
/* Control API: report an action's name and number of arguments (0 when the
 * action has no argument struct). */
10187 rte_swx_ctl_action_info_get(struct rte_swx_pipeline *p,
10188 uint32_t action_id,
10189 struct rte_swx_ctl_action_info *action)
10191 struct action *a = NULL;
10193 if (!p || (action_id >= p->n_actions) || !action)
10196 a = action_find_by_id(p, action_id);
10200 strcpy(action->name, a->name);
10201 action->n_args = a->st ? a->st->n_fields : 0;
/* Control API: report one action argument's name, bit width and endianness
 * (per-argument flag recorded at action registration time). */
10206 rte_swx_ctl_action_arg_info_get(struct rte_swx_pipeline *p,
10207 uint32_t action_id,
10208 uint32_t action_arg_id,
10209 struct rte_swx_ctl_action_arg_info *action_arg)
10211 struct action *a = NULL;
10212 struct field *arg = NULL;
10214 if (!p || (action_id >= p->n_actions) || !action_arg)
10217 a = action_find_by_id(p, action_id);
10218 if (!a || !a->st || (action_arg_id >= a->st->n_fields))
10221 arg = &a->st->fields[action_arg_id];
10222 strcpy(action_arg->name, arg->name);
10223 action_arg->n_bits = arg->n_bits;
10224 action_arg->is_network_byte_order = a->args_endianness[action_arg_id];
/* Control API: report a table's name, creation args string, match field and
 * action counts, default action constness, and size. */
10230 rte_swx_ctl_table_info_get(struct rte_swx_pipeline *p,
10232 struct rte_swx_ctl_table_info *table)
10234 struct table *t = NULL;
10239 t = table_find_by_id(p, table_id);
10243 strcpy(table->name, t->name);
10244 strcpy(table->args, t->args);
10245 table->n_match_fields = t->n_fields;
10246 table->n_actions = t->n_actions;
10247 table->default_action_is_const = t->default_action_is_const;
10248 table->size = t->size;
/* Control API: report one match field's type, header-vs-metadata origin,
 * bit width and bit offset within its struct. */
10253 rte_swx_ctl_table_match_field_info_get(struct rte_swx_pipeline *p,
10255 uint32_t match_field_id,
10256 struct rte_swx_ctl_table_match_field_info *match_field)
10259 struct match_field *f;
10261 if (!p || (table_id >= p->n_tables) || !match_field)
10264 t = table_find_by_id(p, table_id);
10265 if (!t || (match_field_id >= t->n_fields))
10268 f = &t->fields[match_field_id];
10269 match_field->match_type = f->match_type;
10270 match_field->is_header = t->header ? 1 : 0;
10271 match_field->n_bits = f->field->n_bits;
10272 match_field->offset = f->field->offset;
/* Control API: map a table-local action index to the pipeline-wide action
 * id. */
10278 rte_swx_ctl_table_action_info_get(struct rte_swx_pipeline *p,
10280 uint32_t table_action_id,
10281 struct rte_swx_ctl_table_action_info *table_action)
10285 if (!p || (table_id >= p->n_tables) || !table_action)
10288 t = table_find_by_id(p, table_id);
10289 if (!t || (table_action_id >= t->n_actions))
10292 table_action->action_id = t->actions[table_action_id]->id;
/* Control API: copy out the low-level ops of a table's type so the control
 * plane can operate directly on the table object. */
10298 rte_swx_ctl_table_ops_get(struct rte_swx_pipeline *p,
10300 struct rte_swx_table_ops *table_ops,
10305 if (!p || (table_id >= p->n_tables))
10308 t = table_find_by_id(p, table_id);
10314 memcpy(table_ops, &t->type->ops, sizeof(*table_ops));
/* Public API: expose the pipeline's table state array to the control plane.
 * Only valid after a successful build. */
10324 rte_swx_pipeline_table_state_get(struct rte_swx_pipeline *p,
10325 struct rte_swx_table_state **table_state)
10327 if (!p || !table_state || !p->build_done)
10330 *table_state = p->table_state;
/* Public API: atomically swap in a new table state array (used by the
 * control plane for transactional table updates). */
10335 rte_swx_pipeline_table_state_set(struct rte_swx_pipeline *p,
10336 struct rte_swx_table_state *table_state)
10338 if (!p || !table_state || !p->build_done)
10341 p->table_state = table_state;
/* Control API: read an input port's statistics via its port type ops. */
10346 rte_swx_ctl_pipeline_port_in_stats_read(struct rte_swx_pipeline *p,
10348 struct rte_swx_port_in_stats *stats)
10350 struct port_in *port;
10355 port = port_in_find(p, port_id);
10359 port->type->ops.stats_read(port->obj, stats);
/* Control API: read an output port's statistics via its port type ops. */
10364 rte_swx_ctl_pipeline_port_out_stats_read(struct rte_swx_pipeline *p,
10366 struct rte_swx_port_out_stats *stats)
10368 struct port_out *port;
10373 port = port_out_find(p, port_id);
10377 port->type->ops.stats_read(port->obj, stats);
10382 rte_swx_ctl_pipeline_table_stats_read(struct rte_swx_pipeline *p,
10383 const char *table_name,
10384 struct rte_swx_table_stats *stats)
10386 struct table *table;
10387 struct table_statistics *table_stats;
10389 if (!p || !table_name || !table_name[0] || !stats || !stats->n_pkts_action)
10392 table = table_find(p, table_name);
10396 table_stats = &p->table_stats[table->id];
10398 memcpy(&stats->n_pkts_action,
10399 &table_stats->n_pkts_action,
10400 p->n_actions * sizeof(uint64_t));
10402 stats->n_pkts_hit = table_stats->n_pkts_hit[1];
10403 stats->n_pkts_miss = table_stats->n_pkts_hit[0];
/* Control API: report a register array's name and size. */
10409 rte_swx_ctl_regarray_info_get(struct rte_swx_pipeline *p,
10410 uint32_t regarray_id,
10411 struct rte_swx_ctl_regarray_info *regarray)
10413 struct regarray *r;
10415 if (!p || !regarray)
10418 r = regarray_find_by_id(p, regarray_id);
10422 strcpy(regarray->name, r->name);
10423 regarray->size = r->size;
/* Control API: read a single register array element by name and index. */
10428 rte_swx_ctl_pipeline_regarray_read(struct rte_swx_pipeline *p,
10429 const char *regarray_name,
10430 uint32_t regarray_index,
10433 struct regarray *regarray;
10434 struct regarray_runtime *r;
10436 if (!p || !regarray_name || !value)
10439 regarray = regarray_find(p, regarray_name);
10440 if (!regarray || (regarray_index >= regarray->size))
10443 r = &p->regarray_runtime[regarray->id];
10444 *value = r->regarray[regarray_index];
/* Control API: write a single register array element by name and index. */
10449 rte_swx_ctl_pipeline_regarray_write(struct rte_swx_pipeline *p,
10450 const char *regarray_name,
10451 uint32_t regarray_index,
10454 struct regarray *regarray;
10455 struct regarray_runtime *r;
10457 if (!p || !regarray_name)
10460 regarray = regarray_find(p, regarray_name);
10461 if (!regarray || (regarray_index >= regarray->size))
10464 r = &p->regarray_runtime[regarray->id];
10465 r->regarray[regarray_index] = value;
/* Control API: report a meter array's name and size. */
10470 rte_swx_ctl_metarray_info_get(struct rte_swx_pipeline *p,
10471 uint32_t metarray_id,
10472 struct rte_swx_ctl_metarray_info *metarray)
10474 struct metarray *m;
10476 if (!p || !metarray)
10479 m = metarray_find_by_id(p, metarray_id);
10483 strcpy(metarray->name, m->name);
10484 metarray->size = m->size;
/* Control API: register a new trTCM meter profile under a unique name. The
 * caller's params are copied and pre-compiled into an rte_meter profile; the
 * node joins the meter_profiles tailq on success. */
10489 rte_swx_ctl_meter_profile_add(struct rte_swx_pipeline *p,
10491 struct rte_meter_trtcm_params *params)
10493 struct meter_profile *mp;
10497 CHECK_NAME(name, EINVAL);
10498 CHECK(params, EINVAL);
10499 CHECK(!meter_profile_find(p, name), EEXIST);
10501 /* Node allocation. */
10502 mp = calloc(1, sizeof(struct meter_profile));
10505 /* Node initialization. */
10506 strcpy(mp->name, name);
10507 memcpy(&mp->params, params, sizeof(struct rte_meter_trtcm_params));
/* Validate/compile the params once here, so run-time meter config from
 * this profile cannot fail. */
10508 status = rte_meter_trtcm_profile_config(&mp->profile, params);
10514 /* Node add to tailq. */
10515 TAILQ_INSERT_TAIL(&p->meter_profiles, mp, node);
/* Control API: delete a meter profile by name; refused (EBUSY) while any
 * meter still references it. */
10521 rte_swx_ctl_meter_profile_delete(struct rte_swx_pipeline *p,
10524 struct meter_profile *mp;
10527 CHECK_NAME(name, EINVAL);
10529 mp = meter_profile_find(p, name);
10531 CHECK(!mp->n_users, EBUSY);
10533 /* Remove node from tailq. */
10534 TAILQ_REMOVE(&p->meter_profiles, mp, node);
/* Control API: reset one meter in a meter array back to the default profile,
 * dropping the reference on its previous profile. (Extraction gap: the
 * re-init and n_users bookkeeping lines are not visible here.) */
10541 rte_swx_ctl_meter_reset(struct rte_swx_pipeline *p,
10542 const char *metarray_name,
10543 uint32_t metarray_index)
10545 struct meter_profile *mp_old;
10546 struct metarray *metarray;
10547 struct metarray_runtime *metarray_runtime;
10551 CHECK_NAME(metarray_name, EINVAL);
10553 metarray = metarray_find(p, metarray_name);
10554 CHECK(metarray, EINVAL);
10555 CHECK(metarray_index < metarray->size, EINVAL);
10557 metarray_runtime = &p->metarray_runtime[metarray->id];
10558 m = &metarray_runtime->metarray[metarray_index];
/* Remember the outgoing profile so its user count can be decremented. */
10559 mp_old = m->profile;
/* Control API: bind one meter in a meter array to a named profile,
 * re-initializing its trTCM state and enabling all colors. */
10569 rte_swx_ctl_meter_set(struct rte_swx_pipeline *p,
10570 const char *metarray_name,
10571 uint32_t metarray_index,
10572 const char *profile_name)
10574 struct meter_profile *mp, *mp_old;
10575 struct metarray *metarray;
10576 struct metarray_runtime *metarray_runtime;
10580 CHECK_NAME(metarray_name, EINVAL);
10582 metarray = metarray_find(p, metarray_name);
10583 CHECK(metarray, EINVAL);
10584 CHECK(metarray_index < metarray->size, EINVAL);
10586 mp = meter_profile_find(p, profile_name);
10589 metarray_runtime = &p->metarray_runtime[metarray->id];
10590 m = &metarray_runtime->metarray[metarray_index];
/* Remember the outgoing profile so its user count can be decremented. */
10591 mp_old = m->profile;
/* Clear counters/state and reconfigure the trTCM from the new profile. */
10593 memset(m, 0, sizeof(struct meter));
10594 rte_meter_trtcm_config(&m->m, &mp->profile);
/* Unlike meter_init (GREEN only), a configured meter enables all colors. */
10596 m->color_mask = RTE_COLORS;
10605 rte_swx_ctl_meter_stats_read(struct rte_swx_pipeline *p,
10606 const char *metarray_name,
10607 uint32_t metarray_index,
10608 struct rte_swx_ctl_meter_stats *stats)
10610 struct metarray *metarray;
10611 struct metarray_runtime *metarray_runtime;
10615 CHECK_NAME(metarray_name, EINVAL);
10617 metarray = metarray_find(p, metarray_name);
10618 CHECK(metarray, EINVAL);
10619 CHECK(metarray_index < metarray->size, EINVAL);
10621 CHECK(stats, EINVAL);
10623 metarray_runtime = &p->metarray_runtime[metarray->id];
10624 m = &metarray_runtime->metarray[metarray_index];
10626 memcpy(stats->n_pkts, m->n_pkts, sizeof(m->n_pkts));
10627 memcpy(stats->n_bytes, m->n_bytes, sizeof(m->n_bytes));