1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Intel Corporation
10 #include <arpa/inet.h>
12 #include <rte_common.h>
13 #include <rte_prefetch.h>
14 #include <rte_byteorder.h>
15 #include <rte_cycles.h>
16 #include <rte_meter.h>
18 #include <rte_swx_table_selector.h>
20 #include "rte_swx_pipeline.h"
21 #include "rte_swx_ctl.h"
23 #define CHECK(condition, err_code) \
29 #define CHECK_NAME(name, err_code) \
32 (strnlen((name), RTE_SWX_NAME_SIZE) < RTE_SWX_NAME_SIZE), \
35 #define CHECK_INSTRUCTION(instr, err_code) \
38 (strnlen((instr), RTE_SWX_INSTRUCTION_SIZE) < \
39 RTE_SWX_INSTRUCTION_SIZE), \
/* Debug trace hook: forwards directly to printf here.
 * NOTE(review): presumably compiled out when pipeline debug is disabled —
 * the surrounding conditional is not visible in this view; confirm.
 */
47 #define TRACE(...) printf(__VA_ARGS__)
/* 64-bit network<->host byte-order conversion shorthands over rte_byteorder. */
55 #define ntoh64(x) rte_be_to_cpu_64(x)
56 #define hton64(x) rte_cpu_to_be_64(x)
58 #ifndef RTE_SWX_PIPELINE_HUGE_PAGES_DISABLE
60 #include <rte_malloc.h>
63 env_malloc(size_t size, size_t alignment, int numa_node)
65 return rte_zmalloc_socket(NULL, size, alignment, numa_node);
69 env_free(void *start, size_t size __rte_unused)
79 env_malloc(size_t size, size_t alignment __rte_unused, int numa_node)
83 if (numa_available() == -1)
86 start = numa_alloc_onnode(size, numa_node);
90 memset(start, 0, size);
95 env_free(void *start, size_t size)
97 if (numa_available() == -1)
100 numa_free(start, size);
109 char name[RTE_SWX_NAME_SIZE];
116 TAILQ_ENTRY(struct_type) node;
117 char name[RTE_SWX_NAME_SIZE];
118 struct field *fields;
125 TAILQ_HEAD(struct_type_tailq, struct_type);
130 struct port_in_type {
131 TAILQ_ENTRY(port_in_type) node;
132 char name[RTE_SWX_NAME_SIZE];
133 struct rte_swx_port_in_ops ops;
136 TAILQ_HEAD(port_in_type_tailq, port_in_type);
139 TAILQ_ENTRY(port_in) node;
140 struct port_in_type *type;
145 TAILQ_HEAD(port_in_tailq, port_in);
147 struct port_in_runtime {
148 rte_swx_port_in_pkt_rx_t pkt_rx;
155 struct port_out_type {
156 TAILQ_ENTRY(port_out_type) node;
157 char name[RTE_SWX_NAME_SIZE];
158 struct rte_swx_port_out_ops ops;
161 TAILQ_HEAD(port_out_type_tailq, port_out_type);
164 TAILQ_ENTRY(port_out) node;
165 struct port_out_type *type;
170 TAILQ_HEAD(port_out_tailq, port_out);
172 struct port_out_runtime {
173 rte_swx_port_out_pkt_tx_t pkt_tx;
174 rte_swx_port_out_flush_t flush;
181 struct extern_type_member_func {
182 TAILQ_ENTRY(extern_type_member_func) node;
183 char name[RTE_SWX_NAME_SIZE];
184 rte_swx_extern_type_member_func_t func;
188 TAILQ_HEAD(extern_type_member_func_tailq, extern_type_member_func);
191 TAILQ_ENTRY(extern_type) node;
192 char name[RTE_SWX_NAME_SIZE];
193 struct struct_type *mailbox_struct_type;
194 rte_swx_extern_type_constructor_t constructor;
195 rte_swx_extern_type_destructor_t destructor;
196 struct extern_type_member_func_tailq funcs;
200 TAILQ_HEAD(extern_type_tailq, extern_type);
203 TAILQ_ENTRY(extern_obj) node;
204 char name[RTE_SWX_NAME_SIZE];
205 struct extern_type *type;
211 TAILQ_HEAD(extern_obj_tailq, extern_obj);
213 #ifndef RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX
214 #define RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX 8
217 struct extern_obj_runtime {
220 rte_swx_extern_type_member_func_t funcs[RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX];
227 TAILQ_ENTRY(extern_func) node;
228 char name[RTE_SWX_NAME_SIZE];
229 struct struct_type *mailbox_struct_type;
230 rte_swx_extern_func_t func;
235 TAILQ_HEAD(extern_func_tailq, extern_func);
237 struct extern_func_runtime {
239 rte_swx_extern_func_t func;
246 TAILQ_ENTRY(header) node;
247 char name[RTE_SWX_NAME_SIZE];
248 struct struct_type *st;
253 TAILQ_HEAD(header_tailq, header);
255 struct header_runtime {
260 struct header_out_runtime {
270 /* Packet headers are always in Network Byte Order (NBO), i.e. big endian.
271 * Packet meta-data fields are always assumed to be in Host Byte Order (HBO).
272 * Table entry fields can be in either NBO or HBO; they are assumed to be in HBO
273 * when transferred to packet meta-data and in NBO when transferred to packet
277 /* Notation conventions:
278 * -Header field: H = h.header.field (dst/src)
279 * -Meta-data field: M = m.field (dst/src)
280 * -Extern object mailbox field: E = e.field (dst/src)
281 * -Extern function mailbox field: F = f.field (dst/src)
282 * -Table action data field: T = t.field (src only)
283 * -Immediate value: I = 32-bit unsigned value (src only)
286 enum instruction_type {
293 INSTR_TX, /* port_out = M */
294 INSTR_TX_I, /* port_out = I */
296 /* extract h.header */
306 /* extract h.header m.last_field_size */
320 /* validate h.header */
323 /* invalidate h.header */
324 INSTR_HDR_INVALIDATE,
328 * dst = HMEF, src = HMEFTI
330 INSTR_MOV, /* dst = MEF, src = MEFT */
331 INSTR_MOV_MH, /* dst = MEF, src = H */
332 INSTR_MOV_HM, /* dst = H, src = MEFT */
333 INSTR_MOV_HH, /* dst = H, src = H */
334 INSTR_MOV_I, /* dst = HMEF, src = I */
336 /* dma h.header t.field
337 * memcpy(h.header, t.field, sizeof(h.header))
350 * dst = HMEF, src = HMEFTI
352 INSTR_ALU_ADD, /* dst = MEF, src = MEF */
353 INSTR_ALU_ADD_MH, /* dst = MEF, src = H */
354 INSTR_ALU_ADD_HM, /* dst = H, src = MEF */
355 INSTR_ALU_ADD_HH, /* dst = H, src = H */
356 INSTR_ALU_ADD_MI, /* dst = MEF, src = I */
357 INSTR_ALU_ADD_HI, /* dst = H, src = I */
361 * dst = HMEF, src = HMEFTI
363 INSTR_ALU_SUB, /* dst = MEF, src = MEF */
364 INSTR_ALU_SUB_MH, /* dst = MEF, src = H */
365 INSTR_ALU_SUB_HM, /* dst = H, src = MEF */
366 INSTR_ALU_SUB_HH, /* dst = H, src = H */
367 INSTR_ALU_SUB_MI, /* dst = MEF, src = I */
368 INSTR_ALU_SUB_HI, /* dst = H, src = I */
371 * dst = dst '+ src[0:1] '+ src[2:3] + ...
372 * dst = H, src = {H, h.header}
374 INSTR_ALU_CKADD_FIELD, /* src = H */
375 INSTR_ALU_CKADD_STRUCT20, /* src = h.header, with sizeof(header) = 20 */
376 INSTR_ALU_CKADD_STRUCT, /* src = h.header, with any sizeof(header) */
382 INSTR_ALU_CKSUB_FIELD,
386 * dst = HMEF, src = HMEFTI
388 INSTR_ALU_AND, /* dst = MEF, src = MEFT */
389 INSTR_ALU_AND_MH, /* dst = MEF, src = H */
390 INSTR_ALU_AND_HM, /* dst = H, src = MEFT */
391 INSTR_ALU_AND_HH, /* dst = H, src = H */
392 INSTR_ALU_AND_I, /* dst = HMEF, src = I */
396 * dst = HMEF, src = HMEFTI
398 INSTR_ALU_OR, /* dst = MEF, src = MEFT */
399 INSTR_ALU_OR_MH, /* dst = MEF, src = H */
400 INSTR_ALU_OR_HM, /* dst = H, src = MEFT */
401 INSTR_ALU_OR_HH, /* dst = H, src = H */
402 INSTR_ALU_OR_I, /* dst = HMEF, src = I */
406 * dst = HMEF, src = HMEFTI
408 INSTR_ALU_XOR, /* dst = MEF, src = MEFT */
409 INSTR_ALU_XOR_MH, /* dst = MEF, src = H */
410 INSTR_ALU_XOR_HM, /* dst = H, src = MEFT */
411 INSTR_ALU_XOR_HH, /* dst = H, src = H */
412 INSTR_ALU_XOR_I, /* dst = HMEF, src = I */
416 * dst = HMEF, src = HMEFTI
418 INSTR_ALU_SHL, /* dst = MEF, src = MEF */
419 INSTR_ALU_SHL_MH, /* dst = MEF, src = H */
420 INSTR_ALU_SHL_HM, /* dst = H, src = MEF */
421 INSTR_ALU_SHL_HH, /* dst = H, src = H */
422 INSTR_ALU_SHL_MI, /* dst = MEF, src = I */
423 INSTR_ALU_SHL_HI, /* dst = H, src = I */
427 * dst = HMEF, src = HMEFTI
429 INSTR_ALU_SHR, /* dst = MEF, src = MEF */
430 INSTR_ALU_SHR_MH, /* dst = MEF, src = H */
431 INSTR_ALU_SHR_HM, /* dst = H, src = MEF */
432 INSTR_ALU_SHR_HH, /* dst = H, src = H */
433 INSTR_ALU_SHR_MI, /* dst = MEF, src = I */
434 INSTR_ALU_SHR_HI, /* dst = H, src = I */
436 /* regprefetch REGARRAY index
437 * prefetch REGARRAY[index]
440 INSTR_REGPREFETCH_RH, /* index = H */
441 INSTR_REGPREFETCH_RM, /* index = MEFT */
442 INSTR_REGPREFETCH_RI, /* index = I */
444 /* regrd dst REGARRAY index
445 * dst = REGARRAY[index]
446 * dst = HMEF, index = HMEFTI
448 INSTR_REGRD_HRH, /* dst = H, index = H */
449 INSTR_REGRD_HRM, /* dst = H, index = MEFT */
450 INSTR_REGRD_HRI, /* dst = H, index = I */
451 INSTR_REGRD_MRH, /* dst = MEF, index = H */
452 INSTR_REGRD_MRM, /* dst = MEF, index = MEFT */
453 INSTR_REGRD_MRI, /* dst = MEF, index = I */
455 /* regwr REGARRAY index src
456 * REGARRAY[index] = src
457 * index = HMEFTI, src = HMEFTI
459 INSTR_REGWR_RHH, /* index = H, src = H */
460 INSTR_REGWR_RHM, /* index = H, src = MEFT */
461 INSTR_REGWR_RHI, /* index = H, src = I */
462 INSTR_REGWR_RMH, /* index = MEFT, src = H */
463 INSTR_REGWR_RMM, /* index = MEFT, src = MEFT */
464 INSTR_REGWR_RMI, /* index = MEFT, src = I */
465 INSTR_REGWR_RIH, /* index = I, src = H */
466 INSTR_REGWR_RIM, /* index = I, src = MEFT */
467 INSTR_REGWR_RII, /* index = I, src = I */
469 /* regadd REGARRAY index src
470 * REGARRAY[index] += src
471 * index = HMEFTI, src = HMEFTI
473 INSTR_REGADD_RHH, /* index = H, src = H */
474 INSTR_REGADD_RHM, /* index = H, src = MEFT */
475 INSTR_REGADD_RHI, /* index = H, src = I */
476 INSTR_REGADD_RMH, /* index = MEFT, src = H */
477 INSTR_REGADD_RMM, /* index = MEFT, src = MEFT */
478 INSTR_REGADD_RMI, /* index = MEFT, src = I */
479 INSTR_REGADD_RIH, /* index = I, src = H */
480 INSTR_REGADD_RIM, /* index = I, src = MEFT */
481 INSTR_REGADD_RII, /* index = I, src = I */
483 /* metprefetch METARRAY index
484 * prefetch METARRAY[index]
487 INSTR_METPREFETCH_H, /* index = H */
488 INSTR_METPREFETCH_M, /* index = MEFT */
489 INSTR_METPREFETCH_I, /* index = I */
491 /* meter METARRAY index length color_in color_out
492 * color_out = meter(METARRAY[index], length, color_in)
493 * index = HMEFTI, length = HMEFT, color_in = MEFTI, color_out = MEF
495 INSTR_METER_HHM, /* index = H, length = H, color_in = MEFT */
496 INSTR_METER_HHI, /* index = H, length = H, color_in = I */
497 INSTR_METER_HMM, /* index = H, length = MEFT, color_in = MEFT */
498 INSTR_METER_HMI, /* index = H, length = MEFT, color_in = I */
499 INSTR_METER_MHM, /* index = MEFT, length = H, color_in = MEFT */
500 INSTR_METER_MHI, /* index = MEFT, length = H, color_in = I */
501 INSTR_METER_MMM, /* index = MEFT, length = MEFT, color_in = MEFT */
502 INSTR_METER_MMI, /* index = MEFT, length = MEFT, color_in = I */
503 INSTR_METER_IHM, /* index = I, length = H, color_in = MEFT */
504 INSTR_METER_IHI, /* index = I, length = H, color_in = I */
505 INSTR_METER_IMM, /* index = I, length = MEFT, color_in = MEFT */
506 INSTR_METER_IMI, /* index = I, length = MEFT, color_in = I */
512 /* extern e.obj.func */
523 /* jmpv LABEL h.header
524 * Jump if header is valid
528 /* jmpnv LABEL h.header
529 * Jump if header is invalid
534 * Jump if table lookup hit
539 * Jump if table lookup miss
546 INSTR_JMP_ACTION_HIT,
548 /* jmpna LABEL ACTION
549 * Jump if action not run
551 INSTR_JMP_ACTION_MISS,
554 * Jump if a is equal to b
555 * a = HMEFT, b = HMEFTI
557 INSTR_JMP_EQ, /* a = MEFT, b = MEFT */
558 INSTR_JMP_EQ_MH, /* a = MEFT, b = H */
559 INSTR_JMP_EQ_HM, /* a = H, b = MEFT */
560 INSTR_JMP_EQ_HH, /* a = H, b = H */
561 INSTR_JMP_EQ_I, /* (a, b) = (MEFT, I) or (a, b) = (H, I) */
564 * Jump if a is not equal to b
565 * a = HMEFT, b = HMEFTI
567 INSTR_JMP_NEQ, /* a = MEFT, b = MEFT */
568 INSTR_JMP_NEQ_MH, /* a = MEFT, b = H */
569 INSTR_JMP_NEQ_HM, /* a = H, b = MEFT */
570 INSTR_JMP_NEQ_HH, /* a = H, b = H */
571 INSTR_JMP_NEQ_I, /* (a, b) = (MEFT, I) or (a, b) = (H, I) */
574 * Jump if a is less than b
575 * a = HMEFT, b = HMEFTI
577 INSTR_JMP_LT, /* a = MEFT, b = MEFT */
578 INSTR_JMP_LT_MH, /* a = MEFT, b = H */
579 INSTR_JMP_LT_HM, /* a = H, b = MEFT */
580 INSTR_JMP_LT_HH, /* a = H, b = H */
581 INSTR_JMP_LT_MI, /* a = MEFT, b = I */
582 INSTR_JMP_LT_HI, /* a = H, b = I */
585 * Jump if a is greater than b
586 * a = HMEFT, b = HMEFTI
588 INSTR_JMP_GT, /* a = MEFT, b = MEFT */
589 INSTR_JMP_GT_MH, /* a = MEFT, b = H */
590 INSTR_JMP_GT_HM, /* a = H, b = MEFT */
591 INSTR_JMP_GT_HH, /* a = H, b = H */
592 INSTR_JMP_GT_MI, /* a = MEFT, b = I */
593 INSTR_JMP_GT_HI, /* a = H, b = I */
601 struct instr_operand {
622 uint8_t header_id[8];
623 uint8_t struct_id[8];
628 struct instr_hdr_validity {
636 struct instr_extern_obj {
641 struct instr_extern_func {
645 struct instr_dst_src {
646 struct instr_operand dst;
648 struct instr_operand src;
653 struct instr_regarray {
658 struct instr_operand idx;
663 struct instr_operand dstsrc;
673 struct instr_operand idx;
677 struct instr_operand length;
680 struct instr_operand color_in;
681 uint32_t color_in_val;
684 struct instr_operand color_out;
689 uint8_t header_id[8];
690 uint8_t struct_id[8];
701 struct instruction *ip;
704 struct instr_operand a;
710 struct instr_operand b;
716 enum instruction_type type;
719 struct instr_hdr_validity valid;
720 struct instr_dst_src mov;
721 struct instr_regarray regarray;
722 struct instr_meter meter;
723 struct instr_dma dma;
724 struct instr_dst_src alu;
725 struct instr_table table;
726 struct instr_extern_obj ext_obj;
727 struct instr_extern_func ext_func;
728 struct instr_jmp jmp;
732 struct instruction_data {
733 char label[RTE_SWX_NAME_SIZE];
734 char jmp_label[RTE_SWX_NAME_SIZE];
735 uint32_t n_users; /* user = jmp instruction to this instruction. */
743 TAILQ_ENTRY(action) node;
744 char name[RTE_SWX_NAME_SIZE];
745 struct struct_type *st;
746 int *args_endianness; /* 0 = Host Byte Order (HBO). */
747 struct instruction *instructions;
748 uint32_t n_instructions;
752 TAILQ_HEAD(action_tailq, action);
758 TAILQ_ENTRY(table_type) node;
759 char name[RTE_SWX_NAME_SIZE];
760 enum rte_swx_table_match_type match_type;
761 struct rte_swx_table_ops ops;
764 TAILQ_HEAD(table_type_tailq, table_type);
767 enum rte_swx_table_match_type match_type;
772 TAILQ_ENTRY(table) node;
773 char name[RTE_SWX_NAME_SIZE];
774 char args[RTE_SWX_NAME_SIZE];
775 struct table_type *type; /* NULL when n_fields == 0. */
778 struct match_field *fields;
780 struct header *header; /* Only valid when n_fields > 0. */
783 struct action **actions;
784 struct action *default_action;
785 uint8_t *default_action_data;
787 int default_action_is_const;
788 uint32_t action_data_size_max;
794 TAILQ_HEAD(table_tailq, table);
796 struct table_runtime {
797 rte_swx_table_lookup_t func;
802 struct table_statistics {
803 uint64_t n_pkts_hit[2]; /* 0 = Miss, 1 = Hit. */
804 uint64_t *n_pkts_action;
811 TAILQ_ENTRY(selector) node;
812 char name[RTE_SWX_NAME_SIZE];
814 struct field *group_id_field;
815 struct field **selector_fields;
816 uint32_t n_selector_fields;
817 struct header *selector_header;
818 struct field *member_id_field;
820 uint32_t n_groups_max;
821 uint32_t n_members_per_group_max;
826 TAILQ_HEAD(selector_tailq, selector);
828 struct selector_runtime {
830 uint8_t **group_id_buffer;
831 uint8_t **selector_buffer;
832 uint8_t **member_id_buffer;
835 struct selector_statistics {
843 TAILQ_ENTRY(regarray) node;
844 char name[RTE_SWX_NAME_SIZE];
850 TAILQ_HEAD(regarray_tailq, regarray);
852 struct regarray_runtime {
860 struct meter_profile {
861 TAILQ_ENTRY(meter_profile) node;
862 char name[RTE_SWX_NAME_SIZE];
863 struct rte_meter_trtcm_params params;
864 struct rte_meter_trtcm_profile profile;
868 TAILQ_HEAD(meter_profile_tailq, meter_profile);
871 TAILQ_ENTRY(metarray) node;
872 char name[RTE_SWX_NAME_SIZE];
877 TAILQ_HEAD(metarray_tailq, metarray);
880 struct rte_meter_trtcm m;
881 struct meter_profile *profile;
882 enum rte_color color_mask;
885 uint64_t n_pkts[RTE_COLORS];
886 uint64_t n_bytes[RTE_COLORS];
889 struct metarray_runtime {
890 struct meter *metarray;
899 struct rte_swx_pkt pkt;
905 /* Packet headers. */
906 struct header_runtime *headers; /* Extracted or generated headers. */
907 struct header_out_runtime *headers_out; /* Emitted headers. */
908 uint8_t *header_storage;
909 uint8_t *header_out_storage;
910 uint64_t valid_headers;
911 uint32_t n_headers_out;
913 /* Packet meta-data. */
917 struct table_runtime *tables;
918 struct selector_runtime *selectors;
919 struct rte_swx_table_state *table_state;
921 int hit; /* 0 = Miss, 1 = Hit. */
923 /* Extern objects and functions. */
924 struct extern_obj_runtime *extern_objs;
925 struct extern_func_runtime *extern_funcs;
928 struct instruction *ip;
929 struct instruction *ret;
/* 64-bit bitmask helpers: test, set and clear the bit at position pos.
 * Used below to track per-thread header validity in a single uint64_t.
 */
932 #define MASK64_BIT_GET(mask, pos) ((mask) & (1LLU << (pos)))
933 #define MASK64_BIT_SET(mask, pos) ((mask) | (1LLU << (pos)))
934 #define MASK64_BIT_CLR(mask, pos) ((mask) & ~(1LLU << (pos)))
/* Non-zero when header_id is currently marked valid for this thread. */
936 #define HEADER_VALID(thread, header_id) \
937 MASK64_BIT_GET((thread)->valid_headers, header_id)
/* Generic ALU primitive: dst = dst <operator> src. Both operands are
 * sub-64-bit fields accessed through (possibly unaligned) 64-bit loads and
 * stores; each value is isolated with a right-aligned mask of n_bits, and
 * only the dst field's bits are written back (surrounding bits preserved).
 * Host Byte Order on both sides.
 * NOTE(review): the enclosing braces of these macro bodies are elided in
 * this view of the file.
 */
939 #define ALU(thread, ip, operator) \
941 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
942 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
943 uint64_t dst64 = *dst64_ptr; \
944 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
945 uint64_t dst = dst64 & dst64_mask; \
947 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
948 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
949 uint64_t src64 = *src64_ptr; \
950 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
951 uint64_t src = src64 & src64_mask; \
953 uint64_t result = dst operator src; \
955 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
/* On little-endian hosts the header (NBO) operands need explicit
 * byte swapping; the suffix encodes the operand kinds (M = meta/HBO,
 * H = header/NBO, I = immediate).
 */
958 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* dst = MEF (HBO), src = H (NBO): swap src and right-align it to n_bits. */
960 #define ALU_MH(thread, ip, operator) \
962 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
963 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
964 uint64_t dst64 = *dst64_ptr; \
965 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
966 uint64_t dst = dst64 & dst64_mask; \
968 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
969 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
970 uint64_t src64 = *src64_ptr; \
971 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
973 uint64_t result = dst operator src; \
975 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
/* dst = H (NBO), src = MEF (HBO): swap dst in, compute, swap result out. */
978 #define ALU_HM(thread, ip, operator) \
980 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
981 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
982 uint64_t dst64 = *dst64_ptr; \
983 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
984 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
986 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
987 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
988 uint64_t src64 = *src64_ptr; \
989 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
990 uint64_t src = src64 & src64_mask; \
992 uint64_t result = dst operator src; \
993 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
995 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
/* Faster H/M variant: swaps src into dst's NBO alignment instead of
 * round-tripping dst, valid for bitwise operators (and/or/xor).
 */
998 #define ALU_HM_FAST(thread, ip, operator) \
1000 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
1001 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
1002 uint64_t dst64 = *dst64_ptr; \
1003 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
1004 uint64_t dst = dst64 & dst64_mask; \
1006 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
1007 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
1008 uint64_t src64 = *src64_ptr; \
1009 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
1010 uint64_t src = hton64(src64 & src64_mask) >> (64 - (ip)->alu.dst.n_bits); \
1012 uint64_t result = dst operator src; \
1014 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
/* dst = H (NBO), src = H (NBO): swap both in, swap the result back out. */
1017 #define ALU_HH(thread, ip, operator) \
1019 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
1020 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
1021 uint64_t dst64 = *dst64_ptr; \
1022 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
1023 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
1025 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
1026 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
1027 uint64_t src64 = *src64_ptr; \
1028 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
1030 uint64_t result = dst operator src; \
1031 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
1033 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
/* Faster H/H variant: re-aligns src to dst by shifting only, avoiding
 * byte swaps — again only valid for byte-order-insensitive operators.
 */
1036 #define ALU_HH_FAST(thread, ip, operator) \
1038 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
1039 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
1040 uint64_t dst64 = *dst64_ptr; \
1041 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
1042 uint64_t dst = dst64 & dst64_mask; \
1044 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
1045 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
1046 uint64_t src64 = *src64_ptr; \
1047 uint64_t src = (src64 << (64 - (ip)->alu.src.n_bits)) >> (64 - (ip)->alu.dst.n_bits); \
1049 uint64_t result = dst operator src; \
1051 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
/* Big-endian hosts: NBO == HBO, so all variants collapse to plain ALU. */
1058 #define ALU_HM_FAST ALU
1060 #define ALU_HH_FAST ALU
/* Immediate source: src comes from the instruction word (alu.src_val). */
1064 #define ALU_I(thread, ip, operator) \
1066 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
1067 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
1068 uint64_t dst64 = *dst64_ptr; \
1069 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
1070 uint64_t dst = dst64 & dst64_mask; \
1072 uint64_t src = (ip)->alu.src_val; \
1074 uint64_t result = dst operator src; \
1076 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
1079 #define ALU_MI ALU_I
1081 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* dst = H (NBO), src = immediate: swap dst in and the result back out. */
1083 #define ALU_HI(thread, ip, operator) \
1085 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
1086 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
1087 uint64_t dst64 = *dst64_ptr; \
1088 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
1089 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
1091 uint64_t src = (ip)->alu.src_val; \
1093 uint64_t result = dst operator src; \
1094 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
1096 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
1101 #define ALU_HI ALU_I
/* MOV primitives: copy a masked sub-64-bit field from src to dst through
 * 64-bit loads/stores, preserving the bits of dst outside its field.
 * Suffixes encode byte order of the operands (M = HBO, H = NBO header
 * field, I = immediate); endian-specific variants below are selected at
 * compile time.
 */
1105 #define MOV(thread, ip) \
1107 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
1108 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
1109 uint64_t dst64 = *dst64_ptr; \
1110 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
1112 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
1113 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
1114 uint64_t src64 = *src64_ptr; \
1115 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \
1116 uint64_t src = src64 & src64_mask; \
1118 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
1121 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* dst = MEF (HBO), src = H (NBO): byte-swap src and right-align it. */
1123 #define MOV_MH(thread, ip) \
1125 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
1126 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
1127 uint64_t dst64 = *dst64_ptr; \
1128 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
1130 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
1131 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
1132 uint64_t src64 = *src64_ptr; \
1133 uint64_t src = ntoh64(src64) >> (64 - (ip)->mov.src.n_bits); \
1135 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
/* dst = H (NBO), src = MEF (HBO): swap src into dst's NBO alignment. */
1138 #define MOV_HM(thread, ip) \
1140 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
1141 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
1142 uint64_t dst64 = *dst64_ptr; \
1143 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
1145 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
1146 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
1147 uint64_t src64 = *src64_ptr; \
1148 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \
1149 uint64_t src = src64 & src64_mask; \
1151 src = hton64(src) >> (64 - (ip)->mov.dst.n_bits); \
1152 *dst64_ptr = (dst64 & ~dst64_mask) | src; \
/* dst = H, src = H: both NBO, so a shift-based re-alignment suffices. */
1155 #define MOV_HH(thread, ip) \
1157 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
1158 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
1159 uint64_t dst64 = *dst64_ptr; \
1160 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
1162 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
1163 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
1164 uint64_t src64 = *src64_ptr; \
1166 uint64_t src = src64 << (64 - (ip)->mov.src.n_bits); \
1167 src = src >> (64 - (ip)->mov.dst.n_bits); \
1168 *dst64_ptr = (dst64 & ~dst64_mask) | src; \
/* Immediate source: value pre-computed into the instruction (mov.src_val). */
1179 #define MOV_I(thread, ip) \
1181 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
1182 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
1183 uint64_t dst64 = *dst64_ptr; \
1184 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
1186 uint64_t src = (ip)->mov.src_val; \
1188 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
/* Conditional-jump primitives: read operands a and b as masked sub-64-bit
 * fields, compare with <operator>, and set the thread's instruction
 * pointer to either the jump target (jmp.ip) or the next instruction.
 * Suffixes encode operand byte order as in the ALU/MOV macros above.
 */
1191 #define JMP_CMP(thread, ip, operator) \
1193 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1194 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1195 uint64_t a64 = *a64_ptr; \
1196 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
1197 uint64_t a = a64 & a64_mask; \
1199 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1200 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1201 uint64_t b64 = *b64_ptr; \
1202 uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits); \
1203 uint64_t b = b64 & b64_mask; \
1205 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1208 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* a = MEFT (HBO), b = H (NBO): byte-swap b before comparing. */
1210 #define JMP_CMP_MH(thread, ip, operator) \
1212 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1213 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1214 uint64_t a64 = *a64_ptr; \
1215 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
1216 uint64_t a = a64 & a64_mask; \
1218 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1219 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1220 uint64_t b64 = *b64_ptr; \
1221 uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits); \
1223 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
/* a = H (NBO), b = MEFT (HBO): byte-swap a before comparing. */
1226 #define JMP_CMP_HM(thread, ip, operator) \
1228 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1229 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1230 uint64_t a64 = *a64_ptr; \
1231 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
1233 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1234 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1235 uint64_t b64 = *b64_ptr; \
1236 uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits); \
1237 uint64_t b = b64 & b64_mask; \
1239 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
/* a = H, b = H: both byte-swapped into HBO, so any comparison is valid. */
1242 #define JMP_CMP_HH(thread, ip, operator) \
1244 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1245 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1246 uint64_t a64 = *a64_ptr; \
1247 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
1249 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1250 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1251 uint64_t b64 = *b64_ptr; \
1252 uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits); \
1254 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
/* Fast H/H variant: left-aligns both operands without byte swapping.
 * NOTE(review): this preserves equality/inequality but not numeric order,
 * so it is presumably used only for ==/!= comparisons — confirm at the
 * (elided) call sites.
 */
1257 #define JMP_CMP_HH_FAST(thread, ip, operator) \
1259 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1260 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1261 uint64_t a64 = *a64_ptr; \
1262 uint64_t a = a64 << (64 - (ip)->jmp.a.n_bits); \
1264 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1265 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1266 uint64_t b64 = *b64_ptr; \
1267 uint64_t b = b64 << (64 - (ip)->jmp.b.n_bits); \
1269 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
/* Big-endian hosts: no swapping needed, all variants fall back to JMP_CMP. */
1274 #define JMP_CMP_MH JMP_CMP
1275 #define JMP_CMP_HM JMP_CMP
1276 #define JMP_CMP_HH JMP_CMP
1277 #define JMP_CMP_HH_FAST JMP_CMP
/* b is an immediate value carried in the instruction (jmp.b_val). */
1281 #define JMP_CMP_I(thread, ip, operator) \
1283 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1284 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1285 uint64_t a64 = *a64_ptr; \
1286 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
1287 uint64_t a = a64 & a64_mask; \
1289 uint64_t b = (ip)->jmp.b_val; \
1291 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1294 #define JMP_CMP_MI JMP_CMP_I
1296 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* a = H (NBO) vs immediate: byte-swap a before the comparison. */
1298 #define JMP_CMP_HI(thread, ip, operator) \
1300 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1301 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1302 uint64_t a64 = *a64_ptr; \
1303 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
1305 uint64_t b = (ip)->jmp.b_val; \
1307 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1312 #define JMP_CMP_HI JMP_CMP_I
/* Read a sub-64-bit metadata field at byte offset as a masked HBO value.
 * NOTE(review): the expression producing the macro's result value is
 * elided in this view (only the load and mask setup are visible).
 */
1316 #define METADATA_READ(thread, offset, n_bits) \
1318 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
1319 uint64_t m64 = *m64_ptr; \
1320 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
/* Write value into a sub-64-bit metadata field, preserving neighbouring
 * bits via read-modify-write of the containing 64-bit word.
 */
1324 #define METADATA_WRITE(thread, offset, n_bits, value) \
1326 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
1327 uint64_t m64 = *m64_ptr; \
1328 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
1330 uint64_t m_new = value; \
1332 *m64_ptr = (m64 & ~m64_mask) | (m_new & m64_mask); \
/* Maximum number of packet-processing threads per pipeline (overridable). */
1335 #ifndef RTE_SWX_PIPELINE_THREADS_MAX
1336 #define RTE_SWX_PIPELINE_THREADS_MAX 16
/* Top-level pipeline object: registries of declared objects (tailqs),
 * the runtime structures built from them, and object counters.
 * NOTE(review): several members are elided in this view of the file.
 */
1339 struct rte_swx_pipeline {
	/* Declaration registries, populated at configuration time. */
1340 struct struct_type_tailq struct_types;
1341 struct port_in_type_tailq port_in_types;
1342 struct port_in_tailq ports_in;
1343 struct port_out_type_tailq port_out_types;
1344 struct port_out_tailq ports_out;
1345 struct extern_type_tailq extern_types;
1346 struct extern_obj_tailq extern_objs;
1347 struct extern_func_tailq extern_funcs;
1348 struct header_tailq headers;
1349 struct struct_type *metadata_st;
1350 uint32_t metadata_struct_id;
1351 struct action_tailq actions;
1352 struct table_type_tailq table_types;
1353 struct table_tailq tables;
1354 struct selector_tailq selectors;
1355 struct regarray_tailq regarrays;
1356 struct meter_profile_tailq meter_profiles;
1357 struct metarray_tailq metarrays;
	/* Runtime state derived from the declarations above at build time. */
1359 struct port_in_runtime *in;
1360 struct port_out_runtime *out;
1361 struct instruction **action_instructions;
1362 struct rte_swx_table_state *table_state;
1363 struct table_statistics *table_stats;
1364 struct selector_statistics *selector_stats;
1365 struct regarray_runtime *regarray_runtime;
1366 struct metarray_runtime *metarray_runtime;
1367 struct instruction *instructions;
1368 struct thread threads[RTE_SWX_PIPELINE_THREADS_MAX];
	/* Object counts for the arrays/registries above. */
1371 uint32_t n_ports_in;
1372 uint32_t n_ports_out;
1373 uint32_t n_extern_objs;
1374 uint32_t n_extern_funcs;
1377 uint32_t n_selectors;
1378 uint32_t n_regarrays;
1379 uint32_t n_metarrays;
1383 uint32_t n_instructions;
/* Look up a registered struct type by name; linear scan of the tailq.
 * NOTE(review): braces and return statements are elided in this view.
 */
1391 static struct struct_type *
1392 struct_type_find(struct rte_swx_pipeline *p, const char *name)
1394 struct struct_type *elem;
1396 TAILQ_FOREACH(elem, &p->struct_types, node)
1397 if (strcmp(elem->name, name) == 0)
/* Look up a field by name inside a struct type; linear scan of fields[]. */
1403 static struct field *
1404 struct_type_field_find(struct struct_type *st, const char *name)
1408 for (i = 0; i < st->n_fields; i++) {
1409 struct field *f = &st->fields[i];
1411 if (strcmp(f->name, name) == 0)
/* Register a new struct type with the pipeline.
 * Validates the name and every field (non-zero size, <= 64 bits unless it
 * is the variable-size last field, byte-aligned size, unique field names),
 * rejects duplicate type names, then allocates the node, computes each
 * field's bit offset, and appends the node to p->struct_types.
 * Returns 0 on success, negative errno via the CHECK macros on failure.
 * NOTE(review): allocation-failure cleanup and the final return are
 * elided in this view.
 */
1419 rte_swx_pipeline_struct_type_register(struct rte_swx_pipeline *p,
1421 struct rte_swx_field_params *fields,
1423 int last_field_has_variable_size)
1425 struct struct_type *st;
1429 CHECK_NAME(name, EINVAL);
1430 CHECK(fields, EINVAL);
1431 CHECK(n_fields, EINVAL);
	/* Per-field validation. */
1433 for (i = 0; i < n_fields; i++) {
1434 struct rte_swx_field_params *f = &fields[i];
1435 int var_size = ((i == n_fields - 1) && last_field_has_variable_size) ? 1 : 0;
1438 CHECK_NAME(f->name, EINVAL);
1439 CHECK(f->n_bits, EINVAL);
1440 CHECK((f->n_bits <= 64) || var_size, EINVAL);
	/* Field size must be a whole number of bytes. */
1441 CHECK((f->n_bits & 7) == 0, EINVAL);
	/* Reject duplicate field names within this type. */
1443 for (j = 0; j < i; j++) {
1444 struct rte_swx_field_params *f_prev = &fields[j];
1446 CHECK(strcmp(f->name, f_prev->name), EINVAL);
	/* Reject duplicate type names. */
1450 CHECK(!struct_type_find(p, name), EEXIST);
1452 /* Node allocation. */
1453 st = calloc(1, sizeof(struct struct_type));
1456 st->fields = calloc(n_fields, sizeof(struct field));
1462 /* Node initialization. */
1463 strcpy(st->name, name);
1464 for (i = 0; i < n_fields; i++) {
1465 struct field *dst = &st->fields[i];
1466 struct rte_swx_field_params *src = &fields[i];
1467 int var_size = ((i == n_fields - 1) && last_field_has_variable_size) ? 1 : 0;
1469 strcpy(dst->name, src->name);
1470 dst->n_bits = src->n_bits;
	/* Field offset is the running bit total of the preceding fields. */
1471 dst->offset = st->n_bits;
1472 dst->var_size = var_size;
1474 st->n_bits += src->n_bits;
	/* Minimum size excludes the (optional) variable-size last field. */
1475 st->n_bits_min += var_size ? 0 : src->n_bits;
1477 st->n_fields = n_fields;
1478 st->var_size = last_field_has_variable_size;
1480 /* Node add to tailq. */
1481 TAILQ_INSERT_TAIL(&p->struct_types, st, node);
/* Per-thread build step: allocate each thread's array of pointers to the
 * runtime struct instances (headers, metadata, mailboxes).
 */
1487 struct_build(struct rte_swx_pipeline *p)
1491 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1492 struct thread *t = &p->threads[i];
1494 t->structs = calloc(p->n_structs, sizeof(uint8_t *));
1495 CHECK(t->structs, ENOMEM);

/* Undo struct_build(): free per-thread struct pointer arrays
 * (body largely elided in this view).
 */
1502 struct_build_free(struct rte_swx_pipeline *p)
1506 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1507 struct thread *t = &p->threads[i];

/* Free all registered struct types (drains the tailq head-first). */
1515 struct_free(struct rte_swx_pipeline *p)
1517 struct_build_free(p);
1521 struct struct_type *elem;
1523 elem = TAILQ_FIRST(&p->struct_types);
1527 TAILQ_REMOVE(&p->struct_types, elem, node);
/* Input port type lookup by name (linear list scan). */
1536 static struct port_in_type *
1537 port_in_type_find(struct rte_swx_pipeline *p, const char *name)
1539 struct port_in_type *elem;
1544 TAILQ_FOREACH(elem, &p->port_in_types, node)
1545 if (strcmp(elem->name, name) == 0)

/*
 * Register an input port type: validate the ops vector (create/free/pkt_rx/
 * stats_read all mandatory), reject duplicate names, then copy name + ops
 * into a new node and append it to the port-in type list.
 */
1552 rte_swx_pipeline_port_in_type_register(struct rte_swx_pipeline *p,
1554 struct rte_swx_port_in_ops *ops)
1556 struct port_in_type *elem;
1559 CHECK_NAME(name, EINVAL);
1561 CHECK(ops->create, EINVAL);
1562 CHECK(ops->free, EINVAL);
1563 CHECK(ops->pkt_rx, EINVAL);
1564 CHECK(ops->stats_read, EINVAL);
1566 CHECK(!port_in_type_find(p, name), EEXIST);
1568 /* Node allocation. */
1569 elem = calloc(1, sizeof(struct port_in_type));
1570 CHECK(elem, ENOMEM);
1572 /* Node initialization. */
1573 strcpy(elem->name, name);
/* Ops are copied by value, so the caller's struct need not outlive this call. */
1574 memcpy(&elem->ops, ops, sizeof(*ops));
1576 /* Node add to tailq. */
1577 TAILQ_INSERT_TAIL(&p->port_in_types, elem, node);

/* Input port lookup by numeric port ID. */
1582 static struct port_in *
1583 port_in_find(struct rte_swx_pipeline *p, uint32_t port_id)
1585 struct port_in *port;
1587 TAILQ_FOREACH(port, &p->ports_in, node)
1588 if (port->id == port_id)
/*
 * Instantiate an input port of a previously registered type: the port ID
 * must be unused, the type's create() hook builds the underlying object,
 * then a port_in node is appended and n_ports_in grows to cover the new ID.
 */
1595 rte_swx_pipeline_port_in_config(struct rte_swx_pipeline *p,
1597 const char *port_type_name,
1600 struct port_in_type *type = NULL;
1601 struct port_in *port = NULL;
1606 CHECK(!port_in_find(p, port_id), EINVAL);
1608 CHECK_NAME(port_type_name, EINVAL);
1609 type = port_in_type_find(p, port_type_name);
1610 CHECK(type, EINVAL);
/* Delegate object construction to the port type; failure check elided here. */
1612 obj = type->ops.create(args);
1615 /* Node allocation. */
1616 port = calloc(1, sizeof(struct port_in));
1617 CHECK(port, ENOMEM);
1619 /* Node initialization. */
1624 /* Node add to tailq. */
1625 TAILQ_INSERT_TAIL(&p->ports_in, port, node);
/* Track the highest port ID + 1, since IDs may be configured out of order. */
1626 if (p->n_ports_in < port_id + 1)
1627 p->n_ports_in = port_id + 1;
/*
 * Build step for input ports: require at least one port, a power-of-two
 * count (pipeline_port_inc() uses a mask for round-robin), and a dense
 * 0..n-1 ID space; then flatten the list into the p->in runtime array.
 */
1633 port_in_build(struct rte_swx_pipeline *p)
1635 struct port_in *port;
1638 CHECK(p->n_ports_in, EINVAL);
/* Power-of-two requirement enables the (n_ports_in - 1) mask in
 * pipeline_port_inc().
 */
1639 CHECK(rte_is_power_of_2(p->n_ports_in), EINVAL);
/* Every ID in [0, n_ports_in) must be configured — no holes. */
1641 for (i = 0; i < p->n_ports_in; i++)
1642 CHECK(port_in_find(p, i), EINVAL);
1644 p->in = calloc(p->n_ports_in, sizeof(struct port_in_runtime));
1645 CHECK(p->in, ENOMEM);
1647 TAILQ_FOREACH(port, &p->ports_in, node) {
1648 struct port_in_runtime *in = &p->in[port->id];
/* Cache the rx callback and object for the fast path. */
1650 in->pkt_rx = port->type->ops.pkt_rx;
1651 in->obj = port->obj;

/* Undo port_in_build() (body elided in this view). */
1658 port_in_build_free(struct rte_swx_pipeline *p)

/* Free all input ports (each via its type's free() hook) and then all
 * input port types.
 */
1665 port_in_free(struct rte_swx_pipeline *p)
1667 port_in_build_free(p);
1671 struct port_in *port;
1673 port = TAILQ_FIRST(&p->ports_in);
1677 TAILQ_REMOVE(&p->ports_in, port, node);
1678 port->type->ops.free(port->obj);
1682 /* Input port types. */
1684 struct port_in_type *elem;
1686 elem = TAILQ_FIRST(&p->port_in_types);
1690 TAILQ_REMOVE(&p->port_in_types, elem, node);
/* Output port type lookup by name (linear list scan). */
1698 static struct port_out_type *
1699 port_out_type_find(struct rte_swx_pipeline *p, const char *name)
1701 struct port_out_type *elem;
1706 TAILQ_FOREACH(elem, &p->port_out_types, node)
1707 if (!strcmp(elem->name, name))

/*
 * Register an output port type; mirrors the input-port-type path but
 * requires pkt_tx instead of pkt_rx.
 */
1714 rte_swx_pipeline_port_out_type_register(struct rte_swx_pipeline *p,
1716 struct rte_swx_port_out_ops *ops)
1718 struct port_out_type *elem;
1721 CHECK_NAME(name, EINVAL);
1723 CHECK(ops->create, EINVAL);
1724 CHECK(ops->free, EINVAL);
1725 CHECK(ops->pkt_tx, EINVAL);
1726 CHECK(ops->stats_read, EINVAL);
1728 CHECK(!port_out_type_find(p, name), EEXIST);
1730 /* Node allocation. */
1731 elem = calloc(1, sizeof(struct port_out_type));
1732 CHECK(elem, ENOMEM);
1734 /* Node initialization. */
1735 strcpy(elem->name, name);
1736 memcpy(&elem->ops, ops, sizeof(*ops));
1738 /* Node add to tailq. */
1739 TAILQ_INSERT_TAIL(&p->port_out_types, elem, node);

/* Output port lookup by numeric port ID. */
1744 static struct port_out *
1745 port_out_find(struct rte_swx_pipeline *p, uint32_t port_id)
1747 struct port_out *port;
1749 TAILQ_FOREACH(port, &p->ports_out, node)
1750 if (port->id == port_id)
/*
 * Instantiate an output port of a registered type; mirrors
 * rte_swx_pipeline_port_in_config().
 */
1757 rte_swx_pipeline_port_out_config(struct rte_swx_pipeline *p,
1759 const char *port_type_name,
1762 struct port_out_type *type = NULL;
1763 struct port_out *port = NULL;
1768 CHECK(!port_out_find(p, port_id), EINVAL);
1770 CHECK_NAME(port_type_name, EINVAL);
1771 type = port_out_type_find(p, port_type_name);
1772 CHECK(type, EINVAL);
/* Underlying port object construction; failure check elided in this view. */
1774 obj = type->ops.create(args);
1777 /* Node allocation. */
1778 port = calloc(1, sizeof(struct port_out));
1779 CHECK(port, ENOMEM);
1781 /* Node initialization. */
1786 /* Node add to tailq. */
1787 TAILQ_INSERT_TAIL(&p->ports_out, port, node);
/* Track highest out-port ID + 1 (IDs may arrive out of order). */
1788 if (p->n_ports_out < port_id + 1)
1789 p->n_ports_out = port_id + 1;
/*
 * Build step for output ports: dense 0..n-1 IDs required (note: unlike
 * input ports, no power-of-two constraint), flattened into p->out.
 */
1795 port_out_build(struct rte_swx_pipeline *p)
1797 struct port_out *port;
1800 CHECK(p->n_ports_out, EINVAL);
1802 for (i = 0; i < p->n_ports_out; i++)
1803 CHECK(port_out_find(p, i), EINVAL);
1805 p->out = calloc(p->n_ports_out, sizeof(struct port_out_runtime));
1806 CHECK(p->out, ENOMEM);
1808 TAILQ_FOREACH(port, &p->ports_out, node) {
1809 struct port_out_runtime *out = &p->out[port->id];
/* Cache tx/flush callbacks and object for the fast path. */
1811 out->pkt_tx = port->type->ops.pkt_tx;
1812 out->flush = port->type->ops.flush;
1813 out->obj = port->obj;

/* Undo port_out_build() (body elided in this view). */
1820 port_out_build_free(struct rte_swx_pipeline *p)

/* Free all output ports and then all output port types. */
1827 port_out_free(struct rte_swx_pipeline *p)
1829 port_out_build_free(p);
1833 struct port_out *port;
1835 port = TAILQ_FIRST(&p->ports_out);
1839 TAILQ_REMOVE(&p->ports_out, port, node);
1840 port->type->ops.free(port->obj);
1844 /* Output port types. */
1846 struct port_out_type *elem;
1848 elem = TAILQ_FIRST(&p->port_out_types);
1852 TAILQ_REMOVE(&p->port_out_types, elem, node);
/* Extern type lookup by name. */
1860 static struct extern_type *
1861 extern_type_find(struct rte_swx_pipeline *p, const char *name)
1863 struct extern_type *elem;
1865 TAILQ_FOREACH(elem, &p->extern_types, node)
1866 if (strcmp(elem->name, name) == 0)

/* Member function lookup within one extern type. */
1872 static struct extern_type_member_func *
1873 extern_type_member_func_find(struct extern_type *type, const char *name)
1875 struct extern_type_member_func *elem;
1877 TAILQ_FOREACH(elem, &type->funcs, node)
1878 if (strcmp(elem->name, name) == 0)

/* Extern object lookup by name. */
1884 static struct extern_obj *
1885 extern_obj_find(struct rte_swx_pipeline *p, const char *name)
1887 struct extern_obj *elem;
1889 TAILQ_FOREACH(elem, &p->extern_objs, node)
1890 if (strcmp(elem->name, name) == 0)

/*
 * Parse "e.<object>.<func>" into an extern object + member function pair.
 * The strdup'd scratch buffer is split at the first '.'; the free() of that
 * buffer happens in lines elided from this view — confirm in full source.
 */
1896 static struct extern_type_member_func *
1897 extern_obj_member_func_parse(struct rte_swx_pipeline *p,
1899 struct extern_obj **obj)
1901 struct extern_obj *object;
1902 struct extern_type_member_func *func;
1903 char *object_name, *func_name;
/* Extern object references must carry the "e." prefix. */
1905 if (name[0] != 'e' || name[1] != '.')
1908 object_name = strdup(&name[2]);
1912 func_name = strchr(object_name, '.');
1921 object = extern_obj_find(p, object_name);
1927 func = extern_type_member_func_find(object->type, func_name);

/*
 * Parse "e.<object>.<field>" into an extern object + mailbox field pair;
 * same scratch-buffer pattern as above.
 */
1940 static struct field *
1941 extern_obj_mailbox_field_parse(struct rte_swx_pipeline *p,
1943 struct extern_obj **object)
1945 struct extern_obj *obj;
1947 char *obj_name, *field_name;
1949 if ((name[0] != 'e') || (name[1] != '.'))
1952 obj_name = strdup(&name[2]);
1956 field_name = strchr(obj_name, '.');
1965 obj = extern_obj_find(p, obj_name);
1971 f = struct_type_field_find(obj->type->mailbox_struct_type, field_name);
/*
 * Register an extern type: its mailbox struct type must already be
 * registered and fixed-size, and both constructor and destructor are
 * mandatory.
 */
1985 rte_swx_pipeline_extern_type_register(struct rte_swx_pipeline *p,
1987 const char *mailbox_struct_type_name,
1988 rte_swx_extern_type_constructor_t constructor,
1989 rte_swx_extern_type_destructor_t destructor)
1991 struct extern_type *elem;
1992 struct struct_type *mailbox_struct_type;
1996 CHECK_NAME(name, EINVAL);
1997 CHECK(!extern_type_find(p, name), EEXIST);
1999 CHECK_NAME(mailbox_struct_type_name, EINVAL);
2000 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
2001 CHECK(mailbox_struct_type, EINVAL);
/* Mailboxes are allocated with a fixed size at build time, so
 * variable-size struct types are rejected.
 */
2002 CHECK(!mailbox_struct_type->var_size, EINVAL);
2004 CHECK(constructor, EINVAL);
2005 CHECK(destructor, EINVAL);
2007 /* Node allocation. */
2008 elem = calloc(1, sizeof(struct extern_type));
2009 CHECK(elem, ENOMEM);
2011 /* Node initialization. */
2012 strcpy(elem->name, name);
2013 elem->mailbox_struct_type = mailbox_struct_type;
2014 elem->constructor = constructor;
2015 elem->destructor = destructor;
2016 TAILQ_INIT(&elem->funcs);
2018 /* Node add to tailq. */
2019 TAILQ_INSERT_TAIL(&p->extern_types, elem, node);

/*
 * Register a member function on an existing extern type.  The function ID
 * is its registration order (type->n_funcs), bounded by
 * RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX.
 */
2025 rte_swx_pipeline_extern_type_member_func_register(struct rte_swx_pipeline *p,
2026 const char *extern_type_name,
2028 rte_swx_extern_type_member_func_t member_func)
2030 struct extern_type *type;
2031 struct extern_type_member_func *type_member;
2035 CHECK_NAME(extern_type_name, EINVAL);
2036 type = extern_type_find(p, extern_type_name);
2037 CHECK(type, EINVAL);
2038 CHECK(type->n_funcs < RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX, ENOSPC);
2040 CHECK_NAME(name, EINVAL);
2041 CHECK(!extern_type_member_func_find(type, name), EEXIST);
2043 CHECK(member_func, EINVAL);
2045 /* Node allocation. */
2046 type_member = calloc(1, sizeof(struct extern_type_member_func));
2047 CHECK(type_member, ENOMEM);
2049 /* Node initialization. */
2050 strcpy(type_member->name, name);
2051 type_member->func = member_func;
/* ID = position in registration order; used to index r->funcs[] at runtime. */
2052 type_member->id = type->n_funcs;
2054 /* Node add to tailq. */
2055 TAILQ_INSERT_TAIL(&type->funcs, type_member, node);
/*
 * Instantiate an extern object of a registered extern type.  Each object
 * claims the next struct slot (p->n_structs) for its per-thread mailbox.
 */
2062 rte_swx_pipeline_extern_object_config(struct rte_swx_pipeline *p,
2063 const char *extern_type_name,
2067 struct extern_type *type;
2068 struct extern_obj *obj;
2073 CHECK_NAME(extern_type_name, EINVAL);
2074 type = extern_type_find(p, extern_type_name);
2075 CHECK(type, EINVAL);
2077 CHECK_NAME(name, EINVAL);
2078 CHECK(!extern_obj_find(p, name), EEXIST);
2080 /* Node allocation. */
2081 obj = calloc(1, sizeof(struct extern_obj));
/* NOTE(review): obj NULL check and obj_handle failure handling are in
 * lines elided from this view — presumed present; confirm in full source.
 */
2084 /* Object construction. */
2085 obj_handle = type->constructor(args);
2091 /* Node initialization. */
2092 strcpy(obj->name, name);
2094 obj->obj = obj_handle;
/* Mailbox slot in each thread's structs[] array. */
2095 obj->struct_id = p->n_structs;
2096 obj->id = p->n_extern_objs;
2098 /* Node add to tailq. */
2099 TAILQ_INSERT_TAIL(&p->extern_objs, obj, node);
/*
 * Build step for extern objects: per thread, allocate the runtime array
 * and, per object, a zeroed mailbox (sized from the mailbox struct type)
 * plus a flat function-pointer table indexed by member function ID.
 */
2107 extern_obj_build(struct rte_swx_pipeline *p)
2111 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2112 struct thread *t = &p->threads[i];
2113 struct extern_obj *obj;
2115 t->extern_objs = calloc(p->n_extern_objs,
2116 sizeof(struct extern_obj_runtime));
2117 CHECK(t->extern_objs, ENOMEM);
2119 TAILQ_FOREACH(obj, &p->extern_objs, node) {
2120 struct extern_obj_runtime *r =
2121 &t->extern_objs[obj->id];
2122 struct extern_type_member_func *func;
/* Mailbox struct types are fixed-size (enforced at registration),
 * so n_bits / 8 is the full size.
 */
2123 uint32_t mailbox_size =
2124 obj->type->mailbox_struct_type->n_bits / 8;
2128 r->mailbox = calloc(1, mailbox_size);
2129 CHECK(r->mailbox, ENOMEM);
/* Flatten the member-function list into an ID-indexed table. */
2131 TAILQ_FOREACH(func, &obj->type->funcs, node)
2132 r->funcs[func->id] = func->func;
/* Expose the mailbox through the generic struct slot. */
2134 t->structs[obj->struct_id] = r->mailbox;

/* Undo extern_obj_build(): free each mailbox, then the per-thread array. */
2142 extern_obj_build_free(struct rte_swx_pipeline *p)
2146 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2147 struct thread *t = &p->threads[i];
2150 if (!t->extern_objs)
2153 for (j = 0; j < p->n_extern_objs; j++) {
2154 struct extern_obj_runtime *r = &t->extern_objs[j];
2159 free(t->extern_objs);
2160 t->extern_objs = NULL;

/* Free all extern objects (via each type's destructor), then all extern
 * types together with their member-function lists.
 */
2165 extern_obj_free(struct rte_swx_pipeline *p)
2167 extern_obj_build_free(p);
2169 /* Extern objects. */
2171 struct extern_obj *elem;
2173 elem = TAILQ_FIRST(&p->extern_objs);
2177 TAILQ_REMOVE(&p->extern_objs, elem, node);
2179 elem->type->destructor(elem->obj);
2185 struct extern_type *elem;
2187 elem = TAILQ_FIRST(&p->extern_types);
2191 TAILQ_REMOVE(&p->extern_types, elem, node);
2194 struct extern_type_member_func *func;
2196 func = TAILQ_FIRST(&elem->funcs);
2200 TAILQ_REMOVE(&elem->funcs, func, node);
/* Extern function lookup by name. */
2211 static struct extern_func *
2212 extern_func_find(struct rte_swx_pipeline *p, const char *name)
2214 struct extern_func *elem;
2216 TAILQ_FOREACH(elem, &p->extern_funcs, node)
2217 if (strcmp(elem->name, name) == 0)

/* Parse "f.<func>": strip the prefix and look the function up. */
2223 static struct extern_func *
2224 extern_func_parse(struct rte_swx_pipeline *p,
2227 if (name[0] != 'f' || name[1] != '.')
2230 return extern_func_find(p, &name[2]);

/*
 * Parse "f.<func>.<field>" into an extern function + mailbox field pair;
 * same strdup/split-at-dot pattern as the extern-object parsers (buffer
 * free is in lines elided from this view).
 */
2233 static struct field *
2234 extern_func_mailbox_field_parse(struct rte_swx_pipeline *p,
2236 struct extern_func **function)
2238 struct extern_func *func;
2240 char *func_name, *field_name;
2242 if ((name[0] != 'f') || (name[1] != '.'))
2245 func_name = strdup(&name[2]);
2249 field_name = strchr(func_name, '.');
2258 func = extern_func_find(p, func_name);
2264 f = struct_type_field_find(func->mailbox_struct_type, field_name);
/*
 * Register an extern function with its own mailbox struct type (which must
 * be fixed-size).  The function claims the next struct slot for its
 * per-thread mailbox, like extern objects do.
 */
2278 rte_swx_pipeline_extern_func_register(struct rte_swx_pipeline *p,
2280 const char *mailbox_struct_type_name,
2281 rte_swx_extern_func_t func)
2283 struct extern_func *f;
2284 struct struct_type *mailbox_struct_type;
2288 CHECK_NAME(name, EINVAL);
2289 CHECK(!extern_func_find(p, name), EEXIST);
2291 CHECK_NAME(mailbox_struct_type_name, EINVAL);
2292 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
2293 CHECK(mailbox_struct_type, EINVAL);
2294 CHECK(!mailbox_struct_type->var_size, EINVAL);
2296 CHECK(func, EINVAL);
2298 /* Node allocation. */
2299 f = calloc(1, sizeof(struct extern_func));
/* NOTE(review): BUG — this checks 'func' (the callback parameter, already
 * validated non-NULL above) instead of 'f', the buffer just allocated.
 * A calloc() failure is therefore not caught and strcpy(f->name, ...)
 * below dereferences NULL.  Should read: CHECK(f, ENOMEM);
 */
2300 CHECK(func, ENOMEM);
2302 /* Node initialization. */
2303 strcpy(f->name, name);
2304 f->mailbox_struct_type = mailbox_struct_type;
/* Mailbox slot in each thread's structs[] array. */
2306 f->struct_id = p->n_structs;
2307 f->id = p->n_extern_funcs;
2309 /* Node add to tailq. */
2310 TAILQ_INSERT_TAIL(&p->extern_funcs, f, node);
2311 p->n_extern_funcs++;
/*
 * Build step for extern functions: per thread, allocate the runtime array
 * and per function a zeroed mailbox, exposing it via the generic struct
 * slot — mirrors extern_obj_build().
 */
2318 extern_func_build(struct rte_swx_pipeline *p)
2322 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2323 struct thread *t = &p->threads[i];
2324 struct extern_func *func;
2326 /* Memory allocation. */
2327 t->extern_funcs = calloc(p->n_extern_funcs,
2328 sizeof(struct extern_func_runtime));
2329 CHECK(t->extern_funcs, ENOMEM);
2331 /* Extern function. */
2332 TAILQ_FOREACH(func, &p->extern_funcs, node) {
2333 struct extern_func_runtime *r =
2334 &t->extern_funcs[func->id];
2335 uint32_t mailbox_size =
2336 func->mailbox_struct_type->n_bits / 8;
2338 r->func = func->func;
2340 r->mailbox = calloc(1, mailbox_size);
2341 CHECK(r->mailbox, ENOMEM);
2343 t->structs[func->struct_id] = r->mailbox;

/* Undo extern_func_build(): free each mailbox, then the per-thread array. */
2351 extern_func_build_free(struct rte_swx_pipeline *p)
2355 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2356 struct thread *t = &p->threads[i];
2359 if (!t->extern_funcs)
2362 for (j = 0; j < p->n_extern_funcs; j++) {
2363 struct extern_func_runtime *r = &t->extern_funcs[j];
2368 free(t->extern_funcs);
2369 t->extern_funcs = NULL;

/* Free all extern function nodes (drains the tailq head-first). */
2374 extern_func_free(struct rte_swx_pipeline *p)
2376 extern_func_build_free(p);
2379 struct extern_func *elem;
2381 elem = TAILQ_FIRST(&p->extern_funcs);
2385 TAILQ_REMOVE(&p->extern_funcs, elem, node);
/* Header lookup by name. */
2393 static struct header *
2394 header_find(struct rte_swx_pipeline *p, const char *name)
2396 struct header *elem;
2398 TAILQ_FOREACH(elem, &p->headers, node)
2399 if (strcmp(elem->name, name) == 0)

/* Header lookup by its struct slot ID (reverse mapping). */
2405 static struct header *
2406 header_find_by_struct_id(struct rte_swx_pipeline *p, uint32_t struct_id)
2408 struct header *elem;
2410 TAILQ_FOREACH(elem, &p->headers, node)
2411 if (elem->struct_id == struct_id)

/* Parse "h.<header>": strip the prefix and look the header up. */
2417 static struct header *
2418 header_parse(struct rte_swx_pipeline *p,
2421 if (name[0] != 'h' || name[1] != '.')
2424 return header_find(p, &name[2]);

/*
 * Parse "h.<header>.<field>" into a header + field pair; same
 * strdup/split-at-dot pattern as the extern parsers (buffer free is in
 * lines elided from this view).
 */
2427 static struct field *
2428 header_field_parse(struct rte_swx_pipeline *p,
2430 struct header **header)
2434 char *header_name, *field_name;
2436 if ((name[0] != 'h') || (name[1] != '.'))
2439 header_name = strdup(&name[2]);
2443 field_name = strchr(header_name, '.');
2452 h = header_find(p, header_name);
2458 f = struct_type_field_find(h->st, field_name);
/*
 * Register a packet header of a given struct type.  The header count is
 * capped by the bit width of the thread's valid_headers mask (each header
 * gets one validity bit), and each header claims the next struct slot.
 */
2472 rte_swx_pipeline_packet_header_register(struct rte_swx_pipeline *p,
2474 const char *struct_type_name)
2476 struct struct_type *st;
2478 size_t n_headers_max;
2481 CHECK_NAME(name, EINVAL);
2482 CHECK_NAME(struct_type_name, EINVAL);
2484 CHECK(!header_find(p, name), EEXIST);
2486 st = struct_type_find(p, struct_type_name);
/* One validity bit per header in thread->valid_headers. */
2489 n_headers_max = RTE_SIZEOF_FIELD(struct thread, valid_headers) * 8;
2490 CHECK(p->n_headers < n_headers_max, ENOSPC);
2492 /* Node allocation. */
2493 h = calloc(1, sizeof(struct header));
/* NOTE(review): the NULL check for h is in a line elided from this view. */
2496 /* Node initialization. */
2497 strcpy(h->name, name);
2499 h->struct_id = p->n_structs;
2500 h->id = p->n_headers;
2502 /* Node add to tailq. */
2503 TAILQ_INSERT_TAIL(&p->headers, h, node);
/*
 * Build step for headers: size one contiguous storage buffer for all
 * headers, then per thread allocate header/header-out runtime arrays plus
 * in/out storage, and carve each header's slice out of the storage.
 */
2511 header_build(struct rte_swx_pipeline *p)
2514 uint32_t n_bytes = 0, i;
/* Total storage = sum of all header sizes. */
2516 TAILQ_FOREACH(h, &p->headers, node) {
2517 n_bytes += h->st->n_bits / 8;
2520 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2521 struct thread *t = &p->threads[i];
2522 uint32_t offset = 0;
2524 t->headers = calloc(p->n_headers,
2525 sizeof(struct header_runtime));
2526 CHECK(t->headers, ENOMEM);
2528 t->headers_out = calloc(p->n_headers,
2529 sizeof(struct header_out_runtime));
2530 CHECK(t->headers_out, ENOMEM);
2532 t->header_storage = calloc(1, n_bytes);
2533 CHECK(t->header_storage, ENOMEM);
2535 t->header_out_storage = calloc(1, n_bytes);
2536 CHECK(t->header_out_storage, ENOMEM);
2538 TAILQ_FOREACH(h, &p->headers, node) {
2539 uint8_t *header_storage;
/* Inner n_bytes intentionally shadows the total: per-header size here. */
2540 uint32_t n_bytes = h->st->n_bits / 8;
2542 header_storage = &t->header_storage[offset];
/* ptr0 is the header's home slice in storage; the live pointer
 * (structs[struct_id]) may later point into the packet instead.
 */
2545 t->headers[h->id].ptr0 = header_storage;
2546 t->headers[h->id].n_bytes = n_bytes;
2548 t->structs[h->struct_id] = header_storage;

/* Undo header_build(): release per-thread header arrays and storage. */
2556 header_build_free(struct rte_swx_pipeline *p)
2560 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2561 struct thread *t = &p->threads[i];
2563 free(t->headers_out);
2564 t->headers_out = NULL;
2569 free(t->header_out_storage);
2570 t->header_out_storage = NULL;
2572 free(t->header_storage);
2573 t->header_storage = NULL;

/* Free all registered header nodes. */
2578 header_free(struct rte_swx_pipeline *p)
2580 header_build_free(p);
2583 struct header *elem;
2585 elem = TAILQ_FIRST(&p->headers);
2589 TAILQ_REMOVE(&p->headers, elem, node);
/* Parse "m.<field>" into a metadata struct field; requires a metadata
 * struct type to have been registered first.
 */
2597 static struct field *
2598 metadata_field_parse(struct rte_swx_pipeline *p, const char *name)
2600 if (!p->metadata_st)
2603 if (name[0] != 'm' || name[1] != '.')
2606 return struct_type_field_find(p->metadata_st, &name[2]);

/*
 * Select the pipeline's (single) metadata struct type.  Must be fixed-size
 * and can only be set once; metadata claims the next struct slot.
 */
2610 rte_swx_pipeline_packet_metadata_register(struct rte_swx_pipeline *p,
2611 const char *struct_type_name)
2613 struct struct_type *st = NULL;
2617 CHECK_NAME(struct_type_name, EINVAL);
2618 st = struct_type_find(p, struct_type_name);
2620 CHECK(!st->var_size, EINVAL);
/* Only one metadata struct per pipeline. */
2621 CHECK(!p->metadata_st, EINVAL);
2623 p->metadata_st = st;
2624 p->metadata_struct_id = p->n_structs;

/* Build step: allocate one zeroed metadata buffer per thread and expose it
 * through the generic struct slot.
 */
2632 metadata_build(struct rte_swx_pipeline *p)
2634 uint32_t n_bytes = p->metadata_st->n_bits / 8;
2637 /* Thread-level initialization. */
2638 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2639 struct thread *t = &p->threads[i];
2642 metadata = calloc(1, n_bytes);
2643 CHECK(metadata, ENOMEM);
2645 t->metadata = metadata;
2646 t->structs[p->metadata_struct_id] = metadata;

/* Undo metadata_build() (free loop body elided in this view). */
2653 metadata_build_free(struct rte_swx_pipeline *p)
2657 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2658 struct thread *t = &p->threads[i];

/* Metadata teardown entry point. */
2666 metadata_free(struct rte_swx_pipeline *p)
2668 metadata_build_free(p);
/* Predicate: is this instruction type a TX variant? (body elided). */
2675 instruction_is_tx(enum instruction_type type)

/* Predicate: is this a jump instruction?  Enumerates every jump opcode
 * (unconditional/valid/invalid/hit/miss/action and all typed comparison
 * forms); used by the instruction translator for label resolution.
 */
2688 instruction_is_jmp(struct instruction *instr)
2690 switch (instr->type) {
2692 case INSTR_JMP_VALID:
2693 case INSTR_JMP_INVALID:
2695 case INSTR_JMP_MISS:
2696 case INSTR_JMP_ACTION_HIT:
2697 case INSTR_JMP_ACTION_MISS:
2699 case INSTR_JMP_EQ_MH:
2700 case INSTR_JMP_EQ_HM:
2701 case INSTR_JMP_EQ_HH:
2702 case INSTR_JMP_EQ_I:
2704 case INSTR_JMP_NEQ_MH:
2705 case INSTR_JMP_NEQ_HM:
2706 case INSTR_JMP_NEQ_HH:
2707 case INSTR_JMP_NEQ_I:
2709 case INSTR_JMP_LT_MH:
2710 case INSTR_JMP_LT_HM:
2711 case INSTR_JMP_LT_HH:
2712 case INSTR_JMP_LT_MI:
2713 case INSTR_JMP_LT_HI:
2715 case INSTR_JMP_GT_MH:
2716 case INSTR_JMP_GT_HM:
2717 case INSTR_JMP_GT_HH:
2718 case INSTR_JMP_GT_MI:
2719 case INSTR_JMP_GT_HI:

/* Forward declaration: action argument field lookup. */
2727 static struct field *
2728 action_field_parse(struct action *action, const char *name);

/*
 * Generic operand parser: resolves a "h."/"m."/action-arg/"e."/"f." name
 * into a struct field and the ID of the struct it lives in.
 */
2730 static struct field *
2731 struct_field_parse(struct rte_swx_pipeline *p,
2732 struct action *action,
2734 uint32_t *struct_id)
/* "h.<header>.<field>" case. */
2741 struct header *header;
2743 f = header_field_parse(p, name, &header);
2747 *struct_id = header->struct_id;
/* "m.<field>" case. */
2753 f = metadata_field_parse(p, name);
2757 *struct_id = p->metadata_struct_id;
/* Action argument case (only valid inside an action body). */
2766 f = action_field_parse(action, name);
/* "e.<obj>.<mailbox field>" case. */
2776 struct extern_obj *obj;
2778 f = extern_obj_mailbox_field_parse(p, name, &obj);
2782 *struct_id = obj->struct_id;
/* "f.<func>.<mailbox field>" case. */
2788 struct extern_func *func;
2790 f = extern_func_mailbox_field_parse(p, name, &func);
2794 *struct_id = func->struct_id;
/* Round-robin to the next input port; relies on n_ports_in being a power
 * of two (enforced in port_in_build()).
 */
2804 pipeline_port_inc(struct rte_swx_pipeline *p)
2806 p->port_id = (p->port_id + 1) & (p->n_ports_in - 1);

/* Reset a thread's instruction pointer to the start of the program. */
2810 thread_ip_reset(struct rte_swx_pipeline *p, struct thread *t)
2812 t->ip = p->instructions;

/* Set a thread's instruction pointer (body elided in this view). */
2816 thread_ip_set(struct thread *t, struct instruction *ip)

/* Jump into an action's instruction stream. */
2822 thread_ip_action_call(struct rte_swx_pipeline *p,
2827 t->ip = p->action_instructions[action_id];

/* Advance the current thread's instruction pointer by one. */
2831 thread_ip_inc(struct rte_swx_pipeline *p);
2834 thread_ip_inc(struct rte_swx_pipeline *p)
2836 struct thread *t = &p->threads[p->thread_id];

/* Conditionally advance the IP: branch-free (adds 0 or 1). */
2842 thread_ip_inc_cond(struct thread *t, int cond)

/* Pass control to the next thread (power-of-two thread count assumed by
 * the mask).
 */
2848 thread_yield(struct rte_swx_pipeline *p)
2850 p->thread_id = (p->thread_id + 1) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);

/* Conditionally yield: advances thread_id by cond (0 or 1), branch-free. */
2854 thread_yield_cond(struct rte_swx_pipeline *p, int cond)
2856 p->thread_id = (p->thread_id + cond) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
/*
 * Translate "rx m.<field>": the metadata field receives the input port ID.
 * Not allowed inside an action.
 */
2863 instr_rx_translate(struct rte_swx_pipeline *p,
2864 struct action *action,
2867 struct instruction *instr,
2868 struct instruction_data *data __rte_unused)
2872 CHECK(!action, EINVAL);
2873 CHECK(n_tokens == 2, EINVAL);
2875 f = metadata_field_parse(p, tokens[1]);
2878 instr->type = INSTR_RX;
/* Store byte offset + bit width of the destination metadata field. */
2879 instr->io.io.offset = f->offset / 8;
2880 instr->io.io.n_bits = f->n_bits;

/*
 * RX execution: poll the current input port; on packet arrival reset the
 * per-packet thread state and record the port ID into metadata, otherwise
 * stay on the rx instruction.  Always advances to the next input port.
 */
2885 instr_rx_exec(struct rte_swx_pipeline *p);
2888 instr_rx_exec(struct rte_swx_pipeline *p)
2890 struct thread *t = &p->threads[p->thread_id];
2891 struct instruction *ip = t->ip;
2892 struct port_in_runtime *port = &p->in[p->port_id];
2893 struct rte_swx_pkt *pkt = &t->pkt;
2897 pkt_received = port->pkt_rx(port->obj, pkt);
2898 t->ptr = &pkt->pkt[pkt->offset];
2899 rte_prefetch0(t->ptr);
2901 TRACE("[Thread %2u] rx %s from port %u\n",
2903 pkt_received ? "1 pkt" : "0 pkts",
/* Fresh packet: no headers parsed yet, no headers queued for emit. */
2907 t->valid_headers = 0;
2908 t->n_headers_out = 0;
/* Write the input port ID into the configured metadata field. */
2911 METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, p->port_id);
/* Snapshot the table state pointer for this packet's lifetime. */
2914 t->table_state = p->table_state;
2917 pipeline_port_inc(p);
/* Only move past rx if a packet was actually received. */
2918 thread_ip_inc_cond(t, pkt_received);
/*
 * Translate "tx <port>": the port operand is either a metadata field
 * (INSTR_TX, port read at run time) or an immediate integer (INSTR_TX_I).
 */
2926 instr_tx_translate(struct rte_swx_pipeline *p,
2927 struct action *action __rte_unused,
2930 struct instruction *instr,
2931 struct instruction_data *data __rte_unused)
2933 char *port = tokens[1];
2937 CHECK(n_tokens == 2, EINVAL);
/* Metadata operand form: "tx m.<field>". */
2939 f = metadata_field_parse(p, port);
2941 instr->type = INSTR_TX;
2942 instr->io.io.offset = f->offset / 8;
2943 instr->io.io.n_bits = f->n_bits;
/* Immediate operand form: "tx <int>"; strtoul must consume the whole
 * token.
 */
2948 port_val = strtoul(port, &port, 0);
2949 CHECK(!port[0], EINVAL);
2951 instr->type = INSTR_TX_I;
2952 instr->io.io.val = port_val;

/*
 * Translate "drop": encoded as an immediate tx to the last output port,
 * which by convention is the drop port.
 */
2957 instr_drop_translate(struct rte_swx_pipeline *p,
2958 struct action *action __rte_unused,
2959 char **tokens __rte_unused,
2961 struct instruction *instr,
2962 struct instruction_data *data __rte_unused)
2964 CHECK(n_tokens == 1, EINVAL);
2967 instr->type = INSTR_TX_I;
/* Port n_ports_out - 1 is reserved as the drop port by convention. */
2968 instr->io.io.val = p->n_ports_out - 1;
/*
 * Finalize the headers queued for emission before tx, choosing the
 * cheapest strategy:
 *  1) single contiguous out-header ending at the payload: just extend the
 *     packet window backwards (no copy);
 *  2) two out-fragments where the second already sits in front of the
 *     payload and the first lives in header storage: one memcpy of the
 *     first fragment;
 *  3) anything else: gather all fragments into header_out_storage, then
 *     one bulk copy in front of the payload.
 */
2973 emit_handler(struct thread *t)
2975 struct header_out_runtime *h0 = &t->headers_out[0];
2976 struct header_out_runtime *h1 = &t->headers_out[1];
2977 uint32_t offset = 0, i;
2979 /* No header change or header decapsulation. */
2980 if ((t->n_headers_out == 1) &&
2981 (h0->ptr + h0->n_bytes == t->ptr)) {
2982 TRACE("Emit handler: no header change or header decap.\n");
/* Headers are already in place: just grow the packet window. */
2984 t->pkt.offset -= h0->n_bytes;
2985 t->pkt.length += h0->n_bytes;
2990 /* Header encapsulation (optionally, with prior header decasulation). */
2991 if ((t->n_headers_out == 2) &&
2992 (h1->ptr + h1->n_bytes == t->ptr) &&
2993 (h0->ptr == h0->ptr0)) {
2996 TRACE("Emit handler: header encapsulation.\n");
2998 offset = h0->n_bytes + h1->n_bytes;
/* Copy only the new (first) header in front of the in-place second. */
2999 memcpy(t->ptr - offset, h0->ptr, h0->n_bytes);
3000 t->pkt.offset -= offset;
3001 t->pkt.length += offset;
3006 /* Header insertion. */
3009 /* Header extraction. */
3012 /* For any other case. */
3013 TRACE("Emit handler: complex case.\n");
/* Gather pass: serialize all out-fragments into scratch storage ... */
3015 for (i = 0; i < t->n_headers_out; i++) {
3016 struct header_out_runtime *h = &t->headers_out[i];
3018 memcpy(&t->header_out_storage[offset], h->ptr, h->n_bytes);
3019 offset += h->n_bytes;
/* ... then place them as one block directly in front of the payload. */
3023 memcpy(t->ptr - offset, t->header_out_storage, offset);
3024 t->pkt.offset -= offset;
3025 t->pkt.length += offset;
/*
 * TX execution (port from metadata): read the out-port ID from metadata,
 * finalize queued headers via emit_handler (call elided in this view),
 * hand the packet to the port, and restart the program for this thread.
 */
3030 instr_tx_exec(struct rte_swx_pipeline *p);
3033 instr_tx_exec(struct rte_swx_pipeline *p)
3035 struct thread *t = &p->threads[p->thread_id];
3036 struct instruction *ip = t->ip;
3037 uint64_t port_id = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
3038 struct port_out_runtime *port = &p->out[port_id];
3039 struct rte_swx_pkt *pkt = &t->pkt;
3041 TRACE("[Thread %2u]: tx 1 pkt to port %u\n",
3049 port->pkt_tx(port->obj, pkt);
/* Packet done: next iteration starts from the program's first instruction. */
3052 thread_ip_reset(p, t);

/* TX execution, immediate-port variant: identical except the out-port ID
 * is baked into the instruction.
 */
3057 instr_tx_i_exec(struct rte_swx_pipeline *p)
3059 struct thread *t = &p->threads[p->thread_id];
3060 struct instruction *ip = t->ip;
3061 uint64_t port_id = ip->io.io.val;
3062 struct port_out_runtime *port = &p->out[port_id];
3063 struct rte_swx_pkt *pkt = &t->pkt;
3065 TRACE("[Thread %2u]: tx (i) 1 pkt to port %u\n",
3073 port->pkt_tx(port->obj, pkt);
3076 thread_ip_reset(p, t);
/*
 * Translate "extract h.<header> [m.<len field>]": two-token form for
 * fixed-size headers (INSTR_HDR_EXTRACT), three-token form for
 * variable-size headers where a metadata field supplies the extra length
 * of the last field (INSTR_HDR_EXTRACT_M).  Not allowed inside an action.
 */
3084 instr_hdr_extract_translate(struct rte_swx_pipeline *p,
3085 struct action *action,
3088 struct instruction *instr,
3089 struct instruction_data *data __rte_unused)
3093 CHECK(!action, EINVAL);
3094 CHECK((n_tokens == 2) || (n_tokens == 3), EINVAL);
3096 h = header_parse(p, tokens[1]);
3099 if (n_tokens == 2) {
/* Fixed-size form requires a fixed-size header type. */
3100 CHECK(!h->st->var_size, EINVAL);
3102 instr->type = INSTR_HDR_EXTRACT;
3103 instr->io.hdr.header_id[0] = h->id;
3104 instr->io.hdr.struct_id[0] = h->struct_id;
3105 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
/* Variable-size form requires a variable-size header type. */
3109 CHECK(h->st->var_size, EINVAL);
3111 mf = metadata_field_parse(p, tokens[2]);
3113 CHECK(!mf->var_size, EINVAL);
3115 instr->type = INSTR_HDR_EXTRACT_M;
/* io half: where to read the extra length from metadata ... */
3116 instr->io.io.offset = mf->offset / 8;
3117 instr->io.io.n_bits = mf->n_bits;
/* ... hdr half: the header and its minimum (fixed-part) size. */
3118 instr->io.hdr.header_id[0] = h->id;
3119 instr->io.hdr.struct_id[0] = h->struct_id;
3120 instr->io.hdr.n_bytes[0] = h->st->n_bits_min / 8;
/*
 * Common extract executor: peel n_extract fixed-size headers off the front
 * of the packet in one pass.  Headers are parsed zero-copy — each header's
 * struct pointer is aimed directly into the packet buffer — and the
 * corresponding validity bits are set.
 */
3127 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract);
3130 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract)
3132 struct thread *t = &p->threads[p->thread_id];
3133 struct instruction *ip = t->ip;
3134 uint64_t valid_headers = t->valid_headers;
3135 uint8_t *ptr = t->ptr;
3136 uint32_t offset = t->pkt.offset;
3137 uint32_t length = t->pkt.length;
3140 for (i = 0; i < n_extract; i++) {
3141 uint32_t header_id = ip->io.hdr.header_id[i];
3142 uint32_t struct_id = ip->io.hdr.struct_id[i];
3143 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
3145 TRACE("[Thread %2u]: extract header %u (%u bytes)\n",
/* Zero-copy: point the header struct into the packet itself. */
3151 t->structs[struct_id] = ptr;
3152 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
/* Write back accumulated parse state (advance of ptr/offset/length
 * per header is in lines elided from this view).
 */
3161 t->valid_headers = valid_headers;
3164 t->pkt.offset = offset;
3165 t->pkt.length = length;

/* Fused extract variants: one opcode extracts 1..8 consecutive headers. */
3170 instr_hdr_extract_exec(struct rte_swx_pipeline *p)
3172 __instr_hdr_extract_exec(p, 1);
3179 instr_hdr_extract2_exec(struct rte_swx_pipeline *p)
3181 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
3184 __instr_hdr_extract_exec(p, 2);
3191 instr_hdr_extract3_exec(struct rte_swx_pipeline *p)
3193 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
3196 __instr_hdr_extract_exec(p, 3);
3203 instr_hdr_extract4_exec(struct rte_swx_pipeline *p)
3205 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
3208 __instr_hdr_extract_exec(p, 4);
3215 instr_hdr_extract5_exec(struct rte_swx_pipeline *p)
3217 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
3220 __instr_hdr_extract_exec(p, 5);
3227 instr_hdr_extract6_exec(struct rte_swx_pipeline *p)
3229 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
3232 __instr_hdr_extract_exec(p, 6);
3239 instr_hdr_extract7_exec(struct rte_swx_pipeline *p)
3241 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
3244 __instr_hdr_extract_exec(p, 7);
3251 instr_hdr_extract8_exec(struct rte_swx_pipeline *p)
3253 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
3256 __instr_hdr_extract_exec(p, 8);
/*
 * Variable-size extract executor: the header's total size is its fixed
 * minimum (n_bytes[0]) plus an extra length read from the configured
 * metadata field.  Zero-copy like the fixed-size path, but the actual
 * size is also recorded in the header runtime for later emit.
 */
3263 instr_hdr_extract_m_exec(struct rte_swx_pipeline *p)
3265 struct thread *t = &p->threads[p->thread_id];
3266 struct instruction *ip = t->ip;
3268 uint64_t valid_headers = t->valid_headers;
3269 uint8_t *ptr = t->ptr;
3270 uint32_t offset = t->pkt.offset;
3271 uint32_t length = t->pkt.length;
/* Extra byte count of the variable-size last field, from metadata. */
3273 uint32_t n_bytes_last = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
3274 uint32_t header_id = ip->io.hdr.header_id[0];
3275 uint32_t struct_id = ip->io.hdr.struct_id[0];
3276 uint32_t n_bytes = ip->io.hdr.n_bytes[0];
3278 struct header_runtime *h = &t->headers[header_id];
3280 TRACE("[Thread %2u]: extract header %u (%u + %u bytes)\n",
/* Total size = fixed part + variable tail. */
3286 n_bytes += n_bytes_last;
3289 t->structs[struct_id] = ptr;
3290 t->valid_headers = MASK64_BIT_SET(valid_headers, header_id);
/* Remember the actual size — needed because it differs per packet. */
3291 h->n_bytes = n_bytes;
/* Consume the header from the packet window. */
3294 t->pkt.offset = offset + n_bytes;
3295 t->pkt.length = length - n_bytes;
3296 t->ptr = ptr + n_bytes;
/*
 * Translate "emit h.<header>": queue a header for emission at tx time.
 * Note the emitted size recorded here is the full fixed size (n_bits / 8);
 * for variable-size headers the runtime size from header_runtime is used
 * by the executor instead.
 */
3306 instr_hdr_emit_translate(struct rte_swx_pipeline *p,
3307 struct action *action __rte_unused,
3310 struct instruction *instr,
3311 struct instruction_data *data __rte_unused)
3315 CHECK(n_tokens == 2, EINVAL);
3317 h = header_parse(p, tokens[1]);
3320 instr->type = INSTR_HDR_EMIT;
3321 instr->io.hdr.header_id[0] = h->id;
3322 instr->io.hdr.struct_id[0] = h->struct_id;
3323 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
/*
 * Common emit executor: append up to n_emit valid headers to the thread's
 * out-header list.  Adjacent headers (current fragment end == next header
 * start) are coalesced into one out-fragment so emit_handler() can later
 * use the cheap no-copy / single-copy paths; invalid headers are skipped.
 */
3328 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit);
3331 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit)
3333 struct thread *t = &p->threads[p->thread_id];
3334 struct instruction *ip = t->ip;
3335 uint64_t valid_headers = t->valid_headers;
3336 uint32_t n_headers_out = t->n_headers_out;
/* Start from the last out-fragment so we can extend it in place. */
3337 struct header_out_runtime *ho = &t->headers_out[n_headers_out - 1];
3338 uint8_t *ho_ptr = NULL;
3339 uint32_t ho_nbytes = 0, first = 1, i;
3341 for (i = 0; i < n_emit; i++) {
3342 uint32_t header_id = ip->io.hdr.header_id[i];
3343 uint32_t struct_id = ip->io.hdr.struct_id[i];
3345 struct header_runtime *hi = &t->headers[header_id];
3346 uint8_t *hi_ptr0 = hi->ptr0;
/* Runtime size: correct for variable-size headers too. */
3347 uint32_t n_bytes = hi->n_bytes;
3349 uint8_t *hi_ptr = t->structs[struct_id];
/* Silently skip headers that were never validated/extracted. */
3351 if (!MASK64_BIT_GET(valid_headers, header_id))
3354 TRACE("[Thread %2u]: emit header %u\n",
/* First emitted header ever: open out-fragment 0. */
3362 if (!t->n_headers_out) {
3363 ho = &t->headers_out[0];
3369 ho_nbytes = n_bytes;
3376 ho_nbytes = ho->n_bytes;
/* Contiguous with the current fragment: coalesce ... */
3380 if (ho_ptr + ho_nbytes == hi_ptr) {
3381 ho_nbytes += n_bytes;
/* ... otherwise close the fragment and open a new one. */
3383 ho->n_bytes = ho_nbytes;
3390 ho_nbytes = n_bytes;
/* Flush the accumulated size of the last open fragment. */
3396 ho->n_bytes = ho_nbytes;
3397 t->n_headers_out = n_headers_out;
/* Plain "emit" instruction: emit exactly one header. */
3401 instr_hdr_emit_exec(struct rte_swx_pipeline *p)
3403 __instr_hdr_emit_exec(p, 1);
/*
 * Fused emit + tx executors. instr_hdr_emit_tx_exec handles one emit
 * fused with a tx; instr_hdr_emitN_tx_exec (N = 2..8) handles N
 * back-to-back emits fused with the trailing tx, i.e. N + 1 original
 * instructions executed as a single one (see the TRACE messages).
 */
3410 instr_hdr_emit_tx_exec(struct rte_swx_pipeline *p)
3412 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
3415 __instr_hdr_emit_exec(p, 1);
3420 instr_hdr_emit2_tx_exec(struct rte_swx_pipeline *p)
3422 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
3425 __instr_hdr_emit_exec(p, 2);
3430 instr_hdr_emit3_tx_exec(struct rte_swx_pipeline *p)
3432 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
3435 __instr_hdr_emit_exec(p, 3);
3440 instr_hdr_emit4_tx_exec(struct rte_swx_pipeline *p)
3442 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
3445 __instr_hdr_emit_exec(p, 4);
3450 instr_hdr_emit5_tx_exec(struct rte_swx_pipeline *p)
3452 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
3455 __instr_hdr_emit_exec(p, 5);
3460 instr_hdr_emit6_tx_exec(struct rte_swx_pipeline *p)
3462 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
3465 __instr_hdr_emit_exec(p, 6);
3470 instr_hdr_emit7_tx_exec(struct rte_swx_pipeline *p)
3472 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
3475 __instr_hdr_emit_exec(p, 7);
3480 instr_hdr_emit8_tx_exec(struct rte_swx_pipeline *p)
3482 TRACE("[Thread %2u] *** The next 9 instructions are fused. ***\n",
3485 __instr_hdr_emit_exec(p, 8);
/*
 * Translate "validate h.header": resolve the header named by tokens[1]
 * and record its id for the run-time valid-bit set. Exactly 2 tokens
 * are required.
 */
3493 instr_hdr_validate_translate(struct rte_swx_pipeline *p,
3494 struct action *action __rte_unused,
3497 struct instruction *instr,
3498 struct instruction_data *data __rte_unused)
3502 CHECK(n_tokens == 2, EINVAL);
3504 h = header_parse(p, tokens[1]);
3507 instr->type = INSTR_HDR_VALIDATE;
3508 instr->valid.header_id = h->id;
/* Execute "validate": set the header's bit in the thread's valid-header
 * bitmask. */
3513 instr_hdr_validate_exec(struct rte_swx_pipeline *p)
3515 struct thread *t = &p->threads[p->thread_id];
3516 struct instruction *ip = t->ip;
3517 uint32_t header_id = ip->valid.header_id;
3519 TRACE("[Thread %2u] validate header %u\n", p->thread_id, header_id);
3522 t->valid_headers = MASK64_BIT_SET(t->valid_headers, header_id);
/*
 * Translate "invalidate h.header": resolve the header named by tokens[1]
 * and record its id for the run-time valid-bit clear. Exactly 2 tokens
 * are required.
 */
3532 instr_hdr_invalidate_translate(struct rte_swx_pipeline *p,
3533 struct action *action __rte_unused,
3536 struct instruction *instr,
3537 struct instruction_data *data __rte_unused)
3541 CHECK(n_tokens == 2, EINVAL);
3543 h = header_parse(p, tokens[1]);
3546 instr->type = INSTR_HDR_INVALIDATE;
3547 instr->valid.header_id = h->id;
/* Execute "invalidate": clear the header's bit in the thread's
 * valid-header bitmask. */
3552 instr_hdr_invalidate_exec(struct rte_swx_pipeline *p)
3554 struct thread *t = &p->threads[p->thread_id];
3555 struct instruction *ip = t->ip;
3556 uint32_t header_id = ip->valid.header_id;
3558 TRACE("[Thread %2u] invalidate header %u\n", p->thread_id, header_id);
3561 t->valid_headers = MASK64_BIT_CLR(t->valid_headers, header_id);
/* Lookup helpers defined later in this file. */
3570 static struct table *
3571 table_find(struct rte_swx_pipeline *p, const char *name);
3573 static struct selector *
3574 selector_find(struct rte_swx_pipeline *p, const char *name);
/*
 * Translate "table NAME": tokens[1] may name either a regular table
 * (INSTR_TABLE) or a selector (INSTR_SELECTOR); the table instruction
 * is not allowed inside an action (action must be NULL).
 */
3577 instr_table_translate(struct rte_swx_pipeline *p,
3578 struct action *action,
3581 struct instruction *instr,
3582 struct instruction_data *data __rte_unused)
3587 CHECK(!action, EINVAL);
3588 CHECK(n_tokens == 2, EINVAL);
3590 t = table_find(p, tokens[1]);
3592 instr->type = INSTR_TABLE;
3593 instr->table.table_id = t->id;
/* Not a regular table: try to resolve the name as a selector. */
3597 s = selector_find(p, tokens[1]);
3599 instr->type = INSTR_SELECTOR;
3600 instr->table.table_id = s->id;
/*
 * Execute a table lookup for the current packet. The lookup may span
 * several invocations (done == 0): in that case the thread stays on this
 * instruction and yields. On completion, hit/miss selects either the
 * matched entry's action or the table's default action; per-table
 * hit/miss and per-action statistics are updated, then control jumps to
 * the selected action's code.
 */
3608 instr_table_exec(struct rte_swx_pipeline *p)
3610 struct thread *t = &p->threads[p->thread_id];
3611 struct instruction *ip = t->ip;
3612 uint32_t table_id = ip->table.table_id;
3613 struct rte_swx_table_state *ts = &t->table_state[table_id];
3614 struct table_runtime *table = &t->tables[table_id];
3615 struct table_statistics *stats = &p->table_stats[table_id];
3616 uint64_t action_id, n_pkts_hit, n_pkts_action;
3617 uint8_t *action_data;
3621 done = table->func(ts->obj,
/* Lookup not finalized: keep the thread parked on this instruction. */
3629 TRACE("[Thread %2u] table %u (not finalized)\n",
/* On miss, fall back to the table's default action. */
3637 action_id = hit ? action_id : ts->default_action_id;
3638 action_data = hit ? action_data : ts->default_action_data;
3639 n_pkts_hit = stats->n_pkts_hit[hit];
3640 n_pkts_action = stats->n_pkts_action[action_id];
3642 TRACE("[Thread %2u] table %u (%s, action %u)\n",
3645 hit ? "hit" : "miss",
3646 (uint32_t)action_id);
3648 t->action_id = action_id;
/* structs[0] is the action-data struct referenced by the action code. */
3649 t->structs[0] = action_data;
3651 stats->n_pkts_hit[hit] = n_pkts_hit + 1;
3652 stats->n_pkts_action[action_id] = n_pkts_action + 1;
/* Jump to the selected action's first instruction. */
3655 thread_ip_action_call(p, t, action_id);
/*
 * Execute a selector (group member selection). Selector table states are
 * stored after the p->n_tables regular table states, hence the
 * p->n_tables + selector_id index. Like table lookup, the selection may
 * take several invocations (done == 0 => stay and yield). The per-selector
 * packet counter is updated on completion.
 */
3659 instr_selector_exec(struct rte_swx_pipeline *p)
3661 struct thread *t = &p->threads[p->thread_id];
3662 struct instruction *ip = t->ip;
3663 uint32_t selector_id = ip->table.table_id;
3664 struct rte_swx_table_state *ts = &t->table_state[p->n_tables + selector_id];
3665 struct selector_runtime *selector = &t->selectors[selector_id];
3666 struct selector_statistics *stats = &p->selector_stats[selector_id];
3667 uint64_t n_pkts = stats->n_pkts;
3671 done = rte_swx_table_selector_select(ts->obj,
3673 selector->group_id_buffer,
3674 selector->selector_buffer,
3675 selector->member_id_buffer);
/* Selection not finalized: keep the thread on this instruction. */
3678 TRACE("[Thread %2u] selector %u (not finalized)\n",
3687 TRACE("[Thread %2u] selector %u\n",
3691 stats->n_pkts = n_pkts + 1;
/*
 * Translate "extern OPERAND". The operand's first character selects the
 * flavor: 'e' -> extern object member function (INSTR_EXTERN_OBJ, storing
 * both the object id and the member function id), 'f' -> free-standing
 * extern function (INSTR_EXTERN_FUNC). Exactly 2 tokens are required.
 */
3701 instr_extern_translate(struct rte_swx_pipeline *p,
3702 struct action *action __rte_unused,
3705 struct instruction *instr,
3706 struct instruction_data *data __rte_unused)
3708 char *token = tokens[1];
3710 CHECK(n_tokens == 2, EINVAL);
3712 if (token[0] == 'e') {
3713 struct extern_obj *obj;
3714 struct extern_type_member_func *func;
3716 func = extern_obj_member_func_parse(p, token, &obj);
3717 CHECK(func, EINVAL);
3719 instr->type = INSTR_EXTERN_OBJ;
3720 instr->ext_obj.ext_obj_id = obj->id;
3721 instr->ext_obj.func_id = func->id;
3726 if (token[0] == 'f') {
3727 struct extern_func *func;
3729 func = extern_func_parse(p, token);
3730 CHECK(func, EINVAL);
3732 instr->type = INSTR_EXTERN_FUNC;
3733 instr->ext_func.ext_func_id = func->id;
/*
 * Execute an extern object member function call. The function gets the
 * object handle and its mailbox; a zero "done" result means the call is
 * not finished, so the instruction pointer is not advanced and the
 * thread yields to be resumed on this same instruction.
 */
3742 instr_extern_obj_exec(struct rte_swx_pipeline *p)
3744 struct thread *t = &p->threads[p->thread_id];
3745 struct instruction *ip = t->ip;
3746 uint32_t obj_id = ip->ext_obj.ext_obj_id;
3747 uint32_t func_id = ip->ext_obj.func_id;
3748 struct extern_obj_runtime *obj = &t->extern_objs[obj_id];
3749 rte_swx_extern_type_member_func_t func = obj->funcs[func_id];
3751 TRACE("[Thread %2u] extern obj %u member func %u\n",
3756 /* Extern object member function execute. */
3757 uint32_t done = func(obj->obj, obj->mailbox);
/* Advance only when done; yield otherwise (done ^ 1). */
3760 thread_ip_inc_cond(t, done);
3761 thread_yield_cond(p, done ^ 1);
/*
 * Execute a free-standing extern function call via its mailbox. Same
 * completion protocol as the extern object call: done == 0 keeps the
 * thread on this instruction and yields.
 */
3765 instr_extern_func_exec(struct rte_swx_pipeline *p)
3767 struct thread *t = &p->threads[p->thread_id];
3768 struct instruction *ip = t->ip;
3769 uint32_t ext_func_id = ip->ext_func.ext_func_id;
3770 struct extern_func_runtime *ext_func = &t->extern_funcs[ext_func_id];
3771 rte_swx_extern_func_t func = ext_func->func;
3773 TRACE("[Thread %2u] extern func %u\n",
3777 /* Extern function execute. */
3778 uint32_t done = func(ext_func->mailbox);
/* Advance only when done; yield otherwise (done ^ 1). */
3781 thread_ip_inc_cond(t, done);
3782 thread_yield_cond(p, done ^ 1);
/*
 * Translate "mov dst src". Two forms:
 *  - src is a struct field: the instruction type is specialized by the
 *    operand kinds, 'h'-prefixed operands being header fields (network
 *    byte order) vs. meta/action fields (host byte order): MOV, MOV_MH,
 *    MOV_HM, MOV_HH;
 *  - src is an immediate: MOV_I, with the constant pre-converted at
 *    translation time so the run-time mov is a plain store.
 * Variable-size fields are rejected on both sides.
 */
3789 instr_mov_translate(struct rte_swx_pipeline *p,
3790 struct action *action,
3793 struct instruction *instr,
3794 struct instruction_data *data __rte_unused)
3796 char *dst = tokens[1], *src = tokens[2];
3797 struct field *fdst, *fsrc;
3799 uint32_t dst_struct_id = 0, src_struct_id = 0;
3801 CHECK(n_tokens == 3, EINVAL);
3803 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3804 CHECK(fdst, EINVAL);
3805 CHECK(!fdst->var_size, EINVAL);
3807 /* MOV, MOV_MH, MOV_HM or MOV_HH. */
3808 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3810 CHECK(!fsrc->var_size, EINVAL);
3812 instr->type = INSTR_MOV;
3813 if (dst[0] != 'h' && src[0] == 'h')
3814 instr->type = INSTR_MOV_MH;
3815 if (dst[0] == 'h' && src[0] != 'h')
3816 instr->type = INSTR_MOV_HM;
3817 if (dst[0] == 'h' && src[0] == 'h')
3818 instr->type = INSTR_MOV_HH;
3820 instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
3821 instr->mov.dst.n_bits = fdst->n_bits;
/* Field offsets are stored in bytes at run-time. */
3822 instr->mov.dst.offset = fdst->offset / 8;
3823 instr->mov.src.struct_id = (uint8_t)src_struct_id;
3824 instr->mov.src.n_bits = fsrc->n_bits;
3825 instr->mov.src.offset = fsrc->offset / 8;
/* Immediate source: the whole token must parse as a number. */
3830 src_val = strtoull(src, &src, 0);
3831 CHECK(!src[0], EINVAL);
/* Pre-swap the immediate to the destination field's width/byte order. */
3834 src_val = hton64(src_val) >> (64 - fdst->n_bits);
3836 instr->type = INSTR_MOV_I;
3837 instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
3838 instr->mov.dst.n_bits = fdst->n_bits;
3839 instr->mov.dst.offset = fdst->offset / 8;
3840 instr->mov.src_val = src_val;
/*
 * mov executors, one per translated variant: plain (meta/meta), mh/hm/hh
 * (header operand on the src/dst/both sides, requiring byte-order
 * handling), and i (pre-converted immediate source).
 */
3845 instr_mov_exec(struct rte_swx_pipeline *p)
3847 struct thread *t = &p->threads[p->thread_id];
3848 struct instruction *ip = t->ip;
3850 TRACE("[Thread %2u] mov\n",
3860 instr_mov_mh_exec(struct rte_swx_pipeline *p)
3862 struct thread *t = &p->threads[p->thread_id];
3863 struct instruction *ip = t->ip;
3865 TRACE("[Thread %2u] mov (mh)\n",
3875 instr_mov_hm_exec(struct rte_swx_pipeline *p)
3877 struct thread *t = &p->threads[p->thread_id];
3878 struct instruction *ip = t->ip;
3880 TRACE("[Thread %2u] mov (hm)\n",
3890 instr_mov_hh_exec(struct rte_swx_pipeline *p)
3892 struct thread *t = &p->threads[p->thread_id];
3893 struct instruction *ip = t->ip;
3895 TRACE("[Thread %2u] mov (hh)\n",
3905 instr_mov_i_exec(struct rte_swx_pipeline *p)
3907 struct thread *t = &p->threads[p->thread_id];
3908 struct instruction *ip = t->ip;
3910 TRACE("[Thread %2u] mov m.f %" PRIx64 "\n",
/*
 * Common back-end for the dma instructions: copy n_dma blocks of action
 * data (struct 0, filled in by the table lookup) into header structs,
 * marking each destination header valid afterwards. The destination
 * pointer depends on whether the header is already valid (its storage
 * ptr0 is used otherwise — the false branch is the header's backing
 * buffer).
 */
3924 __instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma);
3927 __instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma)
3929 struct thread *t = &p->threads[p->thread_id];
3930 struct instruction *ip = t->ip;
/* Action data always lives in struct slot 0. */
3931 uint8_t *action_data = t->structs[0];
3932 uint64_t valid_headers = t->valid_headers;
3935 for (i = 0; i < n_dma; i++) {
3936 uint32_t header_id = ip->dma.dst.header_id[i];
3937 uint32_t struct_id = ip->dma.dst.struct_id[i];
3938 uint32_t offset = ip->dma.src.offset[i];
3939 uint32_t n_bytes = ip->dma.n_bytes[i];
3941 struct header_runtime *h = &t->headers[header_id];
3942 uint8_t *h_ptr0 = h->ptr0;
3943 uint8_t *h_ptr = t->structs[struct_id];
/* Pick the live header pointer when the header is valid. */
3945 void *dst = MASK64_BIT_GET(valid_headers, header_id) ?
3947 void *src = &action_data[offset];
3949 TRACE("[Thread %2u] dma h.s t.f\n", p->thread_id);
3952 memcpy(dst, src, n_bytes);
3953 t->structs[struct_id] = dst;
3954 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
3957 t->valid_headers = valid_headers;
/*
 * dma executors: the plain variant copies one action-data block into one
 * header; the htN variants (N = 2..8) are fused forms handling N
 * back-to-back dma instructions in a single executor.
 */
3961 instr_dma_ht_exec(struct rte_swx_pipeline *p)
3963 __instr_dma_ht_exec(p, 1);
3970 instr_dma_ht2_exec(struct rte_swx_pipeline *p)
3972 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
3975 __instr_dma_ht_exec(p, 2);
3982 instr_dma_ht3_exec(struct rte_swx_pipeline *p)
3984 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
3987 __instr_dma_ht_exec(p, 3);
3994 instr_dma_ht4_exec(struct rte_swx_pipeline *p)
3996 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
3999 __instr_dma_ht_exec(p, 4);
4006 instr_dma_ht5_exec(struct rte_swx_pipeline *p)
4008 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
4011 __instr_dma_ht_exec(p, 5);
4018 instr_dma_ht6_exec(struct rte_swx_pipeline *p)
4020 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
4023 __instr_dma_ht_exec(p, 6);
4030 instr_dma_ht7_exec(struct rte_swx_pipeline *p)
4032 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
4035 __instr_dma_ht_exec(p, 7);
4042 instr_dma_ht8_exec(struct rte_swx_pipeline *p)
4044 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
4047 __instr_dma_ht_exec(p, 8);
/*
 * Translate "add dst src". Field source: specialize to ADD / ADD_HM /
 * ADD_MH / ADD_HH by header ('h') vs. meta operand kinds. Immediate
 * source: ADD_MI (meta dst) or ADD_HI (header dst). Variable-size
 * fields are rejected.
 */
4057 instr_alu_add_translate(struct rte_swx_pipeline *p,
4058 struct action *action,
4061 struct instruction *instr,
4062 struct instruction_data *data __rte_unused)
4064 char *dst = tokens[1], *src = tokens[2];
4065 struct field *fdst, *fsrc;
4067 uint32_t dst_struct_id = 0, src_struct_id = 0;
4069 CHECK(n_tokens == 3, EINVAL);
4071 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
4072 CHECK(fdst, EINVAL);
4073 CHECK(!fdst->var_size, EINVAL);
4075 /* ADD, ADD_HM, ADD_MH, ADD_HH. */
4076 fsrc = struct_field_parse(p, action, src, &src_struct_id);
4078 CHECK(!fsrc->var_size, EINVAL);
4080 instr->type = INSTR_ALU_ADD;
4081 if (dst[0] == 'h' && src[0] != 'h')
4082 instr->type = INSTR_ALU_ADD_HM;
4083 if (dst[0] != 'h' && src[0] == 'h')
4084 instr->type = INSTR_ALU_ADD_MH;
4085 if (dst[0] == 'h' && src[0] == 'h')
4086 instr->type = INSTR_ALU_ADD_HH;
4088 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4089 instr->alu.dst.n_bits = fdst->n_bits;
/* Field offsets are stored in bytes at run-time. */
4090 instr->alu.dst.offset = fdst->offset / 8;
4091 instr->alu.src.struct_id = (uint8_t)src_struct_id;
4092 instr->alu.src.n_bits = fsrc->n_bits;
4093 instr->alu.src.offset = fsrc->offset / 8;
4097 /* ADD_MI, ADD_HI. */
4098 src_val = strtoull(src, &src, 0);
4099 CHECK(!src[0], EINVAL);
4101 instr->type = INSTR_ALU_ADD_MI;
4103 instr->type = INSTR_ALU_ADD_HI;
4105 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4106 instr->alu.dst.n_bits = fdst->n_bits;
4107 instr->alu.dst.offset = fdst->offset / 8;
4108 instr->alu.src_val = src_val;
/*
 * Translate "sub dst src". Same structure as add: field source picks
 * SUB / SUB_HM / SUB_MH / SUB_HH by operand kinds; immediate source
 * picks SUB_MI (meta dst) or SUB_HI (header dst).
 */
4113 instr_alu_sub_translate(struct rte_swx_pipeline *p,
4114 struct action *action,
4117 struct instruction *instr,
4118 struct instruction_data *data __rte_unused)
4120 char *dst = tokens[1], *src = tokens[2];
4121 struct field *fdst, *fsrc;
4123 uint32_t dst_struct_id = 0, src_struct_id = 0;
4125 CHECK(n_tokens == 3, EINVAL);
4127 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
4128 CHECK(fdst, EINVAL);
4129 CHECK(!fdst->var_size, EINVAL);
4131 /* SUB, SUB_HM, SUB_MH, SUB_HH. */
4132 fsrc = struct_field_parse(p, action, src, &src_struct_id);
4134 CHECK(!fsrc->var_size, EINVAL);
4136 instr->type = INSTR_ALU_SUB;
4137 if (dst[0] == 'h' && src[0] != 'h')
4138 instr->type = INSTR_ALU_SUB_HM;
4139 if (dst[0] != 'h' && src[0] == 'h')
4140 instr->type = INSTR_ALU_SUB_MH;
4141 if (dst[0] == 'h' && src[0] == 'h')
4142 instr->type = INSTR_ALU_SUB_HH;
4144 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4145 instr->alu.dst.n_bits = fdst->n_bits;
4146 instr->alu.dst.offset = fdst->offset / 8;
4147 instr->alu.src.struct_id = (uint8_t)src_struct_id;
4148 instr->alu.src.n_bits = fsrc->n_bits;
4149 instr->alu.src.offset = fsrc->offset / 8;
4153 /* SUB_MI, SUB_HI. */
4154 src_val = strtoull(src, &src, 0);
4155 CHECK(!src[0], EINVAL);
4157 instr->type = INSTR_ALU_SUB_MI;
4159 instr->type = INSTR_ALU_SUB_HI;
4161 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4162 instr->alu.dst.n_bits = fdst->n_bits;
4163 instr->alu.dst.offset = fdst->offset / 8;
4164 instr->alu.src_val = src_val;
/*
 * Translate "ckadd dst src" (checksum add). dst must be a 16-bit,
 * fixed-size header field. src is either another header field
 * (CKADD_FIELD) or a whole header (CKADD_STRUCT), with a specialized
 * CKADD_STRUCT20 variant when the source header is exactly 20 bytes
 * (IPv4-header sized).
 */
4169 instr_alu_ckadd_translate(struct rte_swx_pipeline *p,
4170 struct action *action __rte_unused,
4173 struct instruction *instr,
4174 struct instruction_data *data __rte_unused)
4176 char *dst = tokens[1], *src = tokens[2];
4177 struct header *hdst, *hsrc;
4178 struct field *fdst, *fsrc;
4180 CHECK(n_tokens == 3, EINVAL);
4182 fdst = header_field_parse(p, dst, &hdst);
/* The checksum destination must be exactly 16 bits wide. */
4183 CHECK(fdst && (fdst->n_bits == 16), EINVAL);
4184 CHECK(!fdst->var_size, EINVAL);
4187 fsrc = header_field_parse(p, src, &hsrc);
4189 CHECK(!fsrc->var_size, EINVAL);
4191 instr->type = INSTR_ALU_CKADD_FIELD;
4192 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
4193 instr->alu.dst.n_bits = fdst->n_bits;
4194 instr->alu.dst.offset = fdst->offset / 8;
4195 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
4196 instr->alu.src.n_bits = fsrc->n_bits;
4197 instr->alu.src.offset = fsrc->offset / 8;
4201 /* CKADD_STRUCT, CKADD_STRUCT20. */
4202 hsrc = header_parse(p, src);
4203 CHECK(hsrc, EINVAL);
4204 CHECK(!hsrc->st->var_size, EINVAL);
4206 instr->type = INSTR_ALU_CKADD_STRUCT;
4207 if ((hsrc->st->n_bits / 8) == 20)
4208 instr->type = INSTR_ALU_CKADD_STRUCT20;
4210 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
4211 instr->alu.dst.n_bits = fdst->n_bits;
4212 instr->alu.dst.offset = fdst->offset / 8;
4213 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
/* Whole-header source: size in bits, no per-field offset. */
4214 instr->alu.src.n_bits = hsrc->st->n_bits;
4215 instr->alu.src.offset = 0; /* Unused. */
/*
 * Translate "cksub dst src" (checksum subtract): subtract a header
 * field's contribution from a 16-bit, fixed-size checksum header field.
 * Only the field form exists (INSTR_ALU_CKSUB_FIELD).
 */
4220 instr_alu_cksub_translate(struct rte_swx_pipeline *p,
4221 struct action *action __rte_unused,
4224 struct instruction *instr,
4225 struct instruction_data *data __rte_unused)
4227 char *dst = tokens[1], *src = tokens[2];
4228 struct header *hdst, *hsrc;
4229 struct field *fdst, *fsrc;
4231 CHECK(n_tokens == 3, EINVAL);
4233 fdst = header_field_parse(p, dst, &hdst);
/* The checksum destination must be exactly 16 bits wide. */
4234 CHECK(fdst && (fdst->n_bits == 16), EINVAL);
4235 CHECK(!fdst->var_size, EINVAL);
4237 fsrc = header_field_parse(p, src, &hsrc);
4238 CHECK(fsrc, EINVAL);
4239 CHECK(!fsrc->var_size, EINVAL);
4241 instr->type = INSTR_ALU_CKSUB_FIELD;
4242 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
4243 instr->alu.dst.n_bits = fdst->n_bits;
4244 instr->alu.dst.offset = fdst->offset / 8;
4245 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
4246 instr->alu.src.n_bits = fsrc->n_bits;
4247 instr->alu.src.offset = fsrc->offset / 8;
/*
 * Translate "shl dst src" (shift left). Field source: SHL / SHL_HM /
 * SHL_MH / SHL_HH by operand kinds. Immediate source: SHL_MI (meta dst)
 * or SHL_HI (header dst).
 */
4252 instr_alu_shl_translate(struct rte_swx_pipeline *p,
4253 struct action *action,
4256 struct instruction *instr,
4257 struct instruction_data *data __rte_unused)
4259 char *dst = tokens[1], *src = tokens[2];
4260 struct field *fdst, *fsrc;
4262 uint32_t dst_struct_id = 0, src_struct_id = 0;
4264 CHECK(n_tokens == 3, EINVAL);
4266 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
4267 CHECK(fdst, EINVAL);
4268 CHECK(!fdst->var_size, EINVAL);
4270 /* SHL, SHL_HM, SHL_MH, SHL_HH. */
4271 fsrc = struct_field_parse(p, action, src, &src_struct_id);
4273 CHECK(!fsrc->var_size, EINVAL);
4275 instr->type = INSTR_ALU_SHL;
4276 if (dst[0] == 'h' && src[0] != 'h')
4277 instr->type = INSTR_ALU_SHL_HM;
4278 if (dst[0] != 'h' && src[0] == 'h')
4279 instr->type = INSTR_ALU_SHL_MH;
4280 if (dst[0] == 'h' && src[0] == 'h')
4281 instr->type = INSTR_ALU_SHL_HH;
4283 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4284 instr->alu.dst.n_bits = fdst->n_bits;
4285 instr->alu.dst.offset = fdst->offset / 8;
4286 instr->alu.src.struct_id = (uint8_t)src_struct_id;
4287 instr->alu.src.n_bits = fsrc->n_bits;
4288 instr->alu.src.offset = fsrc->offset / 8;
4292 /* SHL_MI, SHL_HI. */
4293 src_val = strtoull(src, &src, 0);
4294 CHECK(!src[0], EINVAL);
4296 instr->type = INSTR_ALU_SHL_MI;
4298 instr->type = INSTR_ALU_SHL_HI;
4300 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4301 instr->alu.dst.n_bits = fdst->n_bits;
4302 instr->alu.dst.offset = fdst->offset / 8;
4303 instr->alu.src_val = src_val;
/*
 * Translate "shr dst src" (shift right). Field source: SHR / SHR_HM /
 * SHR_MH / SHR_HH by operand kinds. Immediate source: SHR_MI (meta dst)
 * or SHR_HI (header dst).
 */
4308 instr_alu_shr_translate(struct rte_swx_pipeline *p,
4309 struct action *action,
4312 struct instruction *instr,
4313 struct instruction_data *data __rte_unused)
4315 char *dst = tokens[1], *src = tokens[2];
4316 struct field *fdst, *fsrc;
4318 uint32_t dst_struct_id = 0, src_struct_id = 0;
4320 CHECK(n_tokens == 3, EINVAL);
4322 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
4323 CHECK(fdst, EINVAL);
4324 CHECK(!fdst->var_size, EINVAL);
4326 /* SHR, SHR_HM, SHR_MH, SHR_HH. */
4327 fsrc = struct_field_parse(p, action, src, &src_struct_id);
4329 CHECK(!fsrc->var_size, EINVAL);
4331 instr->type = INSTR_ALU_SHR;
4332 if (dst[0] == 'h' && src[0] != 'h')
4333 instr->type = INSTR_ALU_SHR_HM;
4334 if (dst[0] != 'h' && src[0] == 'h')
4335 instr->type = INSTR_ALU_SHR_MH;
4336 if (dst[0] == 'h' && src[0] == 'h')
4337 instr->type = INSTR_ALU_SHR_HH;
4339 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4340 instr->alu.dst.n_bits = fdst->n_bits;
4341 instr->alu.dst.offset = fdst->offset / 8;
4342 instr->alu.src.struct_id = (uint8_t)src_struct_id;
4343 instr->alu.src.n_bits = fsrc->n_bits;
4344 instr->alu.src.offset = fsrc->offset / 8;
4348 /* SHR_MI, SHR_HI. */
4349 src_val = strtoull(src, &src, 0);
4350 CHECK(!src[0], EINVAL);
4352 instr->type = INSTR_ALU_SHR_MI;
4354 instr->type = INSTR_ALU_SHR_HI;
4356 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4357 instr->alu.dst.n_bits = fdst->n_bits;
4358 instr->alu.dst.offset = fdst->offset / 8;
4359 instr->alu.src_val = src_val;
/*
 * Translate "and dst src" (bitwise AND). Field source: AND / AND_MH /
 * AND_HM / AND_HH by operand kinds. Immediate source: AND_I, with the
 * constant pre-converted to the destination's width/byte order.
 */
4364 instr_alu_and_translate(struct rte_swx_pipeline *p,
4365 struct action *action,
4368 struct instruction *instr,
4369 struct instruction_data *data __rte_unused)
4371 char *dst = tokens[1], *src = tokens[2];
4372 struct field *fdst, *fsrc;
4374 uint32_t dst_struct_id = 0, src_struct_id = 0;
4376 CHECK(n_tokens == 3, EINVAL);
4378 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
4379 CHECK(fdst, EINVAL);
4380 CHECK(!fdst->var_size, EINVAL);
4382 /* AND, AND_MH, AND_HM, AND_HH. */
4383 fsrc = struct_field_parse(p, action, src, &src_struct_id);
4385 CHECK(!fsrc->var_size, EINVAL);
4387 instr->type = INSTR_ALU_AND;
4388 if (dst[0] != 'h' && src[0] == 'h')
4389 instr->type = INSTR_ALU_AND_MH;
4390 if (dst[0] == 'h' && src[0] != 'h')
4391 instr->type = INSTR_ALU_AND_HM;
4392 if (dst[0] == 'h' && src[0] == 'h')
4393 instr->type = INSTR_ALU_AND_HH;
4395 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4396 instr->alu.dst.n_bits = fdst->n_bits;
4397 instr->alu.dst.offset = fdst->offset / 8;
4398 instr->alu.src.struct_id = (uint8_t)src_struct_id;
4399 instr->alu.src.n_bits = fsrc->n_bits;
4400 instr->alu.src.offset = fsrc->offset / 8;
4405 src_val = strtoull(src, &src, 0);
4406 CHECK(!src[0], EINVAL);
/* Pre-swap the immediate to the destination field's width/byte order. */
4409 src_val = hton64(src_val) >> (64 - fdst->n_bits);
4411 instr->type = INSTR_ALU_AND_I;
4412 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4413 instr->alu.dst.n_bits = fdst->n_bits;
4414 instr->alu.dst.offset = fdst->offset / 8;
4415 instr->alu.src_val = src_val;
/*
 * Translate "or dst src" (bitwise OR). Field source: OR / OR_MH /
 * OR_HM / OR_HH by operand kinds. Immediate source: OR_I, with the
 * constant pre-converted to the destination's width/byte order.
 */
4420 instr_alu_or_translate(struct rte_swx_pipeline *p,
4421 struct action *action,
4424 struct instruction *instr,
4425 struct instruction_data *data __rte_unused)
4427 char *dst = tokens[1], *src = tokens[2];
4428 struct field *fdst, *fsrc;
4430 uint32_t dst_struct_id = 0, src_struct_id = 0;
4432 CHECK(n_tokens == 3, EINVAL);
4434 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
4435 CHECK(fdst, EINVAL);
4436 CHECK(!fdst->var_size, EINVAL);
4438 /* OR, OR_MH, OR_HM, OR_HH. */
4439 fsrc = struct_field_parse(p, action, src, &src_struct_id);
4441 CHECK(!fsrc->var_size, EINVAL);
4443 instr->type = INSTR_ALU_OR;
4444 if (dst[0] != 'h' && src[0] == 'h')
4445 instr->type = INSTR_ALU_OR_MH;
4446 if (dst[0] == 'h' && src[0] != 'h')
4447 instr->type = INSTR_ALU_OR_HM;
4448 if (dst[0] == 'h' && src[0] == 'h')
4449 instr->type = INSTR_ALU_OR_HH;
4451 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4452 instr->alu.dst.n_bits = fdst->n_bits;
4453 instr->alu.dst.offset = fdst->offset / 8;
4454 instr->alu.src.struct_id = (uint8_t)src_struct_id;
4455 instr->alu.src.n_bits = fsrc->n_bits;
4456 instr->alu.src.offset = fsrc->offset / 8;
4461 src_val = strtoull(src, &src, 0);
4462 CHECK(!src[0], EINVAL);
/* Pre-swap the immediate to the destination field's width/byte order. */
4465 src_val = hton64(src_val) >> (64 - fdst->n_bits);
4467 instr->type = INSTR_ALU_OR_I;
4468 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4469 instr->alu.dst.n_bits = fdst->n_bits;
4470 instr->alu.dst.offset = fdst->offset / 8;
4471 instr->alu.src_val = src_val;
/*
 * Translate "xor dst src" (bitwise XOR). Field source: XOR / XOR_MH /
 * XOR_HM / XOR_HH by operand kinds. Immediate source: XOR_I, with the
 * constant pre-converted to the destination's width/byte order.
 */
4476 instr_alu_xor_translate(struct rte_swx_pipeline *p,
4477 struct action *action,
4480 struct instruction *instr,
4481 struct instruction_data *data __rte_unused)
4483 char *dst = tokens[1], *src = tokens[2];
4484 struct field *fdst, *fsrc;
4486 uint32_t dst_struct_id = 0, src_struct_id = 0;
4488 CHECK(n_tokens == 3, EINVAL);
4490 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
4491 CHECK(fdst, EINVAL);
4492 CHECK(!fdst->var_size, EINVAL);
4494 /* XOR, XOR_MH, XOR_HM, XOR_HH. */
4495 fsrc = struct_field_parse(p, action, src, &src_struct_id);
4497 CHECK(!fsrc->var_size, EINVAL);
4499 instr->type = INSTR_ALU_XOR;
4500 if (dst[0] != 'h' && src[0] == 'h')
4501 instr->type = INSTR_ALU_XOR_MH;
4502 if (dst[0] == 'h' && src[0] != 'h')
4503 instr->type = INSTR_ALU_XOR_HM;
4504 if (dst[0] == 'h' && src[0] == 'h')
4505 instr->type = INSTR_ALU_XOR_HH;
4507 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4508 instr->alu.dst.n_bits = fdst->n_bits;
4509 instr->alu.dst.offset = fdst->offset / 8;
4510 instr->alu.src.struct_id = (uint8_t)src_struct_id;
4511 instr->alu.src.n_bits = fsrc->n_bits;
4512 instr->alu.src.offset = fsrc->offset / 8;
4517 src_val = strtoull(src, &src, 0);
4518 CHECK(!src[0], EINVAL);
/* Pre-swap the immediate to the destination field's width/byte order. */
4521 src_val = hton64(src_val) >> (64 - fdst->n_bits);
4523 instr->type = INSTR_ALU_XOR_I;
4524 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
4525 instr->alu.dst.n_bits = fdst->n_bits;
4526 instr->alu.dst.offset = fdst->offset / 8;
4527 instr->alu.src_val = src_val;
/*
 * ALU instruction executors (add/sub/shl/shr/and/or/xor). Each handler
 * is a thin wrapper that loads the current thread and instruction
 * pointer, traces, and applies the matching ALU macro variant:
 *  - no suffix: meta/action-data operands;
 *  - mh/hm/hh: header operand on the src/dst/both sides (byte-order
 *    aware), with _FAST variants used for the bitwise ops (see the
 *    visible ALU_HM_FAST / ALU_HH_FAST calls) where no carry handling
 *    is needed;
 *  - mi/hi/i: pre-converted immediate source operand.
 */
4532 instr_alu_add_exec(struct rte_swx_pipeline *p)
4534 struct thread *t = &p->threads[p->thread_id];
4535 struct instruction *ip = t->ip;
4537 TRACE("[Thread %2u] add\n", p->thread_id);
4547 instr_alu_add_mh_exec(struct rte_swx_pipeline *p)
4549 struct thread *t = &p->threads[p->thread_id];
4550 struct instruction *ip = t->ip;
4552 TRACE("[Thread %2u] add (mh)\n", p->thread_id);
4562 instr_alu_add_hm_exec(struct rte_swx_pipeline *p)
4564 struct thread *t = &p->threads[p->thread_id];
4565 struct instruction *ip = t->ip;
4567 TRACE("[Thread %2u] add (hm)\n", p->thread_id);
4577 instr_alu_add_hh_exec(struct rte_swx_pipeline *p)
4579 struct thread *t = &p->threads[p->thread_id];
4580 struct instruction *ip = t->ip;
4582 TRACE("[Thread %2u] add (hh)\n", p->thread_id);
4592 instr_alu_add_mi_exec(struct rte_swx_pipeline *p)
4594 struct thread *t = &p->threads[p->thread_id];
4595 struct instruction *ip = t->ip;
4597 TRACE("[Thread %2u] add (mi)\n", p->thread_id);
4607 instr_alu_add_hi_exec(struct rte_swx_pipeline *p)
4609 struct thread *t = &p->threads[p->thread_id];
4610 struct instruction *ip = t->ip;
4612 TRACE("[Thread %2u] add (hi)\n", p->thread_id);
4622 instr_alu_sub_exec(struct rte_swx_pipeline *p)
4624 struct thread *t = &p->threads[p->thread_id];
4625 struct instruction *ip = t->ip;
4627 TRACE("[Thread %2u] sub\n", p->thread_id);
4637 instr_alu_sub_mh_exec(struct rte_swx_pipeline *p)
4639 struct thread *t = &p->threads[p->thread_id];
4640 struct instruction *ip = t->ip;
4642 TRACE("[Thread %2u] sub (mh)\n", p->thread_id);
4652 instr_alu_sub_hm_exec(struct rte_swx_pipeline *p)
4654 struct thread *t = &p->threads[p->thread_id];
4655 struct instruction *ip = t->ip;
4657 TRACE("[Thread %2u] sub (hm)\n", p->thread_id);
4667 instr_alu_sub_hh_exec(struct rte_swx_pipeline *p)
4669 struct thread *t = &p->threads[p->thread_id];
4670 struct instruction *ip = t->ip;
4672 TRACE("[Thread %2u] sub (hh)\n", p->thread_id);
4682 instr_alu_sub_mi_exec(struct rte_swx_pipeline *p)
4684 struct thread *t = &p->threads[p->thread_id];
4685 struct instruction *ip = t->ip;
4687 TRACE("[Thread %2u] sub (mi)\n", p->thread_id);
4697 instr_alu_sub_hi_exec(struct rte_swx_pipeline *p)
4699 struct thread *t = &p->threads[p->thread_id];
4700 struct instruction *ip = t->ip;
4702 TRACE("[Thread %2u] sub (hi)\n", p->thread_id);
4712 instr_alu_shl_exec(struct rte_swx_pipeline *p)
4714 struct thread *t = &p->threads[p->thread_id];
4715 struct instruction *ip = t->ip;
4717 TRACE("[Thread %2u] shl\n", p->thread_id);
4727 instr_alu_shl_mh_exec(struct rte_swx_pipeline *p)
4729 struct thread *t = &p->threads[p->thread_id];
4730 struct instruction *ip = t->ip;
4732 TRACE("[Thread %2u] shl (mh)\n", p->thread_id);
4742 instr_alu_shl_hm_exec(struct rte_swx_pipeline *p)
4744 struct thread *t = &p->threads[p->thread_id];
4745 struct instruction *ip = t->ip;
4747 TRACE("[Thread %2u] shl (hm)\n", p->thread_id);
4757 instr_alu_shl_hh_exec(struct rte_swx_pipeline *p)
4759 struct thread *t = &p->threads[p->thread_id];
4760 struct instruction *ip = t->ip;
4762 TRACE("[Thread %2u] shl (hh)\n", p->thread_id);
4772 instr_alu_shl_mi_exec(struct rte_swx_pipeline *p)
4774 struct thread *t = &p->threads[p->thread_id];
4775 struct instruction *ip = t->ip;
4777 TRACE("[Thread %2u] shl (mi)\n", p->thread_id);
4787 instr_alu_shl_hi_exec(struct rte_swx_pipeline *p)
4789 struct thread *t = &p->threads[p->thread_id];
4790 struct instruction *ip = t->ip;
4792 TRACE("[Thread %2u] shl (hi)\n", p->thread_id);
4802 instr_alu_shr_exec(struct rte_swx_pipeline *p)
4804 struct thread *t = &p->threads[p->thread_id];
4805 struct instruction *ip = t->ip;
4807 TRACE("[Thread %2u] shr\n", p->thread_id);
4817 instr_alu_shr_mh_exec(struct rte_swx_pipeline *p)
4819 struct thread *t = &p->threads[p->thread_id];
4820 struct instruction *ip = t->ip;
4822 TRACE("[Thread %2u] shr (mh)\n", p->thread_id);
4832 instr_alu_shr_hm_exec(struct rte_swx_pipeline *p)
4834 struct thread *t = &p->threads[p->thread_id];
4835 struct instruction *ip = t->ip;
4837 TRACE("[Thread %2u] shr (hm)\n", p->thread_id);
4847 instr_alu_shr_hh_exec(struct rte_swx_pipeline *p)
4849 struct thread *t = &p->threads[p->thread_id];
4850 struct instruction *ip = t->ip;
4852 TRACE("[Thread %2u] shr (hh)\n", p->thread_id);
4862 instr_alu_shr_mi_exec(struct rte_swx_pipeline *p)
4864 struct thread *t = &p->threads[p->thread_id];
4865 struct instruction *ip = t->ip;
4867 TRACE("[Thread %2u] shr (mi)\n", p->thread_id);
4877 instr_alu_shr_hi_exec(struct rte_swx_pipeline *p)
4879 struct thread *t = &p->threads[p->thread_id];
4880 struct instruction *ip = t->ip;
4882 TRACE("[Thread %2u] shr (hi)\n", p->thread_id);
4892 instr_alu_and_exec(struct rte_swx_pipeline *p)
4894 struct thread *t = &p->threads[p->thread_id];
4895 struct instruction *ip = t->ip;
4897 TRACE("[Thread %2u] and\n", p->thread_id);
4907 instr_alu_and_mh_exec(struct rte_swx_pipeline *p)
4909 struct thread *t = &p->threads[p->thread_id];
4910 struct instruction *ip = t->ip;
4912 TRACE("[Thread %2u] and (mh)\n", p->thread_id);
4922 instr_alu_and_hm_exec(struct rte_swx_pipeline *p)
4924 struct thread *t = &p->threads[p->thread_id];
4925 struct instruction *ip = t->ip;
4927 TRACE("[Thread %2u] and (hm)\n", p->thread_id);
4930 ALU_HM_FAST(t, ip, &);
4937 instr_alu_and_hh_exec(struct rte_swx_pipeline *p)
4939 struct thread *t = &p->threads[p->thread_id];
4940 struct instruction *ip = t->ip;
4942 TRACE("[Thread %2u] and (hh)\n", p->thread_id);
4945 ALU_HH_FAST(t, ip, &);
4952 instr_alu_and_i_exec(struct rte_swx_pipeline *p)
4954 struct thread *t = &p->threads[p->thread_id];
4955 struct instruction *ip = t->ip;
4957 TRACE("[Thread %2u] and (i)\n", p->thread_id);
4967 instr_alu_or_exec(struct rte_swx_pipeline *p)
4969 struct thread *t = &p->threads[p->thread_id];
4970 struct instruction *ip = t->ip;
4972 TRACE("[Thread %2u] or\n", p->thread_id);
4982 instr_alu_or_mh_exec(struct rte_swx_pipeline *p)
4984 struct thread *t = &p->threads[p->thread_id];
4985 struct instruction *ip = t->ip;
4987 TRACE("[Thread %2u] or (mh)\n", p->thread_id);
4997 instr_alu_or_hm_exec(struct rte_swx_pipeline *p)
4999 struct thread *t = &p->threads[p->thread_id];
5000 struct instruction *ip = t->ip;
5002 TRACE("[Thread %2u] or (hm)\n", p->thread_id);
5005 ALU_HM_FAST(t, ip, |);
5012 instr_alu_or_hh_exec(struct rte_swx_pipeline *p)
5014 struct thread *t = &p->threads[p->thread_id];
5015 struct instruction *ip = t->ip;
5017 TRACE("[Thread %2u] or (hh)\n", p->thread_id);
5020 ALU_HH_FAST(t, ip, |);
5027 instr_alu_or_i_exec(struct rte_swx_pipeline *p)
5029 struct thread *t = &p->threads[p->thread_id];
5030 struct instruction *ip = t->ip;
5032 TRACE("[Thread %2u] or (i)\n", p->thread_id);
5042 instr_alu_xor_exec(struct rte_swx_pipeline *p)
5044 struct thread *t = &p->threads[p->thread_id];
5045 struct instruction *ip = t->ip;
5047 TRACE("[Thread %2u] xor\n", p->thread_id);
5057 instr_alu_xor_mh_exec(struct rte_swx_pipeline *p)
5059 struct thread *t = &p->threads[p->thread_id];
5060 struct instruction *ip = t->ip;
5062 TRACE("[Thread %2u] xor (mh)\n", p->thread_id);
5072 instr_alu_xor_hm_exec(struct rte_swx_pipeline *p)
5074 struct thread *t = &p->threads[p->thread_id];
5075 struct instruction *ip = t->ip;
5077 TRACE("[Thread %2u] xor (hm)\n", p->thread_id);
5080 ALU_HM_FAST(t, ip, ^);
5087 instr_alu_xor_hh_exec(struct rte_swx_pipeline *p)
5089 struct thread *t = &p->threads[p->thread_id];
5090 struct instruction *ip = t->ip;
5092 TRACE("[Thread %2u] xor (hh)\n", p->thread_id);
5095 ALU_HH_FAST(t, ip, ^);
5102 instr_alu_xor_i_exec(struct rte_swx_pipeline *p)
5104 struct thread *t = &p->threads[p->thread_id];
5105 struct instruction *ip = t->ip;
5107 TRACE("[Thread %2u] xor (i)\n", p->thread_id);
/*
 * ckadd (field variant): fold a source field into a 16-bit ones'-
 * complement checksum (IPv4/TCP/UDP style). The visible body splits the
 * (up to 64-bit) source into 32-bit halves, adds them, then repeatedly
 * folds the carries above bit 15 back into the low 16 bits, so the
 * stored result is always a 16-bit value (cf. RFC 1071 arithmetic).
 *
 * NOTE(review): the extraction dropped lines here -- at minimum the
 * "src64 = *src64_ptr" load (src64 is used at 5137 but never visibly
 * assigned), the seeding of r from the current checksum "dst" (dst is
 * declared but never visibly read), and presumably a final complement
 * before the store. Consult the full file before modifying.
 */
5117 instr_alu_ckadd_field_exec(struct rte_swx_pipeline *p)
5119 struct thread *t = &p->threads[p->thread_id];
5120 struct instruction *ip = t->ip;
5121 uint8_t *dst_struct, *src_struct;
5122 uint16_t *dst16_ptr, dst;
5123 uint64_t *src64_ptr, src64, src64_mask, src;
5126 TRACE("[Thread %2u] ckadd (field)\n", p->thread_id);
/* Locate the 16-bit checksum field inside the destination struct. */
5129 dst_struct = t->structs[ip->alu.dst.struct_id];
5130 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
/* Locate and mask the source field (n_bits wide, <= 64). */
5133 src_struct = t->structs[ip->alu.src.struct_id];
5134 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
5136 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
5137 src = src64 & src64_mask;
5142 /* The first input (r) is a 16-bit number. The second and the third
5143 * inputs are 32-bit numbers. In the worst case scenario, the sum of the
5144 * three numbers (output r) is a 34-bit number.
5146 r += (src >> 32) + (src & 0xFFFFFFFF);
5148 /* The first input is a 16-bit number. The second input is an 18-bit
5149 * number. In the worst case scenario, the sum of the two numbers is a
5152 r = (r & 0xFFFF) + (r >> 16);
5154 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
5155 * a 3-bit number (0 .. 7). Their sum is a 17-bit number (0 .. 0x10006).
5157 r = (r & 0xFFFF) + (r >> 16);
5159 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
5160 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
5161 * 0x10006), the output r is (0 .. 7). So no carry bit can be generated,
5162 * therefore the output r is always a 16-bit number.
5164 r = (r & 0xFFFF) + (r >> 16);
5169 *dst16_ptr = (uint16_t)r;
/*
 * cksub (field variant): subtract a source field from a 16-bit ones'-
 * complement checksum, i.e. the incremental-update direction of ckadd
 * (cf. RFC 1624). To avoid underflow in 2's complement arithmetic, a
 * multiple of the 0xFFFF modulus is added to the minuend first (see the
 * detailed in-line comments below), then the carries are folded back
 * into 16 bits exactly as in ckadd.
 *
 * NOTE(review): the extraction dropped lines here -- at minimum the
 * "src64 = *src64_ptr" load (src64 is used at 5196 but never visibly
 * assigned) and the seeding of r from the current checksum "dst".
 * Consult the full file before modifying.
 */
5176 instr_alu_cksub_field_exec(struct rte_swx_pipeline *p)
5178 struct thread *t = &p->threads[p->thread_id];
5179 struct instruction *ip = t->ip;
5180 uint8_t *dst_struct, *src_struct;
5181 uint16_t *dst16_ptr, dst;
5182 uint64_t *src64_ptr, src64, src64_mask, src;
5185 TRACE("[Thread %2u] cksub (field)\n", p->thread_id);
/* Locate the 16-bit checksum field inside the destination struct. */
5188 dst_struct = t->structs[ip->alu.dst.struct_id];
5189 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
/* Locate and mask the source field (n_bits wide, <= 64). */
5192 src_struct = t->structs[ip->alu.src.struct_id];
5193 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
5195 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
5196 src = src64 & src64_mask;
5201 /* Subtraction in 1's complement arithmetic (i.e. a '- b) is the same as
5202 * the following sequence of operations in 2's complement arithmetic:
5203 * a '- b = (a - b) % 0xFFFF.
5205 * In order to prevent an underflow for the below subtraction, in which
5206 * a 33-bit number (the subtrahend) is taken out of a 16-bit number (the
5207 * minuend), we first add a multiple of the 0xFFFF modulus to the
5208 * minuend. The number we add to the minuend needs to be a 34-bit number
5209 * or higher, so for readability reasons we picked the 36-bit multiple.
5210 * We are effectively turning the 16-bit minuend into a 36-bit number:
5211 * (a - b) % 0xFFFF = (a + 0xFFFF00000 - b) % 0xFFFF.
5213 r += 0xFFFF00000ULL; /* The output r is a 36-bit number. */
5215 /* A 33-bit number is subtracted from a 36-bit number (the input r). The
5216 * result (the output r) is a 36-bit number.
5218 r -= (src >> 32) + (src & 0xFFFFFFFF);
5220 /* The first input is a 16-bit number. The second input is a 20-bit
5221 * number. Their sum is a 21-bit number.
5223 r = (r & 0xFFFF) + (r >> 16);
5225 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
5226 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1001E).
5228 r = (r & 0xFFFF) + (r >> 16);
5230 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
5231 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
5232 * 0x1001E), the output r is (0 .. 31). So no carry bit can be
5233 * generated, therefore the output r is always a 16-bit number.
5235 r = (r & 0xFFFF) + (r >> 16);
5240 *dst16_ptr = (uint16_t)r;
/*
 * ckadd (20-byte struct variant): compute the checksum over a fixed
 * 20-byte source struct -- i.e. an IPv4 header without options -- read
 * as five 32-bit words, using two parallel accumulators (r0, r1) before
 * folding the carries back into 16 bits. A zero result is replaced by
 * 0xFFFF before the store (ones'-complement convention for a checksum
 * of zero -- presumably applied after the final complement, which is
 * not visible in this extract; confirm against the full file).
 */
5247 instr_alu_ckadd_struct20_exec(struct rte_swx_pipeline *p)
5249 struct thread *t = &p->threads[p->thread_id];
5250 struct instruction *ip = t->ip;
5251 uint8_t *dst_struct, *src_struct;
5252 uint16_t *dst16_ptr;
5253 uint32_t *src32_ptr;
5256 TRACE("[Thread %2u] ckadd (struct of 20 bytes)\n", p->thread_id);
/* Locate the 16-bit checksum field inside the destination struct. */
5259 dst_struct = t->structs[ip->alu.dst.struct_id];
5260 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
/* Read the source struct as an array of 32-bit words. */
5262 src_struct = t->structs[ip->alu.src.struct_id];
5263 src32_ptr = (uint32_t *)&src_struct[0];
5265 r0 = src32_ptr[0]; /* r0 is a 32-bit number. */
5266 r1 = src32_ptr[1]; /* r1 is a 32-bit number. */
5267 r0 += src32_ptr[2]; /* The output r0 is a 33-bit number. */
5268 r1 += src32_ptr[3]; /* The output r1 is a 33-bit number. */
5269 r0 += r1 + src32_ptr[4]; /* The output r0 is a 35-bit number. */
5271 /* The first input is a 16-bit number. The second input is a 19-bit
5272 * number. Their sum is a 20-bit number.
5274 r0 = (r0 & 0xFFFF) + (r0 >> 16);
5276 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
5277 * a 4-bit number (0 .. 15). The sum is a 17-bit number (0 .. 0x1000E).
5279 r0 = (r0 & 0xFFFF) + (r0 >> 16);
5281 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
5282 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
5283 * 0x1000E), the output r is (0 .. 15). So no carry bit can be
5284 * generated, therefore the output r is always a 16-bit number.
5286 r0 = (r0 & 0xFFFF) + (r0 >> 16);
/* A checksum of zero is transmitted as 0xFFFF. */
5289 r0 = r0 ? r0 : 0xFFFF;
5291 *dst16_ptr = (uint16_t)r0;
/*
 * ckadd (generic struct variant): compute the checksum over a source
 * struct of up to 256 bytes, iterated as 32-bit words (n_bits / 32
 * iterations), then fold the carries back into 16 bits.
 *
 * NOTE(review): fixed a typo in the comment at 5330 -- with a 5-bit
 * second input (0 .. 31) the sum ranges up to 0x1001E, not 0x1000E
 * (consistent with the comment at 5336). The accumulation statement
 * inside the loop was dropped by the extraction; consult the full file
 * before modifying.
 */
5298 instr_alu_ckadd_struct_exec(struct rte_swx_pipeline *p)
5300 struct thread *t = &p->threads[p->thread_id];
5301 struct instruction *ip = t->ip;
5302 uint8_t *dst_struct, *src_struct;
5303 uint16_t *dst16_ptr;
5304 uint32_t *src32_ptr;
5308 TRACE("[Thread %2u] ckadd (struct)\n", p->thread_id);
/* Locate the 16-bit checksum field inside the destination struct. */
5311 dst_struct = t->structs[ip->alu.dst.struct_id];
5312 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
/* Read the source struct as an array of 32-bit words. */
5314 src_struct = t->structs[ip->alu.src.struct_id];
5315 src32_ptr = (uint32_t *)&src_struct[0];
5317 /* The max number of 32-bit words in a 256-byte header is 8 = 2^3.
5318 * Therefore, in the worst case scenario, a 35-bit number is added to a
5319 * 16-bit number (the input r), so the output r is 36-bit number.
5321 for (i = 0; i < ip->alu.src.n_bits / 32; i++, src32_ptr++)
5324 /* The first input is a 16-bit number. The second input is a 20-bit
5325 * number. Their sum is a 21-bit number.
5327 r = (r & 0xFFFF) + (r >> 16);
5329 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
5330 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1001E).
5332 r = (r & 0xFFFF) + (r >> 16);
5334 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
5335 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
5336 * 0x1001E), the output r is (0 .. 31). So no carry bit can be
5337 * generated, therefore the output r is always a 16-bit number.
5339 r = (r & 0xFFFF) + (r >> 16);
5344 *dst16_ptr = (uint16_t)r;
/* Forward declaration: regarray lookup by name (defined later in the file). */
5353 static struct regarray *
5354 regarray_find(struct rte_swx_pipeline *p, const char *name);
/*
 * Translate "regprefetch REGARRAY index" into one of three opcodes:
 * REGPREFETCH_RH / REGPREFETCH_RM when the index is a header ('h'
 * prefix) or metadata struct field, REGPREFETCH_RI when it is an
 * immediate integer. The per-opcode index/immediate fields of
 * instr->regarray are filled in accordingly.
 */
5357 instr_regprefetch_translate(struct rte_swx_pipeline *p,
5358 struct action *action,
5361 struct instruction *instr,
5362 struct instruction_data *data __rte_unused)
5364 char *regarray = tokens[1], *idx = tokens[2];
5367 uint32_t idx_struct_id, idx_val;
5369 CHECK(n_tokens == 3, EINVAL);
5371 r = regarray_find(p, regarray);
5374 /* REGPREFETCH_RH, REGPREFETCH_RM. */
5375 fidx = struct_field_parse(p, action, idx, &idx_struct_id);
5377 CHECK(!fidx->var_size, EINVAL);
5379 instr->type = INSTR_REGPREFETCH_RM;
5381 instr->type = INSTR_REGPREFETCH_RH;
5383 instr->regarray.regarray_id = r->id;
5384 instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
5385 instr->regarray.idx.n_bits = fidx->n_bits;
5386 instr->regarray.idx.offset = fidx->offset / 8;
5387 instr->regarray.dstsrc_val = 0; /* Unused. */
5391 /* REGPREFETCH_RI. */
5392 idx_val = strtoul(idx, &idx, 0);
5393 CHECK(!idx[0], EINVAL);
5395 instr->type = INSTR_REGPREFETCH_RI;
5396 instr->regarray.regarray_id = r->id;
5397 instr->regarray.idx_val = idx_val;
5398 instr->regarray.dstsrc_val = 0; /* Unused. */
/*
 * Translate "regrd dst REGARRAY index" into one of six opcodes, picked
 * by whether the destination and index are header fields ('h' prefix),
 * metadata fields, or (index only) an immediate: HRH/HRM/MRH/MRM and
 * HRI/MRI. Note the destination is parsed with a NULL action --
 * presumably the destination must be a writable header/metadata field
 * rather than read-only action data; confirm against
 * struct_field_parse().
 */
5403 instr_regrd_translate(struct rte_swx_pipeline *p,
5404 struct action *action,
5407 struct instruction *instr,
5408 struct instruction_data *data __rte_unused)
5410 char *dst = tokens[1], *regarray = tokens[2], *idx = tokens[3];
5412 struct field *fdst, *fidx;
5413 uint32_t dst_struct_id, idx_struct_id, idx_val;
5415 CHECK(n_tokens == 4, EINVAL);
5417 r = regarray_find(p, regarray);
5420 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
5421 CHECK(fdst, EINVAL);
5422 CHECK(!fdst->var_size, EINVAL);
5424 /* REGRD_HRH, REGRD_HRM, REGRD_MRH, REGRD_MRM. */
5425 fidx = struct_field_parse(p, action, idx, &idx_struct_id);
5427 CHECK(!fidx->var_size, EINVAL);
/* Opcode selection: 'h' prefix marks a header field operand. */
5429 instr->type = INSTR_REGRD_MRM;
5430 if (dst[0] == 'h' && idx[0] != 'h')
5431 instr->type = INSTR_REGRD_HRM;
5432 if (dst[0] != 'h' && idx[0] == 'h')
5433 instr->type = INSTR_REGRD_MRH;
5434 if (dst[0] == 'h' && idx[0] == 'h')
5435 instr->type = INSTR_REGRD_HRH;
5437 instr->regarray.regarray_id = r->id;
5438 instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
5439 instr->regarray.idx.n_bits = fidx->n_bits;
5440 instr->regarray.idx.offset = fidx->offset / 8;
5441 instr->regarray.dstsrc.struct_id = (uint8_t)dst_struct_id;
5442 instr->regarray.dstsrc.n_bits = fdst->n_bits;
5443 instr->regarray.dstsrc.offset = fdst->offset / 8;
5447 /* REGRD_MRI, REGRD_HRI. */
5448 idx_val = strtoul(idx, &idx, 0);
5449 CHECK(!idx[0], EINVAL);
5451 instr->type = INSTR_REGRD_MRI;
5453 instr->type = INSTR_REGRD_HRI;
5455 instr->regarray.regarray_id = r->id;
5456 instr->regarray.idx_val = idx_val;
5457 instr->regarray.dstsrc.struct_id = (uint8_t)dst_struct_id;
5458 instr->regarray.dstsrc.n_bits = fdst->n_bits;
5459 instr->regarray.dstsrc.offset = fdst->offset / 8;
/*
 * Translate "regwr REGARRAY index src" into one of nine opcodes, one
 * per combination of index and source being a header field (H),
 * metadata field (M) or immediate (I): RHH/RHM/RMH/RMM, RHI/RMI,
 * RIH/RIM, RII. The four branches below test which of (fidx, fsrc)
 * parsed as struct fields; whatever did not parse is re-parsed as an
 * integer immediate with strtoul/strtoull (full-consumption checked via
 * the endptr).
 */
5464 instr_regwr_translate(struct rte_swx_pipeline *p,
5465 struct action *action,
5468 struct instruction *instr,
5469 struct instruction_data *data __rte_unused)
5471 char *regarray = tokens[1], *idx = tokens[2], *src = tokens[3];
5473 struct field *fidx, *fsrc;
5475 uint32_t idx_struct_id, idx_val, src_struct_id;
5477 CHECK(n_tokens == 4, EINVAL);
5479 r = regarray_find(p, regarray);
5482 /* REGWR_RHH, REGWR_RHM, REGWR_RMH, REGWR_RMM. */
5483 fidx = struct_field_parse(p, action, idx, &idx_struct_id);
5484 fsrc = struct_field_parse(p, action, src, &src_struct_id);
5486 CHECK(!fidx->var_size, EINVAL);
5487 CHECK(!fsrc->var_size, EINVAL);
5489 instr->type = INSTR_REGWR_RMM;
5490 if (idx[0] == 'h' && src[0] != 'h')
5491 instr->type = INSTR_REGWR_RHM;
5492 if (idx[0] != 'h' && src[0] == 'h')
5493 instr->type = INSTR_REGWR_RMH;
5494 if (idx[0] == 'h' && src[0] == 'h')
5495 instr->type = INSTR_REGWR_RHH;
5497 instr->regarray.regarray_id = r->id;
5498 instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
5499 instr->regarray.idx.n_bits = fidx->n_bits;
5500 instr->regarray.idx.offset = fidx->offset / 8;
5501 instr->regarray.dstsrc.struct_id = (uint8_t)src_struct_id;
5502 instr->regarray.dstsrc.n_bits = fsrc->n_bits;
5503 instr->regarray.dstsrc.offset = fsrc->offset / 8;
5507 /* REGWR_RHI, REGWR_RMI. */
5508 if (fidx && !fsrc) {
5509 CHECK(!fidx->var_size, EINVAL);
5511 src_val = strtoull(src, &src, 0);
5512 CHECK(!src[0], EINVAL);
5514 instr->type = INSTR_REGWR_RMI;
5516 instr->type = INSTR_REGWR_RHI;
5518 instr->regarray.regarray_id = r->id;
5519 instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
5520 instr->regarray.idx.n_bits = fidx->n_bits;
5521 instr->regarray.idx.offset = fidx->offset / 8;
5522 instr->regarray.dstsrc_val = src_val;
5526 /* REGWR_RIH, REGWR_RIM. */
5527 if (!fidx && fsrc) {
5528 idx_val = strtoul(idx, &idx, 0);
5529 CHECK(!idx[0], EINVAL);
5531 CHECK(!fsrc->var_size, EINVAL);
5533 instr->type = INSTR_REGWR_RIM;
5535 instr->type = INSTR_REGWR_RIH;
5537 instr->regarray.regarray_id = r->id;
5538 instr->regarray.idx_val = idx_val;
5539 instr->regarray.dstsrc.struct_id = (uint8_t)src_struct_id;
5540 instr->regarray.dstsrc.n_bits = fsrc->n_bits;
5541 instr->regarray.dstsrc.offset = fsrc->offset / 8;
/* REGWR_RII: both index and source are immediates. */
5546 src_val = strtoull(src, &src, 0);
5547 CHECK(!src[0], EINVAL);
5549 idx_val = strtoul(idx, &idx, 0);
5550 CHECK(!idx[0], EINVAL);
5552 instr->type = INSTR_REGWR_RII;
5553 instr->regarray.idx_val = idx_val;
5554 instr->regarray.dstsrc_val = src_val;
/*
 * Translate "regadd REGARRAY index src" into one of nine opcodes,
 * mirroring instr_regwr_translate() exactly (same operand-kind
 * dispatch), but selecting the REGADD_* (read-modify-write add)
 * variants instead of the plain-write REGWR_* ones.
 */
5560 instr_regadd_translate(struct rte_swx_pipeline *p,
5561 struct action *action,
5564 struct instruction *instr,
5565 struct instruction_data *data __rte_unused)
5567 char *regarray = tokens[1], *idx = tokens[2], *src = tokens[3];
5569 struct field *fidx, *fsrc;
5571 uint32_t idx_struct_id, idx_val, src_struct_id;
5573 CHECK(n_tokens == 4, EINVAL);
5575 r = regarray_find(p, regarray);
5578 /* REGADD_RHH, REGADD_RHM, REGADD_RMH, REGADD_RMM. */
5579 fidx = struct_field_parse(p, action, idx, &idx_struct_id);
5580 fsrc = struct_field_parse(p, action, src, &src_struct_id);
5582 CHECK(!fidx->var_size, EINVAL);
5583 CHECK(!fsrc->var_size, EINVAL);
5585 instr->type = INSTR_REGADD_RMM;
5586 if (idx[0] == 'h' && src[0] != 'h')
5587 instr->type = INSTR_REGADD_RHM;
5588 if (idx[0] != 'h' && src[0] == 'h')
5589 instr->type = INSTR_REGADD_RMH;
5590 if (idx[0] == 'h' && src[0] == 'h')
5591 instr->type = INSTR_REGADD_RHH;
5593 instr->regarray.regarray_id = r->id;
5594 instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
5595 instr->regarray.idx.n_bits = fidx->n_bits;
5596 instr->regarray.idx.offset = fidx->offset / 8;
5597 instr->regarray.dstsrc.struct_id = (uint8_t)src_struct_id;
5598 instr->regarray.dstsrc.n_bits = fsrc->n_bits;
5599 instr->regarray.dstsrc.offset = fsrc->offset / 8;
5603 /* REGADD_RHI, REGADD_RMI. */
5604 if (fidx && !fsrc) {
5605 CHECK(!fidx->var_size, EINVAL);
5607 src_val = strtoull(src, &src, 0);
5608 CHECK(!src[0], EINVAL);
5610 instr->type = INSTR_REGADD_RMI;
5612 instr->type = INSTR_REGADD_RHI;
5614 instr->regarray.regarray_id = r->id;
5615 instr->regarray.idx.struct_id = (uint8_t)idx_struct_id;
5616 instr->regarray.idx.n_bits = fidx->n_bits;
5617 instr->regarray.idx.offset = fidx->offset / 8;
5618 instr->regarray.dstsrc_val = src_val;
5622 /* REGADD_RIH, REGADD_RIM. */
5623 if (!fidx && fsrc) {
5624 idx_val = strtoul(idx, &idx, 0);
5625 CHECK(!idx[0], EINVAL);
5627 CHECK(!fsrc->var_size, EINVAL);
5629 instr->type = INSTR_REGADD_RIM;
5631 instr->type = INSTR_REGADD_RIH;
5633 instr->regarray.regarray_id = r->id;
5634 instr->regarray.idx_val = idx_val;
5635 instr->regarray.dstsrc.struct_id = (uint8_t)src_struct_id;
5636 instr->regarray.dstsrc.n_bits = fsrc->n_bits;
5637 instr->regarray.dstsrc.offset = fsrc->offset / 8;
/* REGADD_RII: both index and source are immediates. */
5642 src_val = strtoull(src, &src, 0);
5643 CHECK(!src[0], EINVAL);
5645 idx_val = strtoul(idx, &idx, 0);
5646 CHECK(!idx[0], EINVAL);
5648 instr->type = INSTR_REGADD_RII;
5649 instr->regarray.idx_val = idx_val;
5650 instr->regarray.dstsrc_val = src_val;
/*
 * Register-array access helpers shared by the regprefetch / regrd /
 * regwr / regadd handlers. Naming: "hbo" = host byte order (metadata
 * fields), "nbo" = network byte order (header fields, converted with
 * ntoh64/hton64), "imm" = immediate index. On big-endian targets the
 * nbo variants alias the hbo ones via #define (the #else lines were
 * dropped by the extraction but the fallback #defines are visible).
 * Indices are always clipped with r->size_mask, so out-of-range values
 * wrap instead of overrunning the array.
 */
5654 static inline uint64_t *
5655 instr_regarray_regarray(struct rte_swx_pipeline *p, struct instruction *ip)
5657 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
/* Index taken from a host-byte-order (metadata) struct field. */
5661 static inline uint64_t
5662 instr_regarray_idx_hbo(struct rte_swx_pipeline *p, struct thread *t, struct instruction *ip)
5664 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
5666 uint8_t *idx_struct = t->structs[ip->regarray.idx.struct_id];
5667 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->regarray.idx.offset];
5668 uint64_t idx64 = *idx64_ptr;
5669 uint64_t idx64_mask = UINT64_MAX >> (64 - ip->regarray.idx.n_bits);
5670 uint64_t idx = idx64 & idx64_mask & r->size_mask;
5675 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* Index taken from a network-byte-order (header) struct field. */
5677 static inline uint64_t
5678 instr_regarray_idx_nbo(struct rte_swx_pipeline *p, struct thread *t, struct instruction *ip)
5680 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
5682 uint8_t *idx_struct = t->structs[ip->regarray.idx.struct_id];
5683 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->regarray.idx.offset];
5684 uint64_t idx64 = *idx64_ptr;
5685 uint64_t idx = (ntoh64(idx64) >> (64 - ip->regarray.idx.n_bits)) & r->size_mask;
/* Big endian: network order is host order, reuse the hbo helper. */
5692 #define instr_regarray_idx_nbo instr_regarray_idx_hbo
/* Index taken from the instruction's immediate field. */
5696 static inline uint64_t
5697 instr_regarray_idx_imm(struct rte_swx_pipeline *p, struct instruction *ip)
5699 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
5701 uint64_t idx = ip->regarray.idx_val & r->size_mask;
/* Source value read from a host-byte-order struct field. */
5706 static inline uint64_t
5707 instr_regarray_src_hbo(struct thread *t, struct instruction *ip)
5709 uint8_t *src_struct = t->structs[ip->regarray.dstsrc.struct_id];
5710 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->regarray.dstsrc.offset];
5711 uint64_t src64 = *src64_ptr;
5712 uint64_t src64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
5713 uint64_t src = src64 & src64_mask;
5718 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* Source value read from a network-byte-order struct field. */
5720 static inline uint64_t
5721 instr_regarray_src_nbo(struct thread *t, struct instruction *ip)
5723 uint8_t *src_struct = t->structs[ip->regarray.dstsrc.struct_id];
5724 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->regarray.dstsrc.offset];
5725 uint64_t src64 = *src64_ptr;
5726 uint64_t src = ntoh64(src64) >> (64 - ip->regarray.dstsrc.n_bits);
5733 #define instr_regarray_src_nbo instr_regarray_src_hbo
/* Write src into a host-byte-order destination field (read-modify-write
 * so bits outside the n_bits-wide field are preserved).
 */
5738 instr_regarray_dst_hbo_src_hbo_set(struct thread *t, struct instruction *ip, uint64_t src)
5740 uint8_t *dst_struct = t->structs[ip->regarray.dstsrc.struct_id];
5741 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->regarray.dstsrc.offset];
5742 uint64_t dst64 = *dst64_ptr;
5743 uint64_t dst64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
5745 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
5749 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* Write src into a network-byte-order destination field. */
5752 instr_regarray_dst_nbo_src_hbo_set(struct thread *t, struct instruction *ip, uint64_t src)
5754 uint8_t *dst_struct = t->structs[ip->regarray.dstsrc.struct_id];
5755 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->regarray.dstsrc.offset];
5756 uint64_t dst64 = *dst64_ptr;
5757 uint64_t dst64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
5759 src = hton64(src) >> (64 - ip->regarray.dstsrc.n_bits);
5760 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
5765 #define instr_regarray_dst_nbo_src_hbo_set instr_regarray_dst_hbo_src_hbo_set
/*
 * regprefetch instruction handlers: prefetch the register-array entry
 * selected by a header field (r[h]), a metadata field (r[m]) or an
 * immediate (r[i]) into cache, ahead of a later regrd/regwr/regadd.
 *
 * Fix: the prefetch argument "&regarray[idx]" had been corrupted into
 * "(R)array[idx]" -- the "&reg" prefix was mis-decoded as the HTML
 * entity for the registered-trademark sign (U+00AE). The address-of
 * expression is restored in all three handlers below.
 */
5770 instr_regprefetch_rh_exec(struct rte_swx_pipeline *p)
5772 struct thread *t = &p->threads[p->thread_id];
5773 struct instruction *ip = t->ip;
5774 uint64_t *regarray, idx;
5776 TRACE("[Thread %2u] regprefetch (r[h])\n", p->thread_id);
5779 regarray = instr_regarray_regarray(p, ip);
5780 idx = instr_regarray_idx_nbo(p, t, ip);
5781 rte_prefetch0(&regarray[idx]);
5788 instr_regprefetch_rm_exec(struct rte_swx_pipeline *p)
5790 struct thread *t = &p->threads[p->thread_id];
5791 struct instruction *ip = t->ip;
5792 uint64_t *regarray, idx;
5794 TRACE("[Thread %2u] regprefetch (r[m])\n", p->thread_id);
5797 regarray = instr_regarray_regarray(p, ip);
5798 idx = instr_regarray_idx_hbo(p, t, ip);
5799 rte_prefetch0(&regarray[idx]);
5806 instr_regprefetch_ri_exec(struct rte_swx_pipeline *p)
5808 struct thread *t = &p->threads[p->thread_id];
5809 struct instruction *ip = t->ip;
5810 uint64_t *regarray, idx;
5812 TRACE("[Thread %2u] regprefetch (r[i])\n", p->thread_id);
5815 regarray = instr_regarray_regarray(p, ip);
5816 idx = instr_regarray_idx_imm(p, ip);
5817 rte_prefetch0(&regarray[idx]);
/*
 * regrd instruction handlers: read regarray[idx] into the destination
 * field. The two-letter suffix names the destination / index operand
 * kinds: h = header field (network byte order), m = metadata field
 * (host byte order), i = immediate index; each variant pairs the
 * matching idx_{nbo,hbo,imm} and dst_{nbo,hbo}_src_hbo_set helpers.
 *
 * NOTE(review): the extraction dropped lines from every function below
 * (qualifiers, braces, the thread-advance call, and the TRACE in the
 * mrm variant); consult the full file before editing.
 */
5824 instr_regrd_hrh_exec(struct rte_swx_pipeline *p)
5826 struct thread *t = &p->threads[p->thread_id];
5827 struct instruction *ip = t->ip;
5828 uint64_t *regarray, idx;
5830 TRACE("[Thread %2u] regrd (h = r[h])\n", p->thread_id);
5833 regarray = instr_regarray_regarray(p, ip);
5834 idx = instr_regarray_idx_nbo(p, t, ip);
5835 instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
5842 instr_regrd_hrm_exec(struct rte_swx_pipeline *p)
5844 struct thread *t = &p->threads[p->thread_id];
5845 struct instruction *ip = t->ip;
5846 uint64_t *regarray, idx;
5848 TRACE("[Thread %2u] regrd (h = r[m])\n", p->thread_id);
5851 regarray = instr_regarray_regarray(p, ip);
5852 idx = instr_regarray_idx_hbo(p, t, ip);
5853 instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
5860 instr_regrd_mrh_exec(struct rte_swx_pipeline *p)
5862 struct thread *t = &p->threads[p->thread_id];
5863 struct instruction *ip = t->ip;
5864 uint64_t *regarray, idx;
5866 TRACE("[Thread %2u] regrd (m = r[h])\n", p->thread_id);
5869 regarray = instr_regarray_regarray(p, ip);
5870 idx = instr_regarray_idx_nbo(p, t, ip);
5871 instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
5878 instr_regrd_mrm_exec(struct rte_swx_pipeline *p)
5880 struct thread *t = &p->threads[p->thread_id];
5881 struct instruction *ip = t->ip;
5882 uint64_t *regarray, idx;
5885 regarray = instr_regarray_regarray(p, ip);
5886 idx = instr_regarray_idx_hbo(p, t, ip);
5887 instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
5894 instr_regrd_hri_exec(struct rte_swx_pipeline *p)
5896 struct thread *t = &p->threads[p->thread_id];
5897 struct instruction *ip = t->ip;
5898 uint64_t *regarray, idx;
5900 TRACE("[Thread %2u] regrd (h = r[i])\n", p->thread_id);
5903 regarray = instr_regarray_regarray(p, ip);
5904 idx = instr_regarray_idx_imm(p, ip);
5905 instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
5912 instr_regrd_mri_exec(struct rte_swx_pipeline *p)
5914 struct thread *t = &p->threads[p->thread_id];
5915 struct instruction *ip = t->ip;
5916 uint64_t *regarray, idx;
5918 TRACE("[Thread %2u] regrd (m = r[i])\n", p->thread_id);
5921 regarray = instr_regarray_regarray(p, ip);
5922 idx = instr_regarray_idx_imm(p, ip);
5923 instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
/*
 * regwr instruction handlers: regarray[idx] = src. The suffix after
 * the leading 'r' names the index / source operand kinds: h = header
 * field (network byte order, via the _nbo helpers), m = metadata field
 * (host byte order, via the _hbo helpers), i = immediate (taken
 * directly from ip->regarray.{idx_val,dstsrc_val}).
 *
 * NOTE(review): the extraction dropped lines from every function below
 * (qualifiers, braces, the thread-advance call); consult the full file
 * before editing.
 */
5930 instr_regwr_rhh_exec(struct rte_swx_pipeline *p)
5932 struct thread *t = &p->threads[p->thread_id];
5933 struct instruction *ip = t->ip;
5934 uint64_t *regarray, idx, src;
5936 TRACE("[Thread %2u] regwr (r[h] = h)\n", p->thread_id);
5939 regarray = instr_regarray_regarray(p, ip);
5940 idx = instr_regarray_idx_nbo(p, t, ip);
5941 src = instr_regarray_src_nbo(t, ip);
5942 regarray[idx] = src;
5949 instr_regwr_rhm_exec(struct rte_swx_pipeline *p)
5951 struct thread *t = &p->threads[p->thread_id];
5952 struct instruction *ip = t->ip;
5953 uint64_t *regarray, idx, src;
5955 TRACE("[Thread %2u] regwr (r[h] = m)\n", p->thread_id);
5958 regarray = instr_regarray_regarray(p, ip);
5959 idx = instr_regarray_idx_nbo(p, t, ip);
5960 src = instr_regarray_src_hbo(t, ip);
5961 regarray[idx] = src;
5968 instr_regwr_rmh_exec(struct rte_swx_pipeline *p)
5970 struct thread *t = &p->threads[p->thread_id];
5971 struct instruction *ip = t->ip;
5972 uint64_t *regarray, idx, src;
5974 TRACE("[Thread %2u] regwr (r[m] = h)\n", p->thread_id);
5977 regarray = instr_regarray_regarray(p, ip);
5978 idx = instr_regarray_idx_hbo(p, t, ip);
5979 src = instr_regarray_src_nbo(t, ip);
5980 regarray[idx] = src;
5987 instr_regwr_rmm_exec(struct rte_swx_pipeline *p)
5989 struct thread *t = &p->threads[p->thread_id];
5990 struct instruction *ip = t->ip;
5991 uint64_t *regarray, idx, src;
5993 TRACE("[Thread %2u] regwr (r[m] = m)\n", p->thread_id);
5996 regarray = instr_regarray_regarray(p, ip);
5997 idx = instr_regarray_idx_hbo(p, t, ip);
5998 src = instr_regarray_src_hbo(t, ip);
5999 regarray[idx] = src;
6006 instr_regwr_rhi_exec(struct rte_swx_pipeline *p)
6008 struct thread *t = &p->threads[p->thread_id];
6009 struct instruction *ip = t->ip;
6010 uint64_t *regarray, idx, src;
6012 TRACE("[Thread %2u] regwr (r[h] = i)\n", p->thread_id);
6015 regarray = instr_regarray_regarray(p, ip);
6016 idx = instr_regarray_idx_nbo(p, t, ip);
6017 src = ip->regarray.dstsrc_val;
6018 regarray[idx] = src;
6025 instr_regwr_rmi_exec(struct rte_swx_pipeline *p)
6027 struct thread *t = &p->threads[p->thread_id];
6028 struct instruction *ip = t->ip;
6029 uint64_t *regarray, idx, src;
6031 TRACE("[Thread %2u] regwr (r[m] = i)\n", p->thread_id);
6034 regarray = instr_regarray_regarray(p, ip);
6035 idx = instr_regarray_idx_hbo(p, t, ip);
6036 src = ip->regarray.dstsrc_val;
6037 regarray[idx] = src;
6044 instr_regwr_rih_exec(struct rte_swx_pipeline *p)
6046 struct thread *t = &p->threads[p->thread_id];
6047 struct instruction *ip = t->ip;
6048 uint64_t *regarray, idx, src;
6050 TRACE("[Thread %2u] regwr (r[i] = h)\n", p->thread_id);
6053 regarray = instr_regarray_regarray(p, ip);
6054 idx = instr_regarray_idx_imm(p, ip);
6055 src = instr_regarray_src_nbo(t, ip);
6056 regarray[idx] = src;
6063 instr_regwr_rim_exec(struct rte_swx_pipeline *p)
6065 struct thread *t = &p->threads[p->thread_id];
6066 struct instruction *ip = t->ip;
6067 uint64_t *regarray, idx, src;
6069 TRACE("[Thread %2u] regwr (r[i] = m)\n", p->thread_id);
6072 regarray = instr_regarray_regarray(p, ip);
6073 idx = instr_regarray_idx_imm(p, ip);
6074 src = instr_regarray_src_hbo(t, ip);
6075 regarray[idx] = src;
6082 instr_regwr_rii_exec(struct rte_swx_pipeline *p)
6084 struct thread *t = &p->threads[p->thread_id];
6085 struct instruction *ip = t->ip;
6086 uint64_t *regarray, idx, src;
6088 TRACE("[Thread %2u] regwr (r[i] = i)\n", p->thread_id);
6091 regarray = instr_regarray_regarray(p, ip);
6092 idx = instr_regarray_idx_imm(p, ip);
6093 src = ip->regarray.dstsrc_val;
6094 regarray[idx] = src;
/*
 * regadd instruction handlers: regarray[idx] += src. Structurally
 * identical to the regwr handlers above, except the store is a
 * read-modify-write accumulate. Suffix convention: h = header field
 * (network byte order), m = metadata field (host byte order), i =
 * immediate.
 *
 * NOTE(review): the extraction dropped lines from every function below
 * (qualifiers, braces, the thread-advance call); consult the full file
 * before editing.
 */
6101 instr_regadd_rhh_exec(struct rte_swx_pipeline *p)
6103 struct thread *t = &p->threads[p->thread_id];
6104 struct instruction *ip = t->ip;
6105 uint64_t *regarray, idx, src;
6107 TRACE("[Thread %2u] regadd (r[h] += h)\n", p->thread_id);
6110 regarray = instr_regarray_regarray(p, ip);
6111 idx = instr_regarray_idx_nbo(p, t, ip);
6112 src = instr_regarray_src_nbo(t, ip);
6113 regarray[idx] += src;
6120 instr_regadd_rhm_exec(struct rte_swx_pipeline *p)
6122 struct thread *t = &p->threads[p->thread_id];
6123 struct instruction *ip = t->ip;
6124 uint64_t *regarray, idx, src;
6126 TRACE("[Thread %2u] regadd (r[h] += m)\n", p->thread_id);
6129 regarray = instr_regarray_regarray(p, ip);
6130 idx = instr_regarray_idx_nbo(p, t, ip);
6131 src = instr_regarray_src_hbo(t, ip);
6132 regarray[idx] += src;
6139 instr_regadd_rmh_exec(struct rte_swx_pipeline *p)
6141 struct thread *t = &p->threads[p->thread_id];
6142 struct instruction *ip = t->ip;
6143 uint64_t *regarray, idx, src;
6145 TRACE("[Thread %2u] regadd (r[m] += h)\n", p->thread_id);
6148 regarray = instr_regarray_regarray(p, ip);
6149 idx = instr_regarray_idx_hbo(p, t, ip);
6150 src = instr_regarray_src_nbo(t, ip);
6151 regarray[idx] += src;
6158 instr_regadd_rmm_exec(struct rte_swx_pipeline *p)
6160 struct thread *t = &p->threads[p->thread_id];
6161 struct instruction *ip = t->ip;
6162 uint64_t *regarray, idx, src;
6164 TRACE("[Thread %2u] regadd (r[m] += m)\n", p->thread_id);
6167 regarray = instr_regarray_regarray(p, ip);
6168 idx = instr_regarray_idx_hbo(p, t, ip);
6169 src = instr_regarray_src_hbo(t, ip);
6170 regarray[idx] += src;
6177 instr_regadd_rhi_exec(struct rte_swx_pipeline *p)
6179 struct thread *t = &p->threads[p->thread_id];
6180 struct instruction *ip = t->ip;
6181 uint64_t *regarray, idx, src;
6183 TRACE("[Thread %2u] regadd (r[h] += i)\n", p->thread_id);
6186 regarray = instr_regarray_regarray(p, ip);
6187 idx = instr_regarray_idx_nbo(p, t, ip);
6188 src = ip->regarray.dstsrc_val;
6189 regarray[idx] += src;
6196 instr_regadd_rmi_exec(struct rte_swx_pipeline *p)
6198 struct thread *t = &p->threads[p->thread_id];
6199 struct instruction *ip = t->ip;
6200 uint64_t *regarray, idx, src;
6202 TRACE("[Thread %2u] regadd (r[m] += i)\n", p->thread_id);
6205 regarray = instr_regarray_regarray(p, ip);
6206 idx = instr_regarray_idx_hbo(p, t, ip);
6207 src = ip->regarray.dstsrc_val;
6208 regarray[idx] += src;
6215 instr_regadd_rih_exec(struct rte_swx_pipeline *p)
6217 struct thread *t = &p->threads[p->thread_id];
6218 struct instruction *ip = t->ip;
6219 uint64_t *regarray, idx, src;
6221 TRACE("[Thread %2u] regadd (r[i] += h)\n", p->thread_id);
6224 regarray = instr_regarray_regarray(p, ip);
6225 idx = instr_regarray_idx_imm(p, ip);
6226 src = instr_regarray_src_nbo(t, ip);
6227 regarray[idx] += src;
6234 instr_regadd_rim_exec(struct rte_swx_pipeline *p)
6236 struct thread *t = &p->threads[p->thread_id];
6237 struct instruction *ip = t->ip;
6238 uint64_t *regarray, idx, src;
6240 TRACE("[Thread %2u] regadd (r[i] += m)\n", p->thread_id);
6243 regarray = instr_regarray_regarray(p, ip);
6244 idx = instr_regarray_idx_imm(p, ip);
6245 src = instr_regarray_src_hbo(t, ip);
6246 regarray[idx] += src;
6253 instr_regadd_rii_exec(struct rte_swx_pipeline *p)
6255 struct thread *t = &p->threads[p->thread_id];
6256 struct instruction *ip = t->ip;
6257 uint64_t *regarray, idx, src;
6259 TRACE("[Thread %2u] regadd (r[i] += i)\n", p->thread_id);
6262 regarray = instr_regarray_regarray(p, ip);
6263 idx = instr_regarray_idx_imm(p, ip);
6264 src = ip->regarray.dstsrc_val;
6265 regarray[idx] += src;
/* Forward declaration: meter-array lookup by name (defined later in the file). */
6274 static struct metarray *
6275 metarray_find(struct rte_swx_pipeline *p, const char *name);
/*
 * Translate "metprefetch METARRAY index" into METPREFETCH_H /
 * METPREFETCH_M when the index is a header ('h' prefix) or metadata
 * struct field, or METPREFETCH_I when it is an immediate integer --
 * the meter-array counterpart of instr_regprefetch_translate().
 */
6278 instr_metprefetch_translate(struct rte_swx_pipeline *p,
6279 struct action *action,
6282 struct instruction *instr,
6283 struct instruction_data *data __rte_unused)
6285 char *metarray = tokens[1], *idx = tokens[2];
6288 uint32_t idx_struct_id, idx_val;
6290 CHECK(n_tokens == 3, EINVAL);
6292 m = metarray_find(p, metarray);
6295 /* METPREFETCH_H, METPREFETCH_M. */
6296 fidx = struct_field_parse(p, action, idx, &idx_struct_id);
6298 CHECK(!fidx->var_size, EINVAL);
6300 instr->type = INSTR_METPREFETCH_M;
6302 instr->type = INSTR_METPREFETCH_H;
6304 instr->meter.metarray_id = m->id;
6305 instr->meter.idx.struct_id = (uint8_t)idx_struct_id;
6306 instr->meter.idx.n_bits = fidx->n_bits;
6307 instr->meter.idx.offset = fidx->offset / 8;
6311 /* METPREFETCH_I. */
6312 idx_val = strtoul(idx, &idx, 0);
6313 CHECK(!idx[0], EINVAL);
6315 instr->type = INSTR_METPREFETCH_I;
6316 instr->meter.metarray_id = m->id;
6317 instr->meter.idx_val = idx_val;
6322 instr_meter_translate(struct rte_swx_pipeline *p,
6323 struct action *action,
6326 struct instruction *instr,
6327 struct instruction_data *data __rte_unused)
6329 char *metarray = tokens[1], *idx = tokens[2], *length = tokens[3];
6330 char *color_in = tokens[4], *color_out = tokens[5];
6332 struct field *fidx, *flength, *fcin, *fcout;
6333 uint32_t idx_struct_id, length_struct_id;
6334 uint32_t color_in_struct_id, color_out_struct_id;
6336 CHECK(n_tokens == 6, EINVAL);
6338 m = metarray_find(p, metarray);
6341 fidx = struct_field_parse(p, action, idx, &idx_struct_id);
6343 flength = struct_field_parse(p, action, length, &length_struct_id);
6344 CHECK(flength, EINVAL);
6345 CHECK(!flength->var_size, EINVAL);
6347 fcin = struct_field_parse(p, action, color_in, &color_in_struct_id);
6349 fcout = struct_field_parse(p, NULL, color_out, &color_out_struct_id);
6350 CHECK(fcout, EINVAL);
6351 CHECK(!fcout->var_size, EINVAL);
6353 /* index = HMEFT, length = HMEFT, color_in = MEFT, color_out = MEF. */
6355 CHECK(!fidx->var_size, EINVAL);
6356 CHECK(!fcin->var_size, EINVAL);
6358 instr->type = INSTR_METER_MMM;
6359 if (idx[0] == 'h' && length[0] == 'h')
6360 instr->type = INSTR_METER_HHM;
6361 if (idx[0] == 'h' && length[0] != 'h')
6362 instr->type = INSTR_METER_HMM;
6363 if (idx[0] != 'h' && length[0] == 'h')
6364 instr->type = INSTR_METER_MHM;
6366 instr->meter.metarray_id = m->id;
6368 instr->meter.idx.struct_id = (uint8_t)idx_struct_id;
6369 instr->meter.idx.n_bits = fidx->n_bits;
6370 instr->meter.idx.offset = fidx->offset / 8;
6372 instr->meter.length.struct_id = (uint8_t)length_struct_id;
6373 instr->meter.length.n_bits = flength->n_bits;
6374 instr->meter.length.offset = flength->offset / 8;
6376 instr->meter.color_in.struct_id = (uint8_t)color_in_struct_id;
6377 instr->meter.color_in.n_bits = fcin->n_bits;
6378 instr->meter.color_in.offset = fcin->offset / 8;
6380 instr->meter.color_out.struct_id = (uint8_t)color_out_struct_id;
6381 instr->meter.color_out.n_bits = fcout->n_bits;
6382 instr->meter.color_out.offset = fcout->offset / 8;
6387 /* index = HMEFT, length = HMEFT, color_in = I, color_out = MEF. */
6388 if (fidx && !fcin) {
6389 uint32_t color_in_val;
6391 CHECK(!fidx->var_size, EINVAL);
6393 color_in_val = strtoul(color_in, &color_in, 0);
6394 CHECK(!color_in[0], EINVAL);
6396 instr->type = INSTR_METER_MMI;
6397 if (idx[0] == 'h' && length[0] == 'h')
6398 instr->type = INSTR_METER_HHI;
6399 if (idx[0] == 'h' && length[0] != 'h')
6400 instr->type = INSTR_METER_HMI;
6401 if (idx[0] != 'h' && length[0] == 'h')
6402 instr->type = INSTR_METER_MHI;
6404 instr->meter.metarray_id = m->id;
6406 instr->meter.idx.struct_id = (uint8_t)idx_struct_id;
6407 instr->meter.idx.n_bits = fidx->n_bits;
6408 instr->meter.idx.offset = fidx->offset / 8;
6410 instr->meter.length.struct_id = (uint8_t)length_struct_id;
6411 instr->meter.length.n_bits = flength->n_bits;
6412 instr->meter.length.offset = flength->offset / 8;
6414 instr->meter.color_in_val = color_in_val;
6416 instr->meter.color_out.struct_id = (uint8_t)color_out_struct_id;
6417 instr->meter.color_out.n_bits = fcout->n_bits;
6418 instr->meter.color_out.offset = fcout->offset / 8;
6423 /* index = I, length = HMEFT, color_in = MEFT, color_out = MEF. */
6424 if (!fidx && fcin) {
6427 idx_val = strtoul(idx, &idx, 0);
6428 CHECK(!idx[0], EINVAL);
6430 CHECK(!fcin->var_size, EINVAL);
6432 instr->type = INSTR_METER_IMM;
6433 if (length[0] == 'h')
6434 instr->type = INSTR_METER_IHM;
6436 instr->meter.metarray_id = m->id;
6438 instr->meter.idx_val = idx_val;
6440 instr->meter.length.struct_id = (uint8_t)length_struct_id;
6441 instr->meter.length.n_bits = flength->n_bits;
6442 instr->meter.length.offset = flength->offset / 8;
6444 instr->meter.color_in.struct_id = (uint8_t)color_in_struct_id;
6445 instr->meter.color_in.n_bits = fcin->n_bits;
6446 instr->meter.color_in.offset = fcin->offset / 8;
6448 instr->meter.color_out.struct_id = (uint8_t)color_out_struct_id;
6449 instr->meter.color_out.n_bits = fcout->n_bits;
6450 instr->meter.color_out.offset = fcout->offset / 8;
6455 /* index = I, length = HMEFT, color_in = I, color_out = MEF. */
6456 if (!fidx && !fcin) {
6457 uint32_t idx_val, color_in_val;
6459 idx_val = strtoul(idx, &idx, 0);
6460 CHECK(!idx[0], EINVAL);
6462 color_in_val = strtoul(color_in, &color_in, 0);
6463 CHECK(!color_in[0], EINVAL);
6465 instr->type = INSTR_METER_IMI;
6466 if (length[0] == 'h')
6467 instr->type = INSTR_METER_IHI;
6469 instr->meter.metarray_id = m->id;
6471 instr->meter.idx_val = idx_val;
6473 instr->meter.length.struct_id = (uint8_t)length_struct_id;
6474 instr->meter.length.n_bits = flength->n_bits;
6475 instr->meter.length.offset = flength->offset / 8;
6477 instr->meter.color_in_val = color_in_val;
6479 instr->meter.color_out.struct_id = (uint8_t)color_out_struct_id;
6480 instr->meter.color_out.n_bits = fcout->n_bits;
6481 instr->meter.color_out.offset = fcout->offset / 8;
6489 static inline struct meter *
6490 instr_meter_idx_hbo(struct rte_swx_pipeline *p, struct thread *t, struct instruction *ip)
6492 struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
6494 uint8_t *idx_struct = t->structs[ip->meter.idx.struct_id];
6495 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->meter.idx.offset];
6496 uint64_t idx64 = *idx64_ptr;
6497 uint64_t idx64_mask = UINT64_MAX >> (64 - (ip)->meter.idx.n_bits);
6498 uint64_t idx = idx64 & idx64_mask & r->size_mask;
6500 return &r->metarray[idx];
6503 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
6505 static inline struct meter *
6506 instr_meter_idx_nbo(struct rte_swx_pipeline *p, struct thread *t, struct instruction *ip)
6508 struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
6510 uint8_t *idx_struct = t->structs[ip->meter.idx.struct_id];
6511 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->meter.idx.offset];
6512 uint64_t idx64 = *idx64_ptr;
6513 uint64_t idx = (ntoh64(idx64) >> (64 - ip->meter.idx.n_bits)) & r->size_mask;
6515 return &r->metarray[idx];
6520 #define instr_meter_idx_nbo instr_meter_idx_hbo
6524 static inline struct meter *
6525 instr_meter_idx_imm(struct rte_swx_pipeline *p, struct instruction *ip)
6527 struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
6529 uint64_t idx = ip->meter.idx_val & r->size_mask;
6531 return &r->metarray[idx];
6534 static inline uint32_t
6535 instr_meter_length_hbo(struct thread *t, struct instruction *ip)
6537 uint8_t *src_struct = t->structs[ip->meter.length.struct_id];
6538 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.length.offset];
6539 uint64_t src64 = *src64_ptr;
6540 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->meter.length.n_bits);
6541 uint64_t src = src64 & src64_mask;
6543 return (uint32_t)src;
6546 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
6548 static inline uint32_t
6549 instr_meter_length_nbo(struct thread *t, struct instruction *ip)
6551 uint8_t *src_struct = t->structs[ip->meter.length.struct_id];
6552 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.length.offset];
6553 uint64_t src64 = *src64_ptr;
6554 uint64_t src = ntoh64(src64) >> (64 - ip->meter.length.n_bits);
6556 return (uint32_t)src;
6561 #define instr_meter_length_nbo instr_meter_length_hbo
6565 static inline enum rte_color
6566 instr_meter_color_in_hbo(struct thread *t, struct instruction *ip)
6568 uint8_t *src_struct = t->structs[ip->meter.color_in.struct_id];
6569 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.color_in.offset];
6570 uint64_t src64 = *src64_ptr;
6571 uint64_t src64_mask = UINT64_MAX >> (64 - ip->meter.color_in.n_bits);
6572 uint64_t src = src64 & src64_mask;
6574 return (enum rte_color)src;
6578 instr_meter_color_out_hbo_set(struct thread *t, struct instruction *ip, enum rte_color color_out)
6580 uint8_t *dst_struct = t->structs[ip->meter.color_out.struct_id];
6581 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->meter.color_out.offset];
6582 uint64_t dst64 = *dst64_ptr;
6583 uint64_t dst64_mask = UINT64_MAX >> (64 - ip->meter.color_out.n_bits);
6585 uint64_t src = (uint64_t)color_out;
6587 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
6591 instr_metprefetch_h_exec(struct rte_swx_pipeline *p)
6593 struct thread *t = &p->threads[p->thread_id];
6594 struct instruction *ip = t->ip;
6597 TRACE("[Thread %2u] metprefetch (h)\n", p->thread_id);
6600 m = instr_meter_idx_nbo(p, t, ip);
6608 instr_metprefetch_m_exec(struct rte_swx_pipeline *p)
6610 struct thread *t = &p->threads[p->thread_id];
6611 struct instruction *ip = t->ip;
6614 TRACE("[Thread %2u] metprefetch (m)\n", p->thread_id);
6617 m = instr_meter_idx_hbo(p, t, ip);
6625 instr_metprefetch_i_exec(struct rte_swx_pipeline *p)
6627 struct thread *t = &p->threads[p->thread_id];
6628 struct instruction *ip = t->ip;
6631 TRACE("[Thread %2u] metprefetch (i)\n", p->thread_id);
6634 m = instr_meter_idx_imm(p, ip);
6642 instr_meter_hhm_exec(struct rte_swx_pipeline *p)
6644 struct thread *t = &p->threads[p->thread_id];
6645 struct instruction *ip = t->ip;
6647 uint64_t time, n_pkts, n_bytes;
6649 enum rte_color color_in, color_out;
6651 TRACE("[Thread %2u] meter (hhm)\n", p->thread_id);
6654 m = instr_meter_idx_nbo(p, t, ip);
6655 rte_prefetch0(m->n_pkts);
6656 time = rte_get_tsc_cycles();
6657 length = instr_meter_length_nbo(t, ip);
6658 color_in = instr_meter_color_in_hbo(t, ip);
6660 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6661 &m->profile->profile,
6666 color_out &= m->color_mask;
6668 n_pkts = m->n_pkts[color_out];
6669 n_bytes = m->n_bytes[color_out];
6671 instr_meter_color_out_hbo_set(t, ip, color_out);
6673 m->n_pkts[color_out] = n_pkts + 1;
6674 m->n_bytes[color_out] = n_bytes + length;
6681 instr_meter_hhi_exec(struct rte_swx_pipeline *p)
6683 struct thread *t = &p->threads[p->thread_id];
6684 struct instruction *ip = t->ip;
6686 uint64_t time, n_pkts, n_bytes;
6688 enum rte_color color_in, color_out;
6690 TRACE("[Thread %2u] meter (hhi)\n", p->thread_id);
6693 m = instr_meter_idx_nbo(p, t, ip);
6694 rte_prefetch0(m->n_pkts);
6695 time = rte_get_tsc_cycles();
6696 length = instr_meter_length_nbo(t, ip);
6697 color_in = (enum rte_color)ip->meter.color_in_val;
6699 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6700 &m->profile->profile,
6705 color_out &= m->color_mask;
6707 n_pkts = m->n_pkts[color_out];
6708 n_bytes = m->n_bytes[color_out];
6710 instr_meter_color_out_hbo_set(t, ip, color_out);
6712 m->n_pkts[color_out] = n_pkts + 1;
6713 m->n_bytes[color_out] = n_bytes + length;
6720 instr_meter_hmm_exec(struct rte_swx_pipeline *p)
6722 struct thread *t = &p->threads[p->thread_id];
6723 struct instruction *ip = t->ip;
6725 uint64_t time, n_pkts, n_bytes;
6727 enum rte_color color_in, color_out;
6729 TRACE("[Thread %2u] meter (hmm)\n", p->thread_id);
6732 m = instr_meter_idx_nbo(p, t, ip);
6733 rte_prefetch0(m->n_pkts);
6734 time = rte_get_tsc_cycles();
6735 length = instr_meter_length_hbo(t, ip);
6736 color_in = instr_meter_color_in_hbo(t, ip);
6738 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6739 &m->profile->profile,
6744 color_out &= m->color_mask;
6746 n_pkts = m->n_pkts[color_out];
6747 n_bytes = m->n_bytes[color_out];
6749 instr_meter_color_out_hbo_set(t, ip, color_out);
6751 m->n_pkts[color_out] = n_pkts + 1;
6752 m->n_bytes[color_out] = n_bytes + length;
6758 instr_meter_hmi_exec(struct rte_swx_pipeline *p)
6760 struct thread *t = &p->threads[p->thread_id];
6761 struct instruction *ip = t->ip;
6763 uint64_t time, n_pkts, n_bytes;
6765 enum rte_color color_in, color_out;
6767 TRACE("[Thread %2u] meter (hmi)\n", p->thread_id);
6770 m = instr_meter_idx_nbo(p, t, ip);
6771 rte_prefetch0(m->n_pkts);
6772 time = rte_get_tsc_cycles();
6773 length = instr_meter_length_hbo(t, ip);
6774 color_in = (enum rte_color)ip->meter.color_in_val;
6776 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6777 &m->profile->profile,
6782 color_out &= m->color_mask;
6784 n_pkts = m->n_pkts[color_out];
6785 n_bytes = m->n_bytes[color_out];
6787 instr_meter_color_out_hbo_set(t, ip, color_out);
6789 m->n_pkts[color_out] = n_pkts + 1;
6790 m->n_bytes[color_out] = n_bytes + length;
6797 instr_meter_mhm_exec(struct rte_swx_pipeline *p)
6799 struct thread *t = &p->threads[p->thread_id];
6800 struct instruction *ip = t->ip;
6802 uint64_t time, n_pkts, n_bytes;
6804 enum rte_color color_in, color_out;
6806 TRACE("[Thread %2u] meter (mhm)\n", p->thread_id);
6809 m = instr_meter_idx_hbo(p, t, ip);
6810 rte_prefetch0(m->n_pkts);
6811 time = rte_get_tsc_cycles();
6812 length = instr_meter_length_nbo(t, ip);
6813 color_in = instr_meter_color_in_hbo(t, ip);
6815 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6816 &m->profile->profile,
6821 color_out &= m->color_mask;
6823 n_pkts = m->n_pkts[color_out];
6824 n_bytes = m->n_bytes[color_out];
6826 instr_meter_color_out_hbo_set(t, ip, color_out);
6828 m->n_pkts[color_out] = n_pkts + 1;
6829 m->n_bytes[color_out] = n_bytes + length;
6836 instr_meter_mhi_exec(struct rte_swx_pipeline *p)
6838 struct thread *t = &p->threads[p->thread_id];
6839 struct instruction *ip = t->ip;
6841 uint64_t time, n_pkts, n_bytes;
6843 enum rte_color color_in, color_out;
6845 TRACE("[Thread %2u] meter (mhi)\n", p->thread_id);
6848 m = instr_meter_idx_hbo(p, t, ip);
6849 rte_prefetch0(m->n_pkts);
6850 time = rte_get_tsc_cycles();
6851 length = instr_meter_length_nbo(t, ip);
6852 color_in = (enum rte_color)ip->meter.color_in_val;
6854 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6855 &m->profile->profile,
6860 color_out &= m->color_mask;
6862 n_pkts = m->n_pkts[color_out];
6863 n_bytes = m->n_bytes[color_out];
6865 instr_meter_color_out_hbo_set(t, ip, color_out);
6867 m->n_pkts[color_out] = n_pkts + 1;
6868 m->n_bytes[color_out] = n_bytes + length;
6875 instr_meter_mmm_exec(struct rte_swx_pipeline *p)
6877 struct thread *t = &p->threads[p->thread_id];
6878 struct instruction *ip = t->ip;
6880 uint64_t time, n_pkts, n_bytes;
6882 enum rte_color color_in, color_out;
6884 TRACE("[Thread %2u] meter (mmm)\n", p->thread_id);
6887 m = instr_meter_idx_hbo(p, t, ip);
6888 rte_prefetch0(m->n_pkts);
6889 time = rte_get_tsc_cycles();
6890 length = instr_meter_length_hbo(t, ip);
6891 color_in = instr_meter_color_in_hbo(t, ip);
6893 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6894 &m->profile->profile,
6899 color_out &= m->color_mask;
6901 n_pkts = m->n_pkts[color_out];
6902 n_bytes = m->n_bytes[color_out];
6904 instr_meter_color_out_hbo_set(t, ip, color_out);
6906 m->n_pkts[color_out] = n_pkts + 1;
6907 m->n_bytes[color_out] = n_bytes + length;
6914 instr_meter_mmi_exec(struct rte_swx_pipeline *p)
6916 struct thread *t = &p->threads[p->thread_id];
6917 struct instruction *ip = t->ip;
6919 uint64_t time, n_pkts, n_bytes;
6921 enum rte_color color_in, color_out;
6923 TRACE("[Thread %2u] meter (mmi)\n", p->thread_id);
6926 m = instr_meter_idx_hbo(p, t, ip);
6927 rte_prefetch0(m->n_pkts);
6928 time = rte_get_tsc_cycles();
6929 length = instr_meter_length_hbo(t, ip);
6930 color_in = (enum rte_color)ip->meter.color_in_val;
6932 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6933 &m->profile->profile,
6938 color_out &= m->color_mask;
6940 n_pkts = m->n_pkts[color_out];
6941 n_bytes = m->n_bytes[color_out];
6943 instr_meter_color_out_hbo_set(t, ip, color_out);
6945 m->n_pkts[color_out] = n_pkts + 1;
6946 m->n_bytes[color_out] = n_bytes + length;
6953 instr_meter_ihm_exec(struct rte_swx_pipeline *p)
6955 struct thread *t = &p->threads[p->thread_id];
6956 struct instruction *ip = t->ip;
6958 uint64_t time, n_pkts, n_bytes;
6960 enum rte_color color_in, color_out;
6962 TRACE("[Thread %2u] meter (ihm)\n", p->thread_id);
6965 m = instr_meter_idx_imm(p, ip);
6966 rte_prefetch0(m->n_pkts);
6967 time = rte_get_tsc_cycles();
6968 length = instr_meter_length_nbo(t, ip);
6969 color_in = instr_meter_color_in_hbo(t, ip);
6971 color_out = rte_meter_trtcm_color_aware_check(&m->m,
6972 &m->profile->profile,
6977 color_out &= m->color_mask;
6979 n_pkts = m->n_pkts[color_out];
6980 n_bytes = m->n_bytes[color_out];
6982 instr_meter_color_out_hbo_set(t, ip, color_out);
6984 m->n_pkts[color_out] = n_pkts + 1;
6985 m->n_bytes[color_out] = n_bytes + length;
6992 instr_meter_ihi_exec(struct rte_swx_pipeline *p)
6994 struct thread *t = &p->threads[p->thread_id];
6995 struct instruction *ip = t->ip;
6997 uint64_t time, n_pkts, n_bytes;
6999 enum rte_color color_in, color_out;
7001 TRACE("[Thread %2u] meter (ihi)\n", p->thread_id);
7004 m = instr_meter_idx_imm(p, ip);
7005 rte_prefetch0(m->n_pkts);
7006 time = rte_get_tsc_cycles();
7007 length = instr_meter_length_nbo(t, ip);
7008 color_in = (enum rte_color)ip->meter.color_in_val;
7010 color_out = rte_meter_trtcm_color_aware_check(&m->m,
7011 &m->profile->profile,
7016 color_out &= m->color_mask;
7018 n_pkts = m->n_pkts[color_out];
7019 n_bytes = m->n_bytes[color_out];
7021 instr_meter_color_out_hbo_set(t, ip, color_out);
7023 m->n_pkts[color_out] = n_pkts + 1;
7024 m->n_bytes[color_out] = n_bytes + length;
7031 instr_meter_imm_exec(struct rte_swx_pipeline *p)
7033 struct thread *t = &p->threads[p->thread_id];
7034 struct instruction *ip = t->ip;
7036 uint64_t time, n_pkts, n_bytes;
7038 enum rte_color color_in, color_out;
7040 TRACE("[Thread %2u] meter (imm)\n", p->thread_id);
7043 m = instr_meter_idx_imm(p, ip);
7044 rte_prefetch0(m->n_pkts);
7045 time = rte_get_tsc_cycles();
7046 length = instr_meter_length_hbo(t, ip);
7047 color_in = instr_meter_color_in_hbo(t, ip);
7049 color_out = rte_meter_trtcm_color_aware_check(&m->m,
7050 &m->profile->profile,
7055 color_out &= m->color_mask;
7057 n_pkts = m->n_pkts[color_out];
7058 n_bytes = m->n_bytes[color_out];
7060 instr_meter_color_out_hbo_set(t, ip, color_out);
7062 m->n_pkts[color_out] = n_pkts + 1;
7063 m->n_bytes[color_out] = n_bytes + length;
7069 instr_meter_imi_exec(struct rte_swx_pipeline *p)
7071 struct thread *t = &p->threads[p->thread_id];
7072 struct instruction *ip = t->ip;
7074 uint64_t time, n_pkts, n_bytes;
7076 enum rte_color color_in, color_out;
7078 TRACE("[Thread %2u] meter (imi)\n", p->thread_id);
7081 m = instr_meter_idx_imm(p, ip);
7082 rte_prefetch0(m->n_pkts);
7083 time = rte_get_tsc_cycles();
7084 length = instr_meter_length_hbo(t, ip);
7085 color_in = (enum rte_color)ip->meter.color_in_val;
7087 color_out = rte_meter_trtcm_color_aware_check(&m->m,
7088 &m->profile->profile,
7093 color_out &= m->color_mask;
7095 n_pkts = m->n_pkts[color_out];
7096 n_bytes = m->n_bytes[color_out];
7098 instr_meter_color_out_hbo_set(t, ip, color_out);
7100 m->n_pkts[color_out] = n_pkts + 1;
7101 m->n_bytes[color_out] = n_bytes + length;
7110 static struct action *
7111 action_find(struct rte_swx_pipeline *p, const char *name);
7114 instr_jmp_translate(struct rte_swx_pipeline *p __rte_unused,
7115 struct action *action __rte_unused,
7118 struct instruction *instr,
7119 struct instruction_data *data)
7121 CHECK(n_tokens == 2, EINVAL);
7123 strcpy(data->jmp_label, tokens[1]);
7125 instr->type = INSTR_JMP;
7126 instr->jmp.ip = NULL; /* Resolved later. */
7131 instr_jmp_valid_translate(struct rte_swx_pipeline *p,
7132 struct action *action __rte_unused,
7135 struct instruction *instr,
7136 struct instruction_data *data)
7140 CHECK(n_tokens == 3, EINVAL);
7142 strcpy(data->jmp_label, tokens[1]);
7144 h = header_parse(p, tokens[2]);
7147 instr->type = INSTR_JMP_VALID;
7148 instr->jmp.ip = NULL; /* Resolved later. */
7149 instr->jmp.header_id = h->id;
7154 instr_jmp_invalid_translate(struct rte_swx_pipeline *p,
7155 struct action *action __rte_unused,
7158 struct instruction *instr,
7159 struct instruction_data *data)
7163 CHECK(n_tokens == 3, EINVAL);
7165 strcpy(data->jmp_label, tokens[1]);
7167 h = header_parse(p, tokens[2]);
7170 instr->type = INSTR_JMP_INVALID;
7171 instr->jmp.ip = NULL; /* Resolved later. */
7172 instr->jmp.header_id = h->id;
7177 instr_jmp_hit_translate(struct rte_swx_pipeline *p __rte_unused,
7178 struct action *action,
7181 struct instruction *instr,
7182 struct instruction_data *data)
7184 CHECK(!action, EINVAL);
7185 CHECK(n_tokens == 2, EINVAL);
7187 strcpy(data->jmp_label, tokens[1]);
7189 instr->type = INSTR_JMP_HIT;
7190 instr->jmp.ip = NULL; /* Resolved later. */
7195 instr_jmp_miss_translate(struct rte_swx_pipeline *p __rte_unused,
7196 struct action *action,
7199 struct instruction *instr,
7200 struct instruction_data *data)
7202 CHECK(!action, EINVAL);
7203 CHECK(n_tokens == 2, EINVAL);
7205 strcpy(data->jmp_label, tokens[1]);
7207 instr->type = INSTR_JMP_MISS;
7208 instr->jmp.ip = NULL; /* Resolved later. */
7213 instr_jmp_action_hit_translate(struct rte_swx_pipeline *p,
7214 struct action *action,
7217 struct instruction *instr,
7218 struct instruction_data *data)
7222 CHECK(!action, EINVAL);
7223 CHECK(n_tokens == 3, EINVAL);
7225 strcpy(data->jmp_label, tokens[1]);
7227 a = action_find(p, tokens[2]);
7230 instr->type = INSTR_JMP_ACTION_HIT;
7231 instr->jmp.ip = NULL; /* Resolved later. */
7232 instr->jmp.action_id = a->id;
7237 instr_jmp_action_miss_translate(struct rte_swx_pipeline *p,
7238 struct action *action,
7241 struct instruction *instr,
7242 struct instruction_data *data)
7246 CHECK(!action, EINVAL);
7247 CHECK(n_tokens == 3, EINVAL);
7249 strcpy(data->jmp_label, tokens[1]);
7251 a = action_find(p, tokens[2]);
7254 instr->type = INSTR_JMP_ACTION_MISS;
7255 instr->jmp.ip = NULL; /* Resolved later. */
7256 instr->jmp.action_id = a->id;
7261 instr_jmp_eq_translate(struct rte_swx_pipeline *p,
7262 struct action *action,
7265 struct instruction *instr,
7266 struct instruction_data *data)
7268 char *a = tokens[2], *b = tokens[3];
7269 struct field *fa, *fb;
7271 uint32_t a_struct_id, b_struct_id;
7273 CHECK(n_tokens == 4, EINVAL);
7275 strcpy(data->jmp_label, tokens[1]);
7277 fa = struct_field_parse(p, action, a, &a_struct_id);
7279 CHECK(!fa->var_size, EINVAL);
7281 /* JMP_EQ, JMP_EQ_MH, JMP_EQ_HM, JMP_EQ_HH. */
7282 fb = struct_field_parse(p, action, b, &b_struct_id);
7284 CHECK(!fb->var_size, EINVAL);
7286 instr->type = INSTR_JMP_EQ;
7287 if (a[0] != 'h' && b[0] == 'h')
7288 instr->type = INSTR_JMP_EQ_MH;
7289 if (a[0] == 'h' && b[0] != 'h')
7290 instr->type = INSTR_JMP_EQ_HM;
7291 if (a[0] == 'h' && b[0] == 'h')
7292 instr->type = INSTR_JMP_EQ_HH;
7293 instr->jmp.ip = NULL; /* Resolved later. */
7295 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7296 instr->jmp.a.n_bits = fa->n_bits;
7297 instr->jmp.a.offset = fa->offset / 8;
7298 instr->jmp.b.struct_id = (uint8_t)b_struct_id;
7299 instr->jmp.b.n_bits = fb->n_bits;
7300 instr->jmp.b.offset = fb->offset / 8;
7305 b_val = strtoull(b, &b, 0);
7306 CHECK(!b[0], EINVAL);
7309 b_val = hton64(b_val) >> (64 - fa->n_bits);
7311 instr->type = INSTR_JMP_EQ_I;
7312 instr->jmp.ip = NULL; /* Resolved later. */
7313 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7314 instr->jmp.a.n_bits = fa->n_bits;
7315 instr->jmp.a.offset = fa->offset / 8;
7316 instr->jmp.b_val = b_val;
7321 instr_jmp_neq_translate(struct rte_swx_pipeline *p,
7322 struct action *action,
7325 struct instruction *instr,
7326 struct instruction_data *data)
7328 char *a = tokens[2], *b = tokens[3];
7329 struct field *fa, *fb;
7331 uint32_t a_struct_id, b_struct_id;
7333 CHECK(n_tokens == 4, EINVAL);
7335 strcpy(data->jmp_label, tokens[1]);
7337 fa = struct_field_parse(p, action, a, &a_struct_id);
7339 CHECK(!fa->var_size, EINVAL);
7341 /* JMP_NEQ, JMP_NEQ_MH, JMP_NEQ_HM, JMP_NEQ_HH. */
7342 fb = struct_field_parse(p, action, b, &b_struct_id);
7344 CHECK(!fb->var_size, EINVAL);
7346 instr->type = INSTR_JMP_NEQ;
7347 if (a[0] != 'h' && b[0] == 'h')
7348 instr->type = INSTR_JMP_NEQ_MH;
7349 if (a[0] == 'h' && b[0] != 'h')
7350 instr->type = INSTR_JMP_NEQ_HM;
7351 if (a[0] == 'h' && b[0] == 'h')
7352 instr->type = INSTR_JMP_NEQ_HH;
7353 instr->jmp.ip = NULL; /* Resolved later. */
7355 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7356 instr->jmp.a.n_bits = fa->n_bits;
7357 instr->jmp.a.offset = fa->offset / 8;
7358 instr->jmp.b.struct_id = (uint8_t)b_struct_id;
7359 instr->jmp.b.n_bits = fb->n_bits;
7360 instr->jmp.b.offset = fb->offset / 8;
7365 b_val = strtoull(b, &b, 0);
7366 CHECK(!b[0], EINVAL);
7369 b_val = hton64(b_val) >> (64 - fa->n_bits);
7371 instr->type = INSTR_JMP_NEQ_I;
7372 instr->jmp.ip = NULL; /* Resolved later. */
7373 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7374 instr->jmp.a.n_bits = fa->n_bits;
7375 instr->jmp.a.offset = fa->offset / 8;
7376 instr->jmp.b_val = b_val;
7381 instr_jmp_lt_translate(struct rte_swx_pipeline *p,
7382 struct action *action,
7385 struct instruction *instr,
7386 struct instruction_data *data)
7388 char *a = tokens[2], *b = tokens[3];
7389 struct field *fa, *fb;
7391 uint32_t a_struct_id, b_struct_id;
7393 CHECK(n_tokens == 4, EINVAL);
7395 strcpy(data->jmp_label, tokens[1]);
7397 fa = struct_field_parse(p, action, a, &a_struct_id);
7399 CHECK(!fa->var_size, EINVAL);
7401 /* JMP_LT, JMP_LT_MH, JMP_LT_HM, JMP_LT_HH. */
7402 fb = struct_field_parse(p, action, b, &b_struct_id);
7404 CHECK(!fb->var_size, EINVAL);
7406 instr->type = INSTR_JMP_LT;
7407 if (a[0] == 'h' && b[0] != 'h')
7408 instr->type = INSTR_JMP_LT_HM;
7409 if (a[0] != 'h' && b[0] == 'h')
7410 instr->type = INSTR_JMP_LT_MH;
7411 if (a[0] == 'h' && b[0] == 'h')
7412 instr->type = INSTR_JMP_LT_HH;
7413 instr->jmp.ip = NULL; /* Resolved later. */
7415 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7416 instr->jmp.a.n_bits = fa->n_bits;
7417 instr->jmp.a.offset = fa->offset / 8;
7418 instr->jmp.b.struct_id = (uint8_t)b_struct_id;
7419 instr->jmp.b.n_bits = fb->n_bits;
7420 instr->jmp.b.offset = fb->offset / 8;
7424 /* JMP_LT_MI, JMP_LT_HI. */
7425 b_val = strtoull(b, &b, 0);
7426 CHECK(!b[0], EINVAL);
7428 instr->type = INSTR_JMP_LT_MI;
7430 instr->type = INSTR_JMP_LT_HI;
7431 instr->jmp.ip = NULL; /* Resolved later. */
7433 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7434 instr->jmp.a.n_bits = fa->n_bits;
7435 instr->jmp.a.offset = fa->offset / 8;
7436 instr->jmp.b_val = b_val;
7441 instr_jmp_gt_translate(struct rte_swx_pipeline *p,
7442 struct action *action,
7445 struct instruction *instr,
7446 struct instruction_data *data)
7448 char *a = tokens[2], *b = tokens[3];
7449 struct field *fa, *fb;
7451 uint32_t a_struct_id, b_struct_id;
7453 CHECK(n_tokens == 4, EINVAL);
7455 strcpy(data->jmp_label, tokens[1]);
7457 fa = struct_field_parse(p, action, a, &a_struct_id);
7459 CHECK(!fa->var_size, EINVAL);
7461 /* JMP_GT, JMP_GT_MH, JMP_GT_HM, JMP_GT_HH. */
7462 fb = struct_field_parse(p, action, b, &b_struct_id);
7464 CHECK(!fb->var_size, EINVAL);
7466 instr->type = INSTR_JMP_GT;
7467 if (a[0] == 'h' && b[0] != 'h')
7468 instr->type = INSTR_JMP_GT_HM;
7469 if (a[0] != 'h' && b[0] == 'h')
7470 instr->type = INSTR_JMP_GT_MH;
7471 if (a[0] == 'h' && b[0] == 'h')
7472 instr->type = INSTR_JMP_GT_HH;
7473 instr->jmp.ip = NULL; /* Resolved later. */
7475 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7476 instr->jmp.a.n_bits = fa->n_bits;
7477 instr->jmp.a.offset = fa->offset / 8;
7478 instr->jmp.b.struct_id = (uint8_t)b_struct_id;
7479 instr->jmp.b.n_bits = fb->n_bits;
7480 instr->jmp.b.offset = fb->offset / 8;
7484 /* JMP_GT_MI, JMP_GT_HI. */
7485 b_val = strtoull(b, &b, 0);
7486 CHECK(!b[0], EINVAL);
7488 instr->type = INSTR_JMP_GT_MI;
7490 instr->type = INSTR_JMP_GT_HI;
7491 instr->jmp.ip = NULL; /* Resolved later. */
7493 instr->jmp.a.struct_id = (uint8_t)a_struct_id;
7494 instr->jmp.a.n_bits = fa->n_bits;
7495 instr->jmp.a.offset = fa->offset / 8;
7496 instr->jmp.b_val = b_val;
7501 instr_jmp_exec(struct rte_swx_pipeline *p)
7503 struct thread *t = &p->threads[p->thread_id];
7504 struct instruction *ip = t->ip;
7506 TRACE("[Thread %2u] jmp\n", p->thread_id);
7508 thread_ip_set(t, ip->jmp.ip);
7512 instr_jmp_valid_exec(struct rte_swx_pipeline *p)
7514 struct thread *t = &p->threads[p->thread_id];
7515 struct instruction *ip = t->ip;
7516 uint32_t header_id = ip->jmp.header_id;
7518 TRACE("[Thread %2u] jmpv\n", p->thread_id);
7520 t->ip = HEADER_VALID(t, header_id) ? ip->jmp.ip : (t->ip + 1);
7524 instr_jmp_invalid_exec(struct rte_swx_pipeline *p)
7526 struct thread *t = &p->threads[p->thread_id];
7527 struct instruction *ip = t->ip;
7528 uint32_t header_id = ip->jmp.header_id;
7530 TRACE("[Thread %2u] jmpnv\n", p->thread_id);
7532 t->ip = HEADER_VALID(t, header_id) ? (t->ip + 1) : ip->jmp.ip;
7536 instr_jmp_hit_exec(struct rte_swx_pipeline *p)
7538 struct thread *t = &p->threads[p->thread_id];
7539 struct instruction *ip = t->ip;
7540 struct instruction *ip_next[] = {t->ip + 1, ip->jmp.ip};
7542 TRACE("[Thread %2u] jmph\n", p->thread_id);
7544 t->ip = ip_next[t->hit];
7548 instr_jmp_miss_exec(struct rte_swx_pipeline *p)
7550 struct thread *t = &p->threads[p->thread_id];
7551 struct instruction *ip = t->ip;
7552 struct instruction *ip_next[] = {ip->jmp.ip, t->ip + 1};
7554 TRACE("[Thread %2u] jmpnh\n", p->thread_id);
7556 t->ip = ip_next[t->hit];
7560 instr_jmp_action_hit_exec(struct rte_swx_pipeline *p)
7562 struct thread *t = &p->threads[p->thread_id];
7563 struct instruction *ip = t->ip;
7565 TRACE("[Thread %2u] jmpa\n", p->thread_id);
7567 t->ip = (ip->jmp.action_id == t->action_id) ? ip->jmp.ip : (t->ip + 1);
7571 instr_jmp_action_miss_exec(struct rte_swx_pipeline *p)
7573 struct thread *t = &p->threads[p->thread_id];
7574 struct instruction *ip = t->ip;
7576 TRACE("[Thread %2u] jmpna\n", p->thread_id);
7578 t->ip = (ip->jmp.action_id == t->action_id) ? (t->ip + 1) : ip->jmp.ip;
7582 instr_jmp_eq_exec(struct rte_swx_pipeline *p)
7584 struct thread *t = &p->threads[p->thread_id];
7585 struct instruction *ip = t->ip;
7587 TRACE("[Thread %2u] jmpeq\n", p->thread_id);
7593 instr_jmp_eq_mh_exec(struct rte_swx_pipeline *p)
7595 struct thread *t = &p->threads[p->thread_id];
7596 struct instruction *ip = t->ip;
7598 TRACE("[Thread %2u] jmpeq (mh)\n", p->thread_id);
7600 JMP_CMP_MH(t, ip, ==);
7604 instr_jmp_eq_hm_exec(struct rte_swx_pipeline *p)
7606 struct thread *t = &p->threads[p->thread_id];
7607 struct instruction *ip = t->ip;
7609 TRACE("[Thread %2u] jmpeq (hm)\n", p->thread_id);
7611 JMP_CMP_HM(t, ip, ==);
7615 instr_jmp_eq_hh_exec(struct rte_swx_pipeline *p)
7617 struct thread *t = &p->threads[p->thread_id];
7618 struct instruction *ip = t->ip;
7620 TRACE("[Thread %2u] jmpeq (hh)\n", p->thread_id);
7622 JMP_CMP_HH_FAST(t, ip, ==);
7626 instr_jmp_eq_i_exec(struct rte_swx_pipeline *p)
7628 struct thread *t = &p->threads[p->thread_id];
7629 struct instruction *ip = t->ip;
7631 TRACE("[Thread %2u] jmpeq (i)\n", p->thread_id);
7633 JMP_CMP_I(t, ip, ==);
7637 instr_jmp_neq_exec(struct rte_swx_pipeline *p)
7639 struct thread *t = &p->threads[p->thread_id];
7640 struct instruction *ip = t->ip;
7642 TRACE("[Thread %2u] jmpneq\n", p->thread_id);
7648 instr_jmp_neq_mh_exec(struct rte_swx_pipeline *p)
7650 struct thread *t = &p->threads[p->thread_id];
7651 struct instruction *ip = t->ip;
7653 TRACE("[Thread %2u] jmpneq (mh)\n", p->thread_id);
7655 JMP_CMP_MH(t, ip, !=);
7659 instr_jmp_neq_hm_exec(struct rte_swx_pipeline *p)
7661 struct thread *t = &p->threads[p->thread_id];
7662 struct instruction *ip = t->ip;
7664 TRACE("[Thread %2u] jmpneq (hm)\n", p->thread_id);
7666 JMP_CMP_HM(t, ip, !=);
7670 instr_jmp_neq_hh_exec(struct rte_swx_pipeline *p)
7672 struct thread *t = &p->threads[p->thread_id];
7673 struct instruction *ip = t->ip;
7675 TRACE("[Thread %2u] jmpneq (hh)\n", p->thread_id);
7677 JMP_CMP_HH_FAST(t, ip, !=);
7681 instr_jmp_neq_i_exec(struct rte_swx_pipeline *p)
7683 struct thread *t = &p->threads[p->thread_id];
7684 struct instruction *ip = t->ip;
7686 TRACE("[Thread %2u] jmpneq (i)\n", p->thread_id);
7688 JMP_CMP_I(t, ip, !=);
7692 instr_jmp_lt_exec(struct rte_swx_pipeline *p)
7694 struct thread *t = &p->threads[p->thread_id];
7695 struct instruction *ip = t->ip;
7697 TRACE("[Thread %2u] jmplt\n", p->thread_id);
7703 instr_jmp_lt_mh_exec(struct rte_swx_pipeline *p)
7705 struct thread *t = &p->threads[p->thread_id];
7706 struct instruction *ip = t->ip;
7708 TRACE("[Thread %2u] jmplt (mh)\n", p->thread_id);
7710 JMP_CMP_MH(t, ip, <);
7714 instr_jmp_lt_hm_exec(struct rte_swx_pipeline *p)
7716 struct thread *t = &p->threads[p->thread_id];
7717 struct instruction *ip = t->ip;
7719 TRACE("[Thread %2u] jmplt (hm)\n", p->thread_id);
7721 JMP_CMP_HM(t, ip, <);
7725 instr_jmp_lt_hh_exec(struct rte_swx_pipeline *p)
7727 struct thread *t = &p->threads[p->thread_id];
7728 struct instruction *ip = t->ip;
7730 TRACE("[Thread %2u] jmplt (hh)\n", p->thread_id);
7732 JMP_CMP_HH(t, ip, <);
7736 instr_jmp_lt_mi_exec(struct rte_swx_pipeline *p)
7738 struct thread *t = &p->threads[p->thread_id];
7739 struct instruction *ip = t->ip;
7741 TRACE("[Thread %2u] jmplt (mi)\n", p->thread_id);
7743 JMP_CMP_MI(t, ip, <);
7747 instr_jmp_lt_hi_exec(struct rte_swx_pipeline *p)
7749 struct thread *t = &p->threads[p->thread_id];
7750 struct instruction *ip = t->ip;
7752 TRACE("[Thread %2u] jmplt (hi)\n", p->thread_id);
7754 JMP_CMP_HI(t, ip, <);
/* jmpgt ("jump if greater than") executors: a generic variant plus the same
 * per-operand-type specializations as the jmplt family above (mh/hm/hh/mi/hi),
 * using the ">" comparison operator in the JMP_CMP_* helper macros.
 */
7758 instr_jmp_gt_exec(struct rte_swx_pipeline *p)
7760 struct thread *t = &p->threads[p->thread_id];
7761 struct instruction *ip = t->ip;
7763 TRACE("[Thread %2u] jmpgt\n", p->thread_id);
/* Meta (first operand) vs. header (second operand). */
7769 instr_jmp_gt_mh_exec(struct rte_swx_pipeline *p)
7771 struct thread *t = &p->threads[p->thread_id];
7772 struct instruction *ip = t->ip;
7774 TRACE("[Thread %2u] jmpgt (mh)\n", p->thread_id);
7776 JMP_CMP_MH(t, ip, >);
/* Header vs. meta. */
7780 instr_jmp_gt_hm_exec(struct rte_swx_pipeline *p)
7782 struct thread *t = &p->threads[p->thread_id];
7783 struct instruction *ip = t->ip;
7785 TRACE("[Thread %2u] jmpgt (hm)\n", p->thread_id);
7787 JMP_CMP_HM(t, ip, >);
/* Both operands are header fields. */
7791 instr_jmp_gt_hh_exec(struct rte_swx_pipeline *p)
7793 struct thread *t = &p->threads[p->thread_id];
7794 struct instruction *ip = t->ip;
7796 TRACE("[Thread %2u] jmpgt (hh)\n", p->thread_id);
7798 JMP_CMP_HH(t, ip, >);
/* Meta field vs. immediate value. */
7802 instr_jmp_gt_mi_exec(struct rte_swx_pipeline *p)
7804 struct thread *t = &p->threads[p->thread_id];
7805 struct instruction *ip = t->ip;
7807 TRACE("[Thread %2u] jmpgt (mi)\n", p->thread_id);
7809 JMP_CMP_MI(t, ip, >);
/* Header field vs. immediate value. */
7813 instr_jmp_gt_hi_exec(struct rte_swx_pipeline *p)
7815 struct thread *t = &p->threads[p->thread_id];
7816 struct instruction *ip = t->ip;
7818 TRACE("[Thread %2u] jmpgt (hi)\n", p->thread_id);
7820 JMP_CMP_HI(t, ip, >);
/* Translate the "return" instruction. It is only valid inside an action
 * (action != NULL) and takes no arguments (the single token is the mnemonic
 * itself). On success, the instruction type is set to INSTR_RETURN.
 */
7827 instr_return_translate(struct rte_swx_pipeline *p __rte_unused,
7828 struct action *action,
7829 char **tokens __rte_unused,
7831 struct instruction *instr,
7832 struct instruction_data *data __rte_unused)
7834 CHECK(action, EINVAL);
7835 CHECK(n_tokens == 1, EINVAL);
7837 instr->type = INSTR_RETURN;
/* Executor for INSTR_RETURN. NOTE(review): presumably hands control back
 * from the action to the pipeline program — confirm against the thread IP
 * handling not visible in this excerpt.
 */
7842 instr_return_exec(struct rte_swx_pipeline *p)
7844 struct thread *t = &p->threads[p->thread_id];
7846 TRACE("[Thread %2u] return\n", p->thread_id);
/* Translate one instruction string into an (instr, data) pair.
 *
 * Steps: (1) split the string into whitespace-separated tokens, bounded by
 * RTE_SWX_INSTRUCTION_TOKENS_MAX and validated by CHECK_NAME; (2) consume an
 * optional leading "LABEL :" pair into data->label; (3) dispatch on the
 * mnemonic token to the per-instruction *_translate() helper. Returns the
 * helper's status, or -EINVAL for empty/over-long/unknown input.
 */
7852 instr_translate(struct rte_swx_pipeline *p,
7853 struct action *action,
7855 struct instruction *instr,
7856 struct instruction_data *data)
7858 char *tokens[RTE_SWX_INSTRUCTION_TOKENS_MAX];
7859 int n_tokens = 0, tpos = 0;
7861 /* Parse the instruction string into tokens. */
/* strtok_r is used (re-entrant); the string pointer doubles as the
 * save-pointer, so the input buffer is consumed in place.
 */
7865 token = strtok_r(string, " \t\v", &string);
7869 CHECK(n_tokens < RTE_SWX_INSTRUCTION_TOKENS_MAX, EINVAL);
7870 CHECK_NAME(token, EINVAL);
7872 tokens[n_tokens] = token;
7876 CHECK(n_tokens, EINVAL);
7878 /* Handle the optional instruction label. */
7879 if ((n_tokens >= 2) && !strcmp(tokens[1], ":")) {
7880 strcpy(data->label, tokens[0]);
/* After skipping "LABEL :", at least one token (the mnemonic) must remain. */
7883 CHECK(n_tokens - tpos, EINVAL);
7886 /* Identify the instruction type. */
/* Packet I/O. */
7887 if (!strcmp(tokens[tpos], "rx"))
7888 return instr_rx_translate(p,
7895 if (!strcmp(tokens[tpos], "tx"))
7896 return instr_tx_translate(p,
7903 if (!strcmp(tokens[tpos], "drop"))
7904 return instr_drop_translate(p,
/* Header operations. */
7911 if (!strcmp(tokens[tpos], "extract"))
7912 return instr_hdr_extract_translate(p,
7919 if (!strcmp(tokens[tpos], "emit"))
7920 return instr_hdr_emit_translate(p,
7927 if (!strcmp(tokens[tpos], "validate"))
7928 return instr_hdr_validate_translate(p,
7935 if (!strcmp(tokens[tpos], "invalidate"))
7936 return instr_hdr_invalidate_translate(p,
/* Move and ALU operations. */
7943 if (!strcmp(tokens[tpos], "mov"))
7944 return instr_mov_translate(p,
7951 if (!strcmp(tokens[tpos], "add"))
7952 return instr_alu_add_translate(p,
7959 if (!strcmp(tokens[tpos], "sub"))
7960 return instr_alu_sub_translate(p,
7967 if (!strcmp(tokens[tpos], "ckadd"))
7968 return instr_alu_ckadd_translate(p,
7975 if (!strcmp(tokens[tpos], "cksub"))
7976 return instr_alu_cksub_translate(p,
7983 if (!strcmp(tokens[tpos], "and"))
7984 return instr_alu_and_translate(p,
7991 if (!strcmp(tokens[tpos], "or"))
7992 return instr_alu_or_translate(p,
7999 if (!strcmp(tokens[tpos], "xor"))
8000 return instr_alu_xor_translate(p,
8007 if (!strcmp(tokens[tpos], "shl"))
8008 return instr_alu_shl_translate(p,
8015 if (!strcmp(tokens[tpos], "shr"))
8016 return instr_alu_shr_translate(p,
/* Register array operations. */
8023 if (!strcmp(tokens[tpos], "regprefetch"))
8024 return instr_regprefetch_translate(p,
8031 if (!strcmp(tokens[tpos], "regrd"))
8032 return instr_regrd_translate(p,
8039 if (!strcmp(tokens[tpos], "regwr"))
8040 return instr_regwr_translate(p,
8047 if (!strcmp(tokens[tpos], "regadd"))
8048 return instr_regadd_translate(p,
/* Meter operations. */
8055 if (!strcmp(tokens[tpos], "metprefetch"))
8056 return instr_metprefetch_translate(p,
8063 if (!strcmp(tokens[tpos], "meter"))
8064 return instr_meter_translate(p,
/* Table lookup and extern calls. */
8071 if (!strcmp(tokens[tpos], "table"))
8072 return instr_table_translate(p,
8079 if (!strcmp(tokens[tpos], "extern"))
8080 return instr_extern_translate(p,
/* Jumps: unconditional, header-validity, table hit/miss, action,
 * and two-operand comparisons.
 */
8087 if (!strcmp(tokens[tpos], "jmp"))
8088 return instr_jmp_translate(p,
8095 if (!strcmp(tokens[tpos], "jmpv"))
8096 return instr_jmp_valid_translate(p,
8103 if (!strcmp(tokens[tpos], "jmpnv"))
8104 return instr_jmp_invalid_translate(p,
8111 if (!strcmp(tokens[tpos], "jmph"))
8112 return instr_jmp_hit_translate(p,
8119 if (!strcmp(tokens[tpos], "jmpnh"))
8120 return instr_jmp_miss_translate(p,
8127 if (!strcmp(tokens[tpos], "jmpa"))
8128 return instr_jmp_action_hit_translate(p,
8135 if (!strcmp(tokens[tpos], "jmpna"))
8136 return instr_jmp_action_miss_translate(p,
8143 if (!strcmp(tokens[tpos], "jmpeq"))
8144 return instr_jmp_eq_translate(p,
8151 if (!strcmp(tokens[tpos], "jmpneq"))
8152 return instr_jmp_neq_translate(p,
8159 if (!strcmp(tokens[tpos], "jmplt"))
8160 return instr_jmp_lt_translate(p,
8167 if (!strcmp(tokens[tpos], "jmpgt"))
8168 return instr_jmp_gt_translate(p,
/* Action-only instruction. */
8175 if (!strcmp(tokens[tpos], "return"))
8176 return instr_return_translate(p,
/* Linear search for the instruction carrying the given label; returns a
 * pointer to its instruction_data entry, or NULL when not found.
 */
8186 static struct instruction_data *
8187 label_find(struct instruction_data *data, uint32_t n, const char *label)
8191 for (i = 0; i < n; i++)
8192 if (!strcmp(label, data[i].label))
/* Count how many instructions jump to the given label (i.e. reference it
 * through their jmp_label field).
 */
8199 label_is_used(struct instruction_data *data, uint32_t n, const char *label)
8201 uint32_t count = 0, i;
8206 for (i = 0; i < n; i++)
8207 if (!strcmp(label, data[i].jmp_label))
/* Validate labels over the whole instruction array: every non-empty label
 * must be unique, and each label's user (jump) count is cached into
 * data->n_users for later use by the peephole optimizer.
 */
8214 instr_label_check(struct instruction_data *instruction_data,
8215 uint32_t n_instructions)
8219 /* Check that all instruction labels are unique. */
8220 for (i = 0; i < n_instructions; i++) {
8221 struct instruction_data *data = &instruction_data[i];
8222 char *label = data->label;
/* O(n^2) pairwise scan — acceptable for the bounded program sizes here. */
8228 for (j = i + 1; j < n_instructions; j++)
8229 CHECK(strcmp(label, data[j].label), EINVAL);
8232 /* Get users for each instruction label. */
8233 for (i = 0; i < n_instructions; i++) {
8234 struct instruction_data *data = &instruction_data[i];
8235 char *label = data->label;
8237 data->n_users = label_is_used(instruction_data,
/* Resolve each jump instruction's target label into a direct instruction
 * pointer (instr->jmp.ip), so execution never does a label lookup. Fails
 * with -EINVAL if any referenced label does not exist.
 */
8246 instr_jmp_resolve(struct instruction *instructions,
8247 struct instruction_data *instruction_data,
8248 uint32_t n_instructions)
8252 for (i = 0; i < n_instructions; i++) {
8253 struct instruction *instr = &instructions[i];
8254 struct instruction_data *data = &instruction_data[i];
8255 struct instruction_data *found;
8257 if (!instruction_is_jmp(instr))
8260 found = label_find(instruction_data,
8263 CHECK(found, EINVAL);
/* The two arrays are parallel, so the data-array index (found -
 * instruction_data) is also the target's index in instructions[].
 */
8265 instr->jmp.ip = &instructions[found - instruction_data];
/* Structural sanity checks on a translated program. Two rule sets are
 * visible here: one requires the program to start with rx, contain at least
 * one tx, and end with tx or an unconditional jump; the other requires at
 * least one return or tx instruction. NOTE(review): these presumably apply
 * to the pipeline program vs. an action program respectively — the selecting
 * branch is not visible in this excerpt; confirm.
 */
8272 instr_verify(struct rte_swx_pipeline *p __rte_unused,
8274 struct instruction *instr,
8275 struct instruction_data *data __rte_unused,
8276 uint32_t n_instructions)
8279 enum instruction_type type;
8282 /* Check that the first instruction is rx. */
8283 CHECK(instr[0].type == INSTR_RX, EINVAL);
8285 /* Check that there is at least one tx instruction. */
8286 for (i = 0; i < n_instructions; i++) {
8287 type = instr[i].type;
8289 if (instruction_is_tx(type))
8292 CHECK(i < n_instructions, EINVAL);
8294 /* Check that the last instruction is either tx or unconditional
8297 type = instr[n_instructions - 1].type;
8298 CHECK(instruction_is_tx(type) || (type == INSTR_JMP), EINVAL);
8302 enum instruction_type type;
8305 /* Check that there is at least one return or tx instruction. */
8306 for (i = 0; i < n_instructions; i++) {
8307 type = instr[i].type;
8309 if ((type == INSTR_RETURN) || instruction_is_tx(type))
8312 CHECK(i < n_instructions, EINVAL);
/* Compact the instruction array in place by copying every still-valid
 * (instr, data) pair down over the slots of instructions that the optimizer
 * marked invalid. Returns the new (smaller or equal) instruction count.
 */
8319 instr_compact(struct instruction *instructions,
8320 struct instruction_data *instruction_data,
8321 uint32_t n_instructions)
8323 uint32_t i, pos = 0;
8325 /* Eliminate the invalid instructions that have been optimized out. */
8326 for (i = 0; i < n_instructions; i++) {
8327 struct instruction *instr = &instructions[i];
8328 struct instruction_data *data = &instruction_data[i];
/* pos <= i always holds, so the copy never overwrites unread entries. */
8334 memcpy(&instructions[pos], instr, sizeof(*instr));
8335 memcpy(&instruction_data[pos], data, sizeof(*data));
/* Peephole pattern: a run of consecutive "extract" instructions is fused
 * into a single multi-header extract.
 *
 * Search: count consecutive valid INSTR_HDR_EXTRACT instructions starting at
 * instr[0], bounded by the header_id[] array capacity; a run is broken by a
 * labeled instruction (data[i].n_users) since jumping into the middle of a
 * fused instruction would be wrong.
 */
8345 instr_pattern_extract_many_search(struct instruction *instr,
8346 struct instruction_data *data,
8348 uint32_t *n_pattern_instr)
8352 for (i = 0; i < n_instr; i++) {
8353 if (data[i].invalid)
8356 if (instr[i].type != INSTR_HDR_EXTRACT)
8359 if (i == RTE_DIM(instr->io.hdr.header_id))
8362 if (i && data[i].n_users)
8369 *n_pattern_instr = i;
/* Replace: fold each subsequent extract's header descriptor into slot i of
 * the first instruction, then mark the subsumed instructions invalid.
 */
8374 instr_pattern_extract_many_replace(struct instruction *instr,
8375 struct instruction_data *data,
8380 for (i = 1; i < n_instr; i++) {
8382 instr[0].io.hdr.header_id[i] = instr[i].io.hdr.header_id[0];
8383 instr[0].io.hdr.struct_id[i] = instr[i].io.hdr.struct_id[0];
8384 instr[0].io.hdr.n_bytes[i] = instr[i].io.hdr.n_bytes[0];
8386 data[i].invalid = 1;
/* Driver: scan the program, apply search/replace at each position, then
 * compact away the invalidated instructions. Returns the new count.
 */
8391 instr_pattern_extract_many_optimize(struct instruction *instructions,
8392 struct instruction_data *instruction_data,
8393 uint32_t n_instructions)
8397 for (i = 0; i < n_instructions; ) {
8398 struct instruction *instr = &instructions[i];
8399 struct instruction_data *data = &instruction_data[i];
8400 uint32_t n_instr = 0;
8404 detected = instr_pattern_extract_many_search(instr,
8409 instr_pattern_extract_many_replace(instr,
8416 /* No pattern starting at the current instruction. */
8420 /* Eliminate the invalid instructions that have been optimized out. */
8421 n_instructions = instr_compact(instructions,
8425 return n_instructions;
/* Peephole pattern: a run of "emit" instructions immediately followed by a
 * tx is fused into a single emit-many-and-tx instruction.
 *
 * Search: count consecutive valid INSTR_HDR_EMIT instructions (bounded by
 * the header_id[] capacity, broken by a labeled instruction), then require
 * the next instruction to be an unlabeled tx.
 */
8429 instr_pattern_emit_many_tx_search(struct instruction *instr,
8430 struct instruction_data *data,
8432 uint32_t *n_pattern_instr)
8436 for (i = 0; i < n_instr; i++) {
8437 if (data[i].invalid)
8440 if (instr[i].type != INSTR_HDR_EMIT)
8443 if (i == RTE_DIM(instr->io.hdr.header_id))
8446 if (i && data[i].n_users)
8453 if (!instruction_is_tx(instr[i].type))
8456 if (data[i].n_users)
8461 *n_pattern_instr = i;
/* Replace: fold the extra emits into the first instruction's header slots,
 * absorb the trailing tx's port operand, and invalidate the subsumed
 * instructions.
 */
8466 instr_pattern_emit_many_tx_replace(struct instruction *instr,
8467 struct instruction_data *data,
8472 /* Any emit instruction in addition to the first one. */
8473 for (i = 1; i < n_instr - 1; i++) {
8475 instr[0].io.hdr.header_id[i] = instr[i].io.hdr.header_id[0];
8476 instr[0].io.hdr.struct_id[i] = instr[i].io.hdr.struct_id[0];
8477 instr[0].io.hdr.n_bytes[i] = instr[i].io.hdr.n_bytes[0];
8479 data[i].invalid = 1;
8482 /* The TX instruction is the last one in the pattern. */
8484 instr[0].io.io.offset = instr[i].io.io.offset;
8485 instr[0].io.io.n_bits = instr[i].io.io.n_bits;
8486 data[i].invalid = 1;
/* Driver: apply the emit-many+tx fusion across the program, then compact. */
8490 instr_pattern_emit_many_tx_optimize(struct instruction *instructions,
8491 struct instruction_data *instruction_data,
8492 uint32_t n_instructions)
8496 for (i = 0; i < n_instructions; ) {
8497 struct instruction *instr = &instructions[i];
8498 struct instruction_data *data = &instruction_data[i];
8499 uint32_t n_instr = 0;
8502 /* Emit many + TX. */
8503 detected = instr_pattern_emit_many_tx_search(instr,
8508 instr_pattern_emit_many_tx_replace(instr,
8515 /* No pattern starting at the current instruction. */
8519 /* Eliminate the invalid instructions that have been optimized out. */
8520 n_instructions = instr_compact(instructions,
8524 return n_instructions;
/* Forward declaration: counts how many mov instructions read a given action
 * argument as their source (defined later in this file).
 */
8528 action_arg_src_mov_count(struct action *a,
8530 struct instruction *instructions,
8531 struct instruction_data *instruction_data,
8532 uint32_t n_instructions);
/* Peephole pattern: a sequence of MOV_HM instructions that copies every
 * field of a header from consecutive action arguments, followed by a
 * validate of that header, can be fused into one DMA_HT transfer.
 *
 * Search: instr[0] must be a MOV_HM writing field 0 of a fixed-size header
 * from some action argument (src_field_id); each following MOV_HM must copy
 * the next header field from the next argument; the instruction after the
 * last field must validate the same header. Finally, none of the consumed
 * arguments may be used as a mov source elsewhere, since the fused DMA will
 * flip their stored endianness (see the replace step).
 */
8535 instr_pattern_mov_all_validate_search(struct rte_swx_pipeline *p,
8537 struct instruction *instr,
8538 struct instruction_data *data,
8540 struct instruction *instructions,
8541 struct instruction_data *instruction_data,
8542 uint32_t n_instructions,
8543 uint32_t *n_pattern_instr)
8546 uint32_t src_field_id, i, j;
8548 /* Prerequisites. */
8552 /* First instruction: MOV_HM. */
8553 if (data[0].invalid || (instr[0].type != INSTR_MOV_HM))
8556 h = header_find_by_struct_id(p, instr[0].mov.dst.struct_id);
8557 if (!h || h->st->var_size)
/* Map the mov source byte offset back to an action-argument field index. */
8560 for (src_field_id = 0; src_field_id < a->st->n_fields; src_field_id++)
8561 if (instr[0].mov.src.offset == a->st->fields[src_field_id].offset / 8)
8564 if (src_field_id == a->st->n_fields)
8567 if (instr[0].mov.dst.offset ||
8568 (instr[0].mov.dst.n_bits != h->st->fields[0].n_bits) ||
8569 instr[0].mov.src.struct_id ||
8570 (instr[0].mov.src.n_bits != a->st->fields[src_field_id].n_bits) ||
8571 (instr[0].mov.dst.n_bits != instr[0].mov.src.n_bits))
/* There must be enough instructions left for all fields plus the validate,
 * and enough action arguments left to source every header field.
 */
8574 if ((n_instr < h->st->n_fields + 1) ||
8575 (a->st->n_fields < src_field_id + h->st->n_fields + 1))
8578 /* Subsequent instructions: MOV_HM. */
8579 for (i = 1; i < h->st->n_fields; i++)
8580 if (data[i].invalid ||
8582 (instr[i].type != INSTR_MOV_HM) ||
8583 (instr[i].mov.dst.struct_id != h->struct_id) ||
8584 (instr[i].mov.dst.offset != h->st->fields[i].offset / 8) ||
8585 (instr[i].mov.dst.n_bits != h->st->fields[i].n_bits) ||
8586 instr[i].mov.src.struct_id ||
8587 (instr[i].mov.src.offset != a->st->fields[src_field_id + i].offset / 8) ||
8588 (instr[i].mov.src.n_bits != a->st->fields[src_field_id + i].n_bits) ||
8589 (instr[i].mov.dst.n_bits != instr[i].mov.src.n_bits))
8592 /* Last instruction: HDR_VALIDATE. */
8593 if ((instr[i].type != INSTR_HDR_VALIDATE) ||
8594 (instr[i].valid.header_id != h->id))
8597 /* Check that none of the action args that are used as source for this
8598 * DMA transfer are not used as source in any other mov instruction.
8600 for (j = src_field_id; j < src_field_id + h->st->n_fields; j++) {
8603 n_users = action_arg_src_mov_count(a,
8612 *n_pattern_instr = 1 + i;
/* Replace step for the mov-all + validate pattern: rewrite instr[0] into a
 * single DMA_HT (header <- action-arg block copy), invalidate the rest of
 * the matched sequence, and flip the consumed action arguments to header
 * (network) endianness so the raw block copy is correct.
 */
8617 instr_pattern_mov_all_validate_replace(struct rte_swx_pipeline *p,
8619 struct instruction *instr,
8620 struct instruction_data *data,
8624 uint32_t src_field_id, src_offset, i;
8626 /* Read from the instructions before they are modified. */
8627 h = header_find_by_struct_id(p, instr[0].mov.dst.struct_id);
/* Recompute the first source argument index, as in the search step. */
8631 for (src_field_id = 0; src_field_id < a->st->n_fields; src_field_id++)
8632 if (instr[0].mov.src.offset == a->st->fields[src_field_id].offset / 8)
8635 if (src_field_id == a->st->n_fields)
8638 src_offset = instr[0].mov.src.offset;
8640 /* Modify the instructions. */
8641 instr[0].type = INSTR_DMA_HT;
8642 instr[0].dma.dst.header_id[0] = h->id;
8643 instr[0].dma.dst.struct_id[0] = h->struct_id;
8644 instr[0].dma.src.offset[0] = (uint8_t)src_offset;
8645 instr[0].dma.n_bytes[0] = h->st->n_bits / 8;
8647 for (i = 1; i < n_instr; i++)
8648 data[i].invalid = 1;
8650 /* Update the endianness of the action arguments to header endianness. */
8651 for (i = 0; i < h->st->n_fields; i++)
8652 a->args_endianness[src_field_id + i] = 1;
/* Driver for the mov-all + validate fusion: scan the action's program,
 * apply search/replace at each position, then compact the instruction array.
 * Returns the (possibly reduced) instruction count; bails out early when the
 * prerequisites (visible in the search step) are not met.
 */
8656 instr_pattern_mov_all_validate_optimize(struct rte_swx_pipeline *p,
8658 struct instruction *instructions,
8659 struct instruction_data *instruction_data,
8660 uint32_t n_instructions)
8665 return n_instructions;
8667 for (i = 0; i < n_instructions; ) {
8668 struct instruction *instr = &instructions[i];
8669 struct instruction_data *data = &instruction_data[i];
8670 uint32_t n_instr = 0;
8673 /* Mov all + validate. */
8674 detected = instr_pattern_mov_all_validate_search(p,
8684 instr_pattern_mov_all_validate_replace(p, a, instr, data, n_instr);
8689 /* No pattern starting at the current instruction. */
8693 /* Eliminate the invalid instructions that have been optimized out. */
8694 n_instructions = instr_compact(instructions,
8698 return n_instructions;
/* Peephole pattern: consecutive DMA_HT instructions (as produced by the
 * mov-all + validate fusion above) are merged into one multi-transfer DMA.
 *
 * Search: count consecutive valid INSTR_DMA_HT instructions, bounded by the
 * dst.header_id[] slot capacity and broken by a labeled instruction.
 */
8702 instr_pattern_dma_many_search(struct instruction *instr,
8703 struct instruction_data *data,
8705 uint32_t *n_pattern_instr)
8709 for (i = 0; i < n_instr; i++) {
8710 if (data[i].invalid)
8713 if (instr[i].type != INSTR_DMA_HT)
8716 if (i == RTE_DIM(instr->dma.dst.header_id))
8719 if (i && data[i].n_users)
8726 *n_pattern_instr = i;
/* Replace: fold each subsequent DMA descriptor into slot i of the first
 * instruction and invalidate the subsumed instructions.
 */
8731 instr_pattern_dma_many_replace(struct instruction *instr,
8732 struct instruction_data *data,
8737 for (i = 1; i < n_instr; i++) {
8739 instr[0].dma.dst.header_id[i] = instr[i].dma.dst.header_id[0];
8740 instr[0].dma.dst.struct_id[i] = instr[i].dma.dst.struct_id[0];
8741 instr[0].dma.src.offset[i] = instr[i].dma.src.offset[0];
8742 instr[0].dma.n_bytes[i] = instr[i].dma.n_bytes[0];
8744 data[i].invalid = 1;
/* Driver: apply the DMA-many fusion across the program, then compact. */
8749 instr_pattern_dma_many_optimize(struct instruction *instructions,
8750 struct instruction_data *instruction_data,
8751 uint32_t n_instructions)
8755 for (i = 0; i < n_instructions; ) {
8756 struct instruction *instr = &instructions[i];
8757 struct instruction_data *data = &instruction_data[i];
8758 uint32_t n_instr = 0;
8762 detected = instr_pattern_dma_many_search(instr,
8767 instr_pattern_dma_many_replace(instr, data, n_instr);
8772 /* No pattern starting at the current instruction. */
8776 /* Eliminate the invalid instructions that have been optimized out. */
8777 n_instructions = instr_compact(instructions,
8781 return n_instructions;
/* Run all peephole optimization passes in order. The ordering matters:
 * mov-all + validate produces DMA_HT instructions that the final DMA-many
 * pass can then merge. Returns the final instruction count.
 */
8785 instr_optimize(struct rte_swx_pipeline *p,
8787 struct instruction *instructions,
8788 struct instruction_data *instruction_data,
8789 uint32_t n_instructions)
8792 n_instructions = instr_pattern_extract_many_optimize(instructions,
8796 /* Emit many + TX. */
8797 n_instructions = instr_pattern_emit_many_tx_optimize(instructions,
8801 /* Mov all + validate. */
8802 n_instructions = instr_pattern_mov_all_validate_optimize(p,
8809 n_instructions = instr_pattern_dma_many_optimize(instructions,
8813 return n_instructions;
/* Compile an array of instruction strings into executable form: validate
 * inputs, allocate the (instr, data) arrays, translate each string (on a
 * strdup'd copy, since translation tokenizes in place), check labels, verify
 * program structure, optimize, and resolve jump targets. On success the
 * result is attached to the action (when a != NULL) or to the pipeline.
 */
8817 instruction_config(struct rte_swx_pipeline *p,
8819 const char **instructions,
8820 uint32_t n_instructions)
8822 struct instruction *instr = NULL;
8823 struct instruction_data *data = NULL;
8827 CHECK(n_instructions, EINVAL);
8828 CHECK(instructions, EINVAL);
8829 for (i = 0; i < n_instructions; i++)
8830 CHECK_INSTRUCTION(instructions[i], EINVAL);
8832 /* Memory allocation. */
8833 instr = calloc(n_instructions, sizeof(struct instruction));
8839 data = calloc(n_instructions, sizeof(struct instruction_data));
8845 for (i = 0; i < n_instructions; i++) {
8846 char *string = strdup(instructions[i]);
8852 err = instr_translate(p, a, string, &instr[i], &data[i]);
8861 err = instr_label_check(data, n_instructions);
8865 err = instr_verify(p, a, instr, data, n_instructions);
/* Optimization may shrink the program; jumps are resolved afterwards so
 * the targets point into the compacted array.
 */
8869 n_instructions = instr_optimize(p, a, instr, data, n_instructions);
8871 err = instr_jmp_resolve(instr, data, n_instructions);
8876 a->instructions = instr;
8877 a->n_instructions = n_instructions;
8879 p->instructions = instr;
8880 p->n_instructions = n_instructions;
/* Executor dispatch table: maps each instruction_type enum value to its
 * handler. Designated initializers keep the mapping robust against enum
 * reordering. instr_exec() indexes this table on the hot path.
 */
8892 typedef void (*instr_exec_t)(struct rte_swx_pipeline *);
8894 static instr_exec_t instruction_table[] = {
8895 [INSTR_RX] = instr_rx_exec,
8896 [INSTR_TX] = instr_tx_exec,
8897 [INSTR_TX_I] = instr_tx_i_exec,
8899 [INSTR_HDR_EXTRACT] = instr_hdr_extract_exec,
8900 [INSTR_HDR_EXTRACT2] = instr_hdr_extract2_exec,
8901 [INSTR_HDR_EXTRACT3] = instr_hdr_extract3_exec,
8902 [INSTR_HDR_EXTRACT4] = instr_hdr_extract4_exec,
8903 [INSTR_HDR_EXTRACT5] = instr_hdr_extract5_exec,
8904 [INSTR_HDR_EXTRACT6] = instr_hdr_extract6_exec,
8905 [INSTR_HDR_EXTRACT7] = instr_hdr_extract7_exec,
8906 [INSTR_HDR_EXTRACT8] = instr_hdr_extract8_exec,
8907 [INSTR_HDR_EXTRACT_M] = instr_hdr_extract_m_exec,
8909 [INSTR_HDR_EMIT] = instr_hdr_emit_exec,
8910 [INSTR_HDR_EMIT_TX] = instr_hdr_emit_tx_exec,
8911 [INSTR_HDR_EMIT2_TX] = instr_hdr_emit2_tx_exec,
8912 [INSTR_HDR_EMIT3_TX] = instr_hdr_emit3_tx_exec,
8913 [INSTR_HDR_EMIT4_TX] = instr_hdr_emit4_tx_exec,
8914 [INSTR_HDR_EMIT5_TX] = instr_hdr_emit5_tx_exec,
8915 [INSTR_HDR_EMIT6_TX] = instr_hdr_emit6_tx_exec,
8916 [INSTR_HDR_EMIT7_TX] = instr_hdr_emit7_tx_exec,
8917 [INSTR_HDR_EMIT8_TX] = instr_hdr_emit8_tx_exec,
8919 [INSTR_HDR_VALIDATE] = instr_hdr_validate_exec,
8920 [INSTR_HDR_INVALIDATE] = instr_hdr_invalidate_exec,
8922 [INSTR_MOV] = instr_mov_exec,
8923 [INSTR_MOV_MH] = instr_mov_mh_exec,
8924 [INSTR_MOV_HM] = instr_mov_hm_exec,
8925 [INSTR_MOV_HH] = instr_mov_hh_exec,
8926 [INSTR_MOV_I] = instr_mov_i_exec,
8928 [INSTR_DMA_HT] = instr_dma_ht_exec,
8929 [INSTR_DMA_HT2] = instr_dma_ht2_exec,
8930 [INSTR_DMA_HT3] = instr_dma_ht3_exec,
8931 [INSTR_DMA_HT4] = instr_dma_ht4_exec,
8932 [INSTR_DMA_HT5] = instr_dma_ht5_exec,
8933 [INSTR_DMA_HT6] = instr_dma_ht6_exec,
8934 [INSTR_DMA_HT7] = instr_dma_ht7_exec,
8935 [INSTR_DMA_HT8] = instr_dma_ht8_exec,
8937 [INSTR_ALU_ADD] = instr_alu_add_exec,
8938 [INSTR_ALU_ADD_MH] = instr_alu_add_mh_exec,
8939 [INSTR_ALU_ADD_HM] = instr_alu_add_hm_exec,
8940 [INSTR_ALU_ADD_HH] = instr_alu_add_hh_exec,
8941 [INSTR_ALU_ADD_MI] = instr_alu_add_mi_exec,
8942 [INSTR_ALU_ADD_HI] = instr_alu_add_hi_exec,
8944 [INSTR_ALU_SUB] = instr_alu_sub_exec,
8945 [INSTR_ALU_SUB_MH] = instr_alu_sub_mh_exec,
8946 [INSTR_ALU_SUB_HM] = instr_alu_sub_hm_exec,
8947 [INSTR_ALU_SUB_HH] = instr_alu_sub_hh_exec,
8948 [INSTR_ALU_SUB_MI] = instr_alu_sub_mi_exec,
8949 [INSTR_ALU_SUB_HI] = instr_alu_sub_hi_exec,
8951 [INSTR_ALU_CKADD_FIELD] = instr_alu_ckadd_field_exec,
8952 [INSTR_ALU_CKADD_STRUCT] = instr_alu_ckadd_struct_exec,
8953 [INSTR_ALU_CKADD_STRUCT20] = instr_alu_ckadd_struct20_exec,
8954 [INSTR_ALU_CKSUB_FIELD] = instr_alu_cksub_field_exec,
8956 [INSTR_ALU_AND] = instr_alu_and_exec,
8957 [INSTR_ALU_AND_MH] = instr_alu_and_mh_exec,
8958 [INSTR_ALU_AND_HM] = instr_alu_and_hm_exec,
8959 [INSTR_ALU_AND_HH] = instr_alu_and_hh_exec,
8960 [INSTR_ALU_AND_I] = instr_alu_and_i_exec,
8962 [INSTR_ALU_OR] = instr_alu_or_exec,
8963 [INSTR_ALU_OR_MH] = instr_alu_or_mh_exec,
8964 [INSTR_ALU_OR_HM] = instr_alu_or_hm_exec,
8965 [INSTR_ALU_OR_HH] = instr_alu_or_hh_exec,
8966 [INSTR_ALU_OR_I] = instr_alu_or_i_exec,
8968 [INSTR_ALU_XOR] = instr_alu_xor_exec,
8969 [INSTR_ALU_XOR_MH] = instr_alu_xor_mh_exec,
8970 [INSTR_ALU_XOR_HM] = instr_alu_xor_hm_exec,
8971 [INSTR_ALU_XOR_HH] = instr_alu_xor_hh_exec,
8972 [INSTR_ALU_XOR_I] = instr_alu_xor_i_exec,
8974 [INSTR_ALU_SHL] = instr_alu_shl_exec,
8975 [INSTR_ALU_SHL_MH] = instr_alu_shl_mh_exec,
8976 [INSTR_ALU_SHL_HM] = instr_alu_shl_hm_exec,
8977 [INSTR_ALU_SHL_HH] = instr_alu_shl_hh_exec,
8978 [INSTR_ALU_SHL_MI] = instr_alu_shl_mi_exec,
8979 [INSTR_ALU_SHL_HI] = instr_alu_shl_hi_exec,
8981 [INSTR_ALU_SHR] = instr_alu_shr_exec,
8982 [INSTR_ALU_SHR_MH] = instr_alu_shr_mh_exec,
8983 [INSTR_ALU_SHR_HM] = instr_alu_shr_hm_exec,
8984 [INSTR_ALU_SHR_HH] = instr_alu_shr_hh_exec,
8985 [INSTR_ALU_SHR_MI] = instr_alu_shr_mi_exec,
8986 [INSTR_ALU_SHR_HI] = instr_alu_shr_hi_exec,
8988 [INSTR_REGPREFETCH_RH] = instr_regprefetch_rh_exec,
8989 [INSTR_REGPREFETCH_RM] = instr_regprefetch_rm_exec,
8990 [INSTR_REGPREFETCH_RI] = instr_regprefetch_ri_exec,
8992 [INSTR_REGRD_HRH] = instr_regrd_hrh_exec,
8993 [INSTR_REGRD_HRM] = instr_regrd_hrm_exec,
8994 [INSTR_REGRD_MRH] = instr_regrd_mrh_exec,
8995 [INSTR_REGRD_MRM] = instr_regrd_mrm_exec,
8996 [INSTR_REGRD_HRI] = instr_regrd_hri_exec,
8997 [INSTR_REGRD_MRI] = instr_regrd_mri_exec,
8999 [INSTR_REGWR_RHH] = instr_regwr_rhh_exec,
9000 [INSTR_REGWR_RHM] = instr_regwr_rhm_exec,
9001 [INSTR_REGWR_RMH] = instr_regwr_rmh_exec,
9002 [INSTR_REGWR_RMM] = instr_regwr_rmm_exec,
9003 [INSTR_REGWR_RHI] = instr_regwr_rhi_exec,
9004 [INSTR_REGWR_RMI] = instr_regwr_rmi_exec,
9005 [INSTR_REGWR_RIH] = instr_regwr_rih_exec,
9006 [INSTR_REGWR_RIM] = instr_regwr_rim_exec,
9007 [INSTR_REGWR_RII] = instr_regwr_rii_exec,
9009 [INSTR_REGADD_RHH] = instr_regadd_rhh_exec,
9010 [INSTR_REGADD_RHM] = instr_regadd_rhm_exec,
9011 [INSTR_REGADD_RMH] = instr_regadd_rmh_exec,
9012 [INSTR_REGADD_RMM] = instr_regadd_rmm_exec,
9013 [INSTR_REGADD_RHI] = instr_regadd_rhi_exec,
9014 [INSTR_REGADD_RMI] = instr_regadd_rmi_exec,
9015 [INSTR_REGADD_RIH] = instr_regadd_rih_exec,
9016 [INSTR_REGADD_RIM] = instr_regadd_rim_exec,
9017 [INSTR_REGADD_RII] = instr_regadd_rii_exec,
9019 [INSTR_METPREFETCH_H] = instr_metprefetch_h_exec,
9020 [INSTR_METPREFETCH_M] = instr_metprefetch_m_exec,
9021 [INSTR_METPREFETCH_I] = instr_metprefetch_i_exec,
9023 [INSTR_METER_HHM] = instr_meter_hhm_exec,
9024 [INSTR_METER_HHI] = instr_meter_hhi_exec,
9025 [INSTR_METER_HMM] = instr_meter_hmm_exec,
9026 [INSTR_METER_HMI] = instr_meter_hmi_exec,
9027 [INSTR_METER_MHM] = instr_meter_mhm_exec,
9028 [INSTR_METER_MHI] = instr_meter_mhi_exec,
9029 [INSTR_METER_MMM] = instr_meter_mmm_exec,
9030 [INSTR_METER_MMI] = instr_meter_mmi_exec,
9031 [INSTR_METER_IHM] = instr_meter_ihm_exec,
9032 [INSTR_METER_IHI] = instr_meter_ihi_exec,
9033 [INSTR_METER_IMM] = instr_meter_imm_exec,
9034 [INSTR_METER_IMI] = instr_meter_imi_exec,
9036 [INSTR_TABLE] = instr_table_exec,
9037 [INSTR_SELECTOR] = instr_selector_exec,
9038 [INSTR_EXTERN_OBJ] = instr_extern_obj_exec,
9039 [INSTR_EXTERN_FUNC] = instr_extern_func_exec,
9041 [INSTR_JMP] = instr_jmp_exec,
9042 [INSTR_JMP_VALID] = instr_jmp_valid_exec,
9043 [INSTR_JMP_INVALID] = instr_jmp_invalid_exec,
9044 [INSTR_JMP_HIT] = instr_jmp_hit_exec,
9045 [INSTR_JMP_MISS] = instr_jmp_miss_exec,
9046 [INSTR_JMP_ACTION_HIT] = instr_jmp_action_hit_exec,
9047 [INSTR_JMP_ACTION_MISS] = instr_jmp_action_miss_exec,
9049 [INSTR_JMP_EQ] = instr_jmp_eq_exec,
9050 [INSTR_JMP_EQ_MH] = instr_jmp_eq_mh_exec,
9051 [INSTR_JMP_EQ_HM] = instr_jmp_eq_hm_exec,
9052 [INSTR_JMP_EQ_HH] = instr_jmp_eq_hh_exec,
9053 [INSTR_JMP_EQ_I] = instr_jmp_eq_i_exec,
9055 [INSTR_JMP_NEQ] = instr_jmp_neq_exec,
9056 [INSTR_JMP_NEQ_MH] = instr_jmp_neq_mh_exec,
9057 [INSTR_JMP_NEQ_HM] = instr_jmp_neq_hm_exec,
9058 [INSTR_JMP_NEQ_HH] = instr_jmp_neq_hh_exec,
9059 [INSTR_JMP_NEQ_I] = instr_jmp_neq_i_exec,
9061 [INSTR_JMP_LT] = instr_jmp_lt_exec,
9062 [INSTR_JMP_LT_MH] = instr_jmp_lt_mh_exec,
9063 [INSTR_JMP_LT_HM] = instr_jmp_lt_hm_exec,
9064 [INSTR_JMP_LT_HH] = instr_jmp_lt_hh_exec,
9065 [INSTR_JMP_LT_MI] = instr_jmp_lt_mi_exec,
9066 [INSTR_JMP_LT_HI] = instr_jmp_lt_hi_exec,
9068 [INSTR_JMP_GT] = instr_jmp_gt_exec,
9069 [INSTR_JMP_GT_MH] = instr_jmp_gt_mh_exec,
9070 [INSTR_JMP_GT_HM] = instr_jmp_gt_hm_exec,
9071 [INSTR_JMP_GT_HH] = instr_jmp_gt_hh_exec,
9072 [INSTR_JMP_GT_MI] = instr_jmp_gt_mi_exec,
9073 [INSTR_JMP_GT_HI] = instr_jmp_gt_hi_exec,
9075 [INSTR_RETURN] = instr_return_exec,
/* Execute the current thread's instruction by dispatching through
 * instruction_table on the instruction's type field.
 */
9079 instr_exec(struct rte_swx_pipeline *p)
9081 struct thread *t = &p->threads[p->thread_id];
9082 struct instruction *ip = t->ip;
9083 instr_exec_t instr = instruction_table[ip->type];
/* Look up an action by name in the pipeline's action list; NULL if absent. */
9091 static struct action *
9092 action_find(struct rte_swx_pipeline *p, const char *name)
9094 struct action *elem;
9099 TAILQ_FOREACH(elem, &p->actions, node)
9100 if (strcmp(elem->name, name) == 0)
/* Look up an action by its numeric ID. */
9106 static struct action *
9107 action_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
9109 struct action *action = NULL;
9111 TAILQ_FOREACH(action, &p->actions, node)
9112 if (action->id == id)
/* Find a field in the action's argument struct; NULL when the action takes
 * no arguments (a->st == NULL).
 */
9118 static struct field *
9119 action_field_find(struct action *a, const char *name)
9121 return a->st ? struct_type_field_find(a->st, name) : NULL;
/* Parse an action-argument reference of the form "t.FIELD" (the "t."
 * prefix denotes the action data / table entry argument area).
 */
9124 static struct field *
9125 action_field_parse(struct action *action, const char *name)
9127 if (name[0] != 't' || name[1] != '.')
9130 return action_field_find(action, &name[2]);
/* Public API: register a new action with the pipeline. Validates the name
 * (unique) and the optional argument struct type (must exist and be
 * fixed-size), allocates and initializes the action node, translates its
 * instruction program via instruction_config(), and appends it to the
 * pipeline's action list. Errors unwind the partial allocations.
 */
9134 rte_swx_pipeline_action_config(struct rte_swx_pipeline *p,
9136 const char *args_struct_type_name,
9137 const char **instructions,
9138 uint32_t n_instructions)
9140 struct struct_type *args_struct_type = NULL;
9146 CHECK_NAME(name, EINVAL);
9147 CHECK(!action_find(p, name), EEXIST);
9149 if (args_struct_type_name) {
9150 CHECK_NAME(args_struct_type_name, EINVAL);
9151 args_struct_type = struct_type_find(p, args_struct_type_name);
9152 CHECK(args_struct_type, EINVAL);
/* Variable-size argument structs are not supported for actions. */
9153 CHECK(!args_struct_type->var_size, EINVAL);
9156 /* Node allocation. */
9157 a = calloc(1, sizeof(struct action));
9159 if (args_struct_type) {
/* Per-argument endianness flags, consumed by the mov-all + validate
 * optimizer when it converts arguments to header byte order.
 */
9160 a->args_endianness = calloc(args_struct_type->n_fields, sizeof(int));
9161 if (!a->args_endianness) {
9167 /* Node initialization. */
9168 strcpy(a->name, name);
9169 a->st = args_struct_type;
9170 a->id = p->n_actions;
9172 /* Instruction translation. */
9173 err = instruction_config(p, a, instructions, n_instructions);
9175 free(a->args_endianness);
9180 /* Node add to tailq. */
9181 TAILQ_INSERT_TAIL(&p->actions, a, node);
/* Build step: create the id-indexed action_instructions[] lookup array used
 * on the fast path, pointing each slot at the action's translated program.
 */
9188 action_build(struct rte_swx_pipeline *p)
9190 struct action *action;
9192 p->action_instructions = calloc(p->n_actions,
9193 sizeof(struct instruction *));
9194 CHECK(p->action_instructions, ENOMEM);
9196 TAILQ_FOREACH(action, &p->actions, node)
9197 p->action_instructions[action->id] = action->instructions;
/* Undo action_build(): release the lookup array (not the programs, which
 * stay owned by the action nodes).
 */
9203 action_build_free(struct rte_swx_pipeline *p)
9205 free(p->action_instructions);
9206 p->action_instructions = NULL;
/* Full teardown: free the build artifacts, then pop and free every action
 * node (including its translated instruction array).
 */
9210 action_free(struct rte_swx_pipeline *p)
9212 action_build_free(p);
9215 struct action *action;
9217 action = TAILQ_FIRST(&p->actions);
9221 TAILQ_REMOVE(&p->actions, action, node);
9222 free(action->instructions);
/* Count the mov instructions (plain MOV or MOV_HM) in the given program that
 * read action argument arg_id as their source operand. Used by the
 * mov-all + validate optimizer to ensure the arguments it re-byte-orders are
 * not consumed anywhere else. Returns 0 on invalid inputs.
 */
9228 action_arg_src_mov_count(struct action *a,
9230 struct instruction *instructions,
9231 struct instruction_data *instruction_data,
9232 uint32_t n_instructions)
9234 uint32_t offset, n_users = 0, i;
9237 (arg_id >= a->st->n_fields) ||
9239 !instruction_data ||
/* Argument field offsets are stored in bits; compare in bytes. */
9243 offset = a->st->fields[arg_id].offset / 8;
9245 for (i = 0; i < n_instructions; i++) {
9246 struct instruction *instr = &instructions[i];
9247 struct instruction_data *data = &instruction_data[i];
9249 if (data->invalid ||
9250 ((instr->type != INSTR_MOV) && (instr->type != INSTR_MOV_HM)) ||
9251 instr->mov.src.struct_id ||
9252 (instr->mov.src.offset != offset))
/* Look up a registered table type by name; NULL if absent. */
9264 static struct table_type *
9265 table_type_find(struct rte_swx_pipeline *p, const char *name)
9267 struct table_type *elem;
9269 TAILQ_FOREACH(elem, &p->table_types, node)
9270 if (strcmp(elem->name, name) == 0)
/* Pick a table type for the given match type: prefer the recommended type
 * when it exists AND has the right match type; otherwise fall back to the
 * first registered type with a matching match type.
 */
9276 static struct table_type *
9277 table_type_resolve(struct rte_swx_pipeline *p,
9278 const char *recommended_type_name,
9279 enum rte_swx_table_match_type match_type)
9281 struct table_type *elem;
9283 /* Only consider the recommended type if the match type is correct. */
9284 if (recommended_type_name)
9285 TAILQ_FOREACH(elem, &p->table_types, node)
9286 if (!strcmp(elem->name, recommended_type_name) &&
9287 (elem->match_type == match_type))
9290 /* Ignore the recommended type and get the first element with this match
9293 TAILQ_FOREACH(elem, &p->table_types, node)
9294 if (elem->match_type == match_type)
/* Look up a table by name; NULL if absent. */
9300 static struct table *
9301 table_find(struct rte_swx_pipeline *p, const char *name)
9305 TAILQ_FOREACH(elem, &p->tables, node)
9306 if (strcmp(elem->name, name) == 0)
/* Look up a table by its numeric ID. */
9312 static struct table *
9313 table_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
9315 struct table *table = NULL;
9317 TAILQ_FOREACH(table, &p->tables, node)
9318 if (table->id == id)
/* Public API: register a table implementation (ops vtable) under a unique
 * name and match type. The create/lkp/free callbacks are mandatory; the ops
 * struct is copied by value into the new node.
 */
9325 rte_swx_pipeline_table_type_register(struct rte_swx_pipeline *p,
9327 enum rte_swx_table_match_type match_type,
9328 struct rte_swx_table_ops *ops)
9330 struct table_type *elem;
9334 CHECK_NAME(name, EINVAL);
9335 CHECK(!table_type_find(p, name), EEXIST);
9338 CHECK(ops->create, EINVAL);
9339 CHECK(ops->lkp, EINVAL);
9340 CHECK(ops->free, EINVAL);
9342 /* Node allocation. */
9343 elem = calloc(1, sizeof(struct table_type));
9344 CHECK(elem, ENOMEM);
9346 /* Node initialization. */
9347 strcpy(elem->name, name);
9348 elem->match_type = match_type;
9349 memcpy(&elem->ops, ops, sizeof(*ops));
9351 /* Node add to tailq. */
9352 TAILQ_INSERT_TAIL(&p->table_types, elem, node);
/* Derive the overall table match type from its per-field match types:
 * all-exact fields -> EXACT; otherwise WILDCARD. At most one LPM field is
 * allowed, and when present every other field must be exact.
 */
9358 table_match_type_resolve(struct rte_swx_match_field_params *fields,
9360 enum rte_swx_table_match_type *match_type)
9362 uint32_t n_fields_em = 0, n_fields_lpm = 0, i;
9364 for (i = 0; i < n_fields; i++) {
9365 struct rte_swx_match_field_params *f = &fields[i];
9367 if (f->match_type == RTE_SWX_TABLE_MATCH_EXACT)
9370 if (f->match_type == RTE_SWX_TABLE_MATCH_LPM)
9374 if ((n_fields_lpm > 1) ||
9375 (n_fields_lpm && (n_fields_em != n_fields - 1)))
9378 *match_type = (n_fields_em == n_fields) ?
9379 RTE_SWX_TABLE_MATCH_EXACT :
9380 RTE_SWX_TABLE_MATCH_WILDCARD;
/* Validate a table's match-field list: every field must resolve to either a
 * fixed-size header field (all from the SAME header) or a metadata field,
 * with no duplicates (checked via byte offsets). On success, *header is set
 * to the common header (or left NULL for all-metadata keys). The temporary
 * offset[] array is heap-allocated and released before returning.
 */
9386 table_match_fields_check(struct rte_swx_pipeline *p,
9387 struct rte_swx_pipeline_table_params *params,
9388 struct header **header)
9390 struct header *h0 = NULL;
9391 struct field *hf, *mf;
9392 uint32_t *offset = NULL, i;
9395 /* Return if no match fields. */
9396 if (!params->n_fields) {
9397 if (params->fields) {
9408 /* Memory allocation. */
9409 offset = calloc(params->n_fields, sizeof(uint32_t));
9415 /* Check that all the match fields belong to either the same header or
9418 hf = header_field_parse(p, params->fields[0].name, &h0);
9419 mf = metadata_field_parse(p, params->fields[0].name);
/* Field 0 decides header-vs-metadata mode; var-size fields are rejected. */
9420 if ((!hf && !mf) || (hf && hf->var_size)) {
9425 offset[0] = h0 ? hf->offset : mf->offset;
9427 for (i = 1; i < params->n_fields; i++)
9431 hf = header_field_parse(p, params->fields[i].name, &h);
9432 if (!hf || (h->id != h0->id) || hf->var_size) {
9437 offset[i] = hf->offset;
9439 mf = metadata_field_parse(p, params->fields[i].name);
9445 offset[i] = mf->offset;
9448 /* Check that there are no duplicated match fields. */
9449 for (i = 0; i < params->n_fields; i++) {
9452 for (j = 0; j < i; j++)
9453 if (offset[j] == offset[i]) {
/* Public API: register a new match-action table with the pipeline.
 * Validates the name (unique across tables and selectors), the match fields
 * (via table_match_fields_check()), the action list and default action, then
 * resolves the table type, allocates and initializes the table node and
 * appends it to the pipeline's table tailq.
 * Fix: "¶ms" was a mis-encoded "&params" (HTML &para; mojibake) in the
 * field-iteration loop below.
 */
9469 rte_swx_pipeline_table_config(struct rte_swx_pipeline *p,
9471 struct rte_swx_pipeline_table_params *params,
9472 const char *recommended_table_type_name,
9476 struct table_type *type;
9478 struct action *default_action;
9479 struct header *header = NULL;
9480 uint32_t action_data_size_max = 0, i;
/* Name must be valid and not clash with an existing table or selector. */
9485 CHECK_NAME(name, EINVAL);
9486 CHECK(!table_find(p, name), EEXIST);
9487 CHECK(!selector_find(p, name), EEXIST);
9489 CHECK(params, EINVAL);
9492 status = table_match_fields_check(p, params, &header);
9496 /* Action checks. */
9497 CHECK(params->n_actions, EINVAL);
9498 CHECK(params->action_names, EINVAL);
9499 for (i = 0; i < params->n_actions; i++) {
9500 const char *action_name = params->action_names[i];
9502 uint32_t action_data_size;
9504 CHECK_NAME(action_name, EINVAL);
9506 a = action_find(p, action_name);
/* Track the biggest action argument struct over all table actions. */
9509 action_data_size = a->st ? a->st->n_bits / 8 : 0;
9510 if (action_data_size > action_data_size_max)
9511 action_data_size_max = action_data_size;
/* The default action must be one of the table's own actions. */
9514 CHECK_NAME(params->default_action_name, EINVAL);
9515 for (i = 0; i < p->n_actions; i++)
9516 if (!strcmp(params->action_names[i],
9517 params->default_action_name))
9519 CHECK(i < params->n_actions, EINVAL);
9520 default_action = action_find(p, params->default_action_name);
/* Default action data is only meaningful when the action has arguments. */
9521 CHECK((default_action->st && params->default_action_data) ||
9522 !params->default_action_data, EINVAL);
9524 /* Table type checks. */
9525 if (recommended_table_type_name)
9526 CHECK_NAME(recommended_table_type_name, EINVAL);
9528 if (params->n_fields) {
9529 enum rte_swx_table_match_type match_type;
9531 status = table_match_type_resolve(params->fields, params->n_fields, &match_type);
9535 type = table_type_resolve(p, recommended_table_type_name, match_type);
9536 CHECK(type, EINVAL);
9541 /* Memory allocation. */
9542 t = calloc(1, sizeof(struct table));
9545 t->fields = calloc(params->n_fields, sizeof(struct match_field));
9551 t->actions = calloc(params->n_actions, sizeof(struct action *));
9558 if (action_data_size_max) {
9559 t->default_action_data = calloc(1, action_data_size_max);
9560 if (!t->default_action_data) {
9568 /* Node initialization. */
9569 strcpy(t->name, name);
9570 if (args && args[0])
9571 strcpy(t->args, args);
9574 for (i = 0; i < params->n_fields; i++) {
9575 struct rte_swx_match_field_params *field = &params->fields[i];
9576 struct match_field *f = &t->fields[i];
9578 f->match_type = field->match_type;
9580 header_field_parse(p, field->name, NULL) :
9581 metadata_field_parse(p, field->name);
9583 t->n_fields = params->n_fields;
9586 for (i = 0; i < params->n_actions; i++)
9587 t->actions[i] = action_find(p, params->action_names[i]);
9588 t->default_action = default_action;
/* Snapshot the default action arguments into the table node. */
9589 if (default_action->st)
9590 memcpy(t->default_action_data,
9591 params->default_action_data,
9592 default_action->st->n_bits / 8);
9593 t->n_actions = params->n_actions;
9594 t->default_action_is_const = params->default_action_is_const;
9595 t->action_data_size_max = action_data_size_max;
9598 t->id = p->n_tables;
9600 /* Node add to tailq. */
9601 TAILQ_INSERT_TAIL(&p->tables, t, node);
/* Build an rte_swx_table_params descriptor for a configured table: compute
 * the contiguous key window [key_offset, key_offset + key_size) covering all
 * match fields, the byte-level key mask, and the maximum action data size.
 * Caller owns the result and releases it with table_params_free().
 */
9607 static struct rte_swx_table_params *
9608 table_params_get(struct table *table)
9610 struct rte_swx_table_params *params;
9611 struct field *first, *last;
9613 uint32_t key_size, key_offset, action_data_size, i;
9615 /* Memory allocation. */
9616 params = calloc(1, sizeof(struct rte_swx_table_params));
9620 /* Find first (smallest offset) and last (biggest offset) match fields. */
9621 first = table->fields[0].field;
9622 last = table->fields[0].field;
9624 for (i = 0; i < table->n_fields; i++) {
9625 struct field *f = table->fields[i].field;
9627 if (f->offset < first->offset)
9630 if (f->offset > last->offset)
9634 /* Key offset and size. */
/* Field offsets/sizes are in bits; params want bytes, hence the /8. */
9635 key_offset = first->offset / 8;
9636 key_size = (last->offset + last->n_bits - first->offset) / 8;
9638 /* Memory allocation. */
9639 key_mask = calloc(1, key_size);
/* Set the mask bytes that correspond to actual match fields; gaps between
 * fields stay zero (don't-care).
 */
9646 for (i = 0; i < table->n_fields; i++) {
9647 struct field *f = table->fields[i].field;
9648 uint32_t start = (f->offset - first->offset) / 8;
9649 size_t size = f->n_bits / 8;
9651 memset(&key_mask[start], 0xFF, size);
9654 /* Action data size. */
9655 action_data_size = 0;
9656 for (i = 0; i < table->n_actions; i++) {
9657 struct action *action = table->actions[i];
9658 uint32_t ads = action->st ? action->st->n_bits / 8 : 0;
9660 if (ads > action_data_size)
9661 action_data_size = ads;
9665 params->match_type = table->type->match_type;
9666 params->key_size = key_size;
9667 params->key_offset = key_offset;
9668 params->key_mask0 = key_mask;
9669 params->action_data_size = action_data_size;
9670 params->n_keys_max = table->size;
/* Release a descriptor produced by table_params_get(). */
9676 table_params_free(struct rte_swx_table_params *params)
9681 free(params->key_mask0);
/* Lookup stub installed for tables with no type (no match fields): always
 * reports completion with a miss.
 */
9686 table_stub_lkp(void *table __rte_unused,
9687 void *mailbox __rte_unused,
9688 uint8_t **key __rte_unused,
9689 uint64_t *action_id __rte_unused,
9690 uint8_t **action_data __rte_unused,
9694 return 1; /* DONE. */
/* Pipeline build step for tables: allocate per-pipeline table statistics
 * (with one per-action packet counter array per table) and, for every thread,
 * the per-table run-time state (lookup function, mailbox, key pointer).
 */
9698 table_build(struct rte_swx_pipeline *p)
9702 /* Per pipeline: table statistics. */
9703 p->table_stats = calloc(p->n_tables, sizeof(struct table_statistics));
9704 CHECK(p->table_stats, ENOMEM);
9706 for (i = 0; i < p->n_tables; i++) {
9707 p->table_stats[i].n_pkts_action = calloc(p->n_actions, sizeof(uint64_t));
9708 CHECK(p->table_stats[i].n_pkts_action, ENOMEM);
9711 /* Per thread: table run-time. */
9712 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
9713 struct thread *t = &p->threads[i];
9714 struct table *table;
9716 t->tables = calloc(p->n_tables, sizeof(struct table_runtime));
9717 CHECK(t->tables, ENOMEM);
9719 TAILQ_FOREACH(table, &p->tables, node) {
9720 struct table_runtime *r = &t->tables[table->id];
/* Typed table: use the table type's lookup and mailbox. */
9725 size = table->type->ops.mailbox_size_get();
9728 r->func = table->type->ops.lkp;
9732 r->mailbox = calloc(1, size);
9733 CHECK(r->mailbox, ENOMEM);
/* Key points into the header struct or, for meta-data keys, the
 * per-thread metadata struct.
 */
9737 r->key = table->header ?
9738 &t->structs[table->header->struct_id] :
9739 &t->structs[p->metadata_struct_id];
/* Untyped (keyless) table: stub lookup, always a miss. */
9741 r->func = table_stub_lkp;
/* Undo table_build(): free per-thread run-time and per-pipeline stats. */
9750 table_build_free(struct rte_swx_pipeline *p)
9754 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
9755 struct thread *t = &p->threads[i];
9761 for (j = 0; j < p->n_tables; j++) {
9762 struct table_runtime *r = &t->tables[j];
9771 if (p->table_stats) {
9772 for (i = 0; i < p->n_tables; i++)
9773 free(p->table_stats[i].n_pkts_action);
9775 free(p->table_stats);
/* Tear down all table nodes and table-type nodes owned by the pipeline. */
9780 table_free(struct rte_swx_pipeline *p)
9782 table_build_free(p);
9788 elem = TAILQ_FIRST(&p->tables);
9792 TAILQ_REMOVE(&p->tables, elem, node);
9794 free(elem->actions);
9795 free(elem->default_action_data);
9801 struct table_type *elem;
9803 elem = TAILQ_FIRST(&p->table_types);
9807 TAILQ_REMOVE(&p->table_types, elem, node);
/* Look up a selector table by name; NULL when not found (return statements
 * elided in this view).
 */
9815 static struct selector *
9816 selector_find(struct rte_swx_pipeline *p, const char *name)
9820 TAILQ_FOREACH(s, &p->selectors, node)
9821 if (strcmp(s->name, name) == 0)
/* Look up a selector table by numeric ID; NULL when not found. */
9827 static struct selector *
9828 selector_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
9830 struct selector *s = NULL;
9832 TAILQ_FOREACH(s, &p->selectors, node)
/* Validate the selector field list: all fields resolve to either the same
 * header or to meta-data, and no field name appears twice. On success
 * *header is the common header (NULL for meta-data-only selectors).
 */
9840 selector_fields_check(struct rte_swx_pipeline *p,
9841 struct rte_swx_pipeline_selector_params *params,
9842 struct header **header)
9844 struct header *h0 = NULL;
9845 struct field *hf, *mf;
9848 /* Return if no selector fields. */
9849 if (!params->n_selector_fields || !params->selector_field_names)
9852 /* Check that all the selector fields either belong to the same header
9853 * or are all meta-data fields.
/* First field decides header vs. meta-data for the whole list. */
9855 hf = header_field_parse(p, params->selector_field_names[0], &h0);
9856 mf = metadata_field_parse(p, params->selector_field_names[0]);
9860 for (i = 1; i < params->n_selector_fields; i++)
9864 hf = header_field_parse(p, params->selector_field_names[i], &h);
9865 if (!hf || (h->id != h0->id))
9868 mf = metadata_field_parse(p, params->selector_field_names[i]);
9873 /* Check that there are no duplicated match fields. */
9874 for (i = 0; i < params->n_selector_fields; i++) {
9875 const char *field_name = params->selector_field_names[i];
9878 for (j = i + 1; j < params->n_selector_fields; j++)
9879 if (!strcmp(params->selector_field_names[j], field_name))
/* Public API: register a new selector table with the pipeline. Validates the
 * name (unique across tables and selectors), the group-ID and member-ID
 * meta-data fields, the selector field list and the group limits, then
 * allocates the selector node, initializes it and appends it to the
 * pipeline's selector tailq.
 */
9891 rte_swx_pipeline_selector_config(struct rte_swx_pipeline *p,
9893 struct rte_swx_pipeline_selector_params *params)
9896 struct header *selector_header = NULL;
9897 struct field *group_id_field, *member_id_field;
9903 CHECK_NAME(name, EINVAL);
9904 CHECK(!table_find(p, name), EEXIST);
9905 CHECK(!selector_find(p, name), EEXIST);
9907 CHECK(params, EINVAL);
/* Group ID is read from a meta-data field. */
9909 CHECK_NAME(params->group_id_field_name, EINVAL);
9910 group_id_field = metadata_field_parse(p, params->group_id_field_name);
9911 CHECK(group_id_field, EINVAL);
9913 for (i = 0; i < params->n_selector_fields; i++) {
9914 const char *field_name = params->selector_field_names[i];
9916 CHECK_NAME(field_name, EINVAL);
9918 status = selector_fields_check(p, params, &selector_header);
/* Member ID is written to a meta-data field. */
9922 CHECK_NAME(params->member_id_field_name, EINVAL);
9923 member_id_field = metadata_field_parse(p, params->member_id_field_name);
9924 CHECK(member_id_field, EINVAL);
9926 CHECK(params->n_groups_max, EINVAL);
9928 CHECK(params->n_members_per_group_max, EINVAL);
9930 /* Memory allocation. */
9931 s = calloc(1, sizeof(struct selector));
9937 s->selector_fields = calloc(params->n_selector_fields, sizeof(struct field *));
9938 if (!s->selector_fields) {
9943 /* Node initialization. */
9944 strcpy(s->name, name);
9946 s->group_id_field = group_id_field;
9948 for (i = 0; i < params->n_selector_fields; i++) {
9949 const char *field_name = params->selector_field_names[i];
9951 s->selector_fields[i] = selector_header ?
9952 header_field_parse(p, field_name, NULL) :
9953 metadata_field_parse(p, field_name);
9956 s->n_selector_fields = params->n_selector_fields;
9958 s->selector_header = selector_header;
9960 s->member_id_field = member_id_field;
9962 s->n_groups_max = params->n_groups_max;
9964 s->n_members_per_group_max = params->n_members_per_group_max;
9966 s->id = p->n_selectors;
9968 /* Node add to tailq. */
9969 TAILQ_INSERT_TAIL(&p->selectors, s, node);
/* Error path: undo the partial allocation. */
9978 free(s->selector_fields);
/* Release a descriptor produced by selector_table_params_get(). */
9986 selector_params_free(struct rte_swx_table_selector_params *params)
9991 free(params->selector_mask);
/* Build an rte_swx_table_selector_params descriptor for a selector: byte
 * offsets of the group-ID and member-ID meta-data fields, the contiguous
 * selector window covering all selector fields, its byte mask, and the group
 * limits. Caller owns the result and releases it with selector_params_free().
 * Fix: "¶ms" was a mis-encoded "&params" (HTML &para; mojibake) in the
 * mask-filling memset below.
 */
9996 static struct rte_swx_table_selector_params *
9997 selector_table_params_get(struct selector *s)
9999 struct rte_swx_table_selector_params *params = NULL;
10000 struct field *first, *last;
10003 /* Memory allocation. */
10004 params = calloc(1, sizeof(struct rte_swx_table_selector_params));
/* Field offsets are in bits; the params want bytes, hence the /8. */
10009 params->group_id_offset = s->group_id_field->offset / 8;
10011 /* Find first (smallest offset) and last (biggest offset) selector fields. */
10012 first = s->selector_fields[0];
10013 last = s->selector_fields[0];
10015 for (i = 0; i < s->n_selector_fields; i++) {
10016 struct field *f = s->selector_fields[i];
10018 if (f->offset < first->offset)
10021 if (f->offset > last->offset)
10025 /* Selector offset and size. */
10026 params->selector_offset = first->offset / 8;
10027 params->selector_size = (last->offset + last->n_bits - first->offset) / 8;
10029 /* Memory allocation. */
10030 params->selector_mask = calloc(1, params->selector_size);
10031 if (!params->selector_mask)
10034 /* Selector mask. */
/* Set the mask bytes covered by actual fields; gaps stay zero. */
10035 for (i = 0; i < s->n_selector_fields; i++) {
10036 struct field *f = s->selector_fields[i];
10037 uint32_t start = (f->offset - first->offset) / 8;
10038 size_t size = f->n_bits / 8;
10040 memset(&params->selector_mask[start], 0xFF, size);
10044 params->member_id_offset = s->member_id_field->offset / 8;
10046 /* Maximum number of groups. */
10047 params->n_groups_max = s->n_groups_max;
10049 /* Maximum number of members per group. */
10050 params->n_members_per_group_max = s->n_members_per_group_max;
/* Error path: free the partially built descriptor. */
10055 selector_params_free(params);
/* Undo selector_build(): free each thread's per-selector run-time array and
 * the per-pipeline selector statistics.
 */
10060 selector_build_free(struct rte_swx_pipeline *p)
10064 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
10065 struct thread *t = &p->threads[i];
10071 for (j = 0; j < p->n_selectors; j++) {
10072 struct selector_runtime *r = &t->selectors[j];
10077 free(t->selectors);
10078 t->selectors = NULL;
10081 free(p->selector_stats);
10082 p->selector_stats = NULL;
/* Pipeline build step for selectors: allocate per-pipeline statistics and,
 * per thread, the selector run-time (mailbox plus pointers to the structs
 * holding the group-ID, selector and member-ID fields).
 */
10086 selector_build(struct rte_swx_pipeline *p)
10091 /* Per pipeline: selector statistics. */
10092 p->selector_stats = calloc(p->n_selectors, sizeof(struct selector_statistics));
10093 if (!p->selector_stats) {
10098 /* Per thread: selector run-time. */
10099 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
10100 struct thread *t = &p->threads[i];
10101 struct selector *s;
10103 t->selectors = calloc(p->n_selectors, sizeof(struct selector_runtime));
10104 if (!t->selectors) {
10109 TAILQ_FOREACH(s, &p->selectors, node) {
10110 struct selector_runtime *r = &t->selectors[s->id];
10114 size = rte_swx_table_selector_mailbox_size_get();
10116 r->mailbox = calloc(1, size);
10123 /* r->group_id_buffer. */
10124 r->group_id_buffer = &t->structs[p->metadata_struct_id];
10126 /* r->selector_buffer. */
/* Selector fields live either in a header struct or in meta-data. */
10127 r->selector_buffer = s->selector_header ?
10128 &t->structs[s->selector_header->struct_id] :
10129 &t->structs[p->metadata_struct_id];
10131 /* r->member_id_buffer. */
10132 r->member_id_buffer = &t->structs[p->metadata_struct_id];
/* Error path: roll back whatever was allocated so far. */
10139 selector_build_free(p);
/* Tear down all selector nodes owned by the pipeline. */
10144 selector_free(struct rte_swx_pipeline *p)
10146 selector_build_free(p);
10148 /* Selector tables. */
10150 struct selector *elem;
10152 elem = TAILQ_FIRST(&p->selectors);
10156 TAILQ_REMOVE(&p->selectors, elem, node);
10157 free(elem->selector_fields);
/* Pipeline build step for table state: allocate one rte_swx_table_state per
 * table plus one per selector (selectors are indexed after the tables), create
 * the underlying table/selector objects, and snapshot each table's default
 * action data and ID.
 */
10166 table_state_build(struct rte_swx_pipeline *p)
10168 struct table *table;
10169 struct selector *s;
10171 p->table_state = calloc(p->n_tables + p->n_selectors,
10172 sizeof(struct rte_swx_table_state));
10173 CHECK(p->table_state, ENOMEM);
10175 TAILQ_FOREACH(table, &p->tables, node) {
10176 struct rte_swx_table_state *ts = &p->table_state[table->id];
10179 struct rte_swx_table_params *params;
10182 params = table_params_get(table);
10183 CHECK(params, ENOMEM);
10185 ts->obj = table->type->ops.create(params,
/* The params descriptor is only needed for create(); free it either way. */
10190 table_params_free(params);
10191 CHECK(ts->obj, ENODEV);
10194 /* ts->default_action_data. */
10195 if (table->action_data_size_max) {
10196 ts->default_action_data =
10197 malloc(table->action_data_size_max);
10198 CHECK(ts->default_action_data, ENOMEM);
10200 memcpy(ts->default_action_data,
10201 table->default_action_data,
10202 table->action_data_size_max);
10205 /* ts->default_action_id. */
10206 ts->default_action_id = table->default_action->id;
/* Selector states follow the table states in the same array. */
10209 TAILQ_FOREACH(s, &p->selectors, node) {
10210 struct rte_swx_table_state *ts = &p->table_state[p->n_tables + s->id];
10211 struct rte_swx_table_selector_params *params;
10214 params = selector_table_params_get(s);
10215 CHECK(params, ENOMEM);
10217 ts->obj = rte_swx_table_selector_create(params, NULL, p->numa_node);
10219 selector_params_free(params);
10220 CHECK(ts->obj, ENODEV);
/* Undo table_state_build(): free table/selector objects, default action data
 * and the table state array itself.
 */
10227 table_state_build_free(struct rte_swx_pipeline *p)
10231 if (!p->table_state)
10234 for (i = 0; i < p->n_tables; i++) {
10235 struct rte_swx_table_state *ts = &p->table_state[i];
10236 struct table *table = table_find_by_id(p, i);
10239 if (table->type && ts->obj)
10240 table->type->ops.free(ts->obj);
10242 /* ts->default_action_data. */
10243 free(ts->default_action_data);
10246 for (i = 0; i < p->n_selectors; i++) {
10247 struct rte_swx_table_state *ts = &p->table_state[p->n_tables + i];
10251 rte_swx_table_selector_free(ts->obj);
10254 free(p->table_state);
10255 p->table_state = NULL;
/* Free wrapper used by rte_swx_pipeline_free(). */
10259 table_state_free(struct rte_swx_pipeline *p)
10261 table_state_build_free(p);
/* Look up a register array by name; NULL when not found. */
10267 static struct regarray *
10268 regarray_find(struct rte_swx_pipeline *p, const char *name)
10270 struct regarray *elem;
10272 TAILQ_FOREACH(elem, &p->regarrays, node)
10273 if (!strcmp(elem->name, name))
/* Look up a register array by numeric ID; NULL when not found. */
10279 static struct regarray *
10280 regarray_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
10282 struct regarray *elem = NULL;
10284 TAILQ_FOREACH(elem, &p->regarrays, node)
10285 if (elem->id == id)
/* Public API: register a new register array. The size is rounded up to the
 * next power of two so run-time indexing can use a mask (see size_mask).
 */
10292 rte_swx_pipeline_regarray_config(struct rte_swx_pipeline *p,
10297 struct regarray *r;
10301 CHECK_NAME(name, EINVAL);
10302 CHECK(!regarray_find(p, name), EEXIST);
10304 CHECK(size, EINVAL);
10305 size = rte_align32pow2(size);
10307 /* Memory allocation. */
10308 r = calloc(1, sizeof(struct regarray));
10311 /* Node initialization. */
10312 strcpy(r->name, name);
10313 r->init_val = init_val;
10315 r->id = p->n_regarrays;
10317 /* Node add to tailq. */
10318 TAILQ_INSERT_TAIL(&p->regarrays, r, node);
/* Pipeline build step for register arrays: allocate the run-time storage
 * (NUMA-aware via env_malloc) and apply the configured initial value.
 */
10325 regarray_build(struct rte_swx_pipeline *p)
10327 struct regarray *regarray;
10329 if (!p->n_regarrays)
10332 p->regarray_runtime = calloc(p->n_regarrays, sizeof(struct regarray_runtime));
10333 CHECK(p->regarray_runtime, ENOMEM);
10335 TAILQ_FOREACH(regarray, &p->regarrays, node) {
10336 struct regarray_runtime *r = &p->regarray_runtime[regarray->id];
10339 r->regarray = env_malloc(regarray->size * sizeof(uint64_t),
10340 RTE_CACHE_LINE_SIZE,
10342 CHECK(r->regarray, ENOMEM);
/* env_malloc zero-fills, so only non-zero init values need the loop. */
10344 if (regarray->init_val)
10345 for (i = 0; i < regarray->size; i++)
10346 r->regarray[i] = regarray->init_val;
/* size is a power of two, so size - 1 is a valid index mask. */
10348 r->size_mask = regarray->size - 1;
/* Undo regarray_build(): release the run-time storage. */
10355 regarray_build_free(struct rte_swx_pipeline *p)
10359 if (!p->regarray_runtime)
10362 for (i = 0; i < p->n_regarrays; i++) {
10363 struct regarray *regarray = regarray_find_by_id(p, i);
10364 struct regarray_runtime *r = &p->regarray_runtime[i];
10366 env_free(r->regarray, regarray->size * sizeof(uint64_t));
10369 free(p->regarray_runtime);
10370 p->regarray_runtime = NULL;
/* Tear down all register array nodes owned by the pipeline. */
10374 regarray_free(struct rte_swx_pipeline *p)
10376 regarray_build_free(p);
10379 struct regarray *elem;
10381 elem = TAILQ_FIRST(&p->regarrays);
10385 TAILQ_REMOVE(&p->regarrays, elem, node);
/* Look up a meter profile by name; NULL when not found. */
10393 static struct meter_profile *
10394 meter_profile_find(struct rte_swx_pipeline *p, const char *name)
10396 struct meter_profile *elem;
10398 TAILQ_FOREACH(elem, &p->meter_profiles, node)
10399 if (!strcmp(elem->name, name))
/* Look up a meter array by name; NULL when not found. */
10405 static struct metarray *
10406 metarray_find(struct rte_swx_pipeline *p, const char *name)
10408 struct metarray *elem;
10410 TAILQ_FOREACH(elem, &p->metarrays, node)
10411 if (!strcmp(elem->name, name))
/* Look up a meter array by numeric ID; NULL when not found. */
10417 static struct metarray *
10418 metarray_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
10420 struct metarray *elem = NULL;
10422 TAILQ_FOREACH(elem, &p->metarrays, node)
10423 if (elem->id == id)
/* Public API: register a new meter array. Size is rounded up to the next
 * power of two for mask-based indexing (see size_mask in metarray_build()).
 */
10430 rte_swx_pipeline_metarray_config(struct rte_swx_pipeline *p,
10434 struct metarray *m;
10438 CHECK_NAME(name, EINVAL);
10439 CHECK(!metarray_find(p, name), EEXIST);
10441 CHECK(size, EINVAL);
10442 size = rte_align32pow2(size);
10444 /* Memory allocation. */
10445 m = calloc(1, sizeof(struct metarray));
10448 /* Node initialization. */
10449 strcpy(m->name, name);
10451 m->id = p->n_metarrays;
10453 /* Node add to tailq. */
10454 TAILQ_INSERT_TAIL(&p->metarrays, m, node);
/* Built-in profile every meter starts from; rates elided in this view. */
10460 struct meter_profile meter_profile_default = {
10469 .cir_bytes_per_period = 1,
10471 .pir_bytes_per_period = 1,
/* Reset a meter to the default profile and count it as a profile user. */
10478 meter_init(struct meter *m)
10480 memset(m, 0, sizeof(struct meter));
10481 rte_meter_trtcm_config(&m->m, &meter_profile_default.profile);
10482 m->profile = &meter_profile_default;
10483 m->color_mask = RTE_COLOR_GREEN;
10485 meter_profile_default.n_users++;
/* Pipeline build step for meter arrays: allocate run-time storage (NUMA
 * aware) and initialize every meter to the default profile.
 */
10489 metarray_build(struct rte_swx_pipeline *p)
10491 struct metarray *m;
10493 if (!p->n_metarrays)
10496 p->metarray_runtime = calloc(p->n_metarrays, sizeof(struct metarray_runtime));
10497 CHECK(p->metarray_runtime, ENOMEM);
10499 TAILQ_FOREACH(m, &p->metarrays, node) {
10500 struct metarray_runtime *r = &p->metarray_runtime[m->id];
10503 r->metarray = env_malloc(m->size * sizeof(struct meter),
10504 RTE_CACHE_LINE_SIZE,
10506 CHECK(r->metarray, ENOMEM);
10508 for (i = 0; i < m->size; i++)
10509 meter_init(&r->metarray[i]);
/* size is a power of two, so size - 1 is a valid index mask. */
10511 r->size_mask = m->size - 1;
/* Undo metarray_build(): release the run-time storage. */
10518 metarray_build_free(struct rte_swx_pipeline *p)
10522 if (!p->metarray_runtime)
10525 for (i = 0; i < p->n_metarrays; i++) {
10526 struct metarray *m = metarray_find_by_id(p, i);
10527 struct metarray_runtime *r = &p->metarray_runtime[i];
10529 env_free(r->metarray, m->size * sizeof(struct meter));
10532 free(p->metarray_runtime);
10533 p->metarray_runtime = NULL;
/* Tear down all meter array nodes and meter profile nodes. */
10537 metarray_free(struct rte_swx_pipeline *p)
10539 metarray_build_free(p);
10541 /* Meter arrays. */
10543 struct metarray *elem;
10545 elem = TAILQ_FIRST(&p->metarrays);
10549 TAILQ_REMOVE(&p->metarrays, elem, node);
10553 /* Meter profiles. */
10555 struct meter_profile *elem;
10557 elem = TAILQ_FIRST(&p->meter_profiles);
10561 TAILQ_REMOVE(&p->meter_profiles, elem, node);
/* Public API: allocate and zero-initialize a pipeline object, set up all the
 * internal tailqs and return it through *p. Struct ID 0 is reserved.
 */
10570 rte_swx_pipeline_config(struct rte_swx_pipeline **p, int numa_node)
10572 struct rte_swx_pipeline *pipeline;
10574 /* Check input parameters. */
10577 /* Memory allocation. */
10578 pipeline = calloc(1, sizeof(struct rte_swx_pipeline));
10579 CHECK(pipeline, ENOMEM);
10581 /* Initialization. */
10582 TAILQ_INIT(&pipeline->struct_types);
10583 TAILQ_INIT(&pipeline->port_in_types);
10584 TAILQ_INIT(&pipeline->ports_in);
10585 TAILQ_INIT(&pipeline->port_out_types);
10586 TAILQ_INIT(&pipeline->ports_out);
10587 TAILQ_INIT(&pipeline->extern_types);
10588 TAILQ_INIT(&pipeline->extern_objs);
10589 TAILQ_INIT(&pipeline->extern_funcs);
10590 TAILQ_INIT(&pipeline->headers);
10591 TAILQ_INIT(&pipeline->actions);
10592 TAILQ_INIT(&pipeline->table_types);
10593 TAILQ_INIT(&pipeline->tables);
10594 TAILQ_INIT(&pipeline->selectors);
10595 TAILQ_INIT(&pipeline->regarrays);
10596 TAILQ_INIT(&pipeline->meter_profiles);
10597 TAILQ_INIT(&pipeline->metarrays);
10599 pipeline->n_structs = 1; /* Struct 0 is reserved for action_data. */
10600 pipeline->numa_node = numa_node;
/* Public API: destroy a pipeline and everything it owns (several free calls
 * elided in this view).
 */
10607 rte_swx_pipeline_free(struct rte_swx_pipeline *p)
10612 free(p->instructions);
10616 table_state_free(p);
10622 extern_func_free(p);
10623 extern_obj_free(p);
/* Public API: translate and install the pipeline program, then reset every
 * thread's instruction pointer to the start of the program.
 */
10632 rte_swx_pipeline_instructions_config(struct rte_swx_pipeline *p,
10633 const char **instructions,
10634 uint32_t n_instructions)
10639 err = instruction_config(p, NULL, instructions, n_instructions);
10643 /* Thread instruction pointer reset. */
10644 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
10645 struct thread *t = &p->threads[i];
10647 thread_ip_reset(p, t);
/* Public API: one-shot build of all pipeline resources, in dependency order.
 * On any failure the error path below unwinds every build step in reverse.
 * Can only be done once per pipeline (build_done guard).
 */
10654 rte_swx_pipeline_build(struct rte_swx_pipeline *p)
10659 CHECK(p->build_done == 0, EEXIST);
10661 status = port_in_build(p);
10665 status = port_out_build(p);
10669 status = struct_build(p);
10673 status = extern_obj_build(p);
10677 status = extern_func_build(p);
10681 status = header_build(p);
10685 status = metadata_build(p);
10689 status = action_build(p);
10693 status = table_build(p);
10697 status = selector_build(p);
10701 status = table_state_build(p);
10705 status = regarray_build(p);
10709 status = metarray_build(p);
/* Error path: unwind the build steps in reverse order. */
10717 metarray_build_free(p);
10718 regarray_build_free(p);
10719 table_state_build_free(p);
10720 selector_build_free(p);
10721 table_build_free(p);
10722 action_build_free(p);
10723 metadata_build_free(p);
10724 header_build_free(p);
10725 extern_func_build_free(p);
10726 extern_obj_build_free(p);
10727 port_out_build_free(p);
10728 port_in_build_free(p);
10729 struct_build_free(p);
/* Public API: data path — execute n_instructions instructions (body elided
 * in this view).
 */
10735 rte_swx_pipeline_run(struct rte_swx_pipeline *p, uint32_t n_instructions)
10739 for (i = 0; i < n_instructions; i++)
/* Public API: flush any packets buffered by the output ports. */
10744 rte_swx_pipeline_flush(struct rte_swx_pipeline *p)
10748 for (i = 0; i < p->n_ports_out; i++) {
10749 struct port_out_runtime *port = &p->out[i];
10752 port->flush(port->obj);
/* Ctl API: fill in top-level pipeline counts (ports, actions, tables,
 * selectors, register and meter arrays). Actions and tables are counted by
 * walking their tailqs.
 */
10760 rte_swx_ctl_pipeline_info_get(struct rte_swx_pipeline *p,
10761 struct rte_swx_ctl_pipeline_info *pipeline)
10763 struct action *action;
10764 struct table *table;
10765 uint32_t n_actions = 0, n_tables = 0;
10767 if (!p || !pipeline)
10770 TAILQ_FOREACH(action, &p->actions, node)
10773 TAILQ_FOREACH(table, &p->tables, node)
10776 pipeline->n_ports_in = p->n_ports_in;
10777 pipeline->n_ports_out = p->n_ports_out;
10778 pipeline->n_actions = n_actions;
10779 pipeline->n_tables = n_tables;
10780 pipeline->n_selectors = p->n_selectors;
10781 pipeline->n_regarrays = p->n_regarrays;
10782 pipeline->n_metarrays = p->n_metarrays;
/* Ctl API: report the pipeline's NUMA node. */
10788 rte_swx_ctl_pipeline_numa_node_get(struct rte_swx_pipeline *p, int *numa_node)
10790 if (!p || !numa_node)
10793 *numa_node = p->numa_node;
/* Ctl API: report an action's name and argument count by action ID. */
10798 rte_swx_ctl_action_info_get(struct rte_swx_pipeline *p,
10799 uint32_t action_id,
10800 struct rte_swx_ctl_action_info *action)
10802 struct action *a = NULL;
10804 if (!p || (action_id >= p->n_actions) || !action)
10807 a = action_find_by_id(p, action_id)
10811 strcpy(action->name, a->name);
10812 action->n_args = a->st ? a->st->n_fields : 0;
/* Ctl API: report name, width and endianness of one action argument. */
10817 rte_swx_ctl_action_arg_info_get(struct rte_swx_pipeline *p,
10818 uint32_t action_id,
10819 uint32_t action_arg_id,
10820 struct rte_swx_ctl_action_arg_info *action_arg)
10822 struct action *a = NULL;
10823 struct field *arg = NULL;
10825 if (!p || (action_id >= p->n_actions) || !action_arg)
10828 a = action_find_by_id(p, action_id);
10829 if (!a || !a->st || (action_arg_id >= a->st->n_fields))
10832 arg = &a->st->fields[action_arg_id];
10833 strcpy(action_arg->name, arg->name);
10834 action_arg->n_bits = arg->n_bits;
10835 action_arg->is_network_byte_order = a->args_endianness[action_arg_id];
/* Ctl API: report a table's name, args string, field/action counts, default
 * action constness and size.
 */
10841 rte_swx_ctl_table_info_get(struct rte_swx_pipeline *p,
10843 struct rte_swx_ctl_table_info *table)
10845 struct table *t = NULL;
10850 t = table_find_by_id(p, table_id);
10854 strcpy(table->name, t->name);
10855 strcpy(table->args, t->args);
10856 table->n_match_fields = t->n_fields;
10857 table->n_actions = t->n_actions;
10858 table->default_action_is_const = t->default_action_is_const;
10859 table->size = t->size;
/* Ctl API: report one match field of a table (type, header/meta-data origin,
 * width, bit offset).
 */
10864 rte_swx_ctl_table_match_field_info_get(struct rte_swx_pipeline *p,
10866 uint32_t match_field_id,
10867 struct rte_swx_ctl_table_match_field_info *match_field)
10870 struct match_field *f;
10872 if (!p || (table_id >= p->n_tables) || !match_field)
10875 t = table_find_by_id(p, table_id);
10876 if (!t || (match_field_id >= t->n_fields))
10879 f = &t->fields[match_field_id];
10880 match_field->match_type = f->match_type;
10881 match_field->is_header = t->header ? 1 : 0;
10882 match_field->n_bits = f->field->n_bits;
10883 match_field->offset = f->field->offset;
/* Ctl API: map a table-local action index to the global action ID. */
10889 rte_swx_ctl_table_action_info_get(struct rte_swx_pipeline *p,
10891 uint32_t table_action_id,
10892 struct rte_swx_ctl_table_action_info *table_action)
10896 if (!p || (table_id >= p->n_tables) || !table_action)
10899 t = table_find_by_id(p, table_id);
10900 if (!t || (table_action_id >= t->n_actions))
10903 table_action->action_id = t->actions[table_action_id]->id;
/* Ctl API: copy out a table type's operations vector. */
10909 rte_swx_ctl_table_ops_get(struct rte_swx_pipeline *p,
10911 struct rte_swx_table_ops *table_ops,
10916 if (!p || (table_id >= p->n_tables))
10919 t = table_find_by_id(p, table_id);
10925 memcpy(table_ops, &t->type->ops, sizeof(*table_ops));
/* Ctl API: report a selector's name, field count and group limits. */
10935 rte_swx_ctl_selector_info_get(struct rte_swx_pipeline *p,
10936 uint32_t selector_id,
10937 struct rte_swx_ctl_selector_info *selector)
10939 struct selector *s = NULL;
10941 if (!p || !selector)
10944 s = selector_find_by_id(p, selector_id);
10948 strcpy(selector->name, s->name);
10950 selector->n_selector_fields = s->n_selector_fields;
10951 selector->n_groups_max = s->n_groups_max;
10952 selector->n_members_per_group_max = s->n_members_per_group_max;
/* Ctl API: describe the group-ID meta-data field of a selector. */
10958 rte_swx_ctl_selector_group_id_field_info_get(struct rte_swx_pipeline *p,
10959 uint32_t selector_id,
10960 struct rte_swx_ctl_table_match_field_info *field)
10962 struct selector *s;
10964 if (!p || (selector_id >= p->n_selectors) || !field)
10967 s = selector_find_by_id(p, selector_id);
10971 field->match_type = RTE_SWX_TABLE_MATCH_EXACT;
10972 field->is_header = 0;
10973 field->n_bits = s->group_id_field->n_bits;
10974 field->offset = s->group_id_field->offset;
/* Ctl API: describe one selector field (always reported as exact match). */
10980 rte_swx_ctl_selector_field_info_get(struct rte_swx_pipeline *p,
10981 uint32_t selector_id,
10982 uint32_t selector_field_id,
10983 struct rte_swx_ctl_table_match_field_info *field)
10985 struct selector *s;
10988 if (!p || (selector_id >= p->n_selectors) || !field)
10991 s = selector_find_by_id(p, selector_id);
10992 if (!s || (selector_field_id >= s->n_selector_fields))
10995 f = s->selector_fields[selector_field_id];
10996 field->match_type = RTE_SWX_TABLE_MATCH_EXACT;
10997 field->is_header = s->selector_header ? 1 : 0;
10998 field->n_bits = f->n_bits;
10999 field->offset = f->offset;
/* Ctl API: describe the member-ID meta-data field of a selector. */
11005 rte_swx_ctl_selector_member_id_field_info_get(struct rte_swx_pipeline *p,
11006 uint32_t selector_id,
11007 struct rte_swx_ctl_table_match_field_info *field)
11009 struct selector *s;
11011 if (!p || (selector_id >= p->n_selectors) || !field)
11014 s = selector_find_by_id(p, selector_id);
11018 field->match_type = RTE_SWX_TABLE_MATCH_EXACT;
11019 field->is_header = 0;
11020 field->n_bits = s->member_id_field->n_bits;
11021 field->offset = s->member_id_field->offset;
/* Ctl API: expose the internal table state array (built pipelines only). */
11027 rte_swx_pipeline_table_state_get(struct rte_swx_pipeline *p,
11028 struct rte_swx_table_state **table_state)
11030 if (!p || !table_state || !p->build_done)
11033 *table_state = p->table_state;
/* Ctl API: install a caller-provided table state array (built pipelines
 * only); ownership semantics follow the ctl API contract.
 */
11038 rte_swx_pipeline_table_state_set(struct rte_swx_pipeline *p,
11039 struct rte_swx_table_state *table_state)
11041 if (!p || !table_state || !p->build_done)
11044 p->table_state = table_state;
/* Ctl API: read statistics of one input port via its port type ops. */
11049 rte_swx_ctl_pipeline_port_in_stats_read(struct rte_swx_pipeline *p,
11051 struct rte_swx_port_in_stats *stats)
11053 struct port_in *port;
11058 port = port_in_find(p, port_id);
11062 port->type->ops.stats_read(port->obj, stats);
/* Ctl API: read statistics of one output port via its port type ops. */
11067 rte_swx_ctl_pipeline_port_out_stats_read(struct rte_swx_pipeline *p,
11069 struct rte_swx_port_out_stats *stats)
11071 struct port_out *port;
11076 port = port_out_find(p, port_id);
11080 port->type->ops.stats_read(port->obj, stats);
/* Ctl API: copy a table's per-action packet counters plus hit/miss counts
 * into the caller-provided stats structure. The caller must supply the
 * n_pkts_action array (sized for p->n_actions entries).
 */
11085 rte_swx_ctl_pipeline_table_stats_read(struct rte_swx_pipeline *p,
11086 const char *table_name,
11087 struct rte_swx_table_stats *stats)
11089 struct table *table;
11090 struct table_statistics *table_stats;
11092 if (!p || !table_name || !table_name[0] || !stats || !stats->n_pkts_action)
11095 table = table_find(p, table_name);
11099 table_stats = &p->table_stats[table->id];
11101 memcpy(stats->n_pkts_action,
11102 table_stats->n_pkts_action,
11103 p->n_actions * sizeof(uint64_t));
/* n_pkts_hit[] is indexed by hit flag: [1] = hits, [0] = misses. */
11105 stats->n_pkts_hit = table_stats->n_pkts_hit[1];
11106 stats->n_pkts_miss = table_stats->n_pkts_hit[0];
/* Ctl API: read the packet counter of one selector table. */
11112 rte_swx_ctl_pipeline_selector_stats_read(struct rte_swx_pipeline *p,
11113 const char *selector_name,
11114 struct rte_swx_pipeline_selector_stats *stats)
11116 struct selector *s;
11118 if (!p || !selector_name || !selector_name[0] || !stats)
11121 s = selector_find(p, selector_name);
11125 stats->n_pkts = p->selector_stats[s->id].n_pkts;
/* Ctl API: report a register array's name and size by ID. */
11131 rte_swx_ctl_regarray_info_get(struct rte_swx_pipeline *p,
11132 uint32_t regarray_id,
11133 struct rte_swx_ctl_regarray_info *regarray)
11135 struct regarray *r;
11137 if (!p || !regarray)
11140 r = regarray_find_by_id(p, regarray_id);
11144 strcpy(regarray->name, r->name);
11145 regarray->size = r->size;
/* Ctl API: read one register of a register array (bounds checked). */
11150 rte_swx_ctl_pipeline_regarray_read(struct rte_swx_pipeline *p,
11151 const char *regarray_name,
11152 uint32_t regarray_index,
11155 struct regarray *regarray;
11156 struct regarray_runtime *r;
11158 if (!p || !regarray_name || !value)
11161 regarray = regarray_find(p, regarray_name);
11162 if (!regarray || (regarray_index >= regarray->size))
11165 r = &p->regarray_runtime[regarray->id];
11166 *value = r->regarray[regarray_index];
/* Ctl API: write one register of a register array (bounds checked). */
11171 rte_swx_ctl_pipeline_regarray_write(struct rte_swx_pipeline *p,
11172 const char *regarray_name,
11173 uint32_t regarray_index,
11176 struct regarray *regarray;
11177 struct regarray_runtime *r;
11179 if (!p || !regarray_name)
11182 regarray = regarray_find(p, regarray_name);
11183 if (!regarray || (regarray_index >= regarray->size))
11186 r = &p->regarray_runtime[regarray->id];
11187 r->regarray[regarray_index] = value;
/* Ctl API: report a meter array's name and size by ID. */
11192 rte_swx_ctl_metarray_info_get(struct rte_swx_pipeline *p,
11193 uint32_t metarray_id,
11194 struct rte_swx_ctl_metarray_info *metarray)
11196 struct metarray *m;
11198 if (!p || !metarray)
11201 m = metarray_find_by_id(p, metarray_id);
11205 strcpy(metarray->name, m->name);
11206 metarray->size = m->size;
/* Ctl API: register a new trTCM meter profile under a unique name. The
 * rte_meter profile is precomputed here so meters can be attached cheaply.
 */
11211 rte_swx_ctl_meter_profile_add(struct rte_swx_pipeline *p,
11213 struct rte_meter_trtcm_params *params)
11215 struct meter_profile *mp;
11219 CHECK_NAME(name, EINVAL);
11220 CHECK(params, EINVAL);
11221 CHECK(!meter_profile_find(p, name), EEXIST);
11223 /* Node allocation. */
11224 mp = calloc(1, sizeof(struct meter_profile));
11227 /* Node initialization. */
11228 strcpy(mp->name, name);
11229 memcpy(&mp->params, params, sizeof(struct rte_meter_trtcm_params));
11230 status = rte_meter_trtcm_profile_config(&mp->profile, params);
11236 /* Node add to tailq. */
11237 TAILQ_INSERT_TAIL(&p->meter_profiles, mp, node);
/* Ctl API: delete a meter profile; refused (EBUSY) while any meter still
 * references it.
 */
11243 rte_swx_ctl_meter_profile_delete(struct rte_swx_pipeline *p,
11246 struct meter_profile *mp;
11249 CHECK_NAME(name, EINVAL);
11251 mp = meter_profile_find(p, name);
11253 CHECK(!mp->n_users, EBUSY);
11255 /* Remove node from tailq. */
11256 TAILQ_REMOVE(&p->meter_profiles, mp, node);
/* Ctl API: reset one meter of a meter array back to the default profile,
 * releasing its reference on the previously attached profile.
 */
11263 rte_swx_ctl_meter_reset(struct rte_swx_pipeline *p,
11264 const char *metarray_name,
11265 uint32_t metarray_index)
11267 struct meter_profile *mp_old;
11268 struct metarray *metarray;
11269 struct metarray_runtime *metarray_runtime;
11273 CHECK_NAME(metarray_name, EINVAL);
11275 metarray = metarray_find(p, metarray_name);
11276 CHECK(metarray, EINVAL);
11277 CHECK(metarray_index < metarray->size, EINVAL);
11279 metarray_runtime = &p->metarray_runtime[metarray->id];
11280 m = &metarray_runtime->metarray[metarray_index];
11281 mp_old = m->profile;
11291 rte_swx_ctl_meter_set(struct rte_swx_pipeline *p,
11292 const char *metarray_name,
11293 uint32_t metarray_index,
11294 const char *profile_name)
11296 struct meter_profile *mp, *mp_old;
11297 struct metarray *metarray;
11298 struct metarray_runtime *metarray_runtime;
11302 CHECK_NAME(metarray_name, EINVAL);
11304 metarray = metarray_find(p, metarray_name);
11305 CHECK(metarray, EINVAL);
11306 CHECK(metarray_index < metarray->size, EINVAL);
11308 mp = meter_profile_find(p, profile_name);
11311 metarray_runtime = &p->metarray_runtime[metarray->id];
11312 m = &metarray_runtime->metarray[metarray_index];
11313 mp_old = m->profile;
11315 memset(m, 0, sizeof(struct meter));
11316 rte_meter_trtcm_config(&m->m, &mp->profile);
11318 m->color_mask = RTE_COLORS;
11327 rte_swx_ctl_meter_stats_read(struct rte_swx_pipeline *p,
11328 const char *metarray_name,
11329 uint32_t metarray_index,
11330 struct rte_swx_ctl_meter_stats *stats)
11332 struct metarray *metarray;
11333 struct metarray_runtime *metarray_runtime;
11337 CHECK_NAME(metarray_name, EINVAL);
11339 metarray = metarray_find(p, metarray_name);
11340 CHECK(metarray, EINVAL);
11341 CHECK(metarray_index < metarray->size, EINVAL);
11343 CHECK(stats, EINVAL);
11345 metarray_runtime = &p->metarray_runtime[metarray->id];
11346 m = &metarray_runtime->metarray[metarray_index];
11348 memcpy(stats->n_pkts, m->n_pkts, sizeof(m->n_pkts));
11349 memcpy(stats->n_bytes, m->n_bytes, sizeof(m->n_bytes));