1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2021 Intel Corporation
4 #ifndef __INCLUDE_RTE_SWX_PIPELINE_INTERNAL_H__
5 #define __INCLUDE_RTE_SWX_PIPELINE_INTERNAL_H__
11 #include <rte_byteorder.h>
12 #include <rte_common.h>
13 #include <rte_cycles.h>
14 #include <rte_prefetch.h>
15 #include <rte_meter.h>
17 #include <rte_swx_table_selector.h>
18 #include <rte_swx_table_learner.h>
19 #include <rte_swx_pipeline.h>
20 #include <rte_swx_ctl.h>
/* Debug tracing helper: forwards its arguments straight to printf(). */
27 #define TRACE(...) printf(__VA_ARGS__)
/* 64-bit network/host byte-order conversion shorthands, built on the
 * rte_byteorder.h primitives included above.
 */
35 #define ntoh64(x) rte_be_to_cpu_64(x)
36 #define hton64(x) rte_cpu_to_be_64(x)
42 char name[RTE_SWX_NAME_SIZE];
49 TAILQ_ENTRY(struct_type) node;
50 char name[RTE_SWX_NAME_SIZE];
58 TAILQ_HEAD(struct_type_tailq, struct_type);
64 TAILQ_ENTRY(port_in_type) node;
65 char name[RTE_SWX_NAME_SIZE];
66 struct rte_swx_port_in_ops ops;
69 TAILQ_HEAD(port_in_type_tailq, port_in_type);
72 TAILQ_ENTRY(port_in) node;
73 struct port_in_type *type;
78 TAILQ_HEAD(port_in_tailq, port_in);
80 struct port_in_runtime {
81 rte_swx_port_in_pkt_rx_t pkt_rx;
88 struct port_out_type {
89 TAILQ_ENTRY(port_out_type) node;
90 char name[RTE_SWX_NAME_SIZE];
91 struct rte_swx_port_out_ops ops;
94 TAILQ_HEAD(port_out_type_tailq, port_out_type);
97 TAILQ_ENTRY(port_out) node;
98 struct port_out_type *type;
103 TAILQ_HEAD(port_out_tailq, port_out);
105 struct port_out_runtime {
106 rte_swx_port_out_pkt_tx_t pkt_tx;
107 rte_swx_port_out_flush_t flush;
114 struct extern_type_member_func {
115 TAILQ_ENTRY(extern_type_member_func) node;
116 char name[RTE_SWX_NAME_SIZE];
117 rte_swx_extern_type_member_func_t func;
121 TAILQ_HEAD(extern_type_member_func_tailq, extern_type_member_func);
124 TAILQ_ENTRY(extern_type) node;
125 char name[RTE_SWX_NAME_SIZE];
126 struct struct_type *mailbox_struct_type;
127 rte_swx_extern_type_constructor_t constructor;
128 rte_swx_extern_type_destructor_t destructor;
129 struct extern_type_member_func_tailq funcs;
133 TAILQ_HEAD(extern_type_tailq, extern_type);
136 TAILQ_ENTRY(extern_obj) node;
137 char name[RTE_SWX_NAME_SIZE];
138 struct extern_type *type;
144 TAILQ_HEAD(extern_obj_tailq, extern_obj);
146 #ifndef RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX
147 #define RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX 8
150 struct extern_obj_runtime {
153 rte_swx_extern_type_member_func_t funcs[RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX];
160 TAILQ_ENTRY(extern_func) node;
161 char name[RTE_SWX_NAME_SIZE];
162 struct struct_type *mailbox_struct_type;
163 rte_swx_extern_func_t func;
168 TAILQ_HEAD(extern_func_tailq, extern_func);
170 struct extern_func_runtime {
172 rte_swx_extern_func_t func;
179 TAILQ_ENTRY(header) node;
180 char name[RTE_SWX_NAME_SIZE];
181 struct struct_type *st;
186 TAILQ_HEAD(header_tailq, header);
188 struct header_runtime {
193 struct header_out_runtime {
203 /* Packet headers are always in Network Byte Order (NBO), i.e. big endian.
204 * Packet meta-data fields are always assumed to be in Host Byte Order (HBO).
205 * Table entry fields can be in either NBO or HBO; they are assumed to be in HBO
206 * when transferred to packet meta-data and in NBO when transferred to packet
210 /* Notation conventions:
211 * -Header field: H = h.header.field (dst/src)
212 * -Meta-data field: M = m.field (dst/src)
213 * -Extern object mailbox field: E = e.field (dst/src)
214 * -Extern function mailbox field: F = f.field (dst/src)
215 * -Table action data field: T = t.field (src only)
216 * -Immediate value: I = 32-bit unsigned value (src only)
219 enum instruction_type {
226 INSTR_TX, /* port_out = M */
227 INSTR_TX_I, /* port_out = I */
229 /* extract h.header */
239 /* extract h.header m.last_field_size */
242 /* lookahead h.header */
256 /* validate h.header */
259 /* invalidate h.header */
260 INSTR_HDR_INVALIDATE,
264 * dst = HMEF, src = HMEFTI
266 INSTR_MOV, /* dst = MEF, src = MEFT */
267 INSTR_MOV_MH, /* dst = MEF, src = H */
268 INSTR_MOV_HM, /* dst = H, src = MEFT */
269 INSTR_MOV_HH, /* dst = H, src = H */
270 INSTR_MOV_I, /* dst = HMEF, src = I */
272 /* dma h.header t.field
273 * memcpy(h.header, t.field, sizeof(h.header))
286 * dst = HMEF, src = HMEFTI
288 INSTR_ALU_ADD, /* dst = MEF, src = MEF */
289 INSTR_ALU_ADD_MH, /* dst = MEF, src = H */
290 INSTR_ALU_ADD_HM, /* dst = H, src = MEF */
291 INSTR_ALU_ADD_HH, /* dst = H, src = H */
292 INSTR_ALU_ADD_MI, /* dst = MEF, src = I */
293 INSTR_ALU_ADD_HI, /* dst = H, src = I */
297 * dst = HMEF, src = HMEFTI
299 INSTR_ALU_SUB, /* dst = MEF, src = MEF */
300 INSTR_ALU_SUB_MH, /* dst = MEF, src = H */
301 INSTR_ALU_SUB_HM, /* dst = H, src = MEF */
302 INSTR_ALU_SUB_HH, /* dst = H, src = H */
303 INSTR_ALU_SUB_MI, /* dst = MEF, src = I */
304 INSTR_ALU_SUB_HI, /* dst = H, src = I */
307 * dst = dst '+ src[0:1] '+ src[2:3] + ...
308 * dst = H, src = {H, h.header}
310 INSTR_ALU_CKADD_FIELD, /* src = H */
311 INSTR_ALU_CKADD_STRUCT20, /* src = h.header, with sizeof(header) = 20 */
312 INSTR_ALU_CKADD_STRUCT, /* src = h.header, with any sizeof(header) */
318 INSTR_ALU_CKSUB_FIELD,
322 * dst = HMEF, src = HMEFTI
324 INSTR_ALU_AND, /* dst = MEF, src = MEFT */
325 INSTR_ALU_AND_MH, /* dst = MEF, src = H */
326 INSTR_ALU_AND_HM, /* dst = H, src = MEFT */
327 INSTR_ALU_AND_HH, /* dst = H, src = H */
328 INSTR_ALU_AND_I, /* dst = HMEF, src = I */
332 * dst = HMEF, src = HMEFTI
334 INSTR_ALU_OR, /* dst = MEF, src = MEFT */
335 INSTR_ALU_OR_MH, /* dst = MEF, src = H */
336 INSTR_ALU_OR_HM, /* dst = H, src = MEFT */
337 INSTR_ALU_OR_HH, /* dst = H, src = H */
338 INSTR_ALU_OR_I, /* dst = HMEF, src = I */
342 * dst = HMEF, src = HMEFTI
344 INSTR_ALU_XOR, /* dst = MEF, src = MEFT */
345 INSTR_ALU_XOR_MH, /* dst = MEF, src = H */
346 INSTR_ALU_XOR_HM, /* dst = H, src = MEFT */
347 INSTR_ALU_XOR_HH, /* dst = H, src = H */
348 INSTR_ALU_XOR_I, /* dst = HMEF, src = I */
352 * dst = HMEF, src = HMEFTI
354 INSTR_ALU_SHL, /* dst = MEF, src = MEF */
355 INSTR_ALU_SHL_MH, /* dst = MEF, src = H */
356 INSTR_ALU_SHL_HM, /* dst = H, src = MEF */
357 INSTR_ALU_SHL_HH, /* dst = H, src = H */
358 INSTR_ALU_SHL_MI, /* dst = MEF, src = I */
359 INSTR_ALU_SHL_HI, /* dst = H, src = I */
363 * dst = HMEF, src = HMEFTI
365 INSTR_ALU_SHR, /* dst = MEF, src = MEF */
366 INSTR_ALU_SHR_MH, /* dst = MEF, src = H */
367 INSTR_ALU_SHR_HM, /* dst = H, src = MEF */
368 INSTR_ALU_SHR_HH, /* dst = H, src = H */
369 INSTR_ALU_SHR_MI, /* dst = MEF, src = I */
370 INSTR_ALU_SHR_HI, /* dst = H, src = I */
372 /* regprefetch REGARRAY index
373 * prefetch REGARRAY[index]
376 INSTR_REGPREFETCH_RH, /* index = H */
377 INSTR_REGPREFETCH_RM, /* index = MEFT */
378 INSTR_REGPREFETCH_RI, /* index = I */
380 /* regrd dst REGARRAY index
381 * dst = REGARRAY[index]
382 * dst = HMEF, index = HMEFTI
384 INSTR_REGRD_HRH, /* dst = H, index = H */
385 INSTR_REGRD_HRM, /* dst = H, index = MEFT */
386 INSTR_REGRD_HRI, /* dst = H, index = I */
387 INSTR_REGRD_MRH, /* dst = MEF, index = H */
388 INSTR_REGRD_MRM, /* dst = MEF, index = MEFT */
389 INSTR_REGRD_MRI, /* dst = MEF, index = I */
391 /* regwr REGARRAY index src
392 * REGARRAY[index] = src
393 * index = HMEFTI, src = HMEFTI
395 INSTR_REGWR_RHH, /* index = H, src = H */
396 INSTR_REGWR_RHM, /* index = H, src = MEFT */
397 INSTR_REGWR_RHI, /* index = H, src = I */
398 INSTR_REGWR_RMH, /* index = MEFT, src = H */
399 INSTR_REGWR_RMM, /* index = MEFT, src = MEFT */
400 INSTR_REGWR_RMI, /* index = MEFT, src = I */
401 INSTR_REGWR_RIH, /* index = I, src = H */
402 INSTR_REGWR_RIM, /* index = I, src = MEFT */
403 INSTR_REGWR_RII, /* index = I, src = I */
405 /* regadd REGARRAY index src
406 * REGARRAY[index] += src
407 * index = HMEFTI, src = HMEFTI
409 INSTR_REGADD_RHH, /* index = H, src = H */
410 INSTR_REGADD_RHM, /* index = H, src = MEFT */
411 INSTR_REGADD_RHI, /* index = H, src = I */
412 INSTR_REGADD_RMH, /* index = MEFT, src = H */
413 INSTR_REGADD_RMM, /* index = MEFT, src = MEFT */
414 INSTR_REGADD_RMI, /* index = MEFT, src = I */
415 INSTR_REGADD_RIH, /* index = I, src = H */
416 INSTR_REGADD_RIM, /* index = I, src = MEFT */
417 INSTR_REGADD_RII, /* index = I, src = I */
419 /* metprefetch METARRAY index
420 * prefetch METARRAY[index]
423 INSTR_METPREFETCH_H, /* index = H */
424 INSTR_METPREFETCH_M, /* index = MEFT */
425 INSTR_METPREFETCH_I, /* index = I */
427 /* meter METARRAY index length color_in color_out
428 * color_out = meter(METARRAY[index], length, color_in)
429 * index = HMEFTI, length = HMEFT, color_in = MEFTI, color_out = MEF
431 INSTR_METER_HHM, /* index = H, length = H, color_in = MEFT */
432 INSTR_METER_HHI, /* index = H, length = H, color_in = I */
433 INSTR_METER_HMM, /* index = H, length = MEFT, color_in = MEFT */
434 INSTR_METER_HMI, /* index = H, length = MEFT, color_in = I */
435 INSTR_METER_MHM, /* index = MEFT, length = H, color_in = MEFT */
436 INSTR_METER_MHI, /* index = MEFT, length = H, color_in = I */
437 INSTR_METER_MMM, /* index = MEFT, length = MEFT, color_in = MEFT */
438 INSTR_METER_MMI, /* index = MEFT, length = MEFT, color_in = I */
439 INSTR_METER_IHM, /* index = I, length = H, color_in = MEFT */
440 INSTR_METER_IHI, /* index = I, length = H, color_in = I */
441 INSTR_METER_IMM, /* index = I, length = MEFT, color_in = MEFT */
442 INSTR_METER_IMI, /* index = I, length = MEFT, color_in = I */
449 /* learn LEARNER ACTION_NAME */
453 INSTR_LEARNER_FORGET,
455 /* extern e.obj.func */
466 /* jmpv LABEL h.header
467 * Jump if header is valid
471 /* jmpnv LABEL h.header
472 * Jump if header is invalid
477 * Jump if table lookup hit
482 * Jump if table lookup miss
489 INSTR_JMP_ACTION_HIT,
491 /* jmpna LABEL ACTION
492 * Jump if action not run
494 INSTR_JMP_ACTION_MISS,
497 * Jump if a is equal to b
498 * a = HMEFT, b = HMEFTI
500 INSTR_JMP_EQ, /* a = MEFT, b = MEFT */
501 INSTR_JMP_EQ_MH, /* a = MEFT, b = H */
502 INSTR_JMP_EQ_HM, /* a = H, b = MEFT */
503 INSTR_JMP_EQ_HH, /* a = H, b = H */
504 INSTR_JMP_EQ_I, /* (a, b) = (MEFT, I) or (a, b) = (H, I) */
507 * Jump if a is not equal to b
508 * a = HMEFT, b = HMEFTI
510 INSTR_JMP_NEQ, /* a = MEFT, b = MEFT */
511 INSTR_JMP_NEQ_MH, /* a = MEFT, b = H */
512 INSTR_JMP_NEQ_HM, /* a = H, b = MEFT */
513 INSTR_JMP_NEQ_HH, /* a = H, b = H */
514 INSTR_JMP_NEQ_I, /* (a, b) = (MEFT, I) or (a, b) = (H, I) */
517 * Jump if a is less than b
518 * a = HMEFT, b = HMEFTI
520 INSTR_JMP_LT, /* a = MEFT, b = MEFT */
521 INSTR_JMP_LT_MH, /* a = MEFT, b = H */
522 INSTR_JMP_LT_HM, /* a = H, b = MEFT */
523 INSTR_JMP_LT_HH, /* a = H, b = H */
524 INSTR_JMP_LT_MI, /* a = MEFT, b = I */
525 INSTR_JMP_LT_HI, /* a = H, b = I */
528 * Jump if a is greater than b
529 * a = HMEFT, b = HMEFTI
531 INSTR_JMP_GT, /* a = MEFT, b = MEFT */
532 INSTR_JMP_GT_MH, /* a = MEFT, b = H */
533 INSTR_JMP_GT_HM, /* a = H, b = MEFT */
534 INSTR_JMP_GT_HH, /* a = H, b = H */
535 INSTR_JMP_GT_MI, /* a = MEFT, b = I */
536 INSTR_JMP_GT_HI, /* a = H, b = I */
544 struct instr_operand {
565 uint8_t header_id[8];
566 uint8_t struct_id[8];
571 struct instr_hdr_validity {
583 struct instr_extern_obj {
588 struct instr_extern_func {
592 struct instr_dst_src {
593 struct instr_operand dst;
595 struct instr_operand src;
600 struct instr_regarray {
605 struct instr_operand idx;
610 struct instr_operand dstsrc;
620 struct instr_operand idx;
624 struct instr_operand length;
627 struct instr_operand color_in;
628 uint32_t color_in_val;
631 struct instr_operand color_out;
636 uint8_t header_id[8];
637 uint8_t struct_id[8];
648 struct instruction *ip;
651 struct instr_operand a;
657 struct instr_operand b;
663 enum instruction_type type;
666 struct instr_hdr_validity valid;
667 struct instr_dst_src mov;
668 struct instr_regarray regarray;
669 struct instr_meter meter;
670 struct instr_dma dma;
671 struct instr_dst_src alu;
672 struct instr_table table;
673 struct instr_learn learn;
674 struct instr_extern_obj ext_obj;
675 struct instr_extern_func ext_func;
676 struct instr_jmp jmp;
680 struct instruction_data {
681 char label[RTE_SWX_NAME_SIZE];
682 char jmp_label[RTE_SWX_NAME_SIZE];
683 uint32_t n_users; /* user = jmp instruction to this instruction. */
691 TAILQ_ENTRY(action) node;
692 char name[RTE_SWX_NAME_SIZE];
693 struct struct_type *st;
694 int *args_endianness; /* 0 = Host Byte Order (HBO); 1 = Network Byte Order (NBO). */
695 struct instruction *instructions;
696 uint32_t n_instructions;
700 TAILQ_HEAD(action_tailq, action);
706 TAILQ_ENTRY(table_type) node;
707 char name[RTE_SWX_NAME_SIZE];
708 enum rte_swx_table_match_type match_type;
709 struct rte_swx_table_ops ops;
712 TAILQ_HEAD(table_type_tailq, table_type);
715 enum rte_swx_table_match_type match_type;
720 TAILQ_ENTRY(table) node;
721 char name[RTE_SWX_NAME_SIZE];
722 char args[RTE_SWX_NAME_SIZE];
723 struct table_type *type; /* NULL when n_fields == 0. */
726 struct match_field *fields;
728 struct header *header; /* Only valid when n_fields > 0. */
731 struct action **actions;
732 struct action *default_action;
733 uint8_t *default_action_data;
735 int default_action_is_const;
736 uint32_t action_data_size_max;
742 TAILQ_HEAD(table_tailq, table);
744 struct table_runtime {
745 rte_swx_table_lookup_t func;
750 struct table_statistics {
751 uint64_t n_pkts_hit[2]; /* 0 = Miss, 1 = Hit. */
752 uint64_t *n_pkts_action;
759 TAILQ_ENTRY(selector) node;
760 char name[RTE_SWX_NAME_SIZE];
762 struct field *group_id_field;
763 struct field **selector_fields;
764 uint32_t n_selector_fields;
765 struct header *selector_header;
766 struct field *member_id_field;
768 uint32_t n_groups_max;
769 uint32_t n_members_per_group_max;
774 TAILQ_HEAD(selector_tailq, selector);
776 struct selector_runtime {
778 uint8_t **group_id_buffer;
779 uint8_t **selector_buffer;
780 uint8_t **member_id_buffer;
783 struct selector_statistics {
791 TAILQ_ENTRY(learner) node;
792 char name[RTE_SWX_NAME_SIZE];
795 struct field **fields;
797 struct header *header;
800 struct action **actions;
801 struct field **action_arg;
802 struct action *default_action;
803 uint8_t *default_action_data;
805 int default_action_is_const;
806 uint32_t action_data_size_max;
813 TAILQ_HEAD(learner_tailq, learner);
815 struct learner_runtime {
818 uint8_t **action_data;
821 struct learner_statistics {
822 uint64_t n_pkts_hit[2]; /* 0 = Miss, 1 = Hit. */
823 uint64_t n_pkts_learn[2]; /* 0 = Learn OK, 1 = Learn error. */
824 uint64_t n_pkts_forget;
825 uint64_t *n_pkts_action;
832 TAILQ_ENTRY(regarray) node;
833 char name[RTE_SWX_NAME_SIZE];
839 TAILQ_HEAD(regarray_tailq, regarray);
841 struct regarray_runtime {
849 struct meter_profile {
850 TAILQ_ENTRY(meter_profile) node;
851 char name[RTE_SWX_NAME_SIZE];
852 struct rte_meter_trtcm_params params;
853 struct rte_meter_trtcm_profile profile;
857 TAILQ_HEAD(meter_profile_tailq, meter_profile);
860 TAILQ_ENTRY(metarray) node;
861 char name[RTE_SWX_NAME_SIZE];
866 TAILQ_HEAD(metarray_tailq, metarray);
869 struct rte_meter_trtcm m;
870 struct meter_profile *profile;
871 enum rte_color color_mask;
874 uint64_t n_pkts[RTE_COLORS];
875 uint64_t n_bytes[RTE_COLORS];
878 struct metarray_runtime {
879 struct meter *metarray;
888 struct rte_swx_pkt pkt;
894 /* Packet headers. */
895 struct header_runtime *headers; /* Extracted or generated headers. */
896 struct header_out_runtime *headers_out; /* Emitted headers. */
897 uint8_t *header_storage;
898 uint8_t *header_out_storage;
899 uint64_t valid_headers;
900 uint32_t n_headers_out;
902 /* Packet meta-data. */
906 struct table_runtime *tables;
907 struct selector_runtime *selectors;
908 struct learner_runtime *learners;
909 struct rte_swx_table_state *table_state;
911 int hit; /* 0 = Miss, 1 = Hit. */
915 /* Extern objects and functions. */
916 struct extern_obj_runtime *extern_objs;
917 struct extern_func_runtime *extern_funcs;
920 struct instruction *ip;
921 struct instruction *ret;
/* Single-bit test / set / clear helpers on a 64-bit mask. Each expands to a
 * pure expression: the mask argument itself is never modified.
 */
924 #define MASK64_BIT_GET(mask, pos) ((mask) & (1LLU << (pos)))
925 #define MASK64_BIT_SET(mask, pos) ((mask) | (1LLU << (pos)))
926 #define MASK64_BIT_CLR(mask, pos) ((mask) & ~(1LLU << (pos)))
/* Non-zero when bit header_id is set in the thread's valid_headers bit mask,
 * i.e. when the given header has been extracted/validated for this packet.
 */
928 #define HEADER_VALID(thread, header_id) \
929 MASK64_BIT_GET((thread)->valid_headers, header_id)
931 #define ALU(thread, ip, operator) \
933 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
934 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
935 uint64_t dst64 = *dst64_ptr; \
936 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
937 uint64_t dst = dst64 & dst64_mask; \
939 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
940 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
941 uint64_t src64 = *src64_ptr; \
942 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
943 uint64_t src = src64 & src64_mask; \
945 uint64_t result = dst operator src; \
947 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
950 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
952 #define ALU_MH(thread, ip, operator) \
954 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
955 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
956 uint64_t dst64 = *dst64_ptr; \
957 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
958 uint64_t dst = dst64 & dst64_mask; \
960 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
961 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
962 uint64_t src64 = *src64_ptr; \
963 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
965 uint64_t result = dst operator src; \
967 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
970 #define ALU_HM(thread, ip, operator) \
972 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
973 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
974 uint64_t dst64 = *dst64_ptr; \
975 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
976 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
978 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
979 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
980 uint64_t src64 = *src64_ptr; \
981 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
982 uint64_t src = src64 & src64_mask; \
984 uint64_t result = dst operator src; \
985 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
987 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
990 #define ALU_HM_FAST(thread, ip, operator) \
992 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
993 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
994 uint64_t dst64 = *dst64_ptr; \
995 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
996 uint64_t dst = dst64 & dst64_mask; \
998 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
999 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
1000 uint64_t src64 = *src64_ptr; \
1001 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
1002 uint64_t src = hton64(src64 & src64_mask) >> (64 - (ip)->alu.dst.n_bits); \
1004 uint64_t result = dst operator src; \
1006 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
1009 #define ALU_HH(thread, ip, operator) \
1011 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
1012 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
1013 uint64_t dst64 = *dst64_ptr; \
1014 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
1015 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
1017 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
1018 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
1019 uint64_t src64 = *src64_ptr; \
1020 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
1022 uint64_t result = dst operator src; \
1023 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
1025 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
1028 #define ALU_HH_FAST(thread, ip, operator) \
1030 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
1031 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
1032 uint64_t dst64 = *dst64_ptr; \
1033 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
1034 uint64_t dst = dst64 & dst64_mask; \
1036 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
1037 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
1038 uint64_t src64 = *src64_ptr; \
1039 uint64_t src = (src64 << (64 - (ip)->alu.src.n_bits)) >> (64 - (ip)->alu.dst.n_bits); \
1041 uint64_t result = dst operator src; \
1043 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
/* NOTE(review): these aliases appear to sit in the fallback branch of the
 * RTE_BYTE_ORDER check above (the #else line is not visible in this view —
 * confirm). Where no host/network byte swap is needed, the _FAST
 * header-operand variants collapse to the generic ALU macro.
 */
1050 #define ALU_HM_FAST ALU
1052 #define ALU_HH_FAST ALU
1056 #define ALU_I(thread, ip, operator) \
1058 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
1059 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
1060 uint64_t dst64 = *dst64_ptr; \
1061 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
1062 uint64_t dst = dst64 & dst64_mask; \
1064 uint64_t src = (ip)->alu.src_val; \
1066 uint64_t result = dst operator src; \
1068 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
/* Meta-data destination with immediate source uses the generic immediate ALU. */
1071 #define ALU_MI ALU_I
1073 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1075 #define ALU_HI(thread, ip, operator) \
1077 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
1078 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
1079 uint64_t dst64 = *dst64_ptr; \
1080 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
1081 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
1083 uint64_t src = (ip)->alu.src_val; \
1085 uint64_t result = dst operator src; \
1086 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
1088 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
/* NOTE(review): presumably the fallback branch of the RTE_BYTE_ORDER check
 * above (the #else line is not visible here — confirm): with no byte swap
 * required, the header-destination immediate ALU aliases the generic ALU_I.
 */
1093 #define ALU_HI ALU_I
1097 #define MOV(thread, ip) \
1099 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
1100 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
1101 uint64_t dst64 = *dst64_ptr; \
1102 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
1104 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
1105 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
1106 uint64_t src64 = *src64_ptr; \
1107 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \
1108 uint64_t src = src64 & src64_mask; \
1110 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
1113 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1115 #define MOV_MH(thread, ip) \
1117 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
1118 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
1119 uint64_t dst64 = *dst64_ptr; \
1120 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
1122 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
1123 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
1124 uint64_t src64 = *src64_ptr; \
1125 uint64_t src = ntoh64(src64) >> (64 - (ip)->mov.src.n_bits); \
1127 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
1130 #define MOV_HM(thread, ip) \
1132 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
1133 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
1134 uint64_t dst64 = *dst64_ptr; \
1135 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
1137 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
1138 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
1139 uint64_t src64 = *src64_ptr; \
1140 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \
1141 uint64_t src = src64 & src64_mask; \
1143 src = hton64(src) >> (64 - (ip)->mov.dst.n_bits); \
1144 *dst64_ptr = (dst64 & ~dst64_mask) | src; \
1147 #define MOV_HH(thread, ip) \
1149 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
1150 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
1151 uint64_t dst64 = *dst64_ptr; \
1152 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
1154 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
1155 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
1156 uint64_t src64 = *src64_ptr; \
1158 uint64_t src = src64 << (64 - (ip)->mov.src.n_bits); \
1159 src = src >> (64 - (ip)->mov.dst.n_bits); \
1160 *dst64_ptr = (dst64 & ~dst64_mask) | src; \
1171 #define MOV_I(thread, ip) \
1173 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
1174 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
1175 uint64_t dst64 = *dst64_ptr; \
1176 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
1178 uint64_t src = (ip)->mov.src_val; \
1180 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
1183 #define JMP_CMP(thread, ip, operator) \
1185 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1186 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1187 uint64_t a64 = *a64_ptr; \
1188 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
1189 uint64_t a = a64 & a64_mask; \
1191 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1192 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1193 uint64_t b64 = *b64_ptr; \
1194 uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits); \
1195 uint64_t b = b64 & b64_mask; \
1197 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1200 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1202 #define JMP_CMP_MH(thread, ip, operator) \
1204 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1205 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1206 uint64_t a64 = *a64_ptr; \
1207 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
1208 uint64_t a = a64 & a64_mask; \
1210 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1211 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1212 uint64_t b64 = *b64_ptr; \
1213 uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits); \
1215 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1218 #define JMP_CMP_HM(thread, ip, operator) \
1220 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1221 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1222 uint64_t a64 = *a64_ptr; \
1223 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
1225 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1226 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1227 uint64_t b64 = *b64_ptr; \
1228 uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits); \
1229 uint64_t b = b64 & b64_mask; \
1231 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1234 #define JMP_CMP_HH(thread, ip, operator) \
1236 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1237 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1238 uint64_t a64 = *a64_ptr; \
1239 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
1241 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1242 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1243 uint64_t b64 = *b64_ptr; \
1244 uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits); \
1246 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1249 #define JMP_CMP_HH_FAST(thread, ip, operator) \
1251 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1252 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1253 uint64_t a64 = *a64_ptr; \
1254 uint64_t a = a64 << (64 - (ip)->jmp.a.n_bits); \
1256 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1257 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1258 uint64_t b64 = *b64_ptr; \
1259 uint64_t b = b64 << (64 - (ip)->jmp.b.n_bits); \
1261 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
/* NOTE(review): fallback aliases, apparently for builds where no byte-order
 * conversion is required (the enclosing #else is not visible here — confirm):
 * all header/meta-data jump-compare variants collapse to the generic JMP_CMP.
 */
1266 #define JMP_CMP_MH JMP_CMP
1267 #define JMP_CMP_HM JMP_CMP
1268 #define JMP_CMP_HH JMP_CMP
1269 #define JMP_CMP_HH_FAST JMP_CMP
1273 #define JMP_CMP_I(thread, ip, operator) \
1275 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1276 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1277 uint64_t a64 = *a64_ptr; \
1278 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
1279 uint64_t a = a64 & a64_mask; \
1281 uint64_t b = (ip)->jmp.b_val; \
1283 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
/* Meta-data operand vs. immediate compare uses the generic immediate form. */
1286 #define JMP_CMP_MI JMP_CMP_I
1288 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1290 #define JMP_CMP_HI(thread, ip, operator) \
1292 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1293 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1294 uint64_t a64 = *a64_ptr; \
1295 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
1297 uint64_t b = (ip)->jmp.b_val; \
1299 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
/* NOTE(review): fallback when no byte swap is needed; confirm against the
 * surrounding #if/#else (not fully visible in this view).
 */
1304 #define JMP_CMP_HI JMP_CMP_I
1308 #define METADATA_READ(thread, offset, n_bits) \
1310 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
1311 uint64_t m64 = *m64_ptr; \
1312 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
1316 #define METADATA_WRITE(thread, offset, n_bits, value) \
1318 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
1319 uint64_t m64 = *m64_ptr; \
1320 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
1322 uint64_t m_new = value; \
1324 *m64_ptr = (m64 & ~m64_mask) | (m_new & m64_mask); \
1327 #ifndef RTE_SWX_PIPELINE_THREADS_MAX
1328 #define RTE_SWX_PIPELINE_THREADS_MAX 16
1331 struct rte_swx_pipeline {
1332 struct struct_type_tailq struct_types;
1333 struct port_in_type_tailq port_in_types;
1334 struct port_in_tailq ports_in;
1335 struct port_out_type_tailq port_out_types;
1336 struct port_out_tailq ports_out;
1337 struct extern_type_tailq extern_types;
1338 struct extern_obj_tailq extern_objs;
1339 struct extern_func_tailq extern_funcs;
1340 struct header_tailq headers;
1341 struct struct_type *metadata_st;
1342 uint32_t metadata_struct_id;
1343 struct action_tailq actions;
1344 struct table_type_tailq table_types;
1345 struct table_tailq tables;
1346 struct selector_tailq selectors;
1347 struct learner_tailq learners;
1348 struct regarray_tailq regarrays;
1349 struct meter_profile_tailq meter_profiles;
1350 struct metarray_tailq metarrays;
1352 struct port_in_runtime *in;
1353 struct port_out_runtime *out;
1354 struct instruction **action_instructions;
1355 struct rte_swx_table_state *table_state;
1356 struct table_statistics *table_stats;
1357 struct selector_statistics *selector_stats;
1358 struct learner_statistics *learner_stats;
1359 struct regarray_runtime *regarray_runtime;
1360 struct metarray_runtime *metarray_runtime;
1361 struct instruction *instructions;
1362 struct thread threads[RTE_SWX_PIPELINE_THREADS_MAX];
1365 uint32_t n_ports_in;
1366 uint32_t n_ports_out;
1367 uint32_t n_extern_objs;
1368 uint32_t n_extern_funcs;
1371 uint32_t n_selectors;
1372 uint32_t n_learners;
1373 uint32_t n_regarrays;
1374 uint32_t n_metarrays;
1378 uint32_t n_instructions;