/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 Intel Corporation
 */
4 #ifndef __INCLUDE_RTE_SWX_PIPELINE_INTERNAL_H__
5 #define __INCLUDE_RTE_SWX_PIPELINE_INTERNAL_H__
11 #include <rte_byteorder.h>
12 #include <rte_common.h>
13 #include <rte_cycles.h>
14 #include <rte_prefetch.h>
15 #include <rte_meter.h>
17 #include <rte_swx_table_selector.h>
18 #include <rte_swx_table_learner.h>
19 #include <rte_swx_pipeline.h>
20 #include <rte_swx_ctl.h>
27 #define TRACE(...) printf(__VA_ARGS__)
/* 64-bit network <-> host byte order conversion helpers. */
#define ntoh64(x) rte_be_to_cpu_64(x)
#define hton64(x) rte_cpu_to_be_64(x)
42 char name[RTE_SWX_NAME_SIZE];
49 TAILQ_ENTRY(struct_type) node;
50 char name[RTE_SWX_NAME_SIZE];
58 TAILQ_HEAD(struct_type_tailq, struct_type);
64 TAILQ_ENTRY(port_in_type) node;
65 char name[RTE_SWX_NAME_SIZE];
66 struct rte_swx_port_in_ops ops;
69 TAILQ_HEAD(port_in_type_tailq, port_in_type);
72 TAILQ_ENTRY(port_in) node;
73 struct port_in_type *type;
78 TAILQ_HEAD(port_in_tailq, port_in);
80 struct port_in_runtime {
81 rte_swx_port_in_pkt_rx_t pkt_rx;
88 struct port_out_type {
89 TAILQ_ENTRY(port_out_type) node;
90 char name[RTE_SWX_NAME_SIZE];
91 struct rte_swx_port_out_ops ops;
94 TAILQ_HEAD(port_out_type_tailq, port_out_type);
97 TAILQ_ENTRY(port_out) node;
98 struct port_out_type *type;
103 TAILQ_HEAD(port_out_tailq, port_out);
105 struct port_out_runtime {
106 rte_swx_port_out_pkt_tx_t pkt_tx;
107 rte_swx_port_out_pkt_fast_clone_tx_t pkt_fast_clone_tx;
108 rte_swx_port_out_pkt_clone_tx_t pkt_clone_tx;
109 rte_swx_port_out_flush_t flush;
116 struct mirroring_session {
119 uint32_t truncation_length;
125 struct extern_type_member_func {
126 TAILQ_ENTRY(extern_type_member_func) node;
127 char name[RTE_SWX_NAME_SIZE];
128 rte_swx_extern_type_member_func_t func;
132 TAILQ_HEAD(extern_type_member_func_tailq, extern_type_member_func);
135 TAILQ_ENTRY(extern_type) node;
136 char name[RTE_SWX_NAME_SIZE];
137 struct struct_type *mailbox_struct_type;
138 rte_swx_extern_type_constructor_t constructor;
139 rte_swx_extern_type_destructor_t destructor;
140 struct extern_type_member_func_tailq funcs;
144 TAILQ_HEAD(extern_type_tailq, extern_type);
147 TAILQ_ENTRY(extern_obj) node;
148 char name[RTE_SWX_NAME_SIZE];
149 struct extern_type *type;
155 TAILQ_HEAD(extern_obj_tailq, extern_obj);
157 #ifndef RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX
158 #define RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX 8
161 struct extern_obj_runtime {
164 rte_swx_extern_type_member_func_t funcs[RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX];
171 TAILQ_ENTRY(extern_func) node;
172 char name[RTE_SWX_NAME_SIZE];
173 struct struct_type *mailbox_struct_type;
174 rte_swx_extern_func_t func;
179 TAILQ_HEAD(extern_func_tailq, extern_func);
181 struct extern_func_runtime {
183 rte_swx_extern_func_t func;
190 TAILQ_ENTRY(header) node;
191 char name[RTE_SWX_NAME_SIZE];
192 struct struct_type *st;
197 TAILQ_HEAD(header_tailq, header);
199 struct header_runtime {
204 struct header_out_runtime {
214 /* Packet headers are always in Network Byte Order (NBO), i.e. big endian.
215 * Packet meta-data fields are always assumed to be in Host Byte Order (HBO).
216 * Table entry fields can be in either NBO or HBO; they are assumed to be in HBO
217 * when transferred to packet meta-data and in NBO when transferred to packet
221 /* Notation conventions:
222 * -Header field: H = h.header.field (dst/src)
223 * -Meta-data field: M = m.field (dst/src)
224 * -Extern object mailbox field: E = e.field (dst/src)
225 * -Extern function mailbox field: F = f.field (dst/src)
226 * -Table action data field: T = t.field (src only)
227 * -Immediate value: I = 32-bit unsigned value (src only)
230 enum instruction_type {
237 INSTR_TX, /* port_out = M */
238 INSTR_TX_I, /* port_out = I */
242 * mirror slot_id session_id
252 /* recircid m.recirc_pass_id
253 * Read the internal recirculation pass ID into the specified meta-data field.
257 /* extract h.header */
267 /* extract h.header m.last_field_size */
270 /* lookahead h.header */
284 /* validate h.header */
287 /* invalidate h.header */
288 INSTR_HDR_INVALIDATE,
292 * dst = HMEF, src = HMEFTI
294 INSTR_MOV, /* dst = MEF, src = MEFT */
295 INSTR_MOV_MH, /* dst = MEF, src = H */
296 INSTR_MOV_HM, /* dst = H, src = MEFT */
297 INSTR_MOV_HH, /* dst = H, src = H */
298 INSTR_MOV_I, /* dst = HMEF, src = I */
300 /* dma h.header t.field
301 * memcpy(h.header, t.field, sizeof(h.header))
314 * dst = HMEF, src = HMEFTI
316 INSTR_ALU_ADD, /* dst = MEF, src = MEF */
317 INSTR_ALU_ADD_MH, /* dst = MEF, src = H */
318 INSTR_ALU_ADD_HM, /* dst = H, src = MEF */
319 INSTR_ALU_ADD_HH, /* dst = H, src = H */
320 INSTR_ALU_ADD_MI, /* dst = MEF, src = I */
321 INSTR_ALU_ADD_HI, /* dst = H, src = I */
325 * dst = HMEF, src = HMEFTI
327 INSTR_ALU_SUB, /* dst = MEF, src = MEF */
328 INSTR_ALU_SUB_MH, /* dst = MEF, src = H */
329 INSTR_ALU_SUB_HM, /* dst = H, src = MEF */
330 INSTR_ALU_SUB_HH, /* dst = H, src = H */
331 INSTR_ALU_SUB_MI, /* dst = MEF, src = I */
332 INSTR_ALU_SUB_HI, /* dst = H, src = I */
335 * dst = dst '+ src[0:1] '+ src[2:3] '+ ...
336 * dst = H, src = {H, h.header}, '+ = 1's complement addition operator
338 INSTR_ALU_CKADD_FIELD, /* src = H */
339 INSTR_ALU_CKADD_STRUCT20, /* src = h.header, with sizeof(header) = 20 bytes. */
340 INSTR_ALU_CKADD_STRUCT, /* src = h.header, with sizeof(header) any 4-byte multiple. */
344 * dst = H, src = H, '- = 1's complement subtraction operator
346 INSTR_ALU_CKSUB_FIELD,
350 * dst = HMEF, src = HMEFTI
352 INSTR_ALU_AND, /* dst = MEF, src = MEFT */
353 INSTR_ALU_AND_MH, /* dst = MEF, src = H */
354 INSTR_ALU_AND_HM, /* dst = H, src = MEFT */
355 INSTR_ALU_AND_HH, /* dst = H, src = H */
356 INSTR_ALU_AND_I, /* dst = HMEF, src = I */
360 * dst = HMEF, src = HMEFTI
362 INSTR_ALU_OR, /* dst = MEF, src = MEFT */
363 INSTR_ALU_OR_MH, /* dst = MEF, src = H */
364 INSTR_ALU_OR_HM, /* dst = H, src = MEFT */
365 INSTR_ALU_OR_HH, /* dst = H, src = H */
366 INSTR_ALU_OR_I, /* dst = HMEF, src = I */
370 * dst = HMEF, src = HMEFTI
372 INSTR_ALU_XOR, /* dst = MEF, src = MEFT */
373 INSTR_ALU_XOR_MH, /* dst = MEF, src = H */
374 INSTR_ALU_XOR_HM, /* dst = H, src = MEFT */
375 INSTR_ALU_XOR_HH, /* dst = H, src = H */
376 INSTR_ALU_XOR_I, /* dst = HMEF, src = I */
380 * dst = HMEF, src = HMEFTI
382 INSTR_ALU_SHL, /* dst = MEF, src = MEF */
383 INSTR_ALU_SHL_MH, /* dst = MEF, src = H */
384 INSTR_ALU_SHL_HM, /* dst = H, src = MEF */
385 INSTR_ALU_SHL_HH, /* dst = H, src = H */
386 INSTR_ALU_SHL_MI, /* dst = MEF, src = I */
387 INSTR_ALU_SHL_HI, /* dst = H, src = I */
391 * dst = HMEF, src = HMEFTI
393 INSTR_ALU_SHR, /* dst = MEF, src = MEF */
394 INSTR_ALU_SHR_MH, /* dst = MEF, src = H */
395 INSTR_ALU_SHR_HM, /* dst = H, src = MEF */
396 INSTR_ALU_SHR_HH, /* dst = H, src = H */
397 INSTR_ALU_SHR_MI, /* dst = MEF, src = I */
398 INSTR_ALU_SHR_HI, /* dst = H, src = I */
400 /* regprefetch REGARRAY index
401 * prefetch REGARRAY[index]
404 INSTR_REGPREFETCH_RH, /* index = H */
405 INSTR_REGPREFETCH_RM, /* index = MEFT */
406 INSTR_REGPREFETCH_RI, /* index = I */
408 /* regrd dst REGARRAY index
409 * dst = REGARRAY[index]
410 * dst = HMEF, index = HMEFTI
412 INSTR_REGRD_HRH, /* dst = H, index = H */
413 INSTR_REGRD_HRM, /* dst = H, index = MEFT */
414 INSTR_REGRD_HRI, /* dst = H, index = I */
415 INSTR_REGRD_MRH, /* dst = MEF, index = H */
416 INSTR_REGRD_MRM, /* dst = MEF, index = MEFT */
417 INSTR_REGRD_MRI, /* dst = MEF, index = I */
419 /* regwr REGARRAY index src
420 * REGARRAY[index] = src
421 * index = HMEFTI, src = HMEFTI
423 INSTR_REGWR_RHH, /* index = H, src = H */
424 INSTR_REGWR_RHM, /* index = H, src = MEFT */
425 INSTR_REGWR_RHI, /* index = H, src = I */
426 INSTR_REGWR_RMH, /* index = MEFT, src = H */
427 INSTR_REGWR_RMM, /* index = MEFT, src = MEFT */
428 INSTR_REGWR_RMI, /* index = MEFT, src = I */
429 INSTR_REGWR_RIH, /* index = I, src = H */
430 INSTR_REGWR_RIM, /* index = I, src = MEFT */
431 INSTR_REGWR_RII, /* index = I, src = I */
433 /* regadd REGARRAY index src
434 * REGARRAY[index] += src
435 * index = HMEFTI, src = HMEFTI
437 INSTR_REGADD_RHH, /* index = H, src = H */
438 INSTR_REGADD_RHM, /* index = H, src = MEFT */
439 INSTR_REGADD_RHI, /* index = H, src = I */
440 INSTR_REGADD_RMH, /* index = MEFT, src = H */
441 INSTR_REGADD_RMM, /* index = MEFT, src = MEFT */
442 INSTR_REGADD_RMI, /* index = MEFT, src = I */
443 INSTR_REGADD_RIH, /* index = I, src = H */
444 INSTR_REGADD_RIM, /* index = I, src = MEFT */
445 INSTR_REGADD_RII, /* index = I, src = I */
447 /* metprefetch METARRAY index
448 * prefetch METARRAY[index]
451 INSTR_METPREFETCH_H, /* index = H */
452 INSTR_METPREFETCH_M, /* index = MEFT */
453 INSTR_METPREFETCH_I, /* index = I */
455 /* meter METARRAY index length color_in color_out
456 * color_out = meter(METARRAY[index], length, color_in)
457 * index = HMEFTI, length = HMEFT, color_in = MEFTI, color_out = MEF
459 INSTR_METER_HHM, /* index = H, length = H, color_in = MEFT */
460 INSTR_METER_HHI, /* index = H, length = H, color_in = I */
461 INSTR_METER_HMM, /* index = H, length = MEFT, color_in = MEFT */
462 INSTR_METER_HMI, /* index = H, length = MEFT, color_in = I */
463 INSTR_METER_MHM, /* index = MEFT, length = H, color_in = MEFT */
464 INSTR_METER_MHI, /* index = MEFT, length = H, color_in = I */
465 INSTR_METER_MMM, /* index = MEFT, length = MEFT, color_in = MEFT */
466 INSTR_METER_MMI, /* index = MEFT, length = MEFT, color_in = I */
467 INSTR_METER_IHM, /* index = I, length = H, color_in = MEFT */
468 INSTR_METER_IHI, /* index = I, length = H, color_in = I */
469 INSTR_METER_IMM, /* index = I, length = MEFT, color_in = MEFT */
470 INSTR_METER_IMI, /* index = I, length = MEFT, color_in = I */
479 /* learn LEARNER ACTION_NAME [ m.action_first_arg ] */
483 INSTR_LEARNER_FORGET,
485 /* extern e.obj.func */
496 /* jmpv LABEL h.header
497 * Jump if header is valid
501 /* jmpnv LABEL h.header
502 * Jump if header is invalid
507 * Jump if table lookup hit
512 * Jump if table lookup miss
519 INSTR_JMP_ACTION_HIT,
521 /* jmpna LABEL ACTION
522 * Jump if action not run
524 INSTR_JMP_ACTION_MISS,
527 * Jump if a is equal to b
528 * a = HMEFT, b = HMEFTI
530 INSTR_JMP_EQ, /* a = MEFT, b = MEFT */
531 INSTR_JMP_EQ_MH, /* a = MEFT, b = H */
532 INSTR_JMP_EQ_HM, /* a = H, b = MEFT */
533 INSTR_JMP_EQ_HH, /* a = H, b = H */
534 INSTR_JMP_EQ_I, /* (a, b) = (MEFT, I) or (a, b) = (H, I) */
537 * Jump if a is not equal to b
538 * a = HMEFT, b = HMEFTI
540 INSTR_JMP_NEQ, /* a = MEFT, b = MEFT */
541 INSTR_JMP_NEQ_MH, /* a = MEFT, b = H */
542 INSTR_JMP_NEQ_HM, /* a = H, b = MEFT */
543 INSTR_JMP_NEQ_HH, /* a = H, b = H */
544 INSTR_JMP_NEQ_I, /* (a, b) = (MEFT, I) or (a, b) = (H, I) */
547 * Jump if a is less than b
548 * a = HMEFT, b = HMEFTI
550 INSTR_JMP_LT, /* a = MEFT, b = MEFT */
551 INSTR_JMP_LT_MH, /* a = MEFT, b = H */
552 INSTR_JMP_LT_HM, /* a = H, b = MEFT */
553 INSTR_JMP_LT_HH, /* a = H, b = H */
554 INSTR_JMP_LT_MI, /* a = MEFT, b = I */
555 INSTR_JMP_LT_HI, /* a = H, b = I */
558 * Jump if a is greater than b
559 * a = HMEFT, b = HMEFTI
561 INSTR_JMP_GT, /* a = MEFT, b = MEFT */
562 INSTR_JMP_GT_MH, /* a = MEFT, b = H */
563 INSTR_JMP_GT_HM, /* a = H, b = MEFT */
564 INSTR_JMP_GT_HH, /* a = H, b = H */
565 INSTR_JMP_GT_MI, /* a = MEFT, b = I */
566 INSTR_JMP_GT_HI, /* a = H, b = I */
573 /* Start of custom instructions. */
577 struct instr_operand {
598 uint8_t header_id[8];
599 uint8_t struct_id[8];
604 struct instr_hdr_validity {
617 struct instr_extern_obj {
622 struct instr_extern_func {
626 struct instr_dst_src {
627 struct instr_operand dst;
629 struct instr_operand src;
634 struct instr_regarray {
639 struct instr_operand idx;
644 struct instr_operand dstsrc;
654 struct instr_operand idx;
658 struct instr_operand length;
661 struct instr_operand color_in;
662 uint32_t color_in_val;
665 struct instr_operand color_out;
670 uint8_t header_id[8];
671 uint8_t struct_id[8];
682 struct instruction *ip;
685 struct instr_operand a;
691 struct instr_operand b;
697 enum instruction_type type;
700 struct instr_dst_src mirror;
701 struct instr_hdr_validity valid;
702 struct instr_dst_src mov;
703 struct instr_regarray regarray;
704 struct instr_meter meter;
705 struct instr_dma dma;
706 struct instr_dst_src alu;
707 struct instr_table table;
708 struct instr_learn learn;
709 struct instr_extern_obj ext_obj;
710 struct instr_extern_func ext_func;
711 struct instr_jmp jmp;
715 struct instruction_data {
716 char label[RTE_SWX_NAME_SIZE];
717 char jmp_label[RTE_SWX_NAME_SIZE];
718 uint32_t n_users; /* user = jmp instruction to this instruction. */
722 typedef void (*instr_exec_t)(struct rte_swx_pipeline *);
728 (*action_func_t)(struct rte_swx_pipeline *p);
731 TAILQ_ENTRY(action) node;
732 char name[RTE_SWX_NAME_SIZE];
733 struct struct_type *st;
734 int *args_endianness; /* 0 = Host Byte Order (HBO); 1 = Network Byte Order (NBO). */
735 struct instruction *instructions;
736 struct instruction_data *instruction_data;
737 uint32_t n_instructions;
741 TAILQ_HEAD(action_tailq, action);
747 TAILQ_ENTRY(table_type) node;
748 char name[RTE_SWX_NAME_SIZE];
749 enum rte_swx_table_match_type match_type;
750 struct rte_swx_table_ops ops;
753 TAILQ_HEAD(table_type_tailq, table_type);
756 enum rte_swx_table_match_type match_type;
761 TAILQ_ENTRY(table) node;
762 char name[RTE_SWX_NAME_SIZE];
763 char args[RTE_SWX_NAME_SIZE];
764 struct table_type *type; /* NULL when n_fields == 0. */
767 struct match_field *fields;
769 struct header *header; /* Only valid when n_fields > 0. */
772 struct action **actions;
773 struct action *default_action;
774 uint8_t *default_action_data;
776 int default_action_is_const;
777 uint32_t action_data_size_max;
778 int *action_is_for_table_entries;
779 int *action_is_for_default_entry;
785 TAILQ_HEAD(table_tailq, table);
787 struct table_runtime {
788 rte_swx_table_lookup_t func;
793 struct table_statistics {
794 uint64_t n_pkts_hit[2]; /* 0 = Miss, 1 = Hit. */
795 uint64_t *n_pkts_action;
802 TAILQ_ENTRY(selector) node;
803 char name[RTE_SWX_NAME_SIZE];
805 struct field *group_id_field;
806 struct field **selector_fields;
807 uint32_t n_selector_fields;
808 struct header *selector_header;
809 struct field *member_id_field;
811 uint32_t n_groups_max;
812 uint32_t n_members_per_group_max;
817 TAILQ_HEAD(selector_tailq, selector);
819 struct selector_runtime {
821 uint8_t **group_id_buffer;
822 uint8_t **selector_buffer;
823 uint8_t **member_id_buffer;
826 struct selector_statistics {
834 TAILQ_ENTRY(learner) node;
835 char name[RTE_SWX_NAME_SIZE];
838 struct field **fields;
840 struct header *header;
843 struct action **actions;
844 struct action *default_action;
845 uint8_t *default_action_data;
847 int default_action_is_const;
848 uint32_t action_data_size_max;
849 int *action_is_for_table_entries;
850 int *action_is_for_default_entry;
857 TAILQ_HEAD(learner_tailq, learner);
859 struct learner_runtime {
864 struct learner_statistics {
865 uint64_t n_pkts_hit[2]; /* 0 = Miss, 1 = Hit. */
866 uint64_t n_pkts_learn[2]; /* 0 = Learn OK, 1 = Learn error. */
867 uint64_t n_pkts_forget;
868 uint64_t *n_pkts_action;
875 TAILQ_ENTRY(regarray) node;
876 char name[RTE_SWX_NAME_SIZE];
882 TAILQ_HEAD(regarray_tailq, regarray);
884 struct regarray_runtime {
892 struct meter_profile {
893 TAILQ_ENTRY(meter_profile) node;
894 char name[RTE_SWX_NAME_SIZE];
895 struct rte_meter_trtcm_params params;
896 struct rte_meter_trtcm_profile profile;
900 TAILQ_HEAD(meter_profile_tailq, meter_profile);
903 TAILQ_ENTRY(metarray) node;
904 char name[RTE_SWX_NAME_SIZE];
909 TAILQ_HEAD(metarray_tailq, metarray);
912 struct rte_meter_trtcm m;
913 struct meter_profile *profile;
914 enum rte_color color_mask;
917 uint64_t n_pkts[RTE_COLORS];
918 uint64_t n_bytes[RTE_COLORS];
921 struct metarray_runtime {
922 struct meter *metarray;
931 struct rte_swx_pkt pkt;
933 uint32_t *mirroring_slots;
934 uint64_t mirroring_slots_mask;
936 uint32_t recirc_pass_id;
941 /* Packet headers. */
942 struct header_runtime *headers; /* Extracted or generated headers. */
943 struct header_out_runtime *headers_out; /* Emitted headers. */
944 uint8_t *header_storage;
945 uint8_t *header_out_storage;
946 uint64_t valid_headers;
947 uint32_t n_headers_out;
949 /* Packet meta-data. */
953 struct table_runtime *tables;
954 struct selector_runtime *selectors;
955 struct learner_runtime *learners;
956 struct rte_swx_table_state *table_state;
958 int hit; /* 0 = Miss, 1 = Hit. */
962 /* Extern objects and functions. */
963 struct extern_obj_runtime *extern_objs;
964 struct extern_func_runtime *extern_funcs;
967 struct instruction *ip;
968 struct instruction *ret;
/* 64-bit mask bit manipulation helpers:
 * GET returns a non-zero value when bit *pos* of *mask* is set;
 * SET/CLR return *mask* with bit *pos* set/cleared (no in-place update).
 */
#define MASK64_BIT_GET(mask, pos) ((mask) & (1LLU << (pos)))
#define MASK64_BIT_SET(mask, pos) ((mask) | (1LLU << (pos)))
#define MASK64_BIT_CLR(mask, pos) ((mask) & ~(1LLU << (pos)))

/* Non-zero when header *header_id* is currently valid for *thread*. */
#define HEADER_VALID(thread, header_id) \
	MASK64_BIT_GET((thread)->valid_headers, header_id)
978 static inline uint64_t
979 instr_operand_hbo(struct thread *t, const struct instr_operand *x)
981 uint8_t *x_struct = t->structs[x->struct_id];
982 uint64_t *x64_ptr = (uint64_t *)&x_struct[x->offset];
983 uint64_t x64 = *x64_ptr;
984 uint64_t x64_mask = UINT64_MAX >> (64 - x->n_bits);
986 return x64 & x64_mask;
989 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
991 static inline uint64_t
992 instr_operand_nbo(struct thread *t, const struct instr_operand *x)
994 uint8_t *x_struct = t->structs[x->struct_id];
995 uint64_t *x64_ptr = (uint64_t *)&x_struct[x->offset];
996 uint64_t x64 = *x64_ptr;
998 return ntoh64(x64) >> (64 - x->n_bits);
1003 #define instr_operand_nbo instr_operand_hbo
/* ALU: dst = dst <operator> src, with both operands in Host Byte Order.
 * Each operand is loaded as a 64-bit word and masked to its n_bits size;
 * only the n_bits destination field is written back.
 */
#define ALU(thread, ip, operator)                                             \
{                                                                             \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id];     \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset];  \
	uint64_t dst64 = *dst64_ptr;                                          \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits);      \
	uint64_t dst = dst64 & dst64_mask;                                    \
                                                                              \
	uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id];     \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset];  \
	uint64_t src64 = *src64_ptr;                                          \
	uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits);      \
	uint64_t src = src64 & src64_mask;                                    \
                                                                              \
	uint64_t result = dst operator src;                                   \
                                                                              \
	*dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask);           \
}
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/* ALU_MH: dst = MEF (HBO), src = H (NBO): byte-swap src before the op. */
#define ALU_MH(thread, ip, operator)                                          \
{                                                                             \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id];     \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset];  \
	uint64_t dst64 = *dst64_ptr;                                          \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits);      \
	uint64_t dst = dst64 & dst64_mask;                                    \
                                                                              \
	uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id];     \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset];  \
	uint64_t src64 = *src64_ptr;                                          \
	uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits);          \
                                                                              \
	uint64_t result = dst operator src;                                   \
                                                                              \
	*dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask);           \
}

/* ALU_HM: dst = H (NBO), src = MEF (HBO): byte-swap dst in, result out. */
#define ALU_HM(thread, ip, operator)                                          \
{                                                                             \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id];     \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset];  \
	uint64_t dst64 = *dst64_ptr;                                          \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits);      \
	uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits);          \
                                                                              \
	uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id];     \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset];  \
	uint64_t src64 = *src64_ptr;                                          \
	uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits);      \
	uint64_t src = src64 & src64_mask;                                    \
                                                                              \
	uint64_t result = dst operator src;                                   \
	result = hton64(result << (64 - (ip)->alu.dst.n_bits));               \
                                                                              \
	*dst64_ptr = (dst64 & ~dst64_mask) | result;                          \
}

/* ALU_HM_FAST: as ALU_HM, but avoids the dst byte-swap by aligning src to
 * the NBO dst field instead; presumably only emitted when the operator
 * permits operating directly in NBO (e.g. and/or/xor) — confirm at caller.
 */
#define ALU_HM_FAST(thread, ip, operator)                                     \
{                                                                             \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id];     \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset];  \
	uint64_t dst64 = *dst64_ptr;                                          \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits);      \
	uint64_t dst = dst64 & dst64_mask;                                    \
                                                                              \
	uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id];     \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset];  \
	uint64_t src64 = *src64_ptr;                                          \
	uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits);      \
	uint64_t src = hton64(src64 & src64_mask) >> (64 - (ip)->alu.dst.n_bits); \
                                                                              \
	uint64_t result = dst operator src;                                   \
                                                                              \
	*dst64_ptr = (dst64 & ~dst64_mask) | result;                          \
}

/* ALU_HH: dst = H (NBO), src = H (NBO): byte-swap both in, result out. */
#define ALU_HH(thread, ip, operator)                                          \
{                                                                             \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id];     \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset];  \
	uint64_t dst64 = *dst64_ptr;                                          \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits);      \
	uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits);          \
                                                                              \
	uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id];     \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset];  \
	uint64_t src64 = *src64_ptr;                                          \
	uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits);          \
                                                                              \
	uint64_t result = dst operator src;                                   \
	result = hton64(result << (64 - (ip)->alu.dst.n_bits));               \
                                                                              \
	*dst64_ptr = (dst64 & ~dst64_mask) | result;                          \
}

/* ALU_HH_FAST: as ALU_HH, but keeps both operands in NBO: re-aligns src to
 * the dst field width without byte-swapping either operand.
 */
#define ALU_HH_FAST(thread, ip, operator)                                     \
{                                                                             \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id];     \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset];  \
	uint64_t dst64 = *dst64_ptr;                                          \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits);      \
	uint64_t dst = dst64 & dst64_mask;                                    \
                                                                              \
	uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id];     \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset];  \
	uint64_t src64 = *src64_ptr;                                          \
	uint64_t src = (src64 << (64 - (ip)->alu.src.n_bits)) >> (64 - (ip)->alu.dst.n_bits); \
                                                                              \
	uint64_t result = dst operator src;                                   \
                                                                              \
	*dst64_ptr = (dst64 & ~dst64_mask) | result;                          \
}

#else

/* On big endian hosts, NBO and HBO are identical, so the plain ALU works. */
#define ALU_MH ALU
#define ALU_HM ALU
#define ALU_HM_FAST ALU
#define ALU_HH ALU
#define ALU_HH_FAST ALU

#endif
/* ALU_I: dst = dst <operator> I, with dst in HBO and an immediate source
 * taken from the instruction itself (alu.src_val).
 */
#define ALU_I(thread, ip, operator)                                           \
{                                                                             \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id];     \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset];  \
	uint64_t dst64 = *dst64_ptr;                                          \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits);      \
	uint64_t dst = dst64 & dst64_mask;                                    \
                                                                              \
	uint64_t src = (ip)->alu.src_val;                                     \
                                                                              \
	uint64_t result = dst operator src;                                   \
                                                                              \
	*dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask);           \
}

/* Meta-data destinations are HBO, so the immediate variant is identical. */
#define ALU_MI ALU_I
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/* ALU_HI: dst = H (NBO), src = immediate: byte-swap dst in, result out. */
#define ALU_HI(thread, ip, operator)                                          \
{                                                                             \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id];     \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset];  \
	uint64_t dst64 = *dst64_ptr;                                          \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits);      \
	uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits);          \
                                                                              \
	uint64_t src = (ip)->alu.src_val;                                     \
                                                                              \
	uint64_t result = dst operator src;                                   \
	result = hton64(result << (64 - (ip)->alu.dst.n_bits));               \
                                                                              \
	*dst64_ptr = (dst64 & ~dst64_mask) | result;                          \
}

#else

/* On big endian hosts, header fields are already in host order. */
#define ALU_HI ALU_I

#endif
/* MOV: dst = src, both in HBO. The src value is masked to its n_bits size,
 * then written into the n_bits dst field; other dst bits are preserved.
 */
#define MOV(thread, ip)                                                       \
{                                                                             \
	uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id];     \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset];  \
	uint64_t dst64 = *dst64_ptr;                                          \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits);      \
                                                                              \
	uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id];     \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset];  \
	uint64_t src64 = *src64_ptr;                                          \
	uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits);      \
	uint64_t src = src64 & src64_mask;                                    \
                                                                              \
	*dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);              \
}
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/* MOV_MH: dst = MEF (HBO), src = H (NBO): byte-swap src before storing. */
#define MOV_MH(thread, ip)                                                    \
{                                                                             \
	uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id];     \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset];  \
	uint64_t dst64 = *dst64_ptr;                                          \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits);      \
                                                                              \
	uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id];     \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset];  \
	uint64_t src64 = *src64_ptr;                                          \
	uint64_t src = ntoh64(src64) >> (64 - (ip)->mov.src.n_bits);          \
                                                                              \
	*dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);              \
}

/* MOV_HM: dst = H (NBO), src = MEF (HBO): byte-swap the value on store. */
#define MOV_HM(thread, ip)                                                    \
{                                                                             \
	uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id];     \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset];  \
	uint64_t dst64 = *dst64_ptr;                                          \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits);      \
                                                                              \
	uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id];     \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset];  \
	uint64_t src64 = *src64_ptr;                                          \
	uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits);      \
	uint64_t src = src64 & src64_mask;                                    \
                                                                              \
	src = hton64(src) >> (64 - (ip)->mov.dst.n_bits);                     \
	*dst64_ptr = (dst64 & ~dst64_mask) | src;                             \
}

/* MOV_HH: dst = H, src = H: both NBO, so realign without byte-swapping. */
#define MOV_HH(thread, ip)                                                    \
{                                                                             \
	uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id];     \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset];  \
	uint64_t dst64 = *dst64_ptr;                                          \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits);      \
                                                                              \
	uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id];     \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset];  \
	uint64_t src64 = *src64_ptr;                                          \
                                                                              \
	uint64_t src = src64 << (64 - (ip)->mov.src.n_bits);                  \
	src = src >> (64 - (ip)->mov.dst.n_bits);                             \
	*dst64_ptr = (dst64 & ~dst64_mask) | src;                             \
}

#else

/* On big endian hosts, NBO and HBO are identical, so plain MOV works. */
#define MOV_MH MOV
#define MOV_HM MOV
#define MOV_HH MOV

#endif
/* MOV_I: dst = I, with the immediate source taken from the instruction
 * (mov.src_val) and masked to the dst field size.
 */
#define MOV_I(thread, ip)                                                     \
{                                                                             \
	uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id];     \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset];  \
	uint64_t dst64 = *dst64_ptr;                                          \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits);      \
                                                                              \
	uint64_t src = (ip)->mov.src_val;                                     \
                                                                              \
	*dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);              \
}
1259 #define JMP_CMP(thread, ip, operator) \
1261 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1262 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1263 uint64_t a64 = *a64_ptr; \
1264 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
1265 uint64_t a = a64 & a64_mask; \
1267 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1268 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1269 uint64_t b64 = *b64_ptr; \
1270 uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits); \
1271 uint64_t b = b64 & b64_mask; \
1273 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/* JMP_CMP_MH: a = MEFT (HBO), b = H (NBO): byte-swap b before comparing. */
#define JMP_CMP_MH(thread, ip, operator)                                      \
{                                                                             \
	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id];         \
	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset];        \
	uint64_t a64 = *a64_ptr;                                              \
	uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits);          \
	uint64_t a = a64 & a64_mask;                                          \
                                                                              \
	uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id];         \
	uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset];        \
	uint64_t b64 = *b64_ptr;                                              \
	uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits);                \
                                                                              \
	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1);    \
}

/* JMP_CMP_HM: a = H (NBO), b = MEFT (HBO): byte-swap a before comparing. */
#define JMP_CMP_HM(thread, ip, operator)                                      \
{                                                                             \
	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id];         \
	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset];        \
	uint64_t a64 = *a64_ptr;                                              \
	uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits);                \
                                                                              \
	uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id];         \
	uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset];        \
	uint64_t b64 = *b64_ptr;                                              \
	uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits);          \
	uint64_t b = b64 & b64_mask;                                          \
                                                                              \
	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1);    \
}

/* JMP_CMP_HH: a = H, b = H: byte-swap both before comparing. */
#define JMP_CMP_HH(thread, ip, operator)                                      \
{                                                                             \
	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id];         \
	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset];        \
	uint64_t a64 = *a64_ptr;                                              \
	uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits);                \
                                                                              \
	uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id];         \
	uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset];        \
	uint64_t b64 = *b64_ptr;                                              \
	uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits);                \
                                                                              \
	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1);    \
}

/* JMP_CMP_HH_FAST: compares both operands left-aligned in NBO, skipping the
 * byte swaps; presumably only used where this preserves the comparison
 * result (e.g. equality tests) — confirm at the instruction translator.
 */
#define JMP_CMP_HH_FAST(thread, ip, operator)                                 \
{                                                                             \
	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id];         \
	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset];        \
	uint64_t a64 = *a64_ptr;                                              \
	uint64_t a = a64 << (64 - (ip)->jmp.a.n_bits);                        \
                                                                              \
	uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id];         \
	uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset];        \
	uint64_t b64 = *b64_ptr;                                              \
	uint64_t b = b64 << (64 - (ip)->jmp.b.n_bits);                        \
                                                                              \
	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1);    \
}

#else

/* On big endian hosts, NBO and HBO are identical, so plain JMP_CMP works. */
#define JMP_CMP_MH JMP_CMP
#define JMP_CMP_HM JMP_CMP
#define JMP_CMP_HH JMP_CMP
#define JMP_CMP_HH_FAST JMP_CMP

#endif
1349 #define JMP_CMP_I(thread, ip, operator) \
1351 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1352 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1353 uint64_t a64 = *a64_ptr; \
1354 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
1355 uint64_t a = a64 & a64_mask; \
1357 uint64_t b = (ip)->jmp.b_val; \
1359 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1362 #define JMP_CMP_MI JMP_CMP_I
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/* JMP_CMP_HI: a = H (NBO), b = immediate: byte-swap a before comparing. */
#define JMP_CMP_HI(thread, ip, operator)                                      \
{                                                                             \
	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id];         \
	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset];        \
	uint64_t a64 = *a64_ptr;                                              \
	uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits);                \
                                                                              \
	uint64_t b = (ip)->jmp.b_val;                                         \
                                                                              \
	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1);    \
}

#else

/* On big endian hosts, header fields are already in host order. */
#define JMP_CMP_HI JMP_CMP_I

#endif
/* Read an n_bits meta-data field at byte *offset*; evaluates to the masked
 * HBO value (GNU statement expression).
 */
#define METADATA_READ(thread, offset, n_bits)                                 \
({                                                                            \
	uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset];          \
	uint64_t m64 = *m64_ptr;                                              \
	uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits));                    \
	(m64 & m64_mask);                                                     \
})
/* Write *value* into an n_bits meta-data field at byte *offset*; bits
 * outside the field are preserved.
 */
#define METADATA_WRITE(thread, offset, n_bits, value)                         \
{                                                                             \
	uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset];          \
	uint64_t m64 = *m64_ptr;                                              \
	uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits));                    \
                                                                              \
	uint64_t m_new = value;                                               \
                                                                              \
	*m64_ptr = (m64 & ~m64_mask) | (m_new & m64_mask);                    \
}
/* Number of pipeline threads; overridable at build time, must be a power
 * of two (the thread_yield helpers use "& (MAX - 1)" wrap-around).
 */
#ifndef RTE_SWX_PIPELINE_THREADS_MAX
#define RTE_SWX_PIPELINE_THREADS_MAX 16
#endif

/* Size of the per-pipeline instruction dispatch table; overridable. */
#ifndef RTE_SWX_PIPELINE_INSTRUCTION_TABLE_SIZE_MAX
#define RTE_SWX_PIPELINE_INSTRUCTION_TABLE_SIZE_MAX 256
#endif
/*
 * Pipeline instance: configuration-time object registries, run-time arrays
 * and per-thread state.
 * NOTE(review): a few counter fields are missing from this extracted chunk
 * (original lines 1447-1449, 1456-1457, 1462-1464); verify upstream.
 */
1411 struct rte_swx_pipeline {
/* Configuration-time registries (tailq lists built during pipeline setup). */
1412 struct struct_type_tailq struct_types;
1413 struct port_in_type_tailq port_in_types;
1414 struct port_in_tailq ports_in;
1415 struct port_out_type_tailq port_out_types;
1416 struct port_out_tailq ports_out;
1417 struct extern_type_tailq extern_types;
1418 struct extern_obj_tailq extern_objs;
1419 struct extern_func_tailq extern_funcs;
1420 struct header_tailq headers;
1421 struct struct_type *metadata_st;
1422 uint32_t metadata_struct_id;
1423 struct action_tailq actions;
1424 struct table_type_tailq table_types;
1425 struct table_tailq tables;
1426 struct selector_tailq selectors;
1427 struct learner_tailq learners;
1428 struct regarray_tailq regarrays;
1429 struct meter_profile_tailq meter_profiles;
1430 struct metarray_tailq metarrays;
/* Run-time arrays, indexed by port/action/table/etc. ID. */
1432 struct port_in_runtime *in;
1433 struct port_out_runtime *out;
1434 struct mirroring_session *mirroring_sessions;
1435 struct instruction **action_instructions;
1436 action_func_t *action_funcs;
1437 struct rte_swx_table_state *table_state;
1438 struct table_statistics *table_stats;
1439 struct selector_statistics *selector_stats;
1440 struct learner_statistics *learner_stats;
1441 struct regarray_runtime *regarray_runtime;
1442 struct metarray_runtime *metarray_runtime;
1443 struct instruction *instructions;
1444 struct instruction_data *instruction_data;
1445 instr_exec_t *instruction_table;
/* Per-thread packet-processing state. */
1446 struct thread threads[RTE_SWX_PIPELINE_THREADS_MAX];
/* Object counts; port/slot/session counts are relied upon to be powers of
 * two by the & (n - 1) wrap-around logic below.
 */
1450 uint32_t n_ports_in;
1451 uint32_t n_ports_out;
1452 uint32_t n_mirroring_slots;
1453 uint32_t n_mirroring_sessions;
1454 uint32_t n_extern_objs;
1455 uint32_t n_extern_funcs;
1458 uint32_t n_selectors;
1459 uint32_t n_learners;
1460 uint32_t n_regarrays;
1461 uint32_t n_metarrays;
1465 uint32_t n_instructions;
1474 pipeline_port_inc(struct rte_swx_pipeline *p)
1476 p->port_id = (p->port_id + 1) & (p->n_ports_in - 1);
1480 thread_ip_reset(struct rte_swx_pipeline *p, struct thread *t)
1482 t->ip = p->instructions;
/* Point the thread's instruction pointer at an arbitrary instruction. */
1486 thread_ip_set(struct thread *t, struct instruction *ip)
/* Branch the thread to the first instruction of the given action.
 * NOTE(review): parameter list tail and braces are missing from this chunk.
 */
1492 thread_ip_action_call(struct rte_swx_pipeline *p,
1497 t->ip = p->action_instructions[action_id];
/* Advance the current thread's instruction pointer by one instruction. */
1501 thread_ip_inc(struct rte_swx_pipeline *p);
1504 thread_ip_inc(struct rte_swx_pipeline *p)
1506 struct thread *t = &p->threads[p->thread_id];
/* Conditionally advance the instruction pointer (cond is 0 or 1), used to
 * re-execute the same instruction when cond == 0 (e.g. RX got no packet).
 */
1512 thread_ip_inc_cond(struct thread *t, int cond)
1518 thread_yield(struct rte_swx_pipeline *p)
1520 p->thread_id = (p->thread_id + 1) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
1524 thread_yield_cond(struct rte_swx_pipeline *p, int cond)
1526 p->thread_id = (p->thread_id + cond) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
/*
 * RX instruction: either keep the current packet (recirculation pass) or pull
 * a new packet from the current input port. Resets per-packet thread state
 * (valid headers, emitted headers, mirroring mask) and latches the table
 * state pointer. Returns non-zero when a packet is available for processing.
 */
1533 __instr_rx_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
1535 struct port_in_runtime *port = &p->in[p->port_id];
1536 struct rte_swx_pkt *pkt = &t->pkt;
1539 /* Recirculation: keep the current packet. */
1540 if (t->recirculate) {
1541 TRACE("[Thread %2u] rx - recirculate (pass %u)\n",
1543 t->recirc_pass_id + 1);
/* Re-arm the packet pointer and bump the recirculation pass counter. */
1546 t->ptr = &pkt->pkt[pkt->offset];
1547 t->mirroring_slots_mask = 0;
1549 t->recirc_pass_id++;
1552 t->valid_headers = 0;
1553 t->n_headers_out = 0;
1556 t->table_state = p->table_state;
/* Regular path: receive a new packet from the current input port. */
1562 pkt_received = port->pkt_rx(port->obj, pkt);
1563 t->ptr = &pkt->pkt[pkt->offset];
1564 rte_prefetch0(t->ptr);
1566 TRACE("[Thread %2u] rx %s from port %u\n",
1568 pkt_received ? "1 pkt" : "0 pkts",
1571 t->mirroring_slots_mask = 0;
1572 t->recirc_pass_id = 0;
1575 t->valid_headers = 0;
1576 t->n_headers_out = 0;
/* Store the RX port ID into the designated packet meta-data field. */
1579 METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, p->port_id);
1582 t->table_state = p->table_state;
/* Move on to the next input port for the next RX. */
1585 pipeline_port_inc(p);
1587 return pkt_received;
/*
 * RX instruction dispatcher wrapper: runs __instr_rx_exec for the current
 * thread, then advances the instruction pointer only when a packet was
 * received (otherwise the RX instruction is retried).
 */
1591 instr_rx_exec(struct rte_swx_pipeline *p)
1593 struct thread *t = &p->threads[p->thread_id];
1594 struct instruction *ip = t->ip;
1598 pkt_received = __instr_rx_exec(p, t, ip);
1601 thread_ip_inc_cond(t, pkt_received);
/*
 * Write the emitted headers back in front of the packet payload before TX.
 * Three cases, from cheapest to most general:
 *  1) single emitted header already contiguous with the payload: only adjust
 *     packet offset/length (covers no-change and pure decapsulation);
 *  2) two emitted headers where the second is contiguous with the payload and
 *     the first is still in its original buffer: one memcpy (encapsulation);
 *  3) anything else: gather all emitted headers into header_out_storage, then
 *     copy the whole blob in front of the payload.
 */
1609 emit_handler(struct thread *t)
1611 struct header_out_runtime *h0 = &t->headers_out[0];
1612 struct header_out_runtime *h1 = &t->headers_out[1];
1613 uint32_t offset = 0, i;
1615 /* No header change or header decapsulation. */
1616 if ((t->n_headers_out == 1) &&
1617 (h0->ptr + h0->n_bytes == t->ptr)) {
1618 TRACE("Emit handler: no header change or header decap.\n");
1620 t->pkt.offset -= h0->n_bytes;
1621 t->pkt.length += h0->n_bytes;
1626 /* Header encapsulation (optionally, with prior header decapsulation). */
1627 if ((t->n_headers_out == 2) &&
1628 (h1->ptr + h1->n_bytes == t->ptr) &&
1629 (h0->ptr == h0->ptr0)) {
1632 TRACE("Emit handler: header encapsulation.\n");
1634 offset = h0->n_bytes + h1->n_bytes;
1635 memcpy(t->ptr - offset, h0->ptr, h0->n_bytes);
1636 t->pkt.offset -= offset;
1637 t->pkt.length += offset;
1642 /* For any other case. */
1643 TRACE("Emit handler: complex case.\n");
/* Gather all emitted headers into contiguous scratch storage. */
1645 for (i = 0; i < t->n_headers_out; i++) {
1646 struct header_out_runtime *h = &t->headers_out[i];
1648 memcpy(&t->header_out_storage[offset], h->ptr, h->n_bytes);
1649 offset += h->n_bytes;
/* Prepend the gathered headers to the payload. */
1653 memcpy(t->ptr - offset, t->header_out_storage, offset);
1654 t->pkt.offset -= offset;
1655 t->pkt.length += offset;
/*
 * Send a copy of the packet to every mirroring session armed in the thread's
 * slot mask. Each set bit in mirroring_slots_mask selects a slot, whose slot
 * entry names the session; the session picks the output port and whether a
 * fast (shallow) or full clone (with truncation) is transmitted.
 */
1660 mirroring_handler(struct rte_swx_pipeline *p, struct thread *t, struct rte_swx_pkt *pkt)
1662 uint64_t slots_mask = t->mirroring_slots_mask, slot_mask;
1665 for (slot_id = 0, slot_mask = 1LLU ; slots_mask; slot_id++, slot_mask <<= 1)
1666 if (slot_mask & slots_mask) {
1667 struct port_out_runtime *port;
1668 struct mirroring_session *session;
1669 uint32_t port_id, session_id;
1671 session_id = t->mirroring_slots[slot_id];
1672 session = &p->mirroring_sessions[session_id];
1674 port_id = session->port_id;
1675 port = &p->out[port_id];
1677 if (session->fast_clone)
1678 port->pkt_fast_clone_tx(port->obj, pkt);
1680 port->pkt_clone_tx(port->obj, pkt, session->truncation_length);
/* Clear the serviced slot so the loop terminates when the mask is empty. */
1682 slots_mask &= ~slot_mask;
/*
 * TX instruction: transmit the packet to the output port whose ID is read
 * from a packet meta-data field. On a recirculation pass only the mirror
 * copies are sent and the packet itself is kept for the next pass.
 */
1687 __instr_tx_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
1689 uint64_t port_id = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
1690 struct port_out_runtime *port = &p->out[port_id];
1691 struct rte_swx_pkt *pkt = &t->pkt;
1693 /* Recirculation: keep the current packet. */
1694 if (t->recirculate) {
1695 TRACE("[Thread %2u]: tx 1 pkt - recirculate\n",
/* Mirror copies are still produced on the recirculation path. */
1702 mirroring_handler(p, t, pkt);
1707 TRACE("[Thread %2u]: tx 1 pkt to port %u\n",
/* Regular path: mirror (if armed), then transmit. */
1715 mirroring_handler(p, t, pkt);
1716 port->pkt_tx(port->obj, pkt);
/*
 * TX immediate instruction: same as __instr_tx_exec, except the output port
 * ID is an immediate operand baked into the instruction instead of being
 * read from packet meta-data.
 */
1720 __instr_tx_i_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
1722 uint64_t port_id = ip->io.io.val;
1723 struct port_out_runtime *port = &p->out[port_id];
1724 struct rte_swx_pkt *pkt = &t->pkt;
1726 /* Recirculation: keep the current packet. */
1727 if (t->recirculate) {
1728 TRACE("[Thread %2u]: tx (i) 1 pkt - recirculate\n",
/* Mirror copies are still produced on the recirculation path. */
1735 mirroring_handler(p, t, pkt);
1740 TRACE("[Thread %2u]: tx (i) 1 pkt to port %u\n",
/* Regular path: mirror (if armed), then transmit. */
1748 mirroring_handler(p, t, pkt);
1749 port->pkt_tx(port->obj, pkt);
/*
 * Drop instruction: transmit the packet to the last output port, which by
 * convention is the drop port (port n_ports_out - 1). Mirror copies, if
 * armed, are still produced before the drop.
 */
1753 __instr_drop_exec(struct rte_swx_pipeline *p,
1755 const struct instruction *ip __rte_unused)
1757 uint64_t port_id = p->n_ports_out - 1;
1758 struct port_out_runtime *port = &p->out[port_id];
1759 struct rte_swx_pkt *pkt = &t->pkt;
1761 TRACE("[Thread %2u]: drop 1 pkt\n",
1768 mirroring_handler(p, t, pkt);
1769 port->pkt_tx(port->obj, pkt);
/*
 * Mirror instruction: arm a mirroring slot with a session ID. Both operands
 * are read in host byte order and wrapped into their valid range via
 * power-of-two masking; the actual mirror copy is produced later by
 * mirroring_handler() at TX/drop time.
 */
1773 __instr_mirror_exec(struct rte_swx_pipeline *p,
1775 const struct instruction *ip)
1777 uint64_t slot_id = instr_operand_hbo(t, &ip->mirror.dst);
1778 uint64_t session_id = instr_operand_hbo(t, &ip->mirror.src);
/* n_mirroring_slots and n_mirroring_sessions are powers of two. */
1780 slot_id &= p->n_mirroring_slots - 1;
1781 session_id &= p->n_mirroring_sessions - 1;
1783 TRACE("[Thread %2u]: mirror pkt (slot = %u, session = %u)\n",
1786 (uint32_t)session_id;
1788 t->mirroring_slots[slot_id] = session_id;
1789 t->mirroring_slots_mask |= 1LLU << slot_id;
/*
 * Recirculate instruction: mark the packet for another pipeline pass.
 * NOTE(review): the flag-setting statement is on a line missing from this
 * extracted chunk — presumably t->recirculate is set here; verify upstream.
 */
1793 __instr_recirculate_exec(struct rte_swx_pipeline *p __rte_unused,
1795 const struct instruction *ip __rte_unused)
1797 TRACE("[Thread %2u]: recirculate\n",
/*
 * Recircid instruction: store the current recirculation pass counter into a
 * packet meta-data field so the program can branch on the pass number.
 */
1804 __instr_recircid_exec(struct rte_swx_pipeline *p __rte_unused,
1806 const struct instruction *ip)
1808 TRACE("[Thread %2u]: recircid (pass %u)\n",
1813 METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, t->recirc_pass_id);
/*
 * Extract n_extract consecutive headers from the packet front: for each
 * header, point its struct entry at the current packet position, mark it
 * valid, and advance by its byte size. Packet offset/length/pointer are
 * updated once at the end.
 * NOTE(review): the per-iteration pointer/offset advance lines are missing
 * from this extracted chunk; verify upstream.
 */
1820 __instr_hdr_extract_many_exec(struct rte_swx_pipeline *p __rte_unused,
1822 const struct instruction *ip,
1825 uint64_t valid_headers = t->valid_headers;
1826 uint8_t *ptr = t->ptr;
1827 uint32_t offset = t->pkt.offset;
1828 uint32_t length = t->pkt.length;
1831 for (i = 0; i < n_extract; i++) {
1832 uint32_t header_id = ip->io.hdr.header_id[i];
1833 uint32_t struct_id = ip->io.hdr.struct_id[i];
1834 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
1836 TRACE("[Thread %2u]: extract header %u (%u bytes)\n",
/* Map the header struct directly onto the packet buffer (zero copy). */
1842 t->structs[struct_id] = ptr;
1843 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
/* Commit the accumulated state back to the thread. */
1852 t->valid_headers = valid_headers;
1855 t->pkt.offset = offset;
1856 t->pkt.length = length;
/*
 * Fused header-extract wrappers: extract 1..8 consecutive headers with a
 * single instruction by delegating to __instr_hdr_extract_many_exec() with
 * the corresponding count.
 */
1861 __instr_hdr_extract_exec(struct rte_swx_pipeline *p,
1863 const struct instruction *ip)
1865 __instr_hdr_extract_many_exec(p, t, ip, 1);
1869 __instr_hdr_extract2_exec(struct rte_swx_pipeline *p,
1871 const struct instruction *ip)
1873 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n", p->thread_id);
1875 __instr_hdr_extract_many_exec(p, t, ip, 2);
1879 __instr_hdr_extract3_exec(struct rte_swx_pipeline *p,
1881 const struct instruction *ip)
1883 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n", p->thread_id);
1885 __instr_hdr_extract_many_exec(p, t, ip, 3);
1889 __instr_hdr_extract4_exec(struct rte_swx_pipeline *p,
1891 const struct instruction *ip)
1893 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n", p->thread_id);
1895 __instr_hdr_extract_many_exec(p, t, ip, 4);
1899 __instr_hdr_extract5_exec(struct rte_swx_pipeline *p,
1901 const struct instruction *ip)
1903 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n", p->thread_id);
1905 __instr_hdr_extract_many_exec(p, t, ip, 5);
1909 __instr_hdr_extract6_exec(struct rte_swx_pipeline *p,
1911 const struct instruction *ip)
1913 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n", p->thread_id);
1915 __instr_hdr_extract_many_exec(p, t, ip, 6);
1919 __instr_hdr_extract7_exec(struct rte_swx_pipeline *p,
1921 const struct instruction *ip)
1923 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n", p->thread_id);
1925 __instr_hdr_extract_many_exec(p, t, ip, 7);
1929 __instr_hdr_extract8_exec(struct rte_swx_pipeline *p,
1931 const struct instruction *ip)
1933 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n", p->thread_id);
1935 __instr_hdr_extract_many_exec(p, t, ip, 8);
/*
 * Variable-size header extract: the header has a fixed part (n_bytes from
 * the instruction) plus a variable tail whose size is read at run-time from
 * a packet meta-data field. The header struct is mapped onto the packet
 * buffer and the packet pointer/offset/length advance by the total size.
 */
1939 __instr_hdr_extract_m_exec(struct rte_swx_pipeline *p __rte_unused,
1941 const struct instruction *ip)
1943 uint64_t valid_headers = t->valid_headers;
1944 uint8_t *ptr = t->ptr;
1945 uint32_t offset = t->pkt.offset;
1946 uint32_t length = t->pkt.length;
/* Size of the variable tail, read from meta-data. */
1948 uint32_t n_bytes_last = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
1949 uint32_t header_id = ip->io.hdr.header_id[0];
1950 uint32_t struct_id = ip->io.hdr.struct_id[0];
1951 uint32_t n_bytes = ip->io.hdr.n_bytes[0];
1953 struct header_runtime *h = &t->headers[header_id];
1955 TRACE("[Thread %2u]: extract header %u (%u + %u bytes)\n",
/* Total header size = fixed part + variable tail. */
1961 n_bytes += n_bytes_last;
1964 t->structs[struct_id] = ptr;
1965 t->valid_headers = MASK64_BIT_SET(valid_headers, header_id);
/* Remember the actual size for the later emit of this header. */
1966 h->n_bytes = n_bytes;
1969 t->pkt.offset = offset + n_bytes;
1970 t->pkt.length = length - n_bytes;
1971 t->ptr = ptr + n_bytes;
/*
 * Lookahead: map a header struct onto the current packet position and mark
 * it valid WITHOUT consuming packet bytes — pkt.offset/length and t->ptr are
 * left unchanged, unlike extract.
 */
1975 __instr_hdr_lookahead_exec(struct rte_swx_pipeline *p __rte_unused,
1977 const struct instruction *ip)
1979 uint64_t valid_headers = t->valid_headers;
1980 uint8_t *ptr = t->ptr;
1982 uint32_t header_id = ip->io.hdr.header_id[0];
1983 uint32_t struct_id = ip->io.hdr.struct_id[0];
1985 TRACE("[Thread %2u]: lookahead header %u\n",
1990 t->structs[struct_id] = ptr;
1991 t->valid_headers = MASK64_BIT_SET(valid_headers, header_id);
/*
 * Queue n_emit headers for emission: append each valid header to the
 * headers_out[] list, merging a header that is contiguous in memory with the
 * tail of the previous out-header into a single entry (so emit_handler can
 * use a single memcpy). Invalid headers are skipped.
 */
1998 __instr_hdr_emit_many_exec(struct rte_swx_pipeline *p __rte_unused,
2000 const struct instruction *ip,
2003 uint64_t valid_headers = t->valid_headers;
2004 uint32_t n_headers_out = t->n_headers_out;
2005 struct header_out_runtime *ho = NULL;
2006 uint8_t *ho_ptr = NULL;
2007 uint32_t ho_nbytes = 0, i;
2009 for (i = 0; i < n_emit; i++) {
2010 uint32_t header_id = ip->io.hdr.header_id[i];
2011 uint32_t struct_id = ip->io.hdr.struct_id[i];
2013 struct header_runtime *hi = &t->headers[header_id];
2014 uint8_t *hi_ptr0 = hi->ptr0;
2015 uint32_t n_bytes = hi->n_bytes;
2017 uint8_t *hi_ptr = t->structs[struct_id];
/* Skip headers that are not currently valid. */
2019 if (!MASK64_BIT_GET(valid_headers, header_id)) {
2020 TRACE("[Thread %2u]: emit header %u (invalid)\n",
2027 TRACE("[Thread %2u]: emit header %u (valid)\n",
/* First emitted header: start the out-header list. */
2033 if (!n_headers_out) {
2034 ho = &t->headers_out[0];
2040 ho_nbytes = n_bytes;
/* Subsequent headers: try to merge with the previous out-header. */
2046 ho = &t->headers_out[n_headers_out - 1];
2049 ho_nbytes = ho->n_bytes;
/* Contiguous with the previous out-header: extend it in place. */
2053 if (ho_ptr + ho_nbytes == hi_ptr) {
2054 ho_nbytes += n_bytes;
2056 ho->n_bytes = ho_nbytes;
2063 ho_nbytes = n_bytes;
/* Commit the final out-header size and count. */
2070 ho->n_bytes = ho_nbytes;
2071 t->n_headers_out = n_headers_out;
/*
 * Fused emit/TX wrappers: queue 1..8 headers for emission and, for the _tx
 * variants, immediately transmit by chaining __instr_tx_exec().
 */
2075 __instr_hdr_emit_exec(struct rte_swx_pipeline *p,
2077 const struct instruction *ip)
2079 __instr_hdr_emit_many_exec(p, t, ip, 1);
2083 __instr_hdr_emit_tx_exec(struct rte_swx_pipeline *p,
2085 const struct instruction *ip)
2087 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n", p->thread_id);
2089 __instr_hdr_emit_many_exec(p, t, ip, 1);
2090 __instr_tx_exec(p, t, ip);
2094 __instr_hdr_emit2_tx_exec(struct rte_swx_pipeline *p,
2096 const struct instruction *ip)
2098 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n", p->thread_id);
2100 __instr_hdr_emit_many_exec(p, t, ip, 2);
2101 __instr_tx_exec(p, t, ip);
2105 __instr_hdr_emit3_tx_exec(struct rte_swx_pipeline *p,
2107 const struct instruction *ip)
2109 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n", p->thread_id);
2111 __instr_hdr_emit_many_exec(p, t, ip, 3);
2112 __instr_tx_exec(p, t, ip);
2116 __instr_hdr_emit4_tx_exec(struct rte_swx_pipeline *p,
2118 const struct instruction *ip)
2120 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n", p->thread_id);
2122 __instr_hdr_emit_many_exec(p, t, ip, 4);
2123 __instr_tx_exec(p, t, ip);
2127 __instr_hdr_emit5_tx_exec(struct rte_swx_pipeline *p,
2129 const struct instruction *ip)
2131 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n", p->thread_id);
2133 __instr_hdr_emit_many_exec(p, t, ip, 5);
2134 __instr_tx_exec(p, t, ip);
2138 __instr_hdr_emit6_tx_exec(struct rte_swx_pipeline *p,
2140 const struct instruction *ip)
2142 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n", p->thread_id);
2144 __instr_hdr_emit_many_exec(p, t, ip, 6);
2145 __instr_tx_exec(p, t, ip);
2149 __instr_hdr_emit7_tx_exec(struct rte_swx_pipeline *p,
2151 const struct instruction *ip)
2153 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n", p->thread_id);
2155 __instr_hdr_emit_many_exec(p, t, ip, 7);
2156 __instr_tx_exec(p, t, ip);
2160 __instr_hdr_emit8_tx_exec(struct rte_swx_pipeline *p,
2162 const struct instruction *ip)
2164 TRACE("[Thread %2u] *** The next 9 instructions are fused. ***\n", p->thread_id);
2166 __instr_hdr_emit_many_exec(p, t, ip, 8);
2167 __instr_tx_exec(p, t, ip);
/* Validate instruction: set the header's bit in the valid-headers mask. */
2174 __instr_hdr_validate_exec(struct rte_swx_pipeline *p __rte_unused,
2176 const struct instruction *ip)
2178 uint32_t header_id = ip->valid.header_id;
2180 TRACE("[Thread %2u] validate header %u\n", p->thread_id, header_id);
2183 t->valid_headers = MASK64_BIT_SET(t->valid_headers, header_id);
/* Invalidate instruction: clear the header's bit in the valid-headers mask. */
2190 __instr_hdr_invalidate_exec(struct rte_swx_pipeline *p __rte_unused,
2192 const struct instruction *ip)
2194 uint32_t header_id = ip->valid.header_id;
2196 TRACE("[Thread %2u] invalidate header %u\n", p->thread_id, header_id);
2199 t->valid_headers = MASK64_BIT_CLR(t->valid_headers, header_id);
/*
 * Learn instruction: add an entry to the current learner table. The key is
 * taken from meta-data at mf_offset; the learner table state lives after all
 * regular tables and selectors in the table_state[] array. The per-learner
 * statistics record success/failure (status indexes n_pkts_learn[]).
 */
2206 __instr_learn_exec(struct rte_swx_pipeline *p,
2208 const struct instruction *ip)
2210 uint64_t action_id = ip->learn.action_id;
2211 uint32_t mf_offset = ip->learn.mf_offset;
2212 uint32_t learner_id = t->learner_id;
/* Learner table state: located after the n_tables + n_selectors entries. */
2213 struct rte_swx_table_state *ts = &t->table_state[p->n_tables +
2214 p->n_selectors + learner_id];
2215 struct learner_runtime *l = &t->learners[learner_id];
2216 struct learner_statistics *stats = &p->learner_stats[learner_id];
2220 status = rte_swx_table_learner_add(ts->obj,
2224 &t->metadata[mf_offset],
2227 TRACE("[Thread %2u] learner %u learn %s\n",
2230 status ? "ok" : "error");
2232 stats->n_pkts_learn[status] += 1;
/*
 * Forget instruction: delete the current entry from the current learner
 * table (the entry context is carried in the learner mailbox) and bump the
 * forget statistics counter.
 */
2239 __instr_forget_exec(struct rte_swx_pipeline *p,
2241 const struct instruction *ip __rte_unused)
2243 uint32_t learner_id = t->learner_id;
/* Learner table state: located after the n_tables + n_selectors entries. */
2244 struct rte_swx_table_state *ts = &t->table_state[p->n_tables +
2245 p->n_selectors + learner_id];
2246 struct learner_runtime *l = &t->learners[learner_id];
2247 struct learner_statistics *stats = &p->learner_stats[learner_id];
2250 rte_swx_table_learner_delete(ts->obj, l->mailbox);
2252 TRACE("[Thread %2u] learner %u forget\n",
2256 stats->n_pkts_forget += 1;
/*
 * Extern-object call: invoke member function func_id on extern object
 * obj_id, passing the object's mailbox. Returns the function's completion
 * status (non-zero when done), which the caller uses to decide whether to
 * yield and retry.
 */
2262 static inline uint32_t
2263 __instr_extern_obj_exec(struct rte_swx_pipeline *p __rte_unused,
2265 const struct instruction *ip)
2267 uint32_t obj_id = ip->ext_obj.ext_obj_id;
2268 uint32_t func_id = ip->ext_obj.func_id;
2269 struct extern_obj_runtime *obj = &t->extern_objs[obj_id];
2270 rte_swx_extern_type_member_func_t func = obj->funcs[func_id];
2273 TRACE("[Thread %2u] extern obj %u member func %u\n",
2278 done = func(obj->obj, obj->mailbox);
/*
 * Extern-function call: invoke the registered extern function with its
 * mailbox. Returns the function's completion status (non-zero when done).
 */
2283 static inline uint32_t
2284 __instr_extern_func_exec(struct rte_swx_pipeline *p __rte_unused,
2286 const struct instruction *ip)
2288 uint32_t ext_func_id = ip->ext_func.ext_func_id;
2289 struct extern_func_runtime *ext_func = &t->extern_funcs[ext_func_id];
2290 rte_swx_extern_func_t func = ext_func->func;
2293 TRACE("[Thread %2u] extern func %u\n",
2297 done = func(ext_func->mailbox);
/*
 * Mov instruction variants. The suffix encodes operand location/byte order:
 * (no suffix) meta-to-meta, mh = meta dst / header src, hm = header dst /
 * meta src, hh = header-to-header, i = immediate source. The actual copy is
 * performed by MOV* macros on lines missing from this extracted chunk.
 */
2306 __instr_mov_exec(struct rte_swx_pipeline *p __rte_unused,
2308 const struct instruction *ip)
2310 TRACE("[Thread %2u] mov\n", p->thread_id);
2316 __instr_mov_mh_exec(struct rte_swx_pipeline *p __rte_unused,
2318 const struct instruction *ip)
2320 TRACE("[Thread %2u] mov (mh)\n", p->thread_id);
2326 __instr_mov_hm_exec(struct rte_swx_pipeline *p __rte_unused,
2328 const struct instruction *ip)
2330 TRACE("[Thread %2u] mov (hm)\n", p->thread_id);
2336 __instr_mov_hh_exec(struct rte_swx_pipeline *p __rte_unused,
2338 const struct instruction *ip)
2340 TRACE("[Thread %2u] mov (hh)\n", p->thread_id);
2346 __instr_mov_i_exec(struct rte_swx_pipeline *p __rte_unused,
2348 const struct instruction *ip)
2350 TRACE("[Thread %2u] mov m.f %" PRIx64 "\n", p->thread_id, ip->mov.src_val);
/*
 * DMA header <- table action data: copy n_dma blocks of action data
 * (structs[0]) into header buffers. For a header that is not currently
 * valid, the copy targets its original buffer (ptr0) and the header is
 * marked valid afterwards.
 */
2359 __instr_dma_ht_many_exec(struct rte_swx_pipeline *p __rte_unused,
2361 const struct instruction *ip,
/* structs[0] is the action data by convention. */
2364 uint8_t *action_data = t->structs[0];
2365 uint64_t valid_headers = t->valid_headers;
2368 for (i = 0; i < n_dma; i++) {
2369 uint32_t header_id = ip->dma.dst.header_id[i];
2370 uint32_t struct_id = ip->dma.dst.struct_id[i];
2371 uint32_t offset = ip->dma.src.offset[i];
2372 uint32_t n_bytes = ip->dma.n_bytes[i];
2374 struct header_runtime *h = &t->headers[header_id];
2375 uint8_t *h_ptr0 = h->ptr0;
2376 uint8_t *h_ptr = t->structs[struct_id];
/* Valid header: copy in place; invalid: copy into the original buffer. */
2378 void *dst = MASK64_BIT_GET(valid_headers, header_id) ?
2380 void *src = &action_data[offset];
2382 TRACE("[Thread %2u] dma h.s t.f\n", p->thread_id);
2385 memcpy(dst, src, n_bytes);
2386 t->structs[struct_id] = dst;
2387 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
2390 t->valid_headers = valid_headers;
/*
 * Fused DMA wrappers: perform 1..8 header-from-action-data copies with a
 * single instruction by delegating to __instr_dma_ht_many_exec().
 */
2394 __instr_dma_ht_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
2396 __instr_dma_ht_many_exec(p, t, ip, 1);
2400 __instr_dma_ht2_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
2402 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n", p->thread_id);
2404 __instr_dma_ht_many_exec(p, t, ip, 2);
2408 __instr_dma_ht3_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
2410 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n", p->thread_id);
2412 __instr_dma_ht_many_exec(p, t, ip, 3);
2416 __instr_dma_ht4_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
2418 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n", p->thread_id);
2420 __instr_dma_ht_many_exec(p, t, ip, 4);
2424 __instr_dma_ht5_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
2426 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n", p->thread_id);
2428 __instr_dma_ht_many_exec(p, t, ip, 5);
2432 __instr_dma_ht6_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
2434 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n", p->thread_id);
2436 __instr_dma_ht_many_exec(p, t, ip, 6);
2440 __instr_dma_ht7_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
2442 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n", p->thread_id);
2444 __instr_dma_ht_many_exec(p, t, ip, 7);
2448 __instr_dma_ht8_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
2450 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n", p->thread_id);
2452 __instr_dma_ht_many_exec(p, t, ip, 8);
/*
 * ALU instruction executors. Operand-location suffixes: (none) meta/meta,
 * mh = meta dst / header src, hm = header dst / meta src, hh = header/header,
 * mi/hi/i = immediate source. The arithmetic itself is done by ALU* macros
 * on lines missing from this extracted chunk; only the TRACE lines and the
 * *_FAST macro invocations are visible here.
 */
/* add: dst += src */
2459 __instr_alu_add_exec(struct rte_swx_pipeline *p __rte_unused,
2461 const struct instruction *ip)
2463 TRACE("[Thread %2u] add\n", p->thread_id);
2469 __instr_alu_add_mh_exec(struct rte_swx_pipeline *p __rte_unused,
2471 const struct instruction *ip)
2473 TRACE("[Thread %2u] add (mh)\n", p->thread_id);
2479 __instr_alu_add_hm_exec(struct rte_swx_pipeline *p __rte_unused,
2481 const struct instruction *ip)
2483 TRACE("[Thread %2u] add (hm)\n", p->thread_id);
2489 __instr_alu_add_hh_exec(struct rte_swx_pipeline *p __rte_unused,
2491 const struct instruction *ip)
2493 TRACE("[Thread %2u] add (hh)\n", p->thread_id);
2499 __instr_alu_add_mi_exec(struct rte_swx_pipeline *p __rte_unused,
2501 const struct instruction *ip)
2503 TRACE("[Thread %2u] add (mi)\n", p->thread_id);
2509 __instr_alu_add_hi_exec(struct rte_swx_pipeline *p __rte_unused,
2511 const struct instruction *ip)
2513 TRACE("[Thread %2u] add (hi)\n", p->thread_id);
/* sub: dst -= src */
2519 __instr_alu_sub_exec(struct rte_swx_pipeline *p __rte_unused,
2521 const struct instruction *ip)
2523 TRACE("[Thread %2u] sub\n", p->thread_id);
2529 __instr_alu_sub_mh_exec(struct rte_swx_pipeline *p __rte_unused,
2531 const struct instruction *ip)
2533 TRACE("[Thread %2u] sub (mh)\n", p->thread_id);
2539 __instr_alu_sub_hm_exec(struct rte_swx_pipeline *p __rte_unused,
2541 const struct instruction *ip)
2543 TRACE("[Thread %2u] sub (hm)\n", p->thread_id);
2549 __instr_alu_sub_hh_exec(struct rte_swx_pipeline *p __rte_unused,
2551 const struct instruction *ip)
2553 TRACE("[Thread %2u] sub (hh)\n", p->thread_id);
2559 __instr_alu_sub_mi_exec(struct rte_swx_pipeline *p __rte_unused,
2561 const struct instruction *ip)
2563 TRACE("[Thread %2u] sub (mi)\n", p->thread_id);
2569 __instr_alu_sub_hi_exec(struct rte_swx_pipeline *p __rte_unused,
2571 const struct instruction *ip)
2573 TRACE("[Thread %2u] sub (hi)\n", p->thread_id);
/* shl: dst <<= src */
2579 __instr_alu_shl_exec(struct rte_swx_pipeline *p __rte_unused,
2581 const struct instruction *ip)
2583 TRACE("[Thread %2u] shl\n", p->thread_id);
2589 __instr_alu_shl_mh_exec(struct rte_swx_pipeline *p __rte_unused,
2591 const struct instruction *ip)
2593 TRACE("[Thread %2u] shl (mh)\n", p->thread_id);
2599 __instr_alu_shl_hm_exec(struct rte_swx_pipeline *p __rte_unused,
2601 const struct instruction *ip)
2603 TRACE("[Thread %2u] shl (hm)\n", p->thread_id);
2609 __instr_alu_shl_hh_exec(struct rte_swx_pipeline *p __rte_unused,
2611 const struct instruction *ip)
2613 TRACE("[Thread %2u] shl (hh)\n", p->thread_id);
2619 __instr_alu_shl_mi_exec(struct rte_swx_pipeline *p __rte_unused,
2621 const struct instruction *ip)
2623 TRACE("[Thread %2u] shl (mi)\n", p->thread_id);
2629 __instr_alu_shl_hi_exec(struct rte_swx_pipeline *p __rte_unused,
2631 const struct instruction *ip)
2633 TRACE("[Thread %2u] shl (hi)\n", p->thread_id);
/* shr: dst >>= src */
2639 __instr_alu_shr_exec(struct rte_swx_pipeline *p __rte_unused,
2641 const struct instruction *ip)
2643 TRACE("[Thread %2u] shr\n", p->thread_id);
2649 __instr_alu_shr_mh_exec(struct rte_swx_pipeline *p __rte_unused,
2651 const struct instruction *ip)
2653 TRACE("[Thread %2u] shr (mh)\n", p->thread_id);
2659 __instr_alu_shr_hm_exec(struct rte_swx_pipeline *p __rte_unused,
2661 const struct instruction *ip)
2663 TRACE("[Thread %2u] shr (hm)\n", p->thread_id);
2669 __instr_alu_shr_hh_exec(struct rte_swx_pipeline *p __rte_unused,
2671 const struct instruction *ip)
2673 TRACE("[Thread %2u] shr (hh)\n", p->thread_id);
2679 __instr_alu_shr_mi_exec(struct rte_swx_pipeline *p __rte_unused,
2681 const struct instruction *ip)
2683 TRACE("[Thread %2u] shr (mi)\n", p->thread_id);
2690 __instr_alu_shr_hi_exec(struct rte_swx_pipeline *p __rte_unused,
2692 const struct instruction *ip)
2694 TRACE("[Thread %2u] shr (hi)\n", p->thread_id);
/* and: dst &= src (hm/hh use the *_FAST byte-order-free variants) */
2700 __instr_alu_and_exec(struct rte_swx_pipeline *p __rte_unused,
2702 const struct instruction *ip)
2704 TRACE("[Thread %2u] and\n", p->thread_id);
2710 __instr_alu_and_mh_exec(struct rte_swx_pipeline *p __rte_unused,
2712 const struct instruction *ip)
2714 TRACE("[Thread %2u] and (mh)\n", p->thread_id);
2720 __instr_alu_and_hm_exec(struct rte_swx_pipeline *p __rte_unused,
2722 const struct instruction *ip)
2724 TRACE("[Thread %2u] and (hm)\n", p->thread_id);
2726 ALU_HM_FAST(t, ip, &);
2730 __instr_alu_and_hh_exec(struct rte_swx_pipeline *p __rte_unused,
2732 const struct instruction *ip)
2734 TRACE("[Thread %2u] and (hh)\n", p->thread_id);
2736 ALU_HH_FAST(t, ip, &);
2740 __instr_alu_and_i_exec(struct rte_swx_pipeline *p __rte_unused,
2742 const struct instruction *ip)
2744 TRACE("[Thread %2u] and (i)\n", p->thread_id);
/* or: dst |= src */
2750 __instr_alu_or_exec(struct rte_swx_pipeline *p __rte_unused,
2752 const struct instruction *ip)
2754 TRACE("[Thread %2u] or\n", p->thread_id);
2760 __instr_alu_or_mh_exec(struct rte_swx_pipeline *p __rte_unused,
2762 const struct instruction *ip)
2764 TRACE("[Thread %2u] or (mh)\n", p->thread_id);
2770 __instr_alu_or_hm_exec(struct rte_swx_pipeline *p __rte_unused,
2772 const struct instruction *ip)
2774 TRACE("[Thread %2u] or (hm)\n", p->thread_id);
2776 ALU_HM_FAST(t, ip, |);
2780 __instr_alu_or_hh_exec(struct rte_swx_pipeline *p __rte_unused,
2782 const struct instruction *ip)
2784 TRACE("[Thread %2u] or (hh)\n", p->thread_id);
2786 ALU_HH_FAST(t, ip, |);
2790 __instr_alu_or_i_exec(struct rte_swx_pipeline *p __rte_unused,
2792 const struct instruction *ip)
2794 TRACE("[Thread %2u] or (i)\n", p->thread_id);
/* xor: dst ^= src */
2800 __instr_alu_xor_exec(struct rte_swx_pipeline *p __rte_unused,
2802 const struct instruction *ip)
2804 TRACE("[Thread %2u] xor\n", p->thread_id);
2810 __instr_alu_xor_mh_exec(struct rte_swx_pipeline *p __rte_unused,
2812 const struct instruction *ip)
2814 TRACE("[Thread %2u] xor (mh)\n", p->thread_id);
2820 __instr_alu_xor_hm_exec(struct rte_swx_pipeline *p __rte_unused,
2822 const struct instruction *ip)
2824 TRACE("[Thread %2u] xor (hm)\n", p->thread_id);
2826 ALU_HM_FAST(t, ip, ^);
2830 __instr_alu_xor_hh_exec(struct rte_swx_pipeline *p __rte_unused,
2832 const struct instruction *ip)
2834 TRACE("[Thread %2u] xor (hh)\n", p->thread_id);
2836 ALU_HH_FAST(t, ip, ^);
2840 __instr_alu_xor_i_exec(struct rte_swx_pipeline *p __rte_unused,
2842 const struct instruction *ip)
2844 TRACE("[Thread %2u] xor (i)\n", p->thread_id);
/*
 * Incremental 1's-complement checksum add (RFC 1071-style folding): fold a
 * source field of up to 64 bits into a 16-bit checksum stored at the
 * destination. The checksum is kept in 1's complement form, so the current
 * value is inverted before the add and the result is inverted again at the
 * end.
 */
2850 __instr_alu_ckadd_field_exec(struct rte_swx_pipeline *p __rte_unused,
2852 const struct instruction *ip)
2854 uint8_t *dst_struct, *src_struct;
2855 uint16_t *dst16_ptr, dst;
2856 uint64_t *src64_ptr, src64, src64_mask, src;
2859 TRACE("[Thread %2u] ckadd (field)\n", p->thread_id);
2862 dst_struct = t->structs[ip->alu.dst.struct_id];
2863 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
2866 src_struct = t->structs[ip->alu.src.struct_id];
2867 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
2869 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
2870 src = src64 & src64_mask;
2872 /* Initialize the result with destination 1's complement. */
2876 /* The first input (r) is a 16-bit number. The second and the third
2877 * inputs are 32-bit numbers. In the worst case scenario, the sum of the
2878 * three numbers (output r) is a 34-bit number.
2880 r += (src >> 32) + (src & 0xFFFFFFFF);
2882 /* The first input is a 16-bit number. The second input is an 18-bit
2883 * number. In the worst case scenario, the sum of the two numbers is a
2886 r = (r & 0xFFFF) + (r >> 16);
2888 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
2889 * a 3-bit number (0 .. 7). Their sum is a 17-bit number (0 .. 0x10006).
2891 r = (r & 0xFFFF) + (r >> 16);
2893 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
2894 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
2895 * 0x10006), the output r is (0 .. 7). So no carry bit can be generated,
2896 * therefore the output r is always a 16-bit number.
2898 r = (r & 0xFFFF) + (r >> 16);
2900 /* Apply 1's complement to the result. */
2904 *dst16_ptr = (uint16_t)r;
/*
 * Incremental 1's-complement checksum subtract: remove a source field of up
 * to 64 bits from a 16-bit checksum stored at the destination. A large
 * multiple of the 0xFFFF modulus is added first so the 2's-complement
 * subtraction below cannot underflow (see the in-line proof comments).
 */
2908 __instr_alu_cksub_field_exec(struct rte_swx_pipeline *p __rte_unused,
2910 const struct instruction *ip)
2912 uint8_t *dst_struct, *src_struct;
2913 uint16_t *dst16_ptr, dst;
2914 uint64_t *src64_ptr, src64, src64_mask, src;
2917 TRACE("[Thread %2u] cksub (field)\n", p->thread_id);
2920 dst_struct = t->structs[ip->alu.dst.struct_id];
2921 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
2924 src_struct = t->structs[ip->alu.src.struct_id];
2925 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
2927 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
2928 src = src64 & src64_mask;
2930 /* Initialize the result with destination 1's complement. */
2934 /* Subtraction in 1's complement arithmetic (i.e. a '- b) is the same as
2935 * the following sequence of operations in 2's complement arithmetic:
2936 * a '- b = (a - b) % 0xFFFF.
2938 * In order to prevent an underflow for the below subtraction, in which
2939 * a 33-bit number (the subtrahend) is taken out of a 16-bit number (the
2940 * minuend), we first add a multiple of the 0xFFFF modulus to the
2941 * minuend. The number we add to the minuend needs to be a 34-bit number
2942 * or higher, so for readability reasons we picked the 36-bit multiple.
2943 * We are effectively turning the 16-bit minuend into a 36-bit number:
2944 * (a - b) % 0xFFFF = (a + 0xFFFF00000 - b) % 0xFFFF.
2946 r += 0xFFFF00000ULL; /* The output r is a 36-bit number. */
2948 /* A 33-bit number is subtracted from a 36-bit number (the input r). The
2949 * result (the output r) is a 36-bit number.
2951 r -= (src >> 32) + (src & 0xFFFFFFFF);
2953 /* The first input is a 16-bit number. The second input is a 20-bit
2954 * number. Their sum is a 21-bit number.
2956 r = (r & 0xFFFF) + (r >> 16);
2958 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
2959 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1001E).
2961 r = (r & 0xFFFF) + (r >> 16);
2963 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
2964 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
2965 * 0x1001E), the output r is (0 .. 31). So no carry bit can be
2966 * generated, therefore the output r is always a 16-bit number.
2968 r = (r & 0xFFFF) + (r >> 16);
2970 /* Apply 1's complement to the result. */
2974 *dst16_ptr = (uint16_t)r;
/*
 * 1's-complement checksum over a fixed 20-byte source struct (the common
 * IPv4 header-without-options case), specialized with a fully unrolled sum
 * of five 32-bit words split across two accumulators. The result is folded
 * to 16 bits and written in 1's complement form.
 */
2978 __instr_alu_ckadd_struct20_exec(struct rte_swx_pipeline *p __rte_unused,
2980 const struct instruction *ip)
2982 uint8_t *dst_struct, *src_struct;
2983 uint16_t *dst16_ptr, dst;
2984 uint32_t *src32_ptr;
2987 TRACE("[Thread %2u] ckadd (struct of 20 bytes)\n", p->thread_id);
2990 dst_struct = t->structs[ip->alu.dst.struct_id];
2991 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
2994 src_struct = t->structs[ip->alu.src.struct_id];
2995 src32_ptr = (uint32_t *)&src_struct[0];
2997 /* Initialize the result with destination 1's complement. */
/* Unrolled sum of the five 32-bit source words using two accumulators. */
3001 r0 += src32_ptr[0]; /* The output r0 is a 33-bit number. */
3002 r1 = src32_ptr[1]; /* r1 is a 32-bit number. */
3003 r0 += src32_ptr[2]; /* The output r0 is a 34-bit number. */
3004 r1 += src32_ptr[3]; /* The output r1 is a 33-bit number. */
3005 r0 += r1 + src32_ptr[4]; /* The output r0 is a 35-bit number. */
3007 /* The first input is a 16-bit number. The second input is a 19-bit
3008 * number. Their sum is a 20-bit number.
3010 r0 = (r0 & 0xFFFF) + (r0 >> 16);
3012 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
3013 * a 4-bit number (0 .. 15). The sum is a 17-bit number (0 .. 0x1000E).
3015 r0 = (r0 & 0xFFFF) + (r0 >> 16);
3017 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
3018 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
3019 * 0x1000E), the output r is (0 .. 15). So no carry bit can be
3020 * generated, therefore the output r is always a 16-bit number.
3022 r0 = (r0 & 0xFFFF) + (r0 >> 16);
3024 /* Apply 1's complement to the result. */
/* A zero checksum is encoded as 0xFFFF in 1's complement arithmetic. */
3026 r0 = r0 ? r0 : 0xFFFF;
3028 *dst16_ptr = (uint16_t)r0;
3032 __instr_alu_ckadd_struct_exec(struct rte_swx_pipeline *p __rte_unused,
3034 const struct instruction *ip)
3036 uint32_t src_header_id = ip->alu.src.n_bits; /* The src header ID is stored here. */
3037 uint32_t n_src_header_bytes = t->headers[src_header_id].n_bytes;
3038 uint8_t *dst_struct, *src_struct;
3039 uint16_t *dst16_ptr, dst;
3040 uint32_t *src32_ptr;
3044 if (n_src_header_bytes == 20) {
3045 __instr_alu_ckadd_struct20_exec(p, t, ip);
3049 TRACE("[Thread %2u] ckadd (struct)\n", p->thread_id);
3052 dst_struct = t->structs[ip->alu.dst.struct_id];
3053 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
3056 src_struct = t->structs[ip->alu.src.struct_id];
3057 src32_ptr = (uint32_t *)&src_struct[0];
3059 /* Initialize the result with destination 1's complement. */
3063 /* The max number of 32-bit words in a 32K-byte header is 2^13.
3064 * Therefore, in the worst case scenario, a 45-bit number is added to a
3065 * 16-bit number (the input r), so the output r is 46-bit number.
3067 for (i = 0; i < n_src_header_bytes / 4; i++, src32_ptr++)
3070 /* The first input is a 16-bit number. The second input is a 30-bit
3071 * number. Their sum is a 31-bit number.
3073 r = (r & 0xFFFF) + (r >> 16);
3075 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
3076 * a 15-bit number (0 .. 0x7FFF). The sum is a 17-bit number (0 .. 0x17FFE).
3078 r = (r & 0xFFFF) + (r >> 16);
3080 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
3081 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
3082 * 0x17FFE), the output r is (0 .. 0x7FFF). So no carry bit can be
3083 * generated, therefore the output r is always a 16-bit number.
3085 r = (r & 0xFFFF) + (r >> 16);
3087 /* Apply 1's complement to the result. */
3091 *dst16_ptr = (uint16_t)r;
3097 static inline uint64_t *
3098 instr_regarray_regarray(struct rte_swx_pipeline *p, const struct instruction *ip)
3100 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
3104 static inline uint64_t
3105 instr_regarray_idx_hbo(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3107 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
3109 uint8_t *idx_struct = t->structs[ip->regarray.idx.struct_id];
3110 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->regarray.idx.offset];
3111 uint64_t idx64 = *idx64_ptr;
3112 uint64_t idx64_mask = UINT64_MAX >> (64 - ip->regarray.idx.n_bits);
3113 uint64_t idx = idx64 & idx64_mask & r->size_mask;
3118 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
3120 static inline uint64_t
3121 instr_regarray_idx_nbo(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3123 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
3125 uint8_t *idx_struct = t->structs[ip->regarray.idx.struct_id];
3126 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->regarray.idx.offset];
3127 uint64_t idx64 = *idx64_ptr;
3128 uint64_t idx = (ntoh64(idx64) >> (64 - ip->regarray.idx.n_bits)) & r->size_mask;
3135 #define instr_regarray_idx_nbo instr_regarray_idx_hbo
3139 static inline uint64_t
3140 instr_regarray_idx_imm(struct rte_swx_pipeline *p, const struct instruction *ip)
3142 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
3144 uint64_t idx = ip->regarray.idx_val & r->size_mask;
3149 static inline uint64_t
3150 instr_regarray_src_hbo(struct thread *t, const struct instruction *ip)
3152 uint8_t *src_struct = t->structs[ip->regarray.dstsrc.struct_id];
3153 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->regarray.dstsrc.offset];
3154 uint64_t src64 = *src64_ptr;
3155 uint64_t src64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
3156 uint64_t src = src64 & src64_mask;
3161 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
3163 static inline uint64_t
3164 instr_regarray_src_nbo(struct thread *t, const struct instruction *ip)
3166 uint8_t *src_struct = t->structs[ip->regarray.dstsrc.struct_id];
3167 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->regarray.dstsrc.offset];
3168 uint64_t src64 = *src64_ptr;
3169 uint64_t src = ntoh64(src64) >> (64 - ip->regarray.dstsrc.n_bits);
3176 #define instr_regarray_src_nbo instr_regarray_src_hbo
3181 instr_regarray_dst_hbo_src_hbo_set(struct thread *t, const struct instruction *ip, uint64_t src)
3183 uint8_t *dst_struct = t->structs[ip->regarray.dstsrc.struct_id];
3184 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->regarray.dstsrc.offset];
3185 uint64_t dst64 = *dst64_ptr;
3186 uint64_t dst64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
3188 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
3192 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
3195 instr_regarray_dst_nbo_src_hbo_set(struct thread *t, const struct instruction *ip, uint64_t src)
3197 uint8_t *dst_struct = t->structs[ip->regarray.dstsrc.struct_id];
3198 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->regarray.dstsrc.offset];
3199 uint64_t dst64 = *dst64_ptr;
3200 uint64_t dst64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
3202 src = hton64(src) >> (64 - ip->regarray.dstsrc.n_bits);
3203 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
3208 #define instr_regarray_dst_nbo_src_hbo_set instr_regarray_dst_hbo_src_hbo_set
3213 __instr_regprefetch_rh_exec(struct rte_swx_pipeline *p,
3215 const struct instruction *ip)
3217 uint64_t *regarray, idx;
3219 TRACE("[Thread %2u] regprefetch (r[h])\n", p->thread_id);
3221 regarray = instr_regarray_regarray(p, ip);
3222 idx = instr_regarray_idx_nbo(p, t, ip);
3223 rte_prefetch0(®array[idx]);
3227 __instr_regprefetch_rm_exec(struct rte_swx_pipeline *p,
3229 const struct instruction *ip)
3231 uint64_t *regarray, idx;
3233 TRACE("[Thread %2u] regprefetch (r[m])\n", p->thread_id);
3235 regarray = instr_regarray_regarray(p, ip);
3236 idx = instr_regarray_idx_hbo(p, t, ip);
3237 rte_prefetch0(®array[idx]);
3241 __instr_regprefetch_ri_exec(struct rte_swx_pipeline *p,
3242 struct thread *t __rte_unused,
3243 const struct instruction *ip)
3245 uint64_t *regarray, idx;
3247 TRACE("[Thread %2u] regprefetch (r[i])\n", p->thread_id);
3249 regarray = instr_regarray_regarray(p, ip);
3250 idx = instr_regarray_idx_imm(p, ip);
3251 rte_prefetch0(®array[idx]);
3255 __instr_regrd_hrh_exec(struct rte_swx_pipeline *p,
3257 const struct instruction *ip)
3259 uint64_t *regarray, idx;
3261 TRACE("[Thread %2u] regrd (h = r[h])\n", p->thread_id);
3263 regarray = instr_regarray_regarray(p, ip);
3264 idx = instr_regarray_idx_nbo(p, t, ip);
3265 instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
3269 __instr_regrd_hrm_exec(struct rte_swx_pipeline *p,
3271 const struct instruction *ip)
3273 uint64_t *regarray, idx;
3275 TRACE("[Thread %2u] regrd (h = r[m])\n", p->thread_id);
3278 regarray = instr_regarray_regarray(p, ip);
3279 idx = instr_regarray_idx_hbo(p, t, ip);
3280 instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
3284 __instr_regrd_mrh_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3286 uint64_t *regarray, idx;
3288 TRACE("[Thread %2u] regrd (m = r[h])\n", p->thread_id);
3290 regarray = instr_regarray_regarray(p, ip);
3291 idx = instr_regarray_idx_nbo(p, t, ip);
3292 instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
3296 __instr_regrd_mrm_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3298 uint64_t *regarray, idx;
3300 TRACE("[Thread %2u] regrd (m = r[m])\n", p->thread_id);
3302 regarray = instr_regarray_regarray(p, ip);
3303 idx = instr_regarray_idx_hbo(p, t, ip);
3304 instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
3308 __instr_regrd_hri_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3310 uint64_t *regarray, idx;
3312 TRACE("[Thread %2u] regrd (h = r[i])\n", p->thread_id);
3314 regarray = instr_regarray_regarray(p, ip);
3315 idx = instr_regarray_idx_imm(p, ip);
3316 instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
3320 __instr_regrd_mri_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3322 uint64_t *regarray, idx;
3324 TRACE("[Thread %2u] regrd (m = r[i])\n", p->thread_id);
3326 regarray = instr_regarray_regarray(p, ip);
3327 idx = instr_regarray_idx_imm(p, ip);
3328 instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
3332 __instr_regwr_rhh_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3334 uint64_t *regarray, idx, src;
3336 TRACE("[Thread %2u] regwr (r[h] = h)\n", p->thread_id);
3338 regarray = instr_regarray_regarray(p, ip);
3339 idx = instr_regarray_idx_nbo(p, t, ip);
3340 src = instr_regarray_src_nbo(t, ip);
3341 regarray[idx] = src;
3345 __instr_regwr_rhm_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3347 uint64_t *regarray, idx, src;
3349 TRACE("[Thread %2u] regwr (r[h] = m)\n", p->thread_id);
3351 regarray = instr_regarray_regarray(p, ip);
3352 idx = instr_regarray_idx_nbo(p, t, ip);
3353 src = instr_regarray_src_hbo(t, ip);
3354 regarray[idx] = src;
3358 __instr_regwr_rmh_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3360 uint64_t *regarray, idx, src;
3362 TRACE("[Thread %2u] regwr (r[m] = h)\n", p->thread_id);
3364 regarray = instr_regarray_regarray(p, ip);
3365 idx = instr_regarray_idx_hbo(p, t, ip);
3366 src = instr_regarray_src_nbo(t, ip);
3367 regarray[idx] = src;
3371 __instr_regwr_rmm_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3373 uint64_t *regarray, idx, src;
3375 TRACE("[Thread %2u] regwr (r[m] = m)\n", p->thread_id);
3377 regarray = instr_regarray_regarray(p, ip);
3378 idx = instr_regarray_idx_hbo(p, t, ip);
3379 src = instr_regarray_src_hbo(t, ip);
3380 regarray[idx] = src;
3384 __instr_regwr_rhi_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3386 uint64_t *regarray, idx, src;
3388 TRACE("[Thread %2u] regwr (r[h] = i)\n", p->thread_id);
3390 regarray = instr_regarray_regarray(p, ip);
3391 idx = instr_regarray_idx_nbo(p, t, ip);
3392 src = ip->regarray.dstsrc_val;
3393 regarray[idx] = src;
3397 __instr_regwr_rmi_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3399 uint64_t *regarray, idx, src;
3401 TRACE("[Thread %2u] regwr (r[m] = i)\n", p->thread_id);
3403 regarray = instr_regarray_regarray(p, ip);
3404 idx = instr_regarray_idx_hbo(p, t, ip);
3405 src = ip->regarray.dstsrc_val;
3406 regarray[idx] = src;
3410 __instr_regwr_rih_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3412 uint64_t *regarray, idx, src;
3414 TRACE("[Thread %2u] regwr (r[i] = h)\n", p->thread_id);
3416 regarray = instr_regarray_regarray(p, ip);
3417 idx = instr_regarray_idx_imm(p, ip);
3418 src = instr_regarray_src_nbo(t, ip);
3419 regarray[idx] = src;
3423 __instr_regwr_rim_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3425 uint64_t *regarray, idx, src;
3427 TRACE("[Thread %2u] regwr (r[i] = m)\n", p->thread_id);
3429 regarray = instr_regarray_regarray(p, ip);
3430 idx = instr_regarray_idx_imm(p, ip);
3431 src = instr_regarray_src_hbo(t, ip);
3432 regarray[idx] = src;
3436 __instr_regwr_rii_exec(struct rte_swx_pipeline *p,
3437 struct thread *t __rte_unused,
3438 const struct instruction *ip)
3440 uint64_t *regarray, idx, src;
3442 TRACE("[Thread %2u] regwr (r[i] = i)\n", p->thread_id);
3444 regarray = instr_regarray_regarray(p, ip);
3445 idx = instr_regarray_idx_imm(p, ip);
3446 src = ip->regarray.dstsrc_val;
3447 regarray[idx] = src;
3451 __instr_regadd_rhh_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3453 uint64_t *regarray, idx, src;
3455 TRACE("[Thread %2u] regadd (r[h] += h)\n", p->thread_id);
3457 regarray = instr_regarray_regarray(p, ip);
3458 idx = instr_regarray_idx_nbo(p, t, ip);
3459 src = instr_regarray_src_nbo(t, ip);
3460 regarray[idx] += src;
3464 __instr_regadd_rhm_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3466 uint64_t *regarray, idx, src;
3468 TRACE("[Thread %2u] regadd (r[h] += m)\n", p->thread_id);
3470 regarray = instr_regarray_regarray(p, ip);
3471 idx = instr_regarray_idx_nbo(p, t, ip);
3472 src = instr_regarray_src_hbo(t, ip);
3473 regarray[idx] += src;
3477 __instr_regadd_rmh_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3479 uint64_t *regarray, idx, src;
3481 TRACE("[Thread %2u] regadd (r[m] += h)\n", p->thread_id);
3483 regarray = instr_regarray_regarray(p, ip);
3484 idx = instr_regarray_idx_hbo(p, t, ip);
3485 src = instr_regarray_src_nbo(t, ip);
3486 regarray[idx] += src;
3490 __instr_regadd_rmm_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3492 uint64_t *regarray, idx, src;
3494 TRACE("[Thread %2u] regadd (r[m] += m)\n", p->thread_id);
3496 regarray = instr_regarray_regarray(p, ip);
3497 idx = instr_regarray_idx_hbo(p, t, ip);
3498 src = instr_regarray_src_hbo(t, ip);
3499 regarray[idx] += src;
3503 __instr_regadd_rhi_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3505 uint64_t *regarray, idx, src;
3507 TRACE("[Thread %2u] regadd (r[h] += i)\n", p->thread_id);
3509 regarray = instr_regarray_regarray(p, ip);
3510 idx = instr_regarray_idx_nbo(p, t, ip);
3511 src = ip->regarray.dstsrc_val;
3512 regarray[idx] += src;
3516 __instr_regadd_rmi_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3518 uint64_t *regarray, idx, src;
3520 TRACE("[Thread %2u] regadd (r[m] += i)\n", p->thread_id);
3522 regarray = instr_regarray_regarray(p, ip);
3523 idx = instr_regarray_idx_hbo(p, t, ip);
3524 src = ip->regarray.dstsrc_val;
3525 regarray[idx] += src;
3529 __instr_regadd_rih_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3531 uint64_t *regarray, idx, src;
3533 TRACE("[Thread %2u] regadd (r[i] += h)\n", p->thread_id);
3535 regarray = instr_regarray_regarray(p, ip);
3536 idx = instr_regarray_idx_imm(p, ip);
3537 src = instr_regarray_src_nbo(t, ip);
3538 regarray[idx] += src;
3542 __instr_regadd_rim_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3544 uint64_t *regarray, idx, src;
3546 TRACE("[Thread %2u] regadd (r[i] += m)\n", p->thread_id);
3548 regarray = instr_regarray_regarray(p, ip);
3549 idx = instr_regarray_idx_imm(p, ip);
3550 src = instr_regarray_src_hbo(t, ip);
3551 regarray[idx] += src;
3555 __instr_regadd_rii_exec(struct rte_swx_pipeline *p,
3556 struct thread *t __rte_unused,
3557 const struct instruction *ip)
3559 uint64_t *regarray, idx, src;
3561 TRACE("[Thread %2u] regadd (r[i] += i)\n", p->thread_id);
3563 regarray = instr_regarray_regarray(p, ip);
3564 idx = instr_regarray_idx_imm(p, ip);
3565 src = ip->regarray.dstsrc_val;
3566 regarray[idx] += src;
3572 static inline struct meter *
3573 instr_meter_idx_hbo(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3575 struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
3577 uint8_t *idx_struct = t->structs[ip->meter.idx.struct_id];
3578 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->meter.idx.offset];
3579 uint64_t idx64 = *idx64_ptr;
3580 uint64_t idx64_mask = UINT64_MAX >> (64 - (ip)->meter.idx.n_bits);
3581 uint64_t idx = idx64 & idx64_mask & r->size_mask;
3583 return &r->metarray[idx];
3586 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
3588 static inline struct meter *
3589 instr_meter_idx_nbo(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3591 struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
3593 uint8_t *idx_struct = t->structs[ip->meter.idx.struct_id];
3594 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->meter.idx.offset];
3595 uint64_t idx64 = *idx64_ptr;
3596 uint64_t idx = (ntoh64(idx64) >> (64 - ip->meter.idx.n_bits)) & r->size_mask;
3598 return &r->metarray[idx];
3603 #define instr_meter_idx_nbo instr_meter_idx_hbo
3607 static inline struct meter *
3608 instr_meter_idx_imm(struct rte_swx_pipeline *p, const struct instruction *ip)
3610 struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
3612 uint64_t idx = ip->meter.idx_val & r->size_mask;
3614 return &r->metarray[idx];
3617 static inline uint32_t
3618 instr_meter_length_hbo(struct thread *t, const struct instruction *ip)
3620 uint8_t *src_struct = t->structs[ip->meter.length.struct_id];
3621 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.length.offset];
3622 uint64_t src64 = *src64_ptr;
3623 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->meter.length.n_bits);
3624 uint64_t src = src64 & src64_mask;
3626 return (uint32_t)src;
3629 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
3631 static inline uint32_t
3632 instr_meter_length_nbo(struct thread *t, const struct instruction *ip)
3634 uint8_t *src_struct = t->structs[ip->meter.length.struct_id];
3635 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.length.offset];
3636 uint64_t src64 = *src64_ptr;
3637 uint64_t src = ntoh64(src64) >> (64 - ip->meter.length.n_bits);
3639 return (uint32_t)src;
3644 #define instr_meter_length_nbo instr_meter_length_hbo
3648 static inline enum rte_color
3649 instr_meter_color_in_hbo(struct thread *t, const struct instruction *ip)
3651 uint8_t *src_struct = t->structs[ip->meter.color_in.struct_id];
3652 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.color_in.offset];
3653 uint64_t src64 = *src64_ptr;
3654 uint64_t src64_mask = UINT64_MAX >> (64 - ip->meter.color_in.n_bits);
3655 uint64_t src = src64 & src64_mask;
3657 return (enum rte_color)src;
3661 instr_meter_color_out_hbo_set(struct thread *t,
3662 const struct instruction *ip,
3663 enum rte_color color_out)
3665 uint8_t *dst_struct = t->structs[ip->meter.color_out.struct_id];
3666 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->meter.color_out.offset];
3667 uint64_t dst64 = *dst64_ptr;
3668 uint64_t dst64_mask = UINT64_MAX >> (64 - ip->meter.color_out.n_bits);
3670 uint64_t src = (uint64_t)color_out;
3672 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
3676 __instr_metprefetch_h_exec(struct rte_swx_pipeline *p,
3678 const struct instruction *ip)
3682 TRACE("[Thread %2u] metprefetch (h)\n", p->thread_id);
3684 m = instr_meter_idx_nbo(p, t, ip);
3689 __instr_metprefetch_m_exec(struct rte_swx_pipeline *p,
3691 const struct instruction *ip)
3695 TRACE("[Thread %2u] metprefetch (m)\n", p->thread_id);
3697 m = instr_meter_idx_hbo(p, t, ip);
3702 __instr_metprefetch_i_exec(struct rte_swx_pipeline *p,
3703 struct thread *t __rte_unused,
3704 const struct instruction *ip)
3708 TRACE("[Thread %2u] metprefetch (i)\n", p->thread_id);
3710 m = instr_meter_idx_imm(p, ip);
3715 __instr_meter_hhm_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3718 uint64_t time, n_pkts, n_bytes;
3720 enum rte_color color_in, color_out;
3722 TRACE("[Thread %2u] meter (hhm)\n", p->thread_id);
3724 m = instr_meter_idx_nbo(p, t, ip);
3725 rte_prefetch0(m->n_pkts);
3726 time = rte_get_tsc_cycles();
3727 length = instr_meter_length_nbo(t, ip);
3728 color_in = instr_meter_color_in_hbo(t, ip);
3730 color_out = rte_meter_trtcm_color_aware_check(&m->m,
3731 &m->profile->profile,
3736 color_out &= m->color_mask;
3738 n_pkts = m->n_pkts[color_out];
3739 n_bytes = m->n_bytes[color_out];
3741 instr_meter_color_out_hbo_set(t, ip, color_out);
3743 m->n_pkts[color_out] = n_pkts + 1;
3744 m->n_bytes[color_out] = n_bytes + length;
3748 __instr_meter_hhi_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3751 uint64_t time, n_pkts, n_bytes;
3753 enum rte_color color_in, color_out;
3755 TRACE("[Thread %2u] meter (hhi)\n", p->thread_id);
3757 m = instr_meter_idx_nbo(p, t, ip);
3758 rte_prefetch0(m->n_pkts);
3759 time = rte_get_tsc_cycles();
3760 length = instr_meter_length_nbo(t, ip);
3761 color_in = (enum rte_color)ip->meter.color_in_val;
3763 color_out = rte_meter_trtcm_color_aware_check(&m->m,
3764 &m->profile->profile,
3769 color_out &= m->color_mask;
3771 n_pkts = m->n_pkts[color_out];
3772 n_bytes = m->n_bytes[color_out];
3774 instr_meter_color_out_hbo_set(t, ip, color_out);
3776 m->n_pkts[color_out] = n_pkts + 1;
3777 m->n_bytes[color_out] = n_bytes + length;
3781 __instr_meter_hmm_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3784 uint64_t time, n_pkts, n_bytes;
3786 enum rte_color color_in, color_out;
3788 TRACE("[Thread %2u] meter (hmm)\n", p->thread_id);
3790 m = instr_meter_idx_nbo(p, t, ip);
3791 rte_prefetch0(m->n_pkts);
3792 time = rte_get_tsc_cycles();
3793 length = instr_meter_length_hbo(t, ip);
3794 color_in = instr_meter_color_in_hbo(t, ip);
3796 color_out = rte_meter_trtcm_color_aware_check(&m->m,
3797 &m->profile->profile,
3802 color_out &= m->color_mask;
3804 n_pkts = m->n_pkts[color_out];
3805 n_bytes = m->n_bytes[color_out];
3807 instr_meter_color_out_hbo_set(t, ip, color_out);
3809 m->n_pkts[color_out] = n_pkts + 1;
3810 m->n_bytes[color_out] = n_bytes + length;
3814 __instr_meter_hmi_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3817 uint64_t time, n_pkts, n_bytes;
3819 enum rte_color color_in, color_out;
3821 TRACE("[Thread %2u] meter (hmi)\n", p->thread_id);
3823 m = instr_meter_idx_nbo(p, t, ip);
3824 rte_prefetch0(m->n_pkts);
3825 time = rte_get_tsc_cycles();
3826 length = instr_meter_length_hbo(t, ip);
3827 color_in = (enum rte_color)ip->meter.color_in_val;
3829 color_out = rte_meter_trtcm_color_aware_check(&m->m,
3830 &m->profile->profile,
3835 color_out &= m->color_mask;
3837 n_pkts = m->n_pkts[color_out];
3838 n_bytes = m->n_bytes[color_out];
3840 instr_meter_color_out_hbo_set(t, ip, color_out);
3842 m->n_pkts[color_out] = n_pkts + 1;
3843 m->n_bytes[color_out] = n_bytes + length;
3847 __instr_meter_mhm_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3850 uint64_t time, n_pkts, n_bytes;
3852 enum rte_color color_in, color_out;
3854 TRACE("[Thread %2u] meter (mhm)\n", p->thread_id);
3856 m = instr_meter_idx_hbo(p, t, ip);
3857 rte_prefetch0(m->n_pkts);
3858 time = rte_get_tsc_cycles();
3859 length = instr_meter_length_nbo(t, ip);
3860 color_in = instr_meter_color_in_hbo(t, ip);
3862 color_out = rte_meter_trtcm_color_aware_check(&m->m,
3863 &m->profile->profile,
3868 color_out &= m->color_mask;
3870 n_pkts = m->n_pkts[color_out];
3871 n_bytes = m->n_bytes[color_out];
3873 instr_meter_color_out_hbo_set(t, ip, color_out);
3875 m->n_pkts[color_out] = n_pkts + 1;
3876 m->n_bytes[color_out] = n_bytes + length;
3880 __instr_meter_mhi_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3883 uint64_t time, n_pkts, n_bytes;
3885 enum rte_color color_in, color_out;
3887 TRACE("[Thread %2u] meter (mhi)\n", p->thread_id);
3889 m = instr_meter_idx_hbo(p, t, ip);
3890 rte_prefetch0(m->n_pkts);
3891 time = rte_get_tsc_cycles();
3892 length = instr_meter_length_nbo(t, ip);
3893 color_in = (enum rte_color)ip->meter.color_in_val;
3895 color_out = rte_meter_trtcm_color_aware_check(&m->m,
3896 &m->profile->profile,
3901 color_out &= m->color_mask;
3903 n_pkts = m->n_pkts[color_out];
3904 n_bytes = m->n_bytes[color_out];
3906 instr_meter_color_out_hbo_set(t, ip, color_out);
3908 m->n_pkts[color_out] = n_pkts + 1;
3909 m->n_bytes[color_out] = n_bytes + length;
3913 __instr_meter_mmm_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3916 uint64_t time, n_pkts, n_bytes;
3918 enum rte_color color_in, color_out;
3920 TRACE("[Thread %2u] meter (mmm)\n", p->thread_id);
3922 m = instr_meter_idx_hbo(p, t, ip);
3923 rte_prefetch0(m->n_pkts);
3924 time = rte_get_tsc_cycles();
3925 length = instr_meter_length_hbo(t, ip);
3926 color_in = instr_meter_color_in_hbo(t, ip);
3928 color_out = rte_meter_trtcm_color_aware_check(&m->m,
3929 &m->profile->profile,
3934 color_out &= m->color_mask;
3936 n_pkts = m->n_pkts[color_out];
3937 n_bytes = m->n_bytes[color_out];
3939 instr_meter_color_out_hbo_set(t, ip, color_out);
3941 m->n_pkts[color_out] = n_pkts + 1;
3942 m->n_bytes[color_out] = n_bytes + length;
3946 __instr_meter_mmi_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3949 uint64_t time, n_pkts, n_bytes;
3951 enum rte_color color_in, color_out;
3953 TRACE("[Thread %2u] meter (mmi)\n", p->thread_id);
3955 m = instr_meter_idx_hbo(p, t, ip);
3956 rte_prefetch0(m->n_pkts);
3957 time = rte_get_tsc_cycles();
3958 length = instr_meter_length_hbo(t, ip);
3959 color_in = (enum rte_color)ip->meter.color_in_val;
3961 color_out = rte_meter_trtcm_color_aware_check(&m->m,
3962 &m->profile->profile,
3967 color_out &= m->color_mask;
3969 n_pkts = m->n_pkts[color_out];
3970 n_bytes = m->n_bytes[color_out];
3972 instr_meter_color_out_hbo_set(t, ip, color_out);
3974 m->n_pkts[color_out] = n_pkts + 1;
3975 m->n_bytes[color_out] = n_bytes + length;
3979 __instr_meter_ihm_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3982 uint64_t time, n_pkts, n_bytes;
3984 enum rte_color color_in, color_out;
3986 TRACE("[Thread %2u] meter (ihm)\n", p->thread_id);
3988 m = instr_meter_idx_imm(p, ip);
3989 rte_prefetch0(m->n_pkts);
3990 time = rte_get_tsc_cycles();
3991 length = instr_meter_length_nbo(t, ip);
3992 color_in = instr_meter_color_in_hbo(t, ip);
3994 color_out = rte_meter_trtcm_color_aware_check(&m->m,
3995 &m->profile->profile,
4000 color_out &= m->color_mask;
4002 n_pkts = m->n_pkts[color_out];
4003 n_bytes = m->n_bytes[color_out];
4005 instr_meter_color_out_hbo_set(t, ip, color_out);
4007 m->n_pkts[color_out] = n_pkts + 1;
4008 m->n_bytes[color_out] = n_bytes + length;
4012 __instr_meter_ihi_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
4015 uint64_t time, n_pkts, n_bytes;
4017 enum rte_color color_in, color_out;
4019 TRACE("[Thread %2u] meter (ihi)\n", p->thread_id);
4021 m = instr_meter_idx_imm(p, ip);
4022 rte_prefetch0(m->n_pkts);
4023 time = rte_get_tsc_cycles();
4024 length = instr_meter_length_nbo(t, ip);
4025 color_in = (enum rte_color)ip->meter.color_in_val;
4027 color_out = rte_meter_trtcm_color_aware_check(&m->m,
4028 &m->profile->profile,
4033 color_out &= m->color_mask;
4035 n_pkts = m->n_pkts[color_out];
4036 n_bytes = m->n_bytes[color_out];
4038 instr_meter_color_out_hbo_set(t, ip, color_out);
4040 m->n_pkts[color_out] = n_pkts + 1;
4041 m->n_bytes[color_out] = n_bytes + length;
4045 __instr_meter_imm_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
4048 uint64_t time, n_pkts, n_bytes;
4050 enum rte_color color_in, color_out;
4052 TRACE("[Thread %2u] meter (imm)\n", p->thread_id);
4054 m = instr_meter_idx_imm(p, ip);
4055 rte_prefetch0(m->n_pkts);
4056 time = rte_get_tsc_cycles();
4057 length = instr_meter_length_hbo(t, ip);
4058 color_in = instr_meter_color_in_hbo(t, ip);
4060 color_out = rte_meter_trtcm_color_aware_check(&m->m,
4061 &m->profile->profile,
4066 color_out &= m->color_mask;
4068 n_pkts = m->n_pkts[color_out];
4069 n_bytes = m->n_bytes[color_out];
4071 instr_meter_color_out_hbo_set(t, ip, color_out);
4073 m->n_pkts[color_out] = n_pkts + 1;
4074 m->n_bytes[color_out] = n_bytes + length;
4078 __instr_meter_imi_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
4081 uint64_t time, n_pkts, n_bytes;
4083 enum rte_color color_in, color_out;
4085 TRACE("[Thread %2u] meter (imi)\n", p->thread_id);
4087 m = instr_meter_idx_imm(p, ip);
4088 rte_prefetch0(m->n_pkts);
4089 time = rte_get_tsc_cycles();
4090 length = instr_meter_length_hbo(t, ip);
4091 color_in = (enum rte_color)ip->meter.color_in_val;
4093 color_out = rte_meter_trtcm_color_aware_check(&m->m,
4094 &m->profile->profile,
4099 color_out &= m->color_mask;
4101 n_pkts = m->n_pkts[color_out];
4102 n_bytes = m->n_bytes[color_out];
4104 instr_meter_color_out_hbo_set(t, ip, color_out);
4106 m->n_pkts[color_out] = n_pkts + 1;
4107 m->n_bytes[color_out] = n_bytes + length;