1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2021 Intel Corporation
4 #ifndef __INCLUDE_RTE_SWX_PIPELINE_INTERNAL_H__
5 #define __INCLUDE_RTE_SWX_PIPELINE_INTERNAL_H__
11 #include <rte_byteorder.h>
12 #include <rte_common.h>
13 #include <rte_cycles.h>
14 #include <rte_prefetch.h>
15 #include <rte_meter.h>
17 #include <rte_swx_table_selector.h>
18 #include <rte_swx_table_learner.h>
19 #include <rte_swx_pipeline.h>
20 #include <rte_swx_ctl.h>
/* Trace macro: forwards its arguments straight to printf().
 * NOTE(review): any compile-time enable/disable guard for TRACE is not
 * visible in this chunk -- confirm whether it is conditionally defined above.
 */
27 #define TRACE(...) printf(__VA_ARGS__)
/* 64-bit network<->host byte-order conversion shorthands; thin wrappers over
 * the rte_byteorder.h primitives.
 */
35 #define ntoh64(x) rte_be_to_cpu_64(x)
36 #define hton64(x) rte_cpu_to_be_64(x)
42 char name[RTE_SWX_NAME_SIZE];
49 TAILQ_ENTRY(struct_type) node;
50 char name[RTE_SWX_NAME_SIZE];
58 TAILQ_HEAD(struct_type_tailq, struct_type);
64 TAILQ_ENTRY(port_in_type) node;
65 char name[RTE_SWX_NAME_SIZE];
66 struct rte_swx_port_in_ops ops;
69 TAILQ_HEAD(port_in_type_tailq, port_in_type);
72 TAILQ_ENTRY(port_in) node;
73 struct port_in_type *type;
78 TAILQ_HEAD(port_in_tailq, port_in);
80 struct port_in_runtime {
81 rte_swx_port_in_pkt_rx_t pkt_rx;
88 struct port_out_type {
89 TAILQ_ENTRY(port_out_type) node;
90 char name[RTE_SWX_NAME_SIZE];
91 struct rte_swx_port_out_ops ops;
94 TAILQ_HEAD(port_out_type_tailq, port_out_type);
97 TAILQ_ENTRY(port_out) node;
98 struct port_out_type *type;
103 TAILQ_HEAD(port_out_tailq, port_out);
105 struct port_out_runtime {
106 rte_swx_port_out_pkt_tx_t pkt_tx;
107 rte_swx_port_out_flush_t flush;
114 struct extern_type_member_func {
115 TAILQ_ENTRY(extern_type_member_func) node;
116 char name[RTE_SWX_NAME_SIZE];
117 rte_swx_extern_type_member_func_t func;
121 TAILQ_HEAD(extern_type_member_func_tailq, extern_type_member_func);
124 TAILQ_ENTRY(extern_type) node;
125 char name[RTE_SWX_NAME_SIZE];
126 struct struct_type *mailbox_struct_type;
127 rte_swx_extern_type_constructor_t constructor;
128 rte_swx_extern_type_destructor_t destructor;
129 struct extern_type_member_func_tailq funcs;
133 TAILQ_HEAD(extern_type_tailq, extern_type);
136 TAILQ_ENTRY(extern_obj) node;
137 char name[RTE_SWX_NAME_SIZE];
138 struct extern_type *type;
144 TAILQ_HEAD(extern_obj_tailq, extern_obj);
146 #ifndef RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX
147 #define RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX 8
150 struct extern_obj_runtime {
153 rte_swx_extern_type_member_func_t funcs[RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX];
160 TAILQ_ENTRY(extern_func) node;
161 char name[RTE_SWX_NAME_SIZE];
162 struct struct_type *mailbox_struct_type;
163 rte_swx_extern_func_t func;
168 TAILQ_HEAD(extern_func_tailq, extern_func);
170 struct extern_func_runtime {
172 rte_swx_extern_func_t func;
179 TAILQ_ENTRY(header) node;
180 char name[RTE_SWX_NAME_SIZE];
181 struct struct_type *st;
186 TAILQ_HEAD(header_tailq, header);
188 struct header_runtime {
193 struct header_out_runtime {
203 /* Packet headers are always in Network Byte Order (NBO), i.e. big endian.
204 * Packet meta-data fields are always assumed to be in Host Byte Order (HBO).
205 * Table entry fields can be in either NBO or HBO; they are assumed to be in HBO
206 * when transferred to packet meta-data and in NBO when transferred to packet
210 /* Notation conventions:
211 * -Header field: H = h.header.field (dst/src)
212 * -Meta-data field: M = m.field (dst/src)
213 * -Extern object mailbox field: E = e.field (dst/src)
214 * -Extern function mailbox field: F = f.field (dst/src)
215 * -Table action data field: T = t.field (src only)
216 * -Immediate value: I = 32-bit unsigned value (src only)
219 enum instruction_type {
226 INSTR_TX, /* port_out = M */
227 INSTR_TX_I, /* port_out = I */
229 /* extract h.header */
239 /* extract h.header m.last_field_size */
242 /* lookahead h.header */
256 /* validate h.header */
259 /* invalidate h.header */
260 INSTR_HDR_INVALIDATE,
264 * dst = HMEF, src = HMEFTI
266 INSTR_MOV, /* dst = MEF, src = MEFT */
267 INSTR_MOV_MH, /* dst = MEF, src = H */
268 INSTR_MOV_HM, /* dst = H, src = MEFT */
269 INSTR_MOV_HH, /* dst = H, src = H */
270 INSTR_MOV_I, /* dst = HMEF, src = I */
272 /* dma h.header t.field
273 * memcpy(h.header, t.field, sizeof(h.header))
286 * dst = HMEF, src = HMEFTI
288 INSTR_ALU_ADD, /* dst = MEF, src = MEF */
289 INSTR_ALU_ADD_MH, /* dst = MEF, src = H */
290 INSTR_ALU_ADD_HM, /* dst = H, src = MEF */
291 INSTR_ALU_ADD_HH, /* dst = H, src = H */
292 INSTR_ALU_ADD_MI, /* dst = MEF, src = I */
293 INSTR_ALU_ADD_HI, /* dst = H, src = I */
297 * dst = HMEF, src = HMEFTI
299 INSTR_ALU_SUB, /* dst = MEF, src = MEF */
300 INSTR_ALU_SUB_MH, /* dst = MEF, src = H */
301 INSTR_ALU_SUB_HM, /* dst = H, src = MEF */
302 INSTR_ALU_SUB_HH, /* dst = H, src = H */
303 INSTR_ALU_SUB_MI, /* dst = MEF, src = I */
304 INSTR_ALU_SUB_HI, /* dst = H, src = I */
307 * dst = dst '+ src[0:1] '+ src[2:3] + ...
308 * dst = H, src = {H, h.header}
310 INSTR_ALU_CKADD_FIELD, /* src = H */
311 INSTR_ALU_CKADD_STRUCT20, /* src = h.header, with sizeof(header) = 20 */
312 INSTR_ALU_CKADD_STRUCT, /* src = h.header, with any sizeof(header) */
318 INSTR_ALU_CKSUB_FIELD,
322 * dst = HMEF, src = HMEFTI
324 INSTR_ALU_AND, /* dst = MEF, src = MEFT */
325 INSTR_ALU_AND_MH, /* dst = MEF, src = H */
326 INSTR_ALU_AND_HM, /* dst = H, src = MEFT */
327 INSTR_ALU_AND_HH, /* dst = H, src = H */
328 INSTR_ALU_AND_I, /* dst = HMEF, src = I */
332 * dst = HMEF, src = HMEFTI
334 INSTR_ALU_OR, /* dst = MEF, src = MEFT */
335 INSTR_ALU_OR_MH, /* dst = MEF, src = H */
336 INSTR_ALU_OR_HM, /* dst = H, src = MEFT */
337 INSTR_ALU_OR_HH, /* dst = H, src = H */
338 INSTR_ALU_OR_I, /* dst = HMEF, src = I */
342 * dst = HMEF, src = HMEFTI
344 INSTR_ALU_XOR, /* dst = MEF, src = MEFT */
345 INSTR_ALU_XOR_MH, /* dst = MEF, src = H */
346 INSTR_ALU_XOR_HM, /* dst = H, src = MEFT */
347 INSTR_ALU_XOR_HH, /* dst = H, src = H */
348 INSTR_ALU_XOR_I, /* dst = HMEF, src = I */
352 * dst = HMEF, src = HMEFTI
354 INSTR_ALU_SHL, /* dst = MEF, src = MEF */
355 INSTR_ALU_SHL_MH, /* dst = MEF, src = H */
356 INSTR_ALU_SHL_HM, /* dst = H, src = MEF */
357 INSTR_ALU_SHL_HH, /* dst = H, src = H */
358 INSTR_ALU_SHL_MI, /* dst = MEF, src = I */
359 INSTR_ALU_SHL_HI, /* dst = H, src = I */
363 * dst = HMEF, src = HMEFTI
365 INSTR_ALU_SHR, /* dst = MEF, src = MEF */
366 INSTR_ALU_SHR_MH, /* dst = MEF, src = H */
367 INSTR_ALU_SHR_HM, /* dst = H, src = MEF */
368 INSTR_ALU_SHR_HH, /* dst = H, src = H */
369 INSTR_ALU_SHR_MI, /* dst = MEF, src = I */
370 INSTR_ALU_SHR_HI, /* dst = H, src = I */
372 /* regprefetch REGARRAY index
373 * prefetch REGARRAY[index]
376 INSTR_REGPREFETCH_RH, /* index = H */
377 INSTR_REGPREFETCH_RM, /* index = MEFT */
378 INSTR_REGPREFETCH_RI, /* index = I */
380 /* regrd dst REGARRAY index
381 * dst = REGARRAY[index]
382 * dst = HMEF, index = HMEFTI
384 INSTR_REGRD_HRH, /* dst = H, index = H */
385 INSTR_REGRD_HRM, /* dst = H, index = MEFT */
386 INSTR_REGRD_HRI, /* dst = H, index = I */
387 INSTR_REGRD_MRH, /* dst = MEF, index = H */
388 INSTR_REGRD_MRM, /* dst = MEF, index = MEFT */
389 INSTR_REGRD_MRI, /* dst = MEF, index = I */
391 /* regwr REGARRAY index src
392 * REGARRAY[index] = src
393 * index = HMEFTI, src = HMEFTI
395 INSTR_REGWR_RHH, /* index = H, src = H */
396 INSTR_REGWR_RHM, /* index = H, src = MEFT */
397 INSTR_REGWR_RHI, /* index = H, src = I */
398 INSTR_REGWR_RMH, /* index = MEFT, src = H */
399 INSTR_REGWR_RMM, /* index = MEFT, src = MEFT */
400 INSTR_REGWR_RMI, /* index = MEFT, src = I */
401 INSTR_REGWR_RIH, /* index = I, src = H */
402 INSTR_REGWR_RIM, /* index = I, src = MEFT */
403 INSTR_REGWR_RII, /* index = I, src = I */
405 /* regadd REGARRAY index src
406 * REGARRAY[index] += src
407 * index = HMEFTI, src = HMEFTI
409 INSTR_REGADD_RHH, /* index = H, src = H */
410 INSTR_REGADD_RHM, /* index = H, src = MEFT */
411 INSTR_REGADD_RHI, /* index = H, src = I */
412 INSTR_REGADD_RMH, /* index = MEFT, src = H */
413 INSTR_REGADD_RMM, /* index = MEFT, src = MEFT */
414 INSTR_REGADD_RMI, /* index = MEFT, src = I */
415 INSTR_REGADD_RIH, /* index = I, src = H */
416 INSTR_REGADD_RIM, /* index = I, src = MEFT */
417 INSTR_REGADD_RII, /* index = I, src = I */
419 /* metprefetch METARRAY index
420 * prefetch METARRAY[index]
423 INSTR_METPREFETCH_H, /* index = H */
424 INSTR_METPREFETCH_M, /* index = MEFT */
425 INSTR_METPREFETCH_I, /* index = I */
427 /* meter METARRAY index length color_in color_out
428 * color_out = meter(METARRAY[index], length, color_in)
429 * index = HMEFTI, length = HMEFT, color_in = MEFTI, color_out = MEF
431 INSTR_METER_HHM, /* index = H, length = H, color_in = MEFT */
432 INSTR_METER_HHI, /* index = H, length = H, color_in = I */
433 INSTR_METER_HMM, /* index = H, length = MEFT, color_in = MEFT */
434 INSTR_METER_HMI, /* index = H, length = MEFT, color_in = I */
435 INSTR_METER_MHM, /* index = MEFT, length = H, color_in = MEFT */
436 INSTR_METER_MHI, /* index = MEFT, length = H, color_in = I */
437 INSTR_METER_MMM, /* index = MEFT, length = MEFT, color_in = MEFT */
438 INSTR_METER_MMI, /* index = MEFT, length = MEFT, color_in = I */
439 INSTR_METER_IHM, /* index = I, length = H, color_in = MEFT */
440 INSTR_METER_IHI, /* index = I, length = H, color_in = I */
441 INSTR_METER_IMM, /* index = I, length = MEFT, color_in = MEFT */
442 INSTR_METER_IMI, /* index = I, length = MEFT, color_in = I */
449 /* learn LEARNER ACTION_NAME */
453 INSTR_LEARNER_FORGET,
455 /* extern e.obj.func */
466 /* jmpv LABEL h.header
467 * Jump if header is valid
471 /* jmpnv LABEL h.header
472 * Jump if header is invalid
477 * Jump if table lookup hit
482 * Jump if table lookup miss
489 INSTR_JMP_ACTION_HIT,
491 /* jmpna LABEL ACTION
492 * Jump if action not run
494 INSTR_JMP_ACTION_MISS,
497 * Jump if a is equal to b
498 * a = HMEFT, b = HMEFTI
500 INSTR_JMP_EQ, /* a = MEFT, b = MEFT */
501 INSTR_JMP_EQ_MH, /* a = MEFT, b = H */
502 INSTR_JMP_EQ_HM, /* a = H, b = MEFT */
503 INSTR_JMP_EQ_HH, /* a = H, b = H */
504 INSTR_JMP_EQ_I, /* (a, b) = (MEFT, I) or (a, b) = (H, I) */
507 * Jump if a is not equal to b
508 * a = HMEFT, b = HMEFTI
510 INSTR_JMP_NEQ, /* a = MEFT, b = MEFT */
511 INSTR_JMP_NEQ_MH, /* a = MEFT, b = H */
512 INSTR_JMP_NEQ_HM, /* a = H, b = MEFT */
513 INSTR_JMP_NEQ_HH, /* a = H, b = H */
514 INSTR_JMP_NEQ_I, /* (a, b) = (MEFT, I) or (a, b) = (H, I) */
517 * Jump if a is less than b
518 * a = HMEFT, b = HMEFTI
520 INSTR_JMP_LT, /* a = MEFT, b = MEFT */
521 INSTR_JMP_LT_MH, /* a = MEFT, b = H */
522 INSTR_JMP_LT_HM, /* a = H, b = MEFT */
523 INSTR_JMP_LT_HH, /* a = H, b = H */
524 INSTR_JMP_LT_MI, /* a = MEFT, b = I */
525 INSTR_JMP_LT_HI, /* a = H, b = I */
528 * Jump if a is greater than b
529 * a = HMEFT, b = HMEFTI
531 INSTR_JMP_GT, /* a = MEFT, b = MEFT */
532 INSTR_JMP_GT_MH, /* a = MEFT, b = H */
533 INSTR_JMP_GT_HM, /* a = H, b = MEFT */
534 INSTR_JMP_GT_HH, /* a = H, b = H */
535 INSTR_JMP_GT_MI, /* a = MEFT, b = I */
536 INSTR_JMP_GT_HI, /* a = H, b = I */
544 struct instr_operand {
565 uint8_t header_id[8];
566 uint8_t struct_id[8];
571 struct instr_hdr_validity {
583 struct instr_extern_obj {
588 struct instr_extern_func {
592 struct instr_dst_src {
593 struct instr_operand dst;
595 struct instr_operand src;
600 struct instr_regarray {
605 struct instr_operand idx;
610 struct instr_operand dstsrc;
620 struct instr_operand idx;
624 struct instr_operand length;
627 struct instr_operand color_in;
628 uint32_t color_in_val;
631 struct instr_operand color_out;
636 uint8_t header_id[8];
637 uint8_t struct_id[8];
648 struct instruction *ip;
651 struct instr_operand a;
657 struct instr_operand b;
663 enum instruction_type type;
666 struct instr_hdr_validity valid;
667 struct instr_dst_src mov;
668 struct instr_regarray regarray;
669 struct instr_meter meter;
670 struct instr_dma dma;
671 struct instr_dst_src alu;
672 struct instr_table table;
673 struct instr_learn learn;
674 struct instr_extern_obj ext_obj;
675 struct instr_extern_func ext_func;
676 struct instr_jmp jmp;
680 struct instruction_data {
681 char label[RTE_SWX_NAME_SIZE];
682 char jmp_label[RTE_SWX_NAME_SIZE];
683 uint32_t n_users; /* user = jmp instruction to this instruction. */
691 TAILQ_ENTRY(action) node;
692 char name[RTE_SWX_NAME_SIZE];
693 struct struct_type *st;
694 int *args_endianness; /* 0 = Host Byte Order (HBO); 1 = Network Byte Order (NBO). */
695 struct instruction *instructions;
696 uint32_t n_instructions;
700 TAILQ_HEAD(action_tailq, action);
706 TAILQ_ENTRY(table_type) node;
707 char name[RTE_SWX_NAME_SIZE];
708 enum rte_swx_table_match_type match_type;
709 struct rte_swx_table_ops ops;
712 TAILQ_HEAD(table_type_tailq, table_type);
715 enum rte_swx_table_match_type match_type;
720 TAILQ_ENTRY(table) node;
721 char name[RTE_SWX_NAME_SIZE];
722 char args[RTE_SWX_NAME_SIZE];
723 struct table_type *type; /* NULL when n_fields == 0. */
726 struct match_field *fields;
728 struct header *header; /* Only valid when n_fields > 0. */
731 struct action **actions;
732 struct action *default_action;
733 uint8_t *default_action_data;
735 int default_action_is_const;
736 uint32_t action_data_size_max;
742 TAILQ_HEAD(table_tailq, table);
744 struct table_runtime {
745 rte_swx_table_lookup_t func;
750 struct table_statistics {
751 uint64_t n_pkts_hit[2]; /* 0 = Miss, 1 = Hit. */
752 uint64_t *n_pkts_action;
759 TAILQ_ENTRY(selector) node;
760 char name[RTE_SWX_NAME_SIZE];
762 struct field *group_id_field;
763 struct field **selector_fields;
764 uint32_t n_selector_fields;
765 struct header *selector_header;
766 struct field *member_id_field;
768 uint32_t n_groups_max;
769 uint32_t n_members_per_group_max;
774 TAILQ_HEAD(selector_tailq, selector);
776 struct selector_runtime {
778 uint8_t **group_id_buffer;
779 uint8_t **selector_buffer;
780 uint8_t **member_id_buffer;
783 struct selector_statistics {
791 TAILQ_ENTRY(learner) node;
792 char name[RTE_SWX_NAME_SIZE];
795 struct field **fields;
797 struct header *header;
800 struct action **actions;
801 struct field **action_arg;
802 struct action *default_action;
803 uint8_t *default_action_data;
805 int default_action_is_const;
806 uint32_t action_data_size_max;
813 TAILQ_HEAD(learner_tailq, learner);
815 struct learner_runtime {
818 uint8_t **action_data;
821 struct learner_statistics {
822 uint64_t n_pkts_hit[2]; /* 0 = Miss, 1 = Hit. */
823 uint64_t n_pkts_learn[2]; /* 0 = Learn OK, 1 = Learn error. */
824 uint64_t n_pkts_forget;
825 uint64_t *n_pkts_action;
832 TAILQ_ENTRY(regarray) node;
833 char name[RTE_SWX_NAME_SIZE];
839 TAILQ_HEAD(regarray_tailq, regarray);
841 struct regarray_runtime {
849 struct meter_profile {
850 TAILQ_ENTRY(meter_profile) node;
851 char name[RTE_SWX_NAME_SIZE];
852 struct rte_meter_trtcm_params params;
853 struct rte_meter_trtcm_profile profile;
857 TAILQ_HEAD(meter_profile_tailq, meter_profile);
860 TAILQ_ENTRY(metarray) node;
861 char name[RTE_SWX_NAME_SIZE];
866 TAILQ_HEAD(metarray_tailq, metarray);
869 struct rte_meter_trtcm m;
870 struct meter_profile *profile;
871 enum rte_color color_mask;
874 uint64_t n_pkts[RTE_COLORS];
875 uint64_t n_bytes[RTE_COLORS];
878 struct metarray_runtime {
879 struct meter *metarray;
888 struct rte_swx_pkt pkt;
894 /* Packet headers. */
895 struct header_runtime *headers; /* Extracted or generated headers. */
896 struct header_out_runtime *headers_out; /* Emitted headers. */
897 uint8_t *header_storage;
898 uint8_t *header_out_storage;
899 uint64_t valid_headers;
900 uint32_t n_headers_out;
902 /* Packet meta-data. */
906 struct table_runtime *tables;
907 struct selector_runtime *selectors;
908 struct learner_runtime *learners;
909 struct rte_swx_table_state *table_state;
911 int hit; /* 0 = Miss, 1 = Hit. */
915 /* Extern objects and functions. */
916 struct extern_obj_runtime *extern_objs;
917 struct extern_func_runtime *extern_funcs;
920 struct instruction *ip;
921 struct instruction *ret;
/* 64-bit mask helpers: test, set and clear the bit at position pos (0..63).
 * GET yields a non-zero value (not necessarily 1) when the bit is set.
 */
924 #define MASK64_BIT_GET(mask, pos) ((mask) & (1LLU << (pos)))
925 #define MASK64_BIT_SET(mask, pos) ((mask) | (1LLU << (pos)))
926 #define MASK64_BIT_CLR(mask, pos) ((mask) & ~(1LLU << (pos)))
/* Non-zero when header_id's bit is set in the thread's valid_headers mask,
 * i.e. the header is currently valid for this thread.
 */
928 #define HEADER_VALID(thread, header_id) \
929 MASK64_BIT_GET((thread)->valid_headers, header_id)
931 #define ALU(thread, ip, operator) \
933 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
934 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
935 uint64_t dst64 = *dst64_ptr; \
936 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
937 uint64_t dst = dst64 & dst64_mask; \
939 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
940 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
941 uint64_t src64 = *src64_ptr; \
942 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
943 uint64_t src = src64 & src64_mask; \
945 uint64_t result = dst operator src; \
947 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
950 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
952 #define ALU_MH(thread, ip, operator) \
954 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
955 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
956 uint64_t dst64 = *dst64_ptr; \
957 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
958 uint64_t dst = dst64 & dst64_mask; \
960 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
961 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
962 uint64_t src64 = *src64_ptr; \
963 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
965 uint64_t result = dst operator src; \
967 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
970 #define ALU_HM(thread, ip, operator) \
972 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
973 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
974 uint64_t dst64 = *dst64_ptr; \
975 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
976 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
978 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
979 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
980 uint64_t src64 = *src64_ptr; \
981 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
982 uint64_t src = src64 & src64_mask; \
984 uint64_t result = dst operator src; \
985 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
987 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
990 #define ALU_HM_FAST(thread, ip, operator) \
992 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
993 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
994 uint64_t dst64 = *dst64_ptr; \
995 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
996 uint64_t dst = dst64 & dst64_mask; \
998 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
999 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
1000 uint64_t src64 = *src64_ptr; \
1001 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
1002 uint64_t src = hton64(src64 & src64_mask) >> (64 - (ip)->alu.dst.n_bits); \
1004 uint64_t result = dst operator src; \
1006 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
1009 #define ALU_HH(thread, ip, operator) \
1011 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
1012 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
1013 uint64_t dst64 = *dst64_ptr; \
1014 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
1015 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
1017 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
1018 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
1019 uint64_t src64 = *src64_ptr; \
1020 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
1022 uint64_t result = dst operator src; \
1023 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
1025 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
1028 #define ALU_HH_FAST(thread, ip, operator) \
1030 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
1031 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
1032 uint64_t dst64 = *dst64_ptr; \
1033 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
1034 uint64_t dst = dst64 & dst64_mask; \
1036 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
1037 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
1038 uint64_t src64 = *src64_ptr; \
1039 uint64_t src = (src64 << (64 - (ip)->alu.src.n_bits)) >> (64 - (ip)->alu.dst.n_bits); \
1041 uint64_t result = dst operator src; \
1043 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
1050 #define ALU_HM_FAST ALU
1052 #define ALU_HH_FAST ALU
1056 #define ALU_I(thread, ip, operator) \
1058 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
1059 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
1060 uint64_t dst64 = *dst64_ptr; \
1061 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
1062 uint64_t dst = dst64 & dst64_mask; \
1064 uint64_t src = (ip)->alu.src_val; \
1066 uint64_t result = dst operator src; \
1068 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
1071 #define ALU_MI ALU_I
1073 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1075 #define ALU_HI(thread, ip, operator) \
1077 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
1078 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
1079 uint64_t dst64 = *dst64_ptr; \
1080 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
1081 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
1083 uint64_t src = (ip)->alu.src_val; \
1085 uint64_t result = dst operator src; \
1086 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
1088 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
1093 #define ALU_HI ALU_I
1097 #define MOV(thread, ip) \
1099 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
1100 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
1101 uint64_t dst64 = *dst64_ptr; \
1102 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
1104 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
1105 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
1106 uint64_t src64 = *src64_ptr; \
1107 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \
1108 uint64_t src = src64 & src64_mask; \
1110 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
1113 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1115 #define MOV_MH(thread, ip) \
1117 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
1118 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
1119 uint64_t dst64 = *dst64_ptr; \
1120 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
1122 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
1123 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
1124 uint64_t src64 = *src64_ptr; \
1125 uint64_t src = ntoh64(src64) >> (64 - (ip)->mov.src.n_bits); \
1127 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
1130 #define MOV_HM(thread, ip) \
1132 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
1133 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
1134 uint64_t dst64 = *dst64_ptr; \
1135 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
1137 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
1138 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
1139 uint64_t src64 = *src64_ptr; \
1140 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \
1141 uint64_t src = src64 & src64_mask; \
1143 src = hton64(src) >> (64 - (ip)->mov.dst.n_bits); \
1144 *dst64_ptr = (dst64 & ~dst64_mask) | src; \
1147 #define MOV_HH(thread, ip) \
1149 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
1150 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
1151 uint64_t dst64 = *dst64_ptr; \
1152 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
1154 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
1155 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
1156 uint64_t src64 = *src64_ptr; \
1158 uint64_t src = src64 << (64 - (ip)->mov.src.n_bits); \
1159 src = src >> (64 - (ip)->mov.dst.n_bits); \
1160 *dst64_ptr = (dst64 & ~dst64_mask) | src; \
1171 #define MOV_I(thread, ip) \
1173 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
1174 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
1175 uint64_t dst64 = *dst64_ptr; \
1176 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
1178 uint64_t src = (ip)->mov.src_val; \
1180 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
1183 #define JMP_CMP(thread, ip, operator) \
1185 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1186 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1187 uint64_t a64 = *a64_ptr; \
1188 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
1189 uint64_t a = a64 & a64_mask; \
1191 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1192 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1193 uint64_t b64 = *b64_ptr; \
1194 uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits); \
1195 uint64_t b = b64 & b64_mask; \
1197 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1200 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1202 #define JMP_CMP_MH(thread, ip, operator) \
1204 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1205 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1206 uint64_t a64 = *a64_ptr; \
1207 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
1208 uint64_t a = a64 & a64_mask; \
1210 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1211 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1212 uint64_t b64 = *b64_ptr; \
1213 uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits); \
1215 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1218 #define JMP_CMP_HM(thread, ip, operator) \
1220 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1221 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1222 uint64_t a64 = *a64_ptr; \
1223 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
1225 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1226 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1227 uint64_t b64 = *b64_ptr; \
1228 uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits); \
1229 uint64_t b = b64 & b64_mask; \
1231 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1234 #define JMP_CMP_HH(thread, ip, operator) \
1236 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1237 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1238 uint64_t a64 = *a64_ptr; \
1239 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
1241 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1242 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1243 uint64_t b64 = *b64_ptr; \
1244 uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits); \
1246 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1249 #define JMP_CMP_HH_FAST(thread, ip, operator) \
1251 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1252 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1253 uint64_t a64 = *a64_ptr; \
1254 uint64_t a = a64 << (64 - (ip)->jmp.a.n_bits); \
1256 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1257 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1258 uint64_t b64 = *b64_ptr; \
1259 uint64_t b = b64 << (64 - (ip)->jmp.b.n_bits); \
1261 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1266 #define JMP_CMP_MH JMP_CMP
1267 #define JMP_CMP_HM JMP_CMP
1268 #define JMP_CMP_HH JMP_CMP
1269 #define JMP_CMP_HH_FAST JMP_CMP
1273 #define JMP_CMP_I(thread, ip, operator) \
1275 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1276 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1277 uint64_t a64 = *a64_ptr; \
1278 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
1279 uint64_t a = a64 & a64_mask; \
1281 uint64_t b = (ip)->jmp.b_val; \
1283 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1286 #define JMP_CMP_MI JMP_CMP_I
1288 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1290 #define JMP_CMP_HI(thread, ip, operator) \
1292 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1293 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1294 uint64_t a64 = *a64_ptr; \
1295 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
1297 uint64_t b = (ip)->jmp.b_val; \
1299 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1304 #define JMP_CMP_HI JMP_CMP_I
1308 #define METADATA_READ(thread, offset, n_bits) \
1310 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
1311 uint64_t m64 = *m64_ptr; \
1312 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
1316 #define METADATA_WRITE(thread, offset, n_bits, value) \
1318 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
1319 uint64_t m64 = *m64_ptr; \
1320 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
1322 uint64_t m_new = value; \
1324 *m64_ptr = (m64 & ~m64_mask) | (m_new & m64_mask); \
1327 #ifndef RTE_SWX_PIPELINE_THREADS_MAX
1328 #define RTE_SWX_PIPELINE_THREADS_MAX 16
1331 struct rte_swx_pipeline {
1332 struct struct_type_tailq struct_types;
1333 struct port_in_type_tailq port_in_types;
1334 struct port_in_tailq ports_in;
1335 struct port_out_type_tailq port_out_types;
1336 struct port_out_tailq ports_out;
1337 struct extern_type_tailq extern_types;
1338 struct extern_obj_tailq extern_objs;
1339 struct extern_func_tailq extern_funcs;
1340 struct header_tailq headers;
1341 struct struct_type *metadata_st;
1342 uint32_t metadata_struct_id;
1343 struct action_tailq actions;
1344 struct table_type_tailq table_types;
1345 struct table_tailq tables;
1346 struct selector_tailq selectors;
1347 struct learner_tailq learners;
1348 struct regarray_tailq regarrays;
1349 struct meter_profile_tailq meter_profiles;
1350 struct metarray_tailq metarrays;
1352 struct port_in_runtime *in;
1353 struct port_out_runtime *out;
1354 struct instruction **action_instructions;
1355 struct rte_swx_table_state *table_state;
1356 struct table_statistics *table_stats;
1357 struct selector_statistics *selector_stats;
1358 struct learner_statistics *learner_stats;
1359 struct regarray_runtime *regarray_runtime;
1360 struct metarray_runtime *metarray_runtime;
1361 struct instruction *instructions;
1362 struct thread threads[RTE_SWX_PIPELINE_THREADS_MAX];
1365 uint32_t n_ports_in;
1366 uint32_t n_ports_out;
1367 uint32_t n_extern_objs;
1368 uint32_t n_extern_funcs;
1371 uint32_t n_selectors;
1372 uint32_t n_learners;
1373 uint32_t n_regarrays;
1374 uint32_t n_metarrays;
1378 uint32_t n_instructions;
1387 pipeline_port_inc(struct rte_swx_pipeline *p)
1389 p->port_id = (p->port_id + 1) & (p->n_ports_in - 1);
1393 thread_ip_reset(struct rte_swx_pipeline *p, struct thread *t)
1395 t->ip = p->instructions;
1399 thread_ip_set(struct thread *t, struct instruction *ip)
1405 thread_ip_action_call(struct rte_swx_pipeline *p,
1410 t->ip = p->action_instructions[action_id];
1414 thread_ip_inc(struct rte_swx_pipeline *p);
1417 thread_ip_inc(struct rte_swx_pipeline *p)
1419 struct thread *t = &p->threads[p->thread_id];
1425 thread_ip_inc_cond(struct thread *t, int cond)
1431 thread_yield(struct rte_swx_pipeline *p)
1433 p->thread_id = (p->thread_id + 1) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
1437 thread_yield_cond(struct rte_swx_pipeline *p, int cond)
1439 p->thread_id = (p->thread_id + cond) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
/*
 * rx instruction: receive one packet from the current input port into the
 * thread's packet slot, reset the per-packet parser state, record the input
 * port id into packet metadata, and advance to the next input port.
 * Returns non-zero when a packet was actually received.
 */
1446 __instr_rx_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
1448 struct port_in_runtime *port = &p->in[p->port_id];
1449 struct rte_swx_pkt *pkt = &t->pkt;
1453 pkt_received = port->pkt_rx(port->obj, pkt);
/* Cache the pointer to the first parse byte and warm the cache line. */
1454 t->ptr = &pkt->pkt[pkt->offset];
1455 rte_prefetch0(t->ptr);
1457 TRACE("[Thread %2u] rx %s from port %u\n",
1459 pkt_received ? "1 pkt" : "0 pkts",
/* Fresh packet: no headers parsed yet, no headers queued for emission. */
1463 t->valid_headers = 0;
1464 t->n_headers_out = 0;
/* Expose the input port id to the program through the metadata struct. */
1467 METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, p->port_id);
/* Snapshot the current table state for this packet's lookups. */
1470 t->table_state = p->table_state;
1473 pipeline_port_inc(p);
1475 return pkt_received;
/*
 * Top-level rx executor for the current thread: runs __instr_rx_exec() and
 * advances the instruction pointer only when a packet was received, so an
 * empty port re-executes rx on the next pass.
 */
1479 instr_rx_exec(struct rte_swx_pipeline *p)
1481 struct thread *t = &p->threads[p->thread_id];
1482 struct instruction *ip = t->ip;
1486 pkt_received = __instr_rx_exec(p, t, ip);
1489 thread_ip_inc_cond(t, pkt_received);
/*
 * Write the queued output headers back in front of the payload before tx.
 * Three cases, from cheapest to most general:
 *  1) single contiguous header ending exactly at the payload: just widen the
 *     packet window (no copy);
 *  2) encapsulation: new outer header (h0) + untouched inner header (h1)
 *     adjacent to the payload: one memcpy of the outer header;
 *  3) anything else: gather all output headers into scratch storage, then
 *     copy the whole block in front of the payload.
 */
1497 emit_handler(struct thread *t)
1499 struct header_out_runtime *h0 = &t->headers_out[0];
1500 struct header_out_runtime *h1 = &t->headers_out[1];
1501 uint32_t offset = 0, i;
1503 /* No header change or header decapsulation. */
1504 if ((t->n_headers_out == 1) &&
1505 (h0->ptr + h0->n_bytes == t->ptr)) {
1506 TRACE("Emit handler: no header change or header decap.\n");
/* Headers already sit in place: just move the packet window left. */
1508 t->pkt.offset -= h0->n_bytes;
1509 t->pkt.length += h0->n_bytes;
1514 /* Header encapsulation (optionally, with prior header decapsulation). */
1515 if ((t->n_headers_out == 2) &&
1516 (h1->ptr + h1->n_bytes == t->ptr) &&
1517 (h0->ptr == h0->ptr0)) {
1520 TRACE("Emit handler: header encapsulation.\n");
/* Copy only the new outer header (h0); h1 is already contiguous with data. */
1522 offset = h0->n_bytes + h1->n_bytes;
1523 memcpy(t->ptr - offset, h0->ptr, h0->n_bytes);
1524 t->pkt.offset -= offset;
1525 t->pkt.length += offset;
1530 /* For any other case. */
1531 TRACE("Emit handler: complex case.\n");
/* Gather all output headers into contiguous scratch storage... */
1533 for (i = 0; i < t->n_headers_out; i++) {
1534 struct header_out_runtime *h = &t->headers_out[i];
1536 memcpy(&t->header_out_storage[offset], h->ptr, h->n_bytes);
1537 offset += h->n_bytes;
/* ...then install the whole block right in front of the payload. */
1541 memcpy(t->ptr - offset, t->header_out_storage, offset);
1542 t->pkt.offset -= offset;
1543 t->pkt.length += offset;
/*
 * tx instruction: read the output port id from packet metadata and hand the
 * packet to that port's tx callback.
 */
1548 __instr_tx_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
1550 uint64_t port_id = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
1551 struct port_out_runtime *port = &p->out[port_id];
1552 struct rte_swx_pkt *pkt = &t->pkt;
1554 TRACE("[Thread %2u]: tx 1 pkt to port %u\n",
1562 port->pkt_tx(port->obj, pkt);
/*
 * tx immediate variant: the output port id is baked into the instruction
 * (ip->io.io.val) instead of being read from metadata.
 */
1566 __instr_tx_i_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
1568 uint64_t port_id = ip->io.io.val;
1569 struct port_out_runtime *port = &p->out[port_id];
1570 struct rte_swx_pkt *pkt = &t->pkt;
1572 TRACE("[Thread %2u]: tx (i) 1 pkt to port %u\n",
1580 port->pkt_tx(port->obj, pkt);
/*
 * Extract n_extract consecutive fixed-size headers from the packet: each
 * header's struct pointer is aimed at the current parse position and its
 * valid bit is set; parse position/offset/length bookkeeping is kept in
 * locals and written back once at the end.
 */
1587 __instr_hdr_extract_many_exec(struct rte_swx_pipeline *p __rte_unused,
1589 const struct instruction *ip,
1592 uint64_t valid_headers = t->valid_headers;
1593 uint8_t *ptr = t->ptr;
1594 uint32_t offset = t->pkt.offset;
1595 uint32_t length = t->pkt.length;
1598 for (i = 0; i < n_extract; i++) {
1599 uint32_t header_id = ip->io.hdr.header_id[i];
1600 uint32_t struct_id = ip->io.hdr.struct_id[i];
1601 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
1603 TRACE("[Thread %2u]: extract header %u (%u bytes)\n",
/* Zero-copy extract: the header struct aliases the packet buffer. */
1609 t->structs[struct_id] = ptr;
1610 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
/* Commit the accumulated parser state back to the thread. */
1619 t->valid_headers = valid_headers;
1622 t->pkt.offset = offset;
1623 t->pkt.length = length;
/*
 * Thin wrappers over __instr_hdr_extract_many_exec() for 1..8 fused extract
 * instructions; the fused count is fixed per wrapper at translation time.
 */
1628 __instr_hdr_extract_exec(struct rte_swx_pipeline *p,
1630 const struct instruction *ip)
1632 __instr_hdr_extract_many_exec(p, t, ip, 1);
1636 __instr_hdr_extract2_exec(struct rte_swx_pipeline *p,
1638 const struct instruction *ip)
1640 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n", p->thread_id);
1642 __instr_hdr_extract_many_exec(p, t, ip, 2);
1646 __instr_hdr_extract3_exec(struct rte_swx_pipeline *p,
1648 const struct instruction *ip)
1650 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n", p->thread_id);
1652 __instr_hdr_extract_many_exec(p, t, ip, 3);
1656 __instr_hdr_extract4_exec(struct rte_swx_pipeline *p,
1658 const struct instruction *ip)
1660 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n", p->thread_id);
1662 __instr_hdr_extract_many_exec(p, t, ip, 4);
1666 __instr_hdr_extract5_exec(struct rte_swx_pipeline *p,
1668 const struct instruction *ip)
1670 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n", p->thread_id);
1672 __instr_hdr_extract_many_exec(p, t, ip, 5);
1676 __instr_hdr_extract6_exec(struct rte_swx_pipeline *p,
1678 const struct instruction *ip)
1680 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n", p->thread_id);
1682 __instr_hdr_extract_many_exec(p, t, ip, 6);
1686 __instr_hdr_extract7_exec(struct rte_swx_pipeline *p,
1688 const struct instruction *ip)
1690 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n", p->thread_id);
1692 __instr_hdr_extract_many_exec(p, t, ip, 7);
1696 __instr_hdr_extract8_exec(struct rte_swx_pipeline *p,
1698 const struct instruction *ip)
1700 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n", p->thread_id);
1702 __instr_hdr_extract_many_exec(p, t, ip, 8);
/*
 * Variable-size header extract: the header's fixed part (n_bytes from the
 * instruction) is extended by a run-time byte count read from metadata
 * (n_bytes_last), and the combined size is recorded in the header runtime.
 */
1706 __instr_hdr_extract_m_exec(struct rte_swx_pipeline *p __rte_unused,
1708 const struct instruction *ip)
1710 uint64_t valid_headers = t->valid_headers;
1711 uint8_t *ptr = t->ptr;
1712 uint32_t offset = t->pkt.offset;
1713 uint32_t length = t->pkt.length;
/* Run-time tail size comes from a metadata field selected by the instruction. */
1715 uint32_t n_bytes_last = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
1716 uint32_t header_id = ip->io.hdr.header_id[0];
1717 uint32_t struct_id = ip->io.hdr.struct_id[0];
1718 uint32_t n_bytes = ip->io.hdr.n_bytes[0];
1720 struct header_runtime *h = &t->headers[header_id];
1722 TRACE("[Thread %2u]: extract header %u (%u + %u bytes)\n",
/* Total header size = fixed part + variable tail. */
1728 n_bytes += n_bytes_last;
/* Zero-copy extract and per-header size bookkeeping. */
1731 t->structs[struct_id] = ptr;
1732 t->valid_headers = MASK64_BIT_SET(valid_headers, header_id);
1733 h->n_bytes = n_bytes;
/* Advance the parse position past the whole (fixed + variable) header. */
1736 t->pkt.offset = offset + n_bytes;
1737 t->pkt.length = length - n_bytes;
1738 t->ptr = ptr + n_bytes;
/*
 * lookahead: map a header struct onto the current parse position and mark it
 * valid WITHOUT consuming packet bytes -- note that, unlike extract, the
 * visible lines do not advance t->ptr / pkt.offset / pkt.length.
 */
1742 __instr_hdr_lookahead_exec(struct rte_swx_pipeline *p __rte_unused,
1744 const struct instruction *ip)
1746 uint64_t valid_headers = t->valid_headers;
1747 uint8_t *ptr = t->ptr;
1749 uint32_t header_id = ip->io.hdr.header_id[0];
1750 uint32_t struct_id = ip->io.hdr.struct_id[0];
1752 TRACE("[Thread %2u]: lookahead header %u\n",
1757 t->structs[struct_id] = ptr;
1758 t->valid_headers = MASK64_BIT_SET(valid_headers, header_id);
/*
 * Queue n_emit headers for emission. Invalid headers are skipped. Headers
 * that are contiguous in memory with the previously queued output header are
 * merged into the same headers_out[] entry (extending its byte count) so the
 * later emit_handler() can move them with a single copy; non-contiguous
 * headers start a new entry.
 * NOTE(review): several statements of this function are elided here -- the
 * comments describe only the visible lines.
 */
1765 __instr_hdr_emit_many_exec(struct rte_swx_pipeline *p __rte_unused,
1767 const struct instruction *ip,
1770 uint64_t valid_headers = t->valid_headers;
1771 uint32_t n_headers_out = t->n_headers_out;
/* ho tracks the most recently queued output header (merge candidate). */
1772 struct header_out_runtime *ho = &t->headers_out[n_headers_out - 1];
1773 uint8_t *ho_ptr = NULL;
1774 uint32_t ho_nbytes = 0, first = 1, i;
1776 for (i = 0; i < n_emit; i++) {
1777 uint32_t header_id = ip->io.hdr.header_id[i];
1778 uint32_t struct_id = ip->io.hdr.struct_id[i];
1780 struct header_runtime *hi = &t->headers[header_id];
1781 uint8_t *hi_ptr0 = hi->ptr0;
1782 uint32_t n_bytes = hi->n_bytes;
1784 uint8_t *hi_ptr = t->structs[struct_id];
/* Only valid headers get emitted; invalid ones are silently skipped. */
1786 if (!MASK64_BIT_GET(valid_headers, header_id))
1789 TRACE("[Thread %2u]: emit header %u\n",
/* First header queued on this packet: start headers_out[0] from scratch. */
1797 if (!t->n_headers_out) {
1798 ho = &t->headers_out[0];
1804 ho_nbytes = n_bytes;
1811 ho_nbytes = ho->n_bytes;
/* Contiguous with the previous entry: merge by extending its byte count. */
1815 if (ho_ptr + ho_nbytes == hi_ptr) {
1816 ho_nbytes += n_bytes;
1818 ho->n_bytes = ho_nbytes;
1825 ho_nbytes = n_bytes;
/* Commit the last entry's size and the updated output header count. */
1831 ho->n_bytes = ho_nbytes;
1832 t->n_headers_out = n_headers_out;
/*
 * Fused emit wrappers: emit 1..8 headers via __instr_hdr_emit_many_exec(),
 * with the "*_tx" variants also transmitting the packet immediately after
 * (emitN + tx fused into one instruction by the translator).
 */
1836 __instr_hdr_emit_exec(struct rte_swx_pipeline *p,
1838 const struct instruction *ip)
1840 __instr_hdr_emit_many_exec(p, t, ip, 1);
1844 __instr_hdr_emit_tx_exec(struct rte_swx_pipeline *p,
1846 const struct instruction *ip)
1848 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n", p->thread_id);
1850 __instr_hdr_emit_many_exec(p, t, ip, 1);
1851 __instr_tx_exec(p, t, ip);
1855 __instr_hdr_emit2_tx_exec(struct rte_swx_pipeline *p,
1857 const struct instruction *ip)
1859 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n", p->thread_id);
1861 __instr_hdr_emit_many_exec(p, t, ip, 2);
1862 __instr_tx_exec(p, t, ip);
1866 __instr_hdr_emit3_tx_exec(struct rte_swx_pipeline *p,
1868 const struct instruction *ip)
1870 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n", p->thread_id);
1872 __instr_hdr_emit_many_exec(p, t, ip, 3);
1873 __instr_tx_exec(p, t, ip);
1877 __instr_hdr_emit4_tx_exec(struct rte_swx_pipeline *p,
1879 const struct instruction *ip)
1881 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n", p->thread_id);
1883 __instr_hdr_emit_many_exec(p, t, ip, 4);
1884 __instr_tx_exec(p, t, ip);
1888 __instr_hdr_emit5_tx_exec(struct rte_swx_pipeline *p,
1890 const struct instruction *ip)
1892 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n", p->thread_id);
1894 __instr_hdr_emit_many_exec(p, t, ip, 5);
1895 __instr_tx_exec(p, t, ip);
1899 __instr_hdr_emit6_tx_exec(struct rte_swx_pipeline *p,
1901 const struct instruction *ip)
1903 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n", p->thread_id);
1905 __instr_hdr_emit_many_exec(p, t, ip, 6);
1906 __instr_tx_exec(p, t, ip);
1910 __instr_hdr_emit7_tx_exec(struct rte_swx_pipeline *p,
1912 const struct instruction *ip)
1914 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n", p->thread_id);
1916 __instr_hdr_emit_many_exec(p, t, ip, 7);
1917 __instr_tx_exec(p, t, ip);
1921 __instr_hdr_emit8_tx_exec(struct rte_swx_pipeline *p,
1923 const struct instruction *ip)
1925 TRACE("[Thread %2u] *** The next 9 instructions are fused. ***\n", p->thread_id);
1927 __instr_hdr_emit_many_exec(p, t, ip, 8);
1928 __instr_tx_exec(p, t, ip);
/*
 * validate: set the valid bit of the header selected by the instruction.
 */
1935 __instr_hdr_validate_exec(struct rte_swx_pipeline *p __rte_unused,
1937 const struct instruction *ip)
1939 uint32_t header_id = ip->valid.header_id;
1941 TRACE("[Thread %2u] validate header %u\n", p->thread_id, header_id);
1944 t->valid_headers = MASK64_BIT_SET(t->valid_headers, header_id);
/*
 * invalidate: clear the valid bit of the header selected by the instruction.
 */
1951 __instr_hdr_invalidate_exec(struct rte_swx_pipeline *p __rte_unused,
1953 const struct instruction *ip)
1955 uint32_t header_id = ip->valid.header_id;
1957 TRACE("[Thread %2u] invalidate header %u\n", p->thread_id, header_id);
1960 t->valid_headers = MASK64_BIT_CLR(t->valid_headers, header_id);
/*
 * learn instruction: add the current key to the learner table that this
 * thread last looked up (t->learner_id), binding it to the action given by
 * the instruction; the add status is folded into the learner statistics.
 * Learner table states are stored after all regular tables and selectors,
 * hence the n_tables + n_selectors + learner_id indexing.
 */
1967 __instr_learn_exec(struct rte_swx_pipeline *p,
1969 const struct instruction *ip)
1971 uint64_t action_id = ip->learn.action_id;
1972 uint32_t learner_id = t->learner_id;
1973 struct rte_swx_table_state *ts = &t->table_state[p->n_tables +
1974 p->n_selectors + learner_id];
1975 struct learner_runtime *l = &t->learners[learner_id];
1976 struct learner_statistics *stats = &p->learner_stats[learner_id];
1980 status = rte_swx_table_learner_add(ts->obj,
1984 l->action_data[action_id]);
1986 TRACE("[Thread %2u] learner %u learn %s\n",
1989 status ? "ok" : "error");
/* Per-status packet counter: index 0/1 distinguishes failure/success. */
1991 stats->n_pkts_learn[status] += 1;
/*
 * forget instruction: delete the current entry from the learner table that
 * this thread last looked up, then bump the forget statistics counter.
 */
1998 __instr_forget_exec(struct rte_swx_pipeline *p,
2000 const struct instruction *ip __rte_unused)
2002 uint32_t learner_id = t->learner_id;
/* Same table-state layout as learn: tables, then selectors, then learners. */
2003 struct rte_swx_table_state *ts = &t->table_state[p->n_tables +
2004 p->n_selectors + learner_id];
2005 struct learner_runtime *l = &t->learners[learner_id];
2006 struct learner_statistics *stats = &p->learner_stats[learner_id];
2009 rte_swx_table_learner_delete(ts->obj, l->mailbox);
2011 TRACE("[Thread %2u] learner %u forget\n",
2015 stats->n_pkts_forget += 1;
/*
 * extern object member-function call: dispatch through the per-thread
 * extern object runtime. Returns the function's completion status ("done"),
 * which the caller uses to decide whether to re-execute after a yield.
 */
2021 static inline uint32_t
2022 __instr_extern_obj_exec(struct rte_swx_pipeline *p __rte_unused,
2024 const struct instruction *ip)
2026 uint32_t obj_id = ip->ext_obj.ext_obj_id;
2027 uint32_t func_id = ip->ext_obj.func_id;
2028 struct extern_obj_runtime *obj = &t->extern_objs[obj_id];
2029 rte_swx_extern_type_member_func_t func = obj->funcs[func_id];
2032 TRACE("[Thread %2u] extern obj %u member func %u\n",
/* The mailbox carries the in/out arguments between program and extern. */
2037 done = func(obj->obj, obj->mailbox);
/*
 * extern (free) function call: same contract as above, but the function is
 * standalone and only takes its mailbox.
 */
2042 static inline uint32_t
2043 __instr_extern_func_exec(struct rte_swx_pipeline *p __rte_unused,
2045 const struct instruction *ip)
2047 uint32_t ext_func_id = ip->ext_func.ext_func_id;
2048 struct extern_func_runtime *ext_func = &t->extern_funcs[ext_func_id];
2049 rte_swx_extern_func_t func = ext_func->func;
2052 TRACE("[Thread %2u] extern func %u\n",
2056 done = func(ext_func->mailbox);
/*
 * mov instruction family. The suffix encodes the operand classes:
 * (none) = both operands host byte order (metadata/action data),
 * mh = meta dst / header src, hm = header dst / meta src, hh = both header
 * (network byte order), i = immediate source value.
 */
2065 __instr_mov_exec(struct rte_swx_pipeline *p __rte_unused,
2067 const struct instruction *ip)
2069 TRACE("[Thread %2u] mov\n", p->thread_id);
2075 __instr_mov_mh_exec(struct rte_swx_pipeline *p __rte_unused,
2077 const struct instruction *ip)
2079 TRACE("[Thread %2u] mov (mh)\n", p->thread_id);
2085 __instr_mov_hm_exec(struct rte_swx_pipeline *p __rte_unused,
2087 const struct instruction *ip)
2089 TRACE("[Thread %2u] mov (hm)\n", p->thread_id);
2095 __instr_mov_hh_exec(struct rte_swx_pipeline *p __rte_unused,
2097 const struct instruction *ip)
2099 TRACE("[Thread %2u] mov (hh)\n", p->thread_id);
2105 __instr_mov_i_exec(struct rte_swx_pipeline *p __rte_unused,
2107 const struct instruction *ip)
2109 TRACE("[Thread %2u] mov m.f %" PRIx64 "\n", p->thread_id, ip->mov.src_val);
/*
 * dma instruction: bulk-copy action data (table entry, struct id 0) into one
 * or more headers. If a destination header is not valid yet, the copy goes
 * to its backing storage (ptr0) and the header is then marked valid.
 */
2118 __instr_dma_ht_many_exec(struct rte_swx_pipeline *p __rte_unused,
2120 const struct instruction *ip,
/* Struct 0 is the action-data struct filled in by the table lookup. */
2123 uint8_t *action_data = t->structs[0];
2124 uint64_t valid_headers = t->valid_headers;
2127 for (i = 0; i < n_dma; i++) {
2128 uint32_t header_id = ip->dma.dst.header_id[i];
2129 uint32_t struct_id = ip->dma.dst.struct_id[i];
2130 uint32_t offset = ip->dma.src.offset[i];
2131 uint32_t n_bytes = ip->dma.n_bytes[i];
2133 struct header_runtime *h = &t->headers[header_id];
2134 uint8_t *h_ptr0 = h->ptr0;
2135 uint8_t *h_ptr = t->structs[struct_id];
/* Valid header: write in place; invalid: write to backing storage. */
2137 void *dst = MASK64_BIT_GET(valid_headers, header_id) ?
2139 void *src = &action_data[offset];
2141 TRACE("[Thread %2u] dma h.s t.f\n", p->thread_id);
2144 memcpy(dst, src, n_bytes);
2145 t->structs[struct_id] = dst;
2146 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
2149 t->valid_headers = valid_headers;
/*
 * Fused dma wrappers for 1..8 consecutive dma instructions, all delegating
 * to __instr_dma_ht_many_exec() with the fused count fixed per wrapper.
 */
2153 __instr_dma_ht_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
2155 __instr_dma_ht_many_exec(p, t, ip, 1);
2159 __instr_dma_ht2_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
2161 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n", p->thread_id);
2163 __instr_dma_ht_many_exec(p, t, ip, 2);
2167 __instr_dma_ht3_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
2169 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n", p->thread_id);
2171 __instr_dma_ht_many_exec(p, t, ip, 3);
2175 __instr_dma_ht4_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
2177 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n", p->thread_id);
2179 __instr_dma_ht_many_exec(p, t, ip, 4);
2183 __instr_dma_ht5_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
2185 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n", p->thread_id);
2187 __instr_dma_ht_many_exec(p, t, ip, 5);
2191 __instr_dma_ht6_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
2193 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n", p->thread_id);
2195 __instr_dma_ht_many_exec(p, t, ip, 6);
2199 __instr_dma_ht7_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
2201 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n", p->thread_id);
2203 __instr_dma_ht_many_exec(p, t, ip, 7);
2207 __instr_dma_ht8_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
2209 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n", p->thread_id);
2211 __instr_dma_ht_many_exec(p, t, ip, 8);
/*
 * ALU instruction family (add/sub/shl/shr/and/or/xor). Operand-class
 * suffixes: (none) = both host byte order, mh = meta dst / header src,
 * hm = header dst / meta src, hh = both header (network byte order),
 * mi/hi/i = immediate source. Bodies are generated by the ALU_* macros
 * (elided here); only the TRACE lines and a few macro invocations remain
 * visible in this extract.
 */
2218 __instr_alu_add_exec(struct rte_swx_pipeline *p __rte_unused,
2220 const struct instruction *ip)
2222 TRACE("[Thread %2u] add\n", p->thread_id);
2228 __instr_alu_add_mh_exec(struct rte_swx_pipeline *p __rte_unused,
2230 const struct instruction *ip)
2232 TRACE("[Thread %2u] add (mh)\n", p->thread_id);
2238 __instr_alu_add_hm_exec(struct rte_swx_pipeline *p __rte_unused,
2240 const struct instruction *ip)
2242 TRACE("[Thread %2u] add (hm)\n", p->thread_id);
2248 __instr_alu_add_hh_exec(struct rte_swx_pipeline *p __rte_unused,
2250 const struct instruction *ip)
2252 TRACE("[Thread %2u] add (hh)\n", p->thread_id);
2258 __instr_alu_add_mi_exec(struct rte_swx_pipeline *p __rte_unused,
2260 const struct instruction *ip)
2262 TRACE("[Thread %2u] add (mi)\n", p->thread_id);
2268 __instr_alu_add_hi_exec(struct rte_swx_pipeline *p __rte_unused,
2270 const struct instruction *ip)
2272 TRACE("[Thread %2u] add (hi)\n", p->thread_id);
/* Subtraction variants. */
2278 __instr_alu_sub_exec(struct rte_swx_pipeline *p __rte_unused,
2280 const struct instruction *ip)
2282 TRACE("[Thread %2u] sub\n", p->thread_id);
2288 __instr_alu_sub_mh_exec(struct rte_swx_pipeline *p __rte_unused,
2290 const struct instruction *ip)
2292 TRACE("[Thread %2u] sub (mh)\n", p->thread_id);
2298 __instr_alu_sub_hm_exec(struct rte_swx_pipeline *p __rte_unused,
2300 const struct instruction *ip)
2302 TRACE("[Thread %2u] sub (hm)\n", p->thread_id);
2308 __instr_alu_sub_hh_exec(struct rte_swx_pipeline *p __rte_unused,
2310 const struct instruction *ip)
2312 TRACE("[Thread %2u] sub (hh)\n", p->thread_id);
2318 __instr_alu_sub_mi_exec(struct rte_swx_pipeline *p __rte_unused,
2320 const struct instruction *ip)
2322 TRACE("[Thread %2u] sub (mi)\n", p->thread_id);
2328 __instr_alu_sub_hi_exec(struct rte_swx_pipeline *p __rte_unused,
2330 const struct instruction *ip)
2332 TRACE("[Thread %2u] sub (hi)\n", p->thread_id);
/* Shift-left variants. */
2338 __instr_alu_shl_exec(struct rte_swx_pipeline *p __rte_unused,
2340 const struct instruction *ip)
2342 TRACE("[Thread %2u] shl\n", p->thread_id);
2348 __instr_alu_shl_mh_exec(struct rte_swx_pipeline *p __rte_unused,
2350 const struct instruction *ip)
2352 TRACE("[Thread %2u] shl (mh)\n", p->thread_id);
2358 __instr_alu_shl_hm_exec(struct rte_swx_pipeline *p __rte_unused,
2360 const struct instruction *ip)
2362 TRACE("[Thread %2u] shl (hm)\n", p->thread_id);
2368 __instr_alu_shl_hh_exec(struct rte_swx_pipeline *p __rte_unused,
2370 const struct instruction *ip)
2372 TRACE("[Thread %2u] shl (hh)\n", p->thread_id);
2378 __instr_alu_shl_mi_exec(struct rte_swx_pipeline *p __rte_unused,
2380 const struct instruction *ip)
2382 TRACE("[Thread %2u] shl (mi)\n", p->thread_id);
2388 __instr_alu_shl_hi_exec(struct rte_swx_pipeline *p __rte_unused,
2390 const struct instruction *ip)
2392 TRACE("[Thread %2u] shl (hi)\n", p->thread_id);
/* Shift-right variants. */
2398 __instr_alu_shr_exec(struct rte_swx_pipeline *p __rte_unused,
2400 const struct instruction *ip)
2402 TRACE("[Thread %2u] shr\n", p->thread_id);
2408 __instr_alu_shr_mh_exec(struct rte_swx_pipeline *p __rte_unused,
2410 const struct instruction *ip)
2412 TRACE("[Thread %2u] shr (mh)\n", p->thread_id);
2418 __instr_alu_shr_hm_exec(struct rte_swx_pipeline *p __rte_unused,
2420 const struct instruction *ip)
2422 TRACE("[Thread %2u] shr (hm)\n", p->thread_id);
2428 __instr_alu_shr_hh_exec(struct rte_swx_pipeline *p __rte_unused,
2430 const struct instruction *ip)
2432 TRACE("[Thread %2u] shr (hh)\n", p->thread_id);
2438 __instr_alu_shr_mi_exec(struct rte_swx_pipeline *p __rte_unused,
2440 const struct instruction *ip)
2442 TRACE("[Thread %2u] shr (mi)\n", p->thread_id);
2449 __instr_alu_shr_hi_exec(struct rte_swx_pipeline *p __rte_unused,
2451 const struct instruction *ip)
2453 TRACE("[Thread %2u] shr (hi)\n", p->thread_id);
/* Bitwise AND variants; hm/hh use the *_FAST macro forms. */
2459 __instr_alu_and_exec(struct rte_swx_pipeline *p __rte_unused,
2461 const struct instruction *ip)
2463 TRACE("[Thread %2u] and\n", p->thread_id);
2469 __instr_alu_and_mh_exec(struct rte_swx_pipeline *p __rte_unused,
2471 const struct instruction *ip)
2473 TRACE("[Thread %2u] and (mh)\n", p->thread_id);
2479 __instr_alu_and_hm_exec(struct rte_swx_pipeline *p __rte_unused,
2481 const struct instruction *ip)
2483 TRACE("[Thread %2u] and (hm)\n", p->thread_id);
2485 ALU_HM_FAST(t, ip, &);
2489 __instr_alu_and_hh_exec(struct rte_swx_pipeline *p __rte_unused,
2491 const struct instruction *ip)
2493 TRACE("[Thread %2u] and (hh)\n", p->thread_id);
2495 ALU_HH_FAST(t, ip, &);
2499 __instr_alu_and_i_exec(struct rte_swx_pipeline *p __rte_unused,
2501 const struct instruction *ip)
2503 TRACE("[Thread %2u] and (i)\n", p->thread_id);
/* Bitwise OR variants. */
2509 __instr_alu_or_exec(struct rte_swx_pipeline *p __rte_unused,
2511 const struct instruction *ip)
2513 TRACE("[Thread %2u] or\n", p->thread_id);
2519 __instr_alu_or_mh_exec(struct rte_swx_pipeline *p __rte_unused,
2521 const struct instruction *ip)
2523 TRACE("[Thread %2u] or (mh)\n", p->thread_id);
2529 __instr_alu_or_hm_exec(struct rte_swx_pipeline *p __rte_unused,
2531 const struct instruction *ip)
2533 TRACE("[Thread %2u] or (hm)\n", p->thread_id);
2535 ALU_HM_FAST(t, ip, |);
2539 __instr_alu_or_hh_exec(struct rte_swx_pipeline *p __rte_unused,
2541 const struct instruction *ip)
2543 TRACE("[Thread %2u] or (hh)\n", p->thread_id);
2545 ALU_HH_FAST(t, ip, |);
2549 __instr_alu_or_i_exec(struct rte_swx_pipeline *p __rte_unused,
2551 const struct instruction *ip)
2553 TRACE("[Thread %2u] or (i)\n", p->thread_id);
/* Bitwise XOR variants. */
2559 __instr_alu_xor_exec(struct rte_swx_pipeline *p __rte_unused,
2561 const struct instruction *ip)
2563 TRACE("[Thread %2u] xor\n", p->thread_id);
2569 __instr_alu_xor_mh_exec(struct rte_swx_pipeline *p __rte_unused,
2571 const struct instruction *ip)
2573 TRACE("[Thread %2u] xor (mh)\n", p->thread_id);
2579 __instr_alu_xor_hm_exec(struct rte_swx_pipeline *p __rte_unused,
2581 const struct instruction *ip)
2583 TRACE("[Thread %2u] xor (hm)\n", p->thread_id);
2585 ALU_HM_FAST(t, ip, ^);
2589 __instr_alu_xor_hh_exec(struct rte_swx_pipeline *p __rte_unused,
2591 const struct instruction *ip)
2593 TRACE("[Thread %2u] xor (hh)\n", p->thread_id);
2595 ALU_HH_FAST(t, ip, ^);
2599 __instr_alu_xor_i_exec(struct rte_swx_pipeline *p __rte_unused,
2601 const struct instruction *ip)
2603 TRACE("[Thread %2u] xor (i)\n", p->thread_id);
/*
 * ckadd (field): incrementally add a source field into a 16-bit
 * ones-complement checksum (RFC 1071-style folding). The destination is a
 * 16-bit checksum field; the source is up to 64 bits wide and gets masked
 * down to its declared bit width before the fold.
 */
2609 __instr_alu_ckadd_field_exec(struct rte_swx_pipeline *p __rte_unused,
2611 const struct instruction *ip)
2613 uint8_t *dst_struct, *src_struct;
2614 uint16_t *dst16_ptr, dst;
2615 uint64_t *src64_ptr, src64, src64_mask, src;
2618 TRACE("[Thread %2u] ckadd (field)\n", p->thread_id);
2621 dst_struct = t->structs[ip->alu.dst.struct_id];
2622 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
2625 src_struct = t->structs[ip->alu.src.struct_id];
2626 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
/* Keep only the n_bits-wide source field out of the 64-bit load. */
2628 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
2629 src = src64 & src64_mask;
2634 /* The first input (r) is a 16-bit number. The second and the third
2635 * inputs are 32-bit numbers. In the worst case scenario, the sum of the
2636 * three numbers (output r) is a 34-bit number.
2638 r += (src >> 32) + (src & 0xFFFFFFFF);
2640 /* The first input is a 16-bit number. The second input is an 18-bit
2641 * number. In the worst case scenario, the sum of the two numbers is a
2644 r = (r & 0xFFFF) + (r >> 16);
2646 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
2647 * a 3-bit number (0 .. 7). Their sum is a 17-bit number (0 .. 0x10006).
2649 r = (r & 0xFFFF) + (r >> 16);
2651 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
2652 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
2653 * 0x10006), the output r is (0 .. 7). So no carry bit can be generated,
2654 * therefore the output r is always a 16-bit number.
2656 r = (r & 0xFFFF) + (r >> 16);
2661 *dst16_ptr = (uint16_t)r;
/*
 * cksub (field): incrementally subtract a source field from a 16-bit
 * ones-complement checksum. The 0xFFFF00000 bias below makes the 2's
 * complement subtraction underflow-free while preserving the value modulo
 * 0xFFFF (see the in-line derivation kept from the original).
 */
2665 __instr_alu_cksub_field_exec(struct rte_swx_pipeline *p __rte_unused,
2667 const struct instruction *ip)
2669 uint8_t *dst_struct, *src_struct;
2670 uint16_t *dst16_ptr, dst;
2671 uint64_t *src64_ptr, src64, src64_mask, src;
2674 TRACE("[Thread %2u] cksub (field)\n", p->thread_id);
2677 dst_struct = t->structs[ip->alu.dst.struct_id];
2678 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
2681 src_struct = t->structs[ip->alu.src.struct_id];
2682 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
/* Keep only the n_bits-wide source field out of the 64-bit load. */
2684 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
2685 src = src64 & src64_mask;
2690 /* Subtraction in 1's complement arithmetic (i.e. a '- b) is the same as
2691 * the following sequence of operations in 2's complement arithmetic:
2692 * a '- b = (a - b) % 0xFFFF.
2694 * In order to prevent an underflow for the below subtraction, in which
2695 * a 33-bit number (the subtrahend) is taken out of a 16-bit number (the
2696 * minuend), we first add a multiple of the 0xFFFF modulus to the
2697 * minuend. The number we add to the minuend needs to be a 34-bit number
2698 * or higher, so for readability reasons we picked the 36-bit multiple.
2699 * We are effectively turning the 16-bit minuend into a 36-bit number:
2700 * (a - b) % 0xFFFF = (a + 0xFFFF00000 - b) % 0xFFFF.
2702 r += 0xFFFF00000ULL; /* The output r is a 36-bit number. */
2704 /* A 33-bit number is subtracted from a 36-bit number (the input r). The
2705 * result (the output r) is a 36-bit number.
2707 r -= (src >> 32) + (src & 0xFFFFFFFF);
2709 /* The first input is a 16-bit number. The second input is a 20-bit
2710 * number. Their sum is a 21-bit number.
2712 r = (r & 0xFFFF) + (r >> 16);
2714 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
2715 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1001E).
2717 r = (r & 0xFFFF) + (r >> 16);
2719 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
2720 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
2721 * 0x1001E), the output r is (0 .. 31). So no carry bit can be
2722 * generated, therefore the output r is always a 16-bit number.
2724 r = (r & 0xFFFF) + (r >> 16);
2729 *dst16_ptr = (uint16_t)r;
/*
 * ckadd (20-byte struct): specialized ones-complement checksum over a fixed
 * 20-byte header (the classic option-less IPv4 header size), computed as
 * five 32-bit word adds accumulated in two lanes, then folded to 16 bits.
 * A zero result is mapped to 0xFFFF per checksum convention.
 */
2733 __instr_alu_ckadd_struct20_exec(struct rte_swx_pipeline *p __rte_unused,
2735 const struct instruction *ip)
2737 uint8_t *dst_struct, *src_struct;
2738 uint16_t *dst16_ptr;
2739 uint32_t *src32_ptr;
2742 TRACE("[Thread %2u] ckadd (struct of 20 bytes)\n", p->thread_id);
2745 dst_struct = t->structs[ip->alu.dst.struct_id];
2746 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
2748 src_struct = t->structs[ip->alu.src.struct_id];
2749 src32_ptr = (uint32_t *)&src_struct[0];
/* Two-lane accumulation of the five 32-bit words of the header. */
2751 r0 = src32_ptr[0]; /* r0 is a 32-bit number. */
2752 r1 = src32_ptr[1]; /* r1 is a 32-bit number. */
2753 r0 += src32_ptr[2]; /* The output r0 is a 33-bit number. */
2754 r1 += src32_ptr[3]; /* The output r1 is a 33-bit number. */
2755 r0 += r1 + src32_ptr[4]; /* The output r0 is a 35-bit number. */
2757 /* The first input is a 16-bit number. The second input is a 19-bit
2758 * number. Their sum is a 20-bit number.
2760 r0 = (r0 & 0xFFFF) + (r0 >> 16);
2762 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
2763 * a 4-bit number (0 .. 15). The sum is a 17-bit number (0 .. 0x1000E).
2765 r0 = (r0 & 0xFFFF) + (r0 >> 16);
2767 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
2768 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
2769 * 0x1000E), the output r is (0 .. 15). So no carry bit can be
2770 * generated, therefore the output r is always a 16-bit number.
2772 r0 = (r0 & 0xFFFF) + (r0 >> 16);
/* Checksum convention: an all-zero result is transmitted as 0xFFFF. */
2775 r0 = r0 ? r0 : 0xFFFF;
2777 *dst16_ptr = (uint16_t)r0;
/*
 * ckadd (generic struct): ones-complement checksum over a header of
 * arbitrary size, summing it as 32-bit words (word count derived from the
 * source field's bit width) and then folding the accumulator to 16 bits.
 */
2781 __instr_alu_ckadd_struct_exec(struct rte_swx_pipeline *p __rte_unused,
2783 const struct instruction *ip)
2785 uint8_t *dst_struct, *src_struct;
2786 uint16_t *dst16_ptr;
2787 uint32_t *src32_ptr;
2791 TRACE("[Thread %2u] ckadd (struct)\n", p->thread_id);
2794 dst_struct = t->structs[ip->alu.dst.struct_id];
2795 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
2797 src_struct = t->structs[ip->alu.src.struct_id];
2798 src32_ptr = (uint32_t *)&src_struct[0];
2800 /* The max number of 32-bit words in a 256-byte header is 8 = 2^3.
2801 * Therefore, in the worst case scenario, a 35-bit number is added to a
2802 * 16-bit number (the input r), so the output r is 36-bit number.
2804 for (i = 0; i < ip->alu.src.n_bits / 32; i++, src32_ptr++)
2807 /* The first input is a 16-bit number. The second input is a 20-bit
2808 * number. Their sum is a 21-bit number.
2810 r = (r & 0xFFFF) + (r >> 16);
2812 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
2813 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1000E).
2815 r = (r & 0xFFFF) + (r >> 16);
2817 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
2818 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
2819 * 0x1001E), the output r is (0 .. 31). So no carry bit can be
2820 * generated, therefore the output r is always a 16-bit number.
2822 r = (r & 0xFFFF) + (r >> 16);
2827 *dst16_ptr = (uint16_t)r;
/*
 * Register-array access helpers: resolve the register array, compute the
 * element index from a header field (network byte order), a metadata field
 * (host byte order) or an immediate, and read/write the operand field.
 * Index values are clamped with r->size_mask, so the array size is assumed
 * to be a power of two -- TODO confirm with the regarray allocation code.
 */
2833 static inline uint64_t *
2834 instr_regarray_regarray(struct rte_swx_pipeline *p, const struct instruction *ip)
2836 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
/* Index taken from a host-byte-order (metadata/action) field. */
2840 static inline uint64_t
2841 instr_regarray_idx_hbo(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
2843 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
2845 uint8_t *idx_struct = t->structs[ip->regarray.idx.struct_id];
2846 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->regarray.idx.offset];
2847 uint64_t idx64 = *idx64_ptr;
2848 uint64_t idx64_mask = UINT64_MAX >> (64 - ip->regarray.idx.n_bits);
2849 uint64_t idx = idx64 & idx64_mask & r->size_mask;
/* On little-endian hosts, header fields need a byte swap before use; on
 * big-endian hosts the hbo helper already produces the right value.
 */
2854 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
2856 static inline uint64_t
2857 instr_regarray_idx_nbo(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
2859 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
2861 uint8_t *idx_struct = t->structs[ip->regarray.idx.struct_id];
2862 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->regarray.idx.offset];
2863 uint64_t idx64 = *idx64_ptr;
2864 uint64_t idx = (ntoh64(idx64) >> (64 - ip->regarray.idx.n_bits)) & r->size_mask;
2871 #define instr_regarray_idx_nbo instr_regarray_idx_hbo
/* Index baked into the instruction as an immediate. */
2875 static inline uint64_t
2876 instr_regarray_idx_imm(struct rte_swx_pipeline *p, const struct instruction *ip)
2878 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
2880 uint64_t idx = ip->regarray.idx_val & r->size_mask;
/* Source operand read, host byte order. */
2885 static inline uint64_t
2886 instr_regarray_src_hbo(struct thread *t, const struct instruction *ip)
2888 uint8_t *src_struct = t->structs[ip->regarray.dstsrc.struct_id];
2889 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->regarray.dstsrc.offset];
2890 uint64_t src64 = *src64_ptr;
2891 uint64_t src64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
2892 uint64_t src = src64 & src64_mask;
2897 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* Source operand read, network byte order (little-endian hosts only). */
2899 static inline uint64_t
2900 instr_regarray_src_nbo(struct thread *t, const struct instruction *ip)
2902 uint8_t *src_struct = t->structs[ip->regarray.dstsrc.struct_id];
2903 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->regarray.dstsrc.offset];
2904 uint64_t src64 = *src64_ptr;
2905 uint64_t src = ntoh64(src64) >> (64 - ip->regarray.dstsrc.n_bits);
2912 #define instr_regarray_src_nbo instr_regarray_src_hbo
/* Destination write, host byte order: read-modify-write of the n_bits field. */
2917 instr_regarray_dst_hbo_src_hbo_set(struct thread *t, const struct instruction *ip, uint64_t src)
2919 uint8_t *dst_struct = t->structs[ip->regarray.dstsrc.struct_id];
2920 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->regarray.dstsrc.offset];
2921 uint64_t dst64 = *dst64_ptr;
2922 uint64_t dst64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
2924 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
2928 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* Destination write, network byte order: swap before the masked store. */
2931 instr_regarray_dst_nbo_src_hbo_set(struct thread *t, const struct instruction *ip, uint64_t src)
2933 uint8_t *dst_struct = t->structs[ip->regarray.dstsrc.struct_id];
2934 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->regarray.dstsrc.offset];
2935 uint64_t dst64 = *dst64_ptr;
2936 uint64_t dst64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
2938 src = hton64(src) >> (64 - ip->regarray.dstsrc.n_bits);
2939 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
2944 #define instr_regarray_dst_nbo_src_hbo_set instr_regarray_dst_hbo_src_hbo_set
2949 __instr_regprefetch_rh_exec(struct rte_swx_pipeline *p,
2951 const struct instruction *ip)
2953 uint64_t *regarray, idx;
2955 TRACE("[Thread %2u] regprefetch (r[h])\n", p->thread_id);
2957 regarray = instr_regarray_regarray(p, ip);
2958 idx = instr_regarray_idx_nbo(p, t, ip);
2959 rte_prefetch0(®array[idx]);
2963 __instr_regprefetch_rm_exec(struct rte_swx_pipeline *p,
2965 const struct instruction *ip)
2967 uint64_t *regarray, idx;
2969 TRACE("[Thread %2u] regprefetch (r[m])\n", p->thread_id);
2971 regarray = instr_regarray_regarray(p, ip);
2972 idx = instr_regarray_idx_hbo(p, t, ip);
2973 rte_prefetch0(®array[idx]);
2977 __instr_regprefetch_ri_exec(struct rte_swx_pipeline *p,
2978 struct thread *t __rte_unused,
2979 const struct instruction *ip)
2981 uint64_t *regarray, idx;
2983 TRACE("[Thread %2u] regprefetch (r[i])\n", p->thread_id);
2985 regarray = instr_regarray_regarray(p, ip);
2986 idx = instr_regarray_idx_imm(p, ip);
2987 rte_prefetch0(®array[idx]);
2991 __instr_regrd_hrh_exec(struct rte_swx_pipeline *p,
2993 const struct instruction *ip)
2995 uint64_t *regarray, idx;
2997 TRACE("[Thread %2u] regrd (h = r[h])\n", p->thread_id);
2999 regarray = instr_regarray_regarray(p, ip);
3000 idx = instr_regarray_idx_nbo(p, t, ip);
3001 instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
3005 __instr_regrd_hrm_exec(struct rte_swx_pipeline *p,
3007 const struct instruction *ip)
3009 uint64_t *regarray, idx;
3011 TRACE("[Thread %2u] regrd (h = r[m])\n", p->thread_id);
3014 regarray = instr_regarray_regarray(p, ip);
3015 idx = instr_regarray_idx_hbo(p, t, ip);
3016 instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
3020 __instr_regrd_mrh_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3022 uint64_t *regarray, idx;
3024 TRACE("[Thread %2u] regrd (m = r[h])\n", p->thread_id);
3026 regarray = instr_regarray_regarray(p, ip);
3027 idx = instr_regarray_idx_nbo(p, t, ip);
3028 instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
3032 __instr_regrd_mrm_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3034 uint64_t *regarray, idx;
3036 TRACE("[Thread %2u] regrd (m = r[m])\n", p->thread_id);
3038 regarray = instr_regarray_regarray(p, ip);
3039 idx = instr_regarray_idx_hbo(p, t, ip);
3040 instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
3044 __instr_regrd_hri_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3046 uint64_t *regarray, idx;
3048 TRACE("[Thread %2u] regrd (h = r[i])\n", p->thread_id);
3050 regarray = instr_regarray_regarray(p, ip);
3051 idx = instr_regarray_idx_imm(p, ip);
3052 instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
3056 __instr_regrd_mri_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3058 uint64_t *regarray, idx;
3060 TRACE("[Thread %2u] regrd (m = r[i])\n", p->thread_id);
3062 regarray = instr_regarray_regarray(p, ip);
3063 idx = instr_regarray_idx_imm(p, ip);
3064 instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
3068 __instr_regwr_rhh_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3070 uint64_t *regarray, idx, src;
3072 TRACE("[Thread %2u] regwr (r[h] = h)\n", p->thread_id);
3074 regarray = instr_regarray_regarray(p, ip);
3075 idx = instr_regarray_idx_nbo(p, t, ip);
3076 src = instr_regarray_src_nbo(t, ip);
3077 regarray[idx] = src;
3081 __instr_regwr_rhm_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3083 uint64_t *regarray, idx, src;
3085 TRACE("[Thread %2u] regwr (r[h] = m)\n", p->thread_id);
3087 regarray = instr_regarray_regarray(p, ip);
3088 idx = instr_regarray_idx_nbo(p, t, ip);
3089 src = instr_regarray_src_hbo(t, ip);
3090 regarray[idx] = src;
3094 __instr_regwr_rmh_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3096 uint64_t *regarray, idx, src;
3098 TRACE("[Thread %2u] regwr (r[m] = h)\n", p->thread_id);
3100 regarray = instr_regarray_regarray(p, ip);
3101 idx = instr_regarray_idx_hbo(p, t, ip);
3102 src = instr_regarray_src_nbo(t, ip);
3103 regarray[idx] = src;
3107 __instr_regwr_rmm_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3109 uint64_t *regarray, idx, src;
3111 TRACE("[Thread %2u] regwr (r[m] = m)\n", p->thread_id);
3113 regarray = instr_regarray_regarray(p, ip);
3114 idx = instr_regarray_idx_hbo(p, t, ip);
3115 src = instr_regarray_src_hbo(t, ip);
3116 regarray[idx] = src;
3120 __instr_regwr_rhi_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3122 uint64_t *regarray, idx, src;
3124 TRACE("[Thread %2u] regwr (r[h] = i)\n", p->thread_id);
3126 regarray = instr_regarray_regarray(p, ip);
3127 idx = instr_regarray_idx_nbo(p, t, ip);
3128 src = ip->regarray.dstsrc_val;
3129 regarray[idx] = src;
3133 __instr_regwr_rmi_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3135 uint64_t *regarray, idx, src;
3137 TRACE("[Thread %2u] regwr (r[m] = i)\n", p->thread_id);
3139 regarray = instr_regarray_regarray(p, ip);
3140 idx = instr_regarray_idx_hbo(p, t, ip);
3141 src = ip->regarray.dstsrc_val;
3142 regarray[idx] = src;
3146 __instr_regwr_rih_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3148 uint64_t *regarray, idx, src;
3150 TRACE("[Thread %2u] regwr (r[i] = h)\n", p->thread_id);
3152 regarray = instr_regarray_regarray(p, ip);
3153 idx = instr_regarray_idx_imm(p, ip);
3154 src = instr_regarray_src_nbo(t, ip);
3155 regarray[idx] = src;
3159 __instr_regwr_rim_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3161 uint64_t *regarray, idx, src;
3163 TRACE("[Thread %2u] regwr (r[i] = m)\n", p->thread_id);
3165 regarray = instr_regarray_regarray(p, ip);
3166 idx = instr_regarray_idx_imm(p, ip);
3167 src = instr_regarray_src_hbo(t, ip);
3168 regarray[idx] = src;
3172 __instr_regwr_rii_exec(struct rte_swx_pipeline *p,
3173 struct thread *t __rte_unused,
3174 const struct instruction *ip)
3176 uint64_t *regarray, idx, src;
3178 TRACE("[Thread %2u] regwr (r[i] = i)\n", p->thread_id);
3180 regarray = instr_regarray_regarray(p, ip);
3181 idx = instr_regarray_idx_imm(p, ip);
3182 src = ip->regarray.dstsrc_val;
3183 regarray[idx] = src;
3187 __instr_regadd_rhh_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3189 uint64_t *regarray, idx, src;
3191 TRACE("[Thread %2u] regadd (r[h] += h)\n", p->thread_id);
3193 regarray = instr_regarray_regarray(p, ip);
3194 idx = instr_regarray_idx_nbo(p, t, ip);
3195 src = instr_regarray_src_nbo(t, ip);
3196 regarray[idx] += src;
3200 __instr_regadd_rhm_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3202 uint64_t *regarray, idx, src;
3204 TRACE("[Thread %2u] regadd (r[h] += m)\n", p->thread_id);
3206 regarray = instr_regarray_regarray(p, ip);
3207 idx = instr_regarray_idx_nbo(p, t, ip);
3208 src = instr_regarray_src_hbo(t, ip);
3209 regarray[idx] += src;
3213 __instr_regadd_rmh_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3215 uint64_t *regarray, idx, src;
3217 TRACE("[Thread %2u] regadd (r[m] += h)\n", p->thread_id);
3219 regarray = instr_regarray_regarray(p, ip);
3220 idx = instr_regarray_idx_hbo(p, t, ip);
3221 src = instr_regarray_src_nbo(t, ip);
3222 regarray[idx] += src;
3226 __instr_regadd_rmm_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3228 uint64_t *regarray, idx, src;
3230 TRACE("[Thread %2u] regadd (r[m] += m)\n", p->thread_id);
3232 regarray = instr_regarray_regarray(p, ip);
3233 idx = instr_regarray_idx_hbo(p, t, ip);
3234 src = instr_regarray_src_hbo(t, ip);
3235 regarray[idx] += src;
3239 __instr_regadd_rhi_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3241 uint64_t *regarray, idx, src;
3243 TRACE("[Thread %2u] regadd (r[h] += i)\n", p->thread_id);
3245 regarray = instr_regarray_regarray(p, ip);
3246 idx = instr_regarray_idx_nbo(p, t, ip);
3247 src = ip->regarray.dstsrc_val;
3248 regarray[idx] += src;
3252 __instr_regadd_rmi_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3254 uint64_t *regarray, idx, src;
3256 TRACE("[Thread %2u] regadd (r[m] += i)\n", p->thread_id);
3258 regarray = instr_regarray_regarray(p, ip);
3259 idx = instr_regarray_idx_hbo(p, t, ip);
3260 src = ip->regarray.dstsrc_val;
3261 regarray[idx] += src;
3265 __instr_regadd_rih_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3267 uint64_t *regarray, idx, src;
3269 TRACE("[Thread %2u] regadd (r[i] += h)\n", p->thread_id);
3271 regarray = instr_regarray_regarray(p, ip);
3272 idx = instr_regarray_idx_imm(p, ip);
3273 src = instr_regarray_src_nbo(t, ip);
3274 regarray[idx] += src;
3278 __instr_regadd_rim_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3280 uint64_t *regarray, idx, src;
3282 TRACE("[Thread %2u] regadd (r[i] += m)\n", p->thread_id);
3284 regarray = instr_regarray_regarray(p, ip);
3285 idx = instr_regarray_idx_imm(p, ip);
3286 src = instr_regarray_src_hbo(t, ip);
3287 regarray[idx] += src;
3291 __instr_regadd_rii_exec(struct rte_swx_pipeline *p,
3292 struct thread *t __rte_unused,
3293 const struct instruction *ip)
3295 uint64_t *regarray, idx, src;
3297 TRACE("[Thread %2u] regadd (r[i] += i)\n", p->thread_id);
3299 regarray = instr_regarray_regarray(p, ip);
3300 idx = instr_regarray_idx_imm(p, ip);
3301 src = ip->regarray.dstsrc_val;
3302 regarray[idx] += src;