1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Intel Corporation
11 #include <rte_common.h>
12 #include <rte_prefetch.h>
13 #include <rte_byteorder.h>
15 #include "rte_swx_pipeline.h"
16 #include "rte_swx_ctl.h"
18 #define CHECK(condition, err_code) \
24 #define CHECK_NAME(name, err_code) \
25 CHECK((name) && (name)[0], err_code)
32 #define TRACE(...) printf(__VA_ARGS__)
37 #define ntoh64(x) rte_be_to_cpu_64(x)
38 #define hton64(x) rte_cpu_to_be_64(x)
44 char name[RTE_SWX_NAME_SIZE];
50 TAILQ_ENTRY(struct_type) node;
51 char name[RTE_SWX_NAME_SIZE];
57 TAILQ_HEAD(struct_type_tailq, struct_type);
63 TAILQ_ENTRY(port_in_type) node;
64 char name[RTE_SWX_NAME_SIZE];
65 struct rte_swx_port_in_ops ops;
68 TAILQ_HEAD(port_in_type_tailq, port_in_type);
71 TAILQ_ENTRY(port_in) node;
72 struct port_in_type *type;
77 TAILQ_HEAD(port_in_tailq, port_in);
79 struct port_in_runtime {
80 rte_swx_port_in_pkt_rx_t pkt_rx;
87 struct port_out_type {
88 TAILQ_ENTRY(port_out_type) node;
89 char name[RTE_SWX_NAME_SIZE];
90 struct rte_swx_port_out_ops ops;
93 TAILQ_HEAD(port_out_type_tailq, port_out_type);
96 TAILQ_ENTRY(port_out) node;
97 struct port_out_type *type;
102 TAILQ_HEAD(port_out_tailq, port_out);
104 struct port_out_runtime {
105 rte_swx_port_out_pkt_tx_t pkt_tx;
106 rte_swx_port_out_flush_t flush;
113 struct extern_type_member_func {
114 TAILQ_ENTRY(extern_type_member_func) node;
115 char name[RTE_SWX_NAME_SIZE];
116 rte_swx_extern_type_member_func_t func;
120 TAILQ_HEAD(extern_type_member_func_tailq, extern_type_member_func);
123 TAILQ_ENTRY(extern_type) node;
124 char name[RTE_SWX_NAME_SIZE];
125 struct struct_type *mailbox_struct_type;
126 rte_swx_extern_type_constructor_t constructor;
127 rte_swx_extern_type_destructor_t destructor;
128 struct extern_type_member_func_tailq funcs;
132 TAILQ_HEAD(extern_type_tailq, extern_type);
135 TAILQ_ENTRY(extern_obj) node;
136 char name[RTE_SWX_NAME_SIZE];
137 struct extern_type *type;
143 TAILQ_HEAD(extern_obj_tailq, extern_obj);
145 #ifndef RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX
146 #define RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX 8
149 struct extern_obj_runtime {
152 rte_swx_extern_type_member_func_t funcs[RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX];
159 TAILQ_ENTRY(extern_func) node;
160 char name[RTE_SWX_NAME_SIZE];
161 struct struct_type *mailbox_struct_type;
162 rte_swx_extern_func_t func;
167 TAILQ_HEAD(extern_func_tailq, extern_func);
169 struct extern_func_runtime {
171 rte_swx_extern_func_t func;
178 TAILQ_ENTRY(header) node;
179 char name[RTE_SWX_NAME_SIZE];
180 struct struct_type *st;
185 TAILQ_HEAD(header_tailq, header);
187 struct header_runtime {
191 struct header_out_runtime {
201 /* Packet headers are always in Network Byte Order (NBO), i.e. big endian.
202 * Packet meta-data fields are always assumed to be in Host Byte Order (HBO).
203 * Table entry fields can be in either NBO or HBO; they are assumed to be in HBO
204 * when transferred to packet meta-data and in NBO when transferred to packet
208 /* Notation conventions:
209 * -Header field: H = h.header.field (dst/src)
210 * -Meta-data field: M = m.field (dst/src)
211 * -Extern object mailbox field: E = e.field (dst/src)
212 * -Extern function mailbox field: F = f.field (dst/src)
213 * -Table action data field: T = t.field (src only)
214 * -Immediate value: I = 32-bit unsigned value (src only)
217 enum instruction_type {
224 /* extract h.header */
245 /* validate h.header */
248 /* invalidate h.header */
249 INSTR_HDR_INVALIDATE,
253 * dst = HMEF, src = HMEFTI
255 INSTR_MOV, /* dst = MEF, src = MEFT */
256 INSTR_MOV_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
257 INSTR_MOV_I, /* dst = HMEF, src = I */
259 /* dma h.header t.field
260 * memcpy(h.header, t.field, sizeof(h.header))
273 * dst = HMEF, src = HMEFTI
275 INSTR_ALU_ADD, /* dst = MEF, src = MEF */
276 INSTR_ALU_ADD_MH, /* dst = MEF, src = H */
277 INSTR_ALU_ADD_HM, /* dst = H, src = MEF */
278 INSTR_ALU_ADD_HH, /* dst = H, src = H */
279 INSTR_ALU_ADD_MI, /* dst = MEF, src = I */
280 INSTR_ALU_ADD_HI, /* dst = H, src = I */
284 * dst = HMEF, src = HMEFTI
286 INSTR_ALU_SUB, /* dst = MEF, src = MEF */
287 INSTR_ALU_SUB_MH, /* dst = MEF, src = H */
288 INSTR_ALU_SUB_HM, /* dst = H, src = MEF */
289 INSTR_ALU_SUB_HH, /* dst = H, src = H */
290 INSTR_ALU_SUB_MI, /* dst = MEF, src = I */
291 INSTR_ALU_SUB_HI, /* dst = H, src = I */
294 * dst = dst '+ src[0:1] '+ src[2:3] + ...
295 * dst = H, src = {H, h.header}
297 INSTR_ALU_CKADD_FIELD, /* src = H */
298 INSTR_ALU_CKADD_STRUCT20, /* src = h.header, with sizeof(header) = 20 */
299 INSTR_ALU_CKADD_STRUCT, /* src = h.header, with any sizeof(header) */
305 INSTR_ALU_CKSUB_FIELD,
309 * dst = HMEF, src = HMEFTI
311 INSTR_ALU_AND, /* dst = MEF, src = MEFT */
312 INSTR_ALU_AND_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
313 INSTR_ALU_AND_I, /* dst = HMEF, src = I */
317 * dst = HMEF, src = HMEFTI
319 INSTR_ALU_OR, /* dst = MEF, src = MEFT */
320 INSTR_ALU_OR_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
321 INSTR_ALU_OR_I, /* dst = HMEF, src = I */
325 * dst = HMEF, src = HMEFTI
327 INSTR_ALU_XOR, /* dst = MEF, src = MEFT */
328 INSTR_ALU_XOR_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
329 INSTR_ALU_XOR_I, /* dst = HMEF, src = I */
333 * dst = HMEF, src = HMEFTI
335 INSTR_ALU_SHL, /* dst = MEF, src = MEF */
336 INSTR_ALU_SHL_MH, /* dst = MEF, src = H */
337 INSTR_ALU_SHL_HM, /* dst = H, src = MEF */
338 INSTR_ALU_SHL_HH, /* dst = H, src = H */
339 INSTR_ALU_SHL_MI, /* dst = MEF, src = I */
340 INSTR_ALU_SHL_HI, /* dst = H, src = I */
344 * dst = HMEF, src = HMEFTI
346 INSTR_ALU_SHR, /* dst = MEF, src = MEF */
347 INSTR_ALU_SHR_MH, /* dst = MEF, src = H */
348 INSTR_ALU_SHR_HM, /* dst = H, src = MEF */
349 INSTR_ALU_SHR_HH, /* dst = H, src = H */
350 INSTR_ALU_SHR_MI, /* dst = MEF, src = I */
351 INSTR_ALU_SHR_HI, /* dst = H, src = I */
354 struct instr_operand {
369 uint8_t header_id[8];
370 uint8_t struct_id[8];
375 struct instr_hdr_validity {
379 struct instr_dst_src {
380 struct instr_operand dst;
382 struct instr_operand src;
389 uint8_t header_id[8];
390 uint8_t struct_id[8];
401 enum instruction_type type;
404 struct instr_hdr_validity valid;
405 struct instr_dst_src mov;
406 struct instr_dma dma;
407 struct instr_dst_src alu;
411 struct instruction_data {
412 char label[RTE_SWX_NAME_SIZE];
413 char jmp_label[RTE_SWX_NAME_SIZE];
414 uint32_t n_users; /* user = jmp instruction to this instruction. */
422 TAILQ_ENTRY(action) node;
423 char name[RTE_SWX_NAME_SIZE];
424 struct struct_type *st;
425 struct instruction *instructions;
426 uint32_t n_instructions;
430 TAILQ_HEAD(action_tailq, action);
436 TAILQ_ENTRY(table_type) node;
437 char name[RTE_SWX_NAME_SIZE];
438 enum rte_swx_table_match_type match_type;
439 struct rte_swx_table_ops ops;
442 TAILQ_HEAD(table_type_tailq, table_type);
445 enum rte_swx_table_match_type match_type;
450 TAILQ_ENTRY(table) node;
451 char name[RTE_SWX_NAME_SIZE];
452 char args[RTE_SWX_NAME_SIZE];
453 struct table_type *type; /* NULL when n_fields == 0. */
456 struct match_field *fields;
458 int is_header; /* Only valid when n_fields > 0. */
459 struct header *header; /* Only valid when n_fields > 0. */
462 struct action **actions;
463 struct action *default_action;
464 uint8_t *default_action_data;
466 int default_action_is_const;
467 uint32_t action_data_size_max;
473 TAILQ_HEAD(table_tailq, table);
475 struct table_runtime {
476 rte_swx_table_lookup_t func;
486 struct rte_swx_pkt pkt;
492 /* Packet headers. */
493 struct header_runtime *headers; /* Extracted or generated headers. */
494 struct header_out_runtime *headers_out; /* Emitted headers. */
495 uint8_t *header_storage;
496 uint8_t *header_out_storage;
497 uint64_t valid_headers;
498 uint32_t n_headers_out;
500 /* Packet meta-data. */
504 struct table_runtime *tables;
505 struct rte_swx_table_state *table_state;
507 int hit; /* 0 = Miss, 1 = Hit. */
509 /* Extern objects and functions. */
510 struct extern_obj_runtime *extern_objs;
511 struct extern_func_runtime *extern_funcs;
514 struct instruction *ip;
515 struct instruction *ret;
518 #define MASK64_BIT_GET(mask, pos) ((mask) & (1LLU << (pos)))
519 #define MASK64_BIT_SET(mask, pos) ((mask) | (1LLU << (pos)))
520 #define MASK64_BIT_CLR(mask, pos) ((mask) & ~(1LLU << (pos)))
522 #define ALU(thread, ip, operator) \
524 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
525 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
526 uint64_t dst64 = *dst64_ptr; \
527 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
528 uint64_t dst = dst64 & dst64_mask; \
530 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
531 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
532 uint64_t src64 = *src64_ptr; \
533 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
534 uint64_t src = src64 & src64_mask; \
536 uint64_t result = dst operator src; \
538 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
541 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
543 #define ALU_S(thread, ip, operator) \
545 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
546 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
547 uint64_t dst64 = *dst64_ptr; \
548 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
549 uint64_t dst = dst64 & dst64_mask; \
551 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
552 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
553 uint64_t src64 = *src64_ptr; \
554 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
556 uint64_t result = dst operator src; \
558 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
563 #define ALU_HM(thread, ip, operator) \
565 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
566 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
567 uint64_t dst64 = *dst64_ptr; \
568 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
569 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
571 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
572 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
573 uint64_t src64 = *src64_ptr; \
574 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
575 uint64_t src = src64 & src64_mask; \
577 uint64_t result = dst operator src; \
578 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
580 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
583 #define ALU_HH(thread, ip, operator) \
585 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
586 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
587 uint64_t dst64 = *dst64_ptr; \
588 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
589 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
591 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
592 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
593 uint64_t src64 = *src64_ptr; \
594 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
596 uint64_t result = dst operator src; \
597 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
599 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
611 #define ALU_I(thread, ip, operator) \
613 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
614 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
615 uint64_t dst64 = *dst64_ptr; \
616 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
617 uint64_t dst = dst64 & dst64_mask; \
619 uint64_t src = (ip)->alu.src_val; \
621 uint64_t result = dst operator src; \
623 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
628 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
630 #define ALU_HI(thread, ip, operator) \
632 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
633 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
634 uint64_t dst64 = *dst64_ptr; \
635 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
636 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
638 uint64_t src = (ip)->alu.src_val; \
640 uint64_t result = dst operator src; \
641 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
643 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
652 #define MOV(thread, ip) \
654 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
655 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
656 uint64_t dst64 = *dst64_ptr; \
657 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
659 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
660 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
661 uint64_t src64 = *src64_ptr; \
662 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \
663 uint64_t src = src64 & src64_mask; \
665 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
668 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
670 #define MOV_S(thread, ip) \
672 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
673 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
674 uint64_t dst64 = *dst64_ptr; \
675 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
677 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
678 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
679 uint64_t src64 = *src64_ptr; \
680 uint64_t src = ntoh64(src64) >> (64 - (ip)->mov.src.n_bits); \
682 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
691 #define MOV_I(thread, ip) \
693 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
694 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
695 uint64_t dst64 = *dst64_ptr; \
696 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
698 uint64_t src = (ip)->mov.src_val; \
700 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
703 #define METADATA_READ(thread, offset, n_bits) \
705 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
706 uint64_t m64 = *m64_ptr; \
707 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
711 #define METADATA_WRITE(thread, offset, n_bits, value) \
713 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
714 uint64_t m64 = *m64_ptr; \
715 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
717 uint64_t m_new = value; \
719 *m64_ptr = (m64 & ~m64_mask) | (m_new & m64_mask); \
722 #ifndef RTE_SWX_PIPELINE_THREADS_MAX
723 #define RTE_SWX_PIPELINE_THREADS_MAX 16
726 struct rte_swx_pipeline {
727 struct struct_type_tailq struct_types;
728 struct port_in_type_tailq port_in_types;
729 struct port_in_tailq ports_in;
730 struct port_out_type_tailq port_out_types;
731 struct port_out_tailq ports_out;
732 struct extern_type_tailq extern_types;
733 struct extern_obj_tailq extern_objs;
734 struct extern_func_tailq extern_funcs;
735 struct header_tailq headers;
736 struct struct_type *metadata_st;
737 uint32_t metadata_struct_id;
738 struct action_tailq actions;
739 struct table_type_tailq table_types;
740 struct table_tailq tables;
742 struct port_in_runtime *in;
743 struct port_out_runtime *out;
744 struct instruction **action_instructions;
745 struct rte_swx_table_state *table_state;
746 struct instruction *instructions;
747 struct thread threads[RTE_SWX_PIPELINE_THREADS_MAX];
751 uint32_t n_ports_out;
752 uint32_t n_extern_objs;
753 uint32_t n_extern_funcs;
759 uint32_t n_instructions;
767 static struct struct_type *
768 struct_type_find(struct rte_swx_pipeline *p, const char *name)
770 struct struct_type *elem;
772 TAILQ_FOREACH(elem, &p->struct_types, node)
773 if (strcmp(elem->name, name) == 0)
779 static struct field *
780 struct_type_field_find(struct struct_type *st, const char *name)
784 for (i = 0; i < st->n_fields; i++) {
785 struct field *f = &st->fields[i];
787 if (strcmp(f->name, name) == 0)
795 rte_swx_pipeline_struct_type_register(struct rte_swx_pipeline *p,
797 struct rte_swx_field_params *fields,
800 struct struct_type *st;
804 CHECK_NAME(name, EINVAL);
805 CHECK(fields, EINVAL);
806 CHECK(n_fields, EINVAL);
808 for (i = 0; i < n_fields; i++) {
809 struct rte_swx_field_params *f = &fields[i];
812 CHECK_NAME(f->name, EINVAL);
813 CHECK(f->n_bits, EINVAL);
814 CHECK(f->n_bits <= 64, EINVAL);
815 CHECK((f->n_bits & 7) == 0, EINVAL);
817 for (j = 0; j < i; j++) {
818 struct rte_swx_field_params *f_prev = &fields[j];
820 CHECK(strcmp(f->name, f_prev->name), EINVAL);
824 CHECK(!struct_type_find(p, name), EEXIST);
826 /* Node allocation. */
827 st = calloc(1, sizeof(struct struct_type));
830 st->fields = calloc(n_fields, sizeof(struct field));
836 /* Node initialization. */
837 strcpy(st->name, name);
838 for (i = 0; i < n_fields; i++) {
839 struct field *dst = &st->fields[i];
840 struct rte_swx_field_params *src = &fields[i];
842 strcpy(dst->name, src->name);
843 dst->n_bits = src->n_bits;
844 dst->offset = st->n_bits;
846 st->n_bits += src->n_bits;
848 st->n_fields = n_fields;
850 /* Node add to tailq. */
851 TAILQ_INSERT_TAIL(&p->struct_types, st, node);
857 struct_build(struct rte_swx_pipeline *p)
861 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
862 struct thread *t = &p->threads[i];
864 t->structs = calloc(p->n_structs, sizeof(uint8_t *));
865 CHECK(t->structs, ENOMEM);
872 struct_build_free(struct rte_swx_pipeline *p)
876 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
877 struct thread *t = &p->threads[i];
885 struct_free(struct rte_swx_pipeline *p)
887 struct_build_free(p);
891 struct struct_type *elem;
893 elem = TAILQ_FIRST(&p->struct_types);
897 TAILQ_REMOVE(&p->struct_types, elem, node);
906 static struct port_in_type *
907 port_in_type_find(struct rte_swx_pipeline *p, const char *name)
909 struct port_in_type *elem;
914 TAILQ_FOREACH(elem, &p->port_in_types, node)
915 if (strcmp(elem->name, name) == 0)
922 rte_swx_pipeline_port_in_type_register(struct rte_swx_pipeline *p,
924 struct rte_swx_port_in_ops *ops)
926 struct port_in_type *elem;
929 CHECK_NAME(name, EINVAL);
931 CHECK(ops->create, EINVAL);
932 CHECK(ops->free, EINVAL);
933 CHECK(ops->pkt_rx, EINVAL);
934 CHECK(ops->stats_read, EINVAL);
936 CHECK(!port_in_type_find(p, name), EEXIST);
938 /* Node allocation. */
939 elem = calloc(1, sizeof(struct port_in_type));
942 /* Node initialization. */
943 strcpy(elem->name, name);
944 memcpy(&elem->ops, ops, sizeof(*ops));
946 /* Node add to tailq. */
947 TAILQ_INSERT_TAIL(&p->port_in_types, elem, node);
952 static struct port_in *
953 port_in_find(struct rte_swx_pipeline *p, uint32_t port_id)
955 struct port_in *port;
957 TAILQ_FOREACH(port, &p->ports_in, node)
958 if (port->id == port_id)
965 rte_swx_pipeline_port_in_config(struct rte_swx_pipeline *p,
967 const char *port_type_name,
970 struct port_in_type *type = NULL;
971 struct port_in *port = NULL;
976 CHECK(!port_in_find(p, port_id), EINVAL);
978 CHECK_NAME(port_type_name, EINVAL);
979 type = port_in_type_find(p, port_type_name);
982 obj = type->ops.create(args);
985 /* Node allocation. */
986 port = calloc(1, sizeof(struct port_in));
989 /* Node initialization. */
994 /* Node add to tailq. */
995 TAILQ_INSERT_TAIL(&p->ports_in, port, node);
996 if (p->n_ports_in < port_id + 1)
997 p->n_ports_in = port_id + 1;
1003 port_in_build(struct rte_swx_pipeline *p)
1005 struct port_in *port;
1008 CHECK(p->n_ports_in, EINVAL);
1009 CHECK(rte_is_power_of_2(p->n_ports_in), EINVAL);
1011 for (i = 0; i < p->n_ports_in; i++)
1012 CHECK(port_in_find(p, i), EINVAL);
1014 p->in = calloc(p->n_ports_in, sizeof(struct port_in_runtime));
1015 CHECK(p->in, ENOMEM);
1017 TAILQ_FOREACH(port, &p->ports_in, node) {
1018 struct port_in_runtime *in = &p->in[port->id];
1020 in->pkt_rx = port->type->ops.pkt_rx;
1021 in->obj = port->obj;
1028 port_in_build_free(struct rte_swx_pipeline *p)
1035 port_in_free(struct rte_swx_pipeline *p)
1037 port_in_build_free(p);
1041 struct port_in *port;
1043 port = TAILQ_FIRST(&p->ports_in);
1047 TAILQ_REMOVE(&p->ports_in, port, node);
1048 port->type->ops.free(port->obj);
1052 /* Input port types. */
1054 struct port_in_type *elem;
1056 elem = TAILQ_FIRST(&p->port_in_types);
1060 TAILQ_REMOVE(&p->port_in_types, elem, node);
1068 static struct port_out_type *
1069 port_out_type_find(struct rte_swx_pipeline *p, const char *name)
1071 struct port_out_type *elem;
1076 TAILQ_FOREACH(elem, &p->port_out_types, node)
1077 if (!strcmp(elem->name, name))
1084 rte_swx_pipeline_port_out_type_register(struct rte_swx_pipeline *p,
1086 struct rte_swx_port_out_ops *ops)
1088 struct port_out_type *elem;
1091 CHECK_NAME(name, EINVAL);
1093 CHECK(ops->create, EINVAL);
1094 CHECK(ops->free, EINVAL);
1095 CHECK(ops->pkt_tx, EINVAL);
1096 CHECK(ops->stats_read, EINVAL);
1098 CHECK(!port_out_type_find(p, name), EEXIST);
1100 /* Node allocation. */
1101 elem = calloc(1, sizeof(struct port_out_type));
1102 CHECK(elem, ENOMEM);
1104 /* Node initialization. */
1105 strcpy(elem->name, name);
1106 memcpy(&elem->ops, ops, sizeof(*ops));
1108 /* Node add to tailq. */
1109 TAILQ_INSERT_TAIL(&p->port_out_types, elem, node);
1114 static struct port_out *
1115 port_out_find(struct rte_swx_pipeline *p, uint32_t port_id)
1117 struct port_out *port;
1119 TAILQ_FOREACH(port, &p->ports_out, node)
1120 if (port->id == port_id)
1127 rte_swx_pipeline_port_out_config(struct rte_swx_pipeline *p,
1129 const char *port_type_name,
1132 struct port_out_type *type = NULL;
1133 struct port_out *port = NULL;
1138 CHECK(!port_out_find(p, port_id), EINVAL);
1140 CHECK_NAME(port_type_name, EINVAL);
1141 type = port_out_type_find(p, port_type_name);
1142 CHECK(type, EINVAL);
1144 obj = type->ops.create(args);
1147 /* Node allocation. */
1148 port = calloc(1, sizeof(struct port_out));
1149 CHECK(port, ENOMEM);
1151 /* Node initialization. */
1156 /* Node add to tailq. */
1157 TAILQ_INSERT_TAIL(&p->ports_out, port, node);
1158 if (p->n_ports_out < port_id + 1)
1159 p->n_ports_out = port_id + 1;
1165 port_out_build(struct rte_swx_pipeline *p)
1167 struct port_out *port;
1170 CHECK(p->n_ports_out, EINVAL);
1172 for (i = 0; i < p->n_ports_out; i++)
1173 CHECK(port_out_find(p, i), EINVAL);
1175 p->out = calloc(p->n_ports_out, sizeof(struct port_out_runtime));
1176 CHECK(p->out, ENOMEM);
1178 TAILQ_FOREACH(port, &p->ports_out, node) {
1179 struct port_out_runtime *out = &p->out[port->id];
1181 out->pkt_tx = port->type->ops.pkt_tx;
1182 out->flush = port->type->ops.flush;
1183 out->obj = port->obj;
1190 port_out_build_free(struct rte_swx_pipeline *p)
1197 port_out_free(struct rte_swx_pipeline *p)
1199 port_out_build_free(p);
1203 struct port_out *port;
1205 port = TAILQ_FIRST(&p->ports_out);
1209 TAILQ_REMOVE(&p->ports_out, port, node);
1210 port->type->ops.free(port->obj);
1214 /* Output port types. */
1216 struct port_out_type *elem;
1218 elem = TAILQ_FIRST(&p->port_out_types);
1222 TAILQ_REMOVE(&p->port_out_types, elem, node);
1230 static struct extern_type *
1231 extern_type_find(struct rte_swx_pipeline *p, const char *name)
1233 struct extern_type *elem;
1235 TAILQ_FOREACH(elem, &p->extern_types, node)
1236 if (strcmp(elem->name, name) == 0)
1242 static struct extern_type_member_func *
1243 extern_type_member_func_find(struct extern_type *type, const char *name)
1245 struct extern_type_member_func *elem;
1247 TAILQ_FOREACH(elem, &type->funcs, node)
1248 if (strcmp(elem->name, name) == 0)
1254 static struct extern_obj *
1255 extern_obj_find(struct rte_swx_pipeline *p, const char *name)
1257 struct extern_obj *elem;
1259 TAILQ_FOREACH(elem, &p->extern_objs, node)
1260 if (strcmp(elem->name, name) == 0)
1266 static struct field *
1267 extern_obj_mailbox_field_parse(struct rte_swx_pipeline *p,
1269 struct extern_obj **object)
1271 struct extern_obj *obj;
1273 char *obj_name, *field_name;
1275 if ((name[0] != 'e') || (name[1] != '.'))
1278 obj_name = strdup(&name[2]);
1282 field_name = strchr(obj_name, '.');
1291 obj = extern_obj_find(p, obj_name);
1297 f = struct_type_field_find(obj->type->mailbox_struct_type, field_name);
1311 rte_swx_pipeline_extern_type_register(struct rte_swx_pipeline *p,
1313 const char *mailbox_struct_type_name,
1314 rte_swx_extern_type_constructor_t constructor,
1315 rte_swx_extern_type_destructor_t destructor)
1317 struct extern_type *elem;
1318 struct struct_type *mailbox_struct_type;
1322 CHECK_NAME(name, EINVAL);
1323 CHECK(!extern_type_find(p, name), EEXIST);
1325 CHECK_NAME(mailbox_struct_type_name, EINVAL);
1326 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
1327 CHECK(mailbox_struct_type, EINVAL);
1329 CHECK(constructor, EINVAL);
1330 CHECK(destructor, EINVAL);
1332 /* Node allocation. */
1333 elem = calloc(1, sizeof(struct extern_type));
1334 CHECK(elem, ENOMEM);
1336 /* Node initialization. */
1337 strcpy(elem->name, name);
1338 elem->mailbox_struct_type = mailbox_struct_type;
1339 elem->constructor = constructor;
1340 elem->destructor = destructor;
1341 TAILQ_INIT(&elem->funcs);
1343 /* Node add to tailq. */
1344 TAILQ_INSERT_TAIL(&p->extern_types, elem, node);
1350 rte_swx_pipeline_extern_type_member_func_register(struct rte_swx_pipeline *p,
1351 const char *extern_type_name,
1353 rte_swx_extern_type_member_func_t member_func)
1355 struct extern_type *type;
1356 struct extern_type_member_func *type_member;
1360 CHECK(extern_type_name, EINVAL);
1361 type = extern_type_find(p, extern_type_name);
1362 CHECK(type, EINVAL);
1363 CHECK(type->n_funcs < RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX, ENOSPC);
1365 CHECK(name, EINVAL);
1366 CHECK(!extern_type_member_func_find(type, name), EEXIST);
1368 CHECK(member_func, EINVAL);
1370 /* Node allocation. */
1371 type_member = calloc(1, sizeof(struct extern_type_member_func));
1372 CHECK(type_member, ENOMEM);
1374 /* Node initialization. */
1375 strcpy(type_member->name, name);
1376 type_member->func = member_func;
1377 type_member->id = type->n_funcs;
1379 /* Node add to tailq. */
1380 TAILQ_INSERT_TAIL(&type->funcs, type_member, node);
1387 rte_swx_pipeline_extern_object_config(struct rte_swx_pipeline *p,
1388 const char *extern_type_name,
1392 struct extern_type *type;
1393 struct extern_obj *obj;
1398 CHECK_NAME(extern_type_name, EINVAL);
1399 type = extern_type_find(p, extern_type_name);
1400 CHECK(type, EINVAL);
1402 CHECK_NAME(name, EINVAL);
1403 CHECK(!extern_obj_find(p, name), EEXIST);
1405 /* Node allocation. */
1406 obj = calloc(1, sizeof(struct extern_obj));
1409 /* Object construction. */
1410 obj_handle = type->constructor(args);
1416 /* Node initialization. */
1417 strcpy(obj->name, name);
1419 obj->obj = obj_handle;
1420 obj->struct_id = p->n_structs;
1421 obj->id = p->n_extern_objs;
1423 /* Node add to tailq. */
1424 TAILQ_INSERT_TAIL(&p->extern_objs, obj, node);
1432 extern_obj_build(struct rte_swx_pipeline *p)
1436 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1437 struct thread *t = &p->threads[i];
1438 struct extern_obj *obj;
1440 t->extern_objs = calloc(p->n_extern_objs,
1441 sizeof(struct extern_obj_runtime));
1442 CHECK(t->extern_objs, ENOMEM);
1444 TAILQ_FOREACH(obj, &p->extern_objs, node) {
1445 struct extern_obj_runtime *r =
1446 &t->extern_objs[obj->id];
1447 struct extern_type_member_func *func;
1448 uint32_t mailbox_size =
1449 obj->type->mailbox_struct_type->n_bits / 8;
1453 r->mailbox = calloc(1, mailbox_size);
1454 CHECK(r->mailbox, ENOMEM);
1456 TAILQ_FOREACH(func, &obj->type->funcs, node)
1457 r->funcs[func->id] = func->func;
1459 t->structs[obj->struct_id] = r->mailbox;
1467 extern_obj_build_free(struct rte_swx_pipeline *p)
1471 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1472 struct thread *t = &p->threads[i];
1475 if (!t->extern_objs)
1478 for (j = 0; j < p->n_extern_objs; j++) {
1479 struct extern_obj_runtime *r = &t->extern_objs[j];
1484 free(t->extern_objs);
1485 t->extern_objs = NULL;
1490 extern_obj_free(struct rte_swx_pipeline *p)
1492 extern_obj_build_free(p);
1494 /* Extern objects. */
1496 struct extern_obj *elem;
1498 elem = TAILQ_FIRST(&p->extern_objs);
1502 TAILQ_REMOVE(&p->extern_objs, elem, node);
1504 elem->type->destructor(elem->obj);
1510 struct extern_type *elem;
1512 elem = TAILQ_FIRST(&p->extern_types);
1516 TAILQ_REMOVE(&p->extern_types, elem, node);
1519 struct extern_type_member_func *func;
1521 func = TAILQ_FIRST(&elem->funcs);
1525 TAILQ_REMOVE(&elem->funcs, func, node);
1536 static struct extern_func *
1537 extern_func_find(struct rte_swx_pipeline *p, const char *name)
1539 struct extern_func *elem;
1541 TAILQ_FOREACH(elem, &p->extern_funcs, node)
1542 if (strcmp(elem->name, name) == 0)
1548 static struct field *
1549 extern_func_mailbox_field_parse(struct rte_swx_pipeline *p,
1551 struct extern_func **function)
1553 struct extern_func *func;
1555 char *func_name, *field_name;
1557 if ((name[0] != 'f') || (name[1] != '.'))
1560 func_name = strdup(&name[2]);
1564 field_name = strchr(func_name, '.');
1573 func = extern_func_find(p, func_name);
1579 f = struct_type_field_find(func->mailbox_struct_type, field_name);
1593 rte_swx_pipeline_extern_func_register(struct rte_swx_pipeline *p,
1595 const char *mailbox_struct_type_name,
1596 rte_swx_extern_func_t func)
1598 struct extern_func *f;
1599 struct struct_type *mailbox_struct_type;
1603 CHECK_NAME(name, EINVAL);
1604 CHECK(!extern_func_find(p, name), EEXIST);
1606 CHECK_NAME(mailbox_struct_type_name, EINVAL);
1607 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
1608 CHECK(mailbox_struct_type, EINVAL);
1610 CHECK(func, EINVAL);
1612 /* Node allocation. */
1613 f = calloc(1, sizeof(struct extern_func));
1614 CHECK(func, ENOMEM);
1616 /* Node initialization. */
1617 strcpy(f->name, name);
1618 f->mailbox_struct_type = mailbox_struct_type;
1620 f->struct_id = p->n_structs;
1621 f->id = p->n_extern_funcs;
1623 /* Node add to tailq. */
1624 TAILQ_INSERT_TAIL(&p->extern_funcs, f, node);
1625 p->n_extern_funcs++;
1632 extern_func_build(struct rte_swx_pipeline *p)
1636 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1637 struct thread *t = &p->threads[i];
1638 struct extern_func *func;
1640 /* Memory allocation. */
1641 t->extern_funcs = calloc(p->n_extern_funcs,
1642 sizeof(struct extern_func_runtime));
1643 CHECK(t->extern_funcs, ENOMEM);
1645 /* Extern function. */
1646 TAILQ_FOREACH(func, &p->extern_funcs, node) {
1647 struct extern_func_runtime *r =
1648 &t->extern_funcs[func->id];
1649 uint32_t mailbox_size =
1650 func->mailbox_struct_type->n_bits / 8;
1652 r->func = func->func;
1654 r->mailbox = calloc(1, mailbox_size);
1655 CHECK(r->mailbox, ENOMEM);
1657 t->structs[func->struct_id] = r->mailbox;
1665 extern_func_build_free(struct rte_swx_pipeline *p)
1669 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1670 struct thread *t = &p->threads[i];
1673 if (!t->extern_funcs)
1676 for (j = 0; j < p->n_extern_funcs; j++) {
1677 struct extern_func_runtime *r = &t->extern_funcs[j];
1682 free(t->extern_funcs);
1683 t->extern_funcs = NULL;
1688 extern_func_free(struct rte_swx_pipeline *p)
1690 extern_func_build_free(p);
1693 struct extern_func *elem;
1695 elem = TAILQ_FIRST(&p->extern_funcs);
1699 TAILQ_REMOVE(&p->extern_funcs, elem, node);
1707 static struct header *
1708 header_find(struct rte_swx_pipeline *p, const char *name)
1710 struct header *elem;
1712 TAILQ_FOREACH(elem, &p->headers, node)
1713 if (strcmp(elem->name, name) == 0)
1719 static struct header *
1720 header_parse(struct rte_swx_pipeline *p,
1723 if (name[0] != 'h' || name[1] != '.')
1726 return header_find(p, &name[2]);
1729 static struct field *
1730 header_field_parse(struct rte_swx_pipeline *p,
1732 struct header **header)
1736 char *header_name, *field_name;
1738 if ((name[0] != 'h') || (name[1] != '.'))
1741 header_name = strdup(&name[2]);
1745 field_name = strchr(header_name, '.');
1754 h = header_find(p, header_name);
1760 f = struct_type_field_find(h->st, field_name);
/*
 * Public API: register a packet header of a previously registered struct
 * type under the given name. Fails with EINVAL on bad names, EEXIST on a
 * duplicate header name, and ENOSPC once the header count would exceed the
 * bit width of thread.valid_headers (each header occupies one valid bit).
 * NOTE(review): some lines (argument list, error returns) are elided in
 * this excerpt.
 */
1774 rte_swx_pipeline_packet_header_register(struct rte_swx_pipeline *p,
1776 const char *struct_type_name)
1778 struct struct_type *st;
1780 size_t n_headers_max;
1783 CHECK_NAME(name, EINVAL);
1784 CHECK_NAME(struct_type_name, EINVAL);
1786 CHECK(!header_find(p, name), EEXIST);
1788 st = struct_type_find(p, struct_type_name);
/* One valid-header bit per header: cap at the bitmask width. */
1791 n_headers_max = RTE_SIZEOF_FIELD(struct thread, valid_headers) * 8;
1792 CHECK(p->n_headers < n_headers_max, ENOSPC);
1794 /* Node allocation. */
1795 h = calloc(1, sizeof(struct header));
1798 /* Node initialization. */
1799 strcpy(h->name, name);
/* IDs are assigned sequentially from the current struct/header counters. */
1801 h->struct_id = p->n_structs;
1802 h->id = p->n_headers;
1804 /* Node add to tailq. */
1805 TAILQ_INSERT_TAIL(&p->headers, h, node);
/*
 * header_build: per-thread build step for headers. Sizes a single backing
 * buffer large enough for all headers, then gives every thread a
 * header_runtime array, a headers_out array, and in/out storage buffers.
 * Each header's default storage slice (ptr0) is carved out of
 * header_storage at a running offset, and aliased into structs[] so
 * instructions can address it by struct_id.
 * NOTE(review): excerpt elides braces/returns.
 */
1813 header_build(struct rte_swx_pipeline *p)
1816 uint32_t n_bytes = 0, i;
/* Total byte footprint of all registered headers. */
1818 TAILQ_FOREACH(h, &p->headers, node) {
1819 n_bytes += h->st->n_bits / 8;
1822 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1823 struct thread *t = &p->threads[i];
1824 uint32_t offset = 0;
1826 t->headers = calloc(p->n_headers,
1827 sizeof(struct header_runtime));
1828 CHECK(t->headers, ENOMEM);
1830 t->headers_out = calloc(p->n_headers,
1831 sizeof(struct header_out_runtime));
1832 CHECK(t->headers_out, ENOMEM);
1834 t->header_storage = calloc(1, n_bytes);
1835 CHECK(t->header_storage, ENOMEM);
1837 t->header_out_storage = calloc(1, n_bytes);
1838 CHECK(t->header_out_storage, ENOMEM);
1840 TAILQ_FOREACH(h, &p->headers, node) {
1841 uint8_t *header_storage;
/* Slice this header's default storage out of the shared buffer. */
1843 header_storage = &t->header_storage[offset];
1844 offset += h->st->n_bits / 8;
1846 t->headers[h->id].ptr0 = header_storage;
1847 t->structs[h->struct_id] = header_storage;
/*
 * header_build_free: release per-thread header runtime state allocated by
 * header_build (arrays and backing storage), nulling pointers so repeated
 * calls are safe. NOTE(review): the headers array free appears in elided
 * lines — TODO confirm.
 */
1855 header_build_free(struct rte_swx_pipeline *p)
1859 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1860 struct thread *t = &p->threads[i];
1862 free(t->headers_out);
1863 t->headers_out = NULL;
1868 free(t->header_out_storage);
1869 t->header_out_storage = NULL;
1871 free(t->header_storage);
1872 t->header_storage = NULL;
/*
 * header_free: full teardown — release runtime state, then drain the
 * header registration tailq.
 */
1877 header_free(struct rte_swx_pipeline *p)
1879 header_build_free(p);
1882 struct header *elem;
1884 elem = TAILQ_FIRST(&p->headers);
1888 TAILQ_REMOVE(&p->headers, elem, node);
/*
 * metadata_field_parse: resolve an "m.<field>" token against the single
 * registered metadata struct type. Fails (elided return) when no metadata
 * struct has been registered or the "m." prefix is missing.
 */
1896 static struct field *
1897 metadata_field_parse(struct rte_swx_pipeline *p, const char *name)
1899 if (!p->metadata_st)
1902 if (name[0] != 'm' || name[1] != '.')
1905 return struct_type_field_find(p->metadata_st, &name[2]);
/*
 * Public API: register the packet metadata struct type. Only one metadata
 * registration is allowed (EINVAL if already set); the metadata gets the
 * next sequential struct_id.
 */
1909 rte_swx_pipeline_packet_metadata_register(struct rte_swx_pipeline *p,
1910 const char *struct_type_name)
1912 struct struct_type *st = NULL;
1916 CHECK_NAME(struct_type_name, EINVAL);
1917 st = struct_type_find(p, struct_type_name);
1919 CHECK(!p->metadata_st, EINVAL);
1921 p->metadata_st = st;
1922 p->metadata_struct_id = p->n_structs;
/*
 * metadata_build: allocate one zeroed metadata buffer per thread, sized
 * from the registered metadata struct type, and publish it both as
 * t->metadata and in the structs[] table at the metadata struct_id.
 * NOTE(review): excerpt elides braces/returns.
 */
1930 metadata_build(struct rte_swx_pipeline *p)
1932 uint32_t n_bytes = p->metadata_st->n_bits / 8;
1935 /* Thread-level initialization. */
1936 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1937 struct thread *t = &p->threads[i];
1940 metadata = calloc(1, n_bytes);
1941 CHECK(metadata, ENOMEM);
1943 t->metadata = metadata;
1944 t->structs[p->metadata_struct_id] = metadata;
/*
 * metadata_build_free / metadata_free: per-thread teardown of the metadata
 * buffers (the actual free() calls are in elided lines — TODO confirm).
 */
1951 metadata_build_free(struct rte_swx_pipeline *p)
1955 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1956 struct thread *t = &p->threads[i];
1964 metadata_free(struct rte_swx_pipeline *p)
1966 metadata_build_free(p);
1972 static struct field *
1973 action_field_parse(struct action *action, const char *name);
/*
 * struct_field_parse: dispatch a field token to the right namespace based
 * on its prefix, returning the field and (via *struct_id) the struct slot
 * the field lives in. Visible cases: header fields ("h."), metadata
 * ("m."), action arguments, extern object mailboxes, and extern function
 * mailboxes. NOTE(review): the prefix switch itself is in elided lines;
 * only the per-case resolution is visible here.
 */
1975 static struct field *
1976 struct_field_parse(struct rte_swx_pipeline *p,
1977 struct action *action,
1979 uint32_t *struct_id)
1986 struct header *header;
1988 f = header_field_parse(p, name, &header);
1992 *struct_id = header->struct_id;
1998 f = metadata_field_parse(p, name);
2002 *struct_id = p->metadata_struct_id;
2011 f = action_field_parse(action, name);
2021 struct extern_obj *obj;
2023 f = extern_obj_mailbox_field_parse(p, name, &obj);
2027 *struct_id = obj->struct_id;
2033 struct extern_func *func;
2035 f = extern_func_mailbox_field_parse(p, name, &func);
2039 *struct_id = func->struct_id;
/*
 * pipeline_port_inc: advance to the next input port round-robin. The mask
 * trick requires n_ports_in to be a power of two — presumably enforced at
 * build time (TODO confirm in elided code).
 */
2049 pipeline_port_inc(struct rte_swx_pipeline *p)
2051 p->port_id = (p->port_id + 1) & (p->n_ports_in - 1);
/* thread_ip_reset: point the thread's instruction pointer at program start. */
2055 thread_ip_reset(struct rte_swx_pipeline *p, struct thread *t)
2057 t->ip = p->instructions;
2061 thread_ip_inc(struct rte_swx_pipeline *p);
/* thread_ip_inc: advance the current thread's instruction pointer. */
2064 thread_ip_inc(struct rte_swx_pipeline *p)
2066 struct thread *t = &p->threads[p->thread_id];
/* thread_ip_inc_cond: advance the IP only when cond is non-zero. */
2072 thread_ip_inc_cond(struct thread *t, int cond)
/*
 * thread_yield: hand execution to the next thread. The mask requires
 * RTE_SWX_PIPELINE_THREADS_MAX to be a power of two.
 */
2078 thread_yield(struct rte_swx_pipeline *p)
2080 p->thread_id = (p->thread_id + 1) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
/*
 * instr_rx_translate: parse "rx m.<field>" — the metadata field that will
 * receive the input port ID. Rejected inside an action (CHECK(!action)).
 * Encodes the field's byte offset and bit width into the instruction.
 */
2087 instr_rx_translate(struct rte_swx_pipeline *p,
2088 struct action *action,
2091 struct instruction *instr,
2092 struct instruction_data *data __rte_unused)
2096 CHECK(!action, EINVAL);
2097 CHECK(n_tokens == 2, EINVAL);
2099 f = metadata_field_parse(p, tokens[1]);
2102 instr->type = INSTR_RX;
2103 instr->io.io.offset = f->offset / 8;
2104 instr->io.io.n_bits = f->n_bits;
2109 instr_rx_exec(struct rte_swx_pipeline *p);
/*
 * instr_rx_exec: pull one packet from the current input port, reset the
 * thread's per-packet state (valid headers, emitted headers), record the
 * port ID into metadata, snapshot the table state, then round-robin the
 * port and advance the IP only when a packet was actually received.
 */
2112 instr_rx_exec(struct rte_swx_pipeline *p)
2114 struct thread *t = &p->threads[p->thread_id];
2115 struct instruction *ip = t->ip;
2116 struct port_in_runtime *port = &p->in[p->port_id];
2117 struct rte_swx_pkt *pkt = &t->pkt;
2121 pkt_received = port->pkt_rx(port->obj, pkt);
/* Prefetch the packet's first byte of headroom-adjusted data. */
2122 t->ptr = &pkt->pkt[pkt->offset];
2123 rte_prefetch0(t->ptr);
2125 TRACE("[Thread %2u] rx %s from port %u\n",
2127 pkt_received ? "1 pkt" : "0 pkts",
/* Fresh packet: no headers valid, no headers queued for emit. */
2131 t->valid_headers = 0;
2132 t->n_headers_out = 0;
2135 METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, p->port_id);
2138 t->table_state = p->table_state;
2141 pipeline_port_inc(p);
/* Stall on this instruction (do not advance) when no packet arrived. */
2142 thread_ip_inc_cond(t, pkt_received);
/*
 * instr_tx_translate: parse "tx m.<field>" — the metadata field holding
 * the output port ID — and encode its byte offset / bit width.
 */
2150 instr_tx_translate(struct rte_swx_pipeline *p,
2151 struct action *action __rte_unused,
2154 struct instruction *instr,
2155 struct instruction_data *data __rte_unused)
2159 CHECK(n_tokens == 2, EINVAL);
2161 f = metadata_field_parse(p, tokens[1]);
2164 instr->type = INSTR_TX;
2165 instr->io.io.offset = f->offset / 8;
2166 instr->io.io.n_bits = f->n_bits;
/*
 * emit_handler: materialize the queued emitted headers in front of the
 * packet payload before TX. Fast paths avoid copying:
 *  - single emitted header already contiguous with the payload: just grow
 *    the packet window over it (no memcpy);
 *  - exactly two headers where the second is contiguous with the payload
 *    and the first sits in its default storage: one memcpy of the first
 *    header in front of the second.
 * All other cases fall through to the generic path: gather every emitted
 * header into header_out_storage, then copy the whole run in front of the
 * payload.
 */
2171 emit_handler(struct thread *t)
2173 struct header_out_runtime *h0 = &t->headers_out[0];
2174 struct header_out_runtime *h1 = &t->headers_out[1];
2175 uint32_t offset = 0, i;
2177 /* No header change or header decapsulation. */
2178 if ((t->n_headers_out == 1) &&
2179 (h0->ptr + h0->n_bytes == t->ptr)) {
2180 TRACE("Emit handler: no header change or header decap.\n");
2182 t->pkt.offset -= h0->n_bytes;
2183 t->pkt.length += h0->n_bytes;
2188 /* Header encapsulation (optionally, with prior header decasulation). */
2189 if ((t->n_headers_out == 2) &&
2190 (h1->ptr + h1->n_bytes == t->ptr) &&
2191 (h0->ptr == h0->ptr0)) {
2194 TRACE("Emit handler: header encapsulation.\n");
2196 offset = h0->n_bytes + h1->n_bytes;
2197 memcpy(t->ptr - offset, h0->ptr, h0->n_bytes);
2198 t->pkt.offset -= offset;
2199 t->pkt.length += offset;
2204 /* Header insertion. */
2207 /* Header extraction. */
2210 /* For any other case. */
2211 TRACE("Emit handler: complex case.\n");
/* Gather all emitted headers contiguously into scratch storage. */
2213 for (i = 0; i < t->n_headers_out; i++) {
2214 struct header_out_runtime *h = &t->headers_out[i];
2216 memcpy(&t->header_out_storage[offset], h->ptr, h->n_bytes);
2217 offset += h->n_bytes;
/* Single copy of the gathered headers in front of the payload. */
2221 memcpy(t->ptr - offset, t->header_out_storage, offset);
2222 t->pkt.offset -= offset;
2223 t->pkt.length += offset;
2228 instr_tx_exec(struct rte_swx_pipeline *p);
/*
 * instr_tx_exec: read the output port ID from metadata, run the emit
 * handler to finalize headers, hand the packet to the port's TX op, and
 * restart this thread's program from the first instruction.
 */
2231 instr_tx_exec(struct rte_swx_pipeline *p)
2233 struct thread *t = &p->threads[p->thread_id];
2234 struct instruction *ip = t->ip;
2235 uint64_t port_id = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
2236 struct port_out_runtime *port = &p->out[port_id];
2237 struct rte_swx_pkt *pkt = &t->pkt;
2239 TRACE("[Thread %2u]: tx 1 pkt to port %u\n",
2247 port->pkt_tx(port->obj, pkt);
2250 thread_ip_reset(p, t);
/*
 * instr_hdr_extract_translate: parse "extract h.<header>". Not allowed
 * inside an action. Records the header's id, struct slot, and byte size
 * in lane 0 of the instruction (later lanes are filled by instruction
 * fusion).
 */
2258 instr_hdr_extract_translate(struct rte_swx_pipeline *p,
2259 struct action *action,
2262 struct instruction *instr,
2263 struct instruction_data *data __rte_unused)
2267 CHECK(!action, EINVAL);
2268 CHECK(n_tokens == 2, EINVAL);
2270 h = header_parse(p, tokens[1]);
2273 instr->type = INSTR_HDR_EXTRACT;
2274 instr->io.hdr.header_id[0] = h->id;
2275 instr->io.hdr.struct_id[0] = h->struct_id;
2276 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
2281 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract);
/*
 * __instr_hdr_extract_exec: common body for 1..8 fused extracts. For each
 * lane, point the header's struct slot directly at the packet bytes
 * (zero-copy) and set its valid bit; packet offset/length bookkeeping is
 * in elided lines. Local copies of the thread fields are written back once
 * at the end.
 */
2284 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract)
2286 struct thread *t = &p->threads[p->thread_id];
2287 struct instruction *ip = t->ip;
2288 uint64_t valid_headers = t->valid_headers;
2289 uint8_t *ptr = t->ptr;
2290 uint32_t offset = t->pkt.offset;
2291 uint32_t length = t->pkt.length;
2294 for (i = 0; i < n_extract; i++) {
2295 uint32_t header_id = ip->io.hdr.header_id[i];
2296 uint32_t struct_id = ip->io.hdr.struct_id[i];
2297 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
2299 TRACE("[Thread %2u]: extract header %u (%u bytes)\n",
/* Zero-copy: the header now aliases the packet buffer. */
2305 t->structs[struct_id] = ptr;
2306 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
2315 t->valid_headers = valid_headers;
2318 t->pkt.offset = offset;
2319 t->pkt.length = length;
/* Single-extract entry point. */
2324 instr_hdr_extract_exec(struct rte_swx_pipeline *p)
2326 __instr_hdr_extract_exec(p, 1);
/*
 * Fused variants: extract2..extract8 run N consecutive extract
 * instructions as one, sharing the loop above.
 */
2333 instr_hdr_extract2_exec(struct rte_swx_pipeline *p)
2335 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
2338 __instr_hdr_extract_exec(p, 2);
2345 instr_hdr_extract3_exec(struct rte_swx_pipeline *p)
2347 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
2350 __instr_hdr_extract_exec(p, 3);
2357 instr_hdr_extract4_exec(struct rte_swx_pipeline *p)
2359 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
2362 __instr_hdr_extract_exec(p, 4);
2369 instr_hdr_extract5_exec(struct rte_swx_pipeline *p)
2371 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
2374 __instr_hdr_extract_exec(p, 5);
2381 instr_hdr_extract6_exec(struct rte_swx_pipeline *p)
2383 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
2386 __instr_hdr_extract_exec(p, 6);
2393 instr_hdr_extract7_exec(struct rte_swx_pipeline *p)
2395 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
2398 __instr_hdr_extract_exec(p, 7);
2405 instr_hdr_extract8_exec(struct rte_swx_pipeline *p)
2407 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
2410 __instr_hdr_extract_exec(p, 8);
/*
 * instr_hdr_emit_translate: parse "emit h.<header>" into lane 0 (header
 * id, struct slot, byte size); later lanes are filled by fusion.
 */
2420 instr_hdr_emit_translate(struct rte_swx_pipeline *p,
2421 struct action *action __rte_unused,
2424 struct instruction *instr,
2425 struct instruction_data *data __rte_unused)
2429 CHECK(n_tokens == 2, EINVAL);
2431 h = header_parse(p, tokens[1]);
2434 instr->type = INSTR_HDR_EMIT;
2435 instr->io.hdr.header_id[0] = h->id;
2436 instr->io.hdr.struct_id[0] = h->struct_id;
2437 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
2442 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit);
/*
 * __instr_hdr_emit_exec: queue 1..8 headers for emission. Contiguous
 * headers are coalesced into the current headers_out[] entry (just growing
 * its byte count); a non-contiguous header starts a new entry. The actual
 * packet rewrite is deferred to emit_handler() at TX time.
 * NOTE(review): the branch structure between the visible assignments is
 * partly elided.
 */
2445 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit)
2447 struct thread *t = &p->threads[p->thread_id];
2448 struct instruction *ip = t->ip;
2449 uint32_t n_headers_out = t->n_headers_out;
/* Current (last) output entry; only valid when n_headers_out > 0. */
2450 struct header_out_runtime *ho = &t->headers_out[n_headers_out - 1];
2451 uint8_t *ho_ptr = NULL;
2452 uint32_t ho_nbytes = 0, i;
2454 for (i = 0; i < n_emit; i++) {
2455 uint32_t header_id = ip->io.hdr.header_id[i];
2456 uint32_t struct_id = ip->io.hdr.struct_id[i];
2457 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
2459 struct header_runtime *hi = &t->headers[header_id];
2460 uint8_t *hi_ptr = t->structs[struct_id];
2462 TRACE("[Thread %2u]: emit header %u\n",
/* First emitted header: open output entry 0. */
2468 if (!t->n_headers_out) {
2469 ho = &t->headers_out[0];
2471 ho->ptr0 = hi->ptr0;
2475 ho_nbytes = n_bytes;
2482 ho_nbytes = ho->n_bytes;
/* Contiguous with the current entry: coalesce by growing it. */
2486 if (ho_ptr + ho_nbytes == hi_ptr) {
2487 ho_nbytes += n_bytes;
/* Otherwise close the current entry and open a new one. */
2489 ho->n_bytes = ho_nbytes;
2492 ho->ptr0 = hi->ptr0;
2496 ho_nbytes = n_bytes;
2502 ho->n_bytes = ho_nbytes;
2503 t->n_headers_out = n_headers_out;
/* Single-emit entry point. */
2507 instr_hdr_emit_exec(struct rte_swx_pipeline *p)
2509 __instr_hdr_emit_exec(p, 1);
/*
 * Fused emit+tx variants: emitN_tx runs N emits followed by a tx as one
 * instruction (hence "N+1 instructions are fused" in the trace).
 */
2516 instr_hdr_emit_tx_exec(struct rte_swx_pipeline *p)
2518 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
2521 __instr_hdr_emit_exec(p, 1);
2526 instr_hdr_emit2_tx_exec(struct rte_swx_pipeline *p)
2528 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
2531 __instr_hdr_emit_exec(p, 2);
2536 instr_hdr_emit3_tx_exec(struct rte_swx_pipeline *p)
2538 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
2541 __instr_hdr_emit_exec(p, 3);
2546 instr_hdr_emit4_tx_exec(struct rte_swx_pipeline *p)
2548 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
2551 __instr_hdr_emit_exec(p, 4);
2556 instr_hdr_emit5_tx_exec(struct rte_swx_pipeline *p)
2558 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
2561 __instr_hdr_emit_exec(p, 5);
2566 instr_hdr_emit6_tx_exec(struct rte_swx_pipeline *p)
2568 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
2571 __instr_hdr_emit_exec(p, 6);
2576 instr_hdr_emit7_tx_exec(struct rte_swx_pipeline *p)
2578 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
2581 __instr_hdr_emit_exec(p, 7);
2586 instr_hdr_emit8_tx_exec(struct rte_swx_pipeline *p)
2588 TRACE("[Thread %2u] *** The next 9 instructions are fused. ***\n",
2591 __instr_hdr_emit_exec(p, 8);
/*
 * instr_hdr_validate_translate: parse "validate h.<header>" — records the
 * header ID whose valid bit the exec will set.
 */
2599 instr_hdr_validate_translate(struct rte_swx_pipeline *p,
2600 struct action *action __rte_unused,
2603 struct instruction *instr,
2604 struct instruction_data *data __rte_unused)
2608 CHECK(n_tokens == 2, EINVAL);
2610 h = header_parse(p, tokens[1]);
2613 instr->type = INSTR_HDR_VALIDATE;
2614 instr->valid.header_id = h->id;
/* instr_hdr_validate_exec: set the header's bit in the valid mask. */
2619 instr_hdr_validate_exec(struct rte_swx_pipeline *p)
2621 struct thread *t = &p->threads[p->thread_id];
2622 struct instruction *ip = t->ip;
2623 uint32_t header_id = ip->valid.header_id;
2625 TRACE("[Thread %2u] validate header %u\n", p->thread_id, header_id);
2628 t->valid_headers = MASK64_BIT_SET(t->valid_headers, header_id);
/*
 * instr_hdr_invalidate_translate: parse "invalidate h.<header>" — mirror
 * of validate.
 */
2638 instr_hdr_invalidate_translate(struct rte_swx_pipeline *p,
2639 struct action *action __rte_unused,
2642 struct instruction *instr,
2643 struct instruction_data *data __rte_unused)
2647 CHECK(n_tokens == 2, EINVAL);
2649 h = header_parse(p, tokens[1]);
2652 instr->type = INSTR_HDR_INVALIDATE;
2653 instr->valid.header_id = h->id;
/* instr_hdr_invalidate_exec: clear the header's bit in the valid mask. */
2658 instr_hdr_invalidate_exec(struct rte_swx_pipeline *p)
2660 struct thread *t = &p->threads[p->thread_id];
2661 struct instruction *ip = t->ip;
2662 uint32_t header_id = ip->valid.header_id;
2664 TRACE("[Thread %2u] invalidate header %u\n", p->thread_id, header_id);
2667 t->valid_headers = MASK64_BIT_CLR(t->valid_headers, header_id);
/*
 * instr_mov_translate: parse "mov dst src". Three encodings:
 *  - MOV: both operands same endianness domain (h/h or non-h/non-h);
 *  - MOV_S: exactly one operand is a header field, so a byte-swap is
 *    needed between the network-order header and host-order metadata;
 *  - MOV_I: src is an immediate (falls through when struct_field_parse
 *    fails), parsed with strtoul and byte-swapped (htonl) for header
 *    destinations — the condition around the swap is elided here.
 */
2677 instr_mov_translate(struct rte_swx_pipeline *p,
2678 struct action *action,
2681 struct instruction *instr,
2682 struct instruction_data *data __rte_unused)
2684 char *dst = tokens[1], *src = tokens[2];
2685 struct field *fdst, *fsrc;
2686 uint32_t dst_struct_id, src_struct_id, src_val;
2688 CHECK(n_tokens == 3, EINVAL);
/* Destination must be a field (never an immediate); no action args. */
2690 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
2691 CHECK(fdst, EINVAL);
2694 fsrc = struct_field_parse(p, action, src, &src_struct_id);
2696 instr->type = INSTR_MOV;
2697 if ((dst[0] == 'h' && src[0] != 'h') ||
2698 (dst[0] != 'h' && src[0] == 'h'))
2699 instr->type = INSTR_MOV_S;
2701 instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
2702 instr->mov.dst.n_bits = fdst->n_bits;
2703 instr->mov.dst.offset = fdst->offset / 8;
2704 instr->mov.src.struct_id = (uint8_t)src_struct_id;
2705 instr->mov.src.n_bits = fsrc->n_bits;
2706 instr->mov.src.offset = fsrc->offset / 8;
/* Immediate source: strtoul must consume the whole token. */
2711 src_val = strtoul(src, &src, 0);
2712 CHECK(!src[0], EINVAL);
2715 src_val = htonl(src_val);
2717 instr->type = INSTR_MOV_I;
2718 instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
2719 instr->mov.dst.n_bits = fdst->n_bits;
2720 instr->mov.dst.offset = fdst->offset / 8;
2721 instr->mov.src_val = (uint32_t)src_val;
/* Exec handlers for the three mov encodings (bodies largely elided). */
2726 instr_mov_exec(struct rte_swx_pipeline *p)
2728 struct thread *t = &p->threads[p->thread_id];
2729 struct instruction *ip = t->ip;
2731 TRACE("[Thread %2u] mov\n",
2741 instr_mov_s_exec(struct rte_swx_pipeline *p)
2743 struct thread *t = &p->threads[p->thread_id];
2744 struct instruction *ip = t->ip;
2746 TRACE("[Thread %2u] mov (s)\n",
2756 instr_mov_i_exec(struct rte_swx_pipeline *p)
2758 struct thread *t = &p->threads[p->thread_id];
2759 struct instruction *ip = t->ip;
2761 TRACE("[Thread %2u] mov m.f %x\n",
/*
 * instr_dma_translate: parse "dma h.<header> t.<field>" — only valid
 * inside an action (CHECK(action)). Copies an action (table) data run into
 * a header: lane 0 records the destination header and the source offset
 * within the action data.
 */
2775 instr_dma_translate(struct rte_swx_pipeline *p,
2776 struct action *action,
2779 struct instruction *instr,
2780 struct instruction_data *data __rte_unused)
2782 char *dst = tokens[1];
2783 char *src = tokens[2];
2787 CHECK(action, EINVAL);
2788 CHECK(n_tokens == 3, EINVAL);
2790 h = header_parse(p, dst);
2793 tf = action_field_parse(action, src);
2796 instr->type = INSTR_DMA_HT;
2797 instr->dma.dst.header_id[0] = h->id;
2798 instr->dma.dst.struct_id[0] = h->struct_id;
2799 instr->dma.n_bytes[0] = h->st->n_bits / 8;
2800 instr->dma.src.offset[0] = tf->offset / 8;
2806 __instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma);
/*
 * __instr_dma_ht_exec: common body for 1..8 fused table-to-header DMAs.
 * structs[0] holds the current action data. For each lane: if the header
 * is already valid, copy over its current location; otherwise copy into
 * its default storage (ptr0) and mark it valid.
 */
2809 __instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma)
2811 struct thread *t = &p->threads[p->thread_id];
2812 struct instruction *ip = t->ip;
/* Slot 0 is reserved for the action (table entry) data. */
2813 uint8_t *action_data = t->structs[0];
2814 uint64_t valid_headers = t->valid_headers;
2817 for (i = 0; i < n_dma; i++) {
2818 uint32_t header_id = ip->dma.dst.header_id[i];
2819 uint32_t struct_id = ip->dma.dst.struct_id[i];
2820 uint32_t offset = ip->dma.src.offset[i];
2821 uint32_t n_bytes = ip->dma.n_bytes[i];
2823 struct header_runtime *h = &t->headers[header_id];
2824 uint8_t *h_ptr0 = h->ptr0;
2825 uint8_t *h_ptr = t->structs[struct_id];
/* Valid header: overwrite in place; invalid: use default storage. */
2827 void *dst = MASK64_BIT_GET(valid_headers, header_id) ?
2829 void *src = &action_data[offset];
2831 TRACE("[Thread %2u] dma h.s t.f\n", p->thread_id);
2834 memcpy(dst, src, n_bytes);
2835 t->structs[struct_id] = dst;
2836 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
2839 t->valid_headers = valid_headers;
/* Single-DMA entry point. */
2843 instr_dma_ht_exec(struct rte_swx_pipeline *p)
2845 __instr_dma_ht_exec(p, 1);
/* Fused variants: run 2..8 consecutive DMA instructions as one. */
2852 instr_dma_ht2_exec(struct rte_swx_pipeline *p)
2854 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
2857 __instr_dma_ht_exec(p, 2);
2864 instr_dma_ht3_exec(struct rte_swx_pipeline *p)
2866 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
2869 __instr_dma_ht_exec(p, 3);
2876 instr_dma_ht4_exec(struct rte_swx_pipeline *p)
2878 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
2881 __instr_dma_ht_exec(p, 4);
2888 instr_dma_ht5_exec(struct rte_swx_pipeline *p)
2890 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
2893 __instr_dma_ht_exec(p, 5);
2900 instr_dma_ht6_exec(struct rte_swx_pipeline *p)
2902 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
2905 __instr_dma_ht_exec(p, 6);
2912 instr_dma_ht7_exec(struct rte_swx_pipeline *p)
2914 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
2917 __instr_dma_ht_exec(p, 7);
2924 instr_dma_ht8_exec(struct rte_swx_pipeline *p)
2926 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
2929 __instr_dma_ht_exec(p, 8);
/*
 * instr_alu_add_translate: parse "add dst src". Field/field forms pick an
 * endianness-aware opcode from the h/m prefixes of the two operands
 * (HM/MH/HH variants swap between network and host byte order); when the
 * src is not a field it is parsed as an immediate (MI/HI variants — the
 * dst-prefix test between them is elided).
 */
2939 instr_alu_add_translate(struct rte_swx_pipeline *p,
2940 struct action *action,
2943 struct instruction *instr,
2944 struct instruction_data *data __rte_unused)
2946 char *dst = tokens[1], *src = tokens[2];
2947 struct field *fdst, *fsrc;
2948 uint32_t dst_struct_id, src_struct_id, src_val;
2950 CHECK(n_tokens == 3, EINVAL);
2952 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
2953 CHECK(fdst, EINVAL);
2955 /* ADD, ADD_HM, ADD_MH, ADD_HH. */
2956 fsrc = struct_field_parse(p, action, src, &src_struct_id);
2958 instr->type = INSTR_ALU_ADD;
2959 if (dst[0] == 'h' && src[0] == 'm')
2960 instr->type = INSTR_ALU_ADD_HM;
2961 if (dst[0] == 'm' && src[0] == 'h')
2962 instr->type = INSTR_ALU_ADD_MH;
2963 if (dst[0] == 'h' && src[0] == 'h')
2964 instr->type = INSTR_ALU_ADD_HH;
2966 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
2967 instr->alu.dst.n_bits = fdst->n_bits;
2968 instr->alu.dst.offset = fdst->offset / 8;
2969 instr->alu.src.struct_id = (uint8_t)src_struct_id;
2970 instr->alu.src.n_bits = fsrc->n_bits;
2971 instr->alu.src.offset = fsrc->offset / 8;
2975 /* ADD_MI, ADD_HI. */
2976 src_val = strtoul(src, &src, 0);
2977 CHECK(!src[0], EINVAL);
2979 instr->type = INSTR_ALU_ADD_MI;
2981 instr->type = INSTR_ALU_ADD_HI;
2983 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
2984 instr->alu.dst.n_bits = fdst->n_bits;
2985 instr->alu.dst.offset = fdst->offset / 8;
2986 instr->alu.src_val = (uint32_t)src_val;
/*
 * instr_alu_sub_translate: parse "sub dst src" — identical structure to
 * add above, selecting the SUB opcode family.
 */
2991 instr_alu_sub_translate(struct rte_swx_pipeline *p,
2992 struct action *action,
2995 struct instruction *instr,
2996 struct instruction_data *data __rte_unused)
2998 char *dst = tokens[1], *src = tokens[2];
2999 struct field *fdst, *fsrc;
3000 uint32_t dst_struct_id, src_struct_id, src_val;
3002 CHECK(n_tokens == 3, EINVAL);
3004 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3005 CHECK(fdst, EINVAL);
3007 /* SUB, SUB_HM, SUB_MH, SUB_HH. */
3008 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3010 instr->type = INSTR_ALU_SUB;
3011 if (dst[0] == 'h' && src[0] == 'm')
3012 instr->type = INSTR_ALU_SUB_HM;
3013 if (dst[0] == 'm' && src[0] == 'h')
3014 instr->type = INSTR_ALU_SUB_MH;
3015 if (dst[0] == 'h' && src[0] == 'h')
3016 instr->type = INSTR_ALU_SUB_HH;
3018 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3019 instr->alu.dst.n_bits = fdst->n_bits;
3020 instr->alu.dst.offset = fdst->offset / 8;
3021 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3022 instr->alu.src.n_bits = fsrc->n_bits;
3023 instr->alu.src.offset = fsrc->offset / 8;
3027 /* SUB_MI, SUB_HI. */
3028 src_val = strtoul(src, &src, 0);
3029 CHECK(!src[0], EINVAL);
3031 instr->type = INSTR_ALU_SUB_MI;
3033 instr->type = INSTR_ALU_SUB_HI;
3035 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3036 instr->alu.dst.n_bits = fdst->n_bits;
3037 instr->alu.dst.offset = fdst->offset / 8;
3038 instr->alu.src_val = (uint32_t)src_val;
/*
 * instr_alu_ckadd_translate: parse "ckadd h.<hdr>.<cksum> src" — Internet
 * checksum add. The destination must be a 16-bit header field. The source
 * is either another header field (CKADD_FIELD) or a whole header
 * (CKADD_STRUCT; the 20-byte case gets the specialized CKADD_STRUCT20,
 * sized for an IPv4 header with options — presumably, TODO confirm).
 */
3043 instr_alu_ckadd_translate(struct rte_swx_pipeline *p,
3044 struct action *action __rte_unused,
3047 struct instruction *instr,
3048 struct instruction_data *data __rte_unused)
3050 char *dst = tokens[1], *src = tokens[2];
3051 struct header *hdst, *hsrc;
3052 struct field *fdst, *fsrc;
3054 CHECK(n_tokens == 3, EINVAL);
3056 fdst = header_field_parse(p, dst, &hdst);
/* Checksum fields are exactly 16 bits wide. */
3057 CHECK(fdst && (fdst->n_bits == 16), EINVAL);
3060 fsrc = header_field_parse(p, src, &hsrc);
3062 instr->type = INSTR_ALU_CKADD_FIELD;
3063 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
3064 instr->alu.dst.n_bits = fdst->n_bits;
3065 instr->alu.dst.offset = fdst->offset / 8;
3066 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
3067 instr->alu.src.n_bits = fsrc->n_bits;
3068 instr->alu.src.offset = fsrc->offset / 8;
3072 /* CKADD_STRUCT, CKADD_STRUCT20. */
3073 hsrc = header_parse(p, src);
3074 CHECK(hsrc, EINVAL);
3076 instr->type = INSTR_ALU_CKADD_STRUCT;
3077 if ((hsrc->st->n_bits / 8) == 20)
3078 instr->type = INSTR_ALU_CKADD_STRUCT20;
3080 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
3081 instr->alu.dst.n_bits = fdst->n_bits;
3082 instr->alu.dst.offset = fdst->offset / 8;
3083 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
/* Whole-struct source: width is the header size; offset is unused. */
3084 instr->alu.src.n_bits = hsrc->st->n_bits;
3085 instr->alu.src.offset = 0; /* Unused. */
/*
 * instr_alu_cksub_translate: parse "cksub dst src" — Internet checksum
 * subtract; field/field form only, same 16-bit destination constraint.
 */
3090 instr_alu_cksub_translate(struct rte_swx_pipeline *p,
3091 struct action *action __rte_unused,
3094 struct instruction *instr,
3095 struct instruction_data *data __rte_unused)
3097 char *dst = tokens[1], *src = tokens[2];
3098 struct header *hdst, *hsrc;
3099 struct field *fdst, *fsrc;
3101 CHECK(n_tokens == 3, EINVAL);
3103 fdst = header_field_parse(p, dst, &hdst);
3104 CHECK(fdst && (fdst->n_bits == 16), EINVAL);
3106 fsrc = header_field_parse(p, src, &hsrc);
3107 CHECK(fsrc, EINVAL);
3109 instr->type = INSTR_ALU_CKSUB_FIELD;
3110 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
3111 instr->alu.dst.n_bits = fdst->n_bits;
3112 instr->alu.dst.offset = fdst->offset / 8;
3113 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
3114 instr->alu.src.n_bits = fsrc->n_bits;
3115 instr->alu.src.offset = fsrc->offset / 8;
/*
 * instr_alu_shl_translate: parse "shl dst src" — shift left. Same operand
 * scheme as add/sub: field/field picks an endianness variant from the h/m
 * prefixes; otherwise src is an immediate (MI/HI, dst-prefix test elided).
 */
3120 instr_alu_shl_translate(struct rte_swx_pipeline *p,
3121 struct action *action,
3124 struct instruction *instr,
3125 struct instruction_data *data __rte_unused)
3127 char *dst = tokens[1], *src = tokens[2];
3128 struct field *fdst, *fsrc;
3129 uint32_t dst_struct_id, src_struct_id, src_val;
3131 CHECK(n_tokens == 3, EINVAL);
3133 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3134 CHECK(fdst, EINVAL);
3136 /* SHL, SHL_HM, SHL_MH, SHL_HH. */
3137 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3139 instr->type = INSTR_ALU_SHL;
3140 if (dst[0] == 'h' && src[0] == 'm')
3141 instr->type = INSTR_ALU_SHL_HM;
3142 if (dst[0] == 'm' && src[0] == 'h')
3143 instr->type = INSTR_ALU_SHL_MH;
3144 if (dst[0] == 'h' && src[0] == 'h')
3145 instr->type = INSTR_ALU_SHL_HH;
3147 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3148 instr->alu.dst.n_bits = fdst->n_bits;
3149 instr->alu.dst.offset = fdst->offset / 8;
3150 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3151 instr->alu.src.n_bits = fsrc->n_bits;
3152 instr->alu.src.offset = fsrc->offset / 8;
3156 /* SHL_MI, SHL_HI. */
3157 src_val = strtoul(src, &src, 0);
3158 CHECK(!src[0], EINVAL);
3160 instr->type = INSTR_ALU_SHL_MI;
3162 instr->type = INSTR_ALU_SHL_HI;
3164 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3165 instr->alu.dst.n_bits = fdst->n_bits;
3166 instr->alu.dst.offset = fdst->offset / 8;
3167 instr->alu.src_val = (uint32_t)src_val;
/*
 * instr_alu_shr_translate: parse "shr dst src" — shift right; mirror of
 * shl above.
 */
3172 instr_alu_shr_translate(struct rte_swx_pipeline *p,
3173 struct action *action,
3176 struct instruction *instr,
3177 struct instruction_data *data __rte_unused)
3179 char *dst = tokens[1], *src = tokens[2];
3180 struct field *fdst, *fsrc;
3181 uint32_t dst_struct_id, src_struct_id, src_val;
3183 CHECK(n_tokens == 3, EINVAL);
3185 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3186 CHECK(fdst, EINVAL);
3188 /* SHR, SHR_HM, SHR_MH, SHR_HH. */
3189 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3191 instr->type = INSTR_ALU_SHR;
3192 if (dst[0] == 'h' && src[0] == 'm')
3193 instr->type = INSTR_ALU_SHR_HM;
3194 if (dst[0] == 'm' && src[0] == 'h')
3195 instr->type = INSTR_ALU_SHR_MH;
3196 if (dst[0] == 'h' && src[0] == 'h')
3197 instr->type = INSTR_ALU_SHR_HH;
3199 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3200 instr->alu.dst.n_bits = fdst->n_bits;
3201 instr->alu.dst.offset = fdst->offset / 8;
3202 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3203 instr->alu.src.n_bits = fsrc->n_bits;
3204 instr->alu.src.offset = fsrc->offset / 8;
3208 /* SHR_MI, SHR_HI. */
3209 src_val = strtoul(src, &src, 0);
3210 CHECK(!src[0], EINVAL);
3212 instr->type = INSTR_ALU_SHR_MI;
3214 instr->type = INSTR_ALU_SHR_HI;
3216 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3217 instr->alu.dst.n_bits = fdst->n_bits;
3218 instr->alu.dst.offset = fdst->offset / 8;
3219 instr->alu.src_val = (uint32_t)src_val;
/*
 * instr_alu_and_translate: parse "and dst src". Unlike add/sub/shl/shr,
 * bitwise ops need only two field/field encodings: plain (same endianness
 * domain) or _S (exactly one header operand, byte swap required). The
 * immediate form (_I) byte-swaps the constant with htonl for header
 * destinations (condition elided).
 */
3224 instr_alu_and_translate(struct rte_swx_pipeline *p,
3225 struct action *action,
3228 struct instruction *instr,
3229 struct instruction_data *data __rte_unused)
3231 char *dst = tokens[1], *src = tokens[2];
3232 struct field *fdst, *fsrc;
3233 uint32_t dst_struct_id, src_struct_id, src_val;
3235 CHECK(n_tokens == 3, EINVAL);
3237 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3238 CHECK(fdst, EINVAL);
3241 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3243 instr->type = INSTR_ALU_AND;
3244 if ((dst[0] == 'h' && src[0] != 'h') ||
3245 (dst[0] != 'h' && src[0] == 'h'))
3246 instr->type = INSTR_ALU_AND_S;
3248 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3249 instr->alu.dst.n_bits = fdst->n_bits;
3250 instr->alu.dst.offset = fdst->offset / 8;
3251 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3252 instr->alu.src.n_bits = fsrc->n_bits;
3253 instr->alu.src.offset = fsrc->offset / 8;
3258 src_val = strtoul(src, &src, 0);
3259 CHECK(!src[0], EINVAL);
3262 src_val = htonl(src_val);
3264 instr->type = INSTR_ALU_AND_I;
3265 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3266 instr->alu.dst.n_bits = fdst->n_bits;
3267 instr->alu.dst.offset = fdst->offset / 8;
3268 instr->alu.src_val = (uint32_t)src_val;
/* instr_alu_or_translate: parse "or dst src" — mirror of and above. */
3273 instr_alu_or_translate(struct rte_swx_pipeline *p,
3274 struct action *action,
3277 struct instruction *instr,
3278 struct instruction_data *data __rte_unused)
3280 char *dst = tokens[1], *src = tokens[2];
3281 struct field *fdst, *fsrc;
3282 uint32_t dst_struct_id, src_struct_id, src_val;
3284 CHECK(n_tokens == 3, EINVAL);
3286 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3287 CHECK(fdst, EINVAL);
3290 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3292 instr->type = INSTR_ALU_OR;
3293 if ((dst[0] == 'h' && src[0] != 'h') ||
3294 (dst[0] != 'h' && src[0] == 'h'))
3295 instr->type = INSTR_ALU_OR_S;
3297 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3298 instr->alu.dst.n_bits = fdst->n_bits;
3299 instr->alu.dst.offset = fdst->offset / 8;
3300 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3301 instr->alu.src.n_bits = fsrc->n_bits;
3302 instr->alu.src.offset = fsrc->offset / 8;
3307 src_val = strtoul(src, &src, 0);
3308 CHECK(!src[0], EINVAL);
3311 src_val = htonl(src_val);
3313 instr->type = INSTR_ALU_OR_I;
3314 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3315 instr->alu.dst.n_bits = fdst->n_bits;
3316 instr->alu.dst.offset = fdst->offset / 8;
3317 instr->alu.src_val = (uint32_t)src_val;
/* instr_alu_xor_translate: parse "xor dst src" — mirror of and above. */
3322 instr_alu_xor_translate(struct rte_swx_pipeline *p,
3323 struct action *action,
3326 struct instruction *instr,
3327 struct instruction_data *data __rte_unused)
3329 char *dst = tokens[1], *src = tokens[2];
3330 struct field *fdst, *fsrc;
3331 uint32_t dst_struct_id, src_struct_id, src_val;
3333 CHECK(n_tokens == 3, EINVAL);
3335 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3336 CHECK(fdst, EINVAL);
3339 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3341 instr->type = INSTR_ALU_XOR;
3342 if ((dst[0] == 'h' && src[0] != 'h') ||
3343 (dst[0] != 'h' && src[0] == 'h'))
3344 instr->type = INSTR_ALU_XOR_S;
3346 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3347 instr->alu.dst.n_bits = fdst->n_bits;
3348 instr->alu.dst.offset = fdst->offset / 8;
3349 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3350 instr->alu.src.n_bits = fsrc->n_bits;
3351 instr->alu.src.offset = fsrc->offset / 8;
3356 src_val = strtoul(src, &src, 0);
3357 CHECK(!src[0], EINVAL);
3360 src_val = htonl(src_val);
3362 instr->type = INSTR_ALU_XOR_I;
3363 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3364 instr->alu.dst.n_bits = fdst->n_bits;
3365 instr->alu.dst.offset = fdst->offset / 8;
3366 instr->alu.src_val = (uint32_t)src_val;
3371 instr_alu_add_exec(struct rte_swx_pipeline *p)
3373 struct thread *t = &p->threads[p->thread_id];
3374 struct instruction *ip = t->ip;
3376 TRACE("[Thread %2u] add\n", p->thread_id);
3386 instr_alu_add_mh_exec(struct rte_swx_pipeline *p)
3388 struct thread *t = &p->threads[p->thread_id];
3389 struct instruction *ip = t->ip;
3391 TRACE("[Thread %2u] add (mh)\n", p->thread_id);
3401 instr_alu_add_hm_exec(struct rte_swx_pipeline *p)
3403 struct thread *t = &p->threads[p->thread_id];
3404 struct instruction *ip = t->ip;
3406 TRACE("[Thread %2u] add (hm)\n", p->thread_id);
3416 instr_alu_add_hh_exec(struct rte_swx_pipeline *p)
3418 struct thread *t = &p->threads[p->thread_id];
3419 struct instruction *ip = t->ip;
3421 TRACE("[Thread %2u] add (hh)\n", p->thread_id);
3431 instr_alu_add_mi_exec(struct rte_swx_pipeline *p)
3433 struct thread *t = &p->threads[p->thread_id];
3434 struct instruction *ip = t->ip;
3436 TRACE("[Thread %2u] add (mi)\n", p->thread_id);
3446 instr_alu_add_hi_exec(struct rte_swx_pipeline *p)
3448 struct thread *t = &p->threads[p->thread_id];
3449 struct instruction *ip = t->ip;
3451 TRACE("[Thread %2u] add (hi)\n", p->thread_id);
/*
 * ALU "sub" instruction handlers; same operand-location suffix scheme as
 * the "add" family (mh/hm/hh/mi/hi). Handler bodies elided in this view.
 */
3461 instr_alu_sub_exec(struct rte_swx_pipeline *p)
3463 struct thread *t = &p->threads[p->thread_id];
3464 struct instruction *ip = t->ip;
3466 TRACE("[Thread %2u] sub\n", p->thread_id);
3476 instr_alu_sub_mh_exec(struct rte_swx_pipeline *p)
3478 struct thread *t = &p->threads[p->thread_id];
3479 struct instruction *ip = t->ip;
3481 TRACE("[Thread %2u] sub (mh)\n", p->thread_id);
3491 instr_alu_sub_hm_exec(struct rte_swx_pipeline *p)
3493 struct thread *t = &p->threads[p->thread_id];
3494 struct instruction *ip = t->ip;
3496 TRACE("[Thread %2u] sub (hm)\n", p->thread_id);
3506 instr_alu_sub_hh_exec(struct rte_swx_pipeline *p)
3508 struct thread *t = &p->threads[p->thread_id];
3509 struct instruction *ip = t->ip;
3511 TRACE("[Thread %2u] sub (hh)\n", p->thread_id);
3521 instr_alu_sub_mi_exec(struct rte_swx_pipeline *p)
3523 struct thread *t = &p->threads[p->thread_id];
3524 struct instruction *ip = t->ip;
3526 TRACE("[Thread %2u] sub (mi)\n", p->thread_id);
3536 instr_alu_sub_hi_exec(struct rte_swx_pipeline *p)
3538 struct thread *t = &p->threads[p->thread_id];
3539 struct instruction *ip = t->ip;
3541 TRACE("[Thread %2u] sub (hi)\n", p->thread_id);
/*
 * ALU "shl" (shift left) instruction handlers; same operand-location suffix
 * scheme as the "add" family. Handler bodies elided in this view.
 */
3551 instr_alu_shl_exec(struct rte_swx_pipeline *p)
3553 struct thread *t = &p->threads[p->thread_id];
3554 struct instruction *ip = t->ip;
3556 TRACE("[Thread %2u] shl\n", p->thread_id);
3566 instr_alu_shl_mh_exec(struct rte_swx_pipeline *p)
3568 struct thread *t = &p->threads[p->thread_id];
3569 struct instruction *ip = t->ip;
3571 TRACE("[Thread %2u] shl (mh)\n", p->thread_id);
3581 instr_alu_shl_hm_exec(struct rte_swx_pipeline *p)
3583 struct thread *t = &p->threads[p->thread_id];
3584 struct instruction *ip = t->ip;
3586 TRACE("[Thread %2u] shl (hm)\n", p->thread_id);
3596 instr_alu_shl_hh_exec(struct rte_swx_pipeline *p)
3598 struct thread *t = &p->threads[p->thread_id];
3599 struct instruction *ip = t->ip;
3601 TRACE("[Thread %2u] shl (hh)\n", p->thread_id);
3611 instr_alu_shl_mi_exec(struct rte_swx_pipeline *p)
3613 struct thread *t = &p->threads[p->thread_id];
3614 struct instruction *ip = t->ip;
3616 TRACE("[Thread %2u] shl (mi)\n", p->thread_id);
3626 instr_alu_shl_hi_exec(struct rte_swx_pipeline *p)
3628 struct thread *t = &p->threads[p->thread_id];
3629 struct instruction *ip = t->ip;
3631 TRACE("[Thread %2u] shl (hi)\n", p->thread_id);
/*
 * ALU "shr" (shift right) instruction handlers; same operand-location suffix
 * scheme as the "add" family. Handler bodies elided in this view.
 */
3641 instr_alu_shr_exec(struct rte_swx_pipeline *p)
3643 struct thread *t = &p->threads[p->thread_id];
3644 struct instruction *ip = t->ip;
3646 TRACE("[Thread %2u] shr\n", p->thread_id);
3656 instr_alu_shr_mh_exec(struct rte_swx_pipeline *p)
3658 struct thread *t = &p->threads[p->thread_id];
3659 struct instruction *ip = t->ip;
3661 TRACE("[Thread %2u] shr (mh)\n", p->thread_id);
3671 instr_alu_shr_hm_exec(struct rte_swx_pipeline *p)
3673 struct thread *t = &p->threads[p->thread_id];
3674 struct instruction *ip = t->ip;
3676 TRACE("[Thread %2u] shr (hm)\n", p->thread_id);
3686 instr_alu_shr_hh_exec(struct rte_swx_pipeline *p)
3688 struct thread *t = &p->threads[p->thread_id];
3689 struct instruction *ip = t->ip;
3691 TRACE("[Thread %2u] shr (hh)\n", p->thread_id);
3701 instr_alu_shr_mi_exec(struct rte_swx_pipeline *p)
3703 struct thread *t = &p->threads[p->thread_id];
3704 struct instruction *ip = t->ip;
3706 TRACE("[Thread %2u] shr (mi)\n", p->thread_id);
3716 instr_alu_shr_hi_exec(struct rte_swx_pipeline *p)
3718 struct thread *t = &p->threads[p->thread_id];
3719 struct instruction *ip = t->ip;
3721 TRACE("[Thread %2u] shr (hi)\n", p->thread_id);
/*
 * ALU "and" (bitwise AND) instruction handlers. Suffixes: (none) = plain
 * operands, s = byte-swap variant (exactly one operand is a header field),
 * i = immediate source. Handler bodies elided in this view.
 */
3731 instr_alu_and_exec(struct rte_swx_pipeline *p)
3733 struct thread *t = &p->threads[p->thread_id];
3734 struct instruction *ip = t->ip;
3736 TRACE("[Thread %2u] and\n", p->thread_id);
3746 instr_alu_and_s_exec(struct rte_swx_pipeline *p)
3748 struct thread *t = &p->threads[p->thread_id];
3749 struct instruction *ip = t->ip;
3751 TRACE("[Thread %2u] and (s)\n", p->thread_id);
3761 instr_alu_and_i_exec(struct rte_swx_pipeline *p)
3763 struct thread *t = &p->threads[p->thread_id];
3764 struct instruction *ip = t->ip;
3766 TRACE("[Thread %2u] and (i)\n", p->thread_id);
/*
 * ALU "or" (bitwise OR) instruction handlers; same s/i suffix scheme as the
 * "and" family. Handler bodies elided in this view.
 */
3776 instr_alu_or_exec(struct rte_swx_pipeline *p)
3778 struct thread *t = &p->threads[p->thread_id];
3779 struct instruction *ip = t->ip;
3781 TRACE("[Thread %2u] or\n", p->thread_id);
3791 instr_alu_or_s_exec(struct rte_swx_pipeline *p)
3793 struct thread *t = &p->threads[p->thread_id];
3794 struct instruction *ip = t->ip;
3796 TRACE("[Thread %2u] or (s)\n", p->thread_id);
3806 instr_alu_or_i_exec(struct rte_swx_pipeline *p)
3808 struct thread *t = &p->threads[p->thread_id];
3809 struct instruction *ip = t->ip;
3811 TRACE("[Thread %2u] or (i)\n", p->thread_id);
/*
 * ALU "xor" (bitwise XOR) instruction handlers; same s/i suffix scheme as
 * the "and" family. Handler bodies elided in this view.
 */
3821 instr_alu_xor_exec(struct rte_swx_pipeline *p)
3823 struct thread *t = &p->threads[p->thread_id];
3824 struct instruction *ip = t->ip;
3826 TRACE("[Thread %2u] xor\n", p->thread_id);
3836 instr_alu_xor_s_exec(struct rte_swx_pipeline *p)
3838 struct thread *t = &p->threads[p->thread_id];
3839 struct instruction *ip = t->ip;
3841 TRACE("[Thread %2u] xor (s)\n", p->thread_id);
3851 instr_alu_xor_i_exec(struct rte_swx_pipeline *p)
3853 struct thread *t = &p->threads[p->thread_id];
3854 struct instruction *ip = t->ip;
3856 TRACE("[Thread %2u] xor (i)\n", p->thread_id);
/*
 * ckadd (field): one's-complement addition (RFC 1071 Internet checksum
 * update) of a source field into a 16-bit checksum destination.
 * NOTE(review): the loads of dst/src64 and the initial computation of r are
 * elided from this view; the folding steps below assume r already holds the
 * 16-bit intermediate plus the new contribution.
 */
3866 instr_alu_ckadd_field_exec(struct rte_swx_pipeline *p)
3868 struct thread *t = &p->threads[p->thread_id];
3869 struct instruction *ip = t->ip;
3870 uint8_t *dst_struct, *src_struct;
3871 uint16_t *dst16_ptr, dst;
3872 uint64_t *src64_ptr, src64, src64_mask, src;
3875 TRACE("[Thread %2u] ckadd (field)\n", p->thread_id);
3878 dst_struct = t->structs[ip->alu.dst.struct_id];
3879 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
3882 src_struct = t->structs[ip->alu.src.struct_id];
3883 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
/* Mask the source down to its declared field width (n_bits <= 64). */
3885 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
3886 src = src64 & src64_mask;
3891 /* The first input (r) is a 16-bit number. The second and the third
3892 * inputs are 32-bit numbers. In the worst case scenario, the sum of the
3893 * three numbers (output r) is a 34-bit number.
3895 r += (src >> 32) + (src & 0xFFFFFFFF);
3897 /* The first input is a 16-bit number. The second input is an 18-bit
3898 * number. In the worst case scenario, the sum of the two numbers is a
3901 r = (r & 0xFFFF) + (r >> 16);
3903 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
3904 * a 3-bit number (0 .. 7). Their sum is a 17-bit number (0 .. 0x10006).
3906 r = (r & 0xFFFF) + (r >> 16);
3908 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
3909 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
3910 * 0x10006), the output r is (0 .. 7). So no carry bit can be generated,
3911 * therefore the output r is always a 16-bit number.
3913 r = (r & 0xFFFF) + (r >> 16);
/* NOTE(review): the final complement/byte-order step before the store is
 * elided from this view.
 */
3918 *dst16_ptr = (uint16_t)r;
/*
 * cksub (field): one's-complement subtraction of a source field from a
 * 16-bit checksum destination (incremental Internet checksum update, see
 * RFC 1071 / RFC 1624).
 * NOTE(review): the loads of dst/src64 and the initial computation of r are
 * elided from this view.
 */
3925 instr_alu_cksub_field_exec(struct rte_swx_pipeline *p)
3927 struct thread *t = &p->threads[p->thread_id];
3928 struct instruction *ip = t->ip;
3929 uint8_t *dst_struct, *src_struct;
3930 uint16_t *dst16_ptr, dst;
3931 uint64_t *src64_ptr, src64, src64_mask, src;
3934 TRACE("[Thread %2u] cksub (field)\n", p->thread_id);
3937 dst_struct = t->structs[ip->alu.dst.struct_id];
3938 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
3941 src_struct = t->structs[ip->alu.src.struct_id];
3942 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
/* Mask the source down to its declared field width (n_bits <= 64). */
3944 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
3945 src = src64 & src64_mask;
3950 /* Subtraction in 1's complement arithmetic (i.e. a '- b) is the same as
3951 * the following sequence of operations in 2's complement arithmetic:
3952 * a '- b = (a - b) % 0xFFFF.
3954 * In order to prevent an underflow for the below subtraction, in which
3955 * a 33-bit number (the subtrahend) is taken out of a 16-bit number (the
3956 * minuend), we first add a multiple of the 0xFFFF modulus to the
3957 * minuend. The number we add to the minuend needs to be a 34-bit number
3958 * or higher, so for readability reasons we picked the 36-bit multiple.
3959 * We are effectively turning the 16-bit minuend into a 36-bit number:
3960 * (a - b) % 0xFFFF = (a + 0xFFFF00000 - b) % 0xFFFF.
3962 r += 0xFFFF00000ULL; /* The output r is a 36-bit number. */
3964 /* A 33-bit number is subtracted from a 36-bit number (the input r). The
3965 * result (the output r) is a 36-bit number.
3967 r -= (src >> 32) + (src & 0xFFFFFFFF);
3969 /* The first input is a 16-bit number. The second input is a 20-bit
3970 * number. Their sum is a 21-bit number.
3972 r = (r & 0xFFFF) + (r >> 16);
3974 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
3975 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1001E).
3977 r = (r & 0xFFFF) + (r >> 16);
3979 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
3980 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
3981 * 0x1001E), the output r is (0 .. 31). So no carry bit can be
3982 * generated, therefore the output r is always a 16-bit number.
3984 r = (r & 0xFFFF) + (r >> 16);
/* NOTE(review): the final complement/byte-order step before the store is
 * elided from this view.
 */
3989 *dst16_ptr = (uint16_t)r;
/*
 * ckadd (struct of 20 bytes): compute the Internet checksum of a fixed
 * 20-byte structure (the classic no-options IPv4 header) from scratch,
 * reading it as five 32-bit words.
 * NOTE(review): the final one's-complement (~r0) step before the special
 * 0xFFFF encoding is elided from this view.
 */
3996 instr_alu_ckadd_struct20_exec(struct rte_swx_pipeline *p)
3998 struct thread *t = &p->threads[p->thread_id];
3999 struct instruction *ip = t->ip;
4000 uint8_t *dst_struct, *src_struct;
4001 uint16_t *dst16_ptr;
4002 uint32_t *src32_ptr;
4005 TRACE("[Thread %2u] ckadd (struct of 20 bytes)\n", p->thread_id);
4008 dst_struct = t->structs[ip->alu.dst.struct_id];
4009 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
4011 src_struct = t->structs[ip->alu.src.struct_id];
4012 src32_ptr = (uint32_t *)&src_struct[0];
/* Sum the five 32-bit words using two accumulators to limit carry growth. */
4014 r0 = src32_ptr[0]; /* r0 is a 32-bit number. */
4015 r1 = src32_ptr[1]; /* r1 is a 32-bit number. */
4016 r0 += src32_ptr[2]; /* The output r0 is a 33-bit number. */
4017 r1 += src32_ptr[3]; /* The output r1 is a 33-bit number. */
4018 r0 += r1 + src32_ptr[4]; /* The output r0 is a 35-bit number. */
4020 /* The first input is a 16-bit number. The second input is a 19-bit
4021 * number. Their sum is a 20-bit number.
4023 r0 = (r0 & 0xFFFF) + (r0 >> 16);
4025 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
4026 * a 4-bit number (0 .. 15). The sum is a 17-bit number (0 .. 0x1000E).
4028 r0 = (r0 & 0xFFFF) + (r0 >> 16);
4030 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
4031 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
4032 * 0x1000E), the output r is (0 .. 15). So no carry bit can be
4033 * generated, therefore the output r is always a 16-bit number.
4035 r0 = (r0 & 0xFFFF) + (r0 >> 16);
/* Checksum 0 is encoded as 0xFFFF (one's-complement "negative zero"). */
4038 r0 = r0 ? r0 : 0xFFFF;
4040 *dst16_ptr = (uint16_t)r0;
/*
 * ckadd (struct): compute the Internet checksum of a variable-size
 * structure (size in bits given by ip->alu.src.n_bits, read as 32-bit
 * words) from scratch.
 * NOTE(review): the final one's-complement step before the store is elided
 * from this view.
 */
4047 instr_alu_ckadd_struct_exec(struct rte_swx_pipeline *p)
4049 struct thread *t = &p->threads[p->thread_id];
4050 struct instruction *ip = t->ip;
4051 uint8_t *dst_struct, *src_struct;
4052 uint16_t *dst16_ptr;
4053 uint32_t *src32_ptr;
4057 TRACE("[Thread %2u] ckadd (struct)\n", p->thread_id);
4060 dst_struct = t->structs[ip->alu.dst.struct_id];
4061 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
4063 src_struct = t->structs[ip->alu.src.struct_id];
4064 src32_ptr = (uint32_t *)&src_struct[0];
4066 /* The max number of 32-bit words in a 256-byte header is 8 = 2^3.
4067 * Therefore, in the worst case scenario, a 35-bit number is added to a
4068 * 16-bit number (the input r), so the output r is 36-bit number.
/* NOTE(review): 256 bytes / 4 = 64 words (2^6), not 8 = 2^3; with 64 words
 * the worst-case sum is ~38 bits, which the folds below still reduce
 * correctly — confirm the intended maximum header size.
 */
4070 for (i = 0; i < ip->alu.src.n_bits / 32; i++, src32_ptr++)
4073 /* The first input is a 16-bit number. The second input is a 20-bit
4074 * number. Their sum is a 21-bit number.
4076 r = (r & 0xFFFF) + (r >> 16);
4078 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
4079 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1001E).
4081 r = (r & 0xFFFF) + (r >> 16);
4083 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
4084 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
4085 * 0x1001E), the output r is (0 .. 31). So no carry bit can be
4086 * generated, therefore the output r is always a 16-bit number.
4088 r = (r & 0xFFFF) + (r >> 16);
4093 *dst16_ptr = (uint16_t)r;
/* Upper bound on whitespace-separated tokens in one instruction string. */
4099 #define RTE_SWX_INSTRUCTION_TOKENS_MAX 16
/*
 * Translate one textual instruction into its binary form.
 * Tokenizes the (mutable) instruction string in place with strtok_r,
 * records an optional leading "label :" into *data, then dispatches on the
 * opcode token to the per-instruction translator. Returns 0 on success or
 * a positive errno-style code via CHECK(); action is NULL for the
 * pipeline-level (non-action) program.
 * NOTE(review): the tokenizing loop structure and the trailing opcode cases
 * (jmp/return/etc.) are partially elided from this view.
 */
4102 instr_translate(struct rte_swx_pipeline *p,
4103 struct action *action,
4105 struct instruction *instr,
4106 struct instruction_data *data)
4108 char *tokens[RTE_SWX_INSTRUCTION_TOKENS_MAX];
4109 int n_tokens = 0, tpos = 0;
4111 /* Parse the instruction string into tokens. */
4115 token = strtok_r(string, " \t\v", &string);
4119 CHECK(n_tokens < RTE_SWX_INSTRUCTION_TOKENS_MAX, EINVAL);
4121 tokens[n_tokens] = token;
/* An empty instruction string is invalid. */
4125 CHECK(n_tokens, EINVAL);
4127 /* Handle the optional instruction label. */
4128 if ((n_tokens >= 2) && !strcmp(tokens[1], ":")) {
4129 strcpy(data->label, tokens[0]);
/* A label with no instruction after it is invalid. */
4132 CHECK(n_tokens - tpos, EINVAL);
4135 /* Identify the instruction type. */
4136 if (!strcmp(tokens[tpos], "rx"))
4137 return instr_rx_translate(p,
4144 if (!strcmp(tokens[tpos], "tx"))
4145 return instr_tx_translate(p,
4152 if (!strcmp(tokens[tpos], "extract"))
4153 return instr_hdr_extract_translate(p,
4160 if (!strcmp(tokens[tpos], "emit"))
4161 return instr_hdr_emit_translate(p,
4168 if (!strcmp(tokens[tpos], "validate"))
4169 return instr_hdr_validate_translate(p,
4176 if (!strcmp(tokens[tpos], "invalidate"))
4177 return instr_hdr_invalidate_translate(p,
4184 if (!strcmp(tokens[tpos], "mov"))
4185 return instr_mov_translate(p,
4192 if (!strcmp(tokens[tpos], "dma"))
4193 return instr_dma_translate(p,
4200 if (!strcmp(tokens[tpos], "add"))
4201 return instr_alu_add_translate(p,
4208 if (!strcmp(tokens[tpos], "sub"))
4209 return instr_alu_sub_translate(p,
4216 if (!strcmp(tokens[tpos], "ckadd"))
4217 return instr_alu_ckadd_translate(p,
4224 if (!strcmp(tokens[tpos], "cksub"))
4225 return instr_alu_cksub_translate(p,
4232 if (!strcmp(tokens[tpos], "and"))
4233 return instr_alu_and_translate(p,
4240 if (!strcmp(tokens[tpos], "or"))
4241 return instr_alu_or_translate(p,
4248 if (!strcmp(tokens[tpos], "xor"))
4249 return instr_alu_xor_translate(p,
4256 if (!strcmp(tokens[tpos], "shl"))
4257 return instr_alu_shl_translate(p,
4264 if (!strcmp(tokens[tpos], "shr"))
4265 return instr_alu_shr_translate(p,
/*
 * Count how many of the n instructions reference the given label as their
 * jump target. Used to compute per-label user counts.
 * NOTE(review): the guard for an empty label and the return of the count
 * are elided from this view.
 */
4276 label_is_used(struct instruction_data *data, uint32_t n, const char *label)
4278 uint32_t count = 0, i;
4283 for (i = 0; i < n; i++)
4284 if (!strcmp(label, data[i].jmp_label))
/*
 * Validate instruction labels: every non-empty label must be unique across
 * the program, then each labeled instruction gets its jump-reference count
 * (n_users) filled in via label_is_used().
 */
4291 instr_label_check(struct instruction_data *instruction_data,
4292 uint32_t n_instructions)
4296 /* Check that all instruction labels are unique. */
4297 for (i = 0; i < n_instructions; i++) {
4298 struct instruction_data *data = &instruction_data[i];
4299 char *label = data->label;
4305 for (j = i + 1; j < n_instructions; j++)
4306 CHECK(strcmp(label, data[j].label), EINVAL);
4309 /* Get users for each instruction label. */
4310 for (i = 0; i < n_instructions; i++) {
4311 struct instruction_data *data = &instruction_data[i];
4312 char *label = data->label;
4314 data->n_users = label_is_used(instruction_data,
/*
 * Translate an array of textual instructions into a freshly allocated
 * binary instruction array and attach it to either action a (when non-NULL)
 * or to the pipeline-level program. Each source string is strdup'ed so that
 * instr_translate() can tokenize it in place.
 * NOTE(review): the error-unwind path (freeing instr/data/string on
 * failure) is elided from this view — confirm all three are released.
 */
4323 instruction_config(struct rte_swx_pipeline *p,
4325 const char **instructions,
4326 uint32_t n_instructions)
4328 struct instruction *instr = NULL;
4329 struct instruction_data *data = NULL;
4330 char *string = NULL;
4334 CHECK(n_instructions, EINVAL);
4335 CHECK(instructions, EINVAL);
4336 for (i = 0; i < n_instructions; i++)
4337 CHECK(instructions[i], EINVAL);
4339 /* Memory allocation. */
4340 instr = calloc(n_instructions, sizeof(struct instruction));
4346 data = calloc(n_instructions, sizeof(struct instruction_data));
4352 for (i = 0; i < n_instructions; i++) {
/* Work on a private copy: instr_translate() mutates the string. */
4353 string = strdup(instructions[i]);
4359 err = instr_translate(p, a, string, &instr[i], &data[i]);
4366 err = instr_label_check(data, n_instructions);
/* Attach the translated program to the action or to the pipeline. */
4373 a->instructions = instr;
4374 a->n_instructions = n_instructions;
4376 p->instructions = instr;
4377 p->n_instructions = n_instructions;
/* Signature of a single-instruction execution handler. */
4389 typedef void (*instr_exec_t)(struct rte_swx_pipeline *);
/*
 * Instruction dispatch table, indexed by enum instruction type. Every
 * INSTR_* value that can appear in a translated program must have a
 * non-NULL entry here, otherwise instr_exec() will jump through NULL.
 */
4391 static instr_exec_t instruction_table[] = {
4392 [INSTR_RX] = instr_rx_exec,
4393 [INSTR_TX] = instr_tx_exec,
4395 [INSTR_HDR_EXTRACT] = instr_hdr_extract_exec,
4396 [INSTR_HDR_EXTRACT2] = instr_hdr_extract2_exec,
4397 [INSTR_HDR_EXTRACT3] = instr_hdr_extract3_exec,
4398 [INSTR_HDR_EXTRACT4] = instr_hdr_extract4_exec,
4399 [INSTR_HDR_EXTRACT5] = instr_hdr_extract5_exec,
4400 [INSTR_HDR_EXTRACT6] = instr_hdr_extract6_exec,
4401 [INSTR_HDR_EXTRACT7] = instr_hdr_extract7_exec,
4402 [INSTR_HDR_EXTRACT8] = instr_hdr_extract8_exec,
4404 [INSTR_HDR_EMIT] = instr_hdr_emit_exec,
4405 [INSTR_HDR_EMIT_TX] = instr_hdr_emit_tx_exec,
4406 [INSTR_HDR_EMIT2_TX] = instr_hdr_emit2_tx_exec,
4407 [INSTR_HDR_EMIT3_TX] = instr_hdr_emit3_tx_exec,
4408 [INSTR_HDR_EMIT4_TX] = instr_hdr_emit4_tx_exec,
4409 [INSTR_HDR_EMIT5_TX] = instr_hdr_emit5_tx_exec,
4410 [INSTR_HDR_EMIT6_TX] = instr_hdr_emit6_tx_exec,
4411 [INSTR_HDR_EMIT7_TX] = instr_hdr_emit7_tx_exec,
4412 [INSTR_HDR_EMIT8_TX] = instr_hdr_emit8_tx_exec,
4414 [INSTR_HDR_VALIDATE] = instr_hdr_validate_exec,
4415 [INSTR_HDR_INVALIDATE] = instr_hdr_invalidate_exec,
4417 [INSTR_MOV] = instr_mov_exec,
4418 [INSTR_MOV_S] = instr_mov_s_exec,
4419 [INSTR_MOV_I] = instr_mov_i_exec,
4421 [INSTR_DMA_HT] = instr_dma_ht_exec,
4422 [INSTR_DMA_HT2] = instr_dma_ht2_exec,
4423 [INSTR_DMA_HT3] = instr_dma_ht3_exec,
4424 [INSTR_DMA_HT4] = instr_dma_ht4_exec,
4425 [INSTR_DMA_HT5] = instr_dma_ht5_exec,
4426 [INSTR_DMA_HT6] = instr_dma_ht6_exec,
4427 [INSTR_DMA_HT7] = instr_dma_ht7_exec,
4428 [INSTR_DMA_HT8] = instr_dma_ht8_exec,
4430 [INSTR_ALU_ADD] = instr_alu_add_exec,
4431 [INSTR_ALU_ADD_MH] = instr_alu_add_mh_exec,
4432 [INSTR_ALU_ADD_HM] = instr_alu_add_hm_exec,
4433 [INSTR_ALU_ADD_HH] = instr_alu_add_hh_exec,
4434 [INSTR_ALU_ADD_MI] = instr_alu_add_mi_exec,
4435 [INSTR_ALU_ADD_HI] = instr_alu_add_hi_exec,
4437 [INSTR_ALU_SUB] = instr_alu_sub_exec,
4438 [INSTR_ALU_SUB_MH] = instr_alu_sub_mh_exec,
4439 [INSTR_ALU_SUB_HM] = instr_alu_sub_hm_exec,
4440 [INSTR_ALU_SUB_HH] = instr_alu_sub_hh_exec,
4441 [INSTR_ALU_SUB_MI] = instr_alu_sub_mi_exec,
4442 [INSTR_ALU_SUB_HI] = instr_alu_sub_hi_exec,
4444 [INSTR_ALU_CKADD_FIELD] = instr_alu_ckadd_field_exec,
4445 [INSTR_ALU_CKADD_STRUCT] = instr_alu_ckadd_struct_exec,
4446 [INSTR_ALU_CKADD_STRUCT20] = instr_alu_ckadd_struct20_exec,
4447 [INSTR_ALU_CKSUB_FIELD] = instr_alu_cksub_field_exec,
4449 [INSTR_ALU_AND] = instr_alu_and_exec,
4450 [INSTR_ALU_AND_S] = instr_alu_and_s_exec,
4451 [INSTR_ALU_AND_I] = instr_alu_and_i_exec,
4453 [INSTR_ALU_OR] = instr_alu_or_exec,
4454 [INSTR_ALU_OR_S] = instr_alu_or_s_exec,
4455 [INSTR_ALU_OR_I] = instr_alu_or_i_exec,
4457 [INSTR_ALU_XOR] = instr_alu_xor_exec,
4458 [INSTR_ALU_XOR_S] = instr_alu_xor_s_exec,
4459 [INSTR_ALU_XOR_I] = instr_alu_xor_i_exec,
4461 [INSTR_ALU_SHL] = instr_alu_shl_exec,
4462 [INSTR_ALU_SHL_MH] = instr_alu_shl_mh_exec,
4463 [INSTR_ALU_SHL_HM] = instr_alu_shl_hm_exec,
4464 [INSTR_ALU_SHL_HH] = instr_alu_shl_hh_exec,
4465 [INSTR_ALU_SHL_MI] = instr_alu_shl_mi_exec,
4466 [INSTR_ALU_SHL_HI] = instr_alu_shl_hi_exec,
4468 [INSTR_ALU_SHR] = instr_alu_shr_exec,
4469 [INSTR_ALU_SHR_MH] = instr_alu_shr_mh_exec,
4470 [INSTR_ALU_SHR_HM] = instr_alu_shr_hm_exec,
4471 [INSTR_ALU_SHR_HH] = instr_alu_shr_hh_exec,
4472 [INSTR_ALU_SHR_MI] = instr_alu_shr_mi_exec,
4473 [INSTR_ALU_SHR_HI] = instr_alu_shr_hi_exec,
/*
 * Execute the current thread's current instruction by dispatching through
 * instruction_table on the instruction type.
 */
4477 instr_exec(struct rte_swx_pipeline *p)
4479 struct thread *t = &p->threads[p->thread_id];
4480 struct instruction *ip = t->ip;
4481 instr_exec_t instr = instruction_table[ip->type];
/*
 * Look up an action by name in the pipeline's action list.
 * NOTE(review): the NULL-name guard and the return statements are elided
 * from this view; presumably returns the element or NULL when not found.
 */
4489 static struct action *
4490 action_find(struct rte_swx_pipeline *p, const char *name)
4492 struct action *elem;
4497 TAILQ_FOREACH(elem, &p->actions, node)
4498 if (strcmp(elem->name, name) == 0)
/*
 * Find a named field in the action's argument struct type; NULL when the
 * action takes no arguments (a->st == NULL) or the field does not exist.
 */
4504 static struct field *
4505 action_field_find(struct action *a, const char *name)
4507 return a->st ? struct_type_field_find(a->st, name) : NULL;
/*
 * Parse an action-argument reference of the form "t.<field>" and resolve
 * <field> against the action's argument struct. Returns NULL when the
 * "t." prefix is absent or the field is unknown.
 */
4510 static struct field *
4511 action_field_parse(struct action *action, const char *name)
4513 if (name[0] != 't' || name[1] != '.')
4516 return action_field_find(action, &name[2]);
/*
 * Public API: register a new action with the pipeline.
 * Validates the (unique) action name and optional argument struct type,
 * allocates the action node, translates its instruction program via
 * instruction_config(), and appends it to the pipeline's action list.
 * Returns 0 on success, -errno-style code otherwise (via CHECK()).
 * NOTE(review): the allocation-failure and translation-failure cleanup
 * paths are elided from this view.
 */
4520 rte_swx_pipeline_action_config(struct rte_swx_pipeline *p,
4522 const char *args_struct_type_name,
4523 const char **instructions,
4524 uint32_t n_instructions)
4526 struct struct_type *args_struct_type;
4532 CHECK_NAME(name, EINVAL);
4533 CHECK(!action_find(p, name), EEXIST);
4535 if (args_struct_type_name) {
4536 CHECK_NAME(args_struct_type_name, EINVAL);
4537 args_struct_type = struct_type_find(p, args_struct_type_name);
4538 CHECK(args_struct_type, EINVAL);
4540 args_struct_type = NULL;
4543 /* Node allocation. */
4544 a = calloc(1, sizeof(struct action));
4547 /* Node initialization. */
4548 strcpy(a->name, name);
4549 a->st = args_struct_type;
4550 a->id = p->n_actions;
4552 /* Instruction translation. */
4553 err = instruction_config(p, a, instructions, n_instructions);
4559 /* Node add to tailq. */
4560 TAILQ_INSERT_TAIL(&p->actions, a, node);
/*
 * Build step: create the per-action-ID instruction pointer lookup table
 * (p->action_instructions) used at run time to start executing an action.
 */
4567 action_build(struct rte_swx_pipeline *p)
4569 struct action *action;
4571 p->action_instructions = calloc(p->n_actions,
4572 sizeof(struct instruction *));
4573 CHECK(p->action_instructions, ENOMEM);
4575 TAILQ_FOREACH(action, &p->actions, node)
4576 p->action_instructions[action->id] = action->instructions;
/* Undo action_build(): release the action instruction lookup table. */
4582 action_build_free(struct rte_swx_pipeline *p)
4584 free(p->action_instructions);
4585 p->action_instructions = NULL;
/*
 * Free all registered actions: first the build-time state, then each action
 * node (its translated instructions and the node itself) popped off the
 * pipeline's action list.
 */
4589 action_free(struct rte_swx_pipeline *p)
4591 action_build_free(p);
4594 struct action *action;
4596 action = TAILQ_FIRST(&p->actions);
4600 TAILQ_REMOVE(&p->actions, action, node);
4601 free(action->instructions);
/*
 * Look up a registered table type by name; NULL when not found
 * (return statements elided from this view).
 */
4609 static struct table_type *
4610 table_type_find(struct rte_swx_pipeline *p, const char *name)
4612 struct table_type *elem;
4614 TAILQ_FOREACH(elem, &p->table_types, node)
4615 if (strcmp(elem->name, name) == 0)
/*
 * Pick the table type implementation for a given match type: prefer the
 * recommended type (only if its match type agrees), otherwise fall back to
 * the first registered type with the requested match type.
 */
4621 static struct table_type *
4622 table_type_resolve(struct rte_swx_pipeline *p,
4623 const char *recommended_type_name,
4624 enum rte_swx_table_match_type match_type)
4626 struct table_type *elem;
4628 /* Only consider the recommended type if the match type is correct. */
4629 if (recommended_type_name)
4630 TAILQ_FOREACH(elem, &p->table_types, node)
4631 if (!strcmp(elem->name, recommended_type_name) &&
4632 (elem->match_type == match_type))
4635 /* Ignore the recommended type and get the first element with this match
4638 TAILQ_FOREACH(elem, &p->table_types, node)
4639 if (elem->match_type == match_type)
/* Look up a table by name; NULL when not found (returns elided). */
4645 static struct table *
4646 table_find(struct rte_swx_pipeline *p, const char *name)
4650 TAILQ_FOREACH(elem, &p->tables, node)
4651 if (strcmp(elem->name, name) == 0)
/* Look up a table by numeric ID; NULL when no table has that ID. */
4657 static struct table *
4658 table_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
4660 struct table *table = NULL;
4662 TAILQ_FOREACH(table, &p->tables, node)
4663 if (table->id == id)
/*
 * Public API: register a table type implementation (create/lookup/free ops)
 * under a unique name for a given match type. The ops struct is copied, so
 * the caller's storage need not outlive the call.
 */
4670 rte_swx_pipeline_table_type_register(struct rte_swx_pipeline *p,
4672 enum rte_swx_table_match_type match_type,
4673 struct rte_swx_table_ops *ops)
4675 struct table_type *elem;
4679 CHECK_NAME(name, EINVAL);
4680 CHECK(!table_type_find(p, name), EEXIST);
/* create, lkp and free are mandatory ops. */
4683 CHECK(ops->create, EINVAL);
4684 CHECK(ops->lkp, EINVAL);
4685 CHECK(ops->free, EINVAL);
4687 /* Node allocation. */
4688 elem = calloc(1, sizeof(struct table_type))
4689 CHECK(elem, ENOMEM);
4691 /* Node initialization. */
4692 strcpy(elem->name, name);
4693 elem->match_type = match_type;
4694 memcpy(&elem->ops, ops, sizeof(*ops));
4696 /* Node add to tailq. */
4697 TAILQ_INSERT_TAIL(&p->table_types, elem, node);
/*
 * Derive the table's overall match type from its field list: all-exact =>
 * EXACT; a single trailing LPM field => LPM; anything else => WILDCARD.
 */
4702 static enum rte_swx_table_match_type
4703 table_match_type_resolve(struct rte_swx_match_field_params *fields,
4708 for (i = 0; i < n_fields; i++)
4709 if (fields[i].match_type != RTE_SWX_TABLE_MATCH_EXACT)
4713 return RTE_SWX_TABLE_MATCH_EXACT;
4715 if ((i == n_fields - 1) &&
4716 (fields[i].match_type == RTE_SWX_TABLE_MATCH_LPM))
4717 return RTE_SWX_TABLE_MATCH_LPM;
4719 return RTE_SWX_TABLE_MATCH_WILDCARD;
/*
 * Public API: create a match-action table.
 * Validates the match fields (all from the same header, or all from
 * metadata, in increasing offset order), the action list and the default
 * action, resolves the table type implementation, then allocates and
 * initializes the table node and appends it to the pipeline.
 * NOTE(review): several allocation-failure cleanup paths are elided from
 * this view. The "¶ms" tokens below look like a mis-encoding of
 * "&params" introduced by extraction — left byte-identical here.
 */
4723 rte_swx_pipeline_table_config(struct rte_swx_pipeline *p,
4725 struct rte_swx_pipeline_table_params *params,
4726 const char *recommended_table_type_name,
4730 struct table_type *type;
4732 struct action *default_action;
4733 struct header *header = NULL;
4735 uint32_t offset_prev = 0, action_data_size_max = 0, i;
4739 CHECK_NAME(name, EINVAL);
4740 CHECK(!table_find(p, name), EEXIST);
4742 CHECK(params, EINVAL);
/* Match field checks: each field must resolve to a header field or a
 * metadata field; all fields must come from the same struct and be listed
 * in strictly increasing offset order.
 */
4745 CHECK(!params->n_fields || params->fields, EINVAL);
4746 for (i = 0; i < params->n_fields; i++) {
4747 struct rte_swx_match_field_params *field = ¶ms->fields[i];
4749 struct field *hf, *mf;
4752 CHECK_NAME(field->name, EINVAL);
4754 hf = header_field_parse(p, field->name, &h);
4755 mf = metadata_field_parse(p, field->name);
4756 CHECK(hf || mf, EINVAL);
4758 offset = hf ? hf->offset : mf->offset;
/* First field decides header-vs-metadata for the whole key. */
4761 is_header = hf ? 1 : 0;
4762 header = hf ? h : NULL;
4763 offset_prev = offset;
4768 CHECK((is_header && hf && (h->id == header->id)) ||
4769 (!is_header && mf), EINVAL);
4771 CHECK(offset > offset_prev, EINVAL);
4772 offset_prev = offset;
4775 /* Action checks. */
4776 CHECK(params->n_actions, EINVAL);
4777 CHECK(params->action_names, EINVAL);
4778 for (i = 0; i < params->n_actions; i++) {
4779 const char *action_name = params->action_names[i];
4781 uint32_t action_data_size;
4783 CHECK(action_name, EINVAL);
4785 a = action_find(p, action_name);
4788 action_data_size = a->st ? a->st->n_bits / 8 : 0;
4789 if (action_data_size > action_data_size_max)
4790 action_data_size_max = action_data_size;
4793 CHECK(params->default_action_name, EINVAL);
/* NOTE(review): this loop is bounded by p->n_actions but indexes the
 * caller-supplied params->action_names[] — when p->n_actions >
 * params->n_actions this reads past the caller's array. The bound should
 * likely be params->n_actions; confirm against upstream.
 */
4794 for (i = 0; i < p->n_actions; i++)
4795 if (!strcmp(params->action_names[i],
4796 params->default_action_name))
4798 CHECK(i < params->n_actions, EINVAL);
4799 default_action = action_find(p, params->default_action_name);
/* NOTE(review): this check allows st != NULL with default_action_data ==
 * NULL, which the memcpy at node-initialization time below would then
 * dereference — confirm whether a stricter check is intended.
 */
4800 CHECK((default_action->st && params->default_action_data) ||
4801 !params->default_action_data, EINVAL);
4803 /* Table type checks. */
4804 if (params->n_fields) {
4805 enum rte_swx_table_match_type match_type;
4807 match_type = table_match_type_resolve(params->fields,
4809 type = table_type_resolve(p,
4810 recommended_table_type_name,
4812 CHECK(type, EINVAL);
4817 /* Memory allocation. */
4818 t = calloc(1, sizeof(struct table));
4821 t->fields = calloc(params->n_fields, sizeof(struct match_field));
4827 t->actions = calloc(params->n_actions, sizeof(struct action *));
4834 if (action_data_size_max) {
4835 t->default_action_data = calloc(1, action_data_size_max);
4836 if (!t->default_action_data) {
4844 /* Node initialization. */
4845 strcpy(t->name, name);
4846 if (args && args[0])
4847 strcpy(t->args, args);
4850 for (i = 0; i < params->n_fields; i++) {
4851 struct rte_swx_match_field_params *field = ¶ms->fields[i];
4852 struct match_field *f = &t->fields[i];
4854 f->match_type = field->match_type;
4855 f->field = is_header ?
4856 header_field_parse(p, field->name, NULL) :
4857 metadata_field_parse(p, field->name);
4859 t->n_fields = params->n_fields;
4860 t->is_header = is_header;
4863 for (i = 0; i < params->n_actions; i++)
4864 t->actions[i] = action_find(p, params->action_names[i]);
4865 t->default_action = default_action;
4866 if (default_action->st)
4867 memcpy(t->default_action_data,
4868 params->default_action_data,
4869 default_action->st->n_bits / 8);
4870 t->n_actions = params->n_actions;
4871 t->default_action_is_const = params->default_action_is_const;
4872 t->action_data_size_max = action_data_size_max;
4875 t->id = p->n_tables;
4877 /* Node add to tailq. */
4878 TAILQ_INSERT_TAIL(&p->tables, t, node);
/*
 * Build the rte_swx_table_params passed to the table type's create() op:
 * key offset/size spanning the first..last match fields, a byte mask with
 * 0xFF over the bytes actually covered by fields, and the max action data
 * size across all table actions. Returns a heap-allocated params struct
 * (freed by table_params_free()) or NULL on allocation failure.
 */
4884 static struct rte_swx_table_params *
4885 table_params_get(struct table *table)
4887 struct rte_swx_table_params *params;
4888 struct field *first, *last;
4890 uint32_t key_size, key_offset, action_data_size, i;
4892 /* Memory allocation. */
4893 params = calloc(1, sizeof(struct rte_swx_table_params));
4897 /* Key offset and size. */
4898 first = table->fields[0].field;
4899 last = table->fields[table->n_fields - 1].field;
4900 key_offset = first->offset / 8;
4901 key_size = (last->offset + last->n_bits - first->offset) / 8;
4903 /* Memory allocation. */
4904 key_mask = calloc(1, key_size);
/* Mark the bytes of each match field as significant in the key mask;
 * gaps between fields stay zero (don't-care).
 */
4911 for (i = 0; i < table->n_fields; i++) {
4912 struct field *f = table->fields[i].field;
4913 uint32_t start = (f->offset - first->offset) / 8;
4914 size_t size = f->n_bits / 8;
4916 memset(&key_mask[start], 0xFF, size);
4919 /* Action data size. */
4920 action_data_size = 0;
4921 for (i = 0; i < table->n_actions; i++) {
4922 struct action *action = table->actions[i];
4923 uint32_t ads = action->st ? action->st->n_bits / 8 : 0;
4925 if (ads > action_data_size)
4926 action_data_size = ads;
4930 params->match_type = table->type->match_type;
4931 params->key_size = key_size;
4932 params->key_offset = key_offset;
4933 params->key_mask0 = key_mask;
4934 params->action_data_size = action_data_size;
4935 params->n_keys_max = table->size;
/* Release a params struct built by table_params_get(), mask included. */
4941 table_params_free(struct rte_swx_table_params *params)
4946 free(params->key_mask0);
/*
 * Build step: allocate the per-table runtime state array and, for each
 * table with a real type, create the underlying table object via its
 * create() op and snapshot the default action data and ID.
 * NOTE(review): when CHECK() fails mid-loop here, previously created
 * ts->obj instances are reclaimed by table_state_build_free() on the
 * caller's unwind path — confirm the caller always runs it.
 */
4951 table_state_build(struct rte_swx_pipeline *p)
4953 struct table *table;
4955 p->table_state = calloc(p->n_tables,
4956 sizeof(struct rte_swx_table_state));
4957 CHECK(p->table_state, ENOMEM);
4959 TAILQ_FOREACH(table, &p->tables, node) {
4960 struct rte_swx_table_state *ts = &p->table_state[table->id];
4963 struct rte_swx_table_params *params;
4966 params = table_params_get(table);
4967 CHECK(params, ENOMEM);
4969 ts->obj = table->type->ops.create(params,
/* params is a build-time temporary: free it whether create succeeded. */
4974 table_params_free(params);
4975 CHECK(ts->obj, ENODEV);
4978 /* ts->default_action_data. */
4979 if (table->action_data_size_max) {
4980 ts->default_action_data =
4981 malloc(table->action_data_size_max);
4982 CHECK(ts->default_action_data, ENOMEM);
4984 memcpy(ts->default_action_data,
4985 table->default_action_data,
4986 table->action_data_size_max);
4989 /* ts->default_action_id. */
4990 ts->default_action_id = table->default_action->id;
/*
 * Undo table_state_build(): free each table's underlying object (via its
 * type's free() op) and default action data, then the state array itself.
 * Safe to call when the state array was never allocated.
 */
4997 table_state_build_free(struct rte_swx_pipeline *p)
5001 if (!p->table_state)
5004 for (i = 0; i < p->n_tables; i++) {
5005 struct rte_swx_table_state *ts = &p->table_state[i];
5006 struct table *table = table_find_by_id(p, i);
5009 if (table->type && ts->obj)
5010 table->type->ops.free(ts->obj);
5012 /* ts->default_action_data. */
5013 free(ts->default_action_data);
5016 free(p->table_state);
5017 p->table_state = NULL;
/* Free all table runtime state (thin wrapper over the build-free path). */
5021 table_state_free(struct rte_swx_pipeline *p)
5023 table_state_build_free(p);
/*
 * Lookup stub installed for tables without a real type (no match fields):
 * always completes immediately reporting a miss (return 1 = DONE, no hit).
 */
5027 table_stub_lkp(void *table __rte_unused,
5028 void *mailbox __rte_unused,
5029 uint8_t **key __rte_unused,
5030 uint64_t *action_id __rte_unused,
5031 uint8_t **action_data __rte_unused,
5035 return 1; /* DONE. */
/*
 * Build step: for every thread, allocate the per-table runtime array and
 * wire each entry with the table type's lookup function, a private mailbox
 * sized by mailbox_size_get(), and a pointer to the key source (header or
 * metadata struct). Tables without a type get the table_stub_lkp stub.
 */
5039 table_build(struct rte_swx_pipeline *p)
5043 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
5044 struct thread *t = &p->threads[i];
5045 struct table *table;
5047 t->tables = calloc(p->n_tables, sizeof(struct table_runtime));
5048 CHECK(t->tables, ENOMEM);
5050 TAILQ_FOREACH(table, &p->tables, node) {
5051 struct table_runtime *r = &t->tables[table->id];
5056 size = table->type->ops.mailbox_size_get();
5059 r->func = table->type->ops.lkp;
5063 r->mailbox = calloc(1, size);
5064 CHECK(r->mailbox, ENOMEM);
5068 r->key = table->is_header ?
5069 &t->structs[table->header->struct_id] :
5070 &t->structs[p->metadata_struct_id];
/* No match fields / no type: lookups always miss. */
5072 r->func = table_stub_lkp;
/*
 * Undo table_build(): for every thread, free each table runtime's mailbox
 * and then the per-thread table array (freeing details elided here).
 */
5081 table_build_free(struct rte_swx_pipeline *p)
5085 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
5086 struct thread *t = &p->threads[i];
5092 for (j = 0; j < p->n_tables; j++) {
5093 struct table_runtime *r = &t->tables[j];
/*
 * Free all tables and table types: build-time state first, then each table
 * node (its actions array and default action data), then each registered
 * table type node.
 */
5104 table_free(struct rte_swx_pipeline *p)
5106 table_build_free(p);
5112 elem = TAILQ_FIRST(&p->tables);
5116 TAILQ_REMOVE(&p->tables, elem, node);
5118 free(elem->actions);
5119 free(elem->default_action_data);
5125 struct table_type *elem;
5127 elem = TAILQ_FIRST(&p->table_types);
5131 TAILQ_REMOVE(&p->table_types, elem, node);
/*
 * Public API: allocate and zero-initialize a pipeline instance, set up all
 * its object lists, and return it through *p. Struct ID 0 is reserved for
 * the action-data struct, hence n_structs starts at 1. numa_node is stored
 * for later allocations; returns 0 on success, ENOMEM on failure.
 */
5140 rte_swx_pipeline_config(struct rte_swx_pipeline **p, int numa_node)
5142 struct rte_swx_pipeline *pipeline;
5144 /* Check input parameters. */
5147 /* Memory allocation. */
5148 pipeline = calloc(1, sizeof(struct rte_swx_pipeline));
5149 CHECK(pipeline, ENOMEM);
5151 /* Initialization. */
5152 TAILQ_INIT(&pipeline->struct_types);
5153 TAILQ_INIT(&pipeline->port_in_types);
5154 TAILQ_INIT(&pipeline->ports_in);
5155 TAILQ_INIT(&pipeline->port_out_types);
5156 TAILQ_INIT(&pipeline->ports_out);
5157 TAILQ_INIT(&pipeline->extern_types);
5158 TAILQ_INIT(&pipeline->extern_objs);
5159 TAILQ_INIT(&pipeline->extern_funcs);
5160 TAILQ_INIT(&pipeline->headers);
5161 TAILQ_INIT(&pipeline->actions);
5162 TAILQ_INIT(&pipeline->table_types);
5163 TAILQ_INIT(&pipeline->tables);
5165 pipeline->n_structs = 1; /* Struct 0 is reserved for action_data. */
5166 pipeline->numa_node = numa_node;
/* Public API: destroy a pipeline and release all owned resources.
 * Visible steps: free the instruction array, the table state, and the extern
 * functions. NOTE(review): additional teardown calls (tables, actions,
 * headers, ports, structs, the pipeline itself) are elided in this view —
 * confirm the full free sequence against the complete source. */
5173 rte_swx_pipeline_free(struct rte_swx_pipeline *p)
5178 free(p->instructions);
5180 table_state_free(p);
5185 extern_func_free(p);
/* Public API: translate and install the pipeline's instruction program.
 *
 * Delegates parsing/translation to instruction_config() (NULL action context
 * means these are the pipeline-level instructions, not an action's), then
 * resets every thread's instruction pointer so execution starts from the
 * beginning of the new program.
 *
 * param p              Pipeline handle.
 * param instructions   Array of instruction strings.
 * param n_instructions Number of entries in the array.
 * return instruction_config()'s status (error propagation lines elided). */
5195 rte_swx_pipeline_instructions_config(struct rte_swx_pipeline *p,
5196 const char **instructions,
5197 uint32_t n_instructions)
5202 err = instruction_config(p, NULL, instructions, n_instructions);
5206 /* Thread instruction pointer reset. */
5207 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
5208 struct thread *t = &p->threads[i];
5210 thread_ip_reset(p, t);
/* Public API: build the pipeline into its runnable form.
 *
 * May only be called once per pipeline (guarded by p->build_done). Runs the
 * per-subsystem build stages in dependency order: ports in, ports out,
 * structs, extern objects, extern functions, headers, metadata, actions,
 * tables, table state. Each stage's status is checked (checks elided between
 * the visible calls); on any failure control reaches the cleanup tail, which
 * unwinds every stage in reverse order via the *_build_free() helpers so the
 * pipeline is left in its pre-build state.
 *
 * return 0 on success, negative errno from the first failing stage. */
5217 rte_swx_pipeline_build(struct rte_swx_pipeline *p)
5222 CHECK(p->build_done == 0, EEXIST);
5224 status = port_in_build(p);
5228 status = port_out_build(p);
5232 status = struct_build(p);
5236 status = extern_obj_build(p);
5240 status = extern_func_build(p);
5244 status = header_build(p);
5248 status = metadata_build(p);
5252 status = action_build(p);
5256 status = table_build(p);
5260 status = table_state_build(p);
/* Error path: unwind all build stages in reverse order. */
5268 table_state_build_free(p);
5269 table_build_free(p);
5270 action_build_free(p);
5271 metadata_build_free(p);
5272 header_build_free(p);
5273 extern_func_build_free(p);
5274 extern_obj_build_free(p);
5275 port_out_build_free(p);
5276 port_in_build_free(p);
5277 struct_build_free(p);
/* Public API: execute up to n_instructions pipeline instructions.
 * NOTE(review): the loop body (the per-instruction dispatch) is elided in
 * this view — only the iteration bound is visible. */
5283 rte_swx_pipeline_run(struct rte_swx_pipeline *p, uint32_t n_instructions)
5287 for (i = 0; i < n_instructions; i++)
/* Public API: expose the pipeline's table state array to the caller.
 *
 * param p           Pipeline handle; must be non-NULL and already built.
 * param table_state [out] Set to the internal table state array; the
 *                   pipeline retains ownership (no copy is made).
 * Fails on NULL arguments or if the pipeline has not been built
 * (return value on the failure branch is elided in this view). */
5295 rte_swx_pipeline_table_state_get(struct rte_swx_pipeline *p,
5296 struct rte_swx_table_state **table_state)
5298 if (!p || !table_state || !p->build_done)
5301 *table_state = p->table_state;
5306 rte_swx_pipeline_table_state_set(struct rte_swx_pipeline *p,
5307 struct rte_swx_table_state *table_state)
5309 if (!p || !table_state || !p->build_done)
5312 p->table_state = table_state;