1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Intel Corporation
11 #include <rte_common.h>
12 #include <rte_prefetch.h>
13 #include <rte_byteorder.h>
15 #include "rte_swx_pipeline.h"
16 #include "rte_swx_ctl.h"
18 #define CHECK(condition, err_code) \
24 #define CHECK_NAME(name, err_code) \
25 CHECK((name) && (name)[0], err_code)
32 #define TRACE(...) printf(__VA_ARGS__)
37 #define ntoh64(x) rte_be_to_cpu_64(x)
38 #define hton64(x) rte_cpu_to_be_64(x)
44 char name[RTE_SWX_NAME_SIZE];
50 TAILQ_ENTRY(struct_type) node;
51 char name[RTE_SWX_NAME_SIZE];
57 TAILQ_HEAD(struct_type_tailq, struct_type);
63 TAILQ_ENTRY(port_in_type) node;
64 char name[RTE_SWX_NAME_SIZE];
65 struct rte_swx_port_in_ops ops;
68 TAILQ_HEAD(port_in_type_tailq, port_in_type);
71 TAILQ_ENTRY(port_in) node;
72 struct port_in_type *type;
77 TAILQ_HEAD(port_in_tailq, port_in);
79 struct port_in_runtime {
80 rte_swx_port_in_pkt_rx_t pkt_rx;
87 struct port_out_type {
88 TAILQ_ENTRY(port_out_type) node;
89 char name[RTE_SWX_NAME_SIZE];
90 struct rte_swx_port_out_ops ops;
93 TAILQ_HEAD(port_out_type_tailq, port_out_type);
96 TAILQ_ENTRY(port_out) node;
97 struct port_out_type *type;
102 TAILQ_HEAD(port_out_tailq, port_out);
104 struct port_out_runtime {
105 rte_swx_port_out_pkt_tx_t pkt_tx;
106 rte_swx_port_out_flush_t flush;
113 struct extern_type_member_func {
114 TAILQ_ENTRY(extern_type_member_func) node;
115 char name[RTE_SWX_NAME_SIZE];
116 rte_swx_extern_type_member_func_t func;
120 TAILQ_HEAD(extern_type_member_func_tailq, extern_type_member_func);
123 TAILQ_ENTRY(extern_type) node;
124 char name[RTE_SWX_NAME_SIZE];
125 struct struct_type *mailbox_struct_type;
126 rte_swx_extern_type_constructor_t constructor;
127 rte_swx_extern_type_destructor_t destructor;
128 struct extern_type_member_func_tailq funcs;
132 TAILQ_HEAD(extern_type_tailq, extern_type);
135 TAILQ_ENTRY(extern_obj) node;
136 char name[RTE_SWX_NAME_SIZE];
137 struct extern_type *type;
143 TAILQ_HEAD(extern_obj_tailq, extern_obj);
145 #ifndef RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX
146 #define RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX 8
149 struct extern_obj_runtime {
152 rte_swx_extern_type_member_func_t funcs[RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX];
159 TAILQ_ENTRY(extern_func) node;
160 char name[RTE_SWX_NAME_SIZE];
161 struct struct_type *mailbox_struct_type;
162 rte_swx_extern_func_t func;
167 TAILQ_HEAD(extern_func_tailq, extern_func);
169 struct extern_func_runtime {
171 rte_swx_extern_func_t func;
178 TAILQ_ENTRY(header) node;
179 char name[RTE_SWX_NAME_SIZE];
180 struct struct_type *st;
185 TAILQ_HEAD(header_tailq, header);
187 struct header_runtime {
191 struct header_out_runtime {
201 /* Packet headers are always in Network Byte Order (NBO), i.e. big endian.
202 * Packet meta-data fields are always assumed to be in Host Byte Order (HBO).
203 * Table entry fields can be in either NBO or HBO; they are assumed to be in HBO
204 * when transferred to packet meta-data and in NBO when transferred to packet
208 /* Notation conventions:
209 * -Header field: H = h.header.field (dst/src)
210 * -Meta-data field: M = m.field (dst/src)
211 * -Extern object mailbox field: E = e.field (dst/src)
212 * -Extern function mailbox field: F = f.field (dst/src)
213 * -Table action data field: T = t.field (src only)
214 * -Immediate value: I = 32-bit unsigned value (src only)
217 enum instruction_type {
224 /* extract h.header */
245 /* validate h.header */
248 /* invalidate h.header */
249 INSTR_HDR_INVALIDATE,
253 * dst = HMEF, src = HMEFTI
255 INSTR_MOV, /* dst = MEF, src = MEFT */
256 INSTR_MOV_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
257 INSTR_MOV_I, /* dst = HMEF, src = I */
259 /* dma h.header t.field
260 * memcpy(h.header, t.field, sizeof(h.header))
273 * dst = HMEF, src = HMEFTI
275 INSTR_ALU_ADD, /* dst = MEF, src = MEF */
276 INSTR_ALU_ADD_MH, /* dst = MEF, src = H */
277 INSTR_ALU_ADD_HM, /* dst = H, src = MEF */
278 INSTR_ALU_ADD_HH, /* dst = H, src = H */
279 INSTR_ALU_ADD_MI, /* dst = MEF, src = I */
280 INSTR_ALU_ADD_HI, /* dst = H, src = I */
284 * dst = HMEF, src = HMEFTI
286 INSTR_ALU_SUB, /* dst = MEF, src = MEF */
287 INSTR_ALU_SUB_MH, /* dst = MEF, src = H */
288 INSTR_ALU_SUB_HM, /* dst = H, src = MEF */
289 INSTR_ALU_SUB_HH, /* dst = H, src = H */
290 INSTR_ALU_SUB_MI, /* dst = MEF, src = I */
291 INSTR_ALU_SUB_HI, /* dst = H, src = I */
294 * dst = dst '+ src[0:1] '+ src[2:3] + ...
295 * dst = H, src = {H, h.header}
297 INSTR_ALU_CKADD_FIELD, /* src = H */
298 INSTR_ALU_CKADD_STRUCT20, /* src = h.header, with sizeof(header) = 20 */
299 INSTR_ALU_CKADD_STRUCT, /* src = h.hdeader, with any sizeof(header) */
305 INSTR_ALU_CKSUB_FIELD,
309 * dst = HMEF, src = HMEFTI
311 INSTR_ALU_AND, /* dst = MEF, src = MEFT */
312 INSTR_ALU_AND_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
313 INSTR_ALU_AND_I, /* dst = HMEF, src = I */
317 * dst = HMEF, src = HMEFTI
319 INSTR_ALU_OR, /* dst = MEF, src = MEFT */
320 INSTR_ALU_OR_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
321 INSTR_ALU_OR_I, /* dst = HMEF, src = I */
325 * dst = HMEF, src = HMEFTI
327 INSTR_ALU_XOR, /* dst = MEF, src = MEFT */
328 INSTR_ALU_XOR_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
329 INSTR_ALU_XOR_I, /* dst = HMEF, src = I */
333 * dst = HMEF, src = HMEFTI
335 INSTR_ALU_SHL, /* dst = MEF, src = MEF */
336 INSTR_ALU_SHL_MH, /* dst = MEF, src = H */
337 INSTR_ALU_SHL_HM, /* dst = H, src = MEF */
338 INSTR_ALU_SHL_HH, /* dst = H, src = H */
339 INSTR_ALU_SHL_MI, /* dst = MEF, src = I */
340 INSTR_ALU_SHL_HI, /* dst = H, src = I */
344 * dst = HMEF, src = HMEFTI
346 INSTR_ALU_SHR, /* dst = MEF, src = MEF */
347 INSTR_ALU_SHR_MH, /* dst = MEF, src = H */
348 INSTR_ALU_SHR_HM, /* dst = H, src = MEF */
349 INSTR_ALU_SHR_HH, /* dst = H, src = H */
350 INSTR_ALU_SHR_MI, /* dst = MEF, src = I */
351 INSTR_ALU_SHR_HI, /* dst = H, src = I */
356 /* extern e.obj.func */
363 struct instr_operand {
378 uint8_t header_id[8];
379 uint8_t struct_id[8];
384 struct instr_hdr_validity {
392 struct instr_extern_obj {
397 struct instr_extern_func {
401 struct instr_dst_src {
402 struct instr_operand dst;
404 struct instr_operand src;
411 uint8_t header_id[8];
412 uint8_t struct_id[8];
423 enum instruction_type type;
426 struct instr_hdr_validity valid;
427 struct instr_dst_src mov;
428 struct instr_dma dma;
429 struct instr_dst_src alu;
430 struct instr_table table;
431 struct instr_extern_obj ext_obj;
432 struct instr_extern_func ext_func;
436 struct instruction_data {
437 char label[RTE_SWX_NAME_SIZE];
438 char jmp_label[RTE_SWX_NAME_SIZE];
439 uint32_t n_users; /* user = jmp instruction to this instruction. */
447 TAILQ_ENTRY(action) node;
448 char name[RTE_SWX_NAME_SIZE];
449 struct struct_type *st;
450 struct instruction *instructions;
451 uint32_t n_instructions;
455 TAILQ_HEAD(action_tailq, action);
461 TAILQ_ENTRY(table_type) node;
462 char name[RTE_SWX_NAME_SIZE];
463 enum rte_swx_table_match_type match_type;
464 struct rte_swx_table_ops ops;
467 TAILQ_HEAD(table_type_tailq, table_type);
470 enum rte_swx_table_match_type match_type;
475 TAILQ_ENTRY(table) node;
476 char name[RTE_SWX_NAME_SIZE];
477 char args[RTE_SWX_NAME_SIZE];
478 struct table_type *type; /* NULL when n_fields == 0. */
481 struct match_field *fields;
483 int is_header; /* Only valid when n_fields > 0. */
484 struct header *header; /* Only valid when n_fields > 0. */
487 struct action **actions;
488 struct action *default_action;
489 uint8_t *default_action_data;
491 int default_action_is_const;
492 uint32_t action_data_size_max;
498 TAILQ_HEAD(table_tailq, table);
500 struct table_runtime {
501 rte_swx_table_lookup_t func;
511 struct rte_swx_pkt pkt;
517 /* Packet headers. */
518 struct header_runtime *headers; /* Extracted or generated headers. */
519 struct header_out_runtime *headers_out; /* Emitted headers. */
520 uint8_t *header_storage;
521 uint8_t *header_out_storage;
522 uint64_t valid_headers;
523 uint32_t n_headers_out;
525 /* Packet meta-data. */
529 struct table_runtime *tables;
530 struct rte_swx_table_state *table_state;
532 int hit; /* 0 = Miss, 1 = Hit. */
534 /* Extern objects and functions. */
535 struct extern_obj_runtime *extern_objs;
536 struct extern_func_runtime *extern_funcs;
539 struct instruction *ip;
540 struct instruction *ret;
543 #define MASK64_BIT_GET(mask, pos) ((mask) & (1LLU << (pos)))
544 #define MASK64_BIT_SET(mask, pos) ((mask) | (1LLU << (pos)))
545 #define MASK64_BIT_CLR(mask, pos) ((mask) & ~(1LLU << (pos)))
547 #define ALU(thread, ip, operator) \
549 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
550 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
551 uint64_t dst64 = *dst64_ptr; \
552 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
553 uint64_t dst = dst64 & dst64_mask; \
555 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
556 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
557 uint64_t src64 = *src64_ptr; \
558 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
559 uint64_t src = src64 & src64_mask; \
561 uint64_t result = dst operator src; \
563 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
566 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
568 #define ALU_S(thread, ip, operator) \
570 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
571 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
572 uint64_t dst64 = *dst64_ptr; \
573 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
574 uint64_t dst = dst64 & dst64_mask; \
576 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
577 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
578 uint64_t src64 = *src64_ptr; \
579 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
581 uint64_t result = dst operator src; \
583 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
588 #define ALU_HM(thread, ip, operator) \
590 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
591 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
592 uint64_t dst64 = *dst64_ptr; \
593 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
594 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
596 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
597 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
598 uint64_t src64 = *src64_ptr; \
599 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
600 uint64_t src = src64 & src64_mask; \
602 uint64_t result = dst operator src; \
603 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
605 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
608 #define ALU_HH(thread, ip, operator) \
610 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
611 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
612 uint64_t dst64 = *dst64_ptr; \
613 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
614 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
616 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
617 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
618 uint64_t src64 = *src64_ptr; \
619 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
621 uint64_t result = dst operator src; \
622 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
624 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
636 #define ALU_I(thread, ip, operator) \
638 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
639 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
640 uint64_t dst64 = *dst64_ptr; \
641 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
642 uint64_t dst = dst64 & dst64_mask; \
644 uint64_t src = (ip)->alu.src_val; \
646 uint64_t result = dst operator src; \
648 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
653 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
655 #define ALU_HI(thread, ip, operator) \
657 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
658 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
659 uint64_t dst64 = *dst64_ptr; \
660 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
661 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
663 uint64_t src = (ip)->alu.src_val; \
665 uint64_t result = dst operator src; \
666 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
668 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
677 #define MOV(thread, ip) \
679 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
680 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
681 uint64_t dst64 = *dst64_ptr; \
682 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
684 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
685 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
686 uint64_t src64 = *src64_ptr; \
687 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \
688 uint64_t src = src64 & src64_mask; \
690 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
693 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
695 #define MOV_S(thread, ip) \
697 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
698 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
699 uint64_t dst64 = *dst64_ptr; \
700 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
702 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
703 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
704 uint64_t src64 = *src64_ptr; \
705 uint64_t src = ntoh64(src64) >> (64 - (ip)->mov.src.n_bits); \
707 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
716 #define MOV_I(thread, ip) \
718 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
719 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
720 uint64_t dst64 = *dst64_ptr; \
721 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
723 uint64_t src = (ip)->mov.src_val; \
725 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
728 #define METADATA_READ(thread, offset, n_bits) \
730 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
731 uint64_t m64 = *m64_ptr; \
732 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
736 #define METADATA_WRITE(thread, offset, n_bits, value) \
738 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
739 uint64_t m64 = *m64_ptr; \
740 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
742 uint64_t m_new = value; \
744 *m64_ptr = (m64 & ~m64_mask) | (m_new & m64_mask); \
747 #ifndef RTE_SWX_PIPELINE_THREADS_MAX
748 #define RTE_SWX_PIPELINE_THREADS_MAX 16
751 struct rte_swx_pipeline {
752 struct struct_type_tailq struct_types;
753 struct port_in_type_tailq port_in_types;
754 struct port_in_tailq ports_in;
755 struct port_out_type_tailq port_out_types;
756 struct port_out_tailq ports_out;
757 struct extern_type_tailq extern_types;
758 struct extern_obj_tailq extern_objs;
759 struct extern_func_tailq extern_funcs;
760 struct header_tailq headers;
761 struct struct_type *metadata_st;
762 uint32_t metadata_struct_id;
763 struct action_tailq actions;
764 struct table_type_tailq table_types;
765 struct table_tailq tables;
767 struct port_in_runtime *in;
768 struct port_out_runtime *out;
769 struct instruction **action_instructions;
770 struct rte_swx_table_state *table_state;
771 struct instruction *instructions;
772 struct thread threads[RTE_SWX_PIPELINE_THREADS_MAX];
776 uint32_t n_ports_out;
777 uint32_t n_extern_objs;
778 uint32_t n_extern_funcs;
784 uint32_t n_instructions;
792 static struct struct_type *
793 struct_type_find(struct rte_swx_pipeline *p, const char *name)
795 struct struct_type *elem;
797 TAILQ_FOREACH(elem, &p->struct_types, node)
798 if (strcmp(elem->name, name) == 0)
804 static struct field *
805 struct_type_field_find(struct struct_type *st, const char *name)
809 for (i = 0; i < st->n_fields; i++) {
810 struct field *f = &st->fields[i];
812 if (strcmp(f->name, name) == 0)
820 rte_swx_pipeline_struct_type_register(struct rte_swx_pipeline *p,
822 struct rte_swx_field_params *fields,
825 struct struct_type *st;
829 CHECK_NAME(name, EINVAL);
830 CHECK(fields, EINVAL);
831 CHECK(n_fields, EINVAL);
833 for (i = 0; i < n_fields; i++) {
834 struct rte_swx_field_params *f = &fields[i];
837 CHECK_NAME(f->name, EINVAL);
838 CHECK(f->n_bits, EINVAL);
839 CHECK(f->n_bits <= 64, EINVAL);
840 CHECK((f->n_bits & 7) == 0, EINVAL);
842 for (j = 0; j < i; j++) {
843 struct rte_swx_field_params *f_prev = &fields[j];
845 CHECK(strcmp(f->name, f_prev->name), EINVAL);
849 CHECK(!struct_type_find(p, name), EEXIST);
851 /* Node allocation. */
852 st = calloc(1, sizeof(struct struct_type));
855 st->fields = calloc(n_fields, sizeof(struct field));
861 /* Node initialization. */
862 strcpy(st->name, name);
863 for (i = 0; i < n_fields; i++) {
864 struct field *dst = &st->fields[i];
865 struct rte_swx_field_params *src = &fields[i];
867 strcpy(dst->name, src->name);
868 dst->n_bits = src->n_bits;
869 dst->offset = st->n_bits;
871 st->n_bits += src->n_bits;
873 st->n_fields = n_fields;
875 /* Node add to tailq. */
876 TAILQ_INSERT_TAIL(&p->struct_types, st, node);
882 struct_build(struct rte_swx_pipeline *p)
886 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
887 struct thread *t = &p->threads[i];
889 t->structs = calloc(p->n_structs, sizeof(uint8_t *));
890 CHECK(t->structs, ENOMEM);
897 struct_build_free(struct rte_swx_pipeline *p)
901 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
902 struct thread *t = &p->threads[i];
910 struct_free(struct rte_swx_pipeline *p)
912 struct_build_free(p);
916 struct struct_type *elem;
918 elem = TAILQ_FIRST(&p->struct_types);
922 TAILQ_REMOVE(&p->struct_types, elem, node);
931 static struct port_in_type *
932 port_in_type_find(struct rte_swx_pipeline *p, const char *name)
934 struct port_in_type *elem;
939 TAILQ_FOREACH(elem, &p->port_in_types, node)
940 if (strcmp(elem->name, name) == 0)
947 rte_swx_pipeline_port_in_type_register(struct rte_swx_pipeline *p,
949 struct rte_swx_port_in_ops *ops)
951 struct port_in_type *elem;
954 CHECK_NAME(name, EINVAL);
956 CHECK(ops->create, EINVAL);
957 CHECK(ops->free, EINVAL);
958 CHECK(ops->pkt_rx, EINVAL);
959 CHECK(ops->stats_read, EINVAL);
961 CHECK(!port_in_type_find(p, name), EEXIST);
963 /* Node allocation. */
964 elem = calloc(1, sizeof(struct port_in_type));
967 /* Node initialization. */
968 strcpy(elem->name, name);
969 memcpy(&elem->ops, ops, sizeof(*ops));
971 /* Node add to tailq. */
972 TAILQ_INSERT_TAIL(&p->port_in_types, elem, node);
977 static struct port_in *
978 port_in_find(struct rte_swx_pipeline *p, uint32_t port_id)
980 struct port_in *port;
982 TAILQ_FOREACH(port, &p->ports_in, node)
983 if (port->id == port_id)
990 rte_swx_pipeline_port_in_config(struct rte_swx_pipeline *p,
992 const char *port_type_name,
995 struct port_in_type *type = NULL;
996 struct port_in *port = NULL;
1001 CHECK(!port_in_find(p, port_id), EINVAL);
1003 CHECK_NAME(port_type_name, EINVAL);
1004 type = port_in_type_find(p, port_type_name);
1005 CHECK(type, EINVAL);
1007 obj = type->ops.create(args);
1010 /* Node allocation. */
1011 port = calloc(1, sizeof(struct port_in));
1012 CHECK(port, ENOMEM);
1014 /* Node initialization. */
1019 /* Node add to tailq. */
1020 TAILQ_INSERT_TAIL(&p->ports_in, port, node);
1021 if (p->n_ports_in < port_id + 1)
1022 p->n_ports_in = port_id + 1;
1028 port_in_build(struct rte_swx_pipeline *p)
1030 struct port_in *port;
1033 CHECK(p->n_ports_in, EINVAL);
1034 CHECK(rte_is_power_of_2(p->n_ports_in), EINVAL);
1036 for (i = 0; i < p->n_ports_in; i++)
1037 CHECK(port_in_find(p, i), EINVAL);
1039 p->in = calloc(p->n_ports_in, sizeof(struct port_in_runtime));
1040 CHECK(p->in, ENOMEM);
1042 TAILQ_FOREACH(port, &p->ports_in, node) {
1043 struct port_in_runtime *in = &p->in[port->id];
1045 in->pkt_rx = port->type->ops.pkt_rx;
1046 in->obj = port->obj;
1053 port_in_build_free(struct rte_swx_pipeline *p)
1060 port_in_free(struct rte_swx_pipeline *p)
1062 port_in_build_free(p);
1066 struct port_in *port;
1068 port = TAILQ_FIRST(&p->ports_in);
1072 TAILQ_REMOVE(&p->ports_in, port, node);
1073 port->type->ops.free(port->obj);
1077 /* Input port types. */
1079 struct port_in_type *elem;
1081 elem = TAILQ_FIRST(&p->port_in_types);
1085 TAILQ_REMOVE(&p->port_in_types, elem, node);
1093 static struct port_out_type *
1094 port_out_type_find(struct rte_swx_pipeline *p, const char *name)
1096 struct port_out_type *elem;
1101 TAILQ_FOREACH(elem, &p->port_out_types, node)
1102 if (!strcmp(elem->name, name))
1109 rte_swx_pipeline_port_out_type_register(struct rte_swx_pipeline *p,
1111 struct rte_swx_port_out_ops *ops)
1113 struct port_out_type *elem;
1116 CHECK_NAME(name, EINVAL);
1118 CHECK(ops->create, EINVAL);
1119 CHECK(ops->free, EINVAL);
1120 CHECK(ops->pkt_tx, EINVAL);
1121 CHECK(ops->stats_read, EINVAL);
1123 CHECK(!port_out_type_find(p, name), EEXIST);
1125 /* Node allocation. */
1126 elem = calloc(1, sizeof(struct port_out_type));
1127 CHECK(elem, ENOMEM);
1129 /* Node initialization. */
1130 strcpy(elem->name, name);
1131 memcpy(&elem->ops, ops, sizeof(*ops));
1133 /* Node add to tailq. */
1134 TAILQ_INSERT_TAIL(&p->port_out_types, elem, node);
1139 static struct port_out *
1140 port_out_find(struct rte_swx_pipeline *p, uint32_t port_id)
1142 struct port_out *port;
1144 TAILQ_FOREACH(port, &p->ports_out, node)
1145 if (port->id == port_id)
1152 rte_swx_pipeline_port_out_config(struct rte_swx_pipeline *p,
1154 const char *port_type_name,
1157 struct port_out_type *type = NULL;
1158 struct port_out *port = NULL;
1163 CHECK(!port_out_find(p, port_id), EINVAL);
1165 CHECK_NAME(port_type_name, EINVAL);
1166 type = port_out_type_find(p, port_type_name);
1167 CHECK(type, EINVAL);
1169 obj = type->ops.create(args);
1172 /* Node allocation. */
1173 port = calloc(1, sizeof(struct port_out));
1174 CHECK(port, ENOMEM);
1176 /* Node initialization. */
1181 /* Node add to tailq. */
1182 TAILQ_INSERT_TAIL(&p->ports_out, port, node);
1183 if (p->n_ports_out < port_id + 1)
1184 p->n_ports_out = port_id + 1;
1190 port_out_build(struct rte_swx_pipeline *p)
1192 struct port_out *port;
1195 CHECK(p->n_ports_out, EINVAL);
1197 for (i = 0; i < p->n_ports_out; i++)
1198 CHECK(port_out_find(p, i), EINVAL);
1200 p->out = calloc(p->n_ports_out, sizeof(struct port_out_runtime));
1201 CHECK(p->out, ENOMEM);
1203 TAILQ_FOREACH(port, &p->ports_out, node) {
1204 struct port_out_runtime *out = &p->out[port->id];
1206 out->pkt_tx = port->type->ops.pkt_tx;
1207 out->flush = port->type->ops.flush;
1208 out->obj = port->obj;
1215 port_out_build_free(struct rte_swx_pipeline *p)
1222 port_out_free(struct rte_swx_pipeline *p)
1224 port_out_build_free(p);
1228 struct port_out *port;
1230 port = TAILQ_FIRST(&p->ports_out);
1234 TAILQ_REMOVE(&p->ports_out, port, node);
1235 port->type->ops.free(port->obj);
1239 /* Output port types. */
1241 struct port_out_type *elem;
1243 elem = TAILQ_FIRST(&p->port_out_types);
1247 TAILQ_REMOVE(&p->port_out_types, elem, node);
1255 static struct extern_type *
1256 extern_type_find(struct rte_swx_pipeline *p, const char *name)
1258 struct extern_type *elem;
1260 TAILQ_FOREACH(elem, &p->extern_types, node)
1261 if (strcmp(elem->name, name) == 0)
1267 static struct extern_type_member_func *
1268 extern_type_member_func_find(struct extern_type *type, const char *name)
1270 struct extern_type_member_func *elem;
1272 TAILQ_FOREACH(elem, &type->funcs, node)
1273 if (strcmp(elem->name, name) == 0)
1279 static struct extern_obj *
1280 extern_obj_find(struct rte_swx_pipeline *p, const char *name)
1282 struct extern_obj *elem;
1284 TAILQ_FOREACH(elem, &p->extern_objs, node)
1285 if (strcmp(elem->name, name) == 0)
1291 static struct extern_type_member_func *
1292 extern_obj_member_func_parse(struct rte_swx_pipeline *p,
1294 struct extern_obj **obj)
1296 struct extern_obj *object;
1297 struct extern_type_member_func *func;
1298 char *object_name, *func_name;
1300 if (name[0] != 'e' || name[1] != '.')
1303 object_name = strdup(&name[2]);
1307 func_name = strchr(object_name, '.');
1316 object = extern_obj_find(p, object_name);
1322 func = extern_type_member_func_find(object->type, func_name);
1335 static struct field *
1336 extern_obj_mailbox_field_parse(struct rte_swx_pipeline *p,
1338 struct extern_obj **object)
1340 struct extern_obj *obj;
1342 char *obj_name, *field_name;
1344 if ((name[0] != 'e') || (name[1] != '.'))
1347 obj_name = strdup(&name[2]);
1351 field_name = strchr(obj_name, '.');
1360 obj = extern_obj_find(p, obj_name);
1366 f = struct_type_field_find(obj->type->mailbox_struct_type, field_name);
1380 rte_swx_pipeline_extern_type_register(struct rte_swx_pipeline *p,
1382 const char *mailbox_struct_type_name,
1383 rte_swx_extern_type_constructor_t constructor,
1384 rte_swx_extern_type_destructor_t destructor)
1386 struct extern_type *elem;
1387 struct struct_type *mailbox_struct_type;
1391 CHECK_NAME(name, EINVAL);
1392 CHECK(!extern_type_find(p, name), EEXIST);
1394 CHECK_NAME(mailbox_struct_type_name, EINVAL);
1395 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
1396 CHECK(mailbox_struct_type, EINVAL);
1398 CHECK(constructor, EINVAL);
1399 CHECK(destructor, EINVAL);
1401 /* Node allocation. */
1402 elem = calloc(1, sizeof(struct extern_type));
1403 CHECK(elem, ENOMEM);
1405 /* Node initialization. */
1406 strcpy(elem->name, name);
1407 elem->mailbox_struct_type = mailbox_struct_type;
1408 elem->constructor = constructor;
1409 elem->destructor = destructor;
1410 TAILQ_INIT(&elem->funcs);
1412 /* Node add to tailq. */
1413 TAILQ_INSERT_TAIL(&p->extern_types, elem, node);
1419 rte_swx_pipeline_extern_type_member_func_register(struct rte_swx_pipeline *p,
1420 const char *extern_type_name,
1422 rte_swx_extern_type_member_func_t member_func)
1424 struct extern_type *type;
1425 struct extern_type_member_func *type_member;
1429 CHECK(extern_type_name, EINVAL);
1430 type = extern_type_find(p, extern_type_name);
1431 CHECK(type, EINVAL);
1432 CHECK(type->n_funcs < RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX, ENOSPC);
1434 CHECK(name, EINVAL);
1435 CHECK(!extern_type_member_func_find(type, name), EEXIST);
1437 CHECK(member_func, EINVAL);
1439 /* Node allocation. */
1440 type_member = calloc(1, sizeof(struct extern_type_member_func));
1441 CHECK(type_member, ENOMEM);
1443 /* Node initialization. */
1444 strcpy(type_member->name, name);
1445 type_member->func = member_func;
1446 type_member->id = type->n_funcs;
1448 /* Node add to tailq. */
1449 TAILQ_INSERT_TAIL(&type->funcs, type_member, node);
1456 rte_swx_pipeline_extern_object_config(struct rte_swx_pipeline *p,
1457 const char *extern_type_name,
1461 struct extern_type *type;
1462 struct extern_obj *obj;
1467 CHECK_NAME(extern_type_name, EINVAL);
1468 type = extern_type_find(p, extern_type_name);
1469 CHECK(type, EINVAL);
1471 CHECK_NAME(name, EINVAL);
1472 CHECK(!extern_obj_find(p, name), EEXIST);
1474 /* Node allocation. */
1475 obj = calloc(1, sizeof(struct extern_obj));
1478 /* Object construction. */
1479 obj_handle = type->constructor(args);
1485 /* Node initialization. */
1486 strcpy(obj->name, name);
1488 obj->obj = obj_handle;
1489 obj->struct_id = p->n_structs;
1490 obj->id = p->n_extern_objs;
1492 /* Node add to tailq. */
1493 TAILQ_INSERT_TAIL(&p->extern_objs, obj, node);
1501 extern_obj_build(struct rte_swx_pipeline *p)
1505 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1506 struct thread *t = &p->threads[i];
1507 struct extern_obj *obj;
1509 t->extern_objs = calloc(p->n_extern_objs,
1510 sizeof(struct extern_obj_runtime));
1511 CHECK(t->extern_objs, ENOMEM);
1513 TAILQ_FOREACH(obj, &p->extern_objs, node) {
1514 struct extern_obj_runtime *r =
1515 &t->extern_objs[obj->id];
1516 struct extern_type_member_func *func;
1517 uint32_t mailbox_size =
1518 obj->type->mailbox_struct_type->n_bits / 8;
1522 r->mailbox = calloc(1, mailbox_size);
1523 CHECK(r->mailbox, ENOMEM);
1525 TAILQ_FOREACH(func, &obj->type->funcs, node)
1526 r->funcs[func->id] = func->func;
1528 t->structs[obj->struct_id] = r->mailbox;
1536 extern_obj_build_free(struct rte_swx_pipeline *p)
1540 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1541 struct thread *t = &p->threads[i];
1544 if (!t->extern_objs)
1547 for (j = 0; j < p->n_extern_objs; j++) {
1548 struct extern_obj_runtime *r = &t->extern_objs[j];
1553 free(t->extern_objs);
1554 t->extern_objs = NULL;
1559 extern_obj_free(struct rte_swx_pipeline *p)
1561 extern_obj_build_free(p);
1563 /* Extern objects. */
1565 struct extern_obj *elem;
1567 elem = TAILQ_FIRST(&p->extern_objs);
1571 TAILQ_REMOVE(&p->extern_objs, elem, node);
1573 elem->type->destructor(elem->obj);
1579 struct extern_type *elem;
1581 elem = TAILQ_FIRST(&p->extern_types);
1585 TAILQ_REMOVE(&p->extern_types, elem, node);
1588 struct extern_type_member_func *func;
1590 func = TAILQ_FIRST(&elem->funcs);
1594 TAILQ_REMOVE(&elem->funcs, func, node);
1605 static struct extern_func *
1606 extern_func_find(struct rte_swx_pipeline *p, const char *name)
1608 struct extern_func *elem;
1610 TAILQ_FOREACH(elem, &p->extern_funcs, node)
1611 if (strcmp(elem->name, name) == 0)
/* Parse an "f.<func>" reference into its extern function; NULL otherwise. */
static struct extern_func *
extern_func_parse(struct rte_swx_pipeline *p,
		  const char *name)
{
	if (name[0] != 'f' || name[1] != '.')
		return NULL;

	return extern_func_find(p, &name[2]);
}
1627 static struct field *
1628 extern_func_mailbox_field_parse(struct rte_swx_pipeline *p,
1630 struct extern_func **function)
1632 struct extern_func *func;
1634 char *func_name, *field_name;
1636 if ((name[0] != 'f') || (name[1] != '.'))
1639 func_name = strdup(&name[2]);
1643 field_name = strchr(func_name, '.');
1652 func = extern_func_find(p, func_name);
1658 f = struct_type_field_find(func->mailbox_struct_type, field_name);
1672 rte_swx_pipeline_extern_func_register(struct rte_swx_pipeline *p,
1674 const char *mailbox_struct_type_name,
1675 rte_swx_extern_func_t func)
1677 struct extern_func *f;
1678 struct struct_type *mailbox_struct_type;
1682 CHECK_NAME(name, EINVAL);
1683 CHECK(!extern_func_find(p, name), EEXIST);
1685 CHECK_NAME(mailbox_struct_type_name, EINVAL);
1686 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
1687 CHECK(mailbox_struct_type, EINVAL);
1689 CHECK(func, EINVAL);
1691 /* Node allocation. */
1692 f = calloc(1, sizeof(struct extern_func));
1693 CHECK(func, ENOMEM);
1695 /* Node initialization. */
1696 strcpy(f->name, name);
1697 f->mailbox_struct_type = mailbox_struct_type;
1699 f->struct_id = p->n_structs;
1700 f->id = p->n_extern_funcs;
1702 /* Node add to tailq. */
1703 TAILQ_INSERT_TAIL(&p->extern_funcs, f, node);
1704 p->n_extern_funcs++;
/* Build phase: allocate, for every pipeline thread, the runtime array of
 * extern functions and one mailbox buffer per function, and wire each
 * mailbox into the thread's struct table (t->structs[]).
 * NOTE(review): this chunk appears to have lines elided by extraction
 * (braces/returns missing); comments below describe only the visible code.
 */
1711 extern_func_build(struct rte_swx_pipeline *p)
1715 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1716 struct thread *t = &p->threads[i];
1717 struct extern_func *func;
/* Per-thread runtime array, indexed by func->id. */
1719 /* Memory allocation. */
1720 t->extern_funcs = calloc(p->n_extern_funcs,
1721 sizeof(struct extern_func_runtime));
1722 CHECK(t->extern_funcs, ENOMEM);
1724 /* Extern function. */
1725 TAILQ_FOREACH(func, &p->extern_funcs, node) {
1726 struct extern_func_runtime *r =
1727 &t->extern_funcs[func->id];
/* Mailbox size is the registered struct type's size in bytes. */
1728 uint32_t mailbox_size =
1729 func->mailbox_struct_type->n_bits / 8;
1731 r->func = func->func;
1733 r->mailbox = calloc(1, mailbox_size);
1734 CHECK(r->mailbox, ENOMEM);
/* Expose the mailbox through the generic struct table so
 * instructions can address it by struct_id.
 */
1736 t->structs[func->struct_id] = r->mailbox;
/* Tear down everything extern_func_build() allocated; idempotent per
 * thread thanks to the NULL check and the final reset to NULL.
 */
1744 extern_func_build_free(struct rte_swx_pipeline *p)
1748 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1749 struct thread *t = &p->threads[i];
1752 if (!t->extern_funcs)
1755 for (j = 0; j < p->n_extern_funcs; j++) {
1756 struct extern_func_runtime *r = &t->extern_funcs[j];
/* NOTE(review): the per-mailbox free() presumably sits in the elided
 * lines 1757-1760 — confirm against the full file.
 */
1761 free(t->extern_funcs);
1762 t->extern_funcs = NULL;
/* Full teardown: runtime state first, then the registration tailq,
 * popping nodes from the head until the list is empty.
 */
1767 extern_func_free(struct rte_swx_pipeline *p)
1769 extern_func_build_free(p);
1772 struct extern_func *elem;
1774 elem = TAILQ_FIRST(&p->extern_funcs);
1778 TAILQ_REMOVE(&p->extern_funcs, elem, node);
/* Look up a header by exact name on the pipeline's header tailq.
 * NOTE(review): extraction elided lines here (the return statements are
 * not visible); comments describe only what is shown.
 */
1786 static struct header *
1787 header_find(struct rte_swx_pipeline *p, const char *name)
1789 struct header *elem;
1791 TAILQ_FOREACH(elem, &p->headers, node)
1792 if (strcmp(elem->name, name) == 0)
/* Parse a "h.NAME" token: require the "h." prefix, then look up the
 * remainder as the header name.
 */
1798 static struct header *
1799 header_parse(struct rte_swx_pipeline *p,
1802 if (name[0] != 'h' || name[1] != '.')
1805 return header_find(p, &name[2]);
/* Parse a "h.HEADER.FIELD" token: split on the '.' after the header
 * name, resolve the header, then resolve the field within its struct
 * type. On success *header is presumably set to the owning header —
 * the assignment is in the elided lines; confirm against the full file.
 */
1808 static struct field *
1809 header_field_parse(struct rte_swx_pipeline *p,
1811 struct header **header)
1815 char *header_name, *field_name;
1817 if ((name[0] != 'h') || (name[1] != '.'))
/* strdup() so the '.' separator can be overwritten in place; the
 * matching free() is in the elided lines.
 */
1820 header_name = strdup(&name[2]);
1824 field_name = strchr(header_name, '.');
1833 h = header_find(p, header_name);
1839 f = struct_type_field_find(h->st, field_name);
/* Register a packet header of a previously registered struct type.
 * The header count is bounded by the bit width of the per-thread
 * valid_headers mask (one validity bit per header).
 * NOTE(review): extraction elided lines here (CHECKs, field assignments,
 * return); comments describe only the visible code.
 */
1853 rte_swx_pipeline_packet_header_register(struct rte_swx_pipeline *p,
1855 const char *struct_type_name)
1857 struct struct_type *st;
1859 size_t n_headers_max;
1862 CHECK_NAME(name, EINVAL);
1863 CHECK_NAME(struct_type_name, EINVAL);
1865 CHECK(!header_find(p, name), EEXIST);
1867 st = struct_type_find(p, struct_type_name);
/* Each header consumes one bit of thread->valid_headers. */
1870 n_headers_max = RTE_SIZEOF_FIELD(struct thread, valid_headers) * 8;
1871 CHECK(p->n_headers < n_headers_max, ENOSPC);
1873 /* Node allocation. */
1874 h = calloc(1, sizeof(struct header));
1877 /* Node initialization. */
1878 strcpy(h->name, name);
/* New header claims the next struct slot and header id. */
1880 h->struct_id = p->n_structs;
1881 h->id = p->n_headers;
1883 /* Node add to tailq. */
1884 TAILQ_INSERT_TAIL(&p->headers, h, node);
/* Build phase: size the combined header storage (sum of all header struct
 * sizes in bytes), then per thread allocate the header runtime arrays plus
 * flat in/out storage buffers, and carve per-header slices out of the
 * flat buffer.
 * NOTE(review): extraction elided lines (braces/returns) in this chunk.
 */
1892 header_build(struct rte_swx_pipeline *p)
1895 uint32_t n_bytes = 0, i;
1897 TAILQ_FOREACH(h, &p->headers, node) {
1898 n_bytes += h->st->n_bits / 8;
1901 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1902 struct thread *t = &p->threads[i];
1903 uint32_t offset = 0;
1905 t->headers = calloc(p->n_headers,
1906 sizeof(struct header_runtime));
1907 CHECK(t->headers, ENOMEM);
1909 t->headers_out = calloc(p->n_headers,
1910 sizeof(struct header_out_runtime));
1911 CHECK(t->headers_out, ENOMEM);
/* One flat buffer for all headers; same size again for the emit
 * (out) side used by the complex case of emit_handler().
 */
1913 t->header_storage = calloc(1, n_bytes);
1914 CHECK(t->header_storage, ENOMEM);
1916 t->header_out_storage = calloc(1, n_bytes);
1917 CHECK(t->header_out_storage, ENOMEM);
1919 TAILQ_FOREACH(h, &p->headers, node) {
1920 uint8_t *header_storage;
/* Slice the flat buffer: this header's home (ptr0) and its
 * entry in the generic struct table.
 */
1922 header_storage = &t->header_storage[offset];
1923 offset += h->st->n_bits / 8;
1925 t->headers[h->id].ptr0 = header_storage;
1926 t->structs[h->struct_id] = header_storage;
/* Tear down per-thread header runtime state; pointers reset to NULL so
 * a second call is harmless.
 */
1934 header_build_free(struct rte_swx_pipeline *p)
1938 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1939 struct thread *t = &p->threads[i];
1941 free(t->headers_out);
1942 t->headers_out = NULL;
1947 free(t->header_out_storage);
1948 t->header_out_storage = NULL;
1950 free(t->header_storage);
1951 t->header_storage = NULL;
/* Full teardown: runtime state first, then pop and free every header
 * registration node.
 */
1956 header_free(struct rte_swx_pipeline *p)
1958 header_build_free(p);
1961 struct header *elem;
1963 elem = TAILQ_FIRST(&p->headers);
1967 TAILQ_REMOVE(&p->headers, elem, node);
/* Parse an "m.FIELD" token against the registered metadata struct type.
 * Returns NULL (in elided lines) when no metadata type is registered or
 * the "m." prefix is missing.
 */
1975 static struct field *
1976 metadata_field_parse(struct rte_swx_pipeline *p, const char *name)
1978 if (!p->metadata_st)
1981 if (name[0] != 'm' || name[1] != '.')
1984 return struct_type_field_find(p->metadata_st, &name[2]);
/* Register the (single) packet metadata struct type. Can only be done
 * once: a second registration fails via the !p->metadata_st check.
 * NOTE(review): extraction elided lines (p CHECK, return) in this chunk.
 */
1988 rte_swx_pipeline_packet_metadata_register(struct rte_swx_pipeline *p,
1989 const char *struct_type_name)
1991 struct struct_type *st = NULL;
1995 CHECK_NAME(struct_type_name, EINVAL);
1996 st = struct_type_find(p, struct_type_name);
1998 CHECK(!p->metadata_st, EINVAL);
2000 p->metadata_st = st;
/* Metadata claims the next struct slot. */
2001 p->metadata_struct_id = p->n_structs;
/* Build phase: allocate one zeroed metadata buffer per thread and expose
 * it through the generic struct table.
 */
2009 metadata_build(struct rte_swx_pipeline *p)
2011 uint32_t n_bytes = p->metadata_st->n_bits / 8;
2014 /* Thread-level initialization. */
2015 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2016 struct thread *t = &p->threads[i];
2019 metadata = calloc(1, n_bytes);
2020 CHECK(metadata, ENOMEM);
2022 t->metadata = metadata;
2023 t->structs[p->metadata_struct_id] = metadata;
/* Free per-thread metadata buffers (body in elided lines). */
2030 metadata_build_free(struct rte_swx_pipeline *p)
2034 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2035 struct thread *t = &p->threads[i];
/* Full metadata teardown delegates to the build-free path. */
2043 metadata_free(struct rte_swx_pipeline *p)
2045 metadata_build_free(p);
2051 static struct field *
2052 action_field_parse(struct action *action, const char *name);
/* Dispatch a field token to the right parser based on its prefix:
 * "h." header field, "m." metadata field, "t." action (table) field,
 * "e." extern object mailbox field, "f." extern function mailbox field.
 * On success *struct_id receives the owning struct's slot id.
 * NOTE(review): the prefix switch/cases themselves are in elided lines;
 * only the per-case bodies are visible here.
 */
2054 static struct field *
2055 struct_field_parse(struct rte_swx_pipeline *p,
2056 struct action *action,
2058 uint32_t *struct_id)
/* "h." case: header field. */
2065 struct header *header;
2067 f = header_field_parse(p, name, &header);
2071 *struct_id = header->struct_id;
/* "m." case: metadata field. */
2077 f = metadata_field_parse(p, name);
2081 *struct_id = p->metadata_struct_id;
/* "t." case: action data field (struct_id 0 is the action-data slot,
 * assigned in elided lines — confirm against the full file).
 */
2090 f = action_field_parse(action, name);
/* "e." case: extern object mailbox field. */
2100 struct extern_obj *obj;
2102 f = extern_obj_mailbox_field_parse(p, name, &obj);
2106 *struct_id = obj->struct_id;
/* "f." case: extern function mailbox field. */
2112 struct extern_func *func;
2114 f = extern_func_mailbox_field_parse(p, name, &func);
2118 *struct_id = func->struct_id;
/* Advance to the next input port, wrapping with a mask — this assumes
 * n_ports_in is a power of two (enforced elsewhere; confirm against the
 * pipeline build code).
 */
2128 pipeline_port_inc(struct rte_swx_pipeline *p)
2130 p->port_id = (p->port_id + 1) & (p->n_ports_in - 1);
/* Reset a thread's instruction pointer to the start of the main
 * instruction stream.
 */
2134 thread_ip_reset(struct rte_swx_pipeline *p, struct thread *t)
2136 t->ip = p->instructions;
/* Jump a thread's instruction pointer to the given action's
 * instruction stream.
 */
2140 thread_ip_action_call(struct rte_swx_pipeline *p,
2145 t->ip = p->action_instructions[action_id];
/* Advance the current thread's instruction pointer (increment is in
 * elided lines).
 */
2149 thread_ip_inc(struct rte_swx_pipeline *p);
2152 thread_ip_inc(struct rte_swx_pipeline *p)
2154 struct thread *t = &p->threads[p->thread_id];
/* Branchless conditional IP advance: cond is 0 or 1. */
2160 thread_ip_inc_cond(struct thread *t, int cond)
/* Round-robin to the next thread; THREADS_MAX is a power of two, so the
 * mask wraps the index.
 */
2166 thread_yield(struct rte_swx_pipeline *p)
2168 p->thread_id = (p->thread_id + 1) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
/* Branchless conditional yield: advances only when cond is 1. */
2172 thread_yield_cond(struct rte_swx_pipeline *p, int cond)
2174 p->thread_id = (p->thread_id + cond) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
/* Translate "rx m.FIELD": only valid outside actions; the metadata field
 * receives the input port id at run time.
 * NOTE(review): extraction elided lines (token array parameter, CHECK on
 * f, return) in this chunk.
 */
2181 instr_rx_translate(struct rte_swx_pipeline *p,
2182 struct action *action,
2185 struct instruction *instr,
2186 struct instruction_data *data __rte_unused)
2190 CHECK(!action, EINVAL);
2191 CHECK(n_tokens == 2, EINVAL);
2193 f = metadata_field_parse(p, tokens[1]);
2196 instr->type = INSTR_RX;
/* Store byte offset and bit width of the destination metadata field. */
2197 instr->io.io.offset = f->offset / 8;
2198 instr->io.io.n_bits = f->n_bits;
/* Execute rx: poll the current input port; on packet, point t->ptr at the
 * packet payload, clear header state, record the port id into metadata,
 * snapshot the table state, then advance the port round-robin. The IP
 * only advances when a packet was actually received.
 */
2203 instr_rx_exec(struct rte_swx_pipeline *p);
2206 instr_rx_exec(struct rte_swx_pipeline *p)
2208 struct thread *t = &p->threads[p->thread_id];
2209 struct instruction *ip = t->ip;
2210 struct port_in_runtime *port = &p->in[p->port_id];
2211 struct rte_swx_pkt *pkt = &t->pkt;
2215 pkt_received = port->pkt_rx(port->obj, pkt);
2216 t->ptr = &pkt->pkt[pkt->offset];
/* Warm the cache line holding the start of packet data. */
2217 rte_prefetch0(t->ptr);
2219 TRACE("[Thread %2u] rx %s from port %u\n",
2221 pkt_received ? "1 pkt" : "0 pkts",
/* Fresh packet: no headers parsed yet, nothing queued for emit. */
2225 t->valid_headers = 0;
2226 t->n_headers_out = 0;
2229 METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, p->port_id);
2232 t->table_state = p->table_state;
2235 pipeline_port_inc(p);
2236 thread_ip_inc_cond(t, pkt_received);
/* Translate "tx m.FIELD": the metadata field supplies the output port id
 * at run time.
 * NOTE(review): extraction elided lines (token parameters, CHECK on f,
 * return) in this chunk.
 */
2244 instr_tx_translate(struct rte_swx_pipeline *p,
2245 struct action *action __rte_unused,
2248 struct instruction *instr,
2249 struct instruction_data *data __rte_unused)
2253 CHECK(n_tokens == 2, EINVAL);
2255 f = metadata_field_parse(p, tokens[1]);
2258 instr->type = INSTR_TX;
2259 instr->io.io.offset = f->offset / 8;
2260 instr->io.io.n_bits = f->n_bits;
/* Write the emitted headers back in front of the packet payload before
 * transmission. Three paths, fastest first:
 * 1) single contiguous run ending exactly at the payload — just grow the
 *    packet window backwards, no copy;
 * 2) one encapsulating header (h0, still at its home storage) followed by
 *    a contiguous run (h1) ending at the payload — one memcpy of h0;
 * 3) anything else — gather all runs into header_out_storage, then copy
 *    the whole blob in front of the payload.
 */
2265 emit_handler(struct thread *t)
2267 struct header_out_runtime *h0 = &t->headers_out[0];
2268 struct header_out_runtime *h1 = &t->headers_out[1];
2269 uint32_t offset = 0, i;
2271 /* No header change or header decapsulation. */
2272 if ((t->n_headers_out == 1) &&
2273 (h0->ptr + h0->n_bytes == t->ptr)) {
2274 TRACE("Emit handler: no header change or header decap.\n");
2276 t->pkt.offset -= h0->n_bytes;
2277 t->pkt.length += h0->n_bytes;
2282 /* Header encapsulation (optionally, with prior header decasulation). */
2283 if ((t->n_headers_out == 2) &&
2284 (h1->ptr + h1->n_bytes == t->ptr) &&
2285 (h0->ptr == h0->ptr0)) {
2288 TRACE("Emit handler: header encapsulation.\n");
2290 offset = h0->n_bytes + h1->n_bytes;
2291 memcpy(t->ptr - offset, h0->ptr, h0->n_bytes);
2292 t->pkt.offset -= offset;
2293 t->pkt.length += offset;
2298 /* Header insertion. */
2301 /* Header extraction. */
2304 /* For any other case. */
2305 TRACE("Emit handler: complex case.\n");
/* Gather phase: concatenate every out-header run into scratch storage. */
2307 for (i = 0; i < t->n_headers_out; i++) {
2308 struct header_out_runtime *h = &t->headers_out[i];
2310 memcpy(&t->header_out_storage[offset], h->ptr, h->n_bytes);
2311 offset += h->n_bytes;
/* Scatter phase: place the gathered blob immediately before payload. */
2315 memcpy(t->ptr - offset, t->header_out_storage, offset);
2316 t->pkt.offset -= offset;
2317 t->pkt.length += offset;
/* Execute tx: read the output port id from metadata, flush emitted
 * headers into the packet (emit_handler is called in elided lines —
 * confirm against the full file), hand the packet to the port, then
 * restart this thread at the top of the instruction stream.
 */
2322 instr_tx_exec(struct rte_swx_pipeline *p);
2325 instr_tx_exec(struct rte_swx_pipeline *p)
2327 struct thread *t = &p->threads[p->thread_id];
2328 struct instruction *ip = t->ip;
2329 uint64_t port_id = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
2330 struct port_out_runtime *port = &p->out[port_id];
2331 struct rte_swx_pkt *pkt = &t->pkt;
2333 TRACE("[Thread %2u]: tx 1 pkt to port %u\n",
2341 port->pkt_tx(port->obj, pkt);
/* Packet done: next packet starts from the first instruction. */
2344 thread_ip_reset(p, t);
/* Translate "extract h.NAME": only valid outside actions; records the
 * header id, struct slot, and byte size for slot 0 of the (possibly
 * later fused) extract instruction.
 * NOTE(review): extraction elided lines (CHECKs, returns) in this chunk.
 */
2352 instr_hdr_extract_translate(struct rte_swx_pipeline *p,
2353 struct action *action,
2356 struct instruction *instr,
2357 struct instruction_data *data __rte_unused)
2361 CHECK(!action, EINVAL);
2362 CHECK(n_tokens == 2, EINVAL);
2364 h = header_parse(p, tokens[1]);
2367 instr->type = INSTR_HDR_EXTRACT;
2368 instr->io.hdr.header_id[0] = h->id;
2369 instr->io.hdr.struct_id[0] = h->struct_id;
2370 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
/* Common body for 1..8 fused extracts: each header is parsed in place —
 * the struct-table entry is pointed straight at the packet bytes (zero
 * copy) and the header's validity bit is set. Packet offset/length
 * bookkeeping is updated from cached locals at the end.
 */
2375 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract);
2378 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract)
2380 struct thread *t = &p->threads[p->thread_id];
2381 struct instruction *ip = t->ip;
2382 uint64_t valid_headers = t->valid_headers;
2383 uint8_t *ptr = t->ptr;
2384 uint32_t offset = t->pkt.offset;
2385 uint32_t length = t->pkt.length;
2388 for (i = 0; i < n_extract; i++) {
2389 uint32_t header_id = ip->io.hdr.header_id[i];
2390 uint32_t struct_id = ip->io.hdr.struct_id[i];
2391 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
2393 TRACE("[Thread %2u]: extract header %u (%u bytes)\n",
/* Zero-copy extract: header lives inside the packet buffer. */
2399 t->structs[struct_id] = ptr;
2400 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
/* Write back the cached state. */
2409 t->valid_headers = valid_headers;
2412 t->pkt.offset = offset;
2413 t->pkt.length = length;
/* Single extract. */
2418 instr_hdr_extract_exec(struct rte_swx_pipeline *p)
2420 __instr_hdr_extract_exec(p, 1);
/* Fused variants: 2..8 consecutive extract instructions collapsed into
 * one exec by the instruction optimizer.
 */
2427 instr_hdr_extract2_exec(struct rte_swx_pipeline *p)
2429 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
2432 __instr_hdr_extract_exec(p, 2);
2439 instr_hdr_extract3_exec(struct rte_swx_pipeline *p)
2441 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
2444 __instr_hdr_extract_exec(p, 3);
2451 instr_hdr_extract4_exec(struct rte_swx_pipeline *p)
2453 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
2456 __instr_hdr_extract_exec(p, 4);
2463 instr_hdr_extract5_exec(struct rte_swx_pipeline *p)
2465 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
2468 __instr_hdr_extract_exec(p, 5);
2475 instr_hdr_extract6_exec(struct rte_swx_pipeline *p)
2477 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
2480 __instr_hdr_extract_exec(p, 6);
2487 instr_hdr_extract7_exec(struct rte_swx_pipeline *p)
2489 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
2492 __instr_hdr_extract_exec(p, 7);
2499 instr_hdr_extract8_exec(struct rte_swx_pipeline *p)
2501 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
2504 __instr_hdr_extract_exec(p, 8);
/* Translate "emit h.NAME": records header id, struct slot, and byte size
 * for slot 0 of the (possibly later fused) emit instruction.
 * NOTE(review): extraction elided lines (CHECKs, returns, else branches)
 * in this chunk.
 */
2514 instr_hdr_emit_translate(struct rte_swx_pipeline *p,
2515 struct action *action __rte_unused,
2518 struct instruction *instr,
2519 struct instruction_data *data __rte_unused)
2523 CHECK(n_tokens == 2, EINVAL);
2525 h = header_parse(p, tokens[1]);
2528 instr->type = INSTR_HDR_EMIT;
2529 instr->io.hdr.header_id[0] = h->id;
2530 instr->io.hdr.struct_id[0] = h->struct_id;
2531 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
/* Common body for 1..8 fused emits: append each header to the list of
 * out-header runs, coalescing a header that is contiguous in memory with
 * the tail of the current run (ho) instead of starting a new run. This
 * keeps emit_handler()'s fast paths applicable.
 */
2536 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit);
2539 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit)
2541 struct thread *t = &p->threads[p->thread_id];
2542 struct instruction *ip = t->ip;
2543 uint32_t n_headers_out = t->n_headers_out;
/* ho tracks the current (last) out-header run being built. */
2544 struct header_out_runtime *ho = &t->headers_out[n_headers_out - 1];
2545 uint8_t *ho_ptr = NULL;
2546 uint32_t ho_nbytes = 0, i;
2548 for (i = 0; i < n_emit; i++) {
2549 uint32_t header_id = ip->io.hdr.header_id[i];
2550 uint32_t struct_id = ip->io.hdr.struct_id[i];
2551 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
2553 struct header_runtime *hi = &t->headers[header_id];
2554 uint8_t *hi_ptr = t->structs[struct_id];
2556 TRACE("[Thread %2u]: emit header %u\n",
/* First emit overall: start run 0 at this header. */
2562 if (!t->n_headers_out) {
2563 ho = &t->headers_out[0];
2565 ho->ptr0 = hi->ptr0;
2569 ho_nbytes = n_bytes;
/* Otherwise continue with the existing run's size... */
2576 ho_nbytes = ho->n_bytes;
/* ...and coalesce when this header is contiguous with it. */
2580 if (ho_ptr + ho_nbytes == hi_ptr) {
2581 ho_nbytes += n_bytes;
/* Non-contiguous: close the current run and open a new one
 * (run-advance statements are in elided lines).
 */
2583 ho->n_bytes = ho_nbytes;
2586 ho->ptr0 = hi->ptr0;
2590 ho_nbytes = n_bytes;
/* Flush the final run size and the updated run count. */
2596 ho->n_bytes = ho_nbytes;
2597 t->n_headers_out = n_headers_out;
/* Single emit. */
2601 instr_hdr_emit_exec(struct rte_swx_pipeline *p)
2603 __instr_hdr_emit_exec(p, 1);
/* Fused emit+tx variants: N emits followed by a tx collapsed into one
 * exec (the tx part runs in elided lines after the emit call).
 */
2610 instr_hdr_emit_tx_exec(struct rte_swx_pipeline *p)
2612 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
2615 __instr_hdr_emit_exec(p, 1);
2620 instr_hdr_emit2_tx_exec(struct rte_swx_pipeline *p)
2622 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
2625 __instr_hdr_emit_exec(p, 2);
2630 instr_hdr_emit3_tx_exec(struct rte_swx_pipeline *p)
2632 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
2635 __instr_hdr_emit_exec(p, 3);
2640 instr_hdr_emit4_tx_exec(struct rte_swx_pipeline *p)
2642 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
2645 __instr_hdr_emit_exec(p, 4);
2650 instr_hdr_emit5_tx_exec(struct rte_swx_pipeline *p)
2652 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
2655 __instr_hdr_emit_exec(p, 5);
2660 instr_hdr_emit6_tx_exec(struct rte_swx_pipeline *p)
2662 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
2665 __instr_hdr_emit_exec(p, 6);
2670 instr_hdr_emit7_tx_exec(struct rte_swx_pipeline *p)
2672 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
2675 __instr_hdr_emit_exec(p, 7);
2680 instr_hdr_emit8_tx_exec(struct rte_swx_pipeline *p)
2682 TRACE("[Thread %2u] *** The next 9 instructions are fused. ***\n",
2685 __instr_hdr_emit_exec(p, 8);
/* Translate "validate h.NAME": stores only the header id; execution sets
 * the header's validity bit.
 * NOTE(review): extraction elided lines (CHECK on h, returns,
 * thread_ip_inc calls) in this chunk.
 */
2693 instr_hdr_validate_translate(struct rte_swx_pipeline *p,
2694 struct action *action __rte_unused,
2697 struct instruction *instr,
2698 struct instruction_data *data __rte_unused)
2702 CHECK(n_tokens == 2, EINVAL);
2704 h = header_parse(p, tokens[1]);
2707 instr->type = INSTR_HDR_VALIDATE;
2708 instr->valid.header_id = h->id;
/* Execute validate: set the header's bit in the valid-headers mask. */
2713 instr_hdr_validate_exec(struct rte_swx_pipeline *p)
2715 struct thread *t = &p->threads[p->thread_id];
2716 struct instruction *ip = t->ip;
2717 uint32_t header_id = ip->valid.header_id;
2719 TRACE("[Thread %2u] validate header %u\n", p->thread_id, header_id);
2722 t->valid_headers = MASK64_BIT_SET(t->valid_headers, header_id);
/* Translate "invalidate h.NAME": same encoding as validate, different
 * instruction type.
 */
2732 instr_hdr_invalidate_translate(struct rte_swx_pipeline *p,
2733 struct action *action __rte_unused,
2736 struct instruction *instr,
2737 struct instruction_data *data __rte_unused)
2741 CHECK(n_tokens == 2, EINVAL);
2743 h = header_parse(p, tokens[1]);
2746 instr->type = INSTR_HDR_INVALIDATE;
2747 instr->valid.header_id = h->id;
/* Execute invalidate: clear the header's bit in the valid-headers mask. */
2752 instr_hdr_invalidate_exec(struct rte_swx_pipeline *p)
2754 struct thread *t = &p->threads[p->thread_id];
2755 struct instruction *ip = t->ip;
2756 uint32_t header_id = ip->valid.header_id;
2758 TRACE("[Thread %2u] invalidate header %u\n", p->thread_id, header_id);
2761 t->valid_headers = MASK64_BIT_CLR(t->valid_headers, header_id);
/* Forward declaration: table lookup by name (defined later in the file). */
2770 static struct table *
2771 table_find(struct rte_swx_pipeline *p, const char *name);
/* Translate "table NAME": only valid outside actions.
 * NOTE(review): extraction elided lines (CHECK on t, return) here.
 */
2774 instr_table_translate(struct rte_swx_pipeline *p,
2775 struct action *action,
2778 struct instruction *instr,
2779 struct instruction_data *data __rte_unused)
2783 CHECK(!action, EINVAL);
2784 CHECK(n_tokens == 2, EINVAL);
2786 t = table_find(p, tokens[1]);
2789 instr->type = INSTR_TABLE;
2790 instr->table.table_id = t->id;
/* Execute table lookup: call the table's lookup function; if it reports
 * "not done" the thread yields and retries later (elided lines). On a
 * miss, fall back to the default action and its action data. The matched
 * action data is published as struct slot 0, then control transfers to
 * the action's instruction stream.
 */
2795 instr_table_exec(struct rte_swx_pipeline *p)
2797 struct thread *t = &p->threads[p->thread_id];
2798 struct instruction *ip = t->ip;
2799 uint32_t table_id = ip->table.table_id;
2800 struct rte_swx_table_state *ts = &t->table_state[table_id];
2801 struct table_runtime *table = &t->tables[table_id];
2803 uint8_t *action_data;
2807 done = table->func(ts->obj,
2815 TRACE("[Thread %2u] table %u (not finalized)\n",
/* Miss: use the table's default action. */
2823 action_id = hit ? action_id : ts->default_action_id;
2824 action_data = hit ? action_data : ts->default_action_data;
2826 TRACE("[Thread %2u] table %u (%s, action %u)\n",
2829 hit ? "hit" : "miss",
2830 (uint32_t)action_id;
2832 t->action_id = action_id;
/* Struct slot 0 is reserved for the current action data ("t." fields). */
2833 t->structs[0] = action_data;
2837 thread_ip_action_call(p, t, action_id);
/* Translate "extern TOKEN": prefix 'e' selects an extern object member
 * function ("e.OBJ.FUNC"), prefix 'f' an extern function ("f.FUNC").
 * NOTE(review): extraction elided lines (returns, final error path) here.
 */
2844 instr_extern_translate(struct rte_swx_pipeline *p,
2845 struct action *action __rte_unused,
2848 struct instruction *instr,
2849 struct instruction_data *data __rte_unused)
2851 char *token = tokens[1];
2853 CHECK(n_tokens == 2, EINVAL);
2855 if (token[0] == 'e') {
2856 struct extern_obj *obj;
2857 struct extern_type_member_func *func;
2859 func = extern_obj_member_func_parse(p, token, &obj);
2860 CHECK(func, EINVAL);
2862 instr->type = INSTR_EXTERN_OBJ;
2863 instr->ext_obj.ext_obj_id = obj->id;
2864 instr->ext_obj.func_id = func->id;
2869 if (token[0] == 'f') {
2870 struct extern_func *func;
2872 func = extern_func_parse(p, token);
2873 CHECK(func, EINVAL);
2875 instr->type = INSTR_EXTERN_FUNC;
2876 instr->ext_func.ext_func_id = func->id;
/* Execute an extern object member function: the callee communicates via
 * the per-thread mailbox. A zero "done" means the call must be resumed:
 * the IP does not advance and the thread yields (cond-IP-inc takes done,
 * cond-yield takes done^1).
 */
2885 instr_extern_obj_exec(struct rte_swx_pipeline *p)
2887 struct thread *t = &p->threads[p->thread_id];
2888 struct instruction *ip = t->ip;
2889 uint32_t obj_id = ip->ext_obj.ext_obj_id;
2890 uint32_t func_id = ip->ext_obj.func_id;
2891 struct extern_obj_runtime *obj = &t->extern_objs[obj_id];
2892 rte_swx_extern_type_member_func_t func = obj->funcs[func_id];
2894 TRACE("[Thread %2u] extern obj %u member func %u\n",
2899 /* Extern object member function execute. */
2900 uint32_t done = func(obj->obj, obj->mailbox);
2903 thread_ip_inc_cond(t, done);
2904 thread_yield_cond(p, done ^ 1);
/* Execute an extern (free) function: same resume/yield protocol as the
 * object member function, mailbox-only interface.
 */
2908 instr_extern_func_exec(struct rte_swx_pipeline *p)
2910 struct thread *t = &p->threads[p->thread_id];
2911 struct instruction *ip = t->ip;
2912 uint32_t ext_func_id = ip->ext_func.ext_func_id;
2913 struct extern_func_runtime *ext_func = &t->extern_funcs[ext_func_id];
2914 rte_swx_extern_func_t func = ext_func->func;
2916 TRACE("[Thread %2u] extern func %u\n",
2920 /* Extern function execute. */
2921 uint32_t done = func(ext_func->mailbox);
2924 thread_ip_inc_cond(t, done);
2925 thread_yield_cond(p, done ^ 1);
/* Translate "mov DST SRC". Three encodings: MOV (same endianness on both
 * sides), MOV_S (exactly one side is a network-byte-order header field,
 * so a swap is needed), MOV_I (immediate source). The immediate path is
 * taken when SRC does not parse as a field (elided branch).
 * NOTE(review): extraction elided lines (CHECKs, returns, the h-dst
 * condition guarding htonl) in this chunk.
 */
2932 instr_mov_translate(struct rte_swx_pipeline *p,
2933 struct action *action,
2936 struct instruction *instr,
2937 struct instruction_data *data __rte_unused)
2939 char *dst = tokens[1], *src = tokens[2];
2940 struct field *fdst, *fsrc;
2941 uint32_t dst_struct_id, src_struct_id, src_val;
2943 CHECK(n_tokens == 3, EINVAL);
/* Destination must be a header or metadata field (no action data). */
2945 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
2946 CHECK(fdst, EINVAL);
2949 fsrc = struct_field_parse(p, action, src, &src_struct_id);
2951 instr->type = INSTR_MOV;
/* Swap variant when exactly one operand is a header field. */
2952 if ((dst[0] == 'h' && src[0] != 'h') ||
2953 (dst[0] != 'h' && src[0] == 'h'))
2954 instr->type = INSTR_MOV_S;
2956 instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
2957 instr->mov.dst.n_bits = fdst->n_bits;
2958 instr->mov.dst.offset = fdst->offset / 8;
2959 instr->mov.src.struct_id = (uint8_t)src_struct_id;
2960 instr->mov.src.n_bits = fsrc->n_bits;
2961 instr->mov.src.offset = fsrc->offset / 8;
/* Immediate path: SRC is a number (base auto-detected by strtoul). */
2966 src_val = strtoul(src, &src, 0);
2967 CHECK(!src[0], EINVAL);
/* Presumably applied only for header destinations (guard elided) —
 * converts the immediate to network byte order; confirm in full file.
 */
2970 src_val = htonl(src_val);
2972 instr->type = INSTR_MOV_I;
2973 instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
2974 instr->mov.dst.n_bits = fdst->n_bits;
2975 instr->mov.dst.offset = fdst->offset / 8;
2976 instr->mov.src_val = (uint32_t)src_val;
/* Execute MOV (no byte swap; body in elided lines). */
2981 instr_mov_exec(struct rte_swx_pipeline *p)
2983 struct thread *t = &p->threads[p->thread_id];
2984 struct instruction *ip = t->ip;
2986 TRACE("[Thread %2u] mov\n",
/* Execute MOV_S (with byte swap; body in elided lines). */
2996 instr_mov_s_exec(struct rte_swx_pipeline *p)
2998 struct thread *t = &p->threads[p->thread_id];
2999 struct instruction *ip = t->ip;
3001 TRACE("[Thread %2u] mov (s)\n",
/* Execute MOV_I (immediate; body in elided lines). */
3011 instr_mov_i_exec(struct rte_swx_pipeline *p)
3013 struct thread *t = &p->threads[p->thread_id];
3014 struct instruction *ip = t->ip;
3016 TRACE("[Thread %2u] mov m.f %x\n",
/* Translate "dma h.NAME t.FIELD": only valid inside an action. Copies a
 * whole header's worth of bytes from the action data (table entry) into
 * the header. Slot 0 holds the first (possibly fused) transfer.
 * NOTE(review): extraction elided lines (CHECKs, return) in this chunk.
 */
3030 instr_dma_translate(struct rte_swx_pipeline *p,
3031 struct action *action,
3034 struct instruction *instr,
3035 struct instruction_data *data __rte_unused)
3037 char *dst = tokens[1];
3038 char *src = tokens[2];
3042 CHECK(action, EINVAL);
3043 CHECK(n_tokens == 3, EINVAL);
3045 h = header_parse(p, dst);
3048 tf = action_field_parse(action, src);
3051 instr->type = INSTR_DMA_HT;
3052 instr->dma.dst.header_id[0] = h->id;
3053 instr->dma.dst.struct_id[0] = h->struct_id;
3054 instr->dma.n_bytes[0] = h->st->n_bits / 8;
3055 instr->dma.src.offset[0] = tf->offset / 8;
/* Common body for 1..8 fused header<-action-data DMA transfers. For an
 * invalid header the copy targets its home storage (ptr0) rather than
 * the packet; afterwards the header is marked valid and its struct-table
 * entry points at the written bytes.
 */
3061 __instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma);
3064 __instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma)
3066 struct thread *t = &p->threads[p->thread_id];
3067 struct instruction *ip = t->ip;
/* Struct slot 0 is the current action data. */
3068 uint8_t *action_data = t->structs[0];
3069 uint64_t valid_headers = t->valid_headers;
3072 for (i = 0; i < n_dma; i++) {
3073 uint32_t header_id = ip->dma.dst.header_id[i];
3074 uint32_t struct_id = ip->dma.dst.struct_id[i];
3075 uint32_t offset = ip->dma.src.offset[i];
3076 uint32_t n_bytes = ip->dma.n_bytes[i];
3078 struct header_runtime *h = &t->headers[header_id];
3079 uint8_t *h_ptr0 = h->ptr0;
3080 uint8_t *h_ptr = t->structs[struct_id];
/* Valid header: overwrite in place; invalid: write to home storage. */
3082 void *dst = MASK64_BIT_GET(valid_headers, header_id) ?
3084 void *src = &action_data[offset];
3086 TRACE("[Thread %2u] dma h.s t.f\n", p->thread_id);
3089 memcpy(dst, src, n_bytes);
3090 t->structs[struct_id] = dst;
3091 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
3094 t->valid_headers = valid_headers;
/* Single DMA. */
3098 instr_dma_ht_exec(struct rte_swx_pipeline *p)
3100 __instr_dma_ht_exec(p, 1);
/* Fused variants: 2..8 consecutive DMA instructions in one exec. */
3107 instr_dma_ht2_exec(struct rte_swx_pipeline *p)
3109 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
3112 __instr_dma_ht_exec(p, 2);
3119 instr_dma_ht3_exec(struct rte_swx_pipeline *p)
3121 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
3124 __instr_dma_ht_exec(p, 3);
3131 instr_dma_ht4_exec(struct rte_swx_pipeline *p)
3133 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
3136 __instr_dma_ht_exec(p, 4);
3143 instr_dma_ht5_exec(struct rte_swx_pipeline *p)
3145 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
3148 __instr_dma_ht_exec(p, 5);
3155 instr_dma_ht6_exec(struct rte_swx_pipeline *p)
3157 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
3160 __instr_dma_ht_exec(p, 6);
3167 instr_dma_ht7_exec(struct rte_swx_pipeline *p)
3169 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
3172 __instr_dma_ht_exec(p, 7);
3179 instr_dma_ht8_exec(struct rte_swx_pipeline *p)
3181 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
3184 __instr_dma_ht_exec(p, 8);
/* Translate "add DST SRC". Operand-location suffixes select the variant:
 * HM (header<-metadata), MH (metadata<-header), HH (header<-header),
 * MI/HI (immediate into metadata/header). The immediate path is taken
 * when SRC does not parse as a field (elided branch).
 * NOTE(review): extraction elided lines (CHECKs, returns, the dst[0]
 * condition choosing MI vs HI) in this chunk.
 */
3194 instr_alu_add_translate(struct rte_swx_pipeline *p,
3195 struct action *action,
3198 struct instruction *instr,
3199 struct instruction_data *data __rte_unused)
3201 char *dst = tokens[1], *src = tokens[2];
3202 struct field *fdst, *fsrc;
3203 uint32_t dst_struct_id, src_struct_id, src_val;
3205 CHECK(n_tokens == 3, EINVAL);
3207 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3208 CHECK(fdst, EINVAL);
3210 /* ADD, ADD_HM, ADD_MH, ADD_HH. */
3211 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3213 instr->type = INSTR_ALU_ADD;
3214 if (dst[0] == 'h' && src[0] == 'm')
3215 instr->type = INSTR_ALU_ADD_HM;
3216 if (dst[0] == 'm' && src[0] == 'h')
3217 instr->type = INSTR_ALU_ADD_MH;
3218 if (dst[0] == 'h' && src[0] == 'h')
3219 instr->type = INSTR_ALU_ADD_HH;
3221 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3222 instr->alu.dst.n_bits = fdst->n_bits;
3223 instr->alu.dst.offset = fdst->offset / 8;
3224 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3225 instr->alu.src.n_bits = fsrc->n_bits;
3226 instr->alu.src.offset = fsrc->offset / 8;
3230 /* ADD_MI, ADD_HI. */
3231 src_val = strtoul(src, &src, 0);
3232 CHECK(!src[0], EINVAL);
3234 instr->type = INSTR_ALU_ADD_MI;
3236 instr->type = INSTR_ALU_ADD_HI;
3238 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3239 instr->alu.dst.n_bits = fdst->n_bits;
3240 instr->alu.dst.offset = fdst->offset / 8;
3241 instr->alu.src_val = (uint32_t)src_val;
/* Translate "sub DST SRC": mirrors the add translation with SUB_* types. */
3246 instr_alu_sub_translate(struct rte_swx_pipeline *p,
3247 struct action *action,
3250 struct instruction *instr,
3251 struct instruction_data *data __rte_unused)
3253 char *dst = tokens[1], *src = tokens[2];
3254 struct field *fdst, *fsrc;
3255 uint32_t dst_struct_id, src_struct_id, src_val;
3257 CHECK(n_tokens == 3, EINVAL);
3259 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3260 CHECK(fdst, EINVAL);
3262 /* SUB, SUB_HM, SUB_MH, SUB_HH. */
3263 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3265 instr->type = INSTR_ALU_SUB;
3266 if (dst[0] == 'h' && src[0] == 'm')
3267 instr->type = INSTR_ALU_SUB_HM;
3268 if (dst[0] == 'm' && src[0] == 'h')
3269 instr->type = INSTR_ALU_SUB_MH;
3270 if (dst[0] == 'h' && src[0] == 'h')
3271 instr->type = INSTR_ALU_SUB_HH;
3273 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3274 instr->alu.dst.n_bits = fdst->n_bits;
3275 instr->alu.dst.offset = fdst->offset / 8;
3276 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3277 instr->alu.src.n_bits = fsrc->n_bits;
3278 instr->alu.src.offset = fsrc->offset / 8;
3282 /* SUB_MI, SUB_HI. */
3283 src_val = strtoul(src, &src, 0);
3284 CHECK(!src[0], EINVAL);
3286 instr->type = INSTR_ALU_SUB_MI;
3288 instr->type = INSTR_ALU_SUB_HI;
3290 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3291 instr->alu.dst.n_bits = fdst->n_bits;
3292 instr->alu.dst.offset = fdst->offset / 8;
3293 instr->alu.src_val = (uint32_t)src_val;
/* Translate "ckadd DST SRC" (Internet checksum add). DST must be a
 * 16-bit header field. SRC is either another header field
 * (CKADD_FIELD) or an entire header (CKADD_STRUCT; the 20-byte case —
 * e.g. an IPv4 header without options — gets the specialized
 * CKADD_STRUCT20 type).
 * NOTE(review): extraction elided lines (CHECKs, returns) in this chunk.
 */
3298 instr_alu_ckadd_translate(struct rte_swx_pipeline *p,
3299 struct action *action __rte_unused,
3302 struct instruction *instr,
3303 struct instruction_data *data __rte_unused)
3305 char *dst = tokens[1], *src = tokens[2];
3306 struct header *hdst, *hsrc;
3307 struct field *fdst, *fsrc;
3309 CHECK(n_tokens == 3, EINVAL);
/* Checksum accumulators are 16-bit by definition. */
3311 fdst = header_field_parse(p, dst, &hdst);
3312 CHECK(fdst && (fdst->n_bits == 16), EINVAL);
3315 fsrc = header_field_parse(p, src, &hsrc);
3317 instr->type = INSTR_ALU_CKADD_FIELD;
3318 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
3319 instr->alu.dst.n_bits = fdst->n_bits;
3320 instr->alu.dst.offset = fdst->offset / 8;
3321 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
3322 instr->alu.src.n_bits = fsrc->n_bits;
3323 instr->alu.src.offset = fsrc->offset / 8;
3327 /* CKADD_STRUCT, CKADD_STRUCT20. */
3328 hsrc = header_parse(p, src);
3329 CHECK(hsrc, EINVAL);
3331 instr->type = INSTR_ALU_CKADD_STRUCT;
3332 if ((hsrc->st->n_bits / 8) == 20)
3333 instr->type = INSTR_ALU_CKADD_STRUCT20;
3335 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
3336 instr->alu.dst.n_bits = fdst->n_bits;
3337 instr->alu.dst.offset = fdst->offset / 8;
3338 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
/* Whole-struct source: size in bits, no field offset. */
3339 instr->alu.src.n_bits = hsrc->st->n_bits;
3340 instr->alu.src.offset = 0; /* Unused. */
/* Translate "cksub DST SRC" (checksum subtract): field-to-field only,
 * same 16-bit destination constraint.
 */
3345 instr_alu_cksub_translate(struct rte_swx_pipeline *p,
3346 struct action *action __rte_unused,
3349 struct instruction *instr,
3350 struct instruction_data *data __rte_unused)
3352 char *dst = tokens[1], *src = tokens[2];
3353 struct header *hdst, *hsrc;
3354 struct field *fdst, *fsrc;
3356 CHECK(n_tokens == 3, EINVAL);
3358 fdst = header_field_parse(p, dst, &hdst);
3359 CHECK(fdst && (fdst->n_bits == 16), EINVAL);
3361 fsrc = header_field_parse(p, src, &hsrc);
3362 CHECK(fsrc, EINVAL);
3364 instr->type = INSTR_ALU_CKSUB_FIELD;
3365 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
3366 instr->alu.dst.n_bits = fdst->n_bits;
3367 instr->alu.dst.offset = fdst->offset / 8;
3368 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
3369 instr->alu.src.n_bits = fsrc->n_bits;
3370 instr->alu.src.offset = fsrc->offset / 8;
/* Translate "shl DST SRC" (shift left): same variant scheme as add/sub —
 * HM/MH/HH by operand location, MI/HI for an immediate shift amount.
 * NOTE(review): extraction elided lines (CHECKs, returns, MI/HI guard)
 * in this chunk.
 */
3375 instr_alu_shl_translate(struct rte_swx_pipeline *p,
3376 struct action *action,
3379 struct instruction *instr,
3380 struct instruction_data *data __rte_unused)
3382 char *dst = tokens[1], *src = tokens[2];
3383 struct field *fdst, *fsrc;
3384 uint32_t dst_struct_id, src_struct_id, src_val;
3386 CHECK(n_tokens == 3, EINVAL);
3388 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3389 CHECK(fdst, EINVAL);
3391 /* SHL, SHL_HM, SHL_MH, SHL_HH. */
3392 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3394 instr->type = INSTR_ALU_SHL;
3395 if (dst[0] == 'h' && src[0] == 'm')
3396 instr->type = INSTR_ALU_SHL_HM;
3397 if (dst[0] == 'm' && src[0] == 'h')
3398 instr->type = INSTR_ALU_SHL_MH;
3399 if (dst[0] == 'h' && src[0] == 'h')
3400 instr->type = INSTR_ALU_SHL_HH;
3402 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3403 instr->alu.dst.n_bits = fdst->n_bits;
3404 instr->alu.dst.offset = fdst->offset / 8;
3405 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3406 instr->alu.src.n_bits = fsrc->n_bits;
3407 instr->alu.src.offset = fsrc->offset / 8;
3411 /* SHL_MI, SHL_HI. */
3412 src_val = strtoul(src, &src, 0);
3413 CHECK(!src[0], EINVAL);
3415 instr->type = INSTR_ALU_SHL_MI;
3417 instr->type = INSTR_ALU_SHL_HI;
3419 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3420 instr->alu.dst.n_bits = fdst->n_bits;
3421 instr->alu.dst.offset = fdst->offset / 8;
3422 instr->alu.src_val = (uint32_t)src_val;
/* Translate "shr DST SRC" (shift right): mirrors shl with SHR_* types. */
3427 instr_alu_shr_translate(struct rte_swx_pipeline *p,
3428 struct action *action,
3431 struct instruction *instr,
3432 struct instruction_data *data __rte_unused)
3434 char *dst = tokens[1], *src = tokens[2];
3435 struct field *fdst, *fsrc;
3436 uint32_t dst_struct_id, src_struct_id, src_val;
3438 CHECK(n_tokens == 3, EINVAL);
3440 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3441 CHECK(fdst, EINVAL);
3443 /* SHR, SHR_HM, SHR_MH, SHR_HH. */
3444 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3446 instr->type = INSTR_ALU_SHR;
3447 if (dst[0] == 'h' && src[0] == 'm')
3448 instr->type = INSTR_ALU_SHR_HM;
3449 if (dst[0] == 'm' && src[0] == 'h')
3450 instr->type = INSTR_ALU_SHR_MH;
3451 if (dst[0] == 'h' && src[0] == 'h')
3452 instr->type = INSTR_ALU_SHR_HH;
3454 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3455 instr->alu.dst.n_bits = fdst->n_bits;
3456 instr->alu.dst.offset = fdst->offset / 8;
3457 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3458 instr->alu.src.n_bits = fsrc->n_bits;
3459 instr->alu.src.offset = fsrc->offset / 8;
3463 /* SHR_MI, SHR_HI. */
3464 src_val = strtoul(src, &src, 0);
3465 CHECK(!src[0], EINVAL);
3467 instr->type = INSTR_ALU_SHR_MI;
3469 instr->type = INSTR_ALU_SHR_HI;
3471 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3472 instr->alu.dst.n_bits = fdst->n_bits;
3473 instr->alu.dst.offset = fdst->offset / 8;
3474 instr->alu.src_val = (uint32_t)src_val;
/* Translate the "and dst src" instruction (dst &= src) into either the
 * field-source form (AND / AND_S) or the immediate-source form (AND_I).
 * NOTE(review): the 'h'/'m' first character of an operand token presumably
 * distinguishes header vs metadata storage (it matches the _S/_HM/_MH
 * opcode naming used by the other ALU translators) — confirm against
 * struct_field_parse().
 */
3479 instr_alu_and_translate(struct rte_swx_pipeline *p,
3480 struct action *action,
3483 struct instruction *instr,
3484 struct instruction_data *data __rte_unused)
3486 char *dst = tokens[1], *src = tokens[2];
3487 struct field *fdst, *fsrc;
3488 uint32_t dst_struct_id, src_struct_id, src_val;
/* Exactly three tokens: opcode, dst, src. */
3490 CHECK(n_tokens == 3, EINVAL);
/* Destination must be a struct field (action arg lookup disabled: NULL). */
3492 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3493 CHECK(fdst, EINVAL);
/* Field source: AND, or AND_S when exactly one operand is header-based. */
3496 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3498 instr->type = INSTR_ALU_AND;
3499 if ((dst[0] == 'h' && src[0] != 'h') ||
3500 (dst[0] != 'h' && src[0] == 'h'))
3501 instr->type = INSTR_ALU_AND_S;
3503 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3504 instr->alu.dst.n_bits = fdst->n_bits;
/* Field offsets are kept in bits; the instruction stores byte offsets. */
3505 instr->alu.dst.offset = fdst->offset / 8;
3506 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3507 instr->alu.src.n_bits = fsrc->n_bits;
3508 instr->alu.src.offset = fsrc->offset / 8;
/* Immediate source: the whole token must parse as a number. */
3513 src_val = strtoul(src, &src, 0);
3514 CHECK(!src[0], EINVAL);
/* NOTE(review): the condition guarding this byte-order conversion is not
 * visible in this excerpt — confirm it applies only to header destinations.
 */
3517 src_val = htonl(src_val);
3519 instr->type = INSTR_ALU_AND_I;
3520 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3521 instr->alu.dst.n_bits = fdst->n_bits;
3522 instr->alu.dst.offset = fdst->offset / 8;
3523 instr->alu.src_val = (uint32_t)src_val;
/* Translate the "or dst src" instruction (dst |= src); same structure as
 * the AND translator: field source -> OR / OR_S, immediate source -> OR_I.
 */
3528 instr_alu_or_translate(struct rte_swx_pipeline *p,
3529 struct action *action,
3532 struct instruction *instr,
3533 struct instruction_data *data __rte_unused)
3535 char *dst = tokens[1], *src = tokens[2];
3536 struct field *fdst, *fsrc;
3537 uint32_t dst_struct_id, src_struct_id, src_val;
/* Exactly three tokens: opcode, dst, src. */
3539 CHECK(n_tokens == 3, EINVAL);
3541 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3542 CHECK(fdst, EINVAL);
/* Field source: OR_S is selected when exactly one operand is header-based. */
3545 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3547 instr->type = INSTR_ALU_OR;
3548 if ((dst[0] == 'h' && src[0] != 'h') ||
3549 (dst[0] != 'h' && src[0] == 'h'))
3550 instr->type = INSTR_ALU_OR_S;
3552 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3553 instr->alu.dst.n_bits = fdst->n_bits;
/* Bit offsets converted to byte offsets for the runtime form. */
3554 instr->alu.dst.offset = fdst->offset / 8;
3555 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3556 instr->alu.src.n_bits = fsrc->n_bits;
3557 instr->alu.src.offset = fsrc->offset / 8;
/* Immediate source: token must be fully numeric. */
3562 src_val = strtoul(src, &src, 0);
3563 CHECK(!src[0], EINVAL);
/* NOTE(review): guard for this htonl() is elided here — see AND translator. */
3566 src_val = htonl(src_val);
3568 instr->type = INSTR_ALU_OR_I;
3569 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3570 instr->alu.dst.n_bits = fdst->n_bits;
3571 instr->alu.dst.offset = fdst->offset / 8;
3572 instr->alu.src_val = (uint32_t)src_val;
/* Translate the "xor dst src" instruction (dst ^= src); same structure as
 * the AND/OR translators: field source -> XOR / XOR_S, immediate -> XOR_I.
 */
3577 instr_alu_xor_translate(struct rte_swx_pipeline *p,
3578 struct action *action,
3581 struct instruction *instr,
3582 struct instruction_data *data __rte_unused)
3584 char *dst = tokens[1], *src = tokens[2];
3585 struct field *fdst, *fsrc;
3586 uint32_t dst_struct_id, src_struct_id, src_val;
/* Exactly three tokens: opcode, dst, src. */
3588 CHECK(n_tokens == 3, EINVAL);
3590 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3591 CHECK(fdst, EINVAL);
/* Field source: XOR_S is selected when exactly one operand is header-based. */
3594 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3596 instr->type = INSTR_ALU_XOR;
3597 if ((dst[0] == 'h' && src[0] != 'h') ||
3598 (dst[0] != 'h' && src[0] == 'h'))
3599 instr->type = INSTR_ALU_XOR_S;
3601 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3602 instr->alu.dst.n_bits = fdst->n_bits;
/* Bit offsets converted to byte offsets for the runtime form. */
3603 instr->alu.dst.offset = fdst->offset / 8;
3604 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3605 instr->alu.src.n_bits = fsrc->n_bits;
3606 instr->alu.src.offset = fsrc->offset / 8;
/* Immediate source: token must be fully numeric. */
3611 src_val = strtoul(src, &src, 0);
3612 CHECK(!src[0], EINVAL);
/* NOTE(review): guard for this htonl() is elided here — see AND translator. */
3615 src_val = htonl(src_val);
3617 instr->type = INSTR_ALU_XOR_I;
3618 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3619 instr->alu.dst.n_bits = fdst->n_bits;
3620 instr->alu.dst.offset = fdst->offset / 8;
3621 instr->alu.src_val = (uint32_t)src_val;
/* "add" executors, one per operand-storage variant chosen at translation
 * time (suffix per the trace tags: none, mh, hm, hh, mi, hi). Each fetches
 * the current thread and its instruction pointer; the arithmetic itself is
 * performed by code not shown in this excerpt.
 */
3626 instr_alu_add_exec(struct rte_swx_pipeline *p)
3628 struct thread *t = &p->threads[p->thread_id];
3629 struct instruction *ip = t->ip;
3631 TRACE("[Thread %2u] add\n", p->thread_id);
3641 instr_alu_add_mh_exec(struct rte_swx_pipeline *p)
3643 struct thread *t = &p->threads[p->thread_id];
3644 struct instruction *ip = t->ip;
3646 TRACE("[Thread %2u] add (mh)\n", p->thread_id);
3656 instr_alu_add_hm_exec(struct rte_swx_pipeline *p)
3658 struct thread *t = &p->threads[p->thread_id];
3659 struct instruction *ip = t->ip;
3661 TRACE("[Thread %2u] add (hm)\n", p->thread_id);
3671 instr_alu_add_hh_exec(struct rte_swx_pipeline *p)
3673 struct thread *t = &p->threads[p->thread_id];
3674 struct instruction *ip = t->ip;
3676 TRACE("[Thread %2u] add (hh)\n", p->thread_id);
3686 instr_alu_add_mi_exec(struct rte_swx_pipeline *p)
3688 struct thread *t = &p->threads[p->thread_id];
3689 struct instruction *ip = t->ip;
3691 TRACE("[Thread %2u] add (mi)\n", p->thread_id);
3701 instr_alu_add_hi_exec(struct rte_swx_pipeline *p)
3703 struct thread *t = &p->threads[p->thread_id];
3704 struct instruction *ip = t->ip;
3706 TRACE("[Thread %2u] add (hi)\n", p->thread_id);
/* "sub" executors, one per operand-storage variant (see the "add"
 * executors above for the suffix convention).
 */
3716 instr_alu_sub_exec(struct rte_swx_pipeline *p)
3718 struct thread *t = &p->threads[p->thread_id];
3719 struct instruction *ip = t->ip;
3721 TRACE("[Thread %2u] sub\n", p->thread_id);
3731 instr_alu_sub_mh_exec(struct rte_swx_pipeline *p)
3733 struct thread *t = &p->threads[p->thread_id];
3734 struct instruction *ip = t->ip;
3736 TRACE("[Thread %2u] sub (mh)\n", p->thread_id);
3746 instr_alu_sub_hm_exec(struct rte_swx_pipeline *p)
3748 struct thread *t = &p->threads[p->thread_id];
3749 struct instruction *ip = t->ip;
3751 TRACE("[Thread %2u] sub (hm)\n", p->thread_id);
3761 instr_alu_sub_hh_exec(struct rte_swx_pipeline *p)
3763 struct thread *t = &p->threads[p->thread_id];
3764 struct instruction *ip = t->ip;
3766 TRACE("[Thread %2u] sub (hh)\n", p->thread_id);
3776 instr_alu_sub_mi_exec(struct rte_swx_pipeline *p)
3778 struct thread *t = &p->threads[p->thread_id];
3779 struct instruction *ip = t->ip;
3781 TRACE("[Thread %2u] sub (mi)\n", p->thread_id);
3791 instr_alu_sub_hi_exec(struct rte_swx_pipeline *p)
3793 struct thread *t = &p->threads[p->thread_id];
3794 struct instruction *ip = t->ip;
3796 TRACE("[Thread %2u] sub (hi)\n", p->thread_id);
/* "shl" (shift left) executors, one per operand-storage variant (see the
 * "add" executors for the suffix convention).
 */
3806 instr_alu_shl_exec(struct rte_swx_pipeline *p)
3808 struct thread *t = &p->threads[p->thread_id];
3809 struct instruction *ip = t->ip;
3811 TRACE("[Thread %2u] shl\n", p->thread_id);
3821 instr_alu_shl_mh_exec(struct rte_swx_pipeline *p)
3823 struct thread *t = &p->threads[p->thread_id];
3824 struct instruction *ip = t->ip;
3826 TRACE("[Thread %2u] shl (mh)\n", p->thread_id);
3836 instr_alu_shl_hm_exec(struct rte_swx_pipeline *p)
3838 struct thread *t = &p->threads[p->thread_id];
3839 struct instruction *ip = t->ip;
3841 TRACE("[Thread %2u] shl (hm)\n", p->thread_id);
3851 instr_alu_shl_hh_exec(struct rte_swx_pipeline *p)
3853 struct thread *t = &p->threads[p->thread_id];
3854 struct instruction *ip = t->ip;
3856 TRACE("[Thread %2u] shl (hh)\n", p->thread_id);
3866 instr_alu_shl_mi_exec(struct rte_swx_pipeline *p)
3868 struct thread *t = &p->threads[p->thread_id];
3869 struct instruction *ip = t->ip;
3871 TRACE("[Thread %2u] shl (mi)\n", p->thread_id);
3881 instr_alu_shl_hi_exec(struct rte_swx_pipeline *p)
3883 struct thread *t = &p->threads[p->thread_id];
3884 struct instruction *ip = t->ip;
3886 TRACE("[Thread %2u] shl (hi)\n", p->thread_id);
/* "shr" (shift right) executors, one per operand-storage variant (see the
 * "add" executors for the suffix convention).
 */
3896 instr_alu_shr_exec(struct rte_swx_pipeline *p)
3898 struct thread *t = &p->threads[p->thread_id];
3899 struct instruction *ip = t->ip;
3901 TRACE("[Thread %2u] shr\n", p->thread_id);
3911 instr_alu_shr_mh_exec(struct rte_swx_pipeline *p)
3913 struct thread *t = &p->threads[p->thread_id];
3914 struct instruction *ip = t->ip;
3916 TRACE("[Thread %2u] shr (mh)\n", p->thread_id);
3926 instr_alu_shr_hm_exec(struct rte_swx_pipeline *p)
3928 struct thread *t = &p->threads[p->thread_id];
3929 struct instruction *ip = t->ip;
3931 TRACE("[Thread %2u] shr (hm)\n", p->thread_id);
3941 instr_alu_shr_hh_exec(struct rte_swx_pipeline *p)
3943 struct thread *t = &p->threads[p->thread_id];
3944 struct instruction *ip = t->ip;
3946 TRACE("[Thread %2u] shr (hh)\n", p->thread_id);
3956 instr_alu_shr_mi_exec(struct rte_swx_pipeline *p)
3958 struct thread *t = &p->threads[p->thread_id];
3959 struct instruction *ip = t->ip;
3961 TRACE("[Thread %2u] shr (mi)\n", p->thread_id);
3971 instr_alu_shr_hi_exec(struct rte_swx_pipeline *p)
3973 struct thread *t = &p->threads[p->thread_id];
3974 struct instruction *ip = t->ip;
3976 TRACE("[Thread %2u] shr (hi)\n", p->thread_id);
/* "and" executors: plain, _s (mixed header/metadata operands) and _i
 * (immediate source), matching the variants chosen by the translator.
 */
3986 instr_alu_and_exec(struct rte_swx_pipeline *p)
3988 struct thread *t = &p->threads[p->thread_id];
3989 struct instruction *ip = t->ip;
3991 TRACE("[Thread %2u] and\n", p->thread_id);
4001 instr_alu_and_s_exec(struct rte_swx_pipeline *p)
4003 struct thread *t = &p->threads[p->thread_id];
4004 struct instruction *ip = t->ip;
4006 TRACE("[Thread %2u] and (s)\n", p->thread_id);
4016 instr_alu_and_i_exec(struct rte_swx_pipeline *p)
4018 struct thread *t = &p->threads[p->thread_id];
4019 struct instruction *ip = t->ip;
4021 TRACE("[Thread %2u] and (i)\n", p->thread_id);
/* "or" executors: plain, _s and _i variants (see "and" executors above). */
4031 instr_alu_or_exec(struct rte_swx_pipeline *p)
4033 struct thread *t = &p->threads[p->thread_id];
4034 struct instruction *ip = t->ip;
4036 TRACE("[Thread %2u] or\n", p->thread_id);
4046 instr_alu_or_s_exec(struct rte_swx_pipeline *p)
4048 struct thread *t = &p->threads[p->thread_id];
4049 struct instruction *ip = t->ip;
4051 TRACE("[Thread %2u] or (s)\n", p->thread_id);
4061 instr_alu_or_i_exec(struct rte_swx_pipeline *p)
4063 struct thread *t = &p->threads[p->thread_id];
4064 struct instruction *ip = t->ip;
4066 TRACE("[Thread %2u] or (i)\n", p->thread_id);
/* "xor" executors: plain, _s and _i variants (see "and" executors above). */
4076 instr_alu_xor_exec(struct rte_swx_pipeline *p)
4078 struct thread *t = &p->threads[p->thread_id];
4079 struct instruction *ip = t->ip;
4081 TRACE("[Thread %2u] xor\n", p->thread_id);
4091 instr_alu_xor_s_exec(struct rte_swx_pipeline *p)
4093 struct thread *t = &p->threads[p->thread_id];
4094 struct instruction *ip = t->ip;
4096 TRACE("[Thread %2u] xor (s)\n", p->thread_id);
4106 instr_alu_xor_i_exec(struct rte_swx_pipeline *p)
4108 struct thread *t = &p->threads[p->thread_id];
4109 struct instruction *ip = t->ip;
4111 TRACE("[Thread %2u] xor (i)\n", p->thread_id);
/* ckadd (field): fold a single source field into a 16-bit one's-complement
 * (Internet-style) checksum held at the destination. The folding steps are
 * documented inline below.
 */
4121 instr_alu_ckadd_field_exec(struct rte_swx_pipeline *p)
4123 struct thread *t = &p->threads[p->thread_id];
4124 struct instruction *ip = t->ip;
4125 uint8_t *dst_struct, *src_struct;
4126 uint16_t *dst16_ptr, dst;
4127 uint64_t *src64_ptr, src64, src64_mask, src;
4130 TRACE("[Thread %2u] ckadd (field)\n", p->thread_id);
/* Destination: 16-bit checksum field. */
4133 dst_struct = t->structs[ip->alu.dst.struct_id];
4134 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
/* Source: read as 64 bits, then masked down to the field's n_bits.
 * NOTE(review): the mask expression requires 1 <= n_bits <= 64 — confirm
 * this is validated at translation time.
 */
4137 src_struct = t->structs[ip->alu.src.struct_id];
4138 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
4140 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
4141 src = src64 & src64_mask;
4146 /* The first input (r) is a 16-bit number. The second and the third
4147 * inputs are 32-bit numbers. In the worst case scenario, the sum of the
4148 * three numbers (output r) is a 34-bit number.
4150 r += (src >> 32) + (src & 0xFFFFFFFF);
4152 /* The first input is a 16-bit number. The second input is an 18-bit
4153 * number. In the worst case scenario, the sum of the two numbers is a
4156 r = (r & 0xFFFF) + (r >> 16);
4158 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
4159 * a 3-bit number (0 .. 7). Their sum is a 17-bit number (0 .. 0x10006).
4161 r = (r & 0xFFFF) + (r >> 16);
4163 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
4164 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
4165 * 0x10006), the output r is (0 .. 7). So no carry bit can be generated,
4166 * therefore the output r is always a 16-bit number.
4168 r = (r & 0xFFFF) + (r >> 16);
/* Store the folded 16-bit result. */
4173 *dst16_ptr = (uint16_t)r;
/* cksub (field): remove a source field's contribution from a 16-bit
 * one's-complement checksum (incremental checksum update, cf. RFC 1624).
 * The underflow-avoidance trick is documented inline below.
 */
4180 instr_alu_cksub_field_exec(struct rte_swx_pipeline *p)
4182 struct thread *t = &p->threads[p->thread_id];
4183 struct instruction *ip = t->ip;
4184 uint8_t *dst_struct, *src_struct;
4185 uint16_t *dst16_ptr, dst;
4186 uint64_t *src64_ptr, src64, src64_mask, src;
4189 TRACE("[Thread %2u] cksub (field)\n", p->thread_id);
/* Destination: 16-bit checksum field. */
4192 dst_struct = t->structs[ip->alu.dst.struct_id];
4193 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
/* Source: read as 64 bits, masked down to the field's n_bits. */
4196 src_struct = t->structs[ip->alu.src.struct_id];
4197 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
4199 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
4200 src = src64 & src64_mask;
4205 /* Subtraction in 1's complement arithmetic (i.e. a '- b) is the same as
4206 * the following sequence of operations in 2's complement arithmetic:
4207 * a '- b = (a - b) % 0xFFFF.
4209 * In order to prevent an underflow for the below subtraction, in which
4210 * a 33-bit number (the subtrahend) is taken out of a 16-bit number (the
4211 * minuend), we first add a multiple of the 0xFFFF modulus to the
4212 * minuend. The number we add to the minuend needs to be a 34-bit number
4213 * or higher, so for readability reasons we picked the 36-bit multiple.
4214 * We are effectively turning the 16-bit minuend into a 36-bit number:
4215 * (a - b) % 0xFFFF = (a + 0xFFFF00000 - b) % 0xFFFF.
4217 r += 0xFFFF00000ULL; /* The output r is a 36-bit number. */
4219 /* A 33-bit number is subtracted from a 36-bit number (the input r). The
4220 * result (the output r) is a 36-bit number.
4222 r -= (src >> 32) + (src & 0xFFFFFFFF);
4224 /* The first input is a 16-bit number. The second input is a 20-bit
4225 * number. Their sum is a 21-bit number.
4227 r = (r & 0xFFFF) + (r >> 16);
4229 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
4230 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1001E).
4232 r = (r & 0xFFFF) + (r >> 16);
4234 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
4235 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
4236 * 0x1001E), the output r is (0 .. 31). So no carry bit can be
4237 * generated, therefore the output r is always a 16-bit number.
4239 r = (r & 0xFFFF) + (r >> 16);
/* Store the folded 16-bit result. */
4244 *dst16_ptr = (uint16_t)r;
/* ckadd (struct of 20 bytes): compute the 16-bit one's-complement checksum
 * of a fixed 20-byte source structure (presumably the option-less IPv4
 * header fast path — confirm at the call/translation site) using five
 * 32-bit word additions.
 */
4251 instr_alu_ckadd_struct20_exec(struct rte_swx_pipeline *p)
4253 struct thread *t = &p->threads[p->thread_id];
4254 struct instruction *ip = t->ip;
4255 uint8_t *dst_struct, *src_struct;
4256 uint16_t *dst16_ptr;
4257 uint32_t *src32_ptr;
4260 TRACE("[Thread %2u] ckadd (struct of 20 bytes)\n", p->thread_id);
/* Destination: 16-bit checksum field. */
4263 dst_struct = t->structs[ip->alu.dst.struct_id];
4264 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
/* Source: the 20 bytes are consumed as five 32-bit words. */
4266 src_struct = t->structs[ip->alu.src.struct_id];
4267 src32_ptr = (uint32_t *)&src_struct[0];
/* Two accumulators keep the partial sums narrow before merging. */
4269 r0 = src32_ptr[0]; /* r0 is a 32-bit number. */
4270 r1 = src32_ptr[1]; /* r1 is a 32-bit number. */
4271 r0 += src32_ptr[2]; /* The output r0 is a 33-bit number. */
4272 r1 += src32_ptr[3]; /* The output r1 is a 33-bit number. */
4273 r0 += r1 + src32_ptr[4]; /* The output r0 is a 35-bit number. */
4275 /* The first input is a 16-bit number. The second input is a 19-bit
4276 * number. Their sum is a 20-bit number.
4278 r0 = (r0 & 0xFFFF) + (r0 >> 16);
4280 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
4281 * a 4-bit number (0 .. 15). The sum is a 17-bit number (0 .. 0x1000E).
4283 r0 = (r0 & 0xFFFF) + (r0 >> 16);
4285 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
4286 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
4287 * 0x1000E), the output r is (0 .. 15). So no carry bit can be
4288 * generated, therefore the output r is always a 16-bit number.
4290 r0 = (r0 & 0xFFFF) + (r0 >> 16);
/* In checksum encoding, zero is transmitted as 0xFFFF. */
4293 r0 = r0 ? r0 : 0xFFFF;
4295 *dst16_ptr = (uint16_t)r0;
/* ckadd (struct): compute the 16-bit one's-complement checksum of a
 * variable-size source structure, consumed as n_bits/32 32-bit words.
 */
4302 instr_alu_ckadd_struct_exec(struct rte_swx_pipeline *p)
4304 struct thread *t = &p->threads[p->thread_id];
4305 struct instruction *ip = t->ip;
4306 uint8_t *dst_struct, *src_struct;
4307 uint16_t *dst16_ptr;
4308 uint32_t *src32_ptr;
4312 TRACE("[Thread %2u] ckadd (struct)\n", p->thread_id);
/* Destination: 16-bit checksum field. */
4315 dst_struct = t->structs[ip->alu.dst.struct_id];
4316 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
4318 src_struct = t->structs[ip->alu.src.struct_id];
4319 src32_ptr = (uint32_t *)&src_struct[0];
4321 /* The max number of 32-bit words in a 256-byte header is 8 = 2^3.
4322 * Therefore, in the worst case scenario, a 35-bit number is added to a
4323 * 16-bit number (the input r), so the output r is 36-bit number.
/* NOTE(review): 8 words = 32 bytes (256 bits), not 256 bytes; the
 * "256-byte" wording above looks like a typo — confirm against the
 * actual header size limit.
 */
4325 for (i = 0; i < ip->alu.src.n_bits / 32; i++, src32_ptr++)
4328 /* The first input is a 16-bit number. The second input is a 20-bit
4329 * number. Their sum is a 21-bit number.
4331 r = (r & 0xFFFF) + (r >> 16);
4333 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
4334 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1001E).
4336 r = (r & 0xFFFF) + (r >> 16);
4338 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
4339 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
4340 * 0x1001E), the output r is (0 .. 31). So no carry bit can be
4341 * generated, therefore the output r is always a 16-bit number.
4343 r = (r & 0xFFFF) + (r >> 16);
/* Store the folded 16-bit result. */
4348 *dst16_ptr = (uint16_t)r;
/* Upper bound on whitespace-separated tokens in one instruction string. */
4354 #define RTE_SWX_INSTRUCTION_TOKENS_MAX 16
/* Tokenize one instruction string (mutated in place by strtok_r), strip an
 * optional "label :" prefix into data->label, then dispatch to the
 * per-opcode translator based on the first remaining token. Returns the
 * translator's result; EINVAL on malformed input.
 */
4357 instr_translate(struct rte_swx_pipeline *p,
4358 struct action *action,
4360 struct instruction *instr,
4361 struct instruction_data *data)
4363 char *tokens[RTE_SWX_INSTRUCTION_TOKENS_MAX];
4364 int n_tokens = 0, tpos = 0;
4366 /* Parse the instruction string into tokens. */
4370 token = strtok_r(string, " \t\v", &string);
4374 CHECK(n_tokens < RTE_SWX_INSTRUCTION_TOKENS_MAX, EINVAL);
4376 tokens[n_tokens] = token;
/* At least one token (the opcode) is required. */
4380 CHECK(n_tokens, EINVAL);
4382 /* Handle the optional instruction label ("name : opcode ..."). */
4383 if ((n_tokens >= 2) && !strcmp(tokens[1], ":")) {
4384 strcpy(data->label, tokens[0]);
/* A label alone with no instruction behind it is invalid. */
4387 CHECK(n_tokens - tpos, EINVAL);
4390 /* Identify the instruction type and delegate to its translator. */
4391 if (!strcmp(tokens[tpos], "rx"))
4392 return instr_rx_translate(p,
4399 if (!strcmp(tokens[tpos], "tx"))
4400 return instr_tx_translate(p,
4407 if (!strcmp(tokens[tpos], "extract"))
4408 return instr_hdr_extract_translate(p,
4415 if (!strcmp(tokens[tpos], "emit"))
4416 return instr_hdr_emit_translate(p,
4423 if (!strcmp(tokens[tpos], "validate"))
4424 return instr_hdr_validate_translate(p,
4431 if (!strcmp(tokens[tpos], "invalidate"))
4432 return instr_hdr_invalidate_translate(p,
4439 if (!strcmp(tokens[tpos], "mov"))
4440 return instr_mov_translate(p,
4447 if (!strcmp(tokens[tpos], "dma"))
4448 return instr_dma_translate(p,
4455 if (!strcmp(tokens[tpos], "add"))
4456 return instr_alu_add_translate(p,
4463 if (!strcmp(tokens[tpos], "sub"))
4464 return instr_alu_sub_translate(p,
4471 if (!strcmp(tokens[tpos], "ckadd"))
4472 return instr_alu_ckadd_translate(p,
4479 if (!strcmp(tokens[tpos], "cksub"))
4480 return instr_alu_cksub_translate(p,
4487 if (!strcmp(tokens[tpos], "and"))
4488 return instr_alu_and_translate(p,
4495 if (!strcmp(tokens[tpos], "or"))
4496 return instr_alu_or_translate(p,
4503 if (!strcmp(tokens[tpos], "xor"))
4504 return instr_alu_xor_translate(p,
4511 if (!strcmp(tokens[tpos], "shl"))
4512 return instr_alu_shl_translate(p,
4519 if (!strcmp(tokens[tpos], "shr"))
4520 return instr_alu_shr_translate(p,
4527 if (!strcmp(tokens[tpos], "table"))
4528 return instr_table_translate(p,
4535 if (!strcmp(tokens[tpos], "extern"))
4536 return instr_extern_translate(p,
/* Count how many of the n instructions jump to the given label (by
 * comparing against each entry's jmp_label).
 */
4547 label_is_used(struct instruction_data *data, uint32_t n, const char *label)
4549 uint32_t count = 0, i;
4554 for (i = 0; i < n; i++)
4555 if (!strcmp(label, data[i].jmp_label))
/* Validate instruction labels: every defined label must be unique, then
 * record per-label usage counts (n_users) for later passes. The uniqueness
 * check is O(n^2), which is acceptable on this configuration path.
 */
4562 instr_label_check(struct instruction_data *instruction_data,
4563 uint32_t n_instructions)
4567 /* Check that all instruction labels are unique. */
4568 for (i = 0; i < n_instructions; i++) {
4569 struct instruction_data *data = &instruction_data[i];
4570 char *label = data->label;
4576 for (j = i + 1; j < n_instructions; j++)
4577 CHECK(strcmp(label, data[j].label), EINVAL);
4580 /* Get users for each instruction label. */
4581 for (i = 0; i < n_instructions; i++) {
4582 struct instruction_data *data = &instruction_data[i];
4583 char *label = data->label;
4585 data->n_users = label_is_used(instruction_data,
/* Translate an array of instruction strings into executable form: allocate
 * the instruction and metadata arrays, translate each string, validate
 * labels, then publish the result on both the action (when given) and the
 * pipeline.
 */
4594 instruction_config(struct rte_swx_pipeline *p,
4596 const char **instructions,
4597 uint32_t n_instructions)
4599 struct instruction *instr = NULL;
4600 struct instruction_data *data = NULL;
4601 char *string = NULL;
/* Input checks: non-empty array, no NULL entries. */
4605 CHECK(n_instructions, EINVAL);
4606 CHECK(instructions, EINVAL);
4607 for (i = 0; i < n_instructions; i++)
4608 CHECK(instructions[i], EINVAL);
4610 /* Memory allocation. */
4611 instr = calloc(n_instructions, sizeof(struct instruction));
4617 data = calloc(n_instructions, sizeof(struct instruction_data));
/* Each string is duplicated because instr_translate tokenizes it in
 * place (strtok_r mutates its input).
 */
4623 for (i = 0; i < n_instructions; i++) {
4624 string = strdup(instructions[i]);
4630 err = instr_translate(p, a, string, &instr[i], &data[i]);
4637 err = instr_label_check(data, n_instructions);
/* Publish: action-scoped when translating an action's code... */
4644 a->instructions = instr;
4645 a->n_instructions = n_instructions;
/* ...pipeline-scoped otherwise. */
4647 p->instructions = instr;
4648 p->n_instructions = n_instructions;
/* Executor signature: one function per instruction opcode. */
4660 typedef void (*instr_exec_t)(struct rte_swx_pipeline *);
/* Dispatch table indexed by instruction type (designated initializers map
 * each INSTR_* opcode to its executor); used by instr_exec() below.
 */
4662 static instr_exec_t instruction_table[] = {
4663 [INSTR_RX] = instr_rx_exec,
4664 [INSTR_TX] = instr_tx_exec,
4666 [INSTR_HDR_EXTRACT] = instr_hdr_extract_exec,
4667 [INSTR_HDR_EXTRACT2] = instr_hdr_extract2_exec,
4668 [INSTR_HDR_EXTRACT3] = instr_hdr_extract3_exec,
4669 [INSTR_HDR_EXTRACT4] = instr_hdr_extract4_exec,
4670 [INSTR_HDR_EXTRACT5] = instr_hdr_extract5_exec,
4671 [INSTR_HDR_EXTRACT6] = instr_hdr_extract6_exec,
4672 [INSTR_HDR_EXTRACT7] = instr_hdr_extract7_exec,
4673 [INSTR_HDR_EXTRACT8] = instr_hdr_extract8_exec,
4675 [INSTR_HDR_EMIT] = instr_hdr_emit_exec,
4676 [INSTR_HDR_EMIT_TX] = instr_hdr_emit_tx_exec,
4677 [INSTR_HDR_EMIT2_TX] = instr_hdr_emit2_tx_exec,
4678 [INSTR_HDR_EMIT3_TX] = instr_hdr_emit3_tx_exec,
4679 [INSTR_HDR_EMIT4_TX] = instr_hdr_emit4_tx_exec,
4680 [INSTR_HDR_EMIT5_TX] = instr_hdr_emit5_tx_exec,
4681 [INSTR_HDR_EMIT6_TX] = instr_hdr_emit6_tx_exec,
4682 [INSTR_HDR_EMIT7_TX] = instr_hdr_emit7_tx_exec,
4683 [INSTR_HDR_EMIT8_TX] = instr_hdr_emit8_tx_exec,
4685 [INSTR_HDR_VALIDATE] = instr_hdr_validate_exec,
4686 [INSTR_HDR_INVALIDATE] = instr_hdr_invalidate_exec,
4688 [INSTR_MOV] = instr_mov_exec,
4689 [INSTR_MOV_S] = instr_mov_s_exec,
4690 [INSTR_MOV_I] = instr_mov_i_exec,
4692 [INSTR_DMA_HT] = instr_dma_ht_exec,
4693 [INSTR_DMA_HT2] = instr_dma_ht2_exec,
4694 [INSTR_DMA_HT3] = instr_dma_ht3_exec,
4695 [INSTR_DMA_HT4] = instr_dma_ht4_exec,
4696 [INSTR_DMA_HT5] = instr_dma_ht5_exec,
4697 [INSTR_DMA_HT6] = instr_dma_ht6_exec,
4698 [INSTR_DMA_HT7] = instr_dma_ht7_exec,
4699 [INSTR_DMA_HT8] = instr_dma_ht8_exec,
4701 [INSTR_ALU_ADD] = instr_alu_add_exec,
4702 [INSTR_ALU_ADD_MH] = instr_alu_add_mh_exec,
4703 [INSTR_ALU_ADD_HM] = instr_alu_add_hm_exec,
4704 [INSTR_ALU_ADD_HH] = instr_alu_add_hh_exec,
4705 [INSTR_ALU_ADD_MI] = instr_alu_add_mi_exec,
4706 [INSTR_ALU_ADD_HI] = instr_alu_add_hi_exec,
4708 [INSTR_ALU_SUB] = instr_alu_sub_exec,
4709 [INSTR_ALU_SUB_MH] = instr_alu_sub_mh_exec,
4710 [INSTR_ALU_SUB_HM] = instr_alu_sub_hm_exec,
4711 [INSTR_ALU_SUB_HH] = instr_alu_sub_hh_exec,
4712 [INSTR_ALU_SUB_MI] = instr_alu_sub_mi_exec,
4713 [INSTR_ALU_SUB_HI] = instr_alu_sub_hi_exec,
4715 [INSTR_ALU_CKADD_FIELD] = instr_alu_ckadd_field_exec,
4716 [INSTR_ALU_CKADD_STRUCT] = instr_alu_ckadd_struct_exec,
4717 [INSTR_ALU_CKADD_STRUCT20] = instr_alu_ckadd_struct20_exec,
4718 [INSTR_ALU_CKSUB_FIELD] = instr_alu_cksub_field_exec,
4720 [INSTR_ALU_AND] = instr_alu_and_exec,
4721 [INSTR_ALU_AND_S] = instr_alu_and_s_exec,
4722 [INSTR_ALU_AND_I] = instr_alu_and_i_exec,
4724 [INSTR_ALU_OR] = instr_alu_or_exec,
4725 [INSTR_ALU_OR_S] = instr_alu_or_s_exec,
4726 [INSTR_ALU_OR_I] = instr_alu_or_i_exec,
4728 [INSTR_ALU_XOR] = instr_alu_xor_exec,
4729 [INSTR_ALU_XOR_S] = instr_alu_xor_s_exec,
4730 [INSTR_ALU_XOR_I] = instr_alu_xor_i_exec,
4732 [INSTR_ALU_SHL] = instr_alu_shl_exec,
4733 [INSTR_ALU_SHL_MH] = instr_alu_shl_mh_exec,
4734 [INSTR_ALU_SHL_HM] = instr_alu_shl_hm_exec,
4735 [INSTR_ALU_SHL_HH] = instr_alu_shl_hh_exec,
4736 [INSTR_ALU_SHL_MI] = instr_alu_shl_mi_exec,
4737 [INSTR_ALU_SHL_HI] = instr_alu_shl_hi_exec,
4739 [INSTR_ALU_SHR] = instr_alu_shr_exec,
4740 [INSTR_ALU_SHR_MH] = instr_alu_shr_mh_exec,
4741 [INSTR_ALU_SHR_HM] = instr_alu_shr_hm_exec,
4742 [INSTR_ALU_SHR_HH] = instr_alu_shr_hh_exec,
4743 [INSTR_ALU_SHR_MI] = instr_alu_shr_mi_exec,
4744 [INSTR_ALU_SHR_HI] = instr_alu_shr_hi_exec,
4746 [INSTR_TABLE] = instr_table_exec,
4747 [INSTR_EXTERN_OBJ] = instr_extern_obj_exec,
4748 [INSTR_EXTERN_FUNC] = instr_extern_func_exec,
/* Execute the current thread's instruction by dispatching through
 * instruction_table on the instruction's type.
 */
4752 instr_exec(struct rte_swx_pipeline *p)
4754 struct thread *t = &p->threads[p->thread_id];
4755 struct instruction *ip = t->ip;
4756 instr_exec_t instr = instruction_table[ip->type];
/* Linear lookup of an action by name in the pipeline's action list. */
4764 static struct action *
4765 action_find(struct rte_swx_pipeline *p, const char *name)
4767 struct action *elem;
4772 TAILQ_FOREACH(elem, &p->actions, node)
4773 if (strcmp(elem->name, name) == 0)
/* Find a field in the action's argument struct; NULL when the action has
 * no argument struct (a->st == NULL) or the field does not exist.
 */
4779 static struct field *
4780 action_field_find(struct action *a, const char *name)
4782 return a->st ? struct_type_field_find(a->st, name) : NULL;
/* Resolve a "t.<field>" operand to a field of the action's argument
 * struct; requires the literal "t." prefix.
 */
4785 static struct field *
4786 action_field_parse(struct action *action, const char *name)
4788 if (name[0] != 't' || name[1] != '.')
4791 return action_field_find(action, &name[2]);
/* Public API: register a new action with the pipeline — validate the name
 * and the optional argument struct type, allocate and initialize the action
 * node, translate its instructions, then append it to the action list.
 */
4795 rte_swx_pipeline_action_config(struct rte_swx_pipeline *p,
4797 const char *args_struct_type_name,
4798 const char **instructions,
4799 uint32_t n_instructions)
4801 struct struct_type *args_struct_type;
/* Name must be non-empty and not already registered. */
4807 CHECK_NAME(name, EINVAL);
4808 CHECK(!action_find(p, name), EEXIST);
/* The argument struct type is optional; when named, it must exist. */
4810 if (args_struct_type_name) {
4811 CHECK_NAME(args_struct_type_name, EINVAL);
4812 args_struct_type = struct_type_find(p, args_struct_type_name);
4813 CHECK(args_struct_type, EINVAL);
4815 args_struct_type = NULL;
4818 /* Node allocation. */
4819 a = calloc(1, sizeof(struct action));
4822 /* Node initialization.
 * NOTE(review): strcpy assumes name fits a->name (RTE_SWX_NAME_SIZE);
 * CHECK_NAME only verifies non-emptiness — confirm length is bounded by
 * the callers.
 */
4823 strcpy(a->name, name);
4824 a->st = args_struct_type;
/* IDs are assigned in registration order. */
4825 a->id = p->n_actions;
4827 /* Instruction translation. */
4828 err = instruction_config(p, a, instructions, n_instructions);
4834 /* Node add to tailq. */
4835 TAILQ_INSERT_TAIL(&p->actions, a, node);
/* Build phase: create the per-action-ID array of instruction pointers used
 * for fast dispatch at run time.
 */
4842 action_build(struct rte_swx_pipeline *p)
4844 struct action *action;
4846 p->action_instructions = calloc(p->n_actions,
4847 sizeof(struct instruction *));
4848 CHECK(p->action_instructions, ENOMEM);
4850 TAILQ_FOREACH(action, &p->actions, node)
4851 p->action_instructions[action->id] = action->instructions;
/* Undo action_build(): release the dispatch array and clear the pointer. */
4857 action_build_free(struct rte_swx_pipeline *p)
4859 free(p->action_instructions);
4860 p->action_instructions = NULL;
/* Free all registered actions: release build-time state, then drain the
 * action list, freeing each node's instruction array.
 */
4864 action_free(struct rte_swx_pipeline *p)
4866 action_build_free(p)
4869 struct action *action;
4871 action = TAILQ_FIRST(&p->actions);
4875 TAILQ_REMOVE(&p->actions, action, node);
4876 free(action->instructions);
/* Linear lookup of a table type by name in the pipeline's table-type list. */
4884 static struct table_type *
4885 table_type_find(struct rte_swx_pipeline *p, const char *name)
4887 struct table_type *elem;
4889 TAILQ_FOREACH(elem, &p->table_types, node)
4890 if (strcmp(elem->name, name) == 0)
/* Pick a table type for the given match type: prefer the recommended type
 * by name (only if its match type agrees), otherwise fall back to the
 * first registered type with the required match type.
 */
4896 static struct table_type *
4897 table_type_resolve(struct rte_swx_pipeline *p,
4898 const char *recommended_type_name,
4899 enum rte_swx_table_match_type match_type)
4901 struct table_type *elem;
4903 /* Only consider the recommended type if the match type is correct. */
4904 if (recommended_type_name)
4905 TAILQ_FOREACH(elem, &p->table_types, node)
4906 if (!strcmp(elem->name, recommended_type_name) &&
4907 (elem->match_type == match_type))
4910 /* Ignore the recommended type and get the first element with this match
4913 TAILQ_FOREACH(elem, &p->table_types, node)
4914 if (elem->match_type == match_type)
/* Linear lookup of a table by name in the pipeline's table list. */
4920 static struct table *
4921 table_find(struct rte_swx_pipeline *p, const char *name)
4925 TAILQ_FOREACH(elem, &p->tables, node)
4926 if (strcmp(elem->name, name) == 0)
/* Linear lookup of a table by numeric ID in the pipeline's table list. */
4932 static struct table *
4933 table_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
4935 struct table *table = NULL;
4937 TAILQ_FOREACH(table, &p->tables, node)
4938 if (table->id == id)
/* Public API: register a table implementation (create/lookup/free ops)
 * under a unique name for the given match type.
 */
4945 rte_swx_pipeline_table_type_register(struct rte_swx_pipeline *p,
4947 enum rte_swx_table_match_type match_type,
4948 struct rte_swx_table_ops *ops)
4950 struct table_type *elem;
/* Name must be non-empty and not already registered. */
4954 CHECK_NAME(name, EINVAL);
4955 CHECK(!table_type_find(p, name), EEXIST);
/* The mandatory ops: create, lookup, free. */
4958 CHECK(ops->create, EINVAL);
4959 CHECK(ops->lkp, EINVAL);
4960 CHECK(ops->free, EINVAL);
4962 /* Node allocation. */
4963 elem = calloc(1, sizeof(struct table_type));
4964 CHECK(elem, ENOMEM);
4966 /* Node initialization. The ops are copied by value so the caller's
 * struct need not outlive this call.
 */
4967 strcpy(elem->name, name);
4968 elem->match_type = match_type;
4969 memcpy(&elem->ops, ops, sizeof(*ops));
4971 /* Node add to tailq. */
4972 TAILQ_INSERT_TAIL(&p->table_types, elem, node);
/* Derive the overall table match type from the field list: EXACT when all
 * fields are exact; LPM only when the single non-exact field is the last
 * one and is LPM; WILDCARD otherwise.
 */
4977 static enum rte_swx_table_match_type
4978 table_match_type_resolve(struct rte_swx_match_field_params *fields,
/* Find the first non-exact field, if any. */
4983 for (i = 0; i < n_fields; i++)
4984 if (fields[i].match_type != RTE_SWX_TABLE_MATCH_EXACT)
4988 return RTE_SWX_TABLE_MATCH_EXACT;
4990 if ((i == n_fields - 1) &&
4991 (fields[i].match_type == RTE_SWX_TABLE_MATCH_LPM))
4992 return RTE_SWX_TABLE_MATCH_LPM;
4994 return RTE_SWX_TABLE_MATCH_WILDCARD;
/* Public API: create a table — validate the match fields (all header-based
 * from one header, or all metadata, in increasing offset order), validate
 * the action list and default action, resolve the table type, then allocate
 * and initialize the table node and append it to the table list.
 */
4998 rte_swx_pipeline_table_config(struct rte_swx_pipeline *p,
5000 struct rte_swx_pipeline_table_params *params,
5001 const char *recommended_table_type_name,
5005 struct table_type *type;
5007 struct action *default_action;
5008 struct header *header = NULL;
5010 uint32_t offset_prev = 0, action_data_size_max = 0, i;
/* Name must be non-empty and not already in use. */
5014 CHECK_NAME(name, EINVAL);
5015 CHECK(!table_find(p, name), EEXIST);
5017 CHECK(params, EINVAL);
/* Match field checks: each field must resolve to a header field or a
 * metadata field; all fields must share the same storage (one header, or
 * metadata) and appear in strictly increasing offset order.
 */
5020 CHECK(!params->n_fields || params->fields, EINVAL);
5021 for (i = 0; i < params->n_fields; i++) {
5022 struct rte_swx_match_field_params *field = &params->fields[i];
5024 struct field *hf, *mf;
5027 CHECK_NAME(field->name, EINVAL);
5029 hf = header_field_parse(p, field->name, &h);
5030 mf = metadata_field_parse(p, field->name);
5031 CHECK(hf || mf, EINVAL);
5033 offset = hf ? hf->offset : mf->offset;
/* First field fixes the storage kind for the rest. */
5036 is_header = hf ? 1 : 0;
5037 header = hf ? h : NULL;
5038 offset_prev = offset;
/* Subsequent fields: same header (or all metadata)... */
5043 CHECK((is_header && hf && (h->id == header->id)) ||
5044 (!is_header && mf), EINVAL);
/* ...and strictly increasing offsets. */
5046 CHECK(offset > offset_prev, EINVAL);
5047 offset_prev = offset;
5050 /* Action checks. */
5051 CHECK(params->n_actions, EINVAL);
5052 CHECK(params->action_names, EINVAL);
5053 for (i = 0; i < params->n_actions; i++) {
5054 const char *action_name = params->action_names[i];
5056 uint32_t action_data_size;
5058 CHECK(action_name, EINVAL);
5060 a = action_find(p, action_name);
/* Track the largest action argument struct (in bytes). */
5063 action_data_size = a->st ? a->st->n_bits / 8 : 0;
5064 if (action_data_size > action_data_size_max)
5065 action_data_size_max = action_data_size;
/* The default action must be one of the table's actions.
 * NOTE(review): the loop bound below is p->n_actions (all actions
 * registered with the pipeline) while the index is applied to
 * params->action_names[] (n_actions entries) — if p->n_actions >
 * params->n_actions this reads past the caller's array. The bound
 * presumably should be params->n_actions; confirm and fix upstream.
 */
5068 CHECK(params->default_action_name, EINVAL);
5069 for (i = 0; i < p->n_actions; i++)
5070 if (!strcmp(params->action_names[i],
5071 params->default_action_name))
5073 CHECK(i < params->n_actions, EINVAL);
5074 default_action = action_find(p, params->default_action_name);
/* Default action data is mandatory iff the action takes arguments. */
5075 CHECK((default_action->st && params->default_action_data) ||
5076 !params->default_action_data, EINVAL);
5078 /* Table type checks. */
5079 if (params->n_fields) {
5080 enum rte_swx_table_match_type match_type;
5082 match_type = table_match_type_resolve(params->fields,
5084 type = table_type_resolve(p,
5085 recommended_table_type_name,
5087 CHECK(type, EINVAL);
5092 /* Memory allocation. */
5093 t = calloc(1, sizeof(struct table));
5096 t->fields = calloc(params->n_fields, sizeof(struct match_field));
5102 t->actions = calloc(params->n_actions, sizeof(struct action *));
5109 if (action_data_size_max) {
5110 t->default_action_data = calloc(1, action_data_size_max);
5111 if (!t->default_action_data) {
5119 /* Node initialization. */
5120 strcpy(t->name, name);
5121 if (args && args[0])
5122 strcpy(t->args, args);
5125 for (i = 0; i < params->n_fields; i++) {
5126 struct rte_swx_match_field_params *field = &params->fields[i];
5127 struct match_field *f = &t->fields[i];
5129 f->match_type = field->match_type;
/* Re-resolve each field into the table's own field array. */
5130 f->field = is_header ?
5131 header_field_parse(p, field->name, NULL) :
5132 metadata_field_parse(p, field->name);
5134 t->n_fields = params->n_fields;
5135 t->is_header = is_header;
5138 for (i = 0; i < params->n_actions; i++)
5139 t->actions[i] = action_find(p, params->action_names[i]);
5140 t->default_action = default_action;
/* Copy the default action argument data when the action has arguments. */
5141 if (default_action->st)
5142 memcpy(t->default_action_data,
5143 params->default_action_data,
5144 default_action->st->n_bits / 8);
5145 t->n_actions = params->n_actions;
5146 t->default_action_is_const = params->default_action_is_const;
5147 t->action_data_size_max = action_data_size_max;
/* IDs are assigned in creation order. */
5150 t->id = p->n_tables;
5152 /* Node add to tailq. */
5153 TAILQ_INSERT_TAIL(&p->tables, t, node);
/* Build the low-level rte_swx_table_params for a table: key window derived
 * from the first and last match fields, a byte-granular key mask covering
 * each field, and the max action data size. Returns NULL on allocation
 * failure; the caller owns the result (see table_params_free).
 */
5159 static struct rte_swx_table_params *
5160 table_params_get(struct table *table)
5162 struct rte_swx_table_params *params;
5163 struct field *first, *last;
5165 uint32_t key_size, key_offset, action_data_size, i;
5167 /* Memory allocation. */
5168 params = calloc(1, sizeof(struct rte_swx_table_params));
5172 /* Key offset and size: the byte window spanning all match fields.
 * NOTE(review): the /8 divisions truncate — presumably match fields are
 * byte-aligned with byte-multiple sizes; confirm this is validated at
 * table creation.
 */
5173 first = table->fields[0].field;
5174 last = table->fields[table->n_fields - 1].field;
5175 key_offset = first->offset / 8;
5176 key_size = (last->offset + last->n_bits - first->offset) / 8;
5178 /* Memory allocation. */
5179 key_mask = calloc(1, key_size);
/* Set mask bytes to 0xFF over each field; gaps between fields stay 0. */
5186 for (i = 0; i < table->n_fields; i++) {
5187 struct field *f = table->fields[i].field;
5188 uint32_t start = (f->offset - first->offset) / 8;
5189 size_t size = f->n_bits / 8;
5191 memset(&key_mask[start], 0xFF, size);
5194 /* Action data size: max over all the table's actions. */
5195 action_data_size = 0;
5196 for (i = 0; i < table->n_actions; i++) {
5197 struct action *action = table->actions[i];
5198 uint32_t ads = action->st ? action->st->n_bits / 8 : 0;
5200 if (ads > action_data_size)
5201 action_data_size = ads;
/* Fill in the output params. */
5205 params->match_type = table->type->match_type;
5206 params->key_size = key_size;
5207 params->key_offset = key_offset;
5208 params->key_mask0 = key_mask;
5209 params->action_data_size = action_data_size;
5210 params->n_keys_max = table->size;
5216 table_params_free(struct rte_swx_table_params *params)
5221 free(params->key_mask0);
5226 table_state_build(struct rte_swx_pipeline *p)
5228 struct table *table;
5230 p->table_state = calloc(p->n_tables,
5231 sizeof(struct rte_swx_table_state));
5232 CHECK(p->table_state, ENOMEM);
5234 TAILQ_FOREACH(table, &p->tables, node) {
5235 struct rte_swx_table_state *ts = &p->table_state[table->id];
5238 struct rte_swx_table_params *params;
5241 params = table_params_get(table);
5242 CHECK(params, ENOMEM);
5244 ts->obj = table->type->ops.create(params,
5249 table_params_free(params);
5250 CHECK(ts->obj, ENODEV);
5253 /* ts->default_action_data. */
5254 if (table->action_data_size_max) {
5255 ts->default_action_data =
5256 malloc(table->action_data_size_max);
5257 CHECK(ts->default_action_data, ENOMEM);
5259 memcpy(ts->default_action_data,
5260 table->default_action_data,
5261 table->action_data_size_max);
5264 /* ts->default_action_id. */
5265 ts->default_action_id = table->default_action->id;
5272 table_state_build_free(struct rte_swx_pipeline *p)
5276 if (!p->table_state)
5279 for (i = 0; i < p->n_tables; i++) {
5280 struct rte_swx_table_state *ts = &p->table_state[i];
5281 struct table *table = table_find_by_id(p, i);
5284 if (table->type && ts->obj)
5285 table->type->ops.free(ts->obj);
5287 /* ts->default_action_data. */
5288 free(ts->default_action_data);
5291 free(p->table_state);
5292 p->table_state = NULL;
/* Release all table runtime state; thin alias kept for symmetry with the
 * other *_free() teardown entry points called from rte_swx_pipeline_free().
 */
static void
table_state_free(struct rte_swx_pipeline *p)
{
	table_state_build_free(p);
}
5302 table_stub_lkp(void *table __rte_unused,
5303 void *mailbox __rte_unused,
5304 uint8_t **key __rte_unused,
5305 uint64_t *action_id __rte_unused,
5306 uint8_t **action_data __rte_unused,
5310 return 1; /* DONE. */
5314 table_build(struct rte_swx_pipeline *p)
5318 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
5319 struct thread *t = &p->threads[i];
5320 struct table *table;
5322 t->tables = calloc(p->n_tables, sizeof(struct table_runtime));
5323 CHECK(t->tables, ENOMEM);
5325 TAILQ_FOREACH(table, &p->tables, node) {
5326 struct table_runtime *r = &t->tables[table->id];
5331 size = table->type->ops.mailbox_size_get();
5334 r->func = table->type->ops.lkp;
5338 r->mailbox = calloc(1, size);
5339 CHECK(r->mailbox, ENOMEM);
5343 r->key = table->is_header ?
5344 &t->structs[table->header->struct_id] :
5345 &t->structs[p->metadata_struct_id];
5347 r->func = table_stub_lkp;
5356 table_build_free(struct rte_swx_pipeline *p)
5360 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
5361 struct thread *t = &p->threads[i];
5367 for (j = 0; j < p->n_tables; j++) {
5368 struct table_runtime *r = &t->tables[j];
5379 table_free(struct rte_swx_pipeline *p)
5381 table_build_free(p);
5387 elem = TAILQ_FIRST(&p->tables);
5391 TAILQ_REMOVE(&p->tables, elem, node);
5393 free(elem->actions);
5394 free(elem->default_action_data);
5400 struct table_type *elem;
5402 elem = TAILQ_FIRST(&p->table_types);
5406 TAILQ_REMOVE(&p->table_types, elem, node);
5415 rte_swx_pipeline_config(struct rte_swx_pipeline **p, int numa_node)
5417 struct rte_swx_pipeline *pipeline;
5419 /* Check input parameters. */
5422 /* Memory allocation. */
5423 pipeline = calloc(1, sizeof(struct rte_swx_pipeline));
5424 CHECK(pipeline, ENOMEM);
5426 /* Initialization. */
5427 TAILQ_INIT(&pipeline->struct_types);
5428 TAILQ_INIT(&pipeline->port_in_types);
5429 TAILQ_INIT(&pipeline->ports_in);
5430 TAILQ_INIT(&pipeline->port_out_types);
5431 TAILQ_INIT(&pipeline->ports_out);
5432 TAILQ_INIT(&pipeline->extern_types);
5433 TAILQ_INIT(&pipeline->extern_objs);
5434 TAILQ_INIT(&pipeline->extern_funcs);
5435 TAILQ_INIT(&pipeline->headers);
5436 TAILQ_INIT(&pipeline->actions);
5437 TAILQ_INIT(&pipeline->table_types);
5438 TAILQ_INIT(&pipeline->tables);
5440 pipeline->n_structs = 1; /* Struct 0 is reserved for action_data. */
5441 pipeline->numa_node = numa_node;
5448 rte_swx_pipeline_free(struct rte_swx_pipeline *p)
5453 free(p->instructions);
5455 table_state_free(p);
5460 extern_func_free(p);
5470 rte_swx_pipeline_instructions_config(struct rte_swx_pipeline *p,
5471 const char **instructions,
5472 uint32_t n_instructions)
5477 err = instruction_config(p, NULL, instructions, n_instructions);
5481 /* Thread instruction pointer reset. */
5482 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
5483 struct thread *t = &p->threads[i];
5485 thread_ip_reset(p, t);
5492 rte_swx_pipeline_build(struct rte_swx_pipeline *p)
5497 CHECK(p->build_done == 0, EEXIST);
5499 status = port_in_build(p);
5503 status = port_out_build(p);
5507 status = struct_build(p);
5511 status = extern_obj_build(p);
5515 status = extern_func_build(p);
5519 status = header_build(p);
5523 status = metadata_build(p);
5527 status = action_build(p);
5531 status = table_build(p);
5535 status = table_state_build(p);
5543 table_state_build_free(p);
5544 table_build_free(p);
5545 action_build_free(p);
5546 metadata_build_free(p);
5547 header_build_free(p);
5548 extern_func_build_free(p);
5549 extern_obj_build_free(p);
5550 port_out_build_free(p);
5551 port_in_build_free(p);
5552 struct_build_free(p);
/* Public API: execute n_instructions instructions on the pipeline's
 * current thread(s). The caller controls the run quantum; this function
 * performs no validation on the hot path by design.
 */
void
rte_swx_pipeline_run(struct rte_swx_pipeline *p, uint32_t n_instructions)
{
	uint32_t i;

	for (i = 0; i < n_instructions; i++)
		instr_exec(p);
}
5570 rte_swx_pipeline_table_state_get(struct rte_swx_pipeline *p,
5571 struct rte_swx_table_state **table_state)
5573 if (!p || !table_state || !p->build_done)
5576 *table_state = p->table_state;
5581 rte_swx_pipeline_table_state_set(struct rte_swx_pipeline *p,
5582 struct rte_swx_table_state *table_state)
5584 if (!p || !table_state || !p->build_done)
5587 p->table_state = table_state;