1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Intel Corporation
11 #include <rte_common.h>
12 #include <rte_prefetch.h>
13 #include <rte_byteorder.h>
15 #include "rte_swx_pipeline.h"
16 #include "rte_swx_ctl.h"
18 #define CHECK(condition, err_code) \
24 #define CHECK_NAME(name, err_code) \
25 CHECK((name) && (name)[0], err_code)
32 #define TRACE(...) printf(__VA_ARGS__)
37 #define ntoh64(x) rte_be_to_cpu_64(x)
38 #define hton64(x) rte_cpu_to_be_64(x)
44 char name[RTE_SWX_NAME_SIZE];
50 TAILQ_ENTRY(struct_type) node;
51 char name[RTE_SWX_NAME_SIZE];
57 TAILQ_HEAD(struct_type_tailq, struct_type);
63 TAILQ_ENTRY(port_in_type) node;
64 char name[RTE_SWX_NAME_SIZE];
65 struct rte_swx_port_in_ops ops;
68 TAILQ_HEAD(port_in_type_tailq, port_in_type);
71 TAILQ_ENTRY(port_in) node;
72 struct port_in_type *type;
77 TAILQ_HEAD(port_in_tailq, port_in);
79 struct port_in_runtime {
80 rte_swx_port_in_pkt_rx_t pkt_rx;
87 struct port_out_type {
88 TAILQ_ENTRY(port_out_type) node;
89 char name[RTE_SWX_NAME_SIZE];
90 struct rte_swx_port_out_ops ops;
93 TAILQ_HEAD(port_out_type_tailq, port_out_type);
96 TAILQ_ENTRY(port_out) node;
97 struct port_out_type *type;
102 TAILQ_HEAD(port_out_tailq, port_out);
104 struct port_out_runtime {
105 rte_swx_port_out_pkt_tx_t pkt_tx;
106 rte_swx_port_out_flush_t flush;
113 struct extern_type_member_func {
114 TAILQ_ENTRY(extern_type_member_func) node;
115 char name[RTE_SWX_NAME_SIZE];
116 rte_swx_extern_type_member_func_t func;
120 TAILQ_HEAD(extern_type_member_func_tailq, extern_type_member_func);
123 TAILQ_ENTRY(extern_type) node;
124 char name[RTE_SWX_NAME_SIZE];
125 struct struct_type *mailbox_struct_type;
126 rte_swx_extern_type_constructor_t constructor;
127 rte_swx_extern_type_destructor_t destructor;
128 struct extern_type_member_func_tailq funcs;
132 TAILQ_HEAD(extern_type_tailq, extern_type);
135 TAILQ_ENTRY(extern_obj) node;
136 char name[RTE_SWX_NAME_SIZE];
137 struct extern_type *type;
143 TAILQ_HEAD(extern_obj_tailq, extern_obj);
145 #ifndef RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX
146 #define RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX 8
149 struct extern_obj_runtime {
152 rte_swx_extern_type_member_func_t funcs[RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX];
159 TAILQ_ENTRY(extern_func) node;
160 char name[RTE_SWX_NAME_SIZE];
161 struct struct_type *mailbox_struct_type;
162 rte_swx_extern_func_t func;
167 TAILQ_HEAD(extern_func_tailq, extern_func);
169 struct extern_func_runtime {
171 rte_swx_extern_func_t func;
178 TAILQ_ENTRY(header) node;
179 char name[RTE_SWX_NAME_SIZE];
180 struct struct_type *st;
185 TAILQ_HEAD(header_tailq, header);
187 struct header_runtime {
191 struct header_out_runtime {
201 /* Packet headers are always in Network Byte Order (NBO), i.e. big endian.
202 * Packet meta-data fields are always assumed to be in Host Byte Order (HBO).
203 * Table entry fields can be in either NBO or HBO; they are assumed to be in HBO
204 * when transferred to packet meta-data and in NBO when transferred to packet headers.
 */
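/*
 * Illustrative sketch (not compiled by default): the NBO <-> HBO conversion
 * described above, written out for a single header field of up to 64 bits
 * located at a given byte offset. It mirrors the way the ALU_*() and MOV_*()
 * macros further down handle their header (H) operands; the function and
 * parameter names are hypothetical.
 */
#ifdef SWX_PIPELINE_EXAMPLES
static uint64_t
example_header_field_read(uint8_t *header, uint32_t offset, uint32_t n_bits)
{
	uint64_t field64 = *(uint64_t *)&header[offset]; /* Raw NBO bytes. */

	/* NBO -> HBO, then keep only the n_bits that belong to the field. */
	return ntoh64(field64) >> (64 - n_bits);
}
#endif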
208 /* Notation conventions:
209 * -Header field: H = h.header.field (dst/src)
210 * -Meta-data field: M = m.field (dst/src)
211 * -Extern object mailbox field: E = e.field (dst/src)
212 * -Extern function mailbox field: F = f.field (dst/src)
213 * -Table action data field: T = t.field (src only)
214 * -Immediate value: I = 32-bit unsigned value (src only)
 */
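/*
 * Illustrative sketch (not compiled by default): a minimal instruction list
 * written with the dst/src notation above. All header, field and port names
 * are hypothetical; the mnemonics (rx, extract, mov, emit, tx) are the ones
 * handled by the instr_*_translate() functions below.
 */
#ifdef SWX_PIPELINE_EXAMPLES
static const char *example_instructions[] = {
	"rx m.port_in",             /* M: read the input port into meta-data. */
	"extract h.ethernet",       /* H: parse the first packet header. */
	"mov m.port_out m.port_in", /* dst = M, src = M. */
	"emit h.ethernet",          /* H: emit the header back into the packet. */
	"tx m.port_out",            /* M: transmit to the selected output port. */
};
#endif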
217 enum instruction_type {
224 /* extract h.header */
245 /* validate h.header */
248 /* invalidate h.header */
249 INSTR_HDR_INVALIDATE,
253 * dst = HMEF, src = HMEFTI
255 INSTR_MOV, /* dst = MEF, src = MEFT */
256 INSTR_MOV_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
257 INSTR_MOV_I, /* dst = HMEF, src = I */
259 /* dma h.header t.field
260 * memcpy(h.header, t.field, sizeof(h.header))
273 * dst = HMEF, src = HMEFTI
275 INSTR_ALU_ADD, /* dst = MEF, src = MEF */
276 INSTR_ALU_ADD_MH, /* dst = MEF, src = H */
277 INSTR_ALU_ADD_HM, /* dst = H, src = MEF */
278 INSTR_ALU_ADD_HH, /* dst = H, src = H */
279 INSTR_ALU_ADD_MI, /* dst = MEF, src = I */
280 INSTR_ALU_ADD_HI, /* dst = H, src = I */
284 * dst = HMEF, src = HMEFTI
286 INSTR_ALU_SUB, /* dst = MEF, src = MEF */
287 INSTR_ALU_SUB_MH, /* dst = MEF, src = H */
288 INSTR_ALU_SUB_HM, /* dst = H, src = MEF */
289 INSTR_ALU_SUB_HH, /* dst = H, src = H */
290 INSTR_ALU_SUB_MI, /* dst = MEF, src = I */
291 INSTR_ALU_SUB_HI, /* dst = H, src = I */
294 * dst = dst '+ src[0:1] '+ src[2:3] '+ ...
295 * dst = H, src = {H, h.header}
297 INSTR_ALU_CKADD_FIELD, /* src = H */
298 INSTR_ALU_CKADD_STRUCT20, /* src = h.header, with sizeof(header) = 20 */
299 INSTR_ALU_CKADD_STRUCT, /* src = h.header, with any sizeof(header) */
305 INSTR_ALU_CKSUB_FIELD,
309 * dst = HMEF, src = HMEFTI
311 INSTR_ALU_AND, /* dst = MEF, src = MEFT */
312 INSTR_ALU_AND_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
313 INSTR_ALU_AND_I, /* dst = HMEF, src = I */
317 * dst = HMEF, src = HMEFTI
319 INSTR_ALU_OR, /* dst = MEF, src = MEFT */
320 INSTR_ALU_OR_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
321 INSTR_ALU_OR_I, /* dst = HMEF, src = I */
325 * dst = HMEF, src = HMEFTI
327 INSTR_ALU_XOR, /* dst = MEF, src = MEFT */
328 INSTR_ALU_XOR_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
329 INSTR_ALU_XOR_I, /* dst = HMEF, src = I */
333 * dst = HMEF, src = HMEFTI
335 INSTR_ALU_SHL, /* dst = MEF, src = MEF */
336 INSTR_ALU_SHL_MH, /* dst = MEF, src = H */
337 INSTR_ALU_SHL_HM, /* dst = H, src = MEF */
338 INSTR_ALU_SHL_HH, /* dst = H, src = H */
339 INSTR_ALU_SHL_MI, /* dst = MEF, src = I */
340 INSTR_ALU_SHL_HI, /* dst = H, src = I */
343 struct instr_operand {
358 uint8_t header_id[8];
359 uint8_t struct_id[8];
364 struct instr_hdr_validity {
368 struct instr_dst_src {
369 struct instr_operand dst;
371 struct instr_operand src;
378 uint8_t header_id[8];
379 uint8_t struct_id[8];
390 enum instruction_type type;
393 struct instr_hdr_validity valid;
394 struct instr_dst_src mov;
395 struct instr_dma dma;
396 struct instr_dst_src alu;
400 struct instruction_data {
401 char label[RTE_SWX_NAME_SIZE];
402 char jmp_label[RTE_SWX_NAME_SIZE];
403 uint32_t n_users; /* Number of jump instructions that target this instruction. */
411 TAILQ_ENTRY(action) node;
412 char name[RTE_SWX_NAME_SIZE];
413 struct struct_type *st;
414 struct instruction *instructions;
415 uint32_t n_instructions;
419 TAILQ_HEAD(action_tailq, action);
425 TAILQ_ENTRY(table_type) node;
426 char name[RTE_SWX_NAME_SIZE];
427 enum rte_swx_table_match_type match_type;
428 struct rte_swx_table_ops ops;
431 TAILQ_HEAD(table_type_tailq, table_type);
434 enum rte_swx_table_match_type match_type;
439 TAILQ_ENTRY(table) node;
440 char name[RTE_SWX_NAME_SIZE];
441 char args[RTE_SWX_NAME_SIZE];
442 struct table_type *type; /* NULL when n_fields == 0. */
445 struct match_field *fields;
447 int is_header; /* Only valid when n_fields > 0. */
448 struct header *header; /* Only valid when n_fields > 0. */
451 struct action **actions;
452 struct action *default_action;
453 uint8_t *default_action_data;
455 int default_action_is_const;
456 uint32_t action_data_size_max;
462 TAILQ_HEAD(table_tailq, table);
464 struct table_runtime {
465 rte_swx_table_lookup_t func;
475 struct rte_swx_pkt pkt;
481 /* Packet headers. */
482 struct header_runtime *headers; /* Extracted or generated headers. */
483 struct header_out_runtime *headers_out; /* Emitted headers. */
484 uint8_t *header_storage;
485 uint8_t *header_out_storage;
486 uint64_t valid_headers;
487 uint32_t n_headers_out;
489 /* Packet meta-data. */
493 struct table_runtime *tables;
494 struct rte_swx_table_state *table_state;
496 int hit; /* 0 = Miss, 1 = Hit. */
498 /* Extern objects and functions. */
499 struct extern_obj_runtime *extern_objs;
500 struct extern_func_runtime *extern_funcs;
503 struct instruction *ip;
504 struct instruction *ret;
507 #define MASK64_BIT_GET(mask, pos) ((mask) & (1LLU << (pos)))
508 #define MASK64_BIT_SET(mask, pos) ((mask) | (1LLU << (pos)))
509 #define MASK64_BIT_CLR(mask, pos) ((mask) & ~(1LLU << (pos)))
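/*
 * Illustrative sketch (not compiled by default): the MASK64_BIT_*() helpers
 * operate on the per-thread valid_headers mask, one bit per header ID:
 * extract and validate set the bit, invalidate clears it, and e.g. the DMA
 * handler tests it (see __instr_dma_ht_exec() below). The header ID here is
 * hypothetical.
 */
#ifdef SWX_PIPELINE_EXAMPLES
static uint64_t
example_valid_headers_update(uint64_t valid_headers, uint32_t header_id, int valid)
{
	return valid ? MASK64_BIT_SET(valid_headers, header_id) :
		       MASK64_BIT_CLR(valid_headers, header_id);
}
#endif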
511 #define ALU(thread, ip, operator) \
513 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
514 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
515 uint64_t dst64 = *dst64_ptr; \
516 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
517 uint64_t dst = dst64 & dst64_mask; \
519 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
520 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
521 uint64_t src64 = *src64_ptr; \
522 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
523 uint64_t src = src64 & src64_mask; \
525 uint64_t result = dst operator src; \
527 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
530 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
532 #define ALU_S(thread, ip, operator) \
534 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
535 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
536 uint64_t dst64 = *dst64_ptr; \
537 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
538 uint64_t dst = dst64 & dst64_mask; \
540 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
541 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
542 uint64_t src64 = *src64_ptr; \
543 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
545 uint64_t result = dst operator src; \
547 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
552 #define ALU_HM(thread, ip, operator) \
554 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
555 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
556 uint64_t dst64 = *dst64_ptr; \
557 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
558 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
560 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
561 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
562 uint64_t src64 = *src64_ptr; \
563 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
564 uint64_t src = src64 & src64_mask; \
566 uint64_t result = dst operator src; \
567 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
569 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
572 #define ALU_HH(thread, ip, operator) \
574 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
575 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
576 uint64_t dst64 = *dst64_ptr; \
577 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
578 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
580 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
581 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
582 uint64_t src64 = *src64_ptr; \
583 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
585 uint64_t result = dst operator src; \
586 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
588 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
600 #define ALU_I(thread, ip, operator) \
602 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
603 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
604 uint64_t dst64 = *dst64_ptr; \
605 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
606 uint64_t dst = dst64 & dst64_mask; \
608 uint64_t src = (ip)->alu.src_val; \
610 uint64_t result = dst operator src; \
612 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
617 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
619 #define ALU_HI(thread, ip, operator) \
621 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
622 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
623 uint64_t dst64 = *dst64_ptr; \
624 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
625 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
627 uint64_t src = (ip)->alu.src_val; \
629 uint64_t result = dst operator src; \
630 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
632 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
641 #define MOV(thread, ip) \
643 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
644 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
645 uint64_t dst64 = *dst64_ptr; \
646 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
648 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
649 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
650 uint64_t src64 = *src64_ptr; \
651 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \
652 uint64_t src = src64 & src64_mask; \
654 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
657 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
659 #define MOV_S(thread, ip) \
661 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
662 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
663 uint64_t dst64 = *dst64_ptr; \
664 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
666 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
667 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
668 uint64_t src64 = *src64_ptr; \
669 uint64_t src = ntoh64(src64) >> (64 - (ip)->mov.src.n_bits); \
671 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
680 #define MOV_I(thread, ip) \
682 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
683 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
684 uint64_t dst64 = *dst64_ptr; \
685 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
687 uint64_t src = (ip)->mov.src_val; \
689 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
692 #define METADATA_READ(thread, offset, n_bits) \
694 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
695 uint64_t m64 = *m64_ptr; \
696 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
700 #define METADATA_WRITE(thread, offset, n_bits, value) \
702 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
703 uint64_t m64 = *m64_ptr; \
704 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
706 uint64_t m_new = value; \
708 *m64_ptr = (m64 & ~m64_mask) | (m_new & m64_mask); \
711 #ifndef RTE_SWX_PIPELINE_THREADS_MAX
712 #define RTE_SWX_PIPELINE_THREADS_MAX 16
715 struct rte_swx_pipeline {
716 struct struct_type_tailq struct_types;
717 struct port_in_type_tailq port_in_types;
718 struct port_in_tailq ports_in;
719 struct port_out_type_tailq port_out_types;
720 struct port_out_tailq ports_out;
721 struct extern_type_tailq extern_types;
722 struct extern_obj_tailq extern_objs;
723 struct extern_func_tailq extern_funcs;
724 struct header_tailq headers;
725 struct struct_type *metadata_st;
726 uint32_t metadata_struct_id;
727 struct action_tailq actions;
728 struct table_type_tailq table_types;
729 struct table_tailq tables;
731 struct port_in_runtime *in;
732 struct port_out_runtime *out;
733 struct instruction **action_instructions;
734 struct rte_swx_table_state *table_state;
735 struct instruction *instructions;
736 struct thread threads[RTE_SWX_PIPELINE_THREADS_MAX];
740 uint32_t n_ports_out;
741 uint32_t n_extern_objs;
742 uint32_t n_extern_funcs;
748 uint32_t n_instructions;
756 static struct struct_type *
757 struct_type_find(struct rte_swx_pipeline *p, const char *name)
759 struct struct_type *elem;
761 TAILQ_FOREACH(elem, &p->struct_types, node)
762 if (strcmp(elem->name, name) == 0)
768 static struct field *
769 struct_type_field_find(struct struct_type *st, const char *name)
773 for (i = 0; i < st->n_fields; i++) {
774 struct field *f = &st->fields[i];
776 if (strcmp(f->name, name) == 0)
784 rte_swx_pipeline_struct_type_register(struct rte_swx_pipeline *p,
786 struct rte_swx_field_params *fields,
789 struct struct_type *st;
793 CHECK_NAME(name, EINVAL);
794 CHECK(fields, EINVAL);
795 CHECK(n_fields, EINVAL);
797 for (i = 0; i < n_fields; i++) {
798 struct rte_swx_field_params *f = &fields[i];
801 CHECK_NAME(f->name, EINVAL);
802 CHECK(f->n_bits, EINVAL);
803 CHECK(f->n_bits <= 64, EINVAL);
804 CHECK((f->n_bits & 7) == 0, EINVAL);
806 for (j = 0; j < i; j++) {
807 struct rte_swx_field_params *f_prev = &fields[j];
809 CHECK(strcmp(f->name, f_prev->name), EINVAL);
813 CHECK(!struct_type_find(p, name), EEXIST);
815 /* Node allocation. */
816 st = calloc(1, sizeof(struct struct_type));
819 st->fields = calloc(n_fields, sizeof(struct field));
825 /* Node initialization. */
826 strcpy(st->name, name);
827 for (i = 0; i < n_fields; i++) {
828 struct field *dst = &st->fields[i];
829 struct rte_swx_field_params *src = &fields[i];
831 strcpy(dst->name, src->name);
832 dst->n_bits = src->n_bits;
833 dst->offset = st->n_bits;
835 st->n_bits += src->n_bits;
837 st->n_fields = n_fields;
839 /* Node add to tailq. */
840 TAILQ_INSERT_TAIL(&p->struct_types, st, node);
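/*
 * Illustrative usage sketch (not compiled by default; the type and field
 * names are hypothetical): registering a structure type before the pipeline
 * is built, e.g. for later use as packet meta-data.
 */
#ifdef SWX_PIPELINE_EXAMPLES
static int
example_struct_type_register(struct rte_swx_pipeline *p)
{
	struct rte_swx_field_params fields[] = {
		{"port_in", 32},
		{"port_out", 32},
	};

	return rte_swx_pipeline_struct_type_register(p,
						     "metadata_t",
						     fields,
						     RTE_DIM(fields));
}
#endif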
846 struct_build(struct rte_swx_pipeline *p)
850 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
851 struct thread *t = &p->threads[i];
853 t->structs = calloc(p->n_structs, sizeof(uint8_t *));
854 CHECK(t->structs, ENOMEM);
861 struct_build_free(struct rte_swx_pipeline *p)
865 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
866 struct thread *t = &p->threads[i];
874 struct_free(struct rte_swx_pipeline *p)
876 struct_build_free(p);
880 struct struct_type *elem;
882 elem = TAILQ_FIRST(&p->struct_types);
886 TAILQ_REMOVE(&p->struct_types, elem, node);
895 static struct port_in_type *
896 port_in_type_find(struct rte_swx_pipeline *p, const char *name)
898 struct port_in_type *elem;
903 TAILQ_FOREACH(elem, &p->port_in_types, node)
904 if (strcmp(elem->name, name) == 0)
911 rte_swx_pipeline_port_in_type_register(struct rte_swx_pipeline *p,
913 struct rte_swx_port_in_ops *ops)
915 struct port_in_type *elem;
918 CHECK_NAME(name, EINVAL);
920 CHECK(ops->create, EINVAL);
921 CHECK(ops->free, EINVAL);
922 CHECK(ops->pkt_rx, EINVAL);
923 CHECK(ops->stats_read, EINVAL);
925 CHECK(!port_in_type_find(p, name), EEXIST);
927 /* Node allocation. */
928 elem = calloc(1, sizeof(struct port_in_type));
931 /* Node initialization. */
932 strcpy(elem->name, name);
933 memcpy(&elem->ops, ops, sizeof(*ops));
935 /* Node add to tailq. */
936 TAILQ_INSERT_TAIL(&p->port_in_types, elem, node);
941 static struct port_in *
942 port_in_find(struct rte_swx_pipeline *p, uint32_t port_id)
944 struct port_in *port;
946 TAILQ_FOREACH(port, &p->ports_in, node)
947 if (port->id == port_id)
954 rte_swx_pipeline_port_in_config(struct rte_swx_pipeline *p,
956 const char *port_type_name,
959 struct port_in_type *type = NULL;
960 struct port_in *port = NULL;
965 CHECK(!port_in_find(p, port_id), EINVAL);
967 CHECK_NAME(port_type_name, EINVAL);
968 type = port_in_type_find(p, port_type_name);
971 obj = type->ops.create(args);
974 /* Node allocation. */
975 port = calloc(1, sizeof(struct port_in));
978 /* Node initialization. */
983 /* Node add to tailq. */
984 TAILQ_INSERT_TAIL(&p->ports_in, port, node);
985 if (p->n_ports_in < port_id + 1)
986 p->n_ports_in = port_id + 1;
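/*
 * Illustrative usage sketch (not compiled by default): configuring input
 * port 0. The port type name ("fifo") and the args pointer are hypothetical;
 * the type must have been registered through
 * rte_swx_pipeline_port_in_type_register() beforehand.
 */
#ifdef SWX_PIPELINE_EXAMPLES
static int
example_port_in_config(struct rte_swx_pipeline *p, void *port_args)
{
	return rte_swx_pipeline_port_in_config(p, 0, "fifo", port_args);
}
#endif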
992 port_in_build(struct rte_swx_pipeline *p)
994 struct port_in *port;
997 CHECK(p->n_ports_in, EINVAL);
998 CHECK(rte_is_power_of_2(p->n_ports_in), EINVAL);
1000 for (i = 0; i < p->n_ports_in; i++)
1001 CHECK(port_in_find(p, i), EINVAL);
1003 p->in = calloc(p->n_ports_in, sizeof(struct port_in_runtime));
1004 CHECK(p->in, ENOMEM);
1006 TAILQ_FOREACH(port, &p->ports_in, node) {
1007 struct port_in_runtime *in = &p->in[port->id];
1009 in->pkt_rx = port->type->ops.pkt_rx;
1010 in->obj = port->obj;
1017 port_in_build_free(struct rte_swx_pipeline *p)
1024 port_in_free(struct rte_swx_pipeline *p)
1026 port_in_build_free(p);
1030 struct port_in *port;
1032 port = TAILQ_FIRST(&p->ports_in);
1036 TAILQ_REMOVE(&p->ports_in, port, node);
1037 port->type->ops.free(port->obj);
1041 /* Input port types. */
1043 struct port_in_type *elem;
1045 elem = TAILQ_FIRST(&p->port_in_types);
1049 TAILQ_REMOVE(&p->port_in_types, elem, node);
1057 static struct port_out_type *
1058 port_out_type_find(struct rte_swx_pipeline *p, const char *name)
1060 struct port_out_type *elem;
1065 TAILQ_FOREACH(elem, &p->port_out_types, node)
1066 if (!strcmp(elem->name, name))
1073 rte_swx_pipeline_port_out_type_register(struct rte_swx_pipeline *p,
1075 struct rte_swx_port_out_ops *ops)
1077 struct port_out_type *elem;
1080 CHECK_NAME(name, EINVAL);
1082 CHECK(ops->create, EINVAL);
1083 CHECK(ops->free, EINVAL);
1084 CHECK(ops->pkt_tx, EINVAL);
1085 CHECK(ops->stats_read, EINVAL);
1087 CHECK(!port_out_type_find(p, name), EEXIST);
1089 /* Node allocation. */
1090 elem = calloc(1, sizeof(struct port_out_type));
1091 CHECK(elem, ENOMEM);
1093 /* Node initialization. */
1094 strcpy(elem->name, name);
1095 memcpy(&elem->ops, ops, sizeof(*ops));
1097 /* Node add to tailq. */
1098 TAILQ_INSERT_TAIL(&p->port_out_types, elem, node);
1103 static struct port_out *
1104 port_out_find(struct rte_swx_pipeline *p, uint32_t port_id)
1106 struct port_out *port;
1108 TAILQ_FOREACH(port, &p->ports_out, node)
1109 if (port->id == port_id)
1116 rte_swx_pipeline_port_out_config(struct rte_swx_pipeline *p,
1118 const char *port_type_name,
1121 struct port_out_type *type = NULL;
1122 struct port_out *port = NULL;
1127 CHECK(!port_out_find(p, port_id), EINVAL);
1129 CHECK_NAME(port_type_name, EINVAL);
1130 type = port_out_type_find(p, port_type_name);
1131 CHECK(type, EINVAL);
1133 obj = type->ops.create(args);
1136 /* Node allocation. */
1137 port = calloc(1, sizeof(struct port_out));
1138 CHECK(port, ENOMEM);
1140 /* Node initialization. */
1145 /* Node add to tailq. */
1146 TAILQ_INSERT_TAIL(&p->ports_out, port, node);
1147 if (p->n_ports_out < port_id + 1)
1148 p->n_ports_out = port_id + 1;
1154 port_out_build(struct rte_swx_pipeline *p)
1156 struct port_out *port;
1159 CHECK(p->n_ports_out, EINVAL);
1161 for (i = 0; i < p->n_ports_out; i++)
1162 CHECK(port_out_find(p, i), EINVAL);
1164 p->out = calloc(p->n_ports_out, sizeof(struct port_out_runtime));
1165 CHECK(p->out, ENOMEM);
1167 TAILQ_FOREACH(port, &p->ports_out, node) {
1168 struct port_out_runtime *out = &p->out[port->id];
1170 out->pkt_tx = port->type->ops.pkt_tx;
1171 out->flush = port->type->ops.flush;
1172 out->obj = port->obj;
1179 port_out_build_free(struct rte_swx_pipeline *p)
1186 port_out_free(struct rte_swx_pipeline *p)
1188 port_out_build_free(p);
1192 struct port_out *port;
1194 port = TAILQ_FIRST(&p->ports_out);
1198 TAILQ_REMOVE(&p->ports_out, port, node);
1199 port->type->ops.free(port->obj);
1203 /* Output port types. */
1205 struct port_out_type *elem;
1207 elem = TAILQ_FIRST(&p->port_out_types);
1211 TAILQ_REMOVE(&p->port_out_types, elem, node);
1219 static struct extern_type *
1220 extern_type_find(struct rte_swx_pipeline *p, const char *name)
1222 struct extern_type *elem;
1224 TAILQ_FOREACH(elem, &p->extern_types, node)
1225 if (strcmp(elem->name, name) == 0)
1231 static struct extern_type_member_func *
1232 extern_type_member_func_find(struct extern_type *type, const char *name)
1234 struct extern_type_member_func *elem;
1236 TAILQ_FOREACH(elem, &type->funcs, node)
1237 if (strcmp(elem->name, name) == 0)
1243 static struct extern_obj *
1244 extern_obj_find(struct rte_swx_pipeline *p, const char *name)
1246 struct extern_obj *elem;
1248 TAILQ_FOREACH(elem, &p->extern_objs, node)
1249 if (strcmp(elem->name, name) == 0)
1255 static struct field *
1256 extern_obj_mailbox_field_parse(struct rte_swx_pipeline *p,
1258 struct extern_obj **object)
1260 struct extern_obj *obj;
1262 char *obj_name, *field_name;
1264 if ((name[0] != 'e') || (name[1] != '.'))
1267 obj_name = strdup(&name[2]);
1271 field_name = strchr(obj_name, '.');
1280 obj = extern_obj_find(p, obj_name);
1286 f = struct_type_field_find(obj->type->mailbox_struct_type, field_name);
1300 rte_swx_pipeline_extern_type_register(struct rte_swx_pipeline *p,
1302 const char *mailbox_struct_type_name,
1303 rte_swx_extern_type_constructor_t constructor,
1304 rte_swx_extern_type_destructor_t destructor)
1306 struct extern_type *elem;
1307 struct struct_type *mailbox_struct_type;
1311 CHECK_NAME(name, EINVAL);
1312 CHECK(!extern_type_find(p, name), EEXIST);
1314 CHECK_NAME(mailbox_struct_type_name, EINVAL);
1315 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
1316 CHECK(mailbox_struct_type, EINVAL);
1318 CHECK(constructor, EINVAL);
1319 CHECK(destructor, EINVAL);
1321 /* Node allocation. */
1322 elem = calloc(1, sizeof(struct extern_type));
1323 CHECK(elem, ENOMEM);
1325 /* Node initialization. */
1326 strcpy(elem->name, name);
1327 elem->mailbox_struct_type = mailbox_struct_type;
1328 elem->constructor = constructor;
1329 elem->destructor = destructor;
1330 TAILQ_INIT(&elem->funcs);
1332 /* Node add to tailq. */
1333 TAILQ_INSERT_TAIL(&p->extern_types, elem, node);
1339 rte_swx_pipeline_extern_type_member_func_register(struct rte_swx_pipeline *p,
1340 const char *extern_type_name,
1342 rte_swx_extern_type_member_func_t member_func)
1344 struct extern_type *type;
1345 struct extern_type_member_func *type_member;
1349 CHECK(extern_type_name, EINVAL);
1350 type = extern_type_find(p, extern_type_name);
1351 CHECK(type, EINVAL);
1352 CHECK(type->n_funcs < RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX, ENOSPC);
1354 CHECK(name, EINVAL);
1355 CHECK(!extern_type_member_func_find(type, name), EEXIST);
1357 CHECK(member_func, EINVAL);
1359 /* Node allocation. */
1360 type_member = calloc(1, sizeof(struct extern_type_member_func));
1361 CHECK(type_member, ENOMEM);
1363 /* Node initialization. */
1364 strcpy(type_member->name, name);
1365 type_member->func = member_func;
1366 type_member->id = type->n_funcs;
1368 /* Node add to tailq. */
1369 TAILQ_INSERT_TAIL(&type->funcs, type_member, node);
1376 rte_swx_pipeline_extern_object_config(struct rte_swx_pipeline *p,
1377 const char *extern_type_name,
1381 struct extern_type *type;
1382 struct extern_obj *obj;
1387 CHECK_NAME(extern_type_name, EINVAL);
1388 type = extern_type_find(p, extern_type_name);
1389 CHECK(type, EINVAL);
1391 CHECK_NAME(name, EINVAL);
1392 CHECK(!extern_obj_find(p, name), EEXIST);
1394 /* Node allocation. */
1395 obj = calloc(1, sizeof(struct extern_obj));
1398 /* Object construction. */
1399 obj_handle = type->constructor(args);
1405 /* Node initialization. */
1406 strcpy(obj->name, name);
1408 obj->obj = obj_handle;
1409 obj->struct_id = p->n_structs;
1410 obj->id = p->n_extern_objs;
1412 /* Node add to tailq. */
1413 TAILQ_INSERT_TAIL(&p->extern_objs, obj, node);
1421 extern_obj_build(struct rte_swx_pipeline *p)
1425 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1426 struct thread *t = &p->threads[i];
1427 struct extern_obj *obj;
1429 t->extern_objs = calloc(p->n_extern_objs,
1430 sizeof(struct extern_obj_runtime));
1431 CHECK(t->extern_objs, ENOMEM);
1433 TAILQ_FOREACH(obj, &p->extern_objs, node) {
1434 struct extern_obj_runtime *r =
1435 &t->extern_objs[obj->id];
1436 struct extern_type_member_func *func;
1437 uint32_t mailbox_size =
1438 obj->type->mailbox_struct_type->n_bits / 8;
1442 r->mailbox = calloc(1, mailbox_size);
1443 CHECK(r->mailbox, ENOMEM);
1445 TAILQ_FOREACH(func, &obj->type->funcs, node)
1446 r->funcs[func->id] = func->func;
1448 t->structs[obj->struct_id] = r->mailbox;
1456 extern_obj_build_free(struct rte_swx_pipeline *p)
1460 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1461 struct thread *t = &p->threads[i];
1464 if (!t->extern_objs)
1467 for (j = 0; j < p->n_extern_objs; j++) {
1468 struct extern_obj_runtime *r = &t->extern_objs[j];
1473 free(t->extern_objs);
1474 t->extern_objs = NULL;
1479 extern_obj_free(struct rte_swx_pipeline *p)
1481 extern_obj_build_free(p);
1483 /* Extern objects. */
1485 struct extern_obj *elem;
1487 elem = TAILQ_FIRST(&p->extern_objs);
1491 TAILQ_REMOVE(&p->extern_objs, elem, node);
1493 elem->type->destructor(elem->obj);
1499 struct extern_type *elem;
1501 elem = TAILQ_FIRST(&p->extern_types);
1505 TAILQ_REMOVE(&p->extern_types, elem, node);
1508 struct extern_type_member_func *func;
1510 func = TAILQ_FIRST(&elem->funcs);
1514 TAILQ_REMOVE(&elem->funcs, func, node);
1525 static struct extern_func *
1526 extern_func_find(struct rte_swx_pipeline *p, const char *name)
1528 struct extern_func *elem;
1530 TAILQ_FOREACH(elem, &p->extern_funcs, node)
1531 if (strcmp(elem->name, name) == 0)
1537 static struct field *
1538 extern_func_mailbox_field_parse(struct rte_swx_pipeline *p,
1540 struct extern_func **function)
1542 struct extern_func *func;
1544 char *func_name, *field_name;
1546 if ((name[0] != 'f') || (name[1] != '.'))
1549 func_name = strdup(&name[2]);
1553 field_name = strchr(func_name, '.');
1562 func = extern_func_find(p, func_name);
1568 f = struct_type_field_find(func->mailbox_struct_type, field_name);
1582 rte_swx_pipeline_extern_func_register(struct rte_swx_pipeline *p,
1584 const char *mailbox_struct_type_name,
1585 rte_swx_extern_func_t func)
1587 struct extern_func *f;
1588 struct struct_type *mailbox_struct_type;
1592 CHECK_NAME(name, EINVAL);
1593 CHECK(!extern_func_find(p, name), EEXIST);
1595 CHECK_NAME(mailbox_struct_type_name, EINVAL);
1596 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
1597 CHECK(mailbox_struct_type, EINVAL);
1599 CHECK(func, EINVAL);
1601 /* Node allocation. */
1602 f = calloc(1, sizeof(struct extern_func));
1603 CHECK(f, ENOMEM);
1605 /* Node initialization. */
1606 strcpy(f->name, name);
1607 f->mailbox_struct_type = mailbox_struct_type;
1609 f->struct_id = p->n_structs;
1610 f->id = p->n_extern_funcs;
1612 /* Node add to tailq. */
1613 TAILQ_INSERT_TAIL(&p->extern_funcs, f, node);
1614 p->n_extern_funcs++;
1621 extern_func_build(struct rte_swx_pipeline *p)
1625 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1626 struct thread *t = &p->threads[i];
1627 struct extern_func *func;
1629 /* Memory allocation. */
1630 t->extern_funcs = calloc(p->n_extern_funcs,
1631 sizeof(struct extern_func_runtime));
1632 CHECK(t->extern_funcs, ENOMEM);
1634 /* Extern function. */
1635 TAILQ_FOREACH(func, &p->extern_funcs, node) {
1636 struct extern_func_runtime *r =
1637 &t->extern_funcs[func->id];
1638 uint32_t mailbox_size =
1639 func->mailbox_struct_type->n_bits / 8;
1641 r->func = func->func;
1643 r->mailbox = calloc(1, mailbox_size);
1644 CHECK(r->mailbox, ENOMEM);
1646 t->structs[func->struct_id] = r->mailbox;
1654 extern_func_build_free(struct rte_swx_pipeline *p)
1658 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1659 struct thread *t = &p->threads[i];
1662 if (!t->extern_funcs)
1665 for (j = 0; j < p->n_extern_funcs; j++) {
1666 struct extern_func_runtime *r = &t->extern_funcs[j];
1671 free(t->extern_funcs);
1672 t->extern_funcs = NULL;
1677 extern_func_free(struct rte_swx_pipeline *p)
1679 extern_func_build_free(p);
1682 struct extern_func *elem;
1684 elem = TAILQ_FIRST(&p->extern_funcs);
1688 TAILQ_REMOVE(&p->extern_funcs, elem, node);
1696 static struct header *
1697 header_find(struct rte_swx_pipeline *p, const char *name)
1699 struct header *elem;
1701 TAILQ_FOREACH(elem, &p->headers, node)
1702 if (strcmp(elem->name, name) == 0)
1708 static struct header *
1709 header_parse(struct rte_swx_pipeline *p,
1712 if (name[0] != 'h' || name[1] != '.')
1715 return header_find(p, &name[2]);
1718 static struct field *
1719 header_field_parse(struct rte_swx_pipeline *p,
1721 struct header **header)
1725 char *header_name, *field_name;
1727 if ((name[0] != 'h') || (name[1] != '.'))
1730 header_name = strdup(&name[2]);
1734 field_name = strchr(header_name, '.');
1743 h = header_find(p, header_name);
1749 f = struct_type_field_find(h->st, field_name);
1763 rte_swx_pipeline_packet_header_register(struct rte_swx_pipeline *p,
1765 const char *struct_type_name)
1767 struct struct_type *st;
1769 size_t n_headers_max;
1772 CHECK_NAME(name, EINVAL);
1773 CHECK_NAME(struct_type_name, EINVAL);
1775 CHECK(!header_find(p, name), EEXIST);
1777 st = struct_type_find(p, struct_type_name);
1780 n_headers_max = RTE_SIZEOF_FIELD(struct thread, valid_headers) * 8;
1781 CHECK(p->n_headers < n_headers_max, ENOSPC);
1783 /* Node allocation. */
1784 h = calloc(1, sizeof(struct header));
1787 /* Node initialization. */
1788 strcpy(h->name, name);
1790 h->struct_id = p->n_structs;
1791 h->id = p->n_headers;
1793 /* Node add to tailq. */
1794 TAILQ_INSERT_TAIL(&p->headers, h, node);
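/*
 * Illustrative usage sketch (not compiled by default; the names are
 * hypothetical): registering a packet header that instantiates a structure
 * type previously registered with rte_swx_pipeline_struct_type_register().
 */
#ifdef SWX_PIPELINE_EXAMPLES
static int
example_header_register(struct rte_swx_pipeline *p)
{
	return rte_swx_pipeline_packet_header_register(p, "ethernet", "ethernet_h");
}
#endif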
1802 header_build(struct rte_swx_pipeline *p)
1805 uint32_t n_bytes = 0, i;
1807 TAILQ_FOREACH(h, &p->headers, node) {
1808 n_bytes += h->st->n_bits / 8;
1811 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1812 struct thread *t = &p->threads[i];
1813 uint32_t offset = 0;
1815 t->headers = calloc(p->n_headers,
1816 sizeof(struct header_runtime));
1817 CHECK(t->headers, ENOMEM);
1819 t->headers_out = calloc(p->n_headers,
1820 sizeof(struct header_out_runtime));
1821 CHECK(t->headers_out, ENOMEM);
1823 t->header_storage = calloc(1, n_bytes);
1824 CHECK(t->header_storage, ENOMEM);
1826 t->header_out_storage = calloc(1, n_bytes);
1827 CHECK(t->header_out_storage, ENOMEM);
1829 TAILQ_FOREACH(h, &p->headers, node) {
1830 uint8_t *header_storage;
1832 header_storage = &t->header_storage[offset];
1833 offset += h->st->n_bits / 8;
1835 t->headers[h->id].ptr0 = header_storage;
1836 t->structs[h->struct_id] = header_storage;
1844 header_build_free(struct rte_swx_pipeline *p)
1848 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1849 struct thread *t = &p->threads[i];
1851 free(t->headers_out);
1852 t->headers_out = NULL;
1857 free(t->header_out_storage);
1858 t->header_out_storage = NULL;
1860 free(t->header_storage);
1861 t->header_storage = NULL;
1866 header_free(struct rte_swx_pipeline *p)
1868 header_build_free(p);
1871 struct header *elem;
1873 elem = TAILQ_FIRST(&p->headers);
1877 TAILQ_REMOVE(&p->headers, elem, node);
1885 static struct field *
1886 metadata_field_parse(struct rte_swx_pipeline *p, const char *name)
1888 if (!p->metadata_st)
1891 if (name[0] != 'm' || name[1] != '.')
1894 return struct_type_field_find(p->metadata_st, &name[2]);
1898 rte_swx_pipeline_packet_metadata_register(struct rte_swx_pipeline *p,
1899 const char *struct_type_name)
1901 struct struct_type *st = NULL;
1905 CHECK_NAME(struct_type_name, EINVAL);
1906 st = struct_type_find(p, struct_type_name);
1908 CHECK(!p->metadata_st, EINVAL);
1910 p->metadata_st = st;
1911 p->metadata_struct_id = p->n_structs;
1919 metadata_build(struct rte_swx_pipeline *p)
1921 uint32_t n_bytes = p->metadata_st->n_bits / 8;
1924 /* Thread-level initialization. */
1925 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1926 struct thread *t = &p->threads[i];
1929 metadata = calloc(1, n_bytes);
1930 CHECK(metadata, ENOMEM);
1932 t->metadata = metadata;
1933 t->structs[p->metadata_struct_id] = metadata;
1940 metadata_build_free(struct rte_swx_pipeline *p)
1944 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1945 struct thread *t = &p->threads[i];
1953 metadata_free(struct rte_swx_pipeline *p)
1955 metadata_build_free(p);
1961 static struct field *
1962 action_field_parse(struct action *action, const char *name);
1964 static struct field *
1965 struct_field_parse(struct rte_swx_pipeline *p,
1966 struct action *action,
1968 uint32_t *struct_id)
1975 struct header *header;
1977 f = header_field_parse(p, name, &header);
1981 *struct_id = header->struct_id;
1987 f = metadata_field_parse(p, name);
1991 *struct_id = p->metadata_struct_id;
2000 f = action_field_parse(action, name);
2010 struct extern_obj *obj;
2012 f = extern_obj_mailbox_field_parse(p, name, &obj);
2016 *struct_id = obj->struct_id;
2022 struct extern_func *func;
2024 f = extern_func_mailbox_field_parse(p, name, &func);
2028 *struct_id = func->struct_id;
2038 pipeline_port_inc(struct rte_swx_pipeline *p)
2040 p->port_id = (p->port_id + 1) & (p->n_ports_in - 1);
2044 thread_ip_reset(struct rte_swx_pipeline *p, struct thread *t)
2046 t->ip = p->instructions;
2050 thread_ip_inc(struct rte_swx_pipeline *p);
2053 thread_ip_inc(struct rte_swx_pipeline *p)
2055 struct thread *t = &p->threads[p->thread_id];
2061 thread_ip_inc_cond(struct thread *t, int cond)
2067 thread_yield(struct rte_swx_pipeline *p)
2069 p->thread_id = (p->thread_id + 1) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
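/*
 * Illustrative sketch (not compiled by default): thread_yield() and
 * pipeline_port_inc() advance in round-robin fashion using
 * "(x + 1) & (n - 1)", which wraps from n - 1 back to 0 and therefore
 * requires n to be a power of two (see the rte_is_power_of_2() check in
 * port_in_build()).
 */
#ifdef SWX_PIPELINE_EXAMPLES
static uint32_t
example_round_robin_next(uint32_t current, uint32_t n_power_of_2)
{
	return (current + 1) & (n_power_of_2 - 1);
}
#endif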
2076 instr_rx_translate(struct rte_swx_pipeline *p,
2077 struct action *action,
2080 struct instruction *instr,
2081 struct instruction_data *data __rte_unused)
2085 CHECK(!action, EINVAL);
2086 CHECK(n_tokens == 2, EINVAL);
2088 f = metadata_field_parse(p, tokens[1]);
2091 instr->type = INSTR_RX;
2092 instr->io.io.offset = f->offset / 8;
2093 instr->io.io.n_bits = f->n_bits;
2098 instr_rx_exec(struct rte_swx_pipeline *p);
2101 instr_rx_exec(struct rte_swx_pipeline *p)
2103 struct thread *t = &p->threads[p->thread_id];
2104 struct instruction *ip = t->ip;
2105 struct port_in_runtime *port = &p->in[p->port_id];
2106 struct rte_swx_pkt *pkt = &t->pkt;
2110 pkt_received = port->pkt_rx(port->obj, pkt);
2111 t->ptr = &pkt->pkt[pkt->offset];
2112 rte_prefetch0(t->ptr);
2114 TRACE("[Thread %2u] rx %s from port %u\n",
2116 pkt_received ? "1 pkt" : "0 pkts",
2120 t->valid_headers = 0;
2121 t->n_headers_out = 0;
2124 METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, p->port_id);
2127 t->table_state = p->table_state;
2130 pipeline_port_inc(p);
2131 thread_ip_inc_cond(t, pkt_received);
2139 instr_tx_translate(struct rte_swx_pipeline *p,
2140 struct action *action __rte_unused,
2143 struct instruction *instr,
2144 struct instruction_data *data __rte_unused)
2148 CHECK(n_tokens == 2, EINVAL);
2150 f = metadata_field_parse(p, tokens[1]);
2153 instr->type = INSTR_TX;
2154 instr->io.io.offset = f->offset / 8;
2155 instr->io.io.n_bits = f->n_bits;
2160 emit_handler(struct thread *t)
2162 struct header_out_runtime *h0 = &t->headers_out[0];
2163 struct header_out_runtime *h1 = &t->headers_out[1];
2164 uint32_t offset = 0, i;
2166 /* No header change or header decapsulation. */
2167 if ((t->n_headers_out == 1) &&
2168 (h0->ptr + h0->n_bytes == t->ptr)) {
2169 TRACE("Emit handler: no header change or header decap.\n");
2171 t->pkt.offset -= h0->n_bytes;
2172 t->pkt.length += h0->n_bytes;
2177 /* Header encapsulation (optionally, with prior header decapsulation). */
2178 if ((t->n_headers_out == 2) &&
2179 (h1->ptr + h1->n_bytes == t->ptr) &&
2180 (h0->ptr == h0->ptr0)) {
2183 TRACE("Emit handler: header encapsulation.\n");
2185 offset = h0->n_bytes + h1->n_bytes;
2186 memcpy(t->ptr - offset, h0->ptr, h0->n_bytes);
2187 t->pkt.offset -= offset;
2188 t->pkt.length += offset;
2193 /* Header insertion. */
2196 /* Header extraction. */
2199 /* For any other case. */
2200 TRACE("Emit handler: complex case.\n");
2202 for (i = 0; i < t->n_headers_out; i++) {
2203 struct header_out_runtime *h = &t->headers_out[i];
2205 memcpy(&t->header_out_storage[offset], h->ptr, h->n_bytes);
2206 offset += h->n_bytes;
2210 memcpy(t->ptr - offset, t->header_out_storage, offset);
2211 t->pkt.offset -= offset;
2212 t->pkt.length += offset;
2217 instr_tx_exec(struct rte_swx_pipeline *p);
2220 instr_tx_exec(struct rte_swx_pipeline *p)
2222 struct thread *t = &p->threads[p->thread_id];
2223 struct instruction *ip = t->ip;
2224 uint64_t port_id = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
2225 struct port_out_runtime *port = &p->out[port_id];
2226 struct rte_swx_pkt *pkt = &t->pkt;
2228 TRACE("[Thread %2u]: tx 1 pkt to port %u\n",
2236 port->pkt_tx(port->obj, pkt);
2239 thread_ip_reset(p, t);
2247 instr_hdr_extract_translate(struct rte_swx_pipeline *p,
2248 struct action *action,
2251 struct instruction *instr,
2252 struct instruction_data *data __rte_unused)
2256 CHECK(!action, EINVAL);
2257 CHECK(n_tokens == 2, EINVAL);
2259 h = header_parse(p, tokens[1]);
2262 instr->type = INSTR_HDR_EXTRACT;
2263 instr->io.hdr.header_id[0] = h->id;
2264 instr->io.hdr.struct_id[0] = h->struct_id;
2265 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
2270 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract);
2273 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract)
2275 struct thread *t = &p->threads[p->thread_id];
2276 struct instruction *ip = t->ip;
2277 uint64_t valid_headers = t->valid_headers;
2278 uint8_t *ptr = t->ptr;
2279 uint32_t offset = t->pkt.offset;
2280 uint32_t length = t->pkt.length;
2283 for (i = 0; i < n_extract; i++) {
2284 uint32_t header_id = ip->io.hdr.header_id[i];
2285 uint32_t struct_id = ip->io.hdr.struct_id[i];
2286 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
2288 TRACE("[Thread %2u]: extract header %u (%u bytes)\n",
2294 t->structs[struct_id] = ptr;
2295 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
2304 t->valid_headers = valid_headers;
2307 t->pkt.offset = offset;
2308 t->pkt.length = length;
2313 instr_hdr_extract_exec(struct rte_swx_pipeline *p)
2315 __instr_hdr_extract_exec(p, 1);
2322 instr_hdr_extract2_exec(struct rte_swx_pipeline *p)
2324 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
2327 __instr_hdr_extract_exec(p, 2);
2334 instr_hdr_extract3_exec(struct rte_swx_pipeline *p)
2336 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
2339 __instr_hdr_extract_exec(p, 3);
2346 instr_hdr_extract4_exec(struct rte_swx_pipeline *p)
2348 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
2351 __instr_hdr_extract_exec(p, 4);
2358 instr_hdr_extract5_exec(struct rte_swx_pipeline *p)
2360 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
2363 __instr_hdr_extract_exec(p, 5);
2370 instr_hdr_extract6_exec(struct rte_swx_pipeline *p)
2372 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
2375 __instr_hdr_extract_exec(p, 6);
2382 instr_hdr_extract7_exec(struct rte_swx_pipeline *p)
2384 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
2387 __instr_hdr_extract_exec(p, 7);
2394 instr_hdr_extract8_exec(struct rte_swx_pipeline *p)
2396 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
2399 __instr_hdr_extract_exec(p, 8);
2409 instr_hdr_emit_translate(struct rte_swx_pipeline *p,
2410 struct action *action __rte_unused,
2413 struct instruction *instr,
2414 struct instruction_data *data __rte_unused)
2418 CHECK(n_tokens == 2, EINVAL);
2420 h = header_parse(p, tokens[1]);
2423 instr->type = INSTR_HDR_EMIT;
2424 instr->io.hdr.header_id[0] = h->id;
2425 instr->io.hdr.struct_id[0] = h->struct_id;
2426 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
2431 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit);
2434 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit)
2436 struct thread *t = &p->threads[p->thread_id];
2437 struct instruction *ip = t->ip;
2438 uint32_t n_headers_out = t->n_headers_out;
2439 struct header_out_runtime *ho = &t->headers_out[n_headers_out - 1];
2440 uint8_t *ho_ptr = NULL;
2441 uint32_t ho_nbytes = 0, i;
2443 for (i = 0; i < n_emit; i++) {
2444 uint32_t header_id = ip->io.hdr.header_id[i];
2445 uint32_t struct_id = ip->io.hdr.struct_id[i];
2446 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
2448 struct header_runtime *hi = &t->headers[header_id];
2449 uint8_t *hi_ptr = t->structs[struct_id];
2451 TRACE("[Thread %2u]: emit header %u\n",
2457 if (!t->n_headers_out) {
2458 ho = &t->headers_out[0];
2460 ho->ptr0 = hi->ptr0;
2464 ho_nbytes = n_bytes;
2471 ho_nbytes = ho->n_bytes;
2475 if (ho_ptr + ho_nbytes == hi_ptr) {
2476 ho_nbytes += n_bytes;
2478 ho->n_bytes = ho_nbytes;
2481 ho->ptr0 = hi->ptr0;
2485 ho_nbytes = n_bytes;
2491 ho->n_bytes = ho_nbytes;
2492 t->n_headers_out = n_headers_out;
2496 instr_hdr_emit_exec(struct rte_swx_pipeline *p)
2498 __instr_hdr_emit_exec(p, 1);
2505 instr_hdr_emit_tx_exec(struct rte_swx_pipeline *p)
2507 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
2510 __instr_hdr_emit_exec(p, 1);
2515 instr_hdr_emit2_tx_exec(struct rte_swx_pipeline *p)
2517 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
2520 __instr_hdr_emit_exec(p, 2);
2525 instr_hdr_emit3_tx_exec(struct rte_swx_pipeline *p)
2527 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
2530 __instr_hdr_emit_exec(p, 3);
2535 instr_hdr_emit4_tx_exec(struct rte_swx_pipeline *p)
2537 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
2540 __instr_hdr_emit_exec(p, 4);
2545 instr_hdr_emit5_tx_exec(struct rte_swx_pipeline *p)
2547 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
2550 __instr_hdr_emit_exec(p, 5);
2555 instr_hdr_emit6_tx_exec(struct rte_swx_pipeline *p)
2557 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
2560 __instr_hdr_emit_exec(p, 6);
2565 instr_hdr_emit7_tx_exec(struct rte_swx_pipeline *p)
2567 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
2570 __instr_hdr_emit_exec(p, 7);
2575 instr_hdr_emit8_tx_exec(struct rte_swx_pipeline *p)
2577 TRACE("[Thread %2u] *** The next 9 instructions are fused. ***\n",
2580 __instr_hdr_emit_exec(p, 8);
2588 instr_hdr_validate_translate(struct rte_swx_pipeline *p,
2589 struct action *action __rte_unused,
2592 struct instruction *instr,
2593 struct instruction_data *data __rte_unused)
2597 CHECK(n_tokens == 2, EINVAL);
2599 h = header_parse(p, tokens[1]);
2602 instr->type = INSTR_HDR_VALIDATE;
2603 instr->valid.header_id = h->id;
2608 instr_hdr_validate_exec(struct rte_swx_pipeline *p)
2610 struct thread *t = &p->threads[p->thread_id];
2611 struct instruction *ip = t->ip;
2612 uint32_t header_id = ip->valid.header_id;
2614 TRACE("[Thread %2u] validate header %u\n", p->thread_id, header_id);
2617 t->valid_headers = MASK64_BIT_SET(t->valid_headers, header_id);
2627 instr_hdr_invalidate_translate(struct rte_swx_pipeline *p,
2628 struct action *action __rte_unused,
2631 struct instruction *instr,
2632 struct instruction_data *data __rte_unused)
2636 CHECK(n_tokens == 2, EINVAL);
2638 h = header_parse(p, tokens[1]);
2641 instr->type = INSTR_HDR_INVALIDATE;
2642 instr->valid.header_id = h->id;
2647 instr_hdr_invalidate_exec(struct rte_swx_pipeline *p)
2649 struct thread *t = &p->threads[p->thread_id];
2650 struct instruction *ip = t->ip;
2651 uint32_t header_id = ip->valid.header_id;
2653 TRACE("[Thread %2u] invalidate header %u\n", p->thread_id, header_id);
2656 t->valid_headers = MASK64_BIT_CLR(t->valid_headers, header_id);
2666 instr_mov_translate(struct rte_swx_pipeline *p,
2667 struct action *action,
2670 struct instruction *instr,
2671 struct instruction_data *data __rte_unused)
2673 char *dst = tokens[1], *src = tokens[2];
2674 struct field *fdst, *fsrc;
2675 uint32_t dst_struct_id, src_struct_id, src_val;
2677 CHECK(n_tokens == 3, EINVAL);
2679 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
2680 CHECK(fdst, EINVAL);
2683 fsrc = struct_field_parse(p, action, src, &src_struct_id);
2685 instr->type = INSTR_MOV;
2686 if ((dst[0] == 'h' && src[0] != 'h') ||
2687 (dst[0] != 'h' && src[0] == 'h'))
2688 instr->type = INSTR_MOV_S;
2690 instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
2691 instr->mov.dst.n_bits = fdst->n_bits;
2692 instr->mov.dst.offset = fdst->offset / 8;
2693 instr->mov.src.struct_id = (uint8_t)src_struct_id;
2694 instr->mov.src.n_bits = fsrc->n_bits;
2695 instr->mov.src.offset = fsrc->offset / 8;
2700 src_val = strtoul(src, &src, 0);
2701 CHECK(!src[0], EINVAL);
2704 src_val = htonl(src_val);
2706 instr->type = INSTR_MOV_I;
2707 instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
2708 instr->mov.dst.n_bits = fdst->n_bits;
2709 instr->mov.dst.offset = fdst->offset / 8;
2710 instr->mov.src_val = (uint32_t)src_val;
2715 instr_mov_exec(struct rte_swx_pipeline *p)
2717 struct thread *t = &p->threads[p->thread_id];
2718 struct instruction *ip = t->ip;
2720 TRACE("[Thread %2u] mov\n",
2730 instr_mov_s_exec(struct rte_swx_pipeline *p)
2732 struct thread *t = &p->threads[p->thread_id];
2733 struct instruction *ip = t->ip;
2735 TRACE("[Thread %2u] mov (s)\n",
2745 instr_mov_i_exec(struct rte_swx_pipeline *p)
2747 struct thread *t = &p->threads[p->thread_id];
2748 struct instruction *ip = t->ip;
2750 TRACE("[Thread %2u] mov m.f %x\n",
2764 instr_dma_translate(struct rte_swx_pipeline *p,
2765 struct action *action,
2768 struct instruction *instr,
2769 struct instruction_data *data __rte_unused)
2771 char *dst = tokens[1];
2772 char *src = tokens[2];
2776 CHECK(action, EINVAL);
2777 CHECK(n_tokens == 3, EINVAL);
2779 h = header_parse(p, dst);
2782 tf = action_field_parse(action, src);
2785 instr->type = INSTR_DMA_HT;
2786 instr->dma.dst.header_id[0] = h->id;
2787 instr->dma.dst.struct_id[0] = h->struct_id;
2788 instr->dma.n_bytes[0] = h->st->n_bits / 8;
2789 instr->dma.src.offset[0] = tf->offset / 8;
2795 __instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma);
2798 __instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma)
2800 struct thread *t = &p->threads[p->thread_id];
2801 struct instruction *ip = t->ip;
2802 uint8_t *action_data = t->structs[0];
2803 uint64_t valid_headers = t->valid_headers;
2806 for (i = 0; i < n_dma; i++) {
2807 uint32_t header_id = ip->dma.dst.header_id[i];
2808 uint32_t struct_id = ip->dma.dst.struct_id[i];
2809 uint32_t offset = ip->dma.src.offset[i];
2810 uint32_t n_bytes = ip->dma.n_bytes[i];
2812 struct header_runtime *h = &t->headers[header_id];
2813 uint8_t *h_ptr0 = h->ptr0;
2814 uint8_t *h_ptr = t->structs[struct_id];
2816 void *dst = MASK64_BIT_GET(valid_headers, header_id) ?
2818 void *src = &action_data[offset];
2820 TRACE("[Thread %2u] dma h.s t.f\n", p->thread_id);
2823 memcpy(dst, src, n_bytes);
2824 t->structs[struct_id] = dst;
2825 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
2828 t->valid_headers = valid_headers;
2832 instr_dma_ht_exec(struct rte_swx_pipeline *p)
2834 __instr_dma_ht_exec(p, 1);
2841 instr_dma_ht2_exec(struct rte_swx_pipeline *p)
2843 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
2846 __instr_dma_ht_exec(p, 2);
2853 instr_dma_ht3_exec(struct rte_swx_pipeline *p)
2855 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
2858 __instr_dma_ht_exec(p, 3);
2865 instr_dma_ht4_exec(struct rte_swx_pipeline *p)
2867 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
2870 __instr_dma_ht_exec(p, 4);
2877 instr_dma_ht5_exec(struct rte_swx_pipeline *p)
2879 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
2882 __instr_dma_ht_exec(p, 5);
2889 instr_dma_ht6_exec(struct rte_swx_pipeline *p)
2891 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
2894 __instr_dma_ht_exec(p, 6);
2901 instr_dma_ht7_exec(struct rte_swx_pipeline *p)
2903 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
2906 __instr_dma_ht_exec(p, 7);
2913 instr_dma_ht8_exec(struct rte_swx_pipeline *p)
2915 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
2918 __instr_dma_ht_exec(p, 8);
2928 instr_alu_add_translate(struct rte_swx_pipeline *p,
2929 struct action *action,
2932 struct instruction *instr,
2933 struct instruction_data *data __rte_unused)
2935 char *dst = tokens[1], *src = tokens[2];
2936 struct field *fdst, *fsrc;
2937 uint32_t dst_struct_id, src_struct_id, src_val;
2939 CHECK(n_tokens == 3, EINVAL);
2941 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
2942 CHECK(fdst, EINVAL);
2944 /* ADD, ADD_HM, ADD_MH, ADD_HH. */
2945 fsrc = struct_field_parse(p, action, src, &src_struct_id);
2947 instr->type = INSTR_ALU_ADD;
2948 if (dst[0] == 'h' && src[0] == 'm')
2949 instr->type = INSTR_ALU_ADD_HM;
2950 if (dst[0] == 'm' && src[0] == 'h')
2951 instr->type = INSTR_ALU_ADD_MH;
2952 if (dst[0] == 'h' && src[0] == 'h')
2953 instr->type = INSTR_ALU_ADD_HH;
2955 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
2956 instr->alu.dst.n_bits = fdst->n_bits;
2957 instr->alu.dst.offset = fdst->offset / 8;
2958 instr->alu.src.struct_id = (uint8_t)src_struct_id;
2959 instr->alu.src.n_bits = fsrc->n_bits;
2960 instr->alu.src.offset = fsrc->offset / 8;
2964 /* ADD_MI, ADD_HI. */
2965 src_val = strtoul(src, &src, 0);
2966 CHECK(!src[0], EINVAL);
2968 instr->type = INSTR_ALU_ADD_MI;
2970 instr->type = INSTR_ALU_ADD_HI;
2972 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
2973 instr->alu.dst.n_bits = fdst->n_bits;
2974 instr->alu.dst.offset = fdst->offset / 8;
2975 instr->alu.src_val = (uint32_t)src_val;
2980 instr_alu_sub_translate(struct rte_swx_pipeline *p,
2981 struct action *action,
2984 struct instruction *instr,
2985 struct instruction_data *data __rte_unused)
2987 char *dst = tokens[1], *src = tokens[2];
2988 struct field *fdst, *fsrc;
2989 uint32_t dst_struct_id, src_struct_id, src_val;
2991 CHECK(n_tokens == 3, EINVAL);
2993 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
2994 CHECK(fdst, EINVAL);
2996 /* SUB, SUB_HM, SUB_MH, SUB_HH. */
2997 fsrc = struct_field_parse(p, action, src, &src_struct_id);
2999 instr->type = INSTR_ALU_SUB;
3000 if (dst[0] == 'h' && src[0] == 'm')
3001 instr->type = INSTR_ALU_SUB_HM;
3002 if (dst[0] == 'm' && src[0] == 'h')
3003 instr->type = INSTR_ALU_SUB_MH;
3004 if (dst[0] == 'h' && src[0] == 'h')
3005 instr->type = INSTR_ALU_SUB_HH;
3007 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3008 instr->alu.dst.n_bits = fdst->n_bits;
3009 instr->alu.dst.offset = fdst->offset / 8;
3010 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3011 instr->alu.src.n_bits = fsrc->n_bits;
3012 instr->alu.src.offset = fsrc->offset / 8;
3016 /* SUB_MI, SUB_HI. */
3017 src_val = strtoul(src, &src, 0);
3018 CHECK(!src[0], EINVAL);
3020 instr->type = INSTR_ALU_SUB_MI;
3022 instr->type = INSTR_ALU_SUB_HI;
3024 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3025 instr->alu.dst.n_bits = fdst->n_bits;
3026 instr->alu.dst.offset = fdst->offset / 8;
3027 instr->alu.src_val = (uint32_t)src_val;
3032 instr_alu_ckadd_translate(struct rte_swx_pipeline *p,
3033 struct action *action __rte_unused,
3036 struct instruction *instr,
3037 struct instruction_data *data __rte_unused)
3039 char *dst = tokens[1], *src = tokens[2];
3040 struct header *hdst, *hsrc;
3041 struct field *fdst, *fsrc;
3043 CHECK(n_tokens == 3, EINVAL);
3045 fdst = header_field_parse(p, dst, &hdst);
3046 CHECK(fdst && (fdst->n_bits == 16), EINVAL);
3049 fsrc = header_field_parse(p, src, &hsrc);
3051 instr->type = INSTR_ALU_CKADD_FIELD;
3052 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
3053 instr->alu.dst.n_bits = fdst->n_bits;
3054 instr->alu.dst.offset = fdst->offset / 8;
3055 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
3056 instr->alu.src.n_bits = fsrc->n_bits;
3057 instr->alu.src.offset = fsrc->offset / 8;
3061 /* CKADD_STRUCT, CKADD_STRUCT20. */
3062 hsrc = header_parse(p, src);
3063 CHECK(hsrc, EINVAL);
3065 instr->type = INSTR_ALU_CKADD_STRUCT;
3066 if ((hsrc->st->n_bits / 8) == 20)
3067 instr->type = INSTR_ALU_CKADD_STRUCT20;
3069 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
3070 instr->alu.dst.n_bits = fdst->n_bits;
3071 instr->alu.dst.offset = fdst->offset / 8;
3072 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
3073 instr->alu.src.n_bits = hsrc->st->n_bits;
3074 instr->alu.src.offset = 0; /* Unused. */
3079 instr_alu_cksub_translate(struct rte_swx_pipeline *p,
3080 struct action *action __rte_unused,
3083 struct instruction *instr,
3084 struct instruction_data *data __rte_unused)
3086 char *dst = tokens[1], *src = tokens[2];
3087 struct header *hdst, *hsrc;
3088 struct field *fdst, *fsrc;
3090 CHECK(n_tokens == 3, EINVAL);
3092 fdst = header_field_parse(p, dst, &hdst);
3093 CHECK(fdst && (fdst->n_bits == 16), EINVAL);
3095 fsrc = header_field_parse(p, src, &hsrc);
3096 CHECK(fsrc, EINVAL);
3098 instr->type = INSTR_ALU_CKSUB_FIELD;
3099 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
3100 instr->alu.dst.n_bits = fdst->n_bits;
3101 instr->alu.dst.offset = fdst->offset / 8;
3102 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
3103 instr->alu.src.n_bits = fsrc->n_bits;
3104 instr->alu.src.offset = fsrc->offset / 8;
3109 instr_alu_shl_translate(struct rte_swx_pipeline *p,
3110 struct action *action,
3113 struct instruction *instr,
3114 struct instruction_data *data __rte_unused)
3116 char *dst = tokens[1], *src = tokens[2];
3117 struct field *fdst, *fsrc;
3118 uint32_t dst_struct_id, src_struct_id, src_val;
3120 CHECK(n_tokens == 3, EINVAL);
3122 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3123 CHECK(fdst, EINVAL);
3125 /* SHL, SHL_HM, SHL_MH, SHL_HH. */
3126 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3128 instr->type = INSTR_ALU_SHL;
3129 if (dst[0] == 'h' && src[0] == 'm')
3130 instr->type = INSTR_ALU_SHL_HM;
3131 if (dst[0] == 'm' && src[0] == 'h')
3132 instr->type = INSTR_ALU_SHL_MH;
3133 if (dst[0] == 'h' && src[0] == 'h')
3134 instr->type = INSTR_ALU_SHL_HH;
3136 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3137 instr->alu.dst.n_bits = fdst->n_bits;
3138 instr->alu.dst.offset = fdst->offset / 8;
3139 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3140 instr->alu.src.n_bits = fsrc->n_bits;
3141 instr->alu.src.offset = fsrc->offset / 8;
3145 /* SHL_MI, SHL_HI. */
3146 src_val = strtoul(src, &src, 0);
3147 CHECK(!src[0], EINVAL);
3149 instr->type = INSTR_ALU_SHL_MI;
3151 instr->type = INSTR_ALU_SHL_HI;
3153 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3154 instr->alu.dst.n_bits = fdst->n_bits;
3155 instr->alu.dst.offset = fdst->offset / 8;
3156 instr->alu.src_val = (uint32_t)src_val;
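/* The SHL/SHL_HM/SHL_MH/SHL_HH split above keys off the first letter of each
 * operand name ('h' for a header field, 'm' for meta-data): header fields are
 * stored in network byte order while meta-data is in host byte order, so
 * resolving the byte-swap decision once at translation time keeps it off the
 * per-packet path. The add/sub translations above use the same pattern, and
 * the bitwise and/or/xor translations below collapse the mixed cases into a
 * single *_S (swap) variant.
 */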
3161 instr_alu_and_translate(struct rte_swx_pipeline *p,
3162 struct action *action,
3165 struct instruction *instr,
3166 struct instruction_data *data __rte_unused)
3168 char *dst = tokens[1], *src = tokens[2];
3169 struct field *fdst, *fsrc;
3170 uint32_t dst_struct_id, src_struct_id, src_val;
3172 CHECK(n_tokens == 3, EINVAL);
3174 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3175 CHECK(fdst, EINVAL);
3178 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3180 instr->type = INSTR_ALU_AND;
3181 if ((dst[0] == 'h' && src[0] != 'h') ||
3182 (dst[0] != 'h' && src[0] == 'h'))
3183 instr->type = INSTR_ALU_AND_S;
3185 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3186 instr->alu.dst.n_bits = fdst->n_bits;
3187 instr->alu.dst.offset = fdst->offset / 8;
3188 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3189 instr->alu.src.n_bits = fsrc->n_bits;
3190 instr->alu.src.offset = fsrc->offset / 8;
3195 src_val = strtoul(src, &src, 0);
3196 CHECK(!src[0], EINVAL);
3199 src_val = htonl(src_val);
3201 instr->type = INSTR_ALU_AND_I;
3202 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3203 instr->alu.dst.n_bits = fdst->n_bits;
3204 instr->alu.dst.offset = fdst->offset / 8;
3205 instr->alu.src_val = (uint32_t)src_val;
3210 instr_alu_or_translate(struct rte_swx_pipeline *p,
3211 struct action *action,
3214 struct instruction *instr,
3215 struct instruction_data *data __rte_unused)
3217 char *dst = tokens[1], *src = tokens[2];
3218 struct field *fdst, *fsrc;
3219 uint32_t dst_struct_id, src_struct_id, src_val;
3221 CHECK(n_tokens == 3, EINVAL);
3223 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3224 CHECK(fdst, EINVAL);
3227 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3229 instr->type = INSTR_ALU_OR;
3230 if ((dst[0] == 'h' && src[0] != 'h') ||
3231 (dst[0] != 'h' && src[0] == 'h'))
3232 instr->type = INSTR_ALU_OR_S;
3234 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3235 instr->alu.dst.n_bits = fdst->n_bits;
3236 instr->alu.dst.offset = fdst->offset / 8;
3237 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3238 instr->alu.src.n_bits = fsrc->n_bits;
3239 instr->alu.src.offset = fsrc->offset / 8;
3244 src_val = strtoul(src, &src, 0);
3245 CHECK(!src[0], EINVAL);
3248 src_val = htonl(src_val);
3250 instr->type = INSTR_ALU_OR_I;
3251 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3252 instr->alu.dst.n_bits = fdst->n_bits;
3253 instr->alu.dst.offset = fdst->offset / 8;
3254 instr->alu.src_val = (uint32_t)src_val;
3259 instr_alu_xor_translate(struct rte_swx_pipeline *p,
3260 struct action *action,
3263 struct instruction *instr,
3264 struct instruction_data *data __rte_unused)
3266 char *dst = tokens[1], *src = tokens[2];
3267 struct field *fdst, *fsrc;
3268 uint32_t dst_struct_id, src_struct_id, src_val;
3270 CHECK(n_tokens == 3, EINVAL);
3272 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3273 CHECK(fdst, EINVAL);
3276 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3278 instr->type = INSTR_ALU_XOR;
3279 if ((dst[0] == 'h' && src[0] != 'h') ||
3280 (dst[0] != 'h' && src[0] == 'h'))
3281 instr->type = INSTR_ALU_XOR_S;
3283 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3284 instr->alu.dst.n_bits = fdst->n_bits;
3285 instr->alu.dst.offset = fdst->offset / 8;
3286 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3287 instr->alu.src.n_bits = fsrc->n_bits;
3288 instr->alu.src.offset = fsrc->offset / 8;
3293 src_val = strtoul(src, &src, 0);
3294 CHECK(!src[0], EINVAL);
3297 src_val = htonl(src_val);
3299 instr->type = INSTR_ALU_XOR_I;
3300 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3301 instr->alu.dst.n_bits = fdst->n_bits;
3302 instr->alu.dst.offset = fdst->offset / 8;
3303 instr->alu.src_val = (uint32_t)src_val;
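/* In the *_I branches above the immediate is passed through htonl() before
 * being stored in the instruction; presumably (the guard is elided in this
 * listing) this is done only when the destination is a header field, so that
 * the constant is already in the field's big-endian representation at run
 * time. A standalone sketch of the idea, with an assumed helper name:
 */
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h> /* htonl() */

/* AND a host-order constant into a 32-bit big-endian header field. */
static void
field32_and_imm(uint8_t *field_be, uint32_t imm_host)
{
	uint32_t v;

	memcpy(&v, field_be, sizeof(v)); /* field bytes, already big-endian */
	v &= htonl(imm_host);            /* constant converted once, up front */
	memcpy(field_be, &v, sizeof(v));
}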
3308 instr_alu_add_exec(struct rte_swx_pipeline *p)
3310 struct thread *t = &p->threads[p->thread_id];
3311 struct instruction *ip = t->ip;
3313 TRACE("[Thread %2u] add\n", p->thread_id);
3323 instr_alu_add_mh_exec(struct rte_swx_pipeline *p)
3325 struct thread *t = &p->threads[p->thread_id];
3326 struct instruction *ip = t->ip;
3328 TRACE("[Thread %2u] add (mh)\n", p->thread_id);
3338 instr_alu_add_hm_exec(struct rte_swx_pipeline *p)
3340 struct thread *t = &p->threads[p->thread_id];
3341 struct instruction *ip = t->ip;
3343 TRACE("[Thread %2u] add (hm)\n", p->thread_id);
3353 instr_alu_add_hh_exec(struct rte_swx_pipeline *p)
3355 struct thread *t = &p->threads[p->thread_id];
3356 struct instruction *ip = t->ip;
3358 TRACE("[Thread %2u] add (hh)\n", p->thread_id);
3368 instr_alu_add_mi_exec(struct rte_swx_pipeline *p)
3370 struct thread *t = &p->threads[p->thread_id];
3371 struct instruction *ip = t->ip;
3373 TRACE("[Thread %2u] add (mi)\n", p->thread_id);
3383 instr_alu_add_hi_exec(struct rte_swx_pipeline *p)
3385 struct thread *t = &p->threads[p->thread_id];
3386 struct instruction *ip = t->ip;
3388 TRACE("[Thread %2u] add (hi)\n", p->thread_id);
3398 instr_alu_sub_exec(struct rte_swx_pipeline *p)
3400 struct thread *t = &p->threads[p->thread_id];
3401 struct instruction *ip = t->ip;
3403 TRACE("[Thread %2u] sub\n", p->thread_id);
3413 instr_alu_sub_mh_exec(struct rte_swx_pipeline *p)
3415 struct thread *t = &p->threads[p->thread_id];
3416 struct instruction *ip = t->ip;
3418 TRACE("[Thread %2u] sub (mh)\n", p->thread_id);
3428 instr_alu_sub_hm_exec(struct rte_swx_pipeline *p)
3430 struct thread *t = &p->threads[p->thread_id];
3431 struct instruction *ip = t->ip;
3433 TRACE("[Thread %2u] sub (hm)\n", p->thread_id);
3443 instr_alu_sub_hh_exec(struct rte_swx_pipeline *p)
3445 struct thread *t = &p->threads[p->thread_id];
3446 struct instruction *ip = t->ip;
3448 TRACE("[Thread %2u] sub (hh)\n", p->thread_id);
3458 instr_alu_sub_mi_exec(struct rte_swx_pipeline *p)
3460 struct thread *t = &p->threads[p->thread_id];
3461 struct instruction *ip = t->ip;
3463 TRACE("[Thread %2u] sub (mi)\n", p->thread_id);
3473 instr_alu_sub_hi_exec(struct rte_swx_pipeline *p)
3475 struct thread *t = &p->threads[p->thread_id];
3476 struct instruction *ip = t->ip;
3478 TRACE("[Thread %2u] sub (hi)\n", p->thread_id);
3488 instr_alu_shl_exec(struct rte_swx_pipeline *p)
3490 struct thread *t = &p->threads[p->thread_id];
3491 struct instruction *ip = t->ip;
3493 TRACE("[Thread %2u] shl\n", p->thread_id);
3503 instr_alu_shl_mh_exec(struct rte_swx_pipeline *p)
3505 struct thread *t = &p->threads[p->thread_id];
3506 struct instruction *ip = t->ip;
3508 TRACE("[Thread %2u] shl (mh)\n", p->thread_id);
3518 instr_alu_shl_hm_exec(struct rte_swx_pipeline *p)
3520 struct thread *t = &p->threads[p->thread_id];
3521 struct instruction *ip = t->ip;
3523 TRACE("[Thread %2u] shl (hm)\n", p->thread_id);
3533 instr_alu_shl_hh_exec(struct rte_swx_pipeline *p)
3535 struct thread *t = &p->threads[p->thread_id];
3536 struct instruction *ip = t->ip;
3538 TRACE("[Thread %2u] shl (hh)\n", p->thread_id);
3548 instr_alu_shl_mi_exec(struct rte_swx_pipeline *p)
3550 struct thread *t = &p->threads[p->thread_id];
3551 struct instruction *ip = t->ip;
3553 TRACE("[Thread %2u] shl (mi)\n", p->thread_id);
3563 instr_alu_shl_hi_exec(struct rte_swx_pipeline *p)
3565 struct thread *t = &p->threads[p->thread_id];
3566 struct instruction *ip = t->ip;
3568 TRACE("[Thread %2u] shl (hi)\n", p->thread_id);
3578 instr_alu_and_exec(struct rte_swx_pipeline *p)
3580 struct thread *t = &p->threads[p->thread_id];
3581 struct instruction *ip = t->ip;
3583 TRACE("[Thread %2u] and\n", p->thread_id);
3593 instr_alu_and_s_exec(struct rte_swx_pipeline *p)
3595 struct thread *t = &p->threads[p->thread_id];
3596 struct instruction *ip = t->ip;
3598 TRACE("[Thread %2u] and (s)\n", p->thread_id);
3608 instr_alu_and_i_exec(struct rte_swx_pipeline *p)
3610 struct thread *t = &p->threads[p->thread_id];
3611 struct instruction *ip = t->ip;
3613 TRACE("[Thread %2u] and (i)\n", p->thread_id);
3623 instr_alu_or_exec(struct rte_swx_pipeline *p)
3625 struct thread *t = &p->threads[p->thread_id];
3626 struct instruction *ip = t->ip;
3628 TRACE("[Thread %2u] or\n", p->thread_id);
3638 instr_alu_or_s_exec(struct rte_swx_pipeline *p)
3640 struct thread *t = &p->threads[p->thread_id];
3641 struct instruction *ip = t->ip;
3643 TRACE("[Thread %2u] or (s)\n", p->thread_id);
3653 instr_alu_or_i_exec(struct rte_swx_pipeline *p)
3655 struct thread *t = &p->threads[p->thread_id];
3656 struct instruction *ip = t->ip;
3658 TRACE("[Thread %2u] or (i)\n", p->thread_id);
3668 instr_alu_xor_exec(struct rte_swx_pipeline *p)
3670 struct thread *t = &p->threads[p->thread_id];
3671 struct instruction *ip = t->ip;
3673 TRACE("[Thread %2u] xor\n", p->thread_id);
3683 instr_alu_xor_s_exec(struct rte_swx_pipeline *p)
3685 struct thread *t = &p->threads[p->thread_id];
3686 struct instruction *ip = t->ip;
3688 TRACE("[Thread %2u] xor (s)\n", p->thread_id);
3698 instr_alu_xor_i_exec(struct rte_swx_pipeline *p)
3700 struct thread *t = &p->threads[p->thread_id];
3701 struct instruction *ip = t->ip;
3703 TRACE("[Thread %2u] xor (i)\n", p->thread_id);
3713 instr_alu_ckadd_field_exec(struct rte_swx_pipeline *p)
3715 struct thread *t = &p->threads[p->thread_id];
3716 struct instruction *ip = t->ip;
3717 uint8_t *dst_struct, *src_struct;
3718 uint16_t *dst16_ptr, dst;
3719 uint64_t *src64_ptr, src64, src64_mask, src;
3722 TRACE("[Thread %2u] ckadd (field)\n", p->thread_id);
3725 dst_struct = t->structs[ip->alu.dst.struct_id];
3726 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
3729 src_struct = t->structs[ip->alu.src.struct_id];
3730 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
3732 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
3733 src = src64 & src64_mask;
3738 /* The first input (r) is a 16-bit number. The second and the third
3739 * inputs are 32-bit numbers. In the worst case scenario, the sum of the
3740 * three numbers (output r) is a 34-bit number.
3742 r += (src >> 32) + (src & 0xFFFFFFFF);
3744 /* The first input is a 16-bit number. The second input is an 18-bit
3745 * number. In the worst case scenario, the sum of the two numbers is a
3748 r = (r & 0xFFFF) + (r >> 16);
3750 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
3751 * a 3-bit number (0 .. 7). Their sum is a 17-bit number (0 .. 0x10006).
3753 r = (r & 0xFFFF) + (r >> 16);
3755 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
3756 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
3757 * 0x10006), the output r is (0 .. 7). So no carry bit can be generated,
3758 * therefore the output r is always a 16-bit number.
3760 r = (r & 0xFFFF) + (r >> 16);
3765 *dst16_ptr = (uint16_t)r;
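/* The repeated "r = (r & 0xFFFF) + (r >> 16)" steps above are the end-around
 * carry folding of the 16-bit one's complement (Internet) checksum; the
 * bit-width comments show that three rounds are enough for the 36-bit
 * accumulator used here. A standalone sketch that works for any width by
 * looping instead of unrolling:
 */
#include <stdint.h>

/* Fold a wide one's complement sum down to 16 bits (end-around carry). */
static uint16_t
cksum_fold(uint64_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xFFFF) + (sum >> 16);

	return (uint16_t)sum;
}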
3772 instr_alu_cksub_field_exec(struct rte_swx_pipeline *p)
3774 struct thread *t = &p->threads[p->thread_id];
3775 struct instruction *ip = t->ip;
3776 uint8_t *dst_struct, *src_struct;
3777 uint16_t *dst16_ptr, dst;
3778 uint64_t *src64_ptr, src64, src64_mask, src;
3781 TRACE("[Thread %2u] cksub (field)\n", p->thread_id);
3784 dst_struct = t->structs[ip->alu.dst.struct_id];
3785 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
3788 src_struct = t->structs[ip->alu.src.struct_id];
3789 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
3791 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
3792 src = src64 & src64_mask;
3797 /* Subtraction in 1's complement arithmetic (i.e. a '- b) is the same as
3798 * the following sequence of operations in 2's complement arithmetic:
3799 * a '- b = (a - b) % 0xFFFF.
3801 * In order to prevent an underflow in the subtraction below, in which
3802 * a 33-bit number (the subtrahend) is subtracted from a 16-bit number (the
3803 * minuend), we first add a multiple of the 0xFFFF modulus to the
3804 * minuend. The number we add to the minuend needs to be a 34-bit number
3805 * or higher, so for readability reasons we picked the 36-bit multiple.
3806 * We are effectively turning the 16-bit minuend into a 36-bit number:
3807 * (a - b) % 0xFFFF = (a + 0xFFFF00000 - b) % 0xFFFF.
3809 r += 0xFFFF00000ULL; /* The output r is a 36-bit number. */
3811 /* A 33-bit number is subtracted from a 36-bit number (the input r). The
3812 * result (the output r) is a 36-bit number.
3814 r -= (src >> 32) + (src & 0xFFFFFFFF);
3816 /* The first input is a 16-bit number. The second input is a 20-bit
3817 * number. Their sum is a 21-bit number.
3819 r = (r & 0xFFFF) + (r >> 16);
3821 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
3822 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1001E).
3824 r = (r & 0xFFFF) + (r >> 16);
3826 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
3827 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
3828 * 0x1001E), the output r is (0 .. 31). So no carry bit can be
3829 * generated, therefore the output r is always a 16-bit number.
3831 r = (r & 0xFFFF) + (r >> 16);
3836 *dst16_ptr = (uint16_t)r;
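/* The 0xFFFF00000 bias above is a multiple of the 0xFFFF modulus
 * (0xFFFF << 20), so adding it keeps the two's complement subtraction from
 * underflowing without changing the value modulo 0xFFFF. A standalone sketch
 * of the same trick for removing a 32-bit quantity from a 16-bit one's
 * complement sum:
 */
#include <stdint.h>

/* Subtract a 32-bit value from a 16-bit one's complement sum (mod 0xFFFF). */
static uint16_t
cksum_sub32(uint16_t sum16, uint32_t value)
{
	uint64_t r = sum16;

	r += 0xFFFF00000ULL;                   /* avoid underflow below */
	r -= (value >> 16) + (value & 0xFFFF); /* remove both 16-bit halves */

	while (r >> 16)                        /* fold end-around carries */
		r = (r & 0xFFFF) + (r >> 16);

	return (uint16_t)r;
}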
3843 instr_alu_ckadd_struct20_exec(struct rte_swx_pipeline *p)
3845 struct thread *t = &p->threads[p->thread_id];
3846 struct instruction *ip = t->ip;
3847 uint8_t *dst_struct, *src_struct;
3848 uint16_t *dst16_ptr;
3849 uint32_t *src32_ptr;
3852 TRACE("[Thread %2u] ckadd (struct of 20 bytes)\n", p->thread_id);
3855 dst_struct = t->structs[ip->alu.dst.struct_id];
3856 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
3858 src_struct = t->structs[ip->alu.src.struct_id];
3859 src32_ptr = (uint32_t *)&src_struct[0];
3861 r0 = src32_ptr[0]; /* r0 is a 32-bit number. */
3862 r1 = src32_ptr[1]; /* r1 is a 32-bit number. */
3863 r0 += src32_ptr[2]; /* The output r0 is a 33-bit number. */
3864 r1 += src32_ptr[3]; /* The output r1 is a 33-bit number. */
3865 r0 += r1 + src32_ptr[4]; /* The output r0 is a 35-bit number. */
3867 /* The first input is a 16-bit number. The second input is a 19-bit
3868 * number. Their sum is a 20-bit number.
3870 r0 = (r0 & 0xFFFF) + (r0 >> 16);
3872 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
3873 * a 4-bit number (0 .. 15). The sum is a 17-bit number (0 .. 0x1000E).
3875 r0 = (r0 & 0xFFFF) + (r0 >> 16);
3877 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
3878 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
3879 * 0x1000E), the output r is (0 .. 15). So no carry bit can be
3880 * generated, therefore the output r is always a 16-bit number.
3882 r0 = (r0 & 0xFFFF) + (r0 >> 16);
3885 r0 = r0 ? r0 : 0xFFFF;
3887 *dst16_ptr = (uint16_t)r0;
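/* The 20-byte specialization above matches the size of an IPv4 header without
 * options. A standalone sketch of the same computation (the checksum field is
 * assumed to be zero and the header 4-byte aligned):
 */
#include <stdint.h>

/* One's complement checksum over five 32-bit words (20-byte IPv4 header). */
static uint16_t
ipv4_hdr_cksum20(const uint32_t *w)
{
	uint64_t r = (uint64_t)w[0] + w[1] + w[2] + w[3] + w[4];

	while (r >> 16)                  /* fold end-around carries */
		r = (r & 0xFFFF) + (r >> 16);

	return (uint16_t)~r;             /* final one's complement */
}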
3894 instr_alu_ckadd_struct_exec(struct rte_swx_pipeline *p)
3896 struct thread *t = &p->threads[p->thread_id];
3897 struct instruction *ip = t->ip;
3898 uint8_t *dst_struct, *src_struct;
3899 uint16_t *dst16_ptr;
3900 uint32_t *src32_ptr;
3904 TRACE("[Thread %2u] ckadd (struct)\n", p->thread_id);
3907 dst_struct = t->structs[ip->alu.dst.struct_id];
3908 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
3910 src_struct = t->structs[ip->alu.src.struct_id];
3911 src32_ptr = (uint32_t *)&src_struct[0];
3913 /* The max number of 32-bit words in a 32-byte header is 8 = 2^3.
3914 * Therefore, in the worst case scenario, a 35-bit number is added to a
3915 * 16-bit number (the input r), so the output r is a 36-bit number.
3917 for (i = 0; i < ip->alu.src.n_bits / 32; i++, src32_ptr++)
3920 /* The first input is a 16-bit number. The second input is a 20-bit
3921 * number. Their sum is a 21-bit number.
3923 r = (r & 0xFFFF) + (r >> 16);
3925 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
3926 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1001E).
3928 r = (r & 0xFFFF) + (r >> 16);
3930 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
3931 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
3932 * 0x1001E), the output r is (0 .. 31). So no carry bit can be
3933 * generated, therefore the output r is always a 16-bit number.
3935 r = (r & 0xFFFF) + (r >> 16);
3940 *dst16_ptr = (uint16_t)r;
3946 #define RTE_SWX_INSTRUCTION_TOKENS_MAX 16
3949 instr_translate(struct rte_swx_pipeline *p,
3950 struct action *action,
3952 struct instruction *instr,
3953 struct instruction_data *data)
3955 char *tokens[RTE_SWX_INSTRUCTION_TOKENS_MAX];
3956 int n_tokens = 0, tpos = 0;
3958 /* Parse the instruction string into tokens. */
3962 token = strtok_r(string, " \t\v", &string);
3966 CHECK(n_tokens < RTE_SWX_INSTRUCTION_TOKENS_MAX, EINVAL);
3968 tokens[n_tokens] = token;
3972 CHECK(n_tokens, EINVAL);
3974 /* Handle the optional instruction label. */
3975 if ((n_tokens >= 2) && !strcmp(tokens[1], ":")) {
3976 strcpy(data->label, tokens[0]);
3979 CHECK(n_tokens - tpos, EINVAL);
3982 /* Identify the instruction type. */
3983 if (!strcmp(tokens[tpos], "rx"))
3984 return instr_rx_translate(p,
3991 if (!strcmp(tokens[tpos], "tx"))
3992 return instr_tx_translate(p,
3999 if (!strcmp(tokens[tpos], "extract"))
4000 return instr_hdr_extract_translate(p,
4007 if (!strcmp(tokens[tpos], "emit"))
4008 return instr_hdr_emit_translate(p,
4015 if (!strcmp(tokens[tpos], "validate"))
4016 return instr_hdr_validate_translate(p,
4023 if (!strcmp(tokens[tpos], "invalidate"))
4024 return instr_hdr_invalidate_translate(p,
4031 if (!strcmp(tokens[tpos], "mov"))
4032 return instr_mov_translate(p,
4039 if (!strcmp(tokens[tpos], "dma"))
4040 return instr_dma_translate(p,
4047 if (!strcmp(tokens[tpos], "add"))
4048 return instr_alu_add_translate(p,
4055 if (!strcmp(tokens[tpos], "sub"))
4056 return instr_alu_sub_translate(p,
4063 if (!strcmp(tokens[tpos], "ckadd"))
4064 return instr_alu_ckadd_translate(p,
4071 if (!strcmp(tokens[tpos], "cksub"))
4072 return instr_alu_cksub_translate(p,
4079 if (!strcmp(tokens[tpos], "and"))
4080 return instr_alu_and_translate(p,
4087 if (!strcmp(tokens[tpos], "or"))
4088 return instr_alu_or_translate(p,
4095 if (!strcmp(tokens[tpos], "xor"))
4096 return instr_alu_xor_translate(p,
4103 if (!strcmp(tokens[tpos], "shl"))
4104 return instr_alu_shl_translate(p,
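/* instr_translate() above splits the line on whitespace, treats a leading
 * "NAME :" pair as an optional label, and then dispatches on the first
 * remaining token. A minimal standalone tokenizer in the same style
 * (hypothetical limit and names):
 */
#include <string.h>

#define EXAMPLE_TOKENS_MAX 16

/* Split a writable instruction line into whitespace-separated tokens. */
static int
tokenize(char *line, char *tokens[], int *n_tokens)
{
	char *token, *saveptr = NULL;
	int n = 0;

	for (token = strtok_r(line, " \t\v", &saveptr);
	     token;
	     token = strtok_r(NULL, " \t\v", &saveptr)) {
		if (n >= EXAMPLE_TOKENS_MAX)
			return -1;

		tokens[n++] = token;
	}

	*n_tokens = n;
	return 0;
}

/* For "L1 : mov m.x t.y" the tokens are {"L1", ":", "mov", "m.x", "t.y"};
 * tokens[1] == ":" marks "L1" as the instruction label and keyword dispatch
 * starts at tokens[2].
 */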
4115 label_is_used(struct instruction_data *data, uint32_t n, const char *label)
4117 uint32_t count = 0, i;
4122 for (i = 0; i < n; i++)
4123 if (!strcmp(label, data[i].jmp_label))
4130 instr_label_check(struct instruction_data *instruction_data,
4131 uint32_t n_instructions)
4135 /* Check that all instruction labels are unique. */
4136 for (i = 0; i < n_instructions; i++) {
4137 struct instruction_data *data = &instruction_data[i];
4138 char *label = data->label;
4144 for (j = i + 1; j < n_instructions; j++)
4145 CHECK(strcmp(label, data[j].label), EINVAL);
4148 /* Get users for each instruction label. */
4149 for (i = 0; i < n_instructions; i++) {
4150 struct instruction_data *data = &instruction_data[i];
4151 char *label = data->label;
4153 data->n_users = label_is_used(instruction_data,
4162 instruction_config(struct rte_swx_pipeline *p,
4164 const char **instructions,
4165 uint32_t n_instructions)
4167 struct instruction *instr = NULL;
4168 struct instruction_data *data = NULL;
4169 char *string = NULL;
4173 CHECK(n_instructions, EINVAL);
4174 CHECK(instructions, EINVAL);
4175 for (i = 0; i < n_instructions; i++)
4176 CHECK(instructions[i], EINVAL);
4178 /* Memory allocation. */
4179 instr = calloc(n_instructions, sizeof(struct instruction));
4185 data = calloc(n_instructions, sizeof(struct instruction_data));
4191 for (i = 0; i < n_instructions; i++) {
4192 string = strdup(instructions[i]);
4198 err = instr_translate(p, a, string, &instr[i], &data[i]);
4205 err = instr_label_check(data, n_instructions);
4212 a->instructions = instr;
4213 a->n_instructions = n_instructions;
4215 p->instructions = instr;
4216 p->n_instructions = n_instructions;
4228 typedef void (*instr_exec_t)(struct rte_swx_pipeline *);
4230 static instr_exec_t instruction_table[] = {
4231 [INSTR_RX] = instr_rx_exec,
4232 [INSTR_TX] = instr_tx_exec,
4234 [INSTR_HDR_EXTRACT] = instr_hdr_extract_exec,
4235 [INSTR_HDR_EXTRACT2] = instr_hdr_extract2_exec,
4236 [INSTR_HDR_EXTRACT3] = instr_hdr_extract3_exec,
4237 [INSTR_HDR_EXTRACT4] = instr_hdr_extract4_exec,
4238 [INSTR_HDR_EXTRACT5] = instr_hdr_extract5_exec,
4239 [INSTR_HDR_EXTRACT6] = instr_hdr_extract6_exec,
4240 [INSTR_HDR_EXTRACT7] = instr_hdr_extract7_exec,
4241 [INSTR_HDR_EXTRACT8] = instr_hdr_extract8_exec,
4243 [INSTR_HDR_EMIT] = instr_hdr_emit_exec,
4244 [INSTR_HDR_EMIT_TX] = instr_hdr_emit_tx_exec,
4245 [INSTR_HDR_EMIT2_TX] = instr_hdr_emit2_tx_exec,
4246 [INSTR_HDR_EMIT3_TX] = instr_hdr_emit3_tx_exec,
4247 [INSTR_HDR_EMIT4_TX] = instr_hdr_emit4_tx_exec,
4248 [INSTR_HDR_EMIT5_TX] = instr_hdr_emit5_tx_exec,
4249 [INSTR_HDR_EMIT6_TX] = instr_hdr_emit6_tx_exec,
4250 [INSTR_HDR_EMIT7_TX] = instr_hdr_emit7_tx_exec,
4251 [INSTR_HDR_EMIT8_TX] = instr_hdr_emit8_tx_exec,
4253 [INSTR_HDR_VALIDATE] = instr_hdr_validate_exec,
4254 [INSTR_HDR_INVALIDATE] = instr_hdr_invalidate_exec,
4256 [INSTR_MOV] = instr_mov_exec,
4257 [INSTR_MOV_S] = instr_mov_s_exec,
4258 [INSTR_MOV_I] = instr_mov_i_exec,
4260 [INSTR_DMA_HT] = instr_dma_ht_exec,
4261 [INSTR_DMA_HT2] = instr_dma_ht2_exec,
4262 [INSTR_DMA_HT3] = instr_dma_ht3_exec,
4263 [INSTR_DMA_HT4] = instr_dma_ht4_exec,
4264 [INSTR_DMA_HT5] = instr_dma_ht5_exec,
4265 [INSTR_DMA_HT6] = instr_dma_ht6_exec,
4266 [INSTR_DMA_HT7] = instr_dma_ht7_exec,
4267 [INSTR_DMA_HT8] = instr_dma_ht8_exec,
4269 [INSTR_ALU_ADD] = instr_alu_add_exec,
4270 [INSTR_ALU_ADD_MH] = instr_alu_add_mh_exec,
4271 [INSTR_ALU_ADD_HM] = instr_alu_add_hm_exec,
4272 [INSTR_ALU_ADD_HH] = instr_alu_add_hh_exec,
4273 [INSTR_ALU_ADD_MI] = instr_alu_add_mi_exec,
4274 [INSTR_ALU_ADD_HI] = instr_alu_add_hi_exec,
4276 [INSTR_ALU_SUB] = instr_alu_sub_exec,
4277 [INSTR_ALU_SUB_MH] = instr_alu_sub_mh_exec,
4278 [INSTR_ALU_SUB_HM] = instr_alu_sub_hm_exec,
4279 [INSTR_ALU_SUB_HH] = instr_alu_sub_hh_exec,
4280 [INSTR_ALU_SUB_MI] = instr_alu_sub_mi_exec,
4281 [INSTR_ALU_SUB_HI] = instr_alu_sub_hi_exec,
4283 [INSTR_ALU_CKADD_FIELD] = instr_alu_ckadd_field_exec,
4284 [INSTR_ALU_CKADD_STRUCT] = instr_alu_ckadd_struct_exec,
4285 [INSTR_ALU_CKADD_STRUCT20] = instr_alu_ckadd_struct20_exec,
4286 [INSTR_ALU_CKSUB_FIELD] = instr_alu_cksub_field_exec,
4288 [INSTR_ALU_AND] = instr_alu_and_exec,
4289 [INSTR_ALU_AND_S] = instr_alu_and_s_exec,
4290 [INSTR_ALU_AND_I] = instr_alu_and_i_exec,
4292 [INSTR_ALU_OR] = instr_alu_or_exec,
4293 [INSTR_ALU_OR_S] = instr_alu_or_s_exec,
4294 [INSTR_ALU_OR_I] = instr_alu_or_i_exec,
4296 [INSTR_ALU_XOR] = instr_alu_xor_exec,
4297 [INSTR_ALU_XOR_S] = instr_alu_xor_s_exec,
4298 [INSTR_ALU_XOR_I] = instr_alu_xor_i_exec,
4300 [INSTR_ALU_SHL] = instr_alu_shl_exec,
4301 [INSTR_ALU_SHL_MH] = instr_alu_shl_mh_exec,
4302 [INSTR_ALU_SHL_HM] = instr_alu_shl_hm_exec,
4303 [INSTR_ALU_SHL_HH] = instr_alu_shl_hh_exec,
4304 [INSTR_ALU_SHL_MI] = instr_alu_shl_mi_exec,
4305 [INSTR_ALU_SHL_HI] = instr_alu_shl_hi_exec,
4309 instr_exec(struct rte_swx_pipeline *p)
4311 struct thread *t = &p->threads[p->thread_id];
4312 struct instruction *ip = t->ip;
4313 instr_exec_t instr = instruction_table[ip->type];
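/* instruction_table[] above is a dense jump table: the instruction type is
 * the array index, so instr_exec() costs one load plus one indirect call per
 * instruction. A self-contained miniature of the same pattern (made-up
 * opcodes):
 */
#include <stdio.h>

enum example_op { EX_OP_NOP, EX_OP_HELLO, EX_OP_MAX };

typedef void (*example_op_exec_t)(void);

static void ex_op_nop_exec(void)   { /* do nothing */ }
static void ex_op_hello_exec(void) { printf("hello\n"); }

/* Designated initializers keep the table in sync with the enum. */
static example_op_exec_t example_op_table[EX_OP_MAX] = {
	[EX_OP_NOP] = ex_op_nop_exec,
	[EX_OP_HELLO] = ex_op_hello_exec,
};

static void
example_op_exec(enum example_op type)
{
	example_op_table[type]();
}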
4321 static struct action *
4322 action_find(struct rte_swx_pipeline *p, const char *name)
4324 struct action *elem;
4329 TAILQ_FOREACH(elem, &p->actions, node)
4330 if (strcmp(elem->name, name) == 0)
4336 static struct field *
4337 action_field_find(struct action *a, const char *name)
4339 return a->st ? struct_type_field_find(a->st, name) : NULL;
4342 static struct field *
4343 action_field_parse(struct action *action, const char *name)
4345 if (name[0] != 't' || name[1] != '.')
4348 return action_field_find(action, &name[2]);
4352 rte_swx_pipeline_action_config(struct rte_swx_pipeline *p,
4354 const char *args_struct_type_name,
4355 const char **instructions,
4356 uint32_t n_instructions)
4358 struct struct_type *args_struct_type;
4364 CHECK_NAME(name, EINVAL);
4365 CHECK(!action_find(p, name), EEXIST);
4367 if (args_struct_type_name) {
4368 CHECK_NAME(args_struct_type_name, EINVAL);
4369 args_struct_type = struct_type_find(p, args_struct_type_name);
4370 CHECK(args_struct_type, EINVAL);
4372 args_struct_type = NULL;
4375 /* Node allocation. */
4376 a = calloc(1, sizeof(struct action));
4379 /* Node initialization. */
4380 strcpy(a->name, name);
4381 a->st = args_struct_type;
4382 a->id = p->n_actions;
4384 /* Instruction translation. */
4385 err = instruction_config(p, a, instructions, n_instructions);
4391 /* Node add to tailq. */
4392 TAILQ_INSERT_TAIL(&p->actions, a, node);
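/* A hedged usage sketch of the call above: the action name, argument struct
 * type and instruction text are illustrative only, and assume that the
 * "set_next_hop_args_t" struct type and the referenced meta-data field were
 * registered with the pipeline beforehand.
 */
static int
example_register_action(struct rte_swx_pipeline *p)
{
	static const char *instr[] = {
		"mov m.next_hop_id t.next_hop_id",
	};

	return rte_swx_pipeline_action_config(p,
					      "set_next_hop",
					      "set_next_hop_args_t",
					      instr,
					      1);
}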
4399 action_build(struct rte_swx_pipeline *p)
4401 struct action *action;
4403 p->action_instructions = calloc(p->n_actions,
4404 sizeof(struct instruction *));
4405 CHECK(p->action_instructions, ENOMEM);
4407 TAILQ_FOREACH(action, &p->actions, node)
4408 p->action_instructions[action->id] = action->instructions;
4414 action_build_free(struct rte_swx_pipeline *p)
4416 free(p->action_instructions);
4417 p->action_instructions = NULL;
4421 action_free(struct rte_swx_pipeline *p)
4423 action_build_free(p);
4426 struct action *action;
4428 action = TAILQ_FIRST(&p->actions);
4432 TAILQ_REMOVE(&p->actions, action, node);
4433 free(action->instructions);
4441 static struct table_type *
4442 table_type_find(struct rte_swx_pipeline *p, const char *name)
4444 struct table_type *elem;
4446 TAILQ_FOREACH(elem, &p->table_types, node)
4447 if (strcmp(elem->name, name) == 0)
4453 static struct table_type *
4454 table_type_resolve(struct rte_swx_pipeline *p,
4455 const char *recommended_type_name,
4456 enum rte_swx_table_match_type match_type)
4458 struct table_type *elem;
4460 /* Only consider the recommended type if the match type is correct. */
4461 if (recommended_type_name)
4462 TAILQ_FOREACH(elem, &p->table_types, node)
4463 if (!strcmp(elem->name, recommended_type_name) &&
4464 (elem->match_type == match_type))
4467 /* Ignore the recommended type and get the first element with this match
4470 TAILQ_FOREACH(elem, &p->table_types, node)
4471 if (elem->match_type == match_type)
4477 static struct table *
4478 table_find(struct rte_swx_pipeline *p, const char *name)
4482 TAILQ_FOREACH(elem, &p->tables, node)
4483 if (strcmp(elem->name, name) == 0)
4489 static struct table *
4490 table_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
4492 struct table *table = NULL;
4494 TAILQ_FOREACH(table, &p->tables, node)
4495 if (table->id == id)
4502 rte_swx_pipeline_table_type_register(struct rte_swx_pipeline *p,
4504 enum rte_swx_table_match_type match_type,
4505 struct rte_swx_table_ops *ops)
4507 struct table_type *elem;
4511 CHECK_NAME(name, EINVAL);
4512 CHECK(!table_type_find(p, name), EEXIST);
4515 CHECK(ops->create, EINVAL);
4516 CHECK(ops->lkp, EINVAL);
4517 CHECK(ops->free, EINVAL);
4519 /* Node allocation. */
4520 elem = calloc(1, sizeof(struct table_type));
4521 CHECK(elem, ENOMEM);
4523 /* Node initialization. */
4524 strcpy(elem->name, name);
4525 elem->match_type = match_type;
4526 memcpy(&elem->ops, ops, sizeof(*ops));
4528 /* Node add to tailq. */
4529 TAILQ_INSERT_TAIL(&p->table_types, elem, node);
4534 static enum rte_swx_table_match_type
4535 table_match_type_resolve(struct rte_swx_match_field_params *fields,
4540 for (i = 0; i < n_fields; i++)
4541 if (fields[i].match_type != RTE_SWX_TABLE_MATCH_EXACT)
4545 return RTE_SWX_TABLE_MATCH_EXACT;
4547 if ((i == n_fields - 1) &&
4548 (fields[i].match_type == RTE_SWX_TABLE_MATCH_LPM))
4549 return RTE_SWX_TABLE_MATCH_LPM;
4551 return RTE_SWX_TABLE_MATCH_WILDCARD;
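/* Worked examples of the resolution rule above:
 * - {exact, exact, exact}                 -> RTE_SWX_TABLE_MATCH_EXACT
 * - {exact, exact, lpm} (LPM field last)  -> RTE_SWX_TABLE_MATCH_LPM
 * - {exact, lpm, exact} or any other mix  -> RTE_SWX_TABLE_MATCH_WILDCARD
 */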
4555 rte_swx_pipeline_table_config(struct rte_swx_pipeline *p,
4557 struct rte_swx_pipeline_table_params *params,
4558 const char *recommended_table_type_name,
4562 struct table_type *type;
4564 struct action *default_action;
4565 struct header *header = NULL;
4567 uint32_t offset_prev = 0, action_data_size_max = 0, i;
4571 CHECK_NAME(name, EINVAL);
4572 CHECK(!table_find(p, name), EEXIST);
4574 CHECK(params, EINVAL);
4577 CHECK(!params->n_fields || params->fields, EINVAL);
4578 for (i = 0; i < params->n_fields; i++) {
4579 struct rte_swx_match_field_params *field = &params->fields[i];
4581 struct field *hf, *mf;
4584 CHECK_NAME(field->name, EINVAL);
4586 hf = header_field_parse(p, field->name, &h);
4587 mf = metadata_field_parse(p, field->name);
4588 CHECK(hf || mf, EINVAL);
4590 offset = hf ? hf->offset : mf->offset;
4593 is_header = hf ? 1 : 0;
4594 header = hf ? h : NULL;
4595 offset_prev = offset;
4600 CHECK((is_header && hf && (h->id == header->id)) ||
4601 (!is_header && mf), EINVAL);
4603 CHECK(offset > offset_prev, EINVAL);
4604 offset_prev = offset;
4607 /* Action checks. */
4608 CHECK(params->n_actions, EINVAL);
4609 CHECK(params->action_names, EINVAL);
4610 for (i = 0; i < params->n_actions; i++) {
4611 const char *action_name = params->action_names[i];
4613 uint32_t action_data_size;
4615 CHECK(action_name, EINVAL);
4617 a = action_find(p, action_name);
4620 action_data_size = a->st ? a->st->n_bits / 8 : 0;
4621 if (action_data_size > action_data_size_max)
4622 action_data_size_max = action_data_size;
4625 CHECK(params->default_action_name, EINVAL);
4626 for (i = 0; i < params->n_actions; i++)
4627 if (!strcmp(params->action_names[i],
4628 params->default_action_name))
4630 CHECK(i < params->n_actions, EINVAL);
4631 default_action = action_find(p, params->default_action_name);
4632 CHECK((default_action->st && params->default_action_data) ||
4633 !params->default_action_data, EINVAL);
4635 /* Table type checks. */
4636 if (params->n_fields) {
4637 enum rte_swx_table_match_type match_type;
4639 match_type = table_match_type_resolve(params->fields,
4641 type = table_type_resolve(p,
4642 recommended_table_type_name,
4644 CHECK(type, EINVAL);
4649 /* Memory allocation. */
4650 t = calloc(1, sizeof(struct table));
4653 t->fields = calloc(params->n_fields, sizeof(struct match_field));
4659 t->actions = calloc(params->n_actions, sizeof(struct action *));
4666 if (action_data_size_max) {
4667 t->default_action_data = calloc(1, action_data_size_max);
4668 if (!t->default_action_data) {
4676 /* Node initialization. */
4677 strcpy(t->name, name);
4678 if (args && args[0])
4679 strcpy(t->args, args);
4682 for (i = 0; i < params->n_fields; i++) {
4683 struct rte_swx_match_field_params *field = &params->fields[i];
4684 struct match_field *f = &t->fields[i];
4686 f->match_type = field->match_type;
4687 f->field = is_header ?
4688 header_field_parse(p, field->name, NULL) :
4689 metadata_field_parse(p, field->name);
4691 t->n_fields = params->n_fields;
4692 t->is_header = is_header;
4695 for (i = 0; i < params->n_actions; i++)
4696 t->actions[i] = action_find(p, params->action_names[i]);
4697 t->default_action = default_action;
4698 if (default_action->st)
4699 memcpy(t->default_action_data,
4700 params->default_action_data,
4701 default_action->st->n_bits / 8);
4702 t->n_actions = params->n_actions;
4703 t->default_action_is_const = params->default_action_is_const;
4704 t->action_data_size_max = action_data_size_max;
4707 t->id = p->n_tables;
4709 /* Node add to tailq. */
4710 TAILQ_INSERT_TAIL(&p->tables, t, node);
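/* A hedged usage sketch of the call above (it assumes the elided trailing
 * parameters are the table args string and size, that an LPM table type was
 * registered, and that the named actions and header field already exist):
 */
static int
example_add_routing_table(struct rte_swx_pipeline *p)
{
	struct rte_swx_match_field_params fields[] = {
		{.name = "h.ipv4.dst_addr",
		 .match_type = RTE_SWX_TABLE_MATCH_LPM},
	};
	const char *actions[] = {"set_next_hop", "drop"};
	struct rte_swx_pipeline_table_params params = {
		.fields = fields,
		.n_fields = 1,
		.action_names = actions,
		.n_actions = 2,
		.default_action_name = "drop",
		.default_action_data = NULL,
		.default_action_is_const = 0,
	};

	return rte_swx_pipeline_table_config(p, "routing", &params,
					     NULL /* any type with LPM match */,
					     NULL /* no args */, 4096);
}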
4716 static struct rte_swx_table_params *
4717 table_params_get(struct table *table)
4719 struct rte_swx_table_params *params;
4720 struct field *first, *last;
4722 uint32_t key_size, key_offset, action_data_size, i;
4724 /* Memory allocation. */
4725 params = calloc(1, sizeof(struct rte_swx_table_params));
4729 /* Key offset and size. */
4730 first = table->fields[0].field;
4731 last = table->fields[table->n_fields - 1].field;
4732 key_offset = first->offset / 8;
4733 key_size = (last->offset + last->n_bits - first->offset) / 8;
4735 /* Memory allocation. */
4736 key_mask = calloc(1, key_size);
4743 for (i = 0; i < table->n_fields; i++) {
4744 struct field *f = table->fields[i].field;
4745 uint32_t start = (f->offset - first->offset) / 8;
4746 size_t size = f->n_bits / 8;
4748 memset(&key_mask[start], 0xFF, size);
4751 /* Action data size. */
4752 action_data_size = 0;
4753 for (i = 0; i < table->n_actions; i++) {
4754 struct action *action = table->actions[i];
4755 uint32_t ads = action->st ? action->st->n_bits / 8 : 0;
4757 if (ads > action_data_size)
4758 action_data_size = ads;
4762 params->match_type = table->type->match_type;
4763 params->key_size = key_size;
4764 params->key_offset = key_offset;
4765 params->key_mask0 = key_mask;
4766 params->action_data_size = action_data_size;
4767 params->n_keys_max = table->size;
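/* Worked example of the key layout computed above: two exact-match meta-data
 * fields of 32 bits each, at bit offsets 64 and 96, give key_offset = 64 / 8 =
 * 8 bytes, key_size = (96 + 32 - 64) / 8 = 8 bytes, and a key_mask0 of 8 bytes
 * of 0xFF (both fields fully masked); any gap between fields would stay zero
 * in the mask.
 */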
4773 table_params_free(struct rte_swx_table_params *params)
4778 free(params->key_mask0);
4783 table_state_build(struct rte_swx_pipeline *p)
4785 struct table *table;
4787 p->table_state = calloc(p->n_tables,
4788 sizeof(struct rte_swx_table_state));
4789 CHECK(p->table_state, ENOMEM);
4791 TAILQ_FOREACH(table, &p->tables, node) {
4792 struct rte_swx_table_state *ts = &p->table_state[table->id];
4795 struct rte_swx_table_params *params;
4798 params = table_params_get(table);
4799 CHECK(params, ENOMEM);
4801 ts->obj = table->type->ops.create(params,
4806 table_params_free(params);
4807 CHECK(ts->obj, ENODEV);
4810 /* ts->default_action_data. */
4811 if (table->action_data_size_max) {
4812 ts->default_action_data =
4813 malloc(table->action_data_size_max);
4814 CHECK(ts->default_action_data, ENOMEM);
4816 memcpy(ts->default_action_data,
4817 table->default_action_data,
4818 table->action_data_size_max);
4821 /* ts->default_action_id. */
4822 ts->default_action_id = table->default_action->id;
4829 table_state_build_free(struct rte_swx_pipeline *p)
4833 if (!p->table_state)
4836 for (i = 0; i < p->n_tables; i++) {
4837 struct rte_swx_table_state *ts = &p->table_state[i];
4838 struct table *table = table_find_by_id(p, i);
4841 if (table->type && ts->obj)
4842 table->type->ops.free(ts->obj);
4844 /* ts->default_action_data. */
4845 free(ts->default_action_data);
4848 free(p->table_state);
4849 p->table_state = NULL;
4853 table_state_free(struct rte_swx_pipeline *p)
4855 table_state_build_free(p);
4859 table_stub_lkp(void *table __rte_unused,
4860 void *mailbox __rte_unused,
4861 uint8_t **key __rte_unused,
4862 uint64_t *action_id __rte_unused,
4863 uint8_t **action_data __rte_unused,
4867 return 1; /* DONE. */
4871 table_build(struct rte_swx_pipeline *p)
4875 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
4876 struct thread *t = &p->threads[i];
4877 struct table *table;
4879 t->tables = calloc(p->n_tables, sizeof(struct table_runtime));
4880 CHECK(t->tables, ENOMEM);
4882 TAILQ_FOREACH(table, &p->tables, node) {
4883 struct table_runtime *r = &t->tables[table->id];
4888 size = table->type->ops.mailbox_size_get();
4891 r->func = table->type->ops.lkp;
4895 r->mailbox = calloc(1, size);
4896 CHECK(r->mailbox, ENOMEM);
4900 r->key = table->is_header ?
4901 &t->structs[table->header->struct_id] :
4902 &t->structs[p->metadata_struct_id];
4904 r->func = table_stub_lkp;
4913 table_build_free(struct rte_swx_pipeline *p)
4917 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
4918 struct thread *t = &p->threads[i];
4924 for (j = 0; j < p->n_tables; j++) {
4925 struct table_runtime *r = &t->tables[j];
4936 table_free(struct rte_swx_pipeline *p)
4938 table_build_free(p);
4944 elem = TAILQ_FIRST(&p->tables);
4948 TAILQ_REMOVE(&p->tables, elem, node);
4950 free(elem->actions);
4951 free(elem->default_action_data);
4957 struct table_type *elem;
4959 elem = TAILQ_FIRST(&p->table_types);
4963 TAILQ_REMOVE(&p->table_types, elem, node);
4972 rte_swx_pipeline_config(struct rte_swx_pipeline **p, int numa_node)
4974 struct rte_swx_pipeline *pipeline;
4976 /* Check input parameters. */
4979 /* Memory allocation. */
4980 pipeline = calloc(1, sizeof(struct rte_swx_pipeline));
4981 CHECK(pipeline, ENOMEM);
4983 /* Initialization. */
4984 TAILQ_INIT(&pipeline->struct_types);
4985 TAILQ_INIT(&pipeline->port_in_types);
4986 TAILQ_INIT(&pipeline->ports_in);
4987 TAILQ_INIT(&pipeline->port_out_types);
4988 TAILQ_INIT(&pipeline->ports_out);
4989 TAILQ_INIT(&pipeline->extern_types);
4990 TAILQ_INIT(&pipeline->extern_objs);
4991 TAILQ_INIT(&pipeline->extern_funcs);
4992 TAILQ_INIT(&pipeline->headers);
4993 TAILQ_INIT(&pipeline->actions);
4994 TAILQ_INIT(&pipeline->table_types);
4995 TAILQ_INIT(&pipeline->tables);
4997 pipeline->n_structs = 1; /* Struct 0 is reserved for action_data. */
4998 pipeline->numa_node = numa_node;
5005 rte_swx_pipeline_free(struct rte_swx_pipeline *p)
5010 free(p->instructions);
5012 table_state_free(p);
5017 extern_func_free(p);
5027 rte_swx_pipeline_instructions_config(struct rte_swx_pipeline *p,
5028 const char **instructions,
5029 uint32_t n_instructions)
5034 err = instruction_config(p, NULL, instructions, n_instructions);
5038 /* Thread instruction pointer reset. */
5039 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
5040 struct thread *t = &p->threads[i];
5042 thread_ip_reset(p, t);
5049 rte_swx_pipeline_build(struct rte_swx_pipeline *p)
5054 CHECK(p->build_done == 0, EEXIST);
5056 status = port_in_build(p);
5060 status = port_out_build(p);
5064 status = struct_build(p);
5068 status = extern_obj_build(p);
5072 status = extern_func_build(p);
5076 status = header_build(p);
5080 status = metadata_build(p);
5084 status = action_build(p);
5088 status = table_build(p);
5092 status = table_state_build(p);
5100 table_state_build_free(p);
5101 table_build_free(p);
5102 action_build_free(p);
5103 metadata_build_free(p);
5104 header_build_free(p);
5105 extern_func_build_free(p);
5106 extern_obj_build_free(p);
5107 port_out_build_free(p);
5108 port_in_build_free(p);
5109 struct_build_free(p);
5115 rte_swx_pipeline_run(struct rte_swx_pipeline *p, uint32_t n_instructions)
5119 for (i = 0; i < n_instructions; i++)
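/* A hedged sketch of the public call sequence implemented above; error paths
 * are abbreviated, port/type/struct/header registration is omitted, and the
 * two-instruction program is illustrative only.
 */
static int
example_pipeline_setup_and_run(void)
{
	static const char *instr[] = {
		"rx m.port_in",
		"tx m.port_in",
	};
	struct rte_swx_pipeline *p = NULL;
	int status;

	status = rte_swx_pipeline_config(&p, 0 /* numa_node */);
	if (status)
		return status;

	/* ... register ports, struct types, headers, meta-data, actions,
	 * tables ...
	 */

	status = rte_swx_pipeline_instructions_config(p, instr, 2);
	if (status)
		goto error;

	status = rte_swx_pipeline_build(p);
	if (status)
		goto error;

	for ( ; ; )
		rte_swx_pipeline_run(p, 1000000); /* instructions per call */

error:
	rte_swx_pipeline_free(p);
	return status;
}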
5127 rte_swx_pipeline_table_state_get(struct rte_swx_pipeline *p,
5128 struct rte_swx_table_state **table_state)
5130 if (!p || !table_state || !p->build_done)
5133 *table_state = p->table_state;
5138 rte_swx_pipeline_table_state_set(struct rte_swx_pipeline *p,
5139 struct rte_swx_table_state *table_state)
5141 if (!p || !table_state || !p->build_done)
5144 p->table_state = table_state;