1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Intel Corporation
11 #include <rte_common.h>
12 #include <rte_prefetch.h>
13 #include <rte_byteorder.h>
15 #include "rte_swx_pipeline.h"
16 #include "rte_swx_ctl.h"
18 #define CHECK(condition, err_code) \
24 /* Validate a user-supplied name: non-NULL, non-empty, and short enough to be
 * strcpy'd into a fixed-size char name[RTE_SWX_NAME_SIZE] buffer without
 * overflow (every *_register() function below stores names that way).
 */
#define CHECK_NAME(name, err_code) \
25 CHECK((name) && (name)[0] && \
      (strnlen((name), RTE_SWX_NAME_SIZE) < RTE_SWX_NAME_SIZE), err_code)
32 #define TRACE(...) printf(__VA_ARGS__)
37 #define ntoh64(x) rte_be_to_cpu_64(x)
38 #define hton64(x) rte_cpu_to_be_64(x)
44 char name[RTE_SWX_NAME_SIZE];
50 TAILQ_ENTRY(struct_type) node;
51 char name[RTE_SWX_NAME_SIZE];
57 TAILQ_HEAD(struct_type_tailq, struct_type);
63 TAILQ_ENTRY(port_in_type) node;
64 char name[RTE_SWX_NAME_SIZE];
65 struct rte_swx_port_in_ops ops;
68 TAILQ_HEAD(port_in_type_tailq, port_in_type);
71 TAILQ_ENTRY(port_in) node;
72 struct port_in_type *type;
77 TAILQ_HEAD(port_in_tailq, port_in);
79 struct port_in_runtime {
80 rte_swx_port_in_pkt_rx_t pkt_rx;
87 struct port_out_type {
88 TAILQ_ENTRY(port_out_type) node;
89 char name[RTE_SWX_NAME_SIZE];
90 struct rte_swx_port_out_ops ops;
93 TAILQ_HEAD(port_out_type_tailq, port_out_type);
96 TAILQ_ENTRY(port_out) node;
97 struct port_out_type *type;
102 TAILQ_HEAD(port_out_tailq, port_out);
104 struct port_out_runtime {
105 rte_swx_port_out_pkt_tx_t pkt_tx;
106 rte_swx_port_out_flush_t flush;
113 struct extern_type_member_func {
114 TAILQ_ENTRY(extern_type_member_func) node;
115 char name[RTE_SWX_NAME_SIZE];
116 rte_swx_extern_type_member_func_t func;
120 TAILQ_HEAD(extern_type_member_func_tailq, extern_type_member_func);
123 TAILQ_ENTRY(extern_type) node;
124 char name[RTE_SWX_NAME_SIZE];
125 struct struct_type *mailbox_struct_type;
126 rte_swx_extern_type_constructor_t constructor;
127 rte_swx_extern_type_destructor_t destructor;
128 struct extern_type_member_func_tailq funcs;
132 TAILQ_HEAD(extern_type_tailq, extern_type);
135 TAILQ_ENTRY(extern_obj) node;
136 char name[RTE_SWX_NAME_SIZE];
137 struct extern_type *type;
143 TAILQ_HEAD(extern_obj_tailq, extern_obj);
145 #ifndef RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX
146 #define RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX 8
149 struct extern_obj_runtime {
152 rte_swx_extern_type_member_func_t funcs[RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX];
159 TAILQ_ENTRY(extern_func) node;
160 char name[RTE_SWX_NAME_SIZE];
161 struct struct_type *mailbox_struct_type;
162 rte_swx_extern_func_t func;
167 TAILQ_HEAD(extern_func_tailq, extern_func);
169 struct extern_func_runtime {
171 rte_swx_extern_func_t func;
178 TAILQ_ENTRY(header) node;
179 char name[RTE_SWX_NAME_SIZE];
180 struct struct_type *st;
185 TAILQ_HEAD(header_tailq, header);
187 struct header_runtime {
191 struct header_out_runtime {
201 /* Packet headers are always in Network Byte Order (NBO), i.e. big endian.
202 * Packet meta-data fields are always assumed to be in Host Byte Order (HBO).
203 * Table entry fields can be in either NBO or HBO; they are assumed to be in HBO
204 * when transferred to packet meta-data and in NBO when transferred to packet
208 /* Notation conventions:
209 * -Header field: H = h.header.field (dst/src)
210 * -Meta-data field: M = m.field (dst/src)
211 * -Extern object mailbox field: E = e.field (dst/src)
212 * -Extern function mailbox field: F = f.field (dst/src)
213 * -Table action data field: T = t.field (src only)
214 * -Immediate value: I = 32-bit unsigned value (src only)
217 enum instruction_type {
224 /* extract h.header */
245 /* validate h.header */
248 /* invalidate h.header */
249 INSTR_HDR_INVALIDATE,
253 * dst = HMEF, src = HMEFTI
255 INSTR_MOV, /* dst = MEF, src = MEFT */
256 INSTR_MOV_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
257 INSTR_MOV_I, /* dst = HMEF, src = I */
259 /* dma h.header t.field
260 * memcpy(h.header, t.field, sizeof(h.header))
273 * dst = HMEF, src = HMEFTI
275 INSTR_ALU_ADD, /* dst = MEF, src = MEF */
276 INSTR_ALU_ADD_MH, /* dst = MEF, src = H */
277 INSTR_ALU_ADD_HM, /* dst = H, src = MEF */
278 INSTR_ALU_ADD_HH, /* dst = H, src = H */
279 INSTR_ALU_ADD_MI, /* dst = MEF, src = I */
280 INSTR_ALU_ADD_HI, /* dst = H, src = I */
284 * dst = HMEF, src = HMEFTI
286 INSTR_ALU_SUB, /* dst = MEF, src = MEF */
287 INSTR_ALU_SUB_MH, /* dst = MEF, src = H */
288 INSTR_ALU_SUB_HM, /* dst = H, src = MEF */
289 INSTR_ALU_SUB_HH, /* dst = H, src = H */
290 INSTR_ALU_SUB_MI, /* dst = MEF, src = I */
291 INSTR_ALU_SUB_HI, /* dst = H, src = I */
294 * dst = dst '+ src[0:1] '+ src[2:3] + ...
295 * dst = H, src = {H, h.header}
297 INSTR_ALU_CKADD_FIELD, /* src = H */
298 INSTR_ALU_CKADD_STRUCT20, /* src = h.header, with sizeof(header) = 20 */
299 INSTR_ALU_CKADD_STRUCT, /* src = h.header, with any sizeof(header) */
305 INSTR_ALU_CKSUB_FIELD,
309 * dst = HMEF, src = HMEFTI
311 INSTR_ALU_AND, /* dst = MEF, src = MEFT */
312 INSTR_ALU_AND_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
313 INSTR_ALU_AND_I, /* dst = HMEF, src = I */
317 * dst = HMEF, src = HMEFTI
319 INSTR_ALU_OR, /* dst = MEF, src = MEFT */
320 INSTR_ALU_OR_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
321 INSTR_ALU_OR_I, /* dst = HMEF, src = I */
325 * dst = HMEF, src = HMEFTI
327 INSTR_ALU_XOR, /* dst = MEF, src = MEFT */
328 INSTR_ALU_XOR_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
329 INSTR_ALU_XOR_I, /* dst = HMEF, src = I */
332 struct instr_operand {
347 uint8_t header_id[8];
348 uint8_t struct_id[8];
353 struct instr_hdr_validity {
357 struct instr_dst_src {
358 struct instr_operand dst;
360 struct instr_operand src;
367 uint8_t header_id[8];
368 uint8_t struct_id[8];
379 enum instruction_type type;
382 struct instr_hdr_validity valid;
383 struct instr_dst_src mov;
384 struct instr_dma dma;
385 struct instr_dst_src alu;
389 struct instruction_data {
390 char label[RTE_SWX_NAME_SIZE];
391 char jmp_label[RTE_SWX_NAME_SIZE];
392 uint32_t n_users; /* user = jmp instruction to this instruction. */
400 TAILQ_ENTRY(action) node;
401 char name[RTE_SWX_NAME_SIZE];
402 struct struct_type *st;
403 struct instruction *instructions;
404 uint32_t n_instructions;
408 TAILQ_HEAD(action_tailq, action);
414 TAILQ_ENTRY(table_type) node;
415 char name[RTE_SWX_NAME_SIZE];
416 enum rte_swx_table_match_type match_type;
417 struct rte_swx_table_ops ops;
420 TAILQ_HEAD(table_type_tailq, table_type);
423 enum rte_swx_table_match_type match_type;
428 TAILQ_ENTRY(table) node;
429 char name[RTE_SWX_NAME_SIZE];
430 char args[RTE_SWX_NAME_SIZE];
431 struct table_type *type; /* NULL when n_fields == 0. */
434 struct match_field *fields;
436 int is_header; /* Only valid when n_fields > 0. */
437 struct header *header; /* Only valid when n_fields > 0. */
440 struct action **actions;
441 struct action *default_action;
442 uint8_t *default_action_data;
444 int default_action_is_const;
445 uint32_t action_data_size_max;
451 TAILQ_HEAD(table_tailq, table);
453 struct table_runtime {
454 rte_swx_table_lookup_t func;
464 struct rte_swx_pkt pkt;
470 /* Packet headers. */
471 struct header_runtime *headers; /* Extracted or generated headers. */
472 struct header_out_runtime *headers_out; /* Emitted headers. */
473 uint8_t *header_storage;
474 uint8_t *header_out_storage;
475 uint64_t valid_headers;
476 uint32_t n_headers_out;
478 /* Packet meta-data. */
482 struct table_runtime *tables;
483 struct rte_swx_table_state *table_state;
485 int hit; /* 0 = Miss, 1 = Hit. */
487 /* Extern objects and functions. */
488 struct extern_obj_runtime *extern_objs;
489 struct extern_func_runtime *extern_funcs;
492 struct instruction *ip;
493 struct instruction *ret;
496 #define MASK64_BIT_GET(mask, pos) ((mask) & (1LLU << (pos)))
497 #define MASK64_BIT_SET(mask, pos) ((mask) | (1LLU << (pos)))
498 #define MASK64_BIT_CLR(mask, pos) ((mask) & ~(1LLU << (pos)))
500 #define ALU(thread, ip, operator) \
502 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
503 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
504 uint64_t dst64 = *dst64_ptr; \
505 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
506 uint64_t dst = dst64 & dst64_mask; \
508 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
509 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
510 uint64_t src64 = *src64_ptr; \
511 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
512 uint64_t src = src64 & src64_mask; \
514 uint64_t result = dst operator src; \
516 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
519 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
521 #define ALU_S(thread, ip, operator) \
523 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
524 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
525 uint64_t dst64 = *dst64_ptr; \
526 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
527 uint64_t dst = dst64 & dst64_mask; \
529 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
530 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
531 uint64_t src64 = *src64_ptr; \
532 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
534 uint64_t result = dst operator src; \
536 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
541 #define ALU_HM(thread, ip, operator) \
543 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
544 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
545 uint64_t dst64 = *dst64_ptr; \
546 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
547 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
549 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
550 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
551 uint64_t src64 = *src64_ptr; \
552 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
553 uint64_t src = src64 & src64_mask; \
555 uint64_t result = dst operator src; \
556 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
558 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
561 #define ALU_HH(thread, ip, operator) \
563 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
564 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
565 uint64_t dst64 = *dst64_ptr; \
566 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
567 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
569 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
570 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
571 uint64_t src64 = *src64_ptr; \
572 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
574 uint64_t result = dst operator src; \
575 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
577 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
589 #define ALU_I(thread, ip, operator) \
591 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
592 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
593 uint64_t dst64 = *dst64_ptr; \
594 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
595 uint64_t dst = dst64 & dst64_mask; \
597 uint64_t src = (ip)->alu.src_val; \
599 uint64_t result = dst operator src; \
601 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
606 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
608 #define ALU_HI(thread, ip, operator) \
610 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
611 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
612 uint64_t dst64 = *dst64_ptr; \
613 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
614 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
616 uint64_t src = (ip)->alu.src_val; \
618 uint64_t result = dst operator src; \
619 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
621 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
630 #define MOV(thread, ip) \
632 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
633 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
634 uint64_t dst64 = *dst64_ptr; \
635 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
637 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
638 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
639 uint64_t src64 = *src64_ptr; \
640 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \
641 uint64_t src = src64 & src64_mask; \
643 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
646 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
648 #define MOV_S(thread, ip) \
650 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
651 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
652 uint64_t dst64 = *dst64_ptr; \
653 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
655 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
656 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
657 uint64_t src64 = *src64_ptr; \
658 uint64_t src = ntoh64(src64) >> (64 - (ip)->mov.src.n_bits); \
660 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
669 #define MOV_I(thread, ip) \
671 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
672 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
673 uint64_t dst64 = *dst64_ptr; \
674 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
676 uint64_t src = (ip)->mov.src_val; \
678 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
681 #define METADATA_READ(thread, offset, n_bits) \
683 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
684 uint64_t m64 = *m64_ptr; \
685 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
689 #define METADATA_WRITE(thread, offset, n_bits, value) \
691 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
692 uint64_t m64 = *m64_ptr; \
693 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
695 uint64_t m_new = value; \
697 *m64_ptr = (m64 & ~m64_mask) | (m_new & m64_mask); \
700 #ifndef RTE_SWX_PIPELINE_THREADS_MAX
701 #define RTE_SWX_PIPELINE_THREADS_MAX 16
704 struct rte_swx_pipeline {
705 struct struct_type_tailq struct_types;
706 struct port_in_type_tailq port_in_types;
707 struct port_in_tailq ports_in;
708 struct port_out_type_tailq port_out_types;
709 struct port_out_tailq ports_out;
710 struct extern_type_tailq extern_types;
711 struct extern_obj_tailq extern_objs;
712 struct extern_func_tailq extern_funcs;
713 struct header_tailq headers;
714 struct struct_type *metadata_st;
715 uint32_t metadata_struct_id;
716 struct action_tailq actions;
717 struct table_type_tailq table_types;
718 struct table_tailq tables;
720 struct port_in_runtime *in;
721 struct port_out_runtime *out;
722 struct instruction **action_instructions;
723 struct rte_swx_table_state *table_state;
724 struct instruction *instructions;
725 struct thread threads[RTE_SWX_PIPELINE_THREADS_MAX];
729 uint32_t n_ports_out;
730 uint32_t n_extern_objs;
731 uint32_t n_extern_funcs;
737 uint32_t n_instructions;
745 static struct struct_type *
746 struct_type_find(struct rte_swx_pipeline *p, const char *name)
748 struct struct_type *elem;
750 TAILQ_FOREACH(elem, &p->struct_types, node)
751 if (strcmp(elem->name, name) == 0)
757 static struct field *
758 struct_type_field_find(struct struct_type *st, const char *name)
762 for (i = 0; i < st->n_fields; i++) {
763 struct field *f = &st->fields[i];
765 if (strcmp(f->name, name) == 0)
773 rte_swx_pipeline_struct_type_register(struct rte_swx_pipeline *p,
775 struct rte_swx_field_params *fields,
778 struct struct_type *st;
782 CHECK_NAME(name, EINVAL);
783 CHECK(fields, EINVAL);
784 CHECK(n_fields, EINVAL);
786 for (i = 0; i < n_fields; i++) {
787 struct rte_swx_field_params *f = &fields[i];
790 CHECK_NAME(f->name, EINVAL);
791 CHECK(f->n_bits, EINVAL);
792 CHECK(f->n_bits <= 64, EINVAL);
793 CHECK((f->n_bits & 7) == 0, EINVAL);
795 for (j = 0; j < i; j++) {
796 struct rte_swx_field_params *f_prev = &fields[j];
798 CHECK(strcmp(f->name, f_prev->name), EINVAL);
802 CHECK(!struct_type_find(p, name), EEXIST);
804 /* Node allocation. */
805 st = calloc(1, sizeof(struct struct_type));
808 st->fields = calloc(n_fields, sizeof(struct field));
814 /* Node initialization. */
815 strcpy(st->name, name);
816 for (i = 0; i < n_fields; i++) {
817 struct field *dst = &st->fields[i];
818 struct rte_swx_field_params *src = &fields[i];
820 strcpy(dst->name, src->name);
821 dst->n_bits = src->n_bits;
822 dst->offset = st->n_bits;
824 st->n_bits += src->n_bits;
826 st->n_fields = n_fields;
828 /* Node add to tailq. */
829 TAILQ_INSERT_TAIL(&p->struct_types, st, node);
835 struct_build(struct rte_swx_pipeline *p)
839 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
840 struct thread *t = &p->threads[i];
842 t->structs = calloc(p->n_structs, sizeof(uint8_t *));
843 CHECK(t->structs, ENOMEM);
850 struct_build_free(struct rte_swx_pipeline *p)
854 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
855 struct thread *t = &p->threads[i];
863 struct_free(struct rte_swx_pipeline *p)
865 struct_build_free(p);
869 struct struct_type *elem;
871 elem = TAILQ_FIRST(&p->struct_types);
875 TAILQ_REMOVE(&p->struct_types, elem, node);
884 static struct port_in_type *
885 port_in_type_find(struct rte_swx_pipeline *p, const char *name)
887 struct port_in_type *elem;
892 TAILQ_FOREACH(elem, &p->port_in_types, node)
893 if (strcmp(elem->name, name) == 0)
900 rte_swx_pipeline_port_in_type_register(struct rte_swx_pipeline *p,
902 struct rte_swx_port_in_ops *ops)
904 struct port_in_type *elem;
907 CHECK_NAME(name, EINVAL);
909 CHECK(ops->create, EINVAL);
910 CHECK(ops->free, EINVAL);
911 CHECK(ops->pkt_rx, EINVAL);
912 CHECK(ops->stats_read, EINVAL);
914 CHECK(!port_in_type_find(p, name), EEXIST);
916 /* Node allocation. */
917 elem = calloc(1, sizeof(struct port_in_type));
920 /* Node initialization. */
921 strcpy(elem->name, name);
922 memcpy(&elem->ops, ops, sizeof(*ops));
924 /* Node add to tailq. */
925 TAILQ_INSERT_TAIL(&p->port_in_types, elem, node);
930 static struct port_in *
931 port_in_find(struct rte_swx_pipeline *p, uint32_t port_id)
933 struct port_in *port;
935 TAILQ_FOREACH(port, &p->ports_in, node)
936 if (port->id == port_id)
943 rte_swx_pipeline_port_in_config(struct rte_swx_pipeline *p,
945 const char *port_type_name,
948 struct port_in_type *type = NULL;
949 struct port_in *port = NULL;
954 CHECK(!port_in_find(p, port_id), EINVAL);
956 CHECK_NAME(port_type_name, EINVAL);
957 type = port_in_type_find(p, port_type_name);
960 obj = type->ops.create(args);
963 /* Node allocation. */
964 port = calloc(1, sizeof(struct port_in));
967 /* Node initialization. */
972 /* Node add to tailq. */
973 TAILQ_INSERT_TAIL(&p->ports_in, port, node);
974 if (p->n_ports_in < port_id + 1)
975 p->n_ports_in = port_id + 1;
981 port_in_build(struct rte_swx_pipeline *p)
983 struct port_in *port;
986 CHECK(p->n_ports_in, EINVAL);
987 CHECK(rte_is_power_of_2(p->n_ports_in), EINVAL);
989 for (i = 0; i < p->n_ports_in; i++)
990 CHECK(port_in_find(p, i), EINVAL);
992 p->in = calloc(p->n_ports_in, sizeof(struct port_in_runtime));
993 CHECK(p->in, ENOMEM);
995 TAILQ_FOREACH(port, &p->ports_in, node) {
996 struct port_in_runtime *in = &p->in[port->id];
998 in->pkt_rx = port->type->ops.pkt_rx;
1006 port_in_build_free(struct rte_swx_pipeline *p)
1013 port_in_free(struct rte_swx_pipeline *p)
1015 port_in_build_free(p);
1019 struct port_in *port;
1021 port = TAILQ_FIRST(&p->ports_in);
1025 TAILQ_REMOVE(&p->ports_in, port, node);
1026 port->type->ops.free(port->obj);
1030 /* Input port types. */
1032 struct port_in_type *elem;
1034 elem = TAILQ_FIRST(&p->port_in_types);
1038 TAILQ_REMOVE(&p->port_in_types, elem, node);
1046 static struct port_out_type *
1047 port_out_type_find(struct rte_swx_pipeline *p, const char *name)
1049 struct port_out_type *elem;
1054 TAILQ_FOREACH(elem, &p->port_out_types, node)
1055 if (!strcmp(elem->name, name))
1062 rte_swx_pipeline_port_out_type_register(struct rte_swx_pipeline *p,
1064 struct rte_swx_port_out_ops *ops)
1066 struct port_out_type *elem;
1069 CHECK_NAME(name, EINVAL);
1071 CHECK(ops->create, EINVAL);
1072 CHECK(ops->free, EINVAL);
1073 CHECK(ops->pkt_tx, EINVAL);
1074 CHECK(ops->stats_read, EINVAL);
1076 CHECK(!port_out_type_find(p, name), EEXIST);
1078 /* Node allocation. */
1079 elem = calloc(1, sizeof(struct port_out_type));
1080 CHECK(elem, ENOMEM);
1082 /* Node initialization. */
1083 strcpy(elem->name, name);
1084 memcpy(&elem->ops, ops, sizeof(*ops));
1086 /* Node add to tailq. */
1087 TAILQ_INSERT_TAIL(&p->port_out_types, elem, node);
1092 static struct port_out *
1093 port_out_find(struct rte_swx_pipeline *p, uint32_t port_id)
1095 struct port_out *port;
1097 TAILQ_FOREACH(port, &p->ports_out, node)
1098 if (port->id == port_id)
1105 rte_swx_pipeline_port_out_config(struct rte_swx_pipeline *p,
1107 const char *port_type_name,
1110 struct port_out_type *type = NULL;
1111 struct port_out *port = NULL;
1116 CHECK(!port_out_find(p, port_id), EINVAL);
1118 CHECK_NAME(port_type_name, EINVAL);
1119 type = port_out_type_find(p, port_type_name);
1120 CHECK(type, EINVAL);
1122 obj = type->ops.create(args);
1125 /* Node allocation. */
1126 port = calloc(1, sizeof(struct port_out));
1127 CHECK(port, ENOMEM);
1129 /* Node initialization. */
1134 /* Node add to tailq. */
1135 TAILQ_INSERT_TAIL(&p->ports_out, port, node);
1136 if (p->n_ports_out < port_id + 1)
1137 p->n_ports_out = port_id + 1;
1143 port_out_build(struct rte_swx_pipeline *p)
1145 struct port_out *port;
1148 CHECK(p->n_ports_out, EINVAL);
1150 for (i = 0; i < p->n_ports_out; i++)
1151 CHECK(port_out_find(p, i), EINVAL);
1153 p->out = calloc(p->n_ports_out, sizeof(struct port_out_runtime));
1154 CHECK(p->out, ENOMEM);
1156 TAILQ_FOREACH(port, &p->ports_out, node) {
1157 struct port_out_runtime *out = &p->out[port->id];
1159 out->pkt_tx = port->type->ops.pkt_tx;
1160 out->flush = port->type->ops.flush;
1161 out->obj = port->obj;
1168 port_out_build_free(struct rte_swx_pipeline *p)
1175 port_out_free(struct rte_swx_pipeline *p)
1177 port_out_build_free(p);
1181 struct port_out *port;
1183 port = TAILQ_FIRST(&p->ports_out);
1187 TAILQ_REMOVE(&p->ports_out, port, node);
1188 port->type->ops.free(port->obj);
1192 /* Output port types. */
1194 struct port_out_type *elem;
1196 elem = TAILQ_FIRST(&p->port_out_types);
1200 TAILQ_REMOVE(&p->port_out_types, elem, node);
1208 static struct extern_type *
1209 extern_type_find(struct rte_swx_pipeline *p, const char *name)
1211 struct extern_type *elem;
1213 TAILQ_FOREACH(elem, &p->extern_types, node)
1214 if (strcmp(elem->name, name) == 0)
1220 static struct extern_type_member_func *
1221 extern_type_member_func_find(struct extern_type *type, const char *name)
1223 struct extern_type_member_func *elem;
1225 TAILQ_FOREACH(elem, &type->funcs, node)
1226 if (strcmp(elem->name, name) == 0)
1232 static struct extern_obj *
1233 extern_obj_find(struct rte_swx_pipeline *p, const char *name)
1235 struct extern_obj *elem;
1237 TAILQ_FOREACH(elem, &p->extern_objs, node)
1238 if (strcmp(elem->name, name) == 0)
1244 static struct field *
1245 extern_obj_mailbox_field_parse(struct rte_swx_pipeline *p,
1247 struct extern_obj **object)
1249 struct extern_obj *obj;
1251 char *obj_name, *field_name;
1253 if ((name[0] != 'e') || (name[1] != '.'))
1256 obj_name = strdup(&name[2]);
1260 field_name = strchr(obj_name, '.');
1269 obj = extern_obj_find(p, obj_name);
1275 f = struct_type_field_find(obj->type->mailbox_struct_type, field_name);
1289 rte_swx_pipeline_extern_type_register(struct rte_swx_pipeline *p,
1291 const char *mailbox_struct_type_name,
1292 rte_swx_extern_type_constructor_t constructor,
1293 rte_swx_extern_type_destructor_t destructor)
1295 struct extern_type *elem;
1296 struct struct_type *mailbox_struct_type;
1300 CHECK_NAME(name, EINVAL);
1301 CHECK(!extern_type_find(p, name), EEXIST);
1303 CHECK_NAME(mailbox_struct_type_name, EINVAL);
1304 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
1305 CHECK(mailbox_struct_type, EINVAL);
1307 CHECK(constructor, EINVAL);
1308 CHECK(destructor, EINVAL);
1310 /* Node allocation. */
1311 elem = calloc(1, sizeof(struct extern_type));
1312 CHECK(elem, ENOMEM);
1314 /* Node initialization. */
1315 strcpy(elem->name, name);
1316 elem->mailbox_struct_type = mailbox_struct_type;
1317 elem->constructor = constructor;
1318 elem->destructor = destructor;
1319 TAILQ_INIT(&elem->funcs);
1321 /* Node add to tailq. */
1322 TAILQ_INSERT_TAIL(&p->extern_types, elem, node);
1328 rte_swx_pipeline_extern_type_member_func_register(struct rte_swx_pipeline *p,
1329 const char *extern_type_name,
1331 rte_swx_extern_type_member_func_t member_func)
1333 struct extern_type *type;
1334 struct extern_type_member_func *type_member;
1338 /* Use CHECK_NAME (not bare CHECK) so empty-string names are rejected,
 * consistent with every other *_register() function in this file.
 */
CHECK_NAME(extern_type_name, EINVAL);
1339 type = extern_type_find(p, extern_type_name);
1340 CHECK(type, EINVAL);
1341 CHECK(type->n_funcs < RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX, ENOSPC);
1343 CHECK_NAME(name, EINVAL);
1344 CHECK(!extern_type_member_func_find(type, name), EEXIST);
1346 CHECK(member_func, EINVAL);
1348 /* Node allocation. */
1349 type_member = calloc(1, sizeof(struct extern_type_member_func));
1350 CHECK(type_member, ENOMEM);
1352 /* Node initialization. */
1353 strcpy(type_member->name, name);
1354 type_member->func = member_func;
1355 type_member->id = type->n_funcs;
1357 /* Node add to tailq. */
1358 TAILQ_INSERT_TAIL(&type->funcs, type_member, node);
1365 rte_swx_pipeline_extern_object_config(struct rte_swx_pipeline *p,
1366 const char *extern_type_name,
1370 struct extern_type *type;
1371 struct extern_obj *obj;
1376 CHECK_NAME(extern_type_name, EINVAL);
1377 type = extern_type_find(p, extern_type_name);
1378 CHECK(type, EINVAL);
1380 CHECK_NAME(name, EINVAL);
1381 CHECK(!extern_obj_find(p, name), EEXIST);
1383 /* Node allocation. */
1384 obj = calloc(1, sizeof(struct extern_obj));
1387 /* Object construction. */
1388 obj_handle = type->constructor(args);
1394 /* Node initialization. */
1395 strcpy(obj->name, name);
1397 obj->obj = obj_handle;
1398 obj->struct_id = p->n_structs;
1399 obj->id = p->n_extern_objs;
1401 /* Node add to tailq. */
1402 TAILQ_INSERT_TAIL(&p->extern_objs, obj, node);
1410 extern_obj_build(struct rte_swx_pipeline *p)
1414 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1415 struct thread *t = &p->threads[i];
1416 struct extern_obj *obj;
1418 t->extern_objs = calloc(p->n_extern_objs,
1419 sizeof(struct extern_obj_runtime));
1420 CHECK(t->extern_objs, ENOMEM);
1422 TAILQ_FOREACH(obj, &p->extern_objs, node) {
1423 struct extern_obj_runtime *r =
1424 &t->extern_objs[obj->id];
1425 struct extern_type_member_func *func;
1426 uint32_t mailbox_size =
1427 obj->type->mailbox_struct_type->n_bits / 8;
1431 r->mailbox = calloc(1, mailbox_size);
1432 CHECK(r->mailbox, ENOMEM);
1434 TAILQ_FOREACH(func, &obj->type->funcs, node)
1435 r->funcs[func->id] = func->func;
1437 t->structs[obj->struct_id] = r->mailbox;
1445 extern_obj_build_free(struct rte_swx_pipeline *p)
1449 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1450 struct thread *t = &p->threads[i];
1453 if (!t->extern_objs)
1456 for (j = 0; j < p->n_extern_objs; j++) {
1457 struct extern_obj_runtime *r = &t->extern_objs[j];
1462 free(t->extern_objs);
1463 t->extern_objs = NULL;
1468 extern_obj_free(struct rte_swx_pipeline *p)
1470 extern_obj_build_free(p);
1472 /* Extern objects. */
1474 struct extern_obj *elem;
1476 elem = TAILQ_FIRST(&p->extern_objs);
1480 TAILQ_REMOVE(&p->extern_objs, elem, node);
1482 elem->type->destructor(elem->obj);
1488 struct extern_type *elem;
1490 elem = TAILQ_FIRST(&p->extern_types);
1494 TAILQ_REMOVE(&p->extern_types, elem, node);
1497 struct extern_type_member_func *func;
1499 func = TAILQ_FIRST(&elem->funcs);
1503 TAILQ_REMOVE(&elem->funcs, func, node);
1514 static struct extern_func *
1515 extern_func_find(struct rte_swx_pipeline *p, const char *name)
1517 struct extern_func *elem;
1519 TAILQ_FOREACH(elem, &p->extern_funcs, node)
1520 if (strcmp(elem->name, name) == 0)
1526 static struct field *
1527 extern_func_mailbox_field_parse(struct rte_swx_pipeline *p,
1529 struct extern_func **function)
1531 struct extern_func *func;
1533 char *func_name, *field_name;
1535 if ((name[0] != 'f') || (name[1] != '.'))
1538 func_name = strdup(&name[2]);
1542 field_name = strchr(func_name, '.');
1551 func = extern_func_find(p, func_name);
1557 f = struct_type_field_find(func->mailbox_struct_type, field_name);
1571 rte_swx_pipeline_extern_func_register(struct rte_swx_pipeline *p,
1573 const char *mailbox_struct_type_name,
1574 rte_swx_extern_func_t func)
1576 struct extern_func *f;
1577 struct struct_type *mailbox_struct_type;
1581 CHECK_NAME(name, EINVAL);
1582 CHECK(!extern_func_find(p, name), EEXIST);
1584 CHECK_NAME(mailbox_struct_type_name, EINVAL);
1585 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
1586 CHECK(mailbox_struct_type, EINVAL);
1588 CHECK(func, EINVAL);
1590 /* Node allocation. */
1591 f = calloc(1, sizeof(struct extern_func));
1592 /* Check the allocation result (f), not func: func was already validated
 * above, so checking it here would let a failed calloc() go undetected
 * and the strcpy() below would dereference NULL.
 */
CHECK(f, ENOMEM);
1594 /* Node initialization. */
1595 strcpy(f->name, name);
1596 f->mailbox_struct_type = mailbox_struct_type;
1598 f->struct_id = p->n_structs;
1599 f->id = p->n_extern_funcs;
1601 /* Node add to tailq. */
1602 TAILQ_INSERT_TAIL(&p->extern_funcs, f, node);
1603 p->n_extern_funcs++;
1610 extern_func_build(struct rte_swx_pipeline *p)
1614 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1615 struct thread *t = &p->threads[i];
1616 struct extern_func *func;
1618 /* Memory allocation. */
1619 t->extern_funcs = calloc(p->n_extern_funcs,
1620 sizeof(struct extern_func_runtime));
1621 CHECK(t->extern_funcs, ENOMEM);
1623 /* Extern function. */
1624 TAILQ_FOREACH(func, &p->extern_funcs, node) {
1625 struct extern_func_runtime *r =
1626 &t->extern_funcs[func->id];
1627 uint32_t mailbox_size =
1628 func->mailbox_struct_type->n_bits / 8;
1630 r->func = func->func;
1632 r->mailbox = calloc(1, mailbox_size);
1633 CHECK(r->mailbox, ENOMEM);
1635 t->structs[func->struct_id] = r->mailbox;
1643 extern_func_build_free(struct rte_swx_pipeline *p)
1647 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1648 struct thread *t = &p->threads[i];
1651 if (!t->extern_funcs)
1654 for (j = 0; j < p->n_extern_funcs; j++) {
1655 struct extern_func_runtime *r = &t->extern_funcs[j];
1660 free(t->extern_funcs);
1661 t->extern_funcs = NULL;
1666 extern_func_free(struct rte_swx_pipeline *p)
1668 extern_func_build_free(p);
1671 struct extern_func *elem;
1673 elem = TAILQ_FIRST(&p->extern_funcs);
1677 TAILQ_REMOVE(&p->extern_funcs, elem, node);
1685 static struct header *
1686 header_find(struct rte_swx_pipeline *p, const char *name)
1688 struct header *elem;
1690 TAILQ_FOREACH(elem, &p->headers, node)
1691 if (strcmp(elem->name, name) == 0)
1697 static struct header *
1698 header_parse(struct rte_swx_pipeline *p,
1701 if (name[0] != 'h' || name[1] != '.')
1704 return header_find(p, &name[2]);
1707 static struct field *
1708 header_field_parse(struct rte_swx_pipeline *p,
1710 struct header **header)
1714 char *header_name, *field_name;
1716 if ((name[0] != 'h') || (name[1] != '.'))
1719 header_name = strdup(&name[2]);
1723 field_name = strchr(header_name, '.');
1732 h = header_find(p, header_name);
1738 f = struct_type_field_find(h->st, field_name);
/* Public API: register a packet header of a previously registered struct
 * type. Fails with EINVAL on bad names, EEXIST on duplicate header name, and
 * ENOSPC once the number of headers exceeds the bit width of
 * thread.valid_headers (each header occupies one bit of the validity mask).
 * NOTE(review): chunk is elided; allocation-failure check for h and the
 * p->n_structs / p->n_headers increments are presumably in the missing lines.
 */
1752 rte_swx_pipeline_packet_header_register(struct rte_swx_pipeline *p,
1754 const char *struct_type_name)
1756 struct struct_type *st;
1758 size_t n_headers_max;
1761 CHECK_NAME(name, EINVAL);
1762 CHECK_NAME(struct_type_name, EINVAL);
1764 CHECK(!header_find(p, name), EEXIST);
1766 st = struct_type_find(p, struct_type_name);
/* One bit per header in thread.valid_headers bounds the header count. */
1769 n_headers_max = RTE_SIZEOF_FIELD(struct thread, valid_headers) * 8;
1770 CHECK(p->n_headers < n_headers_max, ENOSPC);
1772 /* Node allocation. */
1773 h = calloc(1, sizeof(struct header));
1776 /* Node initialization. */
1777 strcpy(h->name, name);
1779 h->struct_id = p->n_structs;
1780 h->id = p->n_headers;
1782 /* Node add to tailq. */
1783 TAILQ_INSERT_TAIL(&p->headers, h, node);
/* Build step: size the combined header storage (sum of all header byte
 * sizes), then allocate per-thread runtime arrays and storage, and point
 * each header's runtime ptr0 / struct slot at its slice of the storage.
 * Uses CHECK(..., ENOMEM) so a failed allocation returns immediately;
 * cleanup is left to header_build_free().
 */
1791 header_build(struct rte_swx_pipeline *p)
1794 uint32_t n_bytes = 0, i;
1796 TAILQ_FOREACH(h, &p->headers, node) {
1797 n_bytes += h->st->n_bits / 8;
1800 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1801 struct thread *t = &p->threads[i];
1802 uint32_t offset = 0;
1804 t->headers = calloc(p->n_headers,
1805 sizeof(struct header_runtime));
1806 CHECK(t->headers, ENOMEM);
1808 t->headers_out = calloc(p->n_headers,
1809 sizeof(struct header_out_runtime));
1810 CHECK(t->headers_out, ENOMEM);
/* One contiguous buffer each for parsed headers and emitted headers. */
1812 t->header_storage = calloc(1, n_bytes);
1813 CHECK(t->header_storage, ENOMEM);
1815 t->header_out_storage = calloc(1, n_bytes);
1816 CHECK(t->header_out_storage, ENOMEM);
1818 TAILQ_FOREACH(h, &p->headers, node) {
1819 uint8_t *header_storage;
/* Carve this header's slice out of the shared storage buffer. */
1821 header_storage = &t->header_storage[offset];
1822 offset += h->st->n_bits / 8;
1824 t->headers[h->id].ptr0 = header_storage;
1825 t->structs[h->struct_id] = header_storage;
/* Undo header_build(): free every per-thread header runtime array and
 * storage buffer, NULLing each pointer so the function is idempotent.
 * NOTE(review): elided lines presumably also free t->headers — confirm.
 */
1833 header_build_free(struct rte_swx_pipeline *p)
1837 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1838 struct thread *t = &p->threads[i];
1840 free(t->headers_out);
1841 t->headers_out = NULL;
1846 free(t->header_out_storage);
1847 t->header_out_storage = NULL;
1849 free(t->header_storage);
1850 t->header_storage = NULL;
/* Free all header registrations: per-thread build state first, then every
 * node on the p->headers tailq. NOTE(review): elided lines presumably loop
 * until the list is empty and free(elem) after removal — confirm.
 */
1855 header_free(struct rte_swx_pipeline *p)
1857 header_build_free(p);
1860 struct header *elem;
1862 elem = TAILQ_FIRST(&p->headers);
1866 TAILQ_REMOVE(&p->headers, elem, node);
/* Parse an "m.<field>" token against the registered metadata struct type.
 * Returns NULL when no metadata struct is registered or the prefix is wrong
 * (NOTE(review): early-return lines are elided — confirm).
 */
1874 static struct field *
1875 metadata_field_parse(struct rte_swx_pipeline *p, const char *name)
1877 if (!p->metadata_st)
1880 if (name[0] != 'm' || name[1] != '.')
1883 return struct_type_field_find(p->metadata_st, &name[2]);
/* Public API: register the (single) packet metadata struct type. Fails with
 * EINVAL when a metadata struct is already registered. The metadata gets the
 * next struct slot id.
 * NOTE(review): elided lines presumably CHECK st != NULL and increment
 * p->n_structs — confirm.
 */
1887 rte_swx_pipeline_packet_metadata_register(struct rte_swx_pipeline *p,
1888 const char *struct_type_name)
1890 struct struct_type *st = NULL;
1894 CHECK_NAME(struct_type_name, EINVAL);
1895 st = struct_type_find(p, struct_type_name);
/* Only one metadata struct per pipeline. */
1897 CHECK(!p->metadata_st, EINVAL);
1899 p->metadata_st = st;
1900 p->metadata_struct_id = p->n_structs;
/* Build step: allocate one zeroed metadata buffer per thread and plug it
 * into the thread's struct pointer table at the metadata struct slot.
 */
1908 metadata_build(struct rte_swx_pipeline *p)
1910 uint32_t n_bytes = p->metadata_st->n_bits / 8;
1913 /* Thread-level initialization. */
1914 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1915 struct thread *t = &p->threads[i];
1918 metadata = calloc(1, n_bytes);
1919 CHECK(metadata, ENOMEM);
1921 t->metadata = metadata;
1922 t->structs[p->metadata_struct_id] = metadata;
/* Undo metadata_build(). NOTE(review): the free(t->metadata) /
 * t->metadata = NULL lines are elided — confirm against the full source.
 */
1929 metadata_build_free(struct rte_swx_pipeline *p)
1933 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1934 struct thread *t = &p->threads[i];
/* Free all metadata state; unlike header_free there is no tailq to drain,
 * only the per-thread build state.
 */
1942 metadata_free(struct rte_swx_pipeline *p)
1944 metadata_build_free(p);
/* Forward declaration: action_field_parse is defined later in the file. */
1950 static struct field *
1951 action_field_parse(struct action *action, const char *name);
/* Resolve a field reference token to a (field, struct_id) pair by trying
 * each namespace in turn: header field ("h."), metadata field ("m."),
 * action argument, extern-object mailbox, extern-function mailbox.
 * NOTE(review): the prefix switch/dispatch and the per-case returns are
 * elided; the visible lines are the per-namespace lookups only.
 */
1953 static struct field *
1954 struct_field_parse(struct rte_swx_pipeline *p,
1955 struct action *action,
1957 uint32_t *struct_id)
1964 struct header *header;
1966 f = header_field_parse(p, name, &header);
1970 *struct_id = header->struct_id;
1976 f = metadata_field_parse(p, name);
1980 *struct_id = p->metadata_struct_id;
1989 f = action_field_parse(action, name);
1999 struct extern_obj *obj;
2001 f = extern_obj_mailbox_field_parse(p, name, &obj);
2005 *struct_id = obj->struct_id;
2011 struct extern_func *func;
2013 f = extern_func_mailbox_field_parse(p, name, &func);
2017 *struct_id = func->struct_id;
/* Advance the input-port round-robin cursor. The bitwise AND wrap requires
 * n_ports_in to be a power of two — presumably enforced at build time
 * (NOTE(review): the check is not visible in this chunk; confirm).
 */
2027 pipeline_port_inc(struct rte_swx_pipeline *p)
2029 p->port_id = (p->port_id + 1) & (p->n_ports_in - 1);
/* Reset a thread's instruction pointer to the first pipeline instruction. */
2033 thread_ip_reset(struct rte_swx_pipeline *p, struct thread *t)
2035 t->ip = p->instructions;
/* Advance the current thread's instruction pointer by one instruction.
 * NOTE(review): the t->ip increment line is elided — confirm.
 */
2039 thread_ip_inc(struct rte_swx_pipeline *p);
2042 thread_ip_inc(struct rte_swx_pipeline *p)
2044 struct thread *t = &p->threads[p->thread_id];
/* Conditionally advance t->ip (branch-free: presumably t->ip += cond).
 * NOTE(review): body elided — confirm.
 */
2050 thread_ip_inc_cond(struct thread *t, int cond)
/* Switch to the next pipeline thread, round-robin. The AND wrap relies on
 * RTE_SWX_PIPELINE_THREADS_MAX being a power of two.
 */
2056 thread_yield(struct rte_swx_pipeline *p)
2058 p->thread_id = (p->thread_id + 1) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
/* Translate "rx m.<field>" into an INSTR_RX instruction. rx is only legal
 * at pipeline level (not inside an action), takes exactly one operand, and
 * that operand must be a metadata field (it receives the input port id).
 * NOTE(review): the CHECK on f and the return are elided — confirm.
 */
2065 instr_rx_translate(struct rte_swx_pipeline *p,
2066 struct action *action,
2069 struct instruction *instr,
2070 struct instruction_data *data __rte_unused)
2074 CHECK(!action, EINVAL);
2075 CHECK(n_tokens == 2, EINVAL);
2077 f = metadata_field_parse(p, tokens[1]);
2080 instr->type = INSTR_RX;
/* Byte offset/width of the metadata field that stores the port id. */
2081 instr->io.io.offset = f->offset / 8;
2082 instr->io.io.n_bits = f->n_bits;
/* Execute rx: poll the current input port for one packet, reset per-packet
 * state (header validity, emit list), record the port id in metadata, load
 * the table state, advance the port cursor, and advance the IP only when a
 * packet was actually received (otherwise the thread retries rx).
 */
2087 instr_rx_exec(struct rte_swx_pipeline *p);
2090 instr_rx_exec(struct rte_swx_pipeline *p)
2092 struct thread *t = &p->threads[p->thread_id];
2093 struct instruction *ip = t->ip;
2094 struct port_in_runtime *port = &p->in[p->port_id];
2095 struct rte_swx_pkt *pkt = &t->pkt;
2099 pkt_received = port->pkt_rx(port->obj, pkt);
/* Point at the first parseable byte and warm the cache. */
2100 t->ptr = &pkt->pkt[pkt->offset];
2101 rte_prefetch0(t->ptr);
2103 TRACE("[Thread %2u] rx %s from port %u\n",
2105 pkt_received ? "1 pkt" : "0 pkts",
/* Fresh packet: no headers parsed, no headers queued for emit. */
2109 t->valid_headers = 0;
2110 t->n_headers_out = 0;
2113 METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, p->port_id);
2116 t->table_state = p->table_state;
2119 pipeline_port_inc(p);
/* Only move past rx when a packet arrived. */
2120 thread_ip_inc_cond(t, pkt_received);
/* Translate "tx m.<field>" into an INSTR_TX instruction; the metadata field
 * supplies the output port id at run time. Unlike rx, tx is allowed inside
 * actions (action parameter is unused).
 * NOTE(review): the CHECK on f and the return are elided — confirm.
 */
2128 instr_tx_translate(struct rte_swx_pipeline *p,
2129 struct action *action __rte_unused,
2132 struct instruction *instr,
2133 struct instruction_data *data __rte_unused)
2137 CHECK(n_tokens == 2, EINVAL);
2139 f = metadata_field_parse(p, tokens[1]);
2142 instr->type = INSTR_TX;
2143 instr->io.io.offset = f->offset / 8;
2144 instr->io.io.n_bits = f->n_bits;
/* Materialize the emitted-headers list into the packet buffer before tx.
 * Fast paths: (1) a single emitted header that already sits contiguously in
 * front of the payload (no change / pure decap) — just grow the packet over
 * it; (2) exactly two emitted headers where the second is in place and the
 * first still lives in its original storage (encap) — one memcpy. All other
 * shapes fall through to the generic path: gather every emitted header into
 * header_out_storage, then copy the whole block in front of the payload.
 * NOTE(review): returns between the cases are elided — confirm.
 */
2149 emit_handler(struct thread *t)
2151 struct header_out_runtime *h0 = &t->headers_out[0];
2152 struct header_out_runtime *h1 = &t->headers_out[1];
2153 uint32_t offset = 0, i;
2155 /* No header change or header decapsulation. */
2156 if ((t->n_headers_out == 1) &&
2157 (h0->ptr + h0->n_bytes == t->ptr)) {
2158 TRACE("Emit handler: no header change or header decap.\n");
2160 t->pkt.offset -= h0->n_bytes;
2161 t->pkt.length += h0->n_bytes;
2166 /* Header encapsulation (optionally, with prior header decasulation). */
2167 if ((t->n_headers_out == 2) &&
2168 (h1->ptr + h1->n_bytes == t->ptr) &&
2169 (h0->ptr == h0->ptr0)) {
2172 TRACE("Emit handler: header encapsulation.\n");
2174 offset = h0->n_bytes + h1->n_bytes;
2175 memcpy(t->ptr - offset, h0->ptr, h0->n_bytes);
2176 t->pkt.offset -= offset;
2177 t->pkt.length += offset;
2182 /* Header insertion. */
2185 /* Header extraction. */
2188 /* For any other case. */
2189 TRACE("Emit handler: complex case.\n");
/* Generic path: gather all emitted headers, then prepend in one block. */
2191 for (i = 0; i < t->n_headers_out; i++) {
2192 struct header_out_runtime *h = &t->headers_out[i];
2194 memcpy(&t->header_out_storage[offset], h->ptr, h->n_bytes);
2195 offset += h->n_bytes;
2199 memcpy(t->ptr - offset, t->header_out_storage, offset);
2200 t->pkt.offset -= offset;
2201 t->pkt.length += offset;
/* Execute tx: read the output port id from metadata, flush the emitted
 * headers into the packet (NOTE(review): the emit_handler(t) call is elided
 * between the TRACE and pkt_tx — confirm), hand the packet to the output
 * port, and restart this thread at the first instruction for the next packet.
 */
2206 instr_tx_exec(struct rte_swx_pipeline *p);
2209 instr_tx_exec(struct rte_swx_pipeline *p)
2211 struct thread *t = &p->threads[p->thread_id];
2212 struct instruction *ip = t->ip;
2213 uint64_t port_id = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
2214 struct port_out_runtime *port = &p->out[port_id];
2215 struct rte_swx_pkt *pkt = &t->pkt;
2217 TRACE("[Thread %2u]: tx 1 pkt to port %u\n",
2225 port->pkt_tx(port->obj, pkt);
2228 thread_ip_reset(p, t);
/* Translate "extract h.<header>" into INSTR_HDR_EXTRACT. Extraction is a
 * parser-level operation, hence illegal inside an action. Slot 0 of the
 * instruction's header arrays is filled; later fusion passes (elsewhere in
 * the file) presumably pack consecutive extracts into the multi-header
 * variants — NOTE(review): not visible here, confirm.
 */
2236 instr_hdr_extract_translate(struct rte_swx_pipeline *p,
2237 struct action *action,
2240 struct instruction *instr,
2241 struct instruction_data *data __rte_unused)
2245 CHECK(!action, EINVAL);
2246 CHECK(n_tokens == 2, EINVAL);
2248 h = header_parse(p, tokens[1]);
2251 instr->type = INSTR_HDR_EXTRACT;
2252 instr->io.hdr.header_id[0] = h->id;
2253 instr->io.hdr.struct_id[0] = h->struct_id;
2254 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
/* Common body for the 1..8-header extract handlers: for each header slot in
 * the (possibly fused) instruction, alias the header's struct pointer onto
 * the packet bytes in place and set its validity bit, then commit the
 * updated validity mask and packet window back to the thread.
 * NOTE(review): the per-iteration ptr/offset/length advance lines are
 * elided — confirm they advance by n_bytes each.
 */
2259 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract);
2262 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract)
2264 struct thread *t = &p->threads[p->thread_id];
2265 struct instruction *ip = t->ip;
2266 uint64_t valid_headers = t->valid_headers;
2267 uint8_t *ptr = t->ptr;
2268 uint32_t offset = t->pkt.offset;
2269 uint32_t length = t->pkt.length;
2272 for (i = 0; i < n_extract; i++) {
2273 uint32_t header_id = ip->io.hdr.header_id[i];
2274 uint32_t struct_id = ip->io.hdr.struct_id[i];
2275 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
2277 TRACE("[Thread %2u]: extract header %u (%u bytes)\n",
/* Zero-copy: the header struct points straight into the packet. */
2283 t->structs[struct_id] = ptr;
2284 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
2293 t->valid_headers = valid_headers;
2296 t->pkt.offset = offset;
2297 t->pkt.length = length;
/* Thin wrappers over __instr_hdr_extract_exec for 1..8 fused extract
 * instructions. The N>1 variants are produced by instruction fusion; each
 * presumably advances the IP by N afterwards (elided — confirm).
 */
2302 instr_hdr_extract_exec(struct rte_swx_pipeline *p)
2304 __instr_hdr_extract_exec(p, 1);
2311 instr_hdr_extract2_exec(struct rte_swx_pipeline *p)
2313 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
2316 __instr_hdr_extract_exec(p, 2);
2323 instr_hdr_extract3_exec(struct rte_swx_pipeline *p)
2325 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
2328 __instr_hdr_extract_exec(p, 3);
2335 instr_hdr_extract4_exec(struct rte_swx_pipeline *p)
2337 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
2340 __instr_hdr_extract_exec(p, 4);
2347 instr_hdr_extract5_exec(struct rte_swx_pipeline *p)
2349 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
2352 __instr_hdr_extract_exec(p, 5);
2359 instr_hdr_extract6_exec(struct rte_swx_pipeline *p)
2361 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
2364 __instr_hdr_extract_exec(p, 6);
2371 instr_hdr_extract7_exec(struct rte_swx_pipeline *p)
2373 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
2376 __instr_hdr_extract_exec(p, 7);
2383 instr_hdr_extract8_exec(struct rte_swx_pipeline *p)
2385 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
2388 __instr_hdr_extract_exec(p, 8);
/* Translate "emit h.<header>" into INSTR_HDR_EMIT. Emit is legal in both
 * pipeline and action context (action parameter unused). Slot 0 carries the
 * header; fusion passes presumably fill further slots (elided — confirm).
 */
2398 instr_hdr_emit_translate(struct rte_swx_pipeline *p,
2399 struct action *action __rte_unused,
2402 struct instruction *instr,
2403 struct instruction_data *data __rte_unused)
2407 CHECK(n_tokens == 2, EINVAL);
2409 h = header_parse(p, tokens[1]);
2412 instr->type = INSTR_HDR_EMIT;
2413 instr->io.hdr.header_id[0] = h->id;
2414 instr->io.hdr.struct_id[0] = h->struct_id;
2415 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
/* Common body for the 1..8-header emit handlers. Appends each emitted
 * header to the thread's headers_out list, coalescing with the tail entry
 * when the new header is contiguous in memory with it (ho_ptr + ho_nbytes ==
 * hi_ptr), so emit_handler() can later take its fast paths.
 * NOTE(review): several assignments (ho->ptr, n_headers_out increments,
 * validity checks) are elided between the visible lines — confirm.
 */
2420 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit);
2423 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit)
2425 struct thread *t = &p->threads[p->thread_id];
2426 struct instruction *ip = t->ip;
2427 uint32_t n_headers_out = t->n_headers_out;
/* Tail of the emit list; only valid when n_headers_out > 0. */
2428 struct header_out_runtime *ho = &t->headers_out[n_headers_out - 1];
2429 uint8_t *ho_ptr = NULL;
2430 uint32_t ho_nbytes = 0, i;
2432 for (i = 0; i < n_emit; i++) {
2433 uint32_t header_id = ip->io.hdr.header_id[i];
2434 uint32_t struct_id = ip->io.hdr.struct_id[i];
2435 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
2437 struct header_runtime *hi = &t->headers[header_id];
2438 uint8_t *hi_ptr = t->structs[struct_id];
2440 TRACE("[Thread %2u]: emit header %u\n",
/* First emitted header starts a fresh list entry. */
2446 if (!t->n_headers_out) {
2447 ho = &t->headers_out[0];
2449 ho->ptr0 = hi->ptr0;
2453 ho_nbytes = n_bytes;
2460 ho_nbytes = ho->n_bytes;
/* Contiguous with the current tail entry: extend it in place. */
2464 if (ho_ptr + ho_nbytes == hi_ptr) {
2465 ho_nbytes += n_bytes;
2467 ho->n_bytes = ho_nbytes;
/* Not contiguous: open a new list entry for this header. */
2470 ho->ptr0 = hi->ptr0;
2474 ho_nbytes = n_bytes;
2480 ho->n_bytes = ho_nbytes;
2481 t->n_headers_out = n_headers_out;
/* Wrappers over __instr_hdr_emit_exec. The *_tx variants are the fused
 * "emit xN + tx" forms: note the TRACE counts one more fused instruction
 * than the emit count because the trailing tx is part of the fusion; each
 * presumably ends with the tx sequence (emit_handler + pkt_tx + ip reset),
 * which is elided here — confirm.
 */
2485 instr_hdr_emit_exec(struct rte_swx_pipeline *p)
2487 __instr_hdr_emit_exec(p, 1);
2494 instr_hdr_emit_tx_exec(struct rte_swx_pipeline *p)
2496 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
2499 __instr_hdr_emit_exec(p, 1);
2504 instr_hdr_emit2_tx_exec(struct rte_swx_pipeline *p)
2506 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
2509 __instr_hdr_emit_exec(p, 2);
2514 instr_hdr_emit3_tx_exec(struct rte_swx_pipeline *p)
2516 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
2519 __instr_hdr_emit_exec(p, 3);
2524 instr_hdr_emit4_tx_exec(struct rte_swx_pipeline *p)
2526 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
2529 __instr_hdr_emit_exec(p, 4);
2534 instr_hdr_emit5_tx_exec(struct rte_swx_pipeline *p)
2536 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
2539 __instr_hdr_emit_exec(p, 5);
2544 instr_hdr_emit6_tx_exec(struct rte_swx_pipeline *p)
2546 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
2549 __instr_hdr_emit_exec(p, 6);
2554 instr_hdr_emit7_tx_exec(struct rte_swx_pipeline *p)
2556 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
2559 __instr_hdr_emit_exec(p, 7);
2564 instr_hdr_emit8_tx_exec(struct rte_swx_pipeline *p)
2566 TRACE("[Thread %2u] *** The next 9 instructions are fused. ***\n",
2569 __instr_hdr_emit_exec(p, 8);
/* Translate "validate h.<header>" into INSTR_HDR_VALIDATE (forces the
 * header's validity bit on at run time).
 * NOTE(review): CHECK on h and return are elided — confirm.
 */
2577 instr_hdr_validate_translate(struct rte_swx_pipeline *p,
2578 struct action *action __rte_unused,
2581 struct instruction *instr,
2582 struct instruction_data *data __rte_unused)
2586 CHECK(n_tokens == 2, EINVAL);
2588 h = header_parse(p, tokens[1]);
2591 instr->type = INSTR_HDR_VALIDATE;
2592 instr->valid.header_id = h->id;
/* Execute validate: set the header's bit in the thread validity mask.
 * NOTE(review): thread_ip_inc presumably follows (elided).
 */
2597 instr_hdr_validate_exec(struct rte_swx_pipeline *p)
2599 struct thread *t = &p->threads[p->thread_id];
2600 struct instruction *ip = t->ip;
2601 uint32_t header_id = ip->valid.header_id;
2603 TRACE("[Thread %2u] validate header %u\n", p->thread_id, header_id);
2606 t->valid_headers = MASK64_BIT_SET(t->valid_headers, header_id);
/* Translate "invalidate h.<header>" into INSTR_HDR_INVALIDATE (clears the
 * header's validity bit at run time). Mirrors instr_hdr_validate_translate.
 */
2616 instr_hdr_invalidate_translate(struct rte_swx_pipeline *p,
2617 struct action *action __rte_unused,
2620 struct instruction *instr,
2621 struct instruction_data *data __rte_unused)
2625 CHECK(n_tokens == 2, EINVAL);
2627 h = header_parse(p, tokens[1]);
2630 instr->type = INSTR_HDR_INVALIDATE;
2631 instr->valid.header_id = h->id;
/* Execute invalidate: clear the header's bit in the thread validity mask.
 * NOTE(review): thread_ip_inc presumably follows (elided).
 */
2636 instr_hdr_invalidate_exec(struct rte_swx_pipeline *p)
2638 struct thread *t = &p->threads[p->thread_id];
2639 struct instruction *ip = t->ip;
2640 uint32_t header_id = ip->valid.header_id;
2642 TRACE("[Thread %2u] invalidate header %u\n", p->thread_id, header_id);
2645 t->valid_headers = MASK64_BIT_CLR(t->valid_headers, header_id);
/* Translate "mov dst src". Three encodings:
 *   INSTR_MOV   — both operands same endianness domain;
 *   INSTR_MOV_S — exactly one operand is a header field ('h' prefix), so a
 *                 byte swap is needed at run time;
 *   INSTR_MOV_I — src is an immediate (parsed with strtoul, base auto).
 * dst is parsed with action == NULL, so an action argument ('t.') can never
 * be a destination. NOTE(review): the fsrc NULL branch that routes to the
 * immediate case, and the htonl condition (presumably dst[0] == 'h'), are
 * elided — confirm.
 */
2655 instr_mov_translate(struct rte_swx_pipeline *p,
2656 struct action *action,
2659 struct instruction *instr,
2660 struct instruction_data *data __rte_unused)
2662 char *dst = tokens[1], *src = tokens[2];
2663 struct field *fdst, *fsrc;
2664 uint32_t dst_struct_id, src_struct_id, src_val;
2666 CHECK(n_tokens == 3, EINVAL);
2668 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
2669 CHECK(fdst, EINVAL);
2672 fsrc = struct_field_parse(p, action, src, &src_struct_id);
2674 instr->type = INSTR_MOV;
/* Mixed header/non-header operands need the byte-swapping variant. */
2675 if ((dst[0] == 'h' && src[0] != 'h') ||
2676 (dst[0] != 'h' && src[0] == 'h'))
2677 instr->type = INSTR_MOV_S;
2679 instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
2680 instr->mov.dst.n_bits = fdst->n_bits;
2681 instr->mov.dst.offset = fdst->offset / 8;
2682 instr->mov.src.struct_id = (uint8_t)src_struct_id;
2683 instr->mov.src.n_bits = fsrc->n_bits;
2684 instr->mov.src.offset = fsrc->offset / 8;
/* Immediate source: strtoul consumes the token; any trailing junk is an
 * error. */
2689 src_val = strtoul(src, &src, 0);
2690 CHECK(!src[0], EINVAL);
/* Pre-swap the immediate so the run-time store needs no conversion. */
2693 src_val = htonl(src_val);
2695 instr->type = INSTR_MOV_I;
2696 instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
2697 instr->mov.dst.n_bits = fdst->n_bits;
2698 instr->mov.dst.offset = fdst->offset / 8;
2699 instr->mov.src_val = (uint32_t)src_val;
/* Run-time handlers for the three mov encodings. The actual data movement
 * is done by macros (MOV / MOV_S / MOV_I, presumably) in the elided lines,
 * followed by thread_ip_inc — confirm.
 */
2704 instr_mov_exec(struct rte_swx_pipeline *p)
2706 struct thread *t = &p->threads[p->thread_id];
2707 struct instruction *ip = t->ip;
2709 TRACE("[Thread %2u] mov\n",
2719 instr_mov_s_exec(struct rte_swx_pipeline *p)
2721 struct thread *t = &p->threads[p->thread_id];
2722 struct instruction *ip = t->ip;
2724 TRACE("[Thread %2u] mov (s)\n",
2734 instr_mov_i_exec(struct rte_swx_pipeline *p)
2736 struct thread *t = &p->threads[p->thread_id];
2737 struct instruction *ip = t->ip;
2739 TRACE("[Thread %2u] mov m.f %x\n",
/* Translate "dma h.<header> t.<field>" into INSTR_DMA_HT: bulk-copy a whole
 * header's worth of bytes from the action's argument area (table entry data)
 * into the header. Only legal inside an action (CHECK(action, ...)).
 * Slot 0 filled here; fusion builds the multi-DMA variants (elided).
 */
2753 instr_dma_translate(struct rte_swx_pipeline *p,
2754 struct action *action,
2757 struct instruction *instr,
2758 struct instruction_data *data __rte_unused)
2760 char *dst = tokens[1];
2761 char *src = tokens[2];
2765 CHECK(action, EINVAL);
2766 CHECK(n_tokens == 3, EINVAL);
2768 h = header_parse(p, dst);
2771 tf = action_field_parse(action, src);
2774 instr->type = INSTR_DMA_HT;
2775 instr->dma.dst.header_id[0] = h->id;
2776 instr->dma.dst.struct_id[0] = h->struct_id;
2777 instr->dma.n_bytes[0] = h->st->n_bits / 8;
2778 instr->dma.src.offset[0] = tf->offset / 8;
/* Common body for the 1..8-entry DMA handlers. For each entry, copy n_bytes
 * from the action-data struct (t->structs[0]) into the destination header.
 * If the header is currently valid, the copy targets its live location
 * (inside the packet); otherwise it targets the header's original storage
 * (h_ptr0), and the header is then marked valid either way.
 */
2784 __instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma);
2787 __instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma)
2789 struct thread *t = &p->threads[p->thread_id];
2790 struct instruction *ip = t->ip;
/* Struct slot 0 is the action data (table entry action arguments). */
2791 uint8_t *action_data = t->structs[0];
2792 uint64_t valid_headers = t->valid_headers;
2795 for (i = 0; i < n_dma; i++) {
2796 uint32_t header_id = ip->dma.dst.header_id[i];
2797 uint32_t struct_id = ip->dma.dst.struct_id[i];
2798 uint32_t offset = ip->dma.src.offset[i];
2799 uint32_t n_bytes = ip->dma.n_bytes[i];
2801 struct header_runtime *h = &t->headers[header_id];
2802 uint8_t *h_ptr0 = h->ptr0;
2803 uint8_t *h_ptr = t->structs[struct_id];
/* Valid header: write in place; invalid: write to original storage. */
2805 void *dst = MASK64_BIT_GET(valid_headers, header_id) ?
2807 void *src = &action_data[offset];
2809 TRACE("[Thread %2u] dma h.s t.f\n", p->thread_id);
2812 memcpy(dst, src, n_bytes);
2813 t->structs[struct_id] = dst;
2814 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
2817 t->valid_headers = valid_headers;
/* Wrappers over __instr_dma_ht_exec for 1..8 fused DMA instructions; each
 * presumably advances the IP by N afterwards (elided — confirm).
 */
2821 instr_dma_ht_exec(struct rte_swx_pipeline *p)
2823 __instr_dma_ht_exec(p, 1);
2830 instr_dma_ht2_exec(struct rte_swx_pipeline *p)
2832 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
2835 __instr_dma_ht_exec(p, 2);
2842 instr_dma_ht3_exec(struct rte_swx_pipeline *p)
2844 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
2847 __instr_dma_ht_exec(p, 3);
2854 instr_dma_ht4_exec(struct rte_swx_pipeline *p)
2856 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
2859 __instr_dma_ht_exec(p, 4);
2866 instr_dma_ht5_exec(struct rte_swx_pipeline *p)
2868 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
2871 __instr_dma_ht_exec(p, 5);
2878 instr_dma_ht6_exec(struct rte_swx_pipeline *p)
2880 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
2883 __instr_dma_ht_exec(p, 6);
2890 instr_dma_ht7_exec(struct rte_swx_pipeline *p)
2892 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
2895 __instr_dma_ht_exec(p, 7);
2902 instr_dma_ht8_exec(struct rte_swx_pipeline *p)
2904 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
2907 __instr_dma_ht_exec(p, 8);
/* Translate "add dst src". Variant selection by operand endianness domain:
 * plain ADD (both network or both host order), ADD_HM / ADD_MH / ADD_HH for
 * the mixed/both-header cases, and ADD_MI / ADD_HI when src parses as an
 * immediate. NOTE(review): the fsrc NULL branch routing to the immediate
 * case and the MI/HI selection condition are elided — confirm.
 */
2917 instr_alu_add_translate(struct rte_swx_pipeline *p,
2918 struct action *action,
2921 struct instruction *instr,
2922 struct instruction_data *data __rte_unused)
2924 char *dst = tokens[1], *src = tokens[2];
2925 struct field *fdst, *fsrc;
2926 uint32_t dst_struct_id, src_struct_id, src_val;
2928 CHECK(n_tokens == 3, EINVAL);
2930 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
2931 CHECK(fdst, EINVAL);
2933 /* ADD, ADD_HM, ADD_MH, ADD_HH. */
2934 fsrc = struct_field_parse(p, action, src, &src_struct_id);
2936 instr->type = INSTR_ALU_ADD;
2937 if (dst[0] == 'h' && src[0] == 'm')
2938 instr->type = INSTR_ALU_ADD_HM;
2939 if (dst[0] == 'm' && src[0] == 'h')
2940 instr->type = INSTR_ALU_ADD_MH;
2941 if (dst[0] == 'h' && src[0] == 'h')
2942 instr->type = INSTR_ALU_ADD_HH;
2944 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
2945 instr->alu.dst.n_bits = fdst->n_bits;
2946 instr->alu.dst.offset = fdst->offset / 8;
2947 instr->alu.src.struct_id = (uint8_t)src_struct_id;
2948 instr->alu.src.n_bits = fsrc->n_bits;
2949 instr->alu.src.offset = fsrc->offset / 8;
2953 /* ADD_MI, ADD_HI. */
2954 src_val = strtoul(src, &src, 0);
2955 CHECK(!src[0], EINVAL);
2957 instr->type = INSTR_ALU_ADD_MI;
2959 instr->type = INSTR_ALU_ADD_HI;
2961 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
2962 instr->alu.dst.n_bits = fdst->n_bits;
2963 instr->alu.dst.offset = fdst->offset / 8;
2964 instr->alu.src_val = (uint32_t)src_val;
/* Translate "sub dst src". Structurally identical to
 * instr_alu_add_translate, selecting the SUB / SUB_HM / SUB_MH / SUB_HH /
 * SUB_MI / SUB_HI variants. Same elided branches as add — confirm.
 */
2969 instr_alu_sub_translate(struct rte_swx_pipeline *p,
2970 struct action *action,
2973 struct instruction *instr,
2974 struct instruction_data *data __rte_unused)
2976 char *dst = tokens[1], *src = tokens[2];
2977 struct field *fdst, *fsrc;
2978 uint32_t dst_struct_id, src_struct_id, src_val;
2980 CHECK(n_tokens == 3, EINVAL);
2982 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
2983 CHECK(fdst, EINVAL);
2985 /* SUB, SUB_HM, SUB_MH, SUB_HH. */
2986 fsrc = struct_field_parse(p, action, src, &src_struct_id);
2988 instr->type = INSTR_ALU_SUB;
2989 if (dst[0] == 'h' && src[0] == 'm')
2990 instr->type = INSTR_ALU_SUB_HM;
2991 if (dst[0] == 'm' && src[0] == 'h')
2992 instr->type = INSTR_ALU_SUB_MH;
2993 if (dst[0] == 'h' && src[0] == 'h')
2994 instr->type = INSTR_ALU_SUB_HH;
2996 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
2997 instr->alu.dst.n_bits = fdst->n_bits;
2998 instr->alu.dst.offset = fdst->offset / 8;
2999 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3000 instr->alu.src.n_bits = fsrc->n_bits;
3001 instr->alu.src.offset = fsrc->offset / 8;
3005 /* SUB_MI, SUB_HI. */
3006 src_val = strtoul(src, &src, 0);
3007 CHECK(!src[0], EINVAL);
3009 instr->type = INSTR_ALU_SUB_MI;
3011 instr->type = INSTR_ALU_SUB_HI;
3013 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3014 instr->alu.dst.n_bits = fdst->n_bits;
3015 instr->alu.dst.offset = fdst->offset / 8;
3016 instr->alu.src_val = (uint32_t)src_val;
/* Translate "ckadd dst src" (checksum add). The destination must be a
 * 16-bit header field (the checksum). Two forms:
 *   CKADD_FIELD    — src is another header field folded into the checksum;
 *   CKADD_STRUCT   — src is a whole header; CKADD_STRUCT20 is the special-
 *                    cased 20-byte (IPv4-header-sized) fast path.
 * For the struct forms src.n_bits carries the full header width and
 * src.offset is unused.
 */
3021 instr_alu_ckadd_translate(struct rte_swx_pipeline *p,
3022 struct action *action __rte_unused,
3025 struct instruction *instr,
3026 struct instruction_data *data __rte_unused)
3028 char *dst = tokens[1], *src = tokens[2];
3029 struct header *hdst, *hsrc;
3030 struct field *fdst, *fsrc;
3032 CHECK(n_tokens == 3, EINVAL);
3034 fdst = header_field_parse(p, dst, &hdst);
3035 CHECK(fdst && (fdst->n_bits == 16), EINVAL);
3038 fsrc = header_field_parse(p, src, &hsrc);
3040 instr->type = INSTR_ALU_CKADD_FIELD;
3041 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
3042 instr->alu.dst.n_bits = fdst->n_bits;
3043 instr->alu.dst.offset = fdst->offset / 8;
3044 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
3045 instr->alu.src.n_bits = fsrc->n_bits;
3046 instr->alu.src.offset = fsrc->offset / 8;
3050 /* CKADD_STRUCT, CKADD_STRUCT20. */
3051 hsrc = header_parse(p, src);
3052 CHECK(hsrc, EINVAL);
3054 instr->type = INSTR_ALU_CKADD_STRUCT;
/* 20 bytes == IPv4 header without options: dedicated fast path. */
3055 if ((hsrc->st->n_bits / 8) == 20)
3056 instr->type = INSTR_ALU_CKADD_STRUCT20;
3058 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
3059 instr->alu.dst.n_bits = fdst->n_bits;
3060 instr->alu.dst.offset = fdst->offset / 8;
3061 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
3062 instr->alu.src.n_bits = hsrc->st->n_bits;
3063 instr->alu.src.offset = 0; /* Unused. */
/* Translate "cksub dst src" (checksum subtract): removes a header field's
 * contribution from a 16-bit checksum field. Only the field form exists —
 * no struct variant, unlike ckadd.
 */
3068 instr_alu_cksub_translate(struct rte_swx_pipeline *p,
3069 struct action *action __rte_unused,
3072 struct instruction *instr,
3073 struct instruction_data *data __rte_unused)
3075 char *dst = tokens[1], *src = tokens[2];
3076 struct header *hdst, *hsrc;
3077 struct field *fdst, *fsrc;
3079 CHECK(n_tokens == 3, EINVAL);
3081 fdst = header_field_parse(p, dst, &hdst);
3082 CHECK(fdst && (fdst->n_bits == 16), EINVAL);
3084 fsrc = header_field_parse(p, src, &hsrc);
3085 CHECK(fsrc, EINVAL);
3087 instr->type = INSTR_ALU_CKSUB_FIELD;
3088 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
3089 instr->alu.dst.n_bits = fdst->n_bits;
3090 instr->alu.dst.offset = fdst->offset / 8;
3091 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
3092 instr->alu.src.n_bits = fsrc->n_bits;
3093 instr->alu.src.offset = fsrc->offset / 8;
/* Translate "and dst src". Like mov: AND for same-domain operands, AND_S
 * when exactly one operand is a header field, AND_I for an immediate
 * (pre-swapped with htonl — the condition guarding the swap, presumably
 * dst[0] == 'h', is elided; confirm).
 */
3098 instr_alu_and_translate(struct rte_swx_pipeline *p,
3099 struct action *action,
3102 struct instruction *instr,
3103 struct instruction_data *data __rte_unused)
3105 char *dst = tokens[1], *src = tokens[2];
3106 struct field *fdst, *fsrc;
3107 uint32_t dst_struct_id, src_struct_id, src_val;
3109 CHECK(n_tokens == 3, EINVAL);
3111 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3112 CHECK(fdst, EINVAL);
3115 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3117 instr->type = INSTR_ALU_AND;
3118 if ((dst[0] == 'h' && src[0] != 'h') ||
3119 (dst[0] != 'h' && src[0] == 'h'))
3120 instr->type = INSTR_ALU_AND_S;
3122 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3123 instr->alu.dst.n_bits = fdst->n_bits;
3124 instr->alu.dst.offset = fdst->offset / 8;
3125 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3126 instr->alu.src.n_bits = fsrc->n_bits;
3127 instr->alu.src.offset = fsrc->offset / 8;
3132 src_val = strtoul(src, &src, 0);
3133 CHECK(!src[0], EINVAL);
3136 src_val = htonl(src_val);
3138 instr->type = INSTR_ALU_AND_I;
3139 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3140 instr->alu.dst.n_bits = fdst->n_bits;
3141 instr->alu.dst.offset = fdst->offset / 8;
3142 instr->alu.src_val = (uint32_t)src_val;
/* Translate "or dst src". Structurally identical to the and translator:
 * OR / OR_S / OR_I variant selection with the same elided branches.
 */
3147 instr_alu_or_translate(struct rte_swx_pipeline *p,
3148 struct action *action,
3151 struct instruction *instr,
3152 struct instruction_data *data __rte_unused)
3154 char *dst = tokens[1], *src = tokens[2];
3155 struct field *fdst, *fsrc;
3156 uint32_t dst_struct_id, src_struct_id, src_val;
3158 CHECK(n_tokens == 3, EINVAL);
3160 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3161 CHECK(fdst, EINVAL);
3164 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3166 instr->type = INSTR_ALU_OR;
3167 if ((dst[0] == 'h' && src[0] != 'h') ||
3168 (dst[0] != 'h' && src[0] == 'h'))
3169 instr->type = INSTR_ALU_OR_S;
3171 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3172 instr->alu.dst.n_bits = fdst->n_bits;
3173 instr->alu.dst.offset = fdst->offset / 8;
3174 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3175 instr->alu.src.n_bits = fsrc->n_bits;
3176 instr->alu.src.offset = fsrc->offset / 8;
3181 src_val = strtoul(src, &src, 0);
3182 CHECK(!src[0], EINVAL);
3185 src_val = htonl(src_val);
3187 instr->type = INSTR_ALU_OR_I;
3188 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3189 instr->alu.dst.n_bits = fdst->n_bits;
3190 instr->alu.dst.offset = fdst->offset / 8;
3191 instr->alu.src_val = (uint32_t)src_val;
/* Translate "xor dst src". Structurally identical to the and/or
 * translators: XOR / XOR_S / XOR_I variant selection, same elided branches.
 */
3196 instr_alu_xor_translate(struct rte_swx_pipeline *p,
3197 struct action *action,
3200 struct instruction *instr,
3201 struct instruction_data *data __rte_unused)
3203 char *dst = tokens[1], *src = tokens[2];
3204 struct field *fdst, *fsrc;
3205 uint32_t dst_struct_id, src_struct_id, src_val;
3207 CHECK(n_tokens == 3, EINVAL);
3209 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3210 CHECK(fdst, EINVAL);
3213 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3215 instr->type = INSTR_ALU_XOR;
3216 if ((dst[0] == 'h' && src[0] != 'h') ||
3217 (dst[0] != 'h' && src[0] == 'h'))
3218 instr->type = INSTR_ALU_XOR_S;
3220 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3221 instr->alu.dst.n_bits = fdst->n_bits;
3222 instr->alu.dst.offset = fdst->offset / 8;
3223 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3224 instr->alu.src.n_bits = fsrc->n_bits;
3225 instr->alu.src.offset = fsrc->offset / 8;
3230 src_val = strtoul(src, &src, 0);
3231 CHECK(!src[0], EINVAL);
3234 src_val = htonl(src_val);
3236 instr->type = INSTR_ALU_XOR_I;
3237 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3238 instr->alu.dst.n_bits = fdst->n_bits;
3239 instr->alu.dst.offset = fdst->offset / 8;
3240 instr->alu.src_val = (uint32_t)src_val;
/* Run-time handlers for every add/sub/and/or/xor variant. Each handler only
 * loads thread + IP and traces; the arithmetic is done by variant-specific
 * macros (presumably ALU / ALU_MH / ALU_HM / ALU_HH / ALU_MI / ALU_HI and
 * the S/I forms) plus thread_ip_inc in the elided lines — confirm against
 * the full source.
 */
3245 instr_alu_add_exec(struct rte_swx_pipeline *p)
3247 struct thread *t = &p->threads[p->thread_id];
3248 struct instruction *ip = t->ip;
3250 TRACE("[Thread %2u] add\n", p->thread_id);
3260 instr_alu_add_mh_exec(struct rte_swx_pipeline *p)
3262 struct thread *t = &p->threads[p->thread_id];
3263 struct instruction *ip = t->ip;
3265 TRACE("[Thread %2u] add (mh)\n", p->thread_id);
3275 instr_alu_add_hm_exec(struct rte_swx_pipeline *p)
3277 struct thread *t = &p->threads[p->thread_id];
3278 struct instruction *ip = t->ip;
3280 TRACE("[Thread %2u] add (hm)\n", p->thread_id);
3290 instr_alu_add_hh_exec(struct rte_swx_pipeline *p)
3292 struct thread *t = &p->threads[p->thread_id];
3293 struct instruction *ip = t->ip;
3295 TRACE("[Thread %2u] add (hh)\n", p->thread_id);
3305 instr_alu_add_mi_exec(struct rte_swx_pipeline *p)
3307 struct thread *t = &p->threads[p->thread_id];
3308 struct instruction *ip = t->ip;
3310 TRACE("[Thread %2u] add (mi)\n", p->thread_id);
3320 instr_alu_add_hi_exec(struct rte_swx_pipeline *p)
3322 struct thread *t = &p->threads[p->thread_id];
3323 struct instruction *ip = t->ip;
3325 TRACE("[Thread %2u] add (hi)\n", p->thread_id);
3335 instr_alu_sub_exec(struct rte_swx_pipeline *p)
3337 struct thread *t = &p->threads[p->thread_id];
3338 struct instruction *ip = t->ip;
3340 TRACE("[Thread %2u] sub\n", p->thread_id);
3350 instr_alu_sub_mh_exec(struct rte_swx_pipeline *p)
3352 struct thread *t = &p->threads[p->thread_id];
3353 struct instruction *ip = t->ip;
3355 TRACE("[Thread %2u] sub (mh)\n", p->thread_id);
3365 instr_alu_sub_hm_exec(struct rte_swx_pipeline *p)
3367 struct thread *t = &p->threads[p->thread_id];
3368 struct instruction *ip = t->ip;
3370 TRACE("[Thread %2u] sub (hm)\n", p->thread_id);
3380 instr_alu_sub_hh_exec(struct rte_swx_pipeline *p)
3382 struct thread *t = &p->threads[p->thread_id];
3383 struct instruction *ip = t->ip;
3385 TRACE("[Thread %2u] sub (hh)\n", p->thread_id);
3395 instr_alu_sub_mi_exec(struct rte_swx_pipeline *p)
3397 struct thread *t = &p->threads[p->thread_id];
3398 struct instruction *ip = t->ip;
3400 TRACE("[Thread %2u] sub (mi)\n", p->thread_id);
3410 instr_alu_sub_hi_exec(struct rte_swx_pipeline *p)
3412 struct thread *t = &p->threads[p->thread_id];
3413 struct instruction *ip = t->ip;
3415 TRACE("[Thread %2u] sub (hi)\n", p->thread_id);
3425 instr_alu_and_exec(struct rte_swx_pipeline *p)
3427 struct thread *t = &p->threads[p->thread_id];
3428 struct instruction *ip = t->ip;
3430 TRACE("[Thread %2u] and\n", p->thread_id);
3440 instr_alu_and_s_exec(struct rte_swx_pipeline *p)
3442 struct thread *t = &p->threads[p->thread_id];
3443 struct instruction *ip = t->ip;
3445 TRACE("[Thread %2u] and (s)\n", p->thread_id);
3455 instr_alu_and_i_exec(struct rte_swx_pipeline *p)
3457 struct thread *t = &p->threads[p->thread_id];
3458 struct instruction *ip = t->ip;
3460 TRACE("[Thread %2u] and (i)\n", p->thread_id);
3470 instr_alu_or_exec(struct rte_swx_pipeline *p)
3472 struct thread *t = &p->threads[p->thread_id];
3473 struct instruction *ip = t->ip;
3475 TRACE("[Thread %2u] or\n", p->thread_id);
3485 instr_alu_or_s_exec(struct rte_swx_pipeline *p)
3487 struct thread *t = &p->threads[p->thread_id];
3488 struct instruction *ip = t->ip;
3490 TRACE("[Thread %2u] or (s)\n", p->thread_id);
3500 instr_alu_or_i_exec(struct rte_swx_pipeline *p)
3502 struct thread *t = &p->threads[p->thread_id];
3503 struct instruction *ip = t->ip;
3505 TRACE("[Thread %2u] or (i)\n", p->thread_id);
3515 instr_alu_xor_exec(struct rte_swx_pipeline *p)
3517 struct thread *t = &p->threads[p->thread_id];
3518 struct instruction *ip = t->ip;
3520 TRACE("[Thread %2u] xor\n", p->thread_id);
3530 instr_alu_xor_s_exec(struct rte_swx_pipeline *p)
3532 struct thread *t = &p->threads[p->thread_id];
3533 struct instruction *ip = t->ip;
3535 TRACE("[Thread %2u] xor (s)\n", p->thread_id);
3545 instr_alu_xor_i_exec(struct rte_swx_pipeline *p)
3547 struct thread *t = &p->threads[p->thread_id];
3548 struct instruction *ip = t->ip;
3550 TRACE("[Thread %2u] xor (i)\n", p->thread_id);
/* Execute ckadd (field form): fold a source header field (up to 64 bits,
 * masked to its declared width) into a 16-bit one's-complement checksum
 * using the staged reductions documented inline. The initial checksum load
 * (presumably r = ~dst read from dst16_ptr) and the src64 load are elided —
 * confirm; the final store writes the recomputed complement back.
 */
3560 instr_alu_ckadd_field_exec(struct rte_swx_pipeline *p)
3562 struct thread *t = &p->threads[p->thread_id];
3563 struct instruction *ip = t->ip;
3564 uint8_t *dst_struct, *src_struct;
3565 uint16_t *dst16_ptr, dst;
3566 uint64_t *src64_ptr, src64, src64_mask, src;
3569 TRACE("[Thread %2u] ckadd (field)\n", p->thread_id);
3572 dst_struct = t->structs[ip->alu.dst.struct_id];
3573 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
3576 src_struct = t->structs[ip->alu.src.struct_id];
3577 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
/* Mask the 64-bit load down to the field's declared bit width. Relies on
 * n_bits >= 1 (shift by 64 would be UB); presumably guaranteed by the
 * struct-type registration — confirm.
 */
3579 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
3580 src = src64 & src64_mask;
3585 /* The first input (r) is a 16-bit number. The second and the third
3586 * inputs are 32-bit numbers. In the worst case scenario, the sum of the
3587 * three numbers (output r) is a 34-bit number.
3589 r += (src >> 32) + (src & 0xFFFFFFFF);
3591 /* The first input is a 16-bit number. The second input is an 18-bit
3592 * number. In the worst case scenario, the sum of the two numbers is a
3595 r = (r & 0xFFFF) + (r >> 16);
3597 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
3598 * a 3-bit number (0 .. 7). Their sum is a 17-bit number (0 .. 0x10006).
3600 r = (r & 0xFFFF) + (r >> 16);
3602 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
3603 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
3604 * 0x10006), the output r is (0 .. 7). So no carry bit can be generated,
3605 * therefore the output r is always a 16-bit number.
3607 r = (r & 0xFFFF) + (r >> 16);
3612 *dst16_ptr = (uint16_t)r;
/*
 * cksub (field): incremental update of a 16-bit ones'-complement
 * checksum when a source field is removed (subtracted). The subtraction
 * is performed modulo 0xFFFF by first biasing the minuend with a large
 * multiple of the modulus to avoid underflow (see in-line comments).
 * Fragment: the initial checksum read into r and the src64 load are
 * among the lines not visible here.
 */
3619 instr_alu_cksub_field_exec(struct rte_swx_pipeline *p)
3621 struct thread *t = &p->threads[p->thread_id];
3622 struct instruction *ip = t->ip;
3623 uint8_t *dst_struct, *src_struct;
3624 uint16_t *dst16_ptr, dst;
3625 uint64_t *src64_ptr, src64, src64_mask, src;
3628 TRACE("[Thread %2u] cksub (field)\n", p->thread_id);
/* Locate the 16-bit destination checksum inside its struct. */
3631 dst_struct = t->structs[ip->alu.dst.struct_id];
3632 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
/* Locate the source field and mask it down to its declared width. */
3635 src_struct = t->structs[ip->alu.src.struct_id];
3636 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
3638 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
3639 src = src64 & src64_mask;
3644 /* Subtraction in 1's complement arithmetic (i.e. a '- b) is the same as
3645 * the following sequence of operations in 2's complement arithmetic:
3646 * a '- b = (a - b) % 0xFFFF.
3648 * In order to prevent an underflow for the below subtraction, in which
3649 * a 33-bit number (the subtrahend) is taken out of a 16-bit number (the
3650 * minuend), we first add a multiple of the 0xFFFF modulus to the
3651 * minuend. The number we add to the minuend needs to be a 34-bit number
3652 * or higher, so for readability reasons we picked the 36-bit multiple.
3653 * We are effectively turning the 16-bit minuend into a 36-bit number:
3654 * (a - b) % 0xFFFF = (a + 0xFFFF00000 - b) % 0xFFFF.
3656 r += 0xFFFF00000ULL; /* The output r is a 36-bit number. */
3658 /* A 33-bit number is subtracted from a 36-bit number (the input r). The
3659 * result (the output r) is a 36-bit number.
3661 r -= (src >> 32) + (src & 0xFFFFFFFF);
3663 /* The first input is a 16-bit number. The second input is a 20-bit
3664 * number. Their sum is a 21-bit number.
3666 r = (r & 0xFFFF) + (r >> 16);
3668 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
3669 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1001E).
3671 r = (r & 0xFFFF) + (r >> 16);
3673 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
3674 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
3675 * 0x1001E), the output r is (0 .. 31). So no carry bit can be
3676 * generated, therefore the output r is always a 16-bit number.
3678 r = (r & 0xFFFF) + (r >> 16);
/* Store the fully-folded 16-bit checksum back into the packet. */
3683 *dst16_ptr = (uint16_t)r;
/*
 * ckadd (struct of 20 bytes): full recomputation of the 16-bit
 * ones'-complement checksum over a 20-byte header — the classic IPv4
 * header without options, read as five 32-bit words. This is the
 * specialized fast path of the generic ckadd (struct) handler below.
 * Fragment: the r0/r1 declarations, the checksum-field zeroing and the
 * final complement are among the lines not visible here.
 */
3690 instr_alu_ckadd_struct20_exec(struct rte_swx_pipeline *p)
3692 struct thread *t = &p->threads[p->thread_id];
3693 struct instruction *ip = t->ip;
3694 uint8_t *dst_struct, *src_struct;
3695 uint16_t *dst16_ptr;
3696 uint32_t *src32_ptr;
3699 TRACE("[Thread %2u] ckadd (struct of 20 bytes)\n", p->thread_id);
/* Locate the 16-bit destination checksum inside its struct. */
3702 dst_struct = t->structs[ip->alu.dst.struct_id];
3703 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
/* The source header is summed from its first byte as 32-bit words. */
3705 src_struct = t->structs[ip->alu.src.struct_id];
3706 src32_ptr = (uint32_t *)&src_struct[0];
/* Sum the five 32-bit words in two accumulators to shorten the
 * dependency chain; the bit-width annotations bound the carries. */
3708 r0 = src32_ptr[0]; /* r0 is a 32-bit number. */
3709 r1 = src32_ptr[1]; /* r1 is a 32-bit number. */
3710 r0 += src32_ptr[2]; /* The output r0 is a 33-bit number. */
3711 r1 += src32_ptr[3]; /* The output r1 is a 33-bit number. */
3712 r0 += r1 + src32_ptr[4]; /* The output r0 is a 35-bit number. */
3714 /* The first input is a 16-bit number. The second input is a 19-bit
3715 * number. Their sum is a 20-bit number.
3717 r0 = (r0 & 0xFFFF) + (r0 >> 16);
3719 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
3720 * a 4-bit number (0 .. 15). The sum is a 17-bit number (0 .. 0x1000E).
3722 r0 = (r0 & 0xFFFF) + (r0 >> 16);
3724 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
3725 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
3726 * 0x1000E), the output r is (0 .. 15). So no carry bit can be
3727 * generated, therefore the output r is always a 16-bit number.
3729 r0 = (r0 & 0xFFFF) + (r0 >> 16);
/* Per RFC 768 convention, an all-zero checksum is transmitted as
 * 0xFFFF; 0 is reserved to mean "no checksum". */
3732 r0 = r0 ? r0 : 0xFFFF;
3734 *dst16_ptr = (uint16_t)r0;
/*
 * ckadd (struct): full recomputation of the 16-bit ones'-complement
 * checksum over a header of arbitrary size (multiple of 32 bits, up to
 * 256 bytes per the comment below), summed word by word then folded.
 * Fragment: the r/i declarations, the loop body and the final
 * complement/store preparation are among the lines not visible here.
 */
3741 instr_alu_ckadd_struct_exec(struct rte_swx_pipeline *p)
3743 struct thread *t = &p->threads[p->thread_id];
3744 struct instruction *ip = t->ip;
3745 uint8_t *dst_struct, *src_struct;
3746 uint16_t *dst16_ptr;
3747 uint32_t *src32_ptr;
3751 TRACE("[Thread %2u] ckadd (struct)\n", p->thread_id);
/* Locate the 16-bit destination checksum inside its struct. */
3754 dst_struct = t->structs[ip->alu.dst.struct_id];
3755 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
/* The source header is summed from its first byte as 32-bit words. */
3757 src_struct = t->structs[ip->alu.src.struct_id];
3758 src32_ptr = (uint32_t *)&src_struct[0];
3760 /* The max number of 32-bit words in a 256-byte header is 8 = 2^3.
3761 * Therefore, in the worst case scenario, a 35-bit number is added to a
3762 * 16-bit number (the input r), so the output r is 36-bit number.
3764 for (i = 0; i < ip->alu.src.n_bits / 32; i++, src32_ptr++)
3767 /* The first input is a 16-bit number. The second input is a 20-bit
3768 * number. Their sum is a 21-bit number.
3770 r = (r & 0xFFFF) + (r >> 16);
3772 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
3773 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1001E).
3775 r = (r & 0xFFFF) + (r >> 16);
3777 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
3778 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
3779 * 0x1001E), the output r is (0 .. 31). So no carry bit can be
3780 * generated, therefore the output r is always a 16-bit number.
3782 r = (r & 0xFFFF) + (r >> 16);
/* Store the fully-folded 16-bit checksum back into the packet. */
3787 *dst16_ptr = (uint16_t)r;
/* Upper bound on whitespace-separated tokens in one instruction line. */
3793 #define RTE_SWX_INSTRUCTION_TOKENS_MAX 16
/*
 * Translate one textual instruction into its binary form (*instr) plus
 * per-instruction metadata (*data: label, jump target, user count).
 * The string is tokenized in place with strtok_r, an optional leading
 * "label :" pair is consumed, then the first remaining token selects
 * the per-mnemonic translator. Returns 0 on success, -EINVAL via the
 * CHECK macro otherwise. Fragment: the token loop framing, the tpos
 * adjustment after a label, and the trailing mnemonics (jmp/return/...)
 * are among the lines not visible here.
 * NOTE(review): strcpy() into data->label below is not length-checked
 * against the label buffer size — confirm tokens cannot exceed it.
 */
3796 instr_translate(struct rte_swx_pipeline *p,
3797 struct action *action,
3799 struct instruction *instr,
3800 struct instruction_data *data)
3802 char *tokens[RTE_SWX_INSTRUCTION_TOKENS_MAX];
3803 int n_tokens = 0, tpos = 0;
3805 /* Parse the instruction string into tokens. */
3809 token = strtok_r(string, " \t\v", &string);
3813 CHECK(n_tokens < RTE_SWX_INSTRUCTION_TOKENS_MAX, EINVAL);
3815 tokens[n_tokens] = token;
3819 CHECK(n_tokens, EINVAL);
3821 /* Handle the optional instruction label. */
3822 if ((n_tokens >= 2) && !strcmp(tokens[1], ":")) {
3823 strcpy(data->label, tokens[0]);
3826 CHECK(n_tokens - tpos, EINVAL);
3829 /* Identify the instruction type. */
3830 if (!strcmp(tokens[tpos], "rx"))
3831 return instr_rx_translate(p,
3838 if (!strcmp(tokens[tpos], "tx"))
3839 return instr_tx_translate(p,
3846 if (!strcmp(tokens[tpos], "extract"))
3847 return instr_hdr_extract_translate(p,
3854 if (!strcmp(tokens[tpos], "emit"))
3855 return instr_hdr_emit_translate(p,
3862 if (!strcmp(tokens[tpos], "validate"))
3863 return instr_hdr_validate_translate(p,
3870 if (!strcmp(tokens[tpos], "invalidate"))
3871 return instr_hdr_invalidate_translate(p,
3878 if (!strcmp(tokens[tpos], "mov"))
3879 return instr_mov_translate(p,
3886 if (!strcmp(tokens[tpos], "dma"))
3887 return instr_dma_translate(p,
3894 if (!strcmp(tokens[tpos], "add"))
3895 return instr_alu_add_translate(p,
3902 if (!strcmp(tokens[tpos], "sub"))
3903 return instr_alu_sub_translate(p,
3910 if (!strcmp(tokens[tpos], "ckadd"))
3911 return instr_alu_ckadd_translate(p,
3918 if (!strcmp(tokens[tpos], "cksub"))
3919 return instr_alu_cksub_translate(p,
3926 if (!strcmp(tokens[tpos], "and"))
3927 return instr_alu_and_translate(p,
3934 if (!strcmp(tokens[tpos], "or"))
3935 return instr_alu_or_translate(p,
3942 if (!strcmp(tokens[tpos], "xor"))
3943 return instr_alu_xor_translate(p,
/*
 * Count how many of the n instructions reference the given label as a
 * jump target (via their jmp_label field). Fragment: the empty-label
 * early-return and the count increment are among the missing lines.
 */
3954 label_is_used(struct instruction_data *data, uint32_t n, const char *label)
3956 uint32_t count = 0, i;
3961 for (i = 0; i < n; i++)
3962 if (!strcmp(label, data[i].jmp_label))
/*
 * Validate the labels of a translated instruction block: every
 * non-empty label must be unique, then each label's user count is
 * recorded so unreferenced labels can be detected/ignored later.
 */
3969 instr_label_check(struct instruction_data *instruction_data,
3970 uint32_t n_instructions)
3974 /* Check that all instruction labels are unique. */
3975 for (i = 0; i < n_instructions; i++) {
3976 struct instruction_data *data = &instruction_data[i];
3977 char *label = data->label;
/* Compare against every later instruction only (earlier pairs were
 * already checked by previous iterations). */
3983 for (j = i + 1; j < n_instructions; j++)
3984 CHECK(strcmp(label, data[j].label), EINVAL);
3987 /* Get users for each instruction label. */
3988 for (i = 0; i < n_instructions; i++) {
3989 struct instruction_data *data = &instruction_data[i];
3990 char *label = data->label;
3992 data->n_users = label_is_used(instruction_data,
/*
 * Translate a full list of textual instructions into the binary
 * instruction array for either an action (a != NULL) or the pipeline
 * main program (a == NULL, see the two assignment branches below).
 * Each input string is strdup'ed (instr_translate tokenizes in place),
 * translated, then label-checked across the whole block.
 * Fragment: the error/cleanup paths (free of instr/data/string) are
 * among the missing lines — presumably goto-based; confirm ownership of
 * instr transfers to a or p on success.
 */
4001 instruction_config(struct rte_swx_pipeline *p,
4003 const char **instructions,
4004 uint32_t n_instructions)
4006 struct instruction *instr = NULL;
4007 struct instruction_data *data = NULL;
4008 char *string = NULL;
/* Input validation: a non-empty, fully non-NULL instruction list. */
4012 CHECK(n_instructions, EINVAL);
4013 CHECK(instructions, EINVAL);
4014 for (i = 0; i < n_instructions; i++)
4015 CHECK(instructions[i], EINVAL);
4017 /* Memory allocation. */
4018 instr = calloc(n_instructions, sizeof(struct instruction));
4024 data = calloc(n_instructions, sizeof(struct instruction_data));
/* Translate every instruction string into instr[i]/data[i]. */
4030 for (i = 0; i < n_instructions; i++) {
4031 string = strdup(instructions[i]);
4037 err = instr_translate(p, a, string, &instr[i], &data[i]);
/* Cross-instruction label validation (uniqueness, usage counts). */
4044 err = instr_label_check(data, n_instructions);
/* Success: hand the instruction array to the action ... */
4051 a->instructions = instr;
4052 a->n_instructions = n_instructions;
/* ... or, for the main program, to the pipeline itself. */
4054 p->instructions = instr;
4055 p->n_instructions = n_instructions;
/* Executor function type: one handler per binary instruction opcode. */
4067 typedef void (*instr_exec_t)(struct rte_swx_pipeline *);
/*
 * Instruction dispatch table, indexed by the INSTR_* opcode enum using
 * designated initializers; any opcode not listed here is implicitly
 * NULL. The numbered EXTRACT/EMIT/DMA variants are unrolled fast paths
 * for 2..8 consecutive headers. Fragment: trailing entries (jmp, table,
 * extern, return, ...) and the closing brace are not visible here.
 */
4069 static instr_exec_t instruction_table[] = {
4070 [INSTR_RX] = instr_rx_exec,
4071 [INSTR_TX] = instr_tx_exec,
4073 [INSTR_HDR_EXTRACT] = instr_hdr_extract_exec,
4074 [INSTR_HDR_EXTRACT2] = instr_hdr_extract2_exec,
4075 [INSTR_HDR_EXTRACT3] = instr_hdr_extract3_exec,
4076 [INSTR_HDR_EXTRACT4] = instr_hdr_extract4_exec,
4077 [INSTR_HDR_EXTRACT5] = instr_hdr_extract5_exec,
4078 [INSTR_HDR_EXTRACT6] = instr_hdr_extract6_exec,
4079 [INSTR_HDR_EXTRACT7] = instr_hdr_extract7_exec,
4080 [INSTR_HDR_EXTRACT8] = instr_hdr_extract8_exec,
4082 [INSTR_HDR_EMIT] = instr_hdr_emit_exec,
4083 [INSTR_HDR_EMIT_TX] = instr_hdr_emit_tx_exec,
4084 [INSTR_HDR_EMIT2_TX] = instr_hdr_emit2_tx_exec,
4085 [INSTR_HDR_EMIT3_TX] = instr_hdr_emit3_tx_exec,
4086 [INSTR_HDR_EMIT4_TX] = instr_hdr_emit4_tx_exec,
4087 [INSTR_HDR_EMIT5_TX] = instr_hdr_emit5_tx_exec,
4088 [INSTR_HDR_EMIT6_TX] = instr_hdr_emit6_tx_exec,
4089 [INSTR_HDR_EMIT7_TX] = instr_hdr_emit7_tx_exec,
4090 [INSTR_HDR_EMIT8_TX] = instr_hdr_emit8_tx_exec,
4092 [INSTR_HDR_VALIDATE] = instr_hdr_validate_exec,
4093 [INSTR_HDR_INVALIDATE] = instr_hdr_invalidate_exec,
4095 [INSTR_MOV] = instr_mov_exec,
4096 [INSTR_MOV_S] = instr_mov_s_exec,
4097 [INSTR_MOV_I] = instr_mov_i_exec,
4099 [INSTR_DMA_HT] = instr_dma_ht_exec,
4100 [INSTR_DMA_HT2] = instr_dma_ht2_exec,
4101 [INSTR_DMA_HT3] = instr_dma_ht3_exec,
4102 [INSTR_DMA_HT4] = instr_dma_ht4_exec,
4103 [INSTR_DMA_HT5] = instr_dma_ht5_exec,
4104 [INSTR_DMA_HT6] = instr_dma_ht6_exec,
4105 [INSTR_DMA_HT7] = instr_dma_ht7_exec,
4106 [INSTR_DMA_HT8] = instr_dma_ht8_exec,
4108 [INSTR_ALU_ADD] = instr_alu_add_exec,
4109 [INSTR_ALU_ADD_MH] = instr_alu_add_mh_exec,
4110 [INSTR_ALU_ADD_HM] = instr_alu_add_hm_exec,
4111 [INSTR_ALU_ADD_HH] = instr_alu_add_hh_exec,
4112 [INSTR_ALU_ADD_MI] = instr_alu_add_mi_exec,
4113 [INSTR_ALU_ADD_HI] = instr_alu_add_hi_exec,
4115 [INSTR_ALU_SUB] = instr_alu_sub_exec,
4116 [INSTR_ALU_SUB_MH] = instr_alu_sub_mh_exec,
4117 [INSTR_ALU_SUB_HM] = instr_alu_sub_hm_exec,
4118 [INSTR_ALU_SUB_HH] = instr_alu_sub_hh_exec,
4119 [INSTR_ALU_SUB_MI] = instr_alu_sub_mi_exec,
4120 [INSTR_ALU_SUB_HI] = instr_alu_sub_hi_exec,
4122 [INSTR_ALU_CKADD_FIELD] = instr_alu_ckadd_field_exec,
4123 [INSTR_ALU_CKADD_STRUCT] = instr_alu_ckadd_struct_exec,
4124 [INSTR_ALU_CKADD_STRUCT20] = instr_alu_ckadd_struct20_exec,
4125 [INSTR_ALU_CKSUB_FIELD] = instr_alu_cksub_field_exec,
4127 [INSTR_ALU_AND] = instr_alu_and_exec,
4128 [INSTR_ALU_AND_S] = instr_alu_and_s_exec,
4129 [INSTR_ALU_AND_I] = instr_alu_and_i_exec,
4131 [INSTR_ALU_OR] = instr_alu_or_exec,
4132 [INSTR_ALU_OR_S] = instr_alu_or_s_exec,
4133 [INSTR_ALU_OR_I] = instr_alu_or_i_exec,
4135 [INSTR_ALU_XOR] = instr_alu_xor_exec,
4136 [INSTR_ALU_XOR_S] = instr_alu_xor_s_exec,
4137 [INSTR_ALU_XOR_I] = instr_alu_xor_i_exec,
/*
 * Execute the current thread's instruction by dispatching through
 * instruction_table on the opcode. Hot path: no bounds/NULL check on
 * the handler is visible — translation is presumably trusted to only
 * produce opcodes present in the table (TODO confirm).
 */
4141 instr_exec(struct rte_swx_pipeline *p)
4143 struct thread *t = &p->threads[p->thread_id];
4144 struct instruction *ip = t->ip;
4145 instr_exec_t instr = instruction_table[ip->type];
/*
 * Look up an action by name; linear scan of the pipeline's action
 * tailq, NULL when not found (NULL-name guard is among the missing
 * lines).
 */
static struct action *
4154 action_find(struct rte_swx_pipeline *p, const char *name)
4156 struct action *elem;
4161 TAILQ_FOREACH(elem, &p->actions, node)
4162 if (strcmp(elem->name, name) == 0)
/* Find a named field in the action's argument struct type; an action
 * with no argument struct (a->st == NULL) has no fields. */
static struct field *
4169 action_field_find(struct action *a, const char *name)
4171 return a->st ? struct_type_field_find(a->st, name) : NULL;
/* Parse an action-argument reference: action args are spelled with a
 * mandatory "t." prefix; anything else is not an action field. */
static struct field *
4175 action_field_parse(struct action *action, const char *name)
4177 if (name[0] != 't' || name[1] != '.')
4180 return action_field_find(action, &name[2]);
/*
 * Public API: register an action with the pipeline.
 * Validates a unique non-empty name and an optional, pre-registered
 * argument struct type, allocates the action node, translates its
 * instruction list, and appends it to the pipeline's action tailq.
 * The action id is its registration order (p->n_actions at the time).
 * Fragment: the p/NULL checks, error cleanup (free of a) and the
 * n_actions increment are among the missing lines.
 */
4184 rte_swx_pipeline_action_config(struct rte_swx_pipeline *p,
4186 const char *args_struct_type_name,
4187 const char **instructions,
4188 uint32_t n_instructions)
4190 struct struct_type *args_struct_type;
4196 CHECK_NAME(name, EINVAL);
4197 CHECK(!action_find(p, name), EEXIST);
/* The argument struct type is optional; when given it must exist. */
4199 if (args_struct_type_name) {
4200 CHECK_NAME(args_struct_type_name, EINVAL);
4201 args_struct_type = struct_type_find(p, args_struct_type_name);
4202 CHECK(args_struct_type, EINVAL);
4204 args_struct_type = NULL;
4207 /* Node allocation. */
4208 a = calloc(1, sizeof(struct action));
4211 /* Node initialization. */
4212 strcpy(a->name, name);
4213 a->st = args_struct_type;
4214 a->id = p->n_actions;
4216 /* Instruction translation. */
4217 err = instruction_config(p, a, instructions, n_instructions);
4223 /* Node add to tailq. */
4224 TAILQ_INSERT_TAIL(&p->actions, a, node);
/*
 * Build step for actions: create the action-id -> instruction-array
 * lookup table used at run time. Indexed by action->id, which was
 * assigned densely at registration, so every slot gets filled.
 */
4231 action_build(struct rte_swx_pipeline *p)
4233 struct action *action;
4235 p->action_instructions = calloc(p->n_actions,
4236 sizeof(struct instruction *));
4237 CHECK(p->action_instructions, ENOMEM);
4239 TAILQ_FOREACH(action, &p->actions, node)
4240 p->action_instructions[action->id] = action->instructions;
/* Undo action_build(): release the runtime lookup table only; the
 * actions themselves stay registered. */
4246 action_build_free(struct rte_swx_pipeline *p)
4248 free(p->action_instructions);
4249 p->action_instructions = NULL;
/* Full teardown: free the build artifacts, then pop and free every
 * registered action (node loop framing is among the missing lines). */
4253 action_free(struct rte_swx_pipeline *p)
4255 action_build_free(p);
4258 struct action *action;
4260 action = TAILQ_FIRST(&p->actions);
4264 TAILQ_REMOVE(&p->actions, action, node);
4265 free(action->instructions);
/* Look up a table type by exact name; NULL when not registered. */
static struct table_type *
4274 table_type_find(struct rte_swx_pipeline *p, const char *name)
4276 struct table_type *elem;
4278 TAILQ_FOREACH(elem, &p->table_types, node)
4279 if (strcmp(elem->name, name) == 0)
/*
 * Pick a table type implementation for the requested match type:
 * prefer the recommended type by name (but only if its match type
 * agrees), otherwise fall back to the first registered type with the
 * right match type.
 */
static struct table_type *
4286 table_type_resolve(struct rte_swx_pipeline *p,
4287 const char *recommended_type_name,
4288 enum rte_swx_table_match_type match_type)
4290 struct table_type *elem;
4292 /* Only consider the recommended type if the match type is correct. */
4293 if (recommended_type_name)
4294 TAILQ_FOREACH(elem, &p->table_types, node)
4295 if (!strcmp(elem->name, recommended_type_name) &&
4296 (elem->match_type == match_type))
4299 /* Ignore the recommended type and get the first element with this match
4302 TAILQ_FOREACH(elem, &p->table_types, node)
4303 if (elem->match_type == match_type)
/* Look up a table by name; NULL when not found. */
static struct table *
4310 table_find(struct rte_swx_pipeline *p, const char *name)
4314 TAILQ_FOREACH(elem, &p->tables, node)
4315 if (strcmp(elem->name, name) == 0)
/* Look up a table by its dense numeric id; used by the table-state
 * teardown path where only the index is known. */
static struct table *
4322 table_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
4324 struct table *table = NULL;
4326 TAILQ_FOREACH(table, &p->tables, node)
4327 if (table->id == id)
/*
 * Public API: register a table type (a match-table implementation) with
 * the pipeline. Requires a unique non-empty name and an ops structure
 * with at least create, lkp and free callbacks; the ops are copied by
 * value into the new node. Fragment: the p/ops NULL checks are among
 * the missing lines.
 */
4334 rte_swx_pipeline_table_type_register(struct rte_swx_pipeline *p,
4336 enum rte_swx_table_match_type match_type,
4337 struct rte_swx_table_ops *ops)
4339 struct table_type *elem;
4343 CHECK_NAME(name, EINVAL);
4344 CHECK(!table_type_find(p, name), EEXIST);
/* create, lkp and free are mandatory callbacks. */
4347 CHECK(ops->create, EINVAL);
4348 CHECK(ops->lkp, EINVAL);
4349 CHECK(ops->free, EINVAL);
4351 /* Node allocation. */
4352 elem = calloc(1, sizeof(struct table_type));
4353 CHECK(elem, ENOMEM);
4355 /* Node initialization. */
4356 strcpy(elem->name, name);
4357 elem->match_type = match_type;
4358 memcpy(&elem->ops, ops, sizeof(*ops));
4360 /* Node add to tailq. */
4361 TAILQ_INSERT_TAIL(&p->table_types, elem, node);
/*
 * Derive the overall table match type from its field list:
 * - all fields EXACT            -> EXACT;
 * - only the last field is non-EXACT and it is LPM -> LPM;
 * - anything else              -> WILDCARD.
 * After the loop, i is the index of the first non-EXACT field (or
 * n_fields when none).
 */
static enum rte_swx_table_match_type
4367 table_match_type_resolve(struct rte_swx_match_field_params *fields,
4372 for (i = 0; i < n_fields; i++)
4373 if (fields[i].match_type != RTE_SWX_TABLE_MATCH_EXACT)
4377 return RTE_SWX_TABLE_MATCH_EXACT;
4379 if ((i == n_fields - 1) &&
4380 (fields[i].match_type == RTE_SWX_TABLE_MATCH_LPM))
4381 return RTE_SWX_TABLE_MATCH_LPM;
4383 return RTE_SWX_TABLE_MATCH_WILDCARD;
/*
 * Public API: configure a match-action table.
 * Validates the match fields (all from headers of a single header, or
 * all from metadata, in strictly increasing offset order), validates
 * the action list and the default action, resolves the table type from
 * the match fields, then allocates and initializes the table node and
 * appends it to the pipeline tailq. Fragment: several checks, the
 * error-unwind labels and the n_tables increment are among the missing
 * lines.
 */
4387 rte_swx_pipeline_table_config(struct rte_swx_pipeline *p,
4389 struct rte_swx_pipeline_table_params *params,
4390 const char *recommended_table_type_name,
4394 struct table_type *type;
4396 struct action *default_action;
4397 struct header *header = NULL;
4399 uint32_t offset_prev = 0, action_data_size_max = 0, i;
4403 CHECK_NAME(name, EINVAL);
4404 CHECK(!table_find(p, name), EEXIST);
4406 CHECK(params, EINVAL);
/* Match field checks: every field must resolve to either a header
 * field or a metadata field. */
4409 CHECK(!params->n_fields || params->fields, EINVAL);
4410 for (i = 0; i < params->n_fields; i++) {
4411 struct rte_swx_match_field_params *field = &params->fields[i];
4413 struct field *hf, *mf;
4416 CHECK_NAME(field->name, EINVAL);
4418 hf = header_field_parse(p, field->name, &h);
4419 mf = metadata_field_parse(p, field->name);
4420 CHECK(hf || mf, EINVAL);
4422 offset = hf ? hf->offset : mf->offset;
/* First field decides header vs metadata mode for the table. */
4425 is_header = hf ? 1 : 0;
4426 header = hf ? h : NULL;
4427 offset_prev = offset;
/* Subsequent fields: same header (or all metadata) as the first. */
4432 CHECK((is_header && hf && (h->id == header->id)) ||
4433 (!is_header && mf), EINVAL);
/* Fields must come in strictly increasing offset order — this is
 * what lets table_params_get() build a contiguous key span. */
4435 CHECK(offset > offset_prev, EINVAL);
4436 offset_prev = offset;
4439 /* Action checks. */
4440 CHECK(params->n_actions, EINVAL);
4441 CHECK(params->action_names, EINVAL);
4442 for (i = 0; i < params->n_actions; i++) {
4443 const char *action_name = params->action_names[i];
4445 uint32_t action_data_size;
4447 CHECK(action_name, EINVAL);
4449 a = action_find(p, action_name);
/* Track the largest action argument struct: it sizes the default
 * action data buffer below. */
4452 action_data_size = a->st ? a->st->n_bits / 8 : 0;
4453 if (action_data_size > action_data_size_max)
4454 action_data_size_max = action_data_size;
/* Default action: must be one of the table's own actions.
 * NOTE(review): the loop below is bounded by p->n_actions but indexes
 * params->action_names[] and is then checked against params->n_actions;
 * if p->n_actions > params->n_actions this reads past the caller's
 * array. The bound should likely be params->n_actions — confirm against
 * upstream. */
4457 CHECK(params->default_action_name, EINVAL);
4458 for (i = 0; i < p->n_actions; i++)
4459 if (!strcmp(params->action_names[i],
4460 params->default_action_name))
4462 CHECK(i < params->n_actions, EINVAL);
4463 default_action = action_find(p, params->default_action_name);
/* Default action data is required iff the default action takes args. */
4464 CHECK((default_action->st && params->default_action_data) ||
4465 !params->default_action_data, EINVAL);
4467 /* Table type checks. */
4468 if (params->n_fields) {
4469 enum rte_swx_table_match_type match_type;
4471 match_type = table_match_type_resolve(params->fields,
4473 type = table_type_resolve(p,
4474 recommended_table_type_name,
4476 CHECK(type, EINVAL);
4481 /* Memory allocation. */
4482 t = calloc(1, sizeof(struct table));
4485 t->fields = calloc(params->n_fields, sizeof(struct match_field));
4491 t->actions = calloc(params->n_actions, sizeof(struct action *));
4498 if (action_data_size_max) {
4499 t->default_action_data = calloc(1, action_data_size_max);
4500 if (!t->default_action_data) {
4508 /* Node initialization. */
4509 strcpy(t->name, name);
4510 if (args && args[0])
4511 strcpy(t->args, args);
4514 for (i = 0; i < params->n_fields; i++) {
4515 struct rte_swx_match_field_params *field = &params->fields[i];
4516 struct match_field *f = &t->fields[i];
4518 f->match_type = field->match_type;
4519 f->field = is_header ?
4520 header_field_parse(p, field->name, NULL) :
4521 metadata_field_parse(p, field->name);
4523 t->n_fields = params->n_fields;
4524 t->is_header = is_header;
4527 for (i = 0; i < params->n_actions; i++)
4528 t->actions[i] = action_find(p, params->action_names[i]);
4529 t->default_action = default_action;
4530 if (default_action->st)
4531 memcpy(t->default_action_data,
4532 params->default_action_data,
4533 default_action->st->n_bits / 8);
4534 t->n_actions = params->n_actions;
4535 t->default_action_is_const = params->default_action_is_const;
4536 t->action_data_size_max = action_data_size_max;
4539 t->id = p->n_tables;
4541 /* Node add to tailq. */
4542 TAILQ_INSERT_TAIL(&p->tables, t, node);
/*
 * Build the rte_swx_table_params passed to the table type's create()
 * callback: the key is the byte span from the first to the last match
 * field (fields were validated to be in increasing offset order), with
 * a byte mask selecting only the match-field bytes inside that span.
 * Ownership of the returned params (and its key_mask0) stays with the
 * caller via table_params_free(). Fragment: the key_mask declaration,
 * NULL-allocation handling and return statement are among the missing
 * lines.
 */
static struct rte_swx_table_params *
4549 table_params_get(struct table *table)
4551 struct rte_swx_table_params *params;
4552 struct field *first, *last;
4554 uint32_t key_size, key_offset, action_data_size, i;
4556 /* Memory allocation. */
4557 params = calloc(1, sizeof(struct rte_swx_table_params));
4561 /* Key offset and size. */
4562 first = table->fields[0].field;
4563 last = table->fields[table->n_fields - 1].field;
4564 key_offset = first->offset / 8;
4565 key_size = (last->offset + last->n_bits - first->offset) / 8;
4567 /* Memory allocation. */
4568 key_mask = calloc(1, key_size);
/* Set the mask bytes covered by each match field; gap bytes between
 * fields stay zero (don't-care). */
4575 for (i = 0; i < table->n_fields; i++) {
4576 struct field *f = table->fields[i].field;
4577 uint32_t start = (f->offset - first->offset) / 8;
4578 size_t size = f->n_bits / 8;
4580 memset(&key_mask[start], 0xFF, size);
4583 /* Action data size. */
4584 action_data_size = 0;
4585 for (i = 0; i < table->n_actions; i++) {
4586 struct action *action = table->actions[i];
4587 uint32_t ads = action->st ? action->st->n_bits / 8 : 0;
4589 if (ads > action_data_size)
4590 action_data_size = ads;
/* Fill in: match type comes from the resolved table type. */
4594 params->match_type = table->type->match_type;
4595 params->key_size = key_size;
4596 params->key_offset = key_offset;
4597 params->key_mask0 = key_mask;
4598 params->action_data_size = action_data_size;
4599 params->n_keys_max = table->size;
/* Release a params object built by table_params_get() above. */
4605 table_params_free(struct rte_swx_table_params *params)
4610 free(params->key_mask0);
/*
 * Build step for table state: one rte_swx_table_state per table (table
 * object handle, default action data, default action id), indexed by
 * the dense table id. Tables without a type (stub tables) presumably
 * skip object creation — the branch framing is among the missing lines.
 * On a mid-loop CHECK failure the partially-built state is released by
 * the caller (rte_swx_pipeline_build error path calls
 * table_state_build_free).
 */
4615 table_state_build(struct rte_swx_pipeline *p)
4617 struct table *table;
4619 p->table_state = calloc(p->n_tables,
4620 sizeof(struct rte_swx_table_state));
4621 CHECK(p->table_state, ENOMEM);
4623 TAILQ_FOREACH(table, &p->tables, node) {
4624 struct rte_swx_table_state *ts = &p->table_state[table->id];
4627 struct rte_swx_table_params *params;
/* ts->obj: create the table object via the type's callback; the
 * params are only needed for the call and freed right after. */
4630 params = table_params_get(table);
4631 CHECK(params, ENOMEM);
4633 ts->obj = table->type->ops.create(params,
4638 table_params_free(params);
4639 CHECK(ts->obj, ENODEV);
4642 /* ts->default_action_data. */
4643 if (table->action_data_size_max) {
4644 ts->default_action_data =
4645 malloc(table->action_data_size_max);
4646 CHECK(ts->default_action_data, ENOMEM);
4648 memcpy(ts->default_action_data,
4649 table->default_action_data,
4650 table->action_data_size_max);
4653 /* ts->default_action_id. */
4654 ts->default_action_id = table->default_action->id;
/*
 * Undo table_state_build(): free each table object through its type's
 * free callback and the per-table default action data, then the state
 * array itself. Safe to call on a partially-built state (NULL checks).
 */
4661 table_state_build_free(struct rte_swx_pipeline *p)
4665 if (!p->table_state)
4668 for (i = 0; i < p->n_tables; i++) {
4669 struct rte_swx_table_state *ts = &p->table_state[i];
4670 struct table *table = table_find_by_id(p, i);
/* ts->obj: only if the table has a real (non-stub) type. */
4673 if (table->type && ts->obj)
4674 table->type->ops.free(ts->obj);
4676 /* ts->default_action_data. */
4677 free(ts->default_action_data);
4680 free(p->table_state);
4681 p->table_state = NULL;
/* Full table-state teardown; currently just the build artifacts. */
4685 table_state_free(struct rte_swx_pipeline *p)
4687 table_state_build_free(p);
/*
 * Lookup callback for stub tables (tables with no match fields / no
 * type): completes immediately with no match, so the table always falls
 * through to its default action.
 */
4691 table_stub_lkp(void *table __rte_unused,
4692 void *mailbox __rte_unused,
4693 uint8_t **key __rte_unused,
4694 uint64_t *action_id __rte_unused,
4695 uint8_t **action_data __rte_unused,
4699 return 1; /* DONE. */
/*
 * Build step for per-thread table runtime: for every thread, allocate a
 * table_runtime array indexed by table id. Tables with a real type get
 * the type's lkp callback plus a mailbox sized by mailbox_size_get();
 * stub tables get table_stub_lkp. The lookup key points at the header
 * or metadata struct selected by the table's match fields. Fragment:
 * the if/else framing around the two r->func assignments is among the
 * missing lines.
 */
4703 table_build(struct rte_swx_pipeline *p)
4707 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
4708 struct thread *t = &p->threads[i];
4709 struct table *table;
4711 t->tables = calloc(p->n_tables, sizeof(struct table_runtime));
4712 CHECK(t->tables, ENOMEM);
4714 TAILQ_FOREACH(table, &p->tables, node) {
4715 struct table_runtime *r = &t->tables[table->id];
/* Real table: per-thread mailbox for the multi-step lookup
 * state machine, sized by the table type. */
4720 size = table->type->ops.mailbox_size_get();
4723 r->func = table->type->ops.lkp;
4727 r->mailbox = calloc(1, size);
4728 CHECK(r->mailbox, ENOMEM);
/* Key base: the header struct for header tables, otherwise the
 * thread's metadata struct. */
4732 r->key = table->is_header ?
4733 &t->structs[table->header->struct_id] :
4734 &t->structs[p->metadata_struct_id];
/* Stub table: always-miss lookup. */
4736 r->func = table_stub_lkp;
/*
 * Undo table_build(): free every thread's per-table mailboxes and the
 * table_runtime arrays (free statements are among the missing lines).
 */
4745 table_build_free(struct rte_swx_pipeline *p)
4749 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
4750 struct thread *t = &p->threads[i];
4756 for (j = 0; j < p->n_tables; j++) {
4757 struct table_runtime *r = &t->tables[j];
/*
 * Full table teardown: free build artifacts, then pop and free every
 * table node (its actions array and default action data), and finally
 * every registered table type. Loop framing and the final free(elem)
 * calls are among the missing lines.
 */
4768 table_free(struct rte_swx_pipeline *p)
4770 table_build_free(p);
4776 elem = TAILQ_FIRST(&p->tables);
4780 TAILQ_REMOVE(&p->tables, elem, node);
4782 free(elem->actions);
4783 free(elem->default_action_data);
/* Table types. */
4789 struct table_type *elem;
4791 elem = TAILQ_FIRST(&p->table_types);
4795 TAILQ_REMOVE(&p->table_types, elem, node);
/*
 * Public API: allocate and initialize an empty pipeline object.
 * All registration tailqs start empty; struct id 0 is reserved for the
 * per-thread action data struct (see n_structs = 1 below). The numa
 * node is recorded for later build-time allocations. Fragment: the
 * *p output assignment and return are among the missing lines.
 */
4804 rte_swx_pipeline_config(struct rte_swx_pipeline **p, int numa_node)
4806 struct rte_swx_pipeline *pipeline;
4808 /* Check input parameters. */
4811 /* Memory allocation. */
4812 pipeline = calloc(1, sizeof(struct rte_swx_pipeline));
4813 CHECK(pipeline, ENOMEM);
4815 /* Initialization. */
4816 TAILQ_INIT(&pipeline->struct_types);
4817 TAILQ_INIT(&pipeline->port_in_types);
4818 TAILQ_INIT(&pipeline->ports_in);
4819 TAILQ_INIT(&pipeline->port_out_types);
4820 TAILQ_INIT(&pipeline->ports_out);
4821 TAILQ_INIT(&pipeline->extern_types);
4822 TAILQ_INIT(&pipeline->extern_objs);
4823 TAILQ_INIT(&pipeline->extern_funcs);
4824 TAILQ_INIT(&pipeline->headers);
4825 TAILQ_INIT(&pipeline->actions);
4826 TAILQ_INIT(&pipeline->table_types);
4827 TAILQ_INIT(&pipeline->tables);
4829 pipeline->n_structs = 1; /* Struct 0 is reserved for action_data. */
4830 pipeline->numa_node = numa_node;
/*
 * Public API: destroy a pipeline and everything registered with it
 * (instructions, table state, externs, ... — most per-subsystem free
 * calls are among the missing lines). NULL-p guard is presumably the
 * first missing line — TODO confirm.
 */
4837 rte_swx_pipeline_free(struct rte_swx_pipeline *p)
4842 free(p->instructions);
4844 table_state_free(p);
4849 extern_func_free(p);
/*
 * Public API: set the pipeline's main instruction list (action == NULL
 * path of instruction_config), then reset every thread's instruction
 * pointer so execution starts at the new program.
 */
4859 rte_swx_pipeline_instructions_config(struct rte_swx_pipeline *p,
4860 const char **instructions,
4861 uint32_t n_instructions)
4866 err = instruction_config(p, NULL, instructions, n_instructions);
4870 /* Thread instruction pointer reset. */
4871 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
4872 struct thread *t = &p->threads[i];
4874 thread_ip_reset(p, t);
/*
 * Public API: one-shot build of all pipeline subsystems in dependency
 * order (ports -> structs -> externs -> headers/metadata -> actions ->
 * tables -> table state). Build may run only once (build_done guard).
 * On any stage failure, control falls into the unwind sequence below,
 * which frees the stages in reverse order; *_build_free functions
 * tolerate partially-built or never-built state.
 */
4881 rte_swx_pipeline_build(struct rte_swx_pipeline *p)
4886 CHECK(p->build_done == 0, EEXIST);
4888 status = port_in_build(p);
4892 status = port_out_build(p);
4896 status = struct_build(p);
4900 status = extern_obj_build(p);
4904 status = extern_func_build(p);
4908 status = header_build(p);
4912 status = metadata_build(p);
4916 status = action_build(p);
4920 status = table_build(p);
4924 status = table_state_build(p);
/* Error unwind: tear down every stage in reverse build order. */
4932 table_state_build_free(p);
4933 table_build_free(p);
4934 action_build_free(p);
4935 metadata_build_free(p);
4936 header_build_free(p);
4937 extern_func_build_free(p);
4938 extern_obj_build_free(p);
4939 port_out_build_free(p);
4940 port_in_build_free(p);
4941 struct_build_free(p);
/*
 * Public API: run the pipeline for a quantum of n_instructions
 * instructions (the per-iteration dispatch call is among the missing
 * lines).
 */
4947 rte_swx_pipeline_run(struct rte_swx_pipeline *p, uint32_t n_instructions)
4951 for (i = 0; i < n_instructions; i++)
/*
 * Public API: expose the internal table state array to the control
 * plane. Valid only after a successful build; the pipeline retains
 * ownership of the returned array.
 */
4959 rte_swx_pipeline_table_state_get(struct rte_swx_pipeline *p,
4960 struct rte_swx_table_state **table_state)
4962 if (!p || !table_state || !p->build_done)
4965 *table_state = p->table_state;
4970 rte_swx_pipeline_table_state_set(struct rte_swx_pipeline *p,
4971 struct rte_swx_table_state *table_state)
4973 if (!p || !table_state || !p->build_done)
4976 p->table_state = table_state;