1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Intel Corporation
11 #include <rte_common.h>
12 #include <rte_prefetch.h>
13 #include <rte_byteorder.h>
15 #include "rte_swx_pipeline.h"
16 #include "rte_swx_ctl.h"
18 #define CHECK(condition, err_code) \
24 #define CHECK_NAME(name, err_code) \
25 CHECK((name) && (name)[0], err_code)
32 #define TRACE(...) printf(__VA_ARGS__)
37 #define ntoh64(x) rte_be_to_cpu_64(x)
38 #define hton64(x) rte_cpu_to_be_64(x)
44 char name[RTE_SWX_NAME_SIZE];
50 TAILQ_ENTRY(struct_type) node;
51 char name[RTE_SWX_NAME_SIZE];
57 TAILQ_HEAD(struct_type_tailq, struct_type);
63 TAILQ_ENTRY(port_in_type) node;
64 char name[RTE_SWX_NAME_SIZE];
65 struct rte_swx_port_in_ops ops;
68 TAILQ_HEAD(port_in_type_tailq, port_in_type);
71 TAILQ_ENTRY(port_in) node;
72 struct port_in_type *type;
77 TAILQ_HEAD(port_in_tailq, port_in);
79 struct port_in_runtime {
80 rte_swx_port_in_pkt_rx_t pkt_rx;
87 struct port_out_type {
88 TAILQ_ENTRY(port_out_type) node;
89 char name[RTE_SWX_NAME_SIZE];
90 struct rte_swx_port_out_ops ops;
93 TAILQ_HEAD(port_out_type_tailq, port_out_type);
96 TAILQ_ENTRY(port_out) node;
97 struct port_out_type *type;
102 TAILQ_HEAD(port_out_tailq, port_out);
104 struct port_out_runtime {
105 rte_swx_port_out_pkt_tx_t pkt_tx;
106 rte_swx_port_out_flush_t flush;
113 struct extern_type_member_func {
114 TAILQ_ENTRY(extern_type_member_func) node;
115 char name[RTE_SWX_NAME_SIZE];
116 rte_swx_extern_type_member_func_t func;
120 TAILQ_HEAD(extern_type_member_func_tailq, extern_type_member_func);
123 TAILQ_ENTRY(extern_type) node;
124 char name[RTE_SWX_NAME_SIZE];
125 struct struct_type *mailbox_struct_type;
126 rte_swx_extern_type_constructor_t constructor;
127 rte_swx_extern_type_destructor_t destructor;
128 struct extern_type_member_func_tailq funcs;
132 TAILQ_HEAD(extern_type_tailq, extern_type);
135 TAILQ_ENTRY(extern_obj) node;
136 char name[RTE_SWX_NAME_SIZE];
137 struct extern_type *type;
143 TAILQ_HEAD(extern_obj_tailq, extern_obj);
145 #ifndef RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX
146 #define RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX 8
149 struct extern_obj_runtime {
152 rte_swx_extern_type_member_func_t funcs[RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX];
159 TAILQ_ENTRY(extern_func) node;
160 char name[RTE_SWX_NAME_SIZE];
161 struct struct_type *mailbox_struct_type;
162 rte_swx_extern_func_t func;
167 TAILQ_HEAD(extern_func_tailq, extern_func);
169 struct extern_func_runtime {
171 rte_swx_extern_func_t func;
178 TAILQ_ENTRY(header) node;
179 char name[RTE_SWX_NAME_SIZE];
180 struct struct_type *st;
185 TAILQ_HEAD(header_tailq, header);
187 struct header_runtime {
191 struct header_out_runtime {
201 /* Packet headers are always in Network Byte Order (NBO), i.e. big endian.
202 * Packet meta-data fields are always assumed to be in Host Byte Order (HBO).
203 * Table entry fields can be in either NBO or HBO; they are assumed to be in HBO
204 * when transferred to packet meta-data and in NBO when transferred to packet
208 /* Notation conventions:
209 * -Header field: H = h.header.field (dst/src)
210 * -Meta-data field: M = m.field (dst/src)
211 * -Extern object mailbox field: E = e.field (dst/src)
212 * -Extern function mailbox field: F = f.field (dst/src)
213 * -Table action data field: T = t.field (src only)
214 * -Immediate value: I = 32-bit unsigned value (src only)
217 enum instruction_type {
224 /* extract h.header */
245 /* validate h.header */
248 /* invalidate h.header */
249 INSTR_HDR_INVALIDATE,
253 * dst = HMEF, src = HMEFTI
255 INSTR_MOV, /* dst = MEF, src = MEFT */
256 INSTR_MOV_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
257 INSTR_MOV_I, /* dst = HMEF, src = I */
259 /* dma h.header t.field
260 * memcpy(h.header, t.field, sizeof(h.header))
273 * dst = HMEF, src = HMEFTI
275 INSTR_ALU_ADD, /* dst = MEF, src = MEF */
276 INSTR_ALU_ADD_MH, /* dst = MEF, src = H */
277 INSTR_ALU_ADD_HM, /* dst = H, src = MEF */
278 INSTR_ALU_ADD_HH, /* dst = H, src = H */
279 INSTR_ALU_ADD_MI, /* dst = MEF, src = I */
280 INSTR_ALU_ADD_HI, /* dst = H, src = I */
284 * dst = HMEF, src = HMEFTI
286 INSTR_ALU_SUB, /* dst = MEF, src = MEF */
287 INSTR_ALU_SUB_MH, /* dst = MEF, src = H */
288 INSTR_ALU_SUB_HM, /* dst = H, src = MEF */
289 INSTR_ALU_SUB_HH, /* dst = H, src = H */
290 INSTR_ALU_SUB_MI, /* dst = MEF, src = I */
291 INSTR_ALU_SUB_HI, /* dst = H, src = I */
294 struct instr_operand {
309 uint8_t header_id[8];
310 uint8_t struct_id[8];
315 struct instr_hdr_validity {
319 struct instr_dst_src {
320 struct instr_operand dst;
322 struct instr_operand src;
329 uint8_t header_id[8];
330 uint8_t struct_id[8];
341 enum instruction_type type;
344 struct instr_hdr_validity valid;
345 struct instr_dst_src mov;
346 struct instr_dma dma;
347 struct instr_dst_src alu;
351 struct instruction_data {
352 char label[RTE_SWX_NAME_SIZE];
353 char jmp_label[RTE_SWX_NAME_SIZE];
354 uint32_t n_users; /* user = jmp instruction to this instruction. */
362 TAILQ_ENTRY(action) node;
363 char name[RTE_SWX_NAME_SIZE];
364 struct struct_type *st;
365 struct instruction *instructions;
366 uint32_t n_instructions;
370 TAILQ_HEAD(action_tailq, action);
376 TAILQ_ENTRY(table_type) node;
377 char name[RTE_SWX_NAME_SIZE];
378 enum rte_swx_table_match_type match_type;
379 struct rte_swx_table_ops ops;
382 TAILQ_HEAD(table_type_tailq, table_type);
385 enum rte_swx_table_match_type match_type;
390 TAILQ_ENTRY(table) node;
391 char name[RTE_SWX_NAME_SIZE];
392 char args[RTE_SWX_NAME_SIZE];
393 struct table_type *type; /* NULL when n_fields == 0. */
396 struct match_field *fields;
398 int is_header; /* Only valid when n_fields > 0. */
399 struct header *header; /* Only valid when n_fields > 0. */
402 struct action **actions;
403 struct action *default_action;
404 uint8_t *default_action_data;
406 int default_action_is_const;
407 uint32_t action_data_size_max;
413 TAILQ_HEAD(table_tailq, table);
415 struct table_runtime {
416 rte_swx_table_lookup_t func;
426 struct rte_swx_pkt pkt;
432 /* Packet headers. */
433 struct header_runtime *headers; /* Extracted or generated headers. */
434 struct header_out_runtime *headers_out; /* Emitted headers. */
435 uint8_t *header_storage;
436 uint8_t *header_out_storage;
437 uint64_t valid_headers;
438 uint32_t n_headers_out;
440 /* Packet meta-data. */
444 struct table_runtime *tables;
445 struct rte_swx_table_state *table_state;
447 int hit; /* 0 = Miss, 1 = Hit. */
449 /* Extern objects and functions. */
450 struct extern_obj_runtime *extern_objs;
451 struct extern_func_runtime *extern_funcs;
454 struct instruction *ip;
455 struct instruction *ret;
458 #define MASK64_BIT_GET(mask, pos) ((mask) & (1LLU << (pos)))
459 #define MASK64_BIT_SET(mask, pos) ((mask) | (1LLU << (pos)))
460 #define MASK64_BIT_CLR(mask, pos) ((mask) & ~(1LLU << (pos)))
462 #define ALU(thread, ip, operator) \
464 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
465 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
466 uint64_t dst64 = *dst64_ptr; \
467 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
468 uint64_t dst = dst64 & dst64_mask; \
470 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
471 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
472 uint64_t src64 = *src64_ptr; \
473 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
474 uint64_t src = src64 & src64_mask; \
476 uint64_t result = dst operator src; \
478 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
481 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
483 #define ALU_S(thread, ip, operator) \
485 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
486 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
487 uint64_t dst64 = *dst64_ptr; \
488 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
489 uint64_t dst = dst64 & dst64_mask; \
491 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
492 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
493 uint64_t src64 = *src64_ptr; \
494 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
496 uint64_t result = dst operator src; \
498 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
503 #define ALU_HM(thread, ip, operator) \
505 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
506 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
507 uint64_t dst64 = *dst64_ptr; \
508 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
509 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
511 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
512 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
513 uint64_t src64 = *src64_ptr; \
514 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
515 uint64_t src = src64 & src64_mask; \
517 uint64_t result = dst operator src; \
518 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
520 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
523 #define ALU_HH(thread, ip, operator) \
525 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
526 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
527 uint64_t dst64 = *dst64_ptr; \
528 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
529 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
531 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
532 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
533 uint64_t src64 = *src64_ptr; \
534 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
536 uint64_t result = dst operator src; \
537 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
539 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
551 #define ALU_I(thread, ip, operator) \
553 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
554 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
555 uint64_t dst64 = *dst64_ptr; \
556 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
557 uint64_t dst = dst64 & dst64_mask; \
559 uint64_t src = (ip)->alu.src_val; \
561 uint64_t result = dst operator src; \
563 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
568 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
570 #define ALU_HI(thread, ip, operator) \
572 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
573 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
574 uint64_t dst64 = *dst64_ptr; \
575 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
576 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
578 uint64_t src = (ip)->alu.src_val; \
580 uint64_t result = dst operator src; \
581 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
583 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
592 #define MOV(thread, ip) \
594 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
595 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
596 uint64_t dst64 = *dst64_ptr; \
597 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
599 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
600 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
601 uint64_t src64 = *src64_ptr; \
602 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \
603 uint64_t src = src64 & src64_mask; \
605 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
608 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
610 #define MOV_S(thread, ip) \
612 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
613 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
614 uint64_t dst64 = *dst64_ptr; \
615 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
617 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
618 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
619 uint64_t src64 = *src64_ptr; \
620 uint64_t src = ntoh64(src64) >> (64 - (ip)->mov.src.n_bits); \
622 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
631 #define MOV_I(thread, ip) \
633 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
634 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
635 uint64_t dst64 = *dst64_ptr; \
636 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
638 uint64_t src = (ip)->mov.src_val; \
640 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
643 #define METADATA_READ(thread, offset, n_bits) \
645 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
646 uint64_t m64 = *m64_ptr; \
647 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
651 #define METADATA_WRITE(thread, offset, n_bits, value) \
653 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
654 uint64_t m64 = *m64_ptr; \
655 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
657 uint64_t m_new = value; \
659 *m64_ptr = (m64 & ~m64_mask) | (m_new & m64_mask); \
662 #ifndef RTE_SWX_PIPELINE_THREADS_MAX
663 #define RTE_SWX_PIPELINE_THREADS_MAX 16
666 struct rte_swx_pipeline {
667 struct struct_type_tailq struct_types;
668 struct port_in_type_tailq port_in_types;
669 struct port_in_tailq ports_in;
670 struct port_out_type_tailq port_out_types;
671 struct port_out_tailq ports_out;
672 struct extern_type_tailq extern_types;
673 struct extern_obj_tailq extern_objs;
674 struct extern_func_tailq extern_funcs;
675 struct header_tailq headers;
676 struct struct_type *metadata_st;
677 uint32_t metadata_struct_id;
678 struct action_tailq actions;
679 struct table_type_tailq table_types;
680 struct table_tailq tables;
682 struct port_in_runtime *in;
683 struct port_out_runtime *out;
684 struct instruction **action_instructions;
685 struct rte_swx_table_state *table_state;
686 struct instruction *instructions;
687 struct thread threads[RTE_SWX_PIPELINE_THREADS_MAX];
691 uint32_t n_ports_out;
692 uint32_t n_extern_objs;
693 uint32_t n_extern_funcs;
699 uint32_t n_instructions;
707 static struct struct_type *
708 struct_type_find(struct rte_swx_pipeline *p, const char *name)
710 struct struct_type *elem;
712 TAILQ_FOREACH(elem, &p->struct_types, node)
713 if (strcmp(elem->name, name) == 0)
719 static struct field *
720 struct_type_field_find(struct struct_type *st, const char *name)
724 for (i = 0; i < st->n_fields; i++) {
725 struct field *f = &st->fields[i];
727 if (strcmp(f->name, name) == 0)
735 rte_swx_pipeline_struct_type_register(struct rte_swx_pipeline *p,
737 struct rte_swx_field_params *fields,
740 struct struct_type *st;
744 CHECK_NAME(name, EINVAL);
745 CHECK(fields, EINVAL);
746 CHECK(n_fields, EINVAL);
748 for (i = 0; i < n_fields; i++) {
749 struct rte_swx_field_params *f = &fields[i];
752 CHECK_NAME(f->name, EINVAL);
753 CHECK(f->n_bits, EINVAL);
754 CHECK(f->n_bits <= 64, EINVAL);
755 CHECK((f->n_bits & 7) == 0, EINVAL);
757 for (j = 0; j < i; j++) {
758 struct rte_swx_field_params *f_prev = &fields[j];
760 CHECK(strcmp(f->name, f_prev->name), EINVAL);
764 CHECK(!struct_type_find(p, name), EEXIST);
766 /* Node allocation. */
767 st = calloc(1, sizeof(struct struct_type));
770 st->fields = calloc(n_fields, sizeof(struct field));
776 /* Node initialization. */
777 strcpy(st->name, name);
778 for (i = 0; i < n_fields; i++) {
779 struct field *dst = &st->fields[i];
780 struct rte_swx_field_params *src = &fields[i];
782 strcpy(dst->name, src->name);
783 dst->n_bits = src->n_bits;
784 dst->offset = st->n_bits;
786 st->n_bits += src->n_bits;
788 st->n_fields = n_fields;
790 /* Node add to tailq. */
791 TAILQ_INSERT_TAIL(&p->struct_types, st, node);
797 struct_build(struct rte_swx_pipeline *p)
801 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
802 struct thread *t = &p->threads[i];
804 t->structs = calloc(p->n_structs, sizeof(uint8_t *));
805 CHECK(t->structs, ENOMEM);
812 struct_build_free(struct rte_swx_pipeline *p)
816 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
817 struct thread *t = &p->threads[i];
825 struct_free(struct rte_swx_pipeline *p)
827 struct_build_free(p);
831 struct struct_type *elem;
833 elem = TAILQ_FIRST(&p->struct_types);
837 TAILQ_REMOVE(&p->struct_types, elem, node);
846 static struct port_in_type *
847 port_in_type_find(struct rte_swx_pipeline *p, const char *name)
849 struct port_in_type *elem;
854 TAILQ_FOREACH(elem, &p->port_in_types, node)
855 if (strcmp(elem->name, name) == 0)
862 rte_swx_pipeline_port_in_type_register(struct rte_swx_pipeline *p,
864 struct rte_swx_port_in_ops *ops)
866 struct port_in_type *elem;
869 CHECK_NAME(name, EINVAL);
871 CHECK(ops->create, EINVAL);
872 CHECK(ops->free, EINVAL);
873 CHECK(ops->pkt_rx, EINVAL);
874 CHECK(ops->stats_read, EINVAL);
876 CHECK(!port_in_type_find(p, name), EEXIST);
878 /* Node allocation. */
879 elem = calloc(1, sizeof(struct port_in_type));
882 /* Node initialization. */
883 strcpy(elem->name, name);
884 memcpy(&elem->ops, ops, sizeof(*ops));
886 /* Node add to tailq. */
887 TAILQ_INSERT_TAIL(&p->port_in_types, elem, node);
892 static struct port_in *
893 port_in_find(struct rte_swx_pipeline *p, uint32_t port_id)
895 struct port_in *port;
897 TAILQ_FOREACH(port, &p->ports_in, node)
898 if (port->id == port_id)
905 rte_swx_pipeline_port_in_config(struct rte_swx_pipeline *p,
907 const char *port_type_name,
910 struct port_in_type *type = NULL;
911 struct port_in *port = NULL;
916 CHECK(!port_in_find(p, port_id), EINVAL);
918 CHECK_NAME(port_type_name, EINVAL);
919 type = port_in_type_find(p, port_type_name);
922 obj = type->ops.create(args);
925 /* Node allocation. */
926 port = calloc(1, sizeof(struct port_in));
929 /* Node initialization. */
934 /* Node add to tailq. */
935 TAILQ_INSERT_TAIL(&p->ports_in, port, node);
936 if (p->n_ports_in < port_id + 1)
937 p->n_ports_in = port_id + 1;
943 port_in_build(struct rte_swx_pipeline *p)
945 struct port_in *port;
948 CHECK(p->n_ports_in, EINVAL);
949 CHECK(rte_is_power_of_2(p->n_ports_in), EINVAL);
951 for (i = 0; i < p->n_ports_in; i++)
952 CHECK(port_in_find(p, i), EINVAL);
954 p->in = calloc(p->n_ports_in, sizeof(struct port_in_runtime));
955 CHECK(p->in, ENOMEM);
957 TAILQ_FOREACH(port, &p->ports_in, node) {
958 struct port_in_runtime *in = &p->in[port->id];
960 in->pkt_rx = port->type->ops.pkt_rx;
968 port_in_build_free(struct rte_swx_pipeline *p)
975 port_in_free(struct rte_swx_pipeline *p)
977 port_in_build_free(p);
981 struct port_in *port;
983 port = TAILQ_FIRST(&p->ports_in);
987 TAILQ_REMOVE(&p->ports_in, port, node);
988 port->type->ops.free(port->obj);
992 /* Input port types. */
994 struct port_in_type *elem;
996 elem = TAILQ_FIRST(&p->port_in_types);
1000 TAILQ_REMOVE(&p->port_in_types, elem, node);
1008 static struct port_out_type *
1009 port_out_type_find(struct rte_swx_pipeline *p, const char *name)
1011 struct port_out_type *elem;
1016 TAILQ_FOREACH(elem, &p->port_out_types, node)
1017 if (!strcmp(elem->name, name))
1024 rte_swx_pipeline_port_out_type_register(struct rte_swx_pipeline *p,
1026 struct rte_swx_port_out_ops *ops)
1028 struct port_out_type *elem;
1031 CHECK_NAME(name, EINVAL);
1033 CHECK(ops->create, EINVAL);
1034 CHECK(ops->free, EINVAL);
1035 CHECK(ops->pkt_tx, EINVAL);
1036 CHECK(ops->stats_read, EINVAL);
1038 CHECK(!port_out_type_find(p, name), EEXIST);
1040 /* Node allocation. */
1041 elem = calloc(1, sizeof(struct port_out_type));
1042 CHECK(elem, ENOMEM);
1044 /* Node initialization. */
1045 strcpy(elem->name, name);
1046 memcpy(&elem->ops, ops, sizeof(*ops));
1048 /* Node add to tailq. */
1049 TAILQ_INSERT_TAIL(&p->port_out_types, elem, node);
1054 static struct port_out *
1055 port_out_find(struct rte_swx_pipeline *p, uint32_t port_id)
1057 struct port_out *port;
1059 TAILQ_FOREACH(port, &p->ports_out, node)
1060 if (port->id == port_id)
1067 rte_swx_pipeline_port_out_config(struct rte_swx_pipeline *p,
1069 const char *port_type_name,
1072 struct port_out_type *type = NULL;
1073 struct port_out *port = NULL;
1078 CHECK(!port_out_find(p, port_id), EINVAL);
1080 CHECK_NAME(port_type_name, EINVAL);
1081 type = port_out_type_find(p, port_type_name);
1082 CHECK(type, EINVAL);
1084 obj = type->ops.create(args);
1087 /* Node allocation. */
1088 port = calloc(1, sizeof(struct port_out));
1089 CHECK(port, ENOMEM);
1091 /* Node initialization. */
1096 /* Node add to tailq. */
1097 TAILQ_INSERT_TAIL(&p->ports_out, port, node);
1098 if (p->n_ports_out < port_id + 1)
1099 p->n_ports_out = port_id + 1;
1105 port_out_build(struct rte_swx_pipeline *p)
1107 struct port_out *port;
1110 CHECK(p->n_ports_out, EINVAL);
1112 for (i = 0; i < p->n_ports_out; i++)
1113 CHECK(port_out_find(p, i), EINVAL);
1115 p->out = calloc(p->n_ports_out, sizeof(struct port_out_runtime));
1116 CHECK(p->out, ENOMEM);
1118 TAILQ_FOREACH(port, &p->ports_out, node) {
1119 struct port_out_runtime *out = &p->out[port->id];
1121 out->pkt_tx = port->type->ops.pkt_tx;
1122 out->flush = port->type->ops.flush;
1123 out->obj = port->obj;
1130 port_out_build_free(struct rte_swx_pipeline *p)
1137 port_out_free(struct rte_swx_pipeline *p)
1139 port_out_build_free(p);
1143 struct port_out *port;
1145 port = TAILQ_FIRST(&p->ports_out);
1149 TAILQ_REMOVE(&p->ports_out, port, node);
1150 port->type->ops.free(port->obj);
1154 /* Output port types. */
1156 struct port_out_type *elem;
1158 elem = TAILQ_FIRST(&p->port_out_types);
1162 TAILQ_REMOVE(&p->port_out_types, elem, node);
1170 static struct extern_type *
1171 extern_type_find(struct rte_swx_pipeline *p, const char *name)
1173 struct extern_type *elem;
1175 TAILQ_FOREACH(elem, &p->extern_types, node)
1176 if (strcmp(elem->name, name) == 0)
1182 static struct extern_type_member_func *
1183 extern_type_member_func_find(struct extern_type *type, const char *name)
1185 struct extern_type_member_func *elem;
1187 TAILQ_FOREACH(elem, &type->funcs, node)
1188 if (strcmp(elem->name, name) == 0)
1194 static struct extern_obj *
1195 extern_obj_find(struct rte_swx_pipeline *p, const char *name)
1197 struct extern_obj *elem;
1199 TAILQ_FOREACH(elem, &p->extern_objs, node)
1200 if (strcmp(elem->name, name) == 0)
1206 static struct field *
1207 extern_obj_mailbox_field_parse(struct rte_swx_pipeline *p,
1209 struct extern_obj **object)
1211 struct extern_obj *obj;
1213 char *obj_name, *field_name;
1215 if ((name[0] != 'e') || (name[1] != '.'))
1218 obj_name = strdup(&name[2]);
1222 field_name = strchr(obj_name, '.');
1231 obj = extern_obj_find(p, obj_name);
1237 f = struct_type_field_find(obj->type->mailbox_struct_type, field_name);
1251 rte_swx_pipeline_extern_type_register(struct rte_swx_pipeline *p,
1253 const char *mailbox_struct_type_name,
1254 rte_swx_extern_type_constructor_t constructor,
1255 rte_swx_extern_type_destructor_t destructor)
1257 struct extern_type *elem;
1258 struct struct_type *mailbox_struct_type;
1262 CHECK_NAME(name, EINVAL);
1263 CHECK(!extern_type_find(p, name), EEXIST);
1265 CHECK_NAME(mailbox_struct_type_name, EINVAL);
1266 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
1267 CHECK(mailbox_struct_type, EINVAL);
1269 CHECK(constructor, EINVAL);
1270 CHECK(destructor, EINVAL);
1272 /* Node allocation. */
1273 elem = calloc(1, sizeof(struct extern_type));
1274 CHECK(elem, ENOMEM);
1276 /* Node initialization. */
1277 strcpy(elem->name, name);
1278 elem->mailbox_struct_type = mailbox_struct_type;
1279 elem->constructor = constructor;
1280 elem->destructor = destructor;
1281 TAILQ_INIT(&elem->funcs);
1283 /* Node add to tailq. */
1284 TAILQ_INSERT_TAIL(&p->extern_types, elem, node);
1290 rte_swx_pipeline_extern_type_member_func_register(struct rte_swx_pipeline *p,
1291 const char *extern_type_name,
1293 rte_swx_extern_type_member_func_t member_func)
1295 struct extern_type *type;
1296 struct extern_type_member_func *type_member;
1300 CHECK(extern_type_name, EINVAL);
1301 type = extern_type_find(p, extern_type_name);
1302 CHECK(type, EINVAL);
1303 CHECK(type->n_funcs < RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX, ENOSPC);
1305 CHECK(name, EINVAL);
1306 CHECK(!extern_type_member_func_find(type, name), EEXIST);
1308 CHECK(member_func, EINVAL);
1310 /* Node allocation. */
1311 type_member = calloc(1, sizeof(struct extern_type_member_func));
1312 CHECK(type_member, ENOMEM);
1314 /* Node initialization. */
1315 strcpy(type_member->name, name);
1316 type_member->func = member_func;
1317 type_member->id = type->n_funcs;
1319 /* Node add to tailq. */
1320 TAILQ_INSERT_TAIL(&type->funcs, type_member, node);
1327 rte_swx_pipeline_extern_object_config(struct rte_swx_pipeline *p,
1328 const char *extern_type_name,
1332 struct extern_type *type;
1333 struct extern_obj *obj;
1338 CHECK_NAME(extern_type_name, EINVAL);
1339 type = extern_type_find(p, extern_type_name);
1340 CHECK(type, EINVAL);
1342 CHECK_NAME(name, EINVAL);
1343 CHECK(!extern_obj_find(p, name), EEXIST);
1345 /* Node allocation. */
1346 obj = calloc(1, sizeof(struct extern_obj));
1349 /* Object construction. */
1350 obj_handle = type->constructor(args);
1356 /* Node initialization. */
1357 strcpy(obj->name, name);
1359 obj->obj = obj_handle;
1360 obj->struct_id = p->n_structs;
1361 obj->id = p->n_extern_objs;
1363 /* Node add to tailq. */
1364 TAILQ_INSERT_TAIL(&p->extern_objs, obj, node);
1372 extern_obj_build(struct rte_swx_pipeline *p)
1376 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1377 struct thread *t = &p->threads[i];
1378 struct extern_obj *obj;
1380 t->extern_objs = calloc(p->n_extern_objs,
1381 sizeof(struct extern_obj_runtime));
1382 CHECK(t->extern_objs, ENOMEM);
1384 TAILQ_FOREACH(obj, &p->extern_objs, node) {
1385 struct extern_obj_runtime *r =
1386 &t->extern_objs[obj->id];
1387 struct extern_type_member_func *func;
1388 uint32_t mailbox_size =
1389 obj->type->mailbox_struct_type->n_bits / 8;
1393 r->mailbox = calloc(1, mailbox_size);
1394 CHECK(r->mailbox, ENOMEM);
1396 TAILQ_FOREACH(func, &obj->type->funcs, node)
1397 r->funcs[func->id] = func->func;
1399 t->structs[obj->struct_id] = r->mailbox;
1407 extern_obj_build_free(struct rte_swx_pipeline *p)
1411 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1412 struct thread *t = &p->threads[i];
1415 if (!t->extern_objs)
1418 for (j = 0; j < p->n_extern_objs; j++) {
1419 struct extern_obj_runtime *r = &t->extern_objs[j];
1424 free(t->extern_objs);
1425 t->extern_objs = NULL;
1430 extern_obj_free(struct rte_swx_pipeline *p)
1432 extern_obj_build_free(p);
1434 /* Extern objects. */
1436 struct extern_obj *elem;
1438 elem = TAILQ_FIRST(&p->extern_objs);
1442 TAILQ_REMOVE(&p->extern_objs, elem, node);
1444 elem->type->destructor(elem->obj);
1450 struct extern_type *elem;
1452 elem = TAILQ_FIRST(&p->extern_types);
1456 TAILQ_REMOVE(&p->extern_types, elem, node);
1459 struct extern_type_member_func *func;
1461 func = TAILQ_FIRST(&elem->funcs);
1465 TAILQ_REMOVE(&elem->funcs, func, node);
1476 static struct extern_func *
1477 extern_func_find(struct rte_swx_pipeline *p, const char *name)
1479 struct extern_func *elem;
1481 TAILQ_FOREACH(elem, &p->extern_funcs, node)
1482 if (strcmp(elem->name, name) == 0)
1488 static struct field *
1489 extern_func_mailbox_field_parse(struct rte_swx_pipeline *p,
1491 struct extern_func **function)
1493 struct extern_func *func;
1495 char *func_name, *field_name;
1497 if ((name[0] != 'f') || (name[1] != '.'))
1500 func_name = strdup(&name[2]);
1504 field_name = strchr(func_name, '.');
1513 func = extern_func_find(p, func_name);
1519 f = struct_type_field_find(func->mailbox_struct_type, field_name);
1533 rte_swx_pipeline_extern_func_register(struct rte_swx_pipeline *p,
1535 const char *mailbox_struct_type_name,
1536 rte_swx_extern_func_t func)
1538 struct extern_func *f;
1539 struct struct_type *mailbox_struct_type;
1543 CHECK_NAME(name, EINVAL);
1544 CHECK(!extern_func_find(p, name), EEXIST);
1546 CHECK_NAME(mailbox_struct_type_name, EINVAL);
1547 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
1548 CHECK(mailbox_struct_type, EINVAL);
1550 CHECK(func, EINVAL);
1552 /* Node allocation. */
1553 f = calloc(1, sizeof(struct extern_func));
1554 CHECK(func, ENOMEM);
1556 /* Node initialization. */
1557 strcpy(f->name, name);
1558 f->mailbox_struct_type = mailbox_struct_type;
1560 f->struct_id = p->n_structs;
1561 f->id = p->n_extern_funcs;
1563 /* Node add to tailq. */
1564 TAILQ_INSERT_TAIL(&p->extern_funcs, f, node);
1565 p->n_extern_funcs++;
1572 extern_func_build(struct rte_swx_pipeline *p)
1576 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1577 struct thread *t = &p->threads[i];
1578 struct extern_func *func;
1580 /* Memory allocation. */
1581 t->extern_funcs = calloc(p->n_extern_funcs,
1582 sizeof(struct extern_func_runtime));
1583 CHECK(t->extern_funcs, ENOMEM);
1585 /* Extern function. */
1586 TAILQ_FOREACH(func, &p->extern_funcs, node) {
1587 struct extern_func_runtime *r =
1588 &t->extern_funcs[func->id];
1589 uint32_t mailbox_size =
1590 func->mailbox_struct_type->n_bits / 8;
1592 r->func = func->func;
1594 r->mailbox = calloc(1, mailbox_size);
1595 CHECK(r->mailbox, ENOMEM);
1597 t->structs[func->struct_id] = r->mailbox;
1605 extern_func_build_free(struct rte_swx_pipeline *p)
1609 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1610 struct thread *t = &p->threads[i];
1613 if (!t->extern_funcs)
1616 for (j = 0; j < p->n_extern_funcs; j++) {
1617 struct extern_func_runtime *r = &t->extern_funcs[j];
1622 free(t->extern_funcs);
1623 t->extern_funcs = NULL;
1628 extern_func_free(struct rte_swx_pipeline *p)
1630 extern_func_build_free(p);
1633 struct extern_func *elem;
1635 elem = TAILQ_FIRST(&p->extern_funcs);
1639 TAILQ_REMOVE(&p->extern_funcs, elem, node);
1647 static struct header *
1648 header_find(struct rte_swx_pipeline *p, const char *name)
1650 struct header *elem;
1652 TAILQ_FOREACH(elem, &p->headers, node)
1653 if (strcmp(elem->name, name) == 0)
1659 static struct header *
1660 header_parse(struct rte_swx_pipeline *p,
1663 if (name[0] != 'h' || name[1] != '.')
1666 return header_find(p, &name[2]);
1669 static struct field *
1670 header_field_parse(struct rte_swx_pipeline *p,
1672 struct header **header)
1676 char *header_name, *field_name;
1678 if ((name[0] != 'h') || (name[1] != '.'))
1681 header_name = strdup(&name[2]);
1685 field_name = strchr(header_name, '.');
1694 h = header_find(p, header_name);
1700 f = struct_type_field_find(h->st, field_name);
1714 rte_swx_pipeline_packet_header_register(struct rte_swx_pipeline *p,
1716 const char *struct_type_name)
1718 struct struct_type *st;
1720 size_t n_headers_max;
1723 CHECK_NAME(name, EINVAL);
1724 CHECK_NAME(struct_type_name, EINVAL);
1726 CHECK(!header_find(p, name), EEXIST);
1728 st = struct_type_find(p, struct_type_name);
1731 n_headers_max = RTE_SIZEOF_FIELD(struct thread, valid_headers) * 8;
1732 CHECK(p->n_headers < n_headers_max, ENOSPC);
1734 /* Node allocation. */
1735 h = calloc(1, sizeof(struct header));
1738 /* Node initialization. */
1739 strcpy(h->name, name);
1741 h->struct_id = p->n_structs;
1742 h->id = p->n_headers;
1744 /* Node add to tailq. */
1745 TAILQ_INSERT_TAIL(&p->headers, h, node);
/*
 * Build step: allocate per-thread header runtime state.
 * For each thread, allocates the header runtime arrays plus two contiguous
 * byte stores (parsed headers and emitted headers), then carves the parsed
 * store into per-header slices in registration order.
 */
1753 header_build(struct rte_swx_pipeline *p)
1756 uint32_t n_bytes = 0, i;
/* Total storage needed = sum of all header sizes (n_bits assumed to be a
 * multiple of 8 -- presumably enforced at struct type registration).
 */
1758 TAILQ_FOREACH(h, &p->headers, node) {
1759 n_bytes += h->st->n_bits / 8;
1762 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1763 struct thread *t = &p->threads[i];
1764 uint32_t offset = 0;
1766 t->headers = calloc(p->n_headers,
1767 sizeof(struct header_runtime));
1768 CHECK(t->headers, ENOMEM);
1770 t->headers_out = calloc(p->n_headers,
1771 sizeof(struct header_out_runtime));
1772 CHECK(t->headers_out, ENOMEM);
1774 t->header_storage = calloc(1, n_bytes);
1775 CHECK(t->header_storage, ENOMEM);
1777 t->header_out_storage = calloc(1, n_bytes);
1778 CHECK(t->header_out_storage, ENOMEM);
/* Give each header its fixed slice of the thread's header storage;
 * ptr0 is the "home" location used when the header is not in the packet.
 */
1780 TAILQ_FOREACH(h, &p->headers, node) {
1781 uint8_t *header_storage;
1783 header_storage = &t->header_storage[offset];
1784 offset += h->st->n_bits / 8;
1786 t->headers[h->id].ptr0 = header_storage;
1787 t->structs[h->struct_id] = header_storage;
/*
 * Undo header_build(): release the per-thread header runtime arrays and
 * storage. Pointers are NULLed after free so the function is idempotent.
 */
1795 header_build_free(struct rte_swx_pipeline *p)
1799 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1800 struct thread *t = &p->threads[i];
1802 free(t->headers_out);
1803 t->headers_out = NULL;
1808 free(t->header_out_storage);
1809 t->header_out_storage = NULL;
1811 free(t->header_storage);
1812 t->header_storage = NULL;
/*
 * Full header teardown: free build-time runtime state, then drain the
 * header registration list (loop structure partially elided).
 */
1817 header_free(struct rte_swx_pipeline *p)
1819 header_build_free(p);
1822 struct header *elem;
1824 elem = TAILQ_FIRST(&p->headers);
1828 TAILQ_REMOVE(&p->headers, elem, node);
/*
 * Resolve an "m.<field>" reference against the pipeline's metadata struct
 * type. Returns NULL (on elided lines) when no metadata struct is registered
 * or the name lacks the "m." prefix.
 */
1836 static struct field *
1837 metadata_field_parse(struct rte_swx_pipeline *p, const char *name)
1839 if (!p->metadata_st)
1842 if (name[0] != 'm' || name[1] != '.')
1845 return struct_type_field_find(p->metadata_st, &name[2]);
/*
 * Public API: register the (single) packet metadata struct type.
 * May only be done once per pipeline; the metadata gets the next struct slot.
 */
1849 rte_swx_pipeline_packet_metadata_register(struct rte_swx_pipeline *p,
1850 const char *struct_type_name)
1852 struct struct_type *st = NULL;
1856 CHECK_NAME(struct_type_name, EINVAL);
1857 st = struct_type_find(p, struct_type_name);
/* Reject a second registration: only one metadata struct per pipeline. */
1859 CHECK(!p->metadata_st, EINVAL);
1861 p->metadata_st = st;
1862 p->metadata_struct_id = p->n_structs;
/*
 * Build step: allocate one zero-initialized metadata buffer per thread and
 * publish it in the thread's struct pointer table.
 */
1870 metadata_build(struct rte_swx_pipeline *p)
1872 uint32_t n_bytes = p->metadata_st->n_bits / 8;
1875 /* Thread-level initialization. */
1876 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1877 struct thread *t = &p->threads[i];
1880 metadata = calloc(1, n_bytes);
1881 CHECK(metadata, ENOMEM);
1883 t->metadata = metadata;
1884 t->structs[p->metadata_struct_id] = metadata;
/* Undo metadata_build(): release per-thread metadata buffers (free calls
 * on elided lines).
 */
1891 metadata_build_free(struct rte_swx_pipeline *p)
1895 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1896 struct thread *t = &p->threads[i];
/* Full metadata teardown wrapper. */
1904 metadata_free(struct rte_swx_pipeline *p)
1906 metadata_build_free(p);
1912 static struct field *
1913 action_field_parse(struct action *action, const char *name);
/*
 * Dispatch a field reference by prefix to the right resolver:
 * header field ("h."), metadata field ("m."), action argument ("t."),
 * extern object mailbox ("e.") or extern function mailbox ("f.")
 * (the switch/prefix tests are on elided lines). On success, *struct_id
 * receives the struct slot the field lives in.
 */
1915 static struct field *
1916 struct_field_parse(struct rte_swx_pipeline *p,
1917 struct action *action,
1919 uint32_t *struct_id)
1926 struct header *header;
1928 f = header_field_parse(p, name, &header);
1932 *struct_id = header->struct_id;
1938 f = metadata_field_parse(p, name);
1942 *struct_id = p->metadata_struct_id;
/* Action arguments are only valid inside an action context. */
1951 f = action_field_parse(action, name);
1961 struct extern_obj *obj;
1963 f = extern_obj_mailbox_field_parse(p, name, &obj);
1967 *struct_id = obj->struct_id;
1973 struct extern_func *func;
1975 f = extern_func_mailbox_field_parse(p, name, &func);
1979 *struct_id = func->struct_id;
/* Advance to the next input port, round robin.
 * NOTE(review): the mask form assumes n_ports_in is a power of two --
 * presumably enforced at build time, confirm in full source.
 */
1989 pipeline_port_inc(struct rte_swx_pipeline *p)
1991 p->port_id = (p->port_id + 1) & (p->n_ports_in - 1);
/* Reset a thread's instruction pointer to the pipeline entry point. */
1995 thread_ip_reset(struct rte_swx_pipeline *p, struct thread *t)
1997 t->ip = p->instructions;
2001 thread_ip_inc(struct rte_swx_pipeline *p);
/* Advance the current thread's instruction pointer (increment elided). */
2004 thread_ip_inc(struct rte_swx_pipeline *p)
2006 struct thread *t = &p->threads[p->thread_id];
/* Conditionally advance the instruction pointer (body elided). */
2012 thread_ip_inc_cond(struct thread *t, int cond)
/* Switch to the next thread, round robin over the power-of-two thread pool. */
2018 thread_yield(struct rte_swx_pipeline *p)
2020 p->thread_id = (p->thread_id + 1) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
/*
 * Translate "rx m.<field>": receive a packet and store the input port ID
 * into the given metadata field. Not allowed inside actions.
 */
2027 instr_rx_translate(struct rte_swx_pipeline *p,
2028 struct action *action,
2031 struct instruction *instr,
2032 struct instruction_data *data __rte_unused)
/* rx is a pipeline-level instruction only. */
2036 CHECK(!action, EINVAL);
2037 CHECK(n_tokens == 2, EINVAL);
2039 f = metadata_field_parse(p, tokens[1]);
2042 instr->type = INSTR_RX;
/* Byte offset/width of the metadata field that receives the port ID. */
2043 instr->io.io.offset = f->offset / 8;
2044 instr->io.io.n_bits = f->n_bits;
2049 instr_rx_exec(struct rte_swx_pipeline *p);
/*
 * Execute rx: pull a packet from the current input port, reset per-packet
 * header state, record the port ID in metadata, snapshot the table state,
 * then rotate to the next port. The IP only advances when a packet was
 * actually received, otherwise rx is retried.
 */
2052 instr_rx_exec(struct rte_swx_pipeline *p)
2054 struct thread *t = &p->threads[p->thread_id];
2055 struct instruction *ip = t->ip;
2056 struct port_in_runtime *port = &p->in[p->port_id];
2057 struct rte_swx_pkt *pkt = &t->pkt;
2061 pkt_received = port->pkt_rx(port->obj, pkt);
/* Point at the first unparsed byte and warm the cache for parsing. */
2062 t->ptr = &pkt->pkt[pkt->offset];
2063 rte_prefetch0(t->ptr);
2065 TRACE("[Thread %2u] rx %s from port %u\n",
2067 pkt_received ? "1 pkt" : "0 pkts",
/* Fresh packet: no headers parsed, none queued for emission. */
2071 t->valid_headers = 0;
2072 t->n_headers_out = 0;
2075 METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, p->port_id);
/* Snapshot table state for the lifetime of this packet. */
2078 t->table_state = p->table_state;
2081 pipeline_port_inc(p);
2082 thread_ip_inc_cond(t, pkt_received);
/*
 * Translate "tx m.<field>": transmit the packet to the output port whose ID
 * is read from the given metadata field.
 */
2090 instr_tx_translate(struct rte_swx_pipeline *p,
2091 struct action *action __rte_unused,
2094 struct instruction *instr,
2095 struct instruction_data *data __rte_unused)
2099 CHECK(n_tokens == 2, EINVAL);
2101 f = metadata_field_parse(p, tokens[1]);
2104 instr->type = INSTR_TX;
/* Byte offset/width of the metadata field holding the output port ID. */
2105 instr->io.io.offset = f->offset / 8;
2106 instr->io.io.n_bits = f->n_bits;
/*
 * Materialize the emitted headers in front of the packet payload before TX.
 * Fast paths avoid copying when the emitted headers already sit contiguously
 * just before t->ptr; the general path concatenates everything through the
 * scratch header_out_storage buffer.
 */
2111 emit_handler(struct thread *t)
2113 struct header_out_runtime *h0 = &t->headers_out[0];
2114 struct header_out_runtime *h1 = &t->headers_out[1];
2115 uint32_t offset = 0, i;
2117 /* No header change or header decapsulation. */
2118 if ((t->n_headers_out == 1) &&
2119 (h0->ptr + h0->n_bytes == t->ptr)) {
2120 TRACE("Emit handler: no header change or header decap.\n");
/* Headers are already in place: just extend the packet over them. */
2122 t->pkt.offset -= h0->n_bytes;
2123 t->pkt.length += h0->n_bytes;
2128 /* Header encapsulation (optionally, with prior header decasulation). */
2129 if ((t->n_headers_out == 2) &&
2130 (h1->ptr + h1->n_bytes == t->ptr) &&
2131 (h0->ptr == h0->ptr0)) {
2134 TRACE("Emit handler: header encapsulation.\n");
/* h1 is in place; only h0 (new outer header) must be copied in front. */
2136 offset = h0->n_bytes + h1->n_bytes;
2137 memcpy(t->ptr - offset, h0->ptr, h0->n_bytes);
2138 t->pkt.offset -= offset;
2139 t->pkt.length += offset;
2144 /* Header insertion. */
2147 /* Header extraction. */
2150 /* For any other case. */
2151 TRACE("Emit handler: complex case.\n");
/* Gather all emitted headers into the scratch buffer... */
2153 for (i = 0; i < t->n_headers_out; i++) {
2154 struct header_out_runtime *h = &t->headers_out[i];
2156 memcpy(&t->header_out_storage[offset], h->ptr, h->n_bytes);
2157 offset += h->n_bytes;
/* ...then copy the whole run just in front of the payload. */
2161 memcpy(t->ptr - offset, t->header_out_storage, offset);
2162 t->pkt.offset -= offset;
2163 t->pkt.length += offset;
2168 instr_tx_exec(struct rte_swx_pipeline *p);
/*
 * Execute tx: read the destination port ID from metadata, finalize the
 * emitted headers (emit_handler call elided), hand the packet to the output
 * port, then loop the thread back to the pipeline entry point for the next
 * packet.
 */
2171 instr_tx_exec(struct rte_swx_pipeline *p)
2173 struct thread *t = &p->threads[p->thread_id];
2174 struct instruction *ip = t->ip;
2175 uint64_t port_id = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
2176 struct port_out_runtime *port = &p->out[port_id];
2177 struct rte_swx_pkt *pkt = &t->pkt;
2179 TRACE("[Thread %2u]: tx 1 pkt to port %u\n",
2187 port->pkt_tx(port->obj, pkt);
/* Packet done: restart this thread at the first instruction (rx). */
2190 thread_ip_reset(p, t);
/*
 * Translate "extract h.<header>": parse the named header from the packet.
 * Not allowed inside actions. Slot 0 of the instruction's header arrays is
 * filled; fused multi-extract variants use the remaining slots.
 */
2198 instr_hdr_extract_translate(struct rte_swx_pipeline *p,
2199 struct action *action,
2202 struct instruction *instr,
2203 struct instruction_data *data __rte_unused)
2207 CHECK(!action, EINVAL);
2208 CHECK(n_tokens == 2, EINVAL);
2210 h = header_parse(p, tokens[1]);
2213 instr->type = INSTR_HDR_EXTRACT;
2214 instr->io.hdr.header_id[0] = h->id;
2215 instr->io.hdr.struct_id[0] = h->struct_id;
2216 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
2221 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract);
/*
 * Common extract executor for 1..8 fused extracts: for each header, point its
 * struct slot directly at the packet bytes (zero copy) and mark it valid.
 * Per-iteration pointer/offset advancement happens on elided lines.
 */
2224 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract)
2226 struct thread *t = &p->threads[p->thread_id];
2227 struct instruction *ip = t->ip;
/* Work on local copies; committed back once at the end. */
2228 uint64_t valid_headers = t->valid_headers;
2229 uint8_t *ptr = t->ptr;
2230 uint32_t offset = t->pkt.offset;
2231 uint32_t length = t->pkt.length;
2234 for (i = 0; i < n_extract; i++) {
2235 uint32_t header_id = ip->io.hdr.header_id[i];
2236 uint32_t struct_id = ip->io.hdr.struct_id[i];
2237 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
2239 TRACE("[Thread %2u]: extract header %u (%u bytes)\n",
/* Header maps in place onto the packet buffer. */
2245 t->structs[struct_id] = ptr;
2246 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
2255 t->valid_headers = valid_headers;
2258 t->pkt.offset = offset;
2259 t->pkt.length = length;
/* Thin wrappers over __instr_hdr_extract_exec() for 1..8 fused extract
 * instructions; the fused variants are produced by instruction optimization.
 */
2264 instr_hdr_extract_exec(struct rte_swx_pipeline *p)
2266 __instr_hdr_extract_exec(p, 1);
2273 instr_hdr_extract2_exec(struct rte_swx_pipeline *p)
2275 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
2278 __instr_hdr_extract_exec(p, 2);
2285 instr_hdr_extract3_exec(struct rte_swx_pipeline *p)
2287 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
2290 __instr_hdr_extract_exec(p, 3);
2297 instr_hdr_extract4_exec(struct rte_swx_pipeline *p)
2299 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
2302 __instr_hdr_extract_exec(p, 4);
2309 instr_hdr_extract5_exec(struct rte_swx_pipeline *p)
2311 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
2314 __instr_hdr_extract_exec(p, 5);
2321 instr_hdr_extract6_exec(struct rte_swx_pipeline *p)
2323 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
2326 __instr_hdr_extract_exec(p, 6);
2333 instr_hdr_extract7_exec(struct rte_swx_pipeline *p)
2335 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
2338 __instr_hdr_extract_exec(p, 7);
2345 instr_hdr_extract8_exec(struct rte_swx_pipeline *p)
2347 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
2350 __instr_hdr_extract_exec(p, 8);
/*
 * Translate "emit h.<header>": queue the named header for emission in front
 * of the payload at tx time. Slot 0 of the header arrays is filled; fused
 * variants use the remaining slots.
 */
2360 instr_hdr_emit_translate(struct rte_swx_pipeline *p,
2361 struct action *action __rte_unused,
2364 struct instruction *instr,
2365 struct instruction_data *data __rte_unused)
2369 CHECK(n_tokens == 2, EINVAL);
2371 h = header_parse(p, tokens[1]);
2374 instr->type = INSTR_HDR_EMIT;
2375 instr->io.hdr.header_id[0] = h->id;
2376 instr->io.hdr.struct_id[0] = h->struct_id;
2377 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
2382 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit);
/*
 * Common emit executor for 1..8 fused emits: append each header to the
 * out-headers list, merging a header into the previous out-entry when it is
 * byte-contiguous with it (so emit_handler can take its fast paths).
 * Several branch bodies and the merge/append control flow are on elided
 * lines.
 */
2385 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit)
2387 struct thread *t = &p->threads[p->thread_id];
2388 struct instruction *ip = t->ip;
2389 uint32_t n_headers_out = t->n_headers_out;
/* Last out-entry so far; candidate for contiguous merge. */
2390 struct header_out_runtime *ho = &t->headers_out[n_headers_out - 1];
2391 uint8_t *ho_ptr = NULL;
2392 uint32_t ho_nbytes = 0, i;
2394 for (i = 0; i < n_emit; i++) {
2395 uint32_t header_id = ip->io.hdr.header_id[i];
2396 uint32_t struct_id = ip->io.hdr.struct_id[i];
2397 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
2399 struct header_runtime *hi = &t->headers[header_id];
2400 uint8_t *hi_ptr = t->structs[struct_id];
2402 TRACE("[Thread %2u]: emit header %u\n",
/* First emitted header: start the out list at slot 0. */
2408 if (!t->n_headers_out) {
2409 ho = &t->headers_out[0];
2411 ho->ptr0 = hi->ptr0;
2415 ho_nbytes = n_bytes;
2422 ho_nbytes = ho->n_bytes;
/* Contiguous with the previous out-entry: extend it in place. */
2426 if (ho_ptr + ho_nbytes == hi_ptr) {
2427 ho_nbytes += n_bytes;
2429 ho->n_bytes = ho_nbytes;
/* Not contiguous: start a new out-entry. */
2432 ho->ptr0 = hi->ptr0;
2436 ho_nbytes = n_bytes;
2442 ho->n_bytes = ho_nbytes;
2443 t->n_headers_out = n_headers_out;
/* Emit executor wrappers. The *_tx variants fuse N emits plus the final tx
 * (hence "next N+1 instructions are fused"); their tx tail calls are on
 * elided lines.
 */
2447 instr_hdr_emit_exec(struct rte_swx_pipeline *p)
2449 __instr_hdr_emit_exec(p, 1);
2456 instr_hdr_emit_tx_exec(struct rte_swx_pipeline *p)
2458 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
2461 __instr_hdr_emit_exec(p, 1);
2466 instr_hdr_emit2_tx_exec(struct rte_swx_pipeline *p)
2468 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
2471 __instr_hdr_emit_exec(p, 2);
2476 instr_hdr_emit3_tx_exec(struct rte_swx_pipeline *p)
2478 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
2481 __instr_hdr_emit_exec(p, 3);
2486 instr_hdr_emit4_tx_exec(struct rte_swx_pipeline *p)
2488 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
2491 __instr_hdr_emit_exec(p, 4);
2496 instr_hdr_emit5_tx_exec(struct rte_swx_pipeline *p)
2498 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
2501 __instr_hdr_emit_exec(p, 5);
2506 instr_hdr_emit6_tx_exec(struct rte_swx_pipeline *p)
2508 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
2511 __instr_hdr_emit_exec(p, 6);
2516 instr_hdr_emit7_tx_exec(struct rte_swx_pipeline *p)
2518 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
2521 __instr_hdr_emit_exec(p, 7);
2526 instr_hdr_emit8_tx_exec(struct rte_swx_pipeline *p)
2528 TRACE("[Thread %2u] *** The next 9 instructions are fused. ***\n",
2531 __instr_hdr_emit_exec(p, 8);
/* Translate "validate h.<header>": mark the header valid at run time. */
2539 instr_hdr_validate_translate(struct rte_swx_pipeline *p,
2540 struct action *action __rte_unused,
2543 struct instruction *instr,
2544 struct instruction_data *data __rte_unused)
2548 CHECK(n_tokens == 2, EINVAL);
2550 h = header_parse(p, tokens[1]);
2553 instr->type = INSTR_HDR_VALIDATE;
2554 instr->valid.header_id = h->id;
/* Execute validate: set the header's bit in the thread's valid mask. */
2559 instr_hdr_validate_exec(struct rte_swx_pipeline *p)
2561 struct thread *t = &p->threads[p->thread_id];
2562 struct instruction *ip = t->ip;
2563 uint32_t header_id = ip->valid.header_id;
2565 TRACE("[Thread %2u] validate header %u\n", p->thread_id, header_id);
2568 t->valid_headers = MASK64_BIT_SET(t->valid_headers, header_id);
/* Translate "invalidate h.<header>": mark the header invalid at run time. */
2578 instr_hdr_invalidate_translate(struct rte_swx_pipeline *p,
2579 struct action *action __rte_unused,
2582 struct instruction *instr,
2583 struct instruction_data *data __rte_unused)
2587 CHECK(n_tokens == 2, EINVAL);
2589 h = header_parse(p, tokens[1])
2592 instr->type = INSTR_HDR_INVALIDATE;
2593 instr->valid.header_id = h->id;
/* Execute invalidate: clear the header's bit in the thread's valid mask. */
2598 instr_hdr_invalidate_exec(struct rte_swx_pipeline *p)
2600 struct thread *t = &p->threads[p->thread_id];
2601 struct instruction *ip = t->ip;
2602 uint32_t header_id = ip->valid.header_id;
2604 TRACE("[Thread %2u] invalidate header %u\n", p->thread_id, header_id);
2607 t->valid_headers = MASK64_BIT_CLR(t->valid_headers, header_id);
/*
 * Translate "mov <dst> <src>":
 *  - MOV:   both operands same endianness domain;
 *  - MOV_S: exactly one operand is a header field (network byte order), so a
 *           byte swap is needed at run time;
 *  - MOV_I: source is an immediate integer (fallback path when src does not
 *           parse as a field).
 */
2617 instr_mov_translate(struct rte_swx_pipeline *p,
2618 struct action *action,
2621 struct instruction *instr,
2622 struct instruction_data *data __rte_unused)
2624 char *dst = tokens[1], *src = tokens[2];
2625 struct field *fdst, *fsrc;
2626 uint32_t dst_struct_id, src_struct_id, src_val;
2628 CHECK(n_tokens == 3, EINVAL);
/* Destination must be a field (header/metadata); immediates not allowed. */
2630 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
2631 CHECK(fdst, EINVAL);
2634 fsrc = struct_field_parse(p, action, src, &src_struct_id);
2636 instr->type = INSTR_MOV;
/* Mixed header/non-header operands need the byte-swapping variant. */
2637 if ((dst[0] == 'h' && src[0] != 'h') ||
2638 (dst[0] != 'h' && src[0] == 'h'))
2639 instr->type = INSTR_MOV_S;
2641 instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
2642 instr->mov.dst.n_bits = fdst->n_bits;
2643 instr->mov.dst.offset = fdst->offset / 8;
2644 instr->mov.src.struct_id = (uint8_t)src_struct_id;
2645 instr->mov.src.n_bits = fsrc->n_bits;
2646 instr->mov.src.offset = fsrc->offset / 8;
/* src is not a field: parse it as an immediate; must consume the whole
 * token.
 */
2651 src_val = strtoul(src, &src, 0);
2652 CHECK(!src[0], EINVAL);
/* NOTE(review): immediate is converted to network order here, presumably
 * only when the destination is a header field -- the guard is on an elided
 * line, confirm in full source.
 */
2655 src_val = htonl(src_val);
2657 instr->type = INSTR_MOV_I;
2658 instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
2659 instr->mov.dst.n_bits = fdst->n_bits;
2660 instr->mov.dst.offset = fdst->offset / 8;
2661 instr->mov.src_val = (uint32_t)src_val;
/* mov executors; the actual field copy is done by macros on elided lines.
 * Plain, byte-swapping (s) and immediate (i) variants.
 */
2666 instr_mov_exec(struct rte_swx_pipeline *p)
2668 struct thread *t = &p->threads[p->thread_id];
2669 struct instruction *ip = t->ip;
2671 TRACE("[Thread %2u] mov\n",
2681 instr_mov_s_exec(struct rte_swx_pipeline *p)
2683 struct thread *t = &p->threads[p->thread_id];
2684 struct instruction *ip = t->ip;
2686 TRACE("[Thread %2u] mov (s)\n",
2696 instr_mov_i_exec(struct rte_swx_pipeline *p)
2698 struct thread *t = &p->threads[p->thread_id];
2699 struct instruction *ip = t->ip;
2701 TRACE("[Thread %2u] mov m.f %x\n",
/*
 * Translate "dma h.<header> t.<field>": bulk-copy action (table) data into a
 * header. Only valid inside an action, since the source is an action
 * argument.
 */
2715 instr_dma_translate(struct rte_swx_pipeline *p,
2716 struct action *action,
2719 struct instruction *instr,
2720 struct instruction_data *data __rte_unused)
2722 char *dst = tokens[1];
2723 char *src = tokens[2];
2727 CHECK(action, EINVAL);
2728 CHECK(n_tokens == 3, EINVAL);
2730 h = header_parse(p, dst);
2733 tf = action_field_parse(action, src);
2736 instr->type = INSTR_DMA_HT;
2737 instr->dma.dst.header_id[0] = h->id;
2738 instr->dma.dst.struct_id[0] = h->struct_id;
/* Whole header is copied; source offset is within the action data. */
2739 instr->dma.n_bytes[0] = h->st->n_bits / 8;
2740 instr->dma.src.offset[0] = tf->offset / 8;
2746 __instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma);
/*
 * Common dma executor for 1..8 fused copies: copy action data into each
 * destination header and mark it valid. If the header is not currently
 * valid, the copy targets its home storage (ptr0) rather than the packet.
 */
2749 __instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma)
2751 struct thread *t = &p->threads[p->thread_id];
2752 struct instruction *ip = t->ip;
/* Struct slot 0 holds the current action's data (table entry). */
2753 uint8_t *action_data = t->structs[0];
2754 uint64_t valid_headers = t->valid_headers;
2757 for (i = 0; i < n_dma; i++) {
2758 uint32_t header_id = ip->dma.dst.header_id[i];
2759 uint32_t struct_id = ip->dma.dst.struct_id[i];
2760 uint32_t offset = ip->dma.src.offset[i];
2761 uint32_t n_bytes = ip->dma.n_bytes[i];
2763 struct header_runtime *h = &t->headers[header_id];
2764 uint8_t *h_ptr0 = h->ptr0;
2765 uint8_t *h_ptr = t->structs[struct_id];
/* Valid header: write where it currently lives; invalid: write to ptr0. */
2767 void *dst = MASK64_BIT_GET(valid_headers, header_id) ?
2769 void *src = &action_data[offset];
2771 TRACE("[Thread %2u] dma h.s t.f\n", p->thread_id);
2774 memcpy(dst, src, n_bytes);
2775 t->structs[struct_id] = dst;
2776 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
2779 t->valid_headers = valid_headers;
/* Thin wrappers over __instr_dma_ht_exec() for 1..8 fused dma instructions. */
2783 instr_dma_ht_exec(struct rte_swx_pipeline *p)
2785 __instr_dma_ht_exec(p, 1);
2792 instr_dma_ht2_exec(struct rte_swx_pipeline *p)
2794 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
2797 __instr_dma_ht_exec(p, 2);
2804 instr_dma_ht3_exec(struct rte_swx_pipeline *p)
2806 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
2809 __instr_dma_ht_exec(p, 3);
2816 instr_dma_ht4_exec(struct rte_swx_pipeline *p)
2818 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
2821 __instr_dma_ht_exec(p, 4);
2828 instr_dma_ht5_exec(struct rte_swx_pipeline *p)
2830 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
2833 __instr_dma_ht_exec(p, 5);
2840 instr_dma_ht6_exec(struct rte_swx_pipeline *p)
2842 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
2845 __instr_dma_ht_exec(p, 6);
2852 instr_dma_ht7_exec(struct rte_swx_pipeline *p)
2854 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
2857 __instr_dma_ht_exec(p, 7);
2864 instr_dma_ht8_exec(struct rte_swx_pipeline *p)
2866 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
2869 __instr_dma_ht_exec(p, 8);
/*
 * Translate "add <dst> <src>". Operand endianness picks the variant:
 * plain ADD (both host order), ADD_HM/ADD_MH/ADD_HH for header ('h', network
 * order) vs metadata ('m', host order) mixes, and ADD_MI/ADD_HI for
 * immediate sources.
 */
2879 instr_alu_add_translate(struct rte_swx_pipeline *p,
2880 struct action *action,
2883 struct instruction *instr,
2884 struct instruction_data *data __rte_unused)
2886 char *dst = tokens[1], *src = tokens[2];
2887 struct field *fdst, *fsrc;
2888 uint32_t dst_struct_id, src_struct_id, src_val;
2890 CHECK(n_tokens == 3, EINVAL);
2892 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
2893 CHECK(fdst, EINVAL);
2895 /* ADD, ADD_HM, ADD_MH, ADD_HH. */
2896 fsrc = struct_field_parse(p, action, src, &src_struct_id);
2898 instr->type = INSTR_ALU_ADD;
2899 if (dst[0] == 'h' && src[0] == 'm')
2900 instr->type = INSTR_ALU_ADD_HM;
2901 if (dst[0] == 'm' && src[0] == 'h')
2902 instr->type = INSTR_ALU_ADD_MH;
2903 if (dst[0] == 'h' && src[0] == 'h')
2904 instr->type = INSTR_ALU_ADD_HH;
2906 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
2907 instr->alu.dst.n_bits = fdst->n_bits;
2908 instr->alu.dst.offset = fdst->offset / 8;
2909 instr->alu.src.struct_id = (uint8_t)src_struct_id;
2910 instr->alu.src.n_bits = fsrc->n_bits;
2911 instr->alu.src.offset = fsrc->offset / 8;
2915 /* ADD_MI, ADD_HI. */
/* src is not a field: parse as immediate; must consume the whole token. */
2916 src_val = strtoul(src, &src, 0);
2917 CHECK(!src[0], EINVAL);
2919 instr->type = INSTR_ALU_ADD_MI;
2921 instr->type = INSTR_ALU_ADD_HI;
2923 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
2924 instr->alu.dst.n_bits = fdst->n_bits;
2925 instr->alu.dst.offset = fdst->offset / 8;
2926 instr->alu.src_val = (uint32_t)src_val;
/*
 * Translate "sub <dst> <src>". Mirrors instr_alu_add_translate(): variant
 * is chosen by the header/metadata/immediate nature of the operands.
 */
2931 instr_alu_sub_translate(struct rte_swx_pipeline *p,
2932 struct action *action,
2935 struct instruction *instr,
2936 struct instruction_data *data __rte_unused)
2938 char *dst = tokens[1], *src = tokens[2];
2939 struct field *fdst, *fsrc;
2940 uint32_t dst_struct_id, src_struct_id, src_val;
2942 CHECK(n_tokens == 3, EINVAL);
2944 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
2945 CHECK(fdst, EINVAL);
2947 /* SUB, SUB_HM, SUB_MH, SUB_HH. */
2948 fsrc = struct_field_parse(p, action, src, &src_struct_id);
2950 instr->type = INSTR_ALU_SUB;
2951 if (dst[0] == 'h' && src[0] == 'm')
2952 instr->type = INSTR_ALU_SUB_HM;
2953 if (dst[0] == 'm' && src[0] == 'h')
2954 instr->type = INSTR_ALU_SUB_MH;
2955 if (dst[0] == 'h' && src[0] == 'h')
2956 instr->type = INSTR_ALU_SUB_HH;
2958 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
2959 instr->alu.dst.n_bits = fdst->n_bits;
2960 instr->alu.dst.offset = fdst->offset / 8;
2961 instr->alu.src.struct_id = (uint8_t)src_struct_id;
2962 instr->alu.src.n_bits = fsrc->n_bits;
2963 instr->alu.src.offset = fsrc->offset / 8;
2967 /* SUB_MI, SUB_HI. */
/* src is not a field: parse as immediate; must consume the whole token. */
2968 src_val = strtoul(src, &src, 0);
2969 CHECK(!src[0], EINVAL);
2971 instr->type = INSTR_ALU_SUB_MI;
2973 instr->type = INSTR_ALU_SUB_HI;
2975 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
2976 instr->alu.dst.n_bits = fdst->n_bits;
2977 instr->alu.dst.offset = fdst->offset / 8;
2978 instr->alu.src_val = (uint32_t)src_val;
/* ALU add/sub executors; per-variant arithmetic is performed by macros on
 * elided lines. Suffixes: mh/hm/hh = metadata/header operand mix, mi/hi =
 * immediate source.
 */
2983 instr_alu_add_exec(struct rte_swx_pipeline *p)
2985 struct thread *t = &p->threads[p->thread_id];
2986 struct instruction *ip = t->ip;
2988 TRACE("[Thread %2u] add\n", p->thread_id);
2998 instr_alu_add_mh_exec(struct rte_swx_pipeline *p)
3000 struct thread *t = &p->threads[p->thread_id];
3001 struct instruction *ip = t->ip;
3003 TRACE("[Thread %2u] add (mh)\n", p->thread_id);
3013 instr_alu_add_hm_exec(struct rte_swx_pipeline *p)
3015 struct thread *t = &p->threads[p->thread_id];
3016 struct instruction *ip = t->ip;
3018 TRACE("[Thread %2u] add (hm)\n", p->thread_id);
3028 instr_alu_add_hh_exec(struct rte_swx_pipeline *p)
3030 struct thread *t = &p->threads[p->thread_id];
3031 struct instruction *ip = t->ip;
3033 TRACE("[Thread %2u] add (hh)\n", p->thread_id);
3043 instr_alu_add_mi_exec(struct rte_swx_pipeline *p)
3045 struct thread *t = &p->threads[p->thread_id];
3046 struct instruction *ip = t->ip;
3048 TRACE("[Thread %2u] add (mi)\n", p->thread_id);
3058 instr_alu_add_hi_exec(struct rte_swx_pipeline *p)
3060 struct thread *t = &p->threads[p->thread_id];
3061 struct instruction *ip = t->ip;
3063 TRACE("[Thread %2u] add (hi)\n", p->thread_id);
3073 instr_alu_sub_exec(struct rte_swx_pipeline *p)
3075 struct thread *t = &p->threads[p->thread_id];
3076 struct instruction *ip = t->ip;
3078 TRACE("[Thread %2u] sub\n", p->thread_id);
3088 instr_alu_sub_mh_exec(struct rte_swx_pipeline *p)
3090 struct thread *t = &p->threads[p->thread_id];
3091 struct instruction *ip = t->ip;
3093 TRACE("[Thread %2u] sub (mh)\n", p->thread_id);
3103 instr_alu_sub_hm_exec(struct rte_swx_pipeline *p)
3105 struct thread *t = &p->threads[p->thread_id];
3106 struct instruction *ip = t->ip;
3108 TRACE("[Thread %2u] sub (hm)\n", p->thread_id);
3118 instr_alu_sub_hh_exec(struct rte_swx_pipeline *p)
3120 struct thread *t = &p->threads[p->thread_id];
3121 struct instruction *ip = t->ip;
3123 TRACE("[Thread %2u] sub (hh)\n", p->thread_id);
3133 instr_alu_sub_mi_exec(struct rte_swx_pipeline *p)
3135 struct thread *t = &p->threads[p->thread_id];
3136 struct instruction *ip = t->ip;
3138 TRACE("[Thread %2u] sub (mi)\n", p->thread_id);
3148 instr_alu_sub_hi_exec(struct rte_swx_pipeline *p)
3150 struct thread *t = &p->threads[p->thread_id];
3151 struct instruction *ip = t->ip;
3153 TRACE("[Thread %2u] sub (hi)\n", p->thread_id);
3162 #define RTE_SWX_INSTRUCTION_TOKENS_MAX 16
/*
 * Translate one instruction string into its binary form.
 * Tokenizes the string in place, strips an optional "<label> :" prefix into
 * data->label, then dispatches on the mnemonic to the per-instruction
 * translator. Returns a negative errno via CHECK on parse failure.
 */
3165 instr_translate(struct rte_swx_pipeline *p,
3166 struct action *action,
3168 struct instruction *instr,
3169 struct instruction_data *data)
3171 char *tokens[RTE_SWX_INSTRUCTION_TOKENS_MAX];
3172 int n_tokens = 0, tpos = 0;
3174 /* Parse the instruction string into tokens. */
/* strtok_r mutates the string; caller passes a private copy. */
3178 token = strtok_r(string, " \t\v", &string);
3182 CHECK(n_tokens < RTE_SWX_INSTRUCTION_TOKENS_MAX, EINVAL);
3184 tokens[n_tokens] = token;
3188 CHECK(n_tokens, EINVAL);
3190 /* Handle the optional instruction label. */
3191 if ((n_tokens >= 2) && !strcmp(tokens[1], ":")) {
3192 strcpy(data->label, tokens[0]);
/* A label alone is not an instruction. */
3195 CHECK(n_tokens - tpos, EINVAL);
3198 /* Identify the instruction type. */
3199 if (!strcmp(tokens[tpos], "rx"))
3200 return instr_rx_translate(p,
3207 if (!strcmp(tokens[tpos], "tx"))
3208 return instr_tx_translate(p,
3215 if (!strcmp(tokens[tpos], "extract"))
3216 return instr_hdr_extract_translate(p,
3223 if (!strcmp(tokens[tpos], "emit"))
3224 return instr_hdr_emit_translate(p,
3231 if (!strcmp(tokens[tpos], "validate"))
3232 return instr_hdr_validate_translate(p,
3239 if (!strcmp(tokens[tpos], "invalidate"))
3240 return instr_hdr_invalidate_translate(p,
3247 if (!strcmp(tokens[tpos], "mov"))
3248 return instr_mov_translate(p,
3255 if (!strcmp(tokens[tpos], "dma"))
3256 return instr_dma_translate(p,
3263 if (!strcmp(tokens[tpos], "add"))
3264 return instr_alu_add_translate(p,
3271 if (!strcmp(tokens[tpos], "sub"))
3272 return instr_alu_sub_translate(p,
/* Count how many of the n instructions jump to the given label. */
3283 label_is_used(struct instruction_data *data, uint32_t n, const char *label)
3285 uint32_t count = 0, i;
3290 for (i = 0; i < n; i++)
3291 if (!strcmp(label, data[i].jmp_label))
/*
 * Post-translation label pass: reject duplicate labels, then record for each
 * labeled instruction how many jump instructions target it (n_users).
 */
3298 instr_label_check(struct instruction_data *instruction_data,
3299 uint32_t n_instructions)
3303 /* Check that all instruction labels are unique. */
3304 for (i = 0; i < n_instructions; i++) {
3305 struct instruction_data *data = &instruction_data[i];
3306 char *label = data->label;
/* Compare against every later instruction's label only (earlier pairs
 * were already checked).
 */
3312 for (j = i + 1; j < n_instructions; j++)
3313 CHECK(strcmp(label, data[j].label), EINVAL);
3316 /* Get users for each instruction label. */
3317 for (i = 0; i < n_instructions; i++) {
3318 struct instruction_data *data = &instruction_data[i];
3319 char *label = data->label;
3321 data->n_users = label_is_used(instruction_data,
/*
 * Translate an array of instruction strings into executable form.
 * Allocates instruction and metadata arrays, translates each string (through
 * a private strdup'd copy, since translation tokenizes in place), runs the
 * label pass, then publishes the result on the action (when non-NULL) and on
 * the pipeline. Error cleanup paths are on elided lines.
 */
3330 instruction_config(struct rte_swx_pipeline *p,
3332 const char **instructions,
3333 uint32_t n_instructions)
3335 struct instruction *instr = NULL;
3336 struct instruction_data *data = NULL;
3337 char *string = NULL;
3341 CHECK(n_instructions, EINVAL);
3342 CHECK(instructions, EINVAL);
3343 for (i = 0; i < n_instructions; i++)
3344 CHECK(instructions[i], EINVAL);
3346 /* Memory allocation. */
3347 instr = calloc(n_instructions, sizeof(struct instruction));
3353 data = calloc(n_instructions, sizeof(struct instruction_data));
3359 for (i = 0; i < n_instructions; i++) {
3360 string = strdup(instructions[i]);
3366 err = instr_translate(p, a, string, &instr[i], &data[i]);
3373 err = instr_label_check(data, n_instructions);
3380 a->instructions = instr;
3381 a->n_instructions = n_instructions;
3383 p->instructions = instr;
3384 p->n_instructions = n_instructions;
/* Instruction dispatch: maps each instruction type to its executor.
 * Indexed directly by instruction type at run time (see instr_exec()).
 */
3396 typedef void (*instr_exec_t)(struct rte_swx_pipeline *);
3398 static instr_exec_t instruction_table[] = {
3399 [INSTR_RX] = instr_rx_exec,
3400 [INSTR_TX] = instr_tx_exec,
3402 [INSTR_HDR_EXTRACT] = instr_hdr_extract_exec,
3403 [INSTR_HDR_EXTRACT2] = instr_hdr_extract2_exec,
3404 [INSTR_HDR_EXTRACT3] = instr_hdr_extract3_exec,
3405 [INSTR_HDR_EXTRACT4] = instr_hdr_extract4_exec,
3406 [INSTR_HDR_EXTRACT5] = instr_hdr_extract5_exec,
3407 [INSTR_HDR_EXTRACT6] = instr_hdr_extract6_exec,
3408 [INSTR_HDR_EXTRACT7] = instr_hdr_extract7_exec,
3409 [INSTR_HDR_EXTRACT8] = instr_hdr_extract8_exec,
3411 [INSTR_HDR_EMIT] = instr_hdr_emit_exec,
3412 [INSTR_HDR_EMIT_TX] = instr_hdr_emit_tx_exec,
3413 [INSTR_HDR_EMIT2_TX] = instr_hdr_emit2_tx_exec,
3414 [INSTR_HDR_EMIT3_TX] = instr_hdr_emit3_tx_exec,
3415 [INSTR_HDR_EMIT4_TX] = instr_hdr_emit4_tx_exec,
3416 [INSTR_HDR_EMIT5_TX] = instr_hdr_emit5_tx_exec,
3417 [INSTR_HDR_EMIT6_TX] = instr_hdr_emit6_tx_exec,
3418 [INSTR_HDR_EMIT7_TX] = instr_hdr_emit7_tx_exec,
3419 [INSTR_HDR_EMIT8_TX] = instr_hdr_emit8_tx_exec,
3421 [INSTR_HDR_VALIDATE] = instr_hdr_validate_exec,
3422 [INSTR_HDR_INVALIDATE] = instr_hdr_invalidate_exec,
3424 [INSTR_MOV] = instr_mov_exec,
3425 [INSTR_MOV_S] = instr_mov_s_exec,
3426 [INSTR_MOV_I] = instr_mov_i_exec,
3428 [INSTR_DMA_HT] = instr_dma_ht_exec,
3429 [INSTR_DMA_HT2] = instr_dma_ht2_exec,
3430 [INSTR_DMA_HT3] = instr_dma_ht3_exec,
3431 [INSTR_DMA_HT4] = instr_dma_ht4_exec,
3432 [INSTR_DMA_HT5] = instr_dma_ht5_exec,
3433 [INSTR_DMA_HT6] = instr_dma_ht6_exec,
3434 [INSTR_DMA_HT7] = instr_dma_ht7_exec,
3435 [INSTR_DMA_HT8] = instr_dma_ht8_exec,
3437 [INSTR_ALU_ADD] = instr_alu_add_exec,
3438 [INSTR_ALU_ADD_MH] = instr_alu_add_mh_exec,
3439 [INSTR_ALU_ADD_HM] = instr_alu_add_hm_exec,
3440 [INSTR_ALU_ADD_HH] = instr_alu_add_hh_exec,
3441 [INSTR_ALU_ADD_MI] = instr_alu_add_mi_exec,
3442 [INSTR_ALU_ADD_HI] = instr_alu_add_hi_exec,
3444 [INSTR_ALU_SUB] = instr_alu_sub_exec,
3445 [INSTR_ALU_SUB_MH] = instr_alu_sub_mh_exec,
3446 [INSTR_ALU_SUB_HM] = instr_alu_sub_hm_exec,
3447 [INSTR_ALU_SUB_HH] = instr_alu_sub_hh_exec,
3448 [INSTR_ALU_SUB_MI] = instr_alu_sub_mi_exec,
3449 [INSTR_ALU_SUB_HI] = instr_alu_sub_hi_exec,
/* Execute the current thread's next instruction via the dispatch table. */
3453 instr_exec(struct rte_swx_pipeline *p)
3455 struct thread *t = &p->threads[p->thread_id];
3456 struct instruction *ip = t->ip;
3457 instr_exec_t instr = instruction_table[ip->type];
/* Look up an action by name; NULL when absent (return on elided lines). */
3465 static struct action *
3466 action_find(struct rte_swx_pipeline *p, const char *name)
3468 struct action *elem;
3473 TAILQ_FOREACH(elem, &p->actions, node)
3474 if (strcmp(elem->name, name) == 0)
/* Find a field in the action's argument struct; actions without arguments
 * (a->st == NULL) have no fields.
 */
3480 static struct field *
3481 action_field_find(struct action *a, const char *name)
3483 return a->st ? struct_type_field_find(a->st, name) : NULL;
/* Resolve a "t.<field>" reference to an action (table) argument field. */
3486 static struct field *
3487 action_field_parse(struct action *action, const char *name)
3489 if (name[0] != 't' || name[1] != '.')
3492 return action_field_find(action, &name[2]);
/*
 * Public API: register an action with the pipeline.
 * The optional argument struct type describes the action's table-supplied
 * data; the instruction strings are translated eagerly at registration time.
 */
3496 rte_swx_pipeline_action_config(struct rte_swx_pipeline *p,
3498 const char *args_struct_type_name,
3499 const char **instructions,
3500 uint32_t n_instructions)
3502 struct struct_type *args_struct_type;
3508 CHECK_NAME(name, EINVAL);
3509 CHECK(!action_find(p, name), EEXIST);
/* The argument struct type is optional: NULL means no action arguments. */
3511 if (args_struct_type_name) {
3512 CHECK_NAME(args_struct_type_name, EINVAL);
3513 args_struct_type = struct_type_find(p, args_struct_type_name);
3514 CHECK(args_struct_type, EINVAL);
3516 args_struct_type = NULL;
3519 /* Node allocation. */
3520 a = calloc(1, sizeof(struct action));
3523 /* Node initialization. */
3524 strcpy(a->name, name);
3525 a->st = args_struct_type;
3526 a->id = p->n_actions;
3528 /* Instruction translation. */
3529 err = instruction_config(p, a, instructions, n_instructions);
3535 /* Node add to tailq. */
3536 TAILQ_INSERT_TAIL(&p->actions, a, node);
/*
 * Build step: create the action-ID -> instruction-array lookup table used by
 * the data path.
 */
3543 action_build(struct rte_swx_pipeline *p)
3545 struct action *action;
3547 p->action_instructions = calloc(p->n_actions,
3548 sizeof(struct instruction *));
3549 CHECK(p->action_instructions, ENOMEM);
3551 TAILQ_FOREACH(action, &p->actions, node)
3552 p->action_instructions[action->id] = action->instructions;
/* Undo action_build(). */
3558 action_build_free(struct rte_swx_pipeline *p)
3560 free(p->action_instructions);
3561 p->action_instructions = NULL;
/* Full action teardown: free build state, then drain the registration list,
 * releasing each action's translated instructions.
 */
3565 action_free(struct rte_swx_pipeline *p)
3567 action_build_free(p);
3570 struct action *action;
3572 action = TAILQ_FIRST(&p->actions);
3576 TAILQ_REMOVE(&p->actions, action, node);
3577 free(action->instructions);
/* Look up a table type by exact name; NULL when absent. */
3585 static struct table_type *
3586 table_type_find(struct rte_swx_pipeline *p, const char *name)
3588 struct table_type *elem;
3590 TAILQ_FOREACH(elem, &p->table_types, node)
3591 if (strcmp(elem->name, name) == 0)
/*
 * Pick a table type for the given match type: prefer the recommended type
 * when its match type agrees, otherwise fall back to the first registered
 * type with a matching match type.
 */
3597 static struct table_type *
3598 table_type_resolve(struct rte_swx_pipeline *p,
3599 const char *recommended_type_name,
3600 enum rte_swx_table_match_type match_type)
3602 struct table_type *elem;
3604 /* Only consider the recommended type if the match type is correct. */
3605 if (recommended_type_name)
3606 TAILQ_FOREACH(elem, &p->table_types, node)
3607 if (!strcmp(elem->name, recommended_type_name) &&
3608 (elem->match_type == match_type))
3611 /* Ignore the recommended type and get the first element with this match
3614 TAILQ_FOREACH(elem, &p->table_types, node)
3615 if (elem->match_type == match_type)
/* Look up a table by name; NULL when absent. */
3621 static struct table *
3622 table_find(struct rte_swx_pipeline *p, const char *name)
3626 TAILQ_FOREACH(elem, &p->tables, node)
3627 if (strcmp(elem->name, name) == 0)
/* Look up a table by numeric ID; NULL when absent. */
3633 static struct table *
3634 table_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
3636 struct table *table = NULL;
3638 TAILQ_FOREACH(table, &p->tables, node)
3639 if (table->id == id)
/*
 * Public API: register a table implementation (ops vtable) under a name and
 * match type. The mandatory ops (create/lkp/free) are validated; the ops
 * struct is copied, so the caller's copy need not outlive the call.
 */
3646 rte_swx_pipeline_table_type_register(struct rte_swx_pipeline *p,
3648 enum rte_swx_table_match_type match_type,
3649 struct rte_swx_table_ops *ops)
3651 struct table_type *elem;
3655 CHECK_NAME(name, EINVAL);
3656 CHECK(!table_type_find(p, name), EEXIST);
3659 CHECK(ops->create, EINVAL);
3660 CHECK(ops->lkp, EINVAL);
3661 CHECK(ops->free, EINVAL);
3663 /* Node allocation. */
3664 elem = calloc(1, sizeof(struct table_type));
3665 CHECK(elem, ENOMEM);
3667 /* Node initialization. */
3668 strcpy(elem->name, name);
3669 elem->match_type = match_type;
3670 memcpy(&elem->ops, ops, sizeof(*ops));
3672 /* Node add to tailq. */
3673 TAILQ_INSERT_TAIL(&p->table_types, elem, node);
/* Derive the overall table match type from the per-field match types:
 * - all fields EXACT                        -> EXACT;
 * - exactly one non-EXACT field, it is the
 *   last field and it is LPM               -> LPM;
 * - anything else                           -> WILDCARD.
 * The first loop stops at the first non-EXACT field, so 'i' afterwards is
 * either n_fields (all exact) or the index of that field. */
3678 static enum rte_swx_table_match_type
3679 table_match_type_resolve(struct rte_swx_match_field_params *fields,
3684 for (i = 0; i < n_fields; i++)
3685 if (fields[i].match_type != RTE_SWX_TABLE_MATCH_EXACT)
3689 return RTE_SWX_TABLE_MATCH_EXACT;
3691 if ((i == n_fields - 1) &&
3692 (fields[i].match_type == RTE_SWX_TABLE_MATCH_LPM))
3693 return RTE_SWX_TABLE_MATCH_LPM;
3695 return RTE_SWX_TABLE_MATCH_WILDCARD;
/* Public API: configure a new match-action table on the pipeline.
 * Validates the match fields (header XOR metadata, strictly increasing
 * offsets, all fields from the same header), the action list and the default
 * action, resolves the backing table type, then allocates and initializes the
 * table node and links it onto p->tables. Returns 0 or a negative errno via
 * the CHECK() macros. */
3699 rte_swx_pipeline_table_config(struct rte_swx_pipeline *p,
3701 struct rte_swx_pipeline_table_params *params,
3702 const char *recommended_table_type_name,
3706 struct table_type *type;
3708 struct action *default_action;
3709 struct header *header = NULL;
3711 uint32_t offset_prev = 0, action_data_size_max = 0, i;
/* Basic argument checks: unique non-empty name, non-NULL params. */
3715 CHECK_NAME(name, EINVAL);
3716 CHECK(!table_find(p, name), EEXIST);
3718 CHECK(params, EINVAL);
/* Match field checks: each field must resolve to either a header field or a
 * metadata field; all header fields must come from the same header; offsets
 * must be strictly increasing (fields listed in layout order). */
3721 CHECK(!params->n_fields || params->fields, EINVAL);
3722 for (i = 0; i < params->n_fields; i++) {
/* NOTE(review): "¶ms" below is mojibake for "&params" (HTML entity
 * &para; corruption) — restore to "&params->fields[i]" in the real file. */
3723 struct rte_swx_match_field_params *field = ¶ms->fields[i];
3725 struct field *hf, *mf;
3728 CHECK_NAME(field->name, EINVAL);
3730 hf = header_field_parse(p, field->name, &h);
3731 mf = metadata_field_parse(p, field->name);
3732 CHECK(hf || mf, EINVAL);
3734 offset = hf ? hf->offset : mf->offset;
/* First field fixes whether the key lives in a header or in metadata. */
3737 is_header = hf ? 1 : 0;
3738 header = hf ? h : NULL;
3739 offset_prev = offset;
/* Subsequent fields: same header (or all metadata), increasing offset. */
3744 CHECK((is_header && hf && (h->id == header->id)) ||
3745 (!is_header && mf), EINVAL);
3747 CHECK(offset > offset_prev, EINVAL);
3748 offset_prev = offset;
3751 /* Action checks. */
3752 CHECK(params->n_actions, EINVAL);
3753 CHECK(params->action_names, EINVAL);
3754 for (i = 0; i < params->n_actions; i++) {
3755 const char *action_name = params->action_names[i];
3757 uint32_t action_data_size;
3759 CHECK(action_name, EINVAL);
3761 a = action_find(p, action_name);
/* Track the largest action data size over all actions of this table. */
3764 action_data_size = a->st ? a->st->n_bits / 8 : 0;
3765 if (action_data_size > action_data_size_max)
3766 action_data_size_max = action_data_size;
/* Default action checks: the name must appear in this table's action list. */
3769 CHECK(params->default_action_name, EINVAL);
/* NOTE(review): loop bound is p->n_actions (all pipeline actions) but it
 * indexes params->action_names[], which only has params->n_actions entries —
 * out-of-bounds read when p->n_actions > params->n_actions. Confirm the bound
 * should be params->n_actions. */
3770 for (i = 0; i < p->n_actions; i++)
3771 if (!strcmp(params->action_names[i],
3772 params->default_action_name))
3774 CHECK(i < params->n_actions, EINVAL);
3775 default_action = action_find(p, params->default_action_name);
/* NOTE(review): this check rejects (no-struct action + data supplied) but
 * passes (struct action + NO data supplied); the memcpy at 3843-3845 below
 * then reads from a NULL params->default_action_data — verify callers always
 * supply data when the default action has an argument struct. */
3776 CHECK((default_action->st && params->default_action_data) ||
3777 !params->default_action_data, EINVAL);
3779 /* Table type checks. */
3780 if (params->n_fields) {
3781 enum rte_swx_table_match_type match_type;
3783 match_type = table_match_type_resolve(params->fields,
3785 type = table_type_resolve(p,
3786 recommended_table_type_name,
3788 CHECK(type, EINVAL);
3793 /* Memory allocation. */
3794 t = calloc(1, sizeof(struct table));
3797 t->fields = calloc(params->n_fields, sizeof(struct match_field));
3803 t->actions = calloc(params->n_actions, sizeof(struct action *));
/* Default action data buffer sized to the largest action's argument struct. */
3810 if (action_data_size_max) {
3811 t->default_action_data = calloc(1, action_data_size_max);
3812 if (!t->default_action_data) {
3820 /* Node initialization. */
/* NOTE(review): unbounded strcpy into fixed-size t->name/t->args buffers —
 * same length-check concern as table type registration; confirm upstream
 * validation. */
3821 strcpy(t->name, name);
3822 if (args && args[0])
3823 strcpy(t->args, args);
3826 for (i = 0; i < params->n_fields; i++) {
/* NOTE(review): "¶ms" is the same "&params" mojibake as above. */
3827 struct rte_swx_match_field_params *field = ¶ms->fields[i];
3828 struct match_field *f = &t->fields[i];
3830 f->match_type = field->match_type;
/* Re-resolve each field name into the header or metadata field pointer. */
3831 f->field = is_header ?
3832 header_field_parse(p, field->name, NULL) :
3833 metadata_field_parse(p, field->name);
3835 t->n_fields = params->n_fields;
3836 t->is_header = is_header;
3839 for (i = 0; i < params->n_actions; i++)
3840 t->actions[i] = action_find(p, params->action_names[i]);
3841 t->default_action = default_action;
3842 if (default_action->st)
3843 memcpy(t->default_action_data,
3844 params->default_action_data,
3845 default_action->st->n_bits / 8);
3846 t->n_actions = params->n_actions;
3847 t->default_action_is_const = params->default_action_is_const;
3848 t->action_data_size_max = action_data_size_max;
/* Table ids are assigned in configuration order. */
3851 t->id = p->n_tables;
3853 /* Node add to tailq. */
3854 TAILQ_INSERT_TAIL(&p->tables, t, node);
/* Build the rte_swx_table_params handed to the table type's create() op:
 * key offset/size derived from the first and last match fields, a key mask
 * with 0xFF bytes over each match field, and the max action data size.
 * Returns a heap-allocated params struct (caller frees via
 * table_params_free()), or NULL on allocation failure (outside this view). */
3860 static struct rte_swx_table_params *
3861 table_params_get(struct table *table)
3863 struct rte_swx_table_params *params;
3864 struct field *first, *last;
3866 uint32_t key_size, key_offset, action_data_size, i;
3868 /* Memory allocation. */
3869 params = calloc(1, sizeof(struct rte_swx_table_params));
3873 /* Key offset and size. */
/* NOTE(review): the /8 divisions assume all field offsets and widths are
 * byte-aligned (multiples of 8 bits) — presumably enforced at field
 * registration; confirm. */
3874 first = table->fields[0].field;
3875 last = table->fields[table->n_fields - 1].field;
3876 key_offset = first->offset / 8;
3877 key_size = (last->offset + last->n_bits - first->offset) / 8;
3879 /* Memory allocation. */
3880 key_mask = calloc(1, key_size);
/* Key mask: set 0xFF over each field's bytes; gaps between fields stay 0. */
3887 for (i = 0; i < table->n_fields; i++) {
3888 struct field *f = table->fields[i].field;
3889 uint32_t start = (f->offset - first->offset) / 8;
3890 size_t size = f->n_bits / 8;
3892 memset(&key_mask[start], 0xFF, size);
3895 /* Action data size. */
3896 action_data_size = 0;
3897 for (i = 0; i < table->n_actions; i++) {
3898 struct action *action = table->actions[i];
3899 uint32_t ads = action->st ? action->st->n_bits / 8 : 0;
3901 if (ads > action_data_size)
3902 action_data_size = ads;
/* Fill in the output struct; key_mask ownership transfers to params. */
3906 params->match_type = table->type->match_type;
3907 params->key_size = key_size;
3908 params->key_offset = key_offset;
3909 params->key_mask0 = key_mask;
3910 params->action_data_size = action_data_size;
3911 params->n_keys_max = table->size;
/* Free a params struct produced by table_params_get(), including the key
 * mask buffer it owns. */
3917 table_params_free(struct rte_swx_table_params *params)
3922 free(params->key_mask0);
/* Allocate the per-table runtime state array (indexed by table id) and, for
 * each table: create the backing table object via its type's create() op and
 * populate the default action data/id. On any CHECK failure a negative errno
 * is returned; partially built state is presumably released by the caller via
 * table_state_build_free() — confirm against rte_swx_pipeline_build(). */
3927 table_state_build(struct rte_swx_pipeline *p)
3929 struct table *table;
3931 p->table_state = calloc(p->n_tables,
3932 sizeof(struct rte_swx_table_state));
3933 CHECK(p->table_state, ENOMEM);
3935 TAILQ_FOREACH(table, &p->tables, node) {
3936 struct rte_swx_table_state *ts = &p->table_state[table->id];
3939 struct rte_swx_table_params *params;
/* Translate the table config into create() parameters (heap-allocated). */
3942 params = table_params_get(table);
3943 CHECK(params, ENOMEM);
3945 ts->obj = table->type->ops.create(params,
/* params are consumed by create(); free them regardless of its outcome. */
3950 table_params_free(params);
3951 CHECK(ts->obj, ENODEV);
3954 /* ts->default_action_data. */
3955 if (table->action_data_size_max) {
3956 ts->default_action_data =
3957 malloc(table->action_data_size_max);
3958 CHECK(ts->default_action_data, ENOMEM);
3960 memcpy(ts->default_action_data,
3961 table->default_action_data,
3962 table->action_data_size_max);
3965 /* ts->default_action_id. */
3966 ts->default_action_id = table->default_action->id;
/* Release the per-table runtime state: destroy each table object through its
 * type's free() op, free the default action data, then free the state array
 * itself. Safe on a partially built state (NULL checks / free(NULL)). */
3973 table_state_build_free(struct rte_swx_pipeline *p)
3977 if (!p->table_state)
3980 for (i = 0; i < p->n_tables; i++) {
3981 struct rte_swx_table_state *ts = &p->table_state[i];
3982 struct table *table = table_find_by_id(p, i);
/* Only tables with a real type own a backing object to destroy. */
3985 if (table->type && ts->obj)
3986 table->type->ops.free(ts->obj);
3988 /* ts->default_action_data. */
3989 free(ts->default_action_data);
3992 free(p->table_state);
3993 p->table_state = NULL;
/* Thin alias kept for symmetry with the other *_free() teardown hooks. */
3997 table_state_free(struct rte_swx_pipeline *p)
3999 table_state_build_free(p);
/* Lookup stub used for tables without a backing table type (e.g. keyless
 * tables): always reports completion with no match, so the pipeline falls
 * through to the table's default action. */
4003 table_stub_lkp(void *table __rte_unused,
4004 void *mailbox __rte_unused,
4005 uint8_t **key __rte_unused,
4006 uint64_t *action_id __rte_unused,
4007 uint8_t **action_data __rte_unused,
4011 return 1; /* DONE. */
/* Build the per-thread table runtime: for every thread, allocate a
 * table_runtime array (indexed by table id) and wire each entry to either the
 * table type's real lookup function (with its own mailbox and a key pointer
 * into the header or metadata struct) or, for type-less tables, the no-match
 * stub above. */
4015 table_build(struct rte_swx_pipeline *p)
4019 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
4020 struct thread *t = &p->threads[i];
4021 struct table *table;
4023 t->tables = calloc(p->n_tables, sizeof(struct table_runtime));
4024 CHECK(t->tables, ENOMEM);
4026 TAILQ_FOREACH(table, &p->tables, node) {
4027 struct table_runtime *r = &t->tables[table->id];
/* Real table: per-thread mailbox sized by the implementation. */
4032 size = table->type->ops.mailbox_size_get();
4035 r->func = table->type->ops.lkp;
4039 r->mailbox = calloc(1, size);
4040 CHECK(r->mailbox, ENOMEM);
/* Key base pointer: the header struct for header tables, otherwise the
 * thread's metadata struct. */
4044 r->key = table->is_header ?
4045 &t->structs[table->header->struct_id] :
4046 &t->structs[p->metadata_struct_id];
/* Type-less table: stub lookup, no mailbox/key needed. */
4048 r->func = table_stub_lkp;
/* Undo table_build(): walk every thread's table_runtime array and release the
 * per-table resources (mailboxes; array itself freed outside this view). */
4057 table_build_free(struct rte_swx_pipeline *p)
4061 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
4062 struct thread *t = &p->threads[i];
4068 for (j = 0; j < p->n_tables; j++) {
4069 struct table_runtime *r = &t->tables[j];
/* Full table teardown: drop the build artifacts, then pop-and-free every
 * table node (including its actions array and default action data), and
 * finally pop-and-free every registered table type. */
4080 table_free(struct rte_swx_pipeline *p)
4082 table_build_free(p);
/* Tables: unlink from the head until the tailq is empty. */
4088 elem = TAILQ_FIRST(&p->tables);
4092 TAILQ_REMOVE(&p->tables, elem, node);
4094 free(elem->actions);
4095 free(elem->default_action_data);
/* Table types: same pop-and-free pattern. */
4101 struct table_type *elem;
4103 elem = TAILQ_FIRST(&p->table_types);
4107 TAILQ_REMOVE(&p->table_types, elem, node);
/* Public API: allocate and initialize an empty pipeline object. All the
 * object tailqs start empty; struct id 0 is reserved for action data (hence
 * n_structs = 1). The numa_node hint is stored for later allocations.
 * Returns 0 and stores the pipeline in *p, or -ENOMEM. */
4116 rte_swx_pipeline_config(struct rte_swx_pipeline **p, int numa_node)
4118 struct rte_swx_pipeline *pipeline;
4120 /* Check input parameters. */
4123 /* Memory allocation. */
4124 pipeline = calloc(1, sizeof(struct rte_swx_pipeline));
4125 CHECK(pipeline, ENOMEM);
4127 /* Initialization. */
4128 TAILQ_INIT(&pipeline->struct_types);
4129 TAILQ_INIT(&pipeline->port_in_types);
4130 TAILQ_INIT(&pipeline->ports_in);
4131 TAILQ_INIT(&pipeline->port_out_types);
4132 TAILQ_INIT(&pipeline->ports_out);
4133 TAILQ_INIT(&pipeline->extern_types);
4134 TAILQ_INIT(&pipeline->extern_objs);
4135 TAILQ_INIT(&pipeline->extern_funcs);
4136 TAILQ_INIT(&pipeline->headers);
4137 TAILQ_INIT(&pipeline->actions);
4138 TAILQ_INIT(&pipeline->table_types);
4139 TAILQ_INIT(&pipeline->tables);
4141 pipeline->n_structs = 1; /* Struct 0 is reserved for action_data. */
4142 pipeline->numa_node = numa_node;
/* Public API: destroy a pipeline, releasing instructions, table state and
 * the registered object collections (only some free calls visible here). */
4149 rte_swx_pipeline_free(struct rte_swx_pipeline *p)
4154 free(p->instructions);
4156 table_state_free(p);
4161 extern_func_free(p);
/* Public API: translate the textual instruction list into the pipeline's
 * internal instruction array, then reset every thread's instruction pointer
 * to the start of the new program. */
4171 rte_swx_pipeline_instructions_config(struct rte_swx_pipeline *p,
4172 const char **instructions,
4173 uint32_t n_instructions)
/* NULL action argument: these are the pipeline-level (non-action) instructions. */
4178 err = instruction_config(p, NULL, instructions, n_instructions);
4182 /* Thread instruction pointer reset. */
4183 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
4184 struct thread *t = &p->threads[i];
4186 thread_ip_reset(p, t);
/* Public API: one-shot build of all pipeline components in dependency order
 * (ports -> structs -> externs -> headers/metadata -> actions -> tables ->
 * table state). May only run once per pipeline (-EEXIST otherwise). The
 * trailing *_build_free() calls form the error-unwind path, rolling back in
 * reverse build order (labels/gotos between steps are outside this view). */
4193 rte_swx_pipeline_build(struct rte_swx_pipeline *p)
4198 CHECK(p->build_done == 0, EEXIST);
4200 status = port_in_build(p);
4204 status = port_out_build(p);
4208 status = struct_build(p);
4212 status = extern_obj_build(p);
4216 status = extern_func_build(p);
4220 status = header_build(p);
4224 status = metadata_build(p);
4228 status = action_build(p);
4232 status = table_build(p);
4236 status = table_state_build(p);
/* Error unwind: undo every completed step, newest first. */
4244 table_state_build_free(p);
4245 table_build_free(p);
4246 action_build_free(p);
4247 metadata_build_free(p);
4248 header_build_free(p);
4249 extern_func_build_free(p);
4250 extern_obj_build_free(p);
4251 port_out_build_free(p);
4252 port_in_build_free(p);
4253 struct_build_free(p);
/* Public API: execute n_instructions pipeline instructions (the per-iteration
 * dispatch body is outside this view). */
4259 rte_swx_pipeline_run(struct rte_swx_pipeline *p, uint32_t n_instructions)
4263 for (i = 0; i < n_instructions; i++)
/* Public API: expose the internal table state array to the caller (e.g. the
 * control plane). Requires a built pipeline; the caller receives a borrowed
 * pointer, not a copy. */
4271 rte_swx_pipeline_table_state_get(struct rte_swx_pipeline *p,
4272 struct rte_swx_table_state **table_state)
4274 if (!p || !table_state || !p->build_done)
4277 *table_state = p->table_state;
4282 rte_swx_pipeline_table_state_set(struct rte_swx_pipeline *p,
4283 struct rte_swx_table_state *table_state)
4285 if (!p || !table_state || !p->build_done)
4288 p->table_state = table_state;