1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Intel Corporation
11 #include <rte_common.h>
12 #include <rte_prefetch.h>
13 #include <rte_byteorder.h>
15 #include "rte_swx_pipeline.h"
16 #include "rte_swx_ctl.h"
18 #define CHECK(condition, err_code) \
24 #define CHECK_NAME(name, err_code) \
25 CHECK((name) && (name)[0], err_code)
32 #define TRACE(...) printf(__VA_ARGS__)
37 #define ntoh64(x) rte_be_to_cpu_64(x)
38 #define hton64(x) rte_cpu_to_be_64(x)
44 char name[RTE_SWX_NAME_SIZE];
50 TAILQ_ENTRY(struct_type) node;
51 char name[RTE_SWX_NAME_SIZE];
57 TAILQ_HEAD(struct_type_tailq, struct_type);
63 TAILQ_ENTRY(port_in_type) node;
64 char name[RTE_SWX_NAME_SIZE];
65 struct rte_swx_port_in_ops ops;
68 TAILQ_HEAD(port_in_type_tailq, port_in_type);
71 TAILQ_ENTRY(port_in) node;
72 struct port_in_type *type;
77 TAILQ_HEAD(port_in_tailq, port_in);
79 struct port_in_runtime {
80 rte_swx_port_in_pkt_rx_t pkt_rx;
87 struct port_out_type {
88 TAILQ_ENTRY(port_out_type) node;
89 char name[RTE_SWX_NAME_SIZE];
90 struct rte_swx_port_out_ops ops;
93 TAILQ_HEAD(port_out_type_tailq, port_out_type);
96 TAILQ_ENTRY(port_out) node;
97 struct port_out_type *type;
102 TAILQ_HEAD(port_out_tailq, port_out);
104 struct port_out_runtime {
105 rte_swx_port_out_pkt_tx_t pkt_tx;
106 rte_swx_port_out_flush_t flush;
113 struct extern_type_member_func {
114 TAILQ_ENTRY(extern_type_member_func) node;
115 char name[RTE_SWX_NAME_SIZE];
116 rte_swx_extern_type_member_func_t func;
120 TAILQ_HEAD(extern_type_member_func_tailq, extern_type_member_func);
123 TAILQ_ENTRY(extern_type) node;
124 char name[RTE_SWX_NAME_SIZE];
125 struct struct_type *mailbox_struct_type;
126 rte_swx_extern_type_constructor_t constructor;
127 rte_swx_extern_type_destructor_t destructor;
128 struct extern_type_member_func_tailq funcs;
132 TAILQ_HEAD(extern_type_tailq, extern_type);
135 TAILQ_ENTRY(extern_obj) node;
136 char name[RTE_SWX_NAME_SIZE];
137 struct extern_type *type;
143 TAILQ_HEAD(extern_obj_tailq, extern_obj);
145 #ifndef RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX
146 #define RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX 8
149 struct extern_obj_runtime {
152 rte_swx_extern_type_member_func_t funcs[RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX];
159 TAILQ_ENTRY(extern_func) node;
160 char name[RTE_SWX_NAME_SIZE];
161 struct struct_type *mailbox_struct_type;
162 rte_swx_extern_func_t func;
167 TAILQ_HEAD(extern_func_tailq, extern_func);
169 struct extern_func_runtime {
171 rte_swx_extern_func_t func;
178 TAILQ_ENTRY(header) node;
179 char name[RTE_SWX_NAME_SIZE];
180 struct struct_type *st;
185 TAILQ_HEAD(header_tailq, header);
187 struct header_runtime {
191 struct header_out_runtime {
201 /* Packet headers are always in Network Byte Order (NBO), i.e. big endian.
202 * Packet meta-data fields are always assumed to be in Host Byte Order (HBO).
203 * Table entry fields can be in either NBO or HBO; they are assumed to be in HBO
204 * when transferred to packet meta-data and in NBO when transferred to packet headers.
208 /* Notation conventions:
209 * -Header field: H = h.header.field (dst/src)
210 * -Meta-data field: M = m.field (dst/src)
211 * -Extern object mailbox field: E = e.field (dst/src)
212 * -Extern function mailbox field: F = f.field (dst/src)
213 * -Table action data field: T = t.field (src only)
214 * -Immediate value: I = 32-bit unsigned value (src only)
217 enum instruction_type {
224 /* extract h.header */
245 /* validate h.header */
248 /* invalidate h.header */
249 INSTR_HDR_INVALIDATE,
253 * dst = HMEF, src = HMEFTI
255 INSTR_MOV, /* dst = MEF, src = MEFT */
256 INSTR_MOV_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
257 INSTR_MOV_I, /* dst = HMEF, src = I */
259 /* dma h.header t.field
260 * memcpy(h.header, t.field, sizeof(h.header))
273 * dst = HMEF, src = HMEFTI
275 INSTR_ALU_ADD, /* dst = MEF, src = MEF */
276 INSTR_ALU_ADD_MH, /* dst = MEF, src = H */
277 INSTR_ALU_ADD_HM, /* dst = H, src = MEF */
278 INSTR_ALU_ADD_HH, /* dst = H, src = H */
279 INSTR_ALU_ADD_MI, /* dst = MEF, src = I */
280 INSTR_ALU_ADD_HI, /* dst = H, src = I */
283 struct instr_operand {
298 uint8_t header_id[8];
299 uint8_t struct_id[8];
304 struct instr_hdr_validity {
308 struct instr_dst_src {
309 struct instr_operand dst;
311 struct instr_operand src;
318 uint8_t header_id[8];
319 uint8_t struct_id[8];
330 enum instruction_type type;
333 struct instr_hdr_validity valid;
334 struct instr_dst_src mov;
335 struct instr_dma dma;
336 struct instr_dst_src alu;
340 struct instruction_data {
341 char label[RTE_SWX_NAME_SIZE];
342 char jmp_label[RTE_SWX_NAME_SIZE];
343 uint32_t n_users; /* user = jmp instruction to this instruction. */
351 TAILQ_ENTRY(action) node;
352 char name[RTE_SWX_NAME_SIZE];
353 struct struct_type *st;
354 struct instruction *instructions;
355 uint32_t n_instructions;
359 TAILQ_HEAD(action_tailq, action);
365 TAILQ_ENTRY(table_type) node;
366 char name[RTE_SWX_NAME_SIZE];
367 enum rte_swx_table_match_type match_type;
368 struct rte_swx_table_ops ops;
371 TAILQ_HEAD(table_type_tailq, table_type);
374 enum rte_swx_table_match_type match_type;
379 TAILQ_ENTRY(table) node;
380 char name[RTE_SWX_NAME_SIZE];
381 char args[RTE_SWX_NAME_SIZE];
382 struct table_type *type; /* NULL when n_fields == 0. */
385 struct match_field *fields;
387 int is_header; /* Only valid when n_fields > 0. */
388 struct header *header; /* Only valid when n_fields > 0. */
391 struct action **actions;
392 struct action *default_action;
393 uint8_t *default_action_data;
395 int default_action_is_const;
396 uint32_t action_data_size_max;
402 TAILQ_HEAD(table_tailq, table);
404 struct table_runtime {
405 rte_swx_table_lookup_t func;
415 struct rte_swx_pkt pkt;
421 /* Packet headers. */
422 struct header_runtime *headers; /* Extracted or generated headers. */
423 struct header_out_runtime *headers_out; /* Emitted headers. */
424 uint8_t *header_storage;
425 uint8_t *header_out_storage;
426 uint64_t valid_headers;
427 uint32_t n_headers_out;
429 /* Packet meta-data. */
433 struct table_runtime *tables;
434 struct rte_swx_table_state *table_state;
436 int hit; /* 0 = Miss, 1 = Hit. */
438 /* Extern objects and functions. */
439 struct extern_obj_runtime *extern_objs;
440 struct extern_func_runtime *extern_funcs;
443 struct instruction *ip;
444 struct instruction *ret;
447 #define MASK64_BIT_GET(mask, pos) ((mask) & (1LLU << (pos)))
448 #define MASK64_BIT_SET(mask, pos) ((mask) | (1LLU << (pos)))
449 #define MASK64_BIT_CLR(mask, pos) ((mask) & ~(1LLU << (pos)))
451 #define ALU(thread, ip, operator) \
453 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
454 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
455 uint64_t dst64 = *dst64_ptr; \
456 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
457 uint64_t dst = dst64 & dst64_mask; \
459 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
460 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
461 uint64_t src64 = *src64_ptr; \
462 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
463 uint64_t src = src64 & src64_mask; \
465 uint64_t result = dst operator src; \
467 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
470 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
472 #define ALU_S(thread, ip, operator) \
474 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
475 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
476 uint64_t dst64 = *dst64_ptr; \
477 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
478 uint64_t dst = dst64 & dst64_mask; \
480 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
481 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
482 uint64_t src64 = *src64_ptr; \
483 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
485 uint64_t result = dst operator src; \
487 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
492 #define ALU_HM(thread, ip, operator) \
494 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
495 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
496 uint64_t dst64 = *dst64_ptr; \
497 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
498 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
500 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
501 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
502 uint64_t src64 = *src64_ptr; \
503 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
504 uint64_t src = src64 & src64_mask; \
506 uint64_t result = dst operator src; \
507 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
509 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
512 #define ALU_HH(thread, ip, operator) \
514 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
515 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
516 uint64_t dst64 = *dst64_ptr; \
517 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
518 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
520 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
521 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
522 uint64_t src64 = *src64_ptr; \
523 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
525 uint64_t result = dst operator src; \
526 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
528 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
540 #define ALU_I(thread, ip, operator) \
542 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
543 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
544 uint64_t dst64 = *dst64_ptr; \
545 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
546 uint64_t dst = dst64 & dst64_mask; \
548 uint64_t src = (ip)->alu.src_val; \
550 uint64_t result = dst operator src; \
552 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
557 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
559 #define ALU_HI(thread, ip, operator) \
561 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
562 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
563 uint64_t dst64 = *dst64_ptr; \
564 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
565 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
567 uint64_t src = (ip)->alu.src_val; \
569 uint64_t result = dst operator src; \
570 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
572 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
581 #define MOV(thread, ip) \
583 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
584 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
585 uint64_t dst64 = *dst64_ptr; \
586 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
588 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
589 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
590 uint64_t src64 = *src64_ptr; \
591 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \
592 uint64_t src = src64 & src64_mask; \
594 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
597 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
599 #define MOV_S(thread, ip) \
601 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
602 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
603 uint64_t dst64 = *dst64_ptr; \
604 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
606 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
607 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
608 uint64_t src64 = *src64_ptr; \
609 uint64_t src = ntoh64(src64) >> (64 - (ip)->mov.src.n_bits); \
611 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
620 #define MOV_I(thread, ip) \
622 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
623 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
624 uint64_t dst64 = *dst64_ptr; \
625 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
627 uint64_t src = (ip)->mov.src_val; \
629 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
632 #define METADATA_READ(thread, offset, n_bits) \
634 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
635 uint64_t m64 = *m64_ptr; \
636 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
640 #define METADATA_WRITE(thread, offset, n_bits, value) \
642 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
643 uint64_t m64 = *m64_ptr; \
644 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
646 uint64_t m_new = value; \
648 *m64_ptr = (m64 & ~m64_mask) | (m_new & m64_mask); \
651 #ifndef RTE_SWX_PIPELINE_THREADS_MAX
652 #define RTE_SWX_PIPELINE_THREADS_MAX 16
655 struct rte_swx_pipeline {
656 struct struct_type_tailq struct_types;
657 struct port_in_type_tailq port_in_types;
658 struct port_in_tailq ports_in;
659 struct port_out_type_tailq port_out_types;
660 struct port_out_tailq ports_out;
661 struct extern_type_tailq extern_types;
662 struct extern_obj_tailq extern_objs;
663 struct extern_func_tailq extern_funcs;
664 struct header_tailq headers;
665 struct struct_type *metadata_st;
666 uint32_t metadata_struct_id;
667 struct action_tailq actions;
668 struct table_type_tailq table_types;
669 struct table_tailq tables;
671 struct port_in_runtime *in;
672 struct port_out_runtime *out;
673 struct instruction **action_instructions;
674 struct rte_swx_table_state *table_state;
675 struct instruction *instructions;
676 struct thread threads[RTE_SWX_PIPELINE_THREADS_MAX];
680 uint32_t n_ports_out;
681 uint32_t n_extern_objs;
682 uint32_t n_extern_funcs;
688 uint32_t n_instructions;
696 static struct struct_type *
697 struct_type_find(struct rte_swx_pipeline *p, const char *name)
699 struct struct_type *elem;
701 TAILQ_FOREACH(elem, &p->struct_types, node)
702 if (strcmp(elem->name, name) == 0)
708 static struct field *
709 struct_type_field_find(struct struct_type *st, const char *name)
713 for (i = 0; i < st->n_fields; i++) {
714 struct field *f = &st->fields[i];
716 if (strcmp(f->name, name) == 0)
724 rte_swx_pipeline_struct_type_register(struct rte_swx_pipeline *p,
726 struct rte_swx_field_params *fields,
729 struct struct_type *st;
733 CHECK_NAME(name, EINVAL);
734 CHECK(fields, EINVAL);
735 CHECK(n_fields, EINVAL);
737 for (i = 0; i < n_fields; i++) {
738 struct rte_swx_field_params *f = &fields[i];
741 CHECK_NAME(f->name, EINVAL);
742 CHECK(f->n_bits, EINVAL);
743 CHECK(f->n_bits <= 64, EINVAL);
744 CHECK((f->n_bits & 7) == 0, EINVAL);
746 for (j = 0; j < i; j++) {
747 struct rte_swx_field_params *f_prev = &fields[j];
749 CHECK(strcmp(f->name, f_prev->name), EINVAL);
753 CHECK(!struct_type_find(p, name), EEXIST);
755 /* Node allocation. */
756 st = calloc(1, sizeof(struct struct_type));
759 st->fields = calloc(n_fields, sizeof(struct field));
765 /* Node initialization. */
766 strcpy(st->name, name);
767 for (i = 0; i < n_fields; i++) {
768 struct field *dst = &st->fields[i];
769 struct rte_swx_field_params *src = &fields[i];
771 strcpy(dst->name, src->name);
772 dst->n_bits = src->n_bits;
773 dst->offset = st->n_bits;
775 st->n_bits += src->n_bits;
777 st->n_fields = n_fields;
779 /* Node add to tailq. */
780 TAILQ_INSERT_TAIL(&p->struct_types, st, node);
786 struct_build(struct rte_swx_pipeline *p)
790 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
791 struct thread *t = &p->threads[i];
793 t->structs = calloc(p->n_structs, sizeof(uint8_t *));
794 CHECK(t->structs, ENOMEM);
801 struct_build_free(struct rte_swx_pipeline *p)
805 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
806 struct thread *t = &p->threads[i];
814 struct_free(struct rte_swx_pipeline *p)
816 struct_build_free(p);
820 struct struct_type *elem;
822 elem = TAILQ_FIRST(&p->struct_types);
826 TAILQ_REMOVE(&p->struct_types, elem, node);
835 static struct port_in_type *
836 port_in_type_find(struct rte_swx_pipeline *p, const char *name)
838 struct port_in_type *elem;
843 TAILQ_FOREACH(elem, &p->port_in_types, node)
844 if (strcmp(elem->name, name) == 0)
851 rte_swx_pipeline_port_in_type_register(struct rte_swx_pipeline *p,
853 struct rte_swx_port_in_ops *ops)
855 struct port_in_type *elem;
858 CHECK_NAME(name, EINVAL);
860 CHECK(ops->create, EINVAL);
861 CHECK(ops->free, EINVAL);
862 CHECK(ops->pkt_rx, EINVAL);
863 CHECK(ops->stats_read, EINVAL);
865 CHECK(!port_in_type_find(p, name), EEXIST);
867 /* Node allocation. */
868 elem = calloc(1, sizeof(struct port_in_type));
871 /* Node initialization. */
872 strcpy(elem->name, name);
873 memcpy(&elem->ops, ops, sizeof(*ops));
875 /* Node add to tailq. */
876 TAILQ_INSERT_TAIL(&p->port_in_types, elem, node);
881 static struct port_in *
882 port_in_find(struct rte_swx_pipeline *p, uint32_t port_id)
884 struct port_in *port;
886 TAILQ_FOREACH(port, &p->ports_in, node)
887 if (port->id == port_id)
894 rte_swx_pipeline_port_in_config(struct rte_swx_pipeline *p,
896 const char *port_type_name,
899 struct port_in_type *type = NULL;
900 struct port_in *port = NULL;
905 CHECK(!port_in_find(p, port_id), EINVAL);
907 CHECK_NAME(port_type_name, EINVAL);
908 type = port_in_type_find(p, port_type_name);
911 obj = type->ops.create(args);
914 /* Node allocation. */
915 port = calloc(1, sizeof(struct port_in));
918 /* Node initialization. */
923 /* Node add to tailq. */
924 TAILQ_INSERT_TAIL(&p->ports_in, port, node);
925 if (p->n_ports_in < port_id + 1)
926 p->n_ports_in = port_id + 1;
932 port_in_build(struct rte_swx_pipeline *p)
934 struct port_in *port;
937 CHECK(p->n_ports_in, EINVAL);
938 CHECK(rte_is_power_of_2(p->n_ports_in), EINVAL);
940 for (i = 0; i < p->n_ports_in; i++)
941 CHECK(port_in_find(p, i), EINVAL);
943 p->in = calloc(p->n_ports_in, sizeof(struct port_in_runtime));
944 CHECK(p->in, ENOMEM);
946 TAILQ_FOREACH(port, &p->ports_in, node) {
947 struct port_in_runtime *in = &p->in[port->id];
949 in->pkt_rx = port->type->ops.pkt_rx;
957 port_in_build_free(struct rte_swx_pipeline *p)
964 port_in_free(struct rte_swx_pipeline *p)
966 port_in_build_free(p);
970 struct port_in *port;
972 port = TAILQ_FIRST(&p->ports_in);
976 TAILQ_REMOVE(&p->ports_in, port, node);
977 port->type->ops.free(port->obj);
981 /* Input port types. */
983 struct port_in_type *elem;
985 elem = TAILQ_FIRST(&p->port_in_types);
989 TAILQ_REMOVE(&p->port_in_types, elem, node);
997 static struct port_out_type *
998 port_out_type_find(struct rte_swx_pipeline *p, const char *name)
1000 struct port_out_type *elem;
1005 TAILQ_FOREACH(elem, &p->port_out_types, node)
1006 if (!strcmp(elem->name, name))
1013 rte_swx_pipeline_port_out_type_register(struct rte_swx_pipeline *p,
1015 struct rte_swx_port_out_ops *ops)
1017 struct port_out_type *elem;
1020 CHECK_NAME(name, EINVAL);
1022 CHECK(ops->create, EINVAL);
1023 CHECK(ops->free, EINVAL);
1024 CHECK(ops->pkt_tx, EINVAL);
1025 CHECK(ops->stats_read, EINVAL);
1027 CHECK(!port_out_type_find(p, name), EEXIST);
1029 /* Node allocation. */
1030 elem = calloc(1, sizeof(struct port_out_type));
1031 CHECK(elem, ENOMEM);
1033 /* Node initialization. */
1034 strcpy(elem->name, name);
1035 memcpy(&elem->ops, ops, sizeof(*ops));
1037 /* Node add to tailq. */
1038 TAILQ_INSERT_TAIL(&p->port_out_types, elem, node);
1043 static struct port_out *
1044 port_out_find(struct rte_swx_pipeline *p, uint32_t port_id)
1046 struct port_out *port;
1048 TAILQ_FOREACH(port, &p->ports_out, node)
1049 if (port->id == port_id)
1056 rte_swx_pipeline_port_out_config(struct rte_swx_pipeline *p,
1058 const char *port_type_name,
1061 struct port_out_type *type = NULL;
1062 struct port_out *port = NULL;
1067 CHECK(!port_out_find(p, port_id), EINVAL);
1069 CHECK_NAME(port_type_name, EINVAL);
1070 type = port_out_type_find(p, port_type_name);
1071 CHECK(type, EINVAL);
1073 obj = type->ops.create(args);
1076 /* Node allocation. */
1077 port = calloc(1, sizeof(struct port_out));
1078 CHECK(port, ENOMEM);
1080 /* Node initialization. */
1085 /* Node add to tailq. */
1086 TAILQ_INSERT_TAIL(&p->ports_out, port, node);
1087 if (p->n_ports_out < port_id + 1)
1088 p->n_ports_out = port_id + 1;
1094 port_out_build(struct rte_swx_pipeline *p)
1096 struct port_out *port;
1099 CHECK(p->n_ports_out, EINVAL);
1101 for (i = 0; i < p->n_ports_out; i++)
1102 CHECK(port_out_find(p, i), EINVAL);
1104 p->out = calloc(p->n_ports_out, sizeof(struct port_out_runtime));
1105 CHECK(p->out, ENOMEM);
1107 TAILQ_FOREACH(port, &p->ports_out, node) {
1108 struct port_out_runtime *out = &p->out[port->id];
1110 out->pkt_tx = port->type->ops.pkt_tx;
1111 out->flush = port->type->ops.flush;
1112 out->obj = port->obj;
1119 port_out_build_free(struct rte_swx_pipeline *p)
1126 port_out_free(struct rte_swx_pipeline *p)
1128 port_out_build_free(p);
1132 struct port_out *port;
1134 port = TAILQ_FIRST(&p->ports_out);
1138 TAILQ_REMOVE(&p->ports_out, port, node);
1139 port->type->ops.free(port->obj);
1143 /* Output port types. */
1145 struct port_out_type *elem;
1147 elem = TAILQ_FIRST(&p->port_out_types);
1151 TAILQ_REMOVE(&p->port_out_types, elem, node);
1159 static struct extern_type *
1160 extern_type_find(struct rte_swx_pipeline *p, const char *name)
1162 struct extern_type *elem;
1164 TAILQ_FOREACH(elem, &p->extern_types, node)
1165 if (strcmp(elem->name, name) == 0)
1171 static struct extern_type_member_func *
1172 extern_type_member_func_find(struct extern_type *type, const char *name)
1174 struct extern_type_member_func *elem;
1176 TAILQ_FOREACH(elem, &type->funcs, node)
1177 if (strcmp(elem->name, name) == 0)
1183 static struct extern_obj *
1184 extern_obj_find(struct rte_swx_pipeline *p, const char *name)
1186 struct extern_obj *elem;
1188 TAILQ_FOREACH(elem, &p->extern_objs, node)
1189 if (strcmp(elem->name, name) == 0)
1195 static struct field *
1196 extern_obj_mailbox_field_parse(struct rte_swx_pipeline *p,
1198 struct extern_obj **object)
1200 struct extern_obj *obj;
1202 char *obj_name, *field_name;
1204 if ((name[0] != 'e') || (name[1] != '.'))
1207 obj_name = strdup(&name[2]);
1211 field_name = strchr(obj_name, '.');
1220 obj = extern_obj_find(p, obj_name);
1226 f = struct_type_field_find(obj->type->mailbox_struct_type, field_name);
1240 rte_swx_pipeline_extern_type_register(struct rte_swx_pipeline *p,
1242 const char *mailbox_struct_type_name,
1243 rte_swx_extern_type_constructor_t constructor,
1244 rte_swx_extern_type_destructor_t destructor)
1246 struct extern_type *elem;
1247 struct struct_type *mailbox_struct_type;
1251 CHECK_NAME(name, EINVAL);
1252 CHECK(!extern_type_find(p, name), EEXIST);
1254 CHECK_NAME(mailbox_struct_type_name, EINVAL);
1255 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
1256 CHECK(mailbox_struct_type, EINVAL);
1258 CHECK(constructor, EINVAL);
1259 CHECK(destructor, EINVAL);
1261 /* Node allocation. */
1262 elem = calloc(1, sizeof(struct extern_type));
1263 CHECK(elem, ENOMEM);
1265 /* Node initialization. */
1266 strcpy(elem->name, name);
1267 elem->mailbox_struct_type = mailbox_struct_type;
1268 elem->constructor = constructor;
1269 elem->destructor = destructor;
1270 TAILQ_INIT(&elem->funcs);
1272 /* Node add to tailq. */
1273 TAILQ_INSERT_TAIL(&p->extern_types, elem, node);
1279 rte_swx_pipeline_extern_type_member_func_register(struct rte_swx_pipeline *p,
1280 const char *extern_type_name,
1282 rte_swx_extern_type_member_func_t member_func)
1284 struct extern_type *type;
1285 struct extern_type_member_func *type_member;
1289 CHECK(extern_type_name, EINVAL);
1290 type = extern_type_find(p, extern_type_name);
1291 CHECK(type, EINVAL);
1292 CHECK(type->n_funcs < RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX, ENOSPC);
1294 CHECK(name, EINVAL);
1295 CHECK(!extern_type_member_func_find(type, name), EEXIST);
1297 CHECK(member_func, EINVAL);
1299 /* Node allocation. */
1300 type_member = calloc(1, sizeof(struct extern_type_member_func));
1301 CHECK(type_member, ENOMEM);
1303 /* Node initialization. */
1304 strcpy(type_member->name, name);
1305 type_member->func = member_func;
1306 type_member->id = type->n_funcs;
1308 /* Node add to tailq. */
1309 TAILQ_INSERT_TAIL(&type->funcs, type_member, node);
1316 rte_swx_pipeline_extern_object_config(struct rte_swx_pipeline *p,
1317 const char *extern_type_name,
1321 struct extern_type *type;
1322 struct extern_obj *obj;
1327 CHECK_NAME(extern_type_name, EINVAL);
1328 type = extern_type_find(p, extern_type_name);
1329 CHECK(type, EINVAL);
1331 CHECK_NAME(name, EINVAL);
1332 CHECK(!extern_obj_find(p, name), EEXIST);
1334 /* Node allocation. */
1335 obj = calloc(1, sizeof(struct extern_obj));
1338 /* Object construction. */
1339 obj_handle = type->constructor(args);
1345 /* Node initialization. */
1346 strcpy(obj->name, name);
1348 obj->obj = obj_handle;
1349 obj->struct_id = p->n_structs;
1350 obj->id = p->n_extern_objs;
1352 /* Node add to tailq. */
1353 TAILQ_INSERT_TAIL(&p->extern_objs, obj, node);
1361 extern_obj_build(struct rte_swx_pipeline *p)
1365 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1366 struct thread *t = &p->threads[i];
1367 struct extern_obj *obj;
1369 t->extern_objs = calloc(p->n_extern_objs,
1370 sizeof(struct extern_obj_runtime));
1371 CHECK(t->extern_objs, ENOMEM);
1373 TAILQ_FOREACH(obj, &p->extern_objs, node) {
1374 struct extern_obj_runtime *r =
1375 &t->extern_objs[obj->id];
1376 struct extern_type_member_func *func;
1377 uint32_t mailbox_size =
1378 obj->type->mailbox_struct_type->n_bits / 8;
1382 r->mailbox = calloc(1, mailbox_size);
1383 CHECK(r->mailbox, ENOMEM);
1385 TAILQ_FOREACH(func, &obj->type->funcs, node)
1386 r->funcs[func->id] = func->func;
1388 t->structs[obj->struct_id] = r->mailbox;
1396 extern_obj_build_free(struct rte_swx_pipeline *p)
1400 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1401 struct thread *t = &p->threads[i];
1404 if (!t->extern_objs)
1407 for (j = 0; j < p->n_extern_objs; j++) {
1408 struct extern_obj_runtime *r = &t->extern_objs[j];
1413 free(t->extern_objs);
1414 t->extern_objs = NULL;
1419 extern_obj_free(struct rte_swx_pipeline *p)
1421 extern_obj_build_free(p);
1423 /* Extern objects. */
1425 struct extern_obj *elem;
1427 elem = TAILQ_FIRST(&p->extern_objs);
1431 TAILQ_REMOVE(&p->extern_objs, elem, node);
1433 elem->type->destructor(elem->obj);
1439 struct extern_type *elem;
1441 elem = TAILQ_FIRST(&p->extern_types);
1445 TAILQ_REMOVE(&p->extern_types, elem, node);
1448 struct extern_type_member_func *func;
1450 func = TAILQ_FIRST(&elem->funcs);
1454 TAILQ_REMOVE(&elem->funcs, func, node);
1465 static struct extern_func *
1466 extern_func_find(struct rte_swx_pipeline *p, const char *name)
1468 struct extern_func *elem;
1470 TAILQ_FOREACH(elem, &p->extern_funcs, node)
1471 if (strcmp(elem->name, name) == 0)
1477 static struct field *
1478 extern_func_mailbox_field_parse(struct rte_swx_pipeline *p,
1480 struct extern_func **function)
1482 struct extern_func *func;
1484 char *func_name, *field_name;
1486 if ((name[0] != 'f') || (name[1] != '.'))
1489 func_name = strdup(&name[2]);
1493 field_name = strchr(func_name, '.');
1502 func = extern_func_find(p, func_name);
1508 f = struct_type_field_find(func->mailbox_struct_type, field_name);
1522 rte_swx_pipeline_extern_func_register(struct rte_swx_pipeline *p,
1524 const char *mailbox_struct_type_name,
1525 rte_swx_extern_func_t func)
1527 struct extern_func *f;
1528 struct struct_type *mailbox_struct_type;
1532 CHECK_NAME(name, EINVAL);
1533 CHECK(!extern_func_find(p, name), EEXIST);
1535 CHECK_NAME(mailbox_struct_type_name, EINVAL);
1536 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
1537 CHECK(mailbox_struct_type, EINVAL);
1539 CHECK(func, EINVAL);
1541 /* Node allocation. */
1542 f = calloc(1, sizeof(struct extern_func));
1543 CHECK(func, ENOMEM);
1545 /* Node initialization. */
1546 strcpy(f->name, name);
1547 f->mailbox_struct_type = mailbox_struct_type;
1549 f->struct_id = p->n_structs;
1550 f->id = p->n_extern_funcs;
1552 /* Node add to tailq. */
1553 TAILQ_INSERT_TAIL(&p->extern_funcs, f, node);
1554 p->n_extern_funcs++;
1561 extern_func_build(struct rte_swx_pipeline *p)
1565 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1566 struct thread *t = &p->threads[i];
1567 struct extern_func *func;
1569 /* Memory allocation. */
1570 t->extern_funcs = calloc(p->n_extern_funcs,
1571 sizeof(struct extern_func_runtime));
1572 CHECK(t->extern_funcs, ENOMEM);
1574 /* Extern function. */
1575 TAILQ_FOREACH(func, &p->extern_funcs, node) {
1576 struct extern_func_runtime *r =
1577 &t->extern_funcs[func->id];
1578 uint32_t mailbox_size =
1579 func->mailbox_struct_type->n_bits / 8;
1581 r->func = func->func;
1583 r->mailbox = calloc(1, mailbox_size);
1584 CHECK(r->mailbox, ENOMEM);
1586 t->structs[func->struct_id] = r->mailbox;
1594 extern_func_build_free(struct rte_swx_pipeline *p)
1598 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1599 struct thread *t = &p->threads[i];
1602 if (!t->extern_funcs)
1605 for (j = 0; j < p->n_extern_funcs; j++) {
1606 struct extern_func_runtime *r = &t->extern_funcs[j];
1611 free(t->extern_funcs);
1612 t->extern_funcs = NULL;
1617 extern_func_free(struct rte_swx_pipeline *p)
1619 extern_func_build_free(p);
1622 struct extern_func *elem;
1624 elem = TAILQ_FIRST(&p->extern_funcs);
1628 TAILQ_REMOVE(&p->extern_funcs, elem, node);
1636 static struct header *
1637 header_find(struct rte_swx_pipeline *p, const char *name)
1639 struct header *elem;
1641 TAILQ_FOREACH(elem, &p->headers, node)
1642 if (strcmp(elem->name, name) == 0)
1648 static struct header *
1649 header_parse(struct rte_swx_pipeline *p,
1652 if (name[0] != 'h' || name[1] != '.')
1655 return header_find(p, &name[2]);
1658 static struct field *
1659 header_field_parse(struct rte_swx_pipeline *p,
1661 struct header **header)
1665 char *header_name, *field_name;
1667 if ((name[0] != 'h') || (name[1] != '.'))
1670 header_name = strdup(&name[2]);
1674 field_name = strchr(header_name, '.');
1683 h = header_find(p, header_name);
1689 f = struct_type_field_find(h->st, field_name);
1703 rte_swx_pipeline_packet_header_register(struct rte_swx_pipeline *p,
1705 const char *struct_type_name)
1707 struct struct_type *st;
1709 size_t n_headers_max;
1712 CHECK_NAME(name, EINVAL);
1713 CHECK_NAME(struct_type_name, EINVAL);
1715 CHECK(!header_find(p, name), EEXIST);
1717 st = struct_type_find(p, struct_type_name);
1720 n_headers_max = RTE_SIZEOF_FIELD(struct thread, valid_headers) * 8;
1721 CHECK(p->n_headers < n_headers_max, ENOSPC);
1723 /* Node allocation. */
1724 h = calloc(1, sizeof(struct header));
1727 /* Node initialization. */
1728 strcpy(h->name, name);
1730 h->struct_id = p->n_structs;
1731 h->id = p->n_headers;
1733 /* Node add to tailq. */
1734 TAILQ_INSERT_TAIL(&p->headers, h, node);
1742 header_build(struct rte_swx_pipeline *p)
1745 uint32_t n_bytes = 0, i;
1747 TAILQ_FOREACH(h, &p->headers, node) {
1748 n_bytes += h->st->n_bits / 8;
1751 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1752 struct thread *t = &p->threads[i];
1753 uint32_t offset = 0;
1755 t->headers = calloc(p->n_headers,
1756 sizeof(struct header_runtime));
1757 CHECK(t->headers, ENOMEM);
1759 t->headers_out = calloc(p->n_headers,
1760 sizeof(struct header_out_runtime));
1761 CHECK(t->headers_out, ENOMEM);
1763 t->header_storage = calloc(1, n_bytes);
1764 CHECK(t->header_storage, ENOMEM);
1766 t->header_out_storage = calloc(1, n_bytes);
1767 CHECK(t->header_out_storage, ENOMEM);
1769 TAILQ_FOREACH(h, &p->headers, node) {
1770 uint8_t *header_storage;
1772 header_storage = &t->header_storage[offset];
1773 offset += h->st->n_bits / 8;
1775 t->headers[h->id].ptr0 = header_storage;
1776 t->structs[h->struct_id] = header_storage;
/*
 * Undo header_build(): release the per-thread header runtime arrays and
 * storage areas, NULL-ing each pointer against double free.
 */
1784 header_build_free(struct rte_swx_pipeline *p)
1788 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1789 struct thread *t = &p->threads[i];
1791 free(t->headers_out);
1792 t->headers_out = NULL;
1797 free(t->header_out_storage);
1798 t->header_out_storage = NULL;
1800 free(t->header_storage);
1801 t->header_storage = NULL;
/*
 * Full header teardown: free the build-time state, then drain and free the
 * registered header list.
 */
1806 header_free(struct rte_swx_pipeline *p)
1808 header_build_free(p);
1811 struct header *elem;
1813 elem = TAILQ_FIRST(&p->headers);
1817 TAILQ_REMOVE(&p->headers, elem, node);
/*
 * Resolve a "m.<field>" reference to a field of the registered metadata
 * struct type. Returns NULL when no metadata struct is registered or the
 * name lacks the "m." prefix.
 */
1825 static struct field *
1826 metadata_field_parse(struct rte_swx_pipeline *p, const char *name)
1828 if (!p->metadata_st)
1831 if (name[0] != 'm' || name[1] != '.')
1834 return struct_type_field_find(p->metadata_st, &name[2]);
/*
 * Register the (single) packet metadata struct type. Fails with EINVAL if a
 * metadata struct was already registered or the struct type is unknown.
 */
1838 rte_swx_pipeline_packet_metadata_register(struct rte_swx_pipeline *p,
1839 const char *struct_type_name)
1841 struct struct_type *st = NULL;
1845 CHECK_NAME(struct_type_name, EINVAL);
1846 st = struct_type_find(p, struct_type_name);
/* Only one metadata struct per pipeline. */
1848 CHECK(!p->metadata_st, EINVAL);
1850 p->metadata_st = st;
1851 p->metadata_struct_id = p->n_structs;
/*
 * Build step: allocate one zeroed metadata buffer per thread and publish it
 * in the thread's structs[] dereference table.
 */
1859 metadata_build(struct rte_swx_pipeline *p)
1861 uint32_t n_bytes = p->metadata_st->n_bits / 8;
1864 /* Thread-level initialization. */
1865 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1866 struct thread *t = &p->threads[i];
1869 metadata = calloc(1, n_bytes);
1870 CHECK(metadata, ENOMEM);
1872 t->metadata = metadata;
1873 t->structs[p->metadata_struct_id] = metadata;
/* Undo metadata_build(): free each thread's metadata buffer. */
1880 metadata_build_free(struct rte_swx_pipeline *p)
1884 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1885 struct thread *t = &p->threads[i];
/* Full metadata teardown. */
1893 metadata_free(struct rte_swx_pipeline *p)
1895 metadata_build_free(p);
1901 static struct field *
1902 action_field_parse(struct action *action, const char *name);
/*
 * Resolve a generic field reference against every namespace in turn:
 * header ("h.hdr.fld"), metadata ("m.fld"), action data ("t.fld"),
 * extern object mailbox and extern function mailbox. On success, also
 * report the owning struct's dereference-table index via *struct_id.
 */
1904 static struct field *
1905 struct_field_parse(struct rte_swx_pipeline *p,
1906 struct action *action,
1908 uint32_t *struct_id)
1915 struct header *header;
/* Header field? */
1917 f = header_field_parse(p, name, &header);
1921 *struct_id = header->struct_id;
/* Metadata field? */
1927 f = metadata_field_parse(p, name);
1931 *struct_id = p->metadata_struct_id;
/* Action argument field? */
1940 f = action_field_parse(action, name);
/* Extern object mailbox field? */
1950 struct extern_obj *obj;
1952 f = extern_obj_mailbox_field_parse(p, name, &obj);
1956 *struct_id = obj->struct_id;
/* Extern function mailbox field? */
1962 struct extern_func *func;
1964 f = extern_func_mailbox_field_parse(p, name, &func);
1968 *struct_id = func->struct_id;
/*
 * Advance to the next input port, wrapping around.
 * NOTE(review): the & (n_ports_in - 1) mask assumes n_ports_in is a power of
 * two — confirm this is enforced at build time.
 */
1978 pipeline_port_inc(struct rte_swx_pipeline *p)
1980 p->port_id = (p->port_id + 1) & (p->n_ports_in - 1);
/* Rewind the thread's instruction pointer to the pipeline entry point. */
1984 thread_ip_reset(struct rte_swx_pipeline *p, struct thread *t)
1986 t->ip = p->instructions;
1990 thread_ip_inc(struct rte_swx_pipeline *p);
/* Advance the current thread's instruction pointer by one instruction. */
1993 thread_ip_inc(struct rte_swx_pipeline *p)
1995 struct thread *t = &p->threads[p->thread_id];
/* Conditionally advance the instruction pointer (branchless skip/retry). */
2001 thread_ip_inc_cond(struct thread *t, int cond)
/* Round-robin to the next thread; THREADS_MAX is a power of two. */
2007 thread_yield(struct rte_swx_pipeline *p)
2009 p->thread_id = (p->thread_id + 1) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
/*
 * Translate "rx m.<field>": store into the instruction the byte offset and
 * bit width of the metadata field that will receive the input port ID.
 * Not allowed inside an action (CHECK(!action)).
 */
2016 instr_rx_translate(struct rte_swx_pipeline *p,
2017 struct action *action,
2020 struct instruction *instr,
2021 struct instruction_data *data __rte_unused)
2025 CHECK(!action, EINVAL);
2026 CHECK(n_tokens == 2, EINVAL);
2028 f = metadata_field_parse(p, tokens[1]);
2031 instr->type = INSTR_RX;
2032 instr->io.io.offset = f->offset / 8;
2033 instr->io.io.n_bits = f->n_bits;
2038 instr_rx_exec(struct rte_swx_pipeline *p);
/*
 * Execute rx: poll the current input port for one packet, reset per-packet
 * state (valid/emitted headers), record the port ID into metadata, snapshot
 * the table state, then advance port and (only if a packet arrived) the IP.
 */
2041 instr_rx_exec(struct rte_swx_pipeline *p)
2043 struct thread *t = &p->threads[p->thread_id];
2044 struct instruction *ip = t->ip;
2045 struct port_in_runtime *port = &p->in[p->port_id];
2046 struct rte_swx_pkt *pkt = &t->pkt;
2050 pkt_received = port->pkt_rx(port->obj, pkt);
2051 t->ptr = &pkt->pkt[pkt->offset];
2052 rte_prefetch0(t->ptr);
2054 TRACE("[Thread %2u] rx %s from port %u\n",
2056 pkt_received ? "1 pkt" : "0 pkts",
/* Headers: nothing parsed, nothing queued for emission yet. */
2060 t->valid_headers = 0;
2061 t->n_headers_out = 0;
/* Meta-data: record the ingress port. */
2064 METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, p->port_id);
/* Tables: start from the pipeline's current table state. */
2067 t->table_state = p->table_state;
/* Thread: retry the rx instruction when no packet was received. */
2070 pipeline_port_inc(p);
2071 thread_ip_inc_cond(t, pkt_received);
/*
 * Translate "tx m.<field>": store the byte offset and bit width of the
 * metadata field holding the output port ID.
 */
2079 instr_tx_translate(struct rte_swx_pipeline *p,
2080 struct action *action __rte_unused,
2083 struct instruction *instr,
2084 struct instruction_data *data __rte_unused)
2088 CHECK(n_tokens == 2, EINVAL);
2090 f = metadata_field_parse(p, tokens[1]);
2093 instr->type = INSTR_TX;
2094 instr->io.io.offset = f->offset / 8;
2095 instr->io.io.n_bits = f->n_bits;
/*
 * Flush the queued headers_out[] in front of the packet payload before tx.
 * Two fast paths avoid copying: (1) a single emitted header that is already
 * contiguous with the payload; (2) encapsulation where the first emitted
 * header lives in its default slot and the second abuts the payload.
 * Everything else falls through to the generic gather-then-prepend copy.
 */
2100 emit_handler(struct thread *t)
2102 struct header_out_runtime *h0 = &t->headers_out[0];
2103 struct header_out_runtime *h1 = &t->headers_out[1];
2104 uint32_t offset = 0, i;
2106 /* No header change or header decapsulation. */
2107 if ((t->n_headers_out == 1) &&
2108 (h0->ptr + h0->n_bytes == t->ptr)) {
2109 TRACE("Emit handler: no header change or header decap.\n");
/* Just grow the packet leftwards over the in-place header. */
2111 t->pkt.offset -= h0->n_bytes;
2112 t->pkt.length += h0->n_bytes;
2117 /* Header encapsulation (optionally, with prior header decasulation). */
2118 if ((t->n_headers_out == 2) &&
2119 (h1->ptr + h1->n_bytes == t->ptr) &&
2120 (h0->ptr == h0->ptr0)) {
2123 TRACE("Emit handler: header encapsulation.\n");
/* Copy only the new outer header in front of the inner one. */
2125 offset = h0->n_bytes + h1->n_bytes;
2126 memcpy(t->ptr - offset, h0->ptr, h0->n_bytes);
2127 t->pkt.offset -= offset;
2128 t->pkt.length += offset;
2133 /* Header insertion. */
2136 /* Header extraction. */
2139 /* For any other case. */
2140 TRACE("Emit handler: complex case.\n");
/* Gather all emitted headers into the side buffer... */
2142 for (i = 0; i < t->n_headers_out; i++) {
2143 struct header_out_runtime *h = &t->headers_out[i];
2145 memcpy(&t->header_out_storage[offset], h->ptr, h->n_bytes);
2146 offset += h->n_bytes;
/* ...then prepend the whole run in a single copy. */
2150 memcpy(t->ptr - offset, t->header_out_storage, offset);
2151 t->pkt.offset -= offset;
2152 t->pkt.length += offset;
2157 instr_tx_exec(struct rte_swx_pipeline *p);
/*
 * Execute tx: read the output port ID from metadata, hand the packet to
 * that port, and restart this thread at the first pipeline instruction.
 */
2160 instr_tx_exec(struct rte_swx_pipeline *p)
2162 struct thread *t = &p->threads[p->thread_id];
2163 struct instruction *ip = t->ip;
2164 uint64_t port_id = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
2165 struct port_out_runtime *port = &p->out[port_id];
2166 struct rte_swx_pkt *pkt = &t->pkt;
2168 TRACE("[Thread %2u]: tx 1 pkt to port %u\n",
/* Packet. */
2176 port->pkt_tx(port->obj, pkt);
/* Thread: wrap around to rx. */
2179 thread_ip_reset(p, t);
/*
 * Translate "extract h.<header>": record the header's ID, struct slot and
 * byte size in element [0] of the instruction's header vector. Not allowed
 * inside an action.
 */
2187 instr_hdr_extract_translate(struct rte_swx_pipeline *p,
2188 struct action *action,
2191 struct instruction *instr,
2192 struct instruction_data *data __rte_unused)
2196 CHECK(!action, EINVAL);
2197 CHECK(n_tokens == 2, EINVAL);
2199 h = header_parse(p, tokens[1]);
2202 instr->type = INSTR_HDR_EXTRACT;
2203 instr->io.hdr.header_id[0] = h->id;
2204 instr->io.hdr.struct_id[0] = h->struct_id;
2205 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
2210 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract);
/*
 * Core of the (possibly fused) extract instruction: parse n_extract headers
 * in place — each header's struct pointer is aimed directly into the packet
 * buffer (zero copy) and its valid bit is set.
 */
2213 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract)
2215 struct thread *t = &p->threads[p->thread_id];
2216 struct instruction *ip = t->ip;
2217 uint64_t valid_headers = t->valid_headers;
2218 uint8_t *ptr = t->ptr;
2219 uint32_t offset = t->pkt.offset;
2220 uint32_t length = t->pkt.length;
2223 for (i = 0; i < n_extract; i++) {
2224 uint32_t header_id = ip->io.hdr.header_id[i];
2225 uint32_t struct_id = ip->io.hdr.struct_id[i];
2226 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
2228 TRACE("[Thread %2u]: extract header %u (%u bytes)\n",
/* Headers: point the struct slot at the packet bytes, mark valid. */
2234 t->structs[struct_id] = ptr;
2235 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
/* Write back the accumulated thread state. */
2244 t->valid_headers = valid_headers;
2247 t->pkt.offset = offset;
2248 t->pkt.length = length;
/*
 * Thin wrappers: extract 1..8 consecutive headers. The N>1 variants are
 * emitted by the optimizer when N back-to-back extract instructions are
 * fused into one.
 */
2253 instr_hdr_extract_exec(struct rte_swx_pipeline *p)
2255 __instr_hdr_extract_exec(p, 1);
2262 instr_hdr_extract2_exec(struct rte_swx_pipeline *p)
2264 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
2267 __instr_hdr_extract_exec(p, 2);
2274 instr_hdr_extract3_exec(struct rte_swx_pipeline *p)
2276 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
2279 __instr_hdr_extract_exec(p, 3);
2286 instr_hdr_extract4_exec(struct rte_swx_pipeline *p)
2288 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
2291 __instr_hdr_extract_exec(p, 4);
2298 instr_hdr_extract5_exec(struct rte_swx_pipeline *p)
2300 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
2303 __instr_hdr_extract_exec(p, 5);
2310 instr_hdr_extract6_exec(struct rte_swx_pipeline *p)
2312 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
2315 __instr_hdr_extract_exec(p, 6);
2322 instr_hdr_extract7_exec(struct rte_swx_pipeline *p)
2324 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
2327 __instr_hdr_extract_exec(p, 7);
2334 instr_hdr_extract8_exec(struct rte_swx_pipeline *p)
2336 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
2339 __instr_hdr_extract_exec(p, 8);
/*
 * Translate "emit h.<header>": record the header's ID, struct slot and byte
 * size in element [0] of the instruction's header vector.
 */
2349 instr_hdr_emit_translate(struct rte_swx_pipeline *p,
2350 struct action *action __rte_unused,
2353 struct instruction *instr,
2354 struct instruction_data *data __rte_unused)
2358 CHECK(n_tokens == 2, EINVAL);
2360 h = header_parse(p, tokens[1]);
2363 instr->type = INSTR_HDR_EMIT;
2364 instr->io.hdr.header_id[0] = h->id;
2365 instr->io.hdr.struct_id[0] = h->struct_id;
2366 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
2371 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit);
/*
 * Core of the (possibly fused) emit instruction: queue n_emit headers into
 * headers_out[]. Adjacent headers that are contiguous in memory are coalesced
 * into the current output entry (only n_bytes grows); a non-contiguous header
 * opens a new entry. The actual packet rewrite happens later in emit_handler.
 */
2374 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit)
2376 struct thread *t = &p->threads[p->thread_id];
2377 struct instruction *ip = t->ip;
2378 uint32_t n_headers_out = t->n_headers_out;
/* Cursor = the last already-open output entry (if any). */
2379 struct header_out_runtime *ho = &t->headers_out[n_headers_out - 1];
2380 uint8_t *ho_ptr = NULL;
2381 uint32_t ho_nbytes = 0, i;
2383 for (i = 0; i < n_emit; i++) {
2384 uint32_t header_id = ip->io.hdr.header_id[i];
2385 uint32_t struct_id = ip->io.hdr.struct_id[i];
2386 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
2388 struct header_runtime *hi = &t->headers[header_id];
2389 uint8_t *hi_ptr = t->structs[struct_id];
2391 TRACE("[Thread %2u]: emit header %u\n",
/* Headers. */
2397 if (!t->n_headers_out) {
/* First emitted header: open entry [0]. */
2398 ho = &t->headers_out[0];
2400 ho->ptr0 = hi->ptr0;
2404 ho_nbytes = n_bytes;
2411 ho_nbytes = ho->n_bytes;
/* Contiguous with the open entry: coalesce. */
2415 if (ho_ptr + ho_nbytes == hi_ptr) {
2416 ho_nbytes += n_bytes;
/* Not contiguous: close the entry and open a new one. */
2418 ho->n_bytes = ho_nbytes;
2421 ho->ptr0 = hi->ptr0;
2425 ho_nbytes = n_bytes;
/* Flush cursor state back to the thread. */
2431 ho->n_bytes = ho_nbytes;
2432 t->n_headers_out = n_headers_out;
/* Plain emit of a single header. */
2436 instr_hdr_emit_exec(struct rte_swx_pipeline *p)
2438 __instr_hdr_emit_exec(p, 1);
/*
 * Fused emit+tx wrappers: emitN_tx emits N headers then transmits, covering
 * N+1 original instructions (the tx part is elided from this view).
 */
2445 instr_hdr_emit_tx_exec(struct rte_swx_pipeline *p)
2447 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
2450 __instr_hdr_emit_exec(p, 1);
2455 instr_hdr_emit2_tx_exec(struct rte_swx_pipeline *p)
2457 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
2460 __instr_hdr_emit_exec(p, 2);
2465 instr_hdr_emit3_tx_exec(struct rte_swx_pipeline *p)
2467 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
2470 __instr_hdr_emit_exec(p, 3);
2475 instr_hdr_emit4_tx_exec(struct rte_swx_pipeline *p)
2477 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
2480 __instr_hdr_emit_exec(p, 4);
2485 instr_hdr_emit5_tx_exec(struct rte_swx_pipeline *p)
2487 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
2490 __instr_hdr_emit_exec(p, 5);
2495 instr_hdr_emit6_tx_exec(struct rte_swx_pipeline *p)
2497 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
2500 __instr_hdr_emit_exec(p, 6);
2505 instr_hdr_emit7_tx_exec(struct rte_swx_pipeline *p)
2507 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
2510 __instr_hdr_emit_exec(p, 7);
2515 instr_hdr_emit8_tx_exec(struct rte_swx_pipeline *p)
2517 TRACE("[Thread %2u] *** The next 9 instructions are fused. ***\n",
2520 __instr_hdr_emit_exec(p, 8);
/* Translate "validate h.<header>": record the target header ID. */
2528 instr_hdr_validate_translate(struct rte_swx_pipeline *p,
2529 struct action *action __rte_unused,
2532 struct instruction *instr,
2533 struct instruction_data *data __rte_unused)
2537 CHECK(n_tokens == 2, EINVAL);
2539 h = header_parse(p, tokens[1]);
2542 instr->type = INSTR_HDR_VALIDATE;
2543 instr->valid.header_id = h->id;
/* Execute validate: set the header's bit in the valid_headers mask. */
2548 instr_hdr_validate_exec(struct rte_swx_pipeline *p)
2550 struct thread *t = &p->threads[p->thread_id];
2551 struct instruction *ip = t->ip;
2552 uint32_t header_id = ip->valid.header_id;
2554 TRACE("[Thread %2u] validate header %u\n", p->thread_id, header_id);
2557 t->valid_headers = MASK64_BIT_SET(t->valid_headers, header_id);
/* Translate "invalidate h.<header>": record the target header ID. */
2567 instr_hdr_invalidate_translate(struct rte_swx_pipeline *p,
2568 struct action *action __rte_unused,
2571 struct instruction *instr,
2572 struct instruction_data *data __rte_unused)
2576 CHECK(n_tokens == 2, EINVAL);
2578 h = header_parse(p, tokens[1]);
2581 instr->type = INSTR_HDR_INVALIDATE;
2582 instr->valid.header_id = h->id;
/* Execute invalidate: clear the header's bit in the valid_headers mask. */
2587 instr_hdr_invalidate_exec(struct rte_swx_pipeline *p)
2589 struct thread *t = &p->threads[p->thread_id];
2590 struct instruction *ip = t->ip;
2591 uint32_t header_id = ip->valid.header_id;
2593 TRACE("[Thread %2u] invalidate header %u\n", p->thread_id, header_id);
2596 t->valid_headers = MASK64_BIT_CLR(t->valid_headers, header_id);
/*
 * Translate "mov <dst> <src>". dst must be a struct field (header/metadata);
 * src is either another struct field (MOV, or MOV_S when exactly one side is
 * a network-byte-order header field and a byte swap is needed) or an integer
 * literal (MOV_I).
 */
2606 instr_mov_translate(struct rte_swx_pipeline *p,
2607 struct action *action,
2610 struct instruction *instr,
2611 struct instruction_data *data __rte_unused)
2613 char *dst = tokens[1], *src = tokens[2];
2614 struct field *fdst, *fsrc;
2615 uint32_t dst_struct_id, src_struct_id, src_val;
2617 CHECK(n_tokens == 3, EINVAL);
/* dst may not be an action argument (action arg NULL here). */
2619 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
2620 CHECK(fdst, EINVAL);
2623 fsrc = struct_field_parse(p, action, src, &src_struct_id);
2625 instr->type = INSTR_MOV;
/* Exactly one header endpoint => endianness swap variant. */
2626 if ((dst[0] == 'h' && src[0] != 'h') ||
2627 (dst[0] != 'h' && src[0] == 'h'))
2628 instr->type = INSTR_MOV_S;
2630 instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
2631 instr->mov.dst.n_bits = fdst->n_bits;
2632 instr->mov.dst.offset = fdst->offset / 8;
2633 instr->mov.src.struct_id = (uint8_t)src_struct_id;
2634 instr->mov.src.n_bits = fsrc->n_bits;
2635 instr->mov.src.offset = fsrc->offset / 8;
/* src is not a field: parse it as an integer literal (MOV_I). */
2640 src_val = strtoul(src, &src, 0);
2641 CHECK(!src[0], EINVAL);
/* NOTE(review): htonl appears unconditional in this view; presumably it is
 * guarded by dst being a header field — confirm against elided lines. */
2644 src_val = htonl(src_val);
2646 instr->type = INSTR_MOV_I;
2647 instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
2648 instr->mov.dst.n_bits = fdst->n_bits;
2649 instr->mov.dst.offset = fdst->offset / 8;
2650 instr->mov.src_val = (uint32_t)src_val;
/* Execute mov between two same-endianness struct fields. */
2655 instr_mov_exec(struct rte_swx_pipeline *p)
2657 struct thread *t = &p->threads[p->thread_id];
2658 struct instruction *ip = t->ip;
2660 TRACE("[Thread %2u] mov\n",
/* Execute mov with endianness swap (one header-side endpoint). */
2670 instr_mov_s_exec(struct rte_swx_pipeline *p)
2672 struct thread *t = &p->threads[p->thread_id];
2673 struct instruction *ip = t->ip;
2675 TRACE("[Thread %2u] mov (s)\n",
/* Execute mov of an immediate value into a field. */
2685 instr_mov_i_exec(struct rte_swx_pipeline *p)
2687 struct thread *t = &p->threads[p->thread_id];
2688 struct instruction *ip = t->ip;
2690 TRACE("[Thread %2u] mov m.f %x\n",
/*
 * Translate "dma h.<header> t.<field>" (action-only): bulk-copy the action's
 * table-entry data starting at the given action field into the whole header.
 * Slot [0] of the instruction's DMA vector is filled; fusing extends it.
 */
2704 instr_dma_translate(struct rte_swx_pipeline *p,
2705 struct action *action,
2708 struct instruction *instr,
2709 struct instruction_data *data __rte_unused)
2711 char *dst = tokens[1];
2712 char *src = tokens[2];
/* Only valid inside an action (needs table entry action data). */
2716 CHECK(action, EINVAL);
2717 CHECK(n_tokens == 3, EINVAL);
2719 h = header_parse(p, dst);
2722 tf = action_field_parse(action, src);
2725 instr->type = INSTR_DMA_HT;
2726 instr->dma.dst.header_id[0] = h->id;
2727 instr->dma.dst.struct_id[0] = h->struct_id;
2728 instr->dma.n_bytes[0] = h->st->n_bits / 8;
2729 instr->dma.src.offset[0] = tf->offset / 8;
2735 __instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma);
/*
 * Core of the (possibly fused) dma instruction: for each of n_dma headers,
 * copy its bytes from the action data (structs[0]) into the header. An
 * invalid header is first re-homed to its default slot (ptr0) and marked
 * valid; a valid one is overwritten wherever it currently points.
 */
2738 __instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma)
2740 struct thread *t = &p->threads[p->thread_id];
2741 struct instruction *ip = t->ip;
/* structs[0] is the action data for the current table entry. */
2742 uint8_t *action_data = t->structs[0];
2743 uint64_t valid_headers = t->valid_headers;
2746 for (i = 0; i < n_dma; i++) {
2747 uint32_t header_id = ip->dma.dst.header_id[i];
2748 uint32_t struct_id = ip->dma.dst.struct_id[i];
2749 uint32_t offset = ip->dma.src.offset[i];
2750 uint32_t n_bytes = ip->dma.n_bytes[i];
2752 struct header_runtime *h = &t->headers[header_id];
2753 uint8_t *h_ptr0 = h->ptr0;
2754 uint8_t *h_ptr = t->structs[struct_id];
/* Valid header: write in place; invalid: write to the default slot. */
2756 void *dst = MASK64_BIT_GET(valid_headers, header_id) ?
2758 void *src = &action_data[offset];
2760 TRACE("[Thread %2u] dma h.s t.f\n", p->thread_id);
/* Headers. */
2763 memcpy(dst, src, n_bytes);
2764 t->structs[struct_id] = dst;
2765 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
2768 t->valid_headers = valid_headers;
/*
 * Thin wrappers: DMA 1..8 headers from action data. The N>1 variants are
 * emitted when N consecutive dma instructions are fused into one.
 */
2772 instr_dma_ht_exec(struct rte_swx_pipeline *p)
2774 __instr_dma_ht_exec(p, 1);
2781 instr_dma_ht2_exec(struct rte_swx_pipeline *p)
2783 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
2786 __instr_dma_ht_exec(p, 2);
2793 instr_dma_ht3_exec(struct rte_swx_pipeline *p)
2795 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
2798 __instr_dma_ht_exec(p, 3);
2805 instr_dma_ht4_exec(struct rte_swx_pipeline *p)
2807 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
2810 __instr_dma_ht_exec(p, 4);
2817 instr_dma_ht5_exec(struct rte_swx_pipeline *p)
2819 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
2822 __instr_dma_ht_exec(p, 5);
2829 instr_dma_ht6_exec(struct rte_swx_pipeline *p)
2831 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
2834 __instr_dma_ht_exec(p, 6);
2841 instr_dma_ht7_exec(struct rte_swx_pipeline *p)
2843 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
2846 __instr_dma_ht_exec(p, 7);
2853 instr_dma_ht8_exec(struct rte_swx_pipeline *p)
2855 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
2858 __instr_dma_ht_exec(p, 8);
/*
 * Translate "add <dst> <src>". The variant encodes the endianness of each
 * operand: H = header field (big endian), M = metadata (host endian),
 * I = immediate. dst must be a header/metadata field; src may also be an
 * integer literal (ADD_MI / ADD_HI).
 */
2868 instr_alu_add_translate(struct rte_swx_pipeline *p,
2869 struct action *action,
2872 struct instruction *instr,
2873 struct instruction_data *data __rte_unused)
2875 char *dst = tokens[1], *src = tokens[2];
2876 struct field *fdst, *fsrc;
2877 uint32_t dst_struct_id, src_struct_id, src_val;
2879 CHECK(n_tokens == 3, EINVAL);
/* dst may not be an action argument. */
2881 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
2882 CHECK(fdst, EINVAL);
2884 /* ADD, ADD_HM, ADD_MH, ADD_HH. */
2885 fsrc = struct_field_parse(p, action, src, &src_struct_id);
2887 instr->type = INSTR_ALU_ADD;
2888 if (dst[0] == 'h' && src[0] == 'm')
2889 instr->type = INSTR_ALU_ADD_HM;
2890 if (dst[0] == 'm' && src[0] == 'h')
2891 instr->type = INSTR_ALU_ADD_MH;
2892 if (dst[0] == 'h' && src[0] == 'h')
2893 instr->type = INSTR_ALU_ADD_HH;
2895 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
2896 instr->alu.dst.n_bits = fdst->n_bits;
2897 instr->alu.dst.offset = fdst->offset / 8;
2898 instr->alu.src.struct_id = (uint8_t)src_struct_id;
2899 instr->alu.src.n_bits = fsrc->n_bits;
2900 instr->alu.src.offset = fsrc->offset / 8;
2904 /* ADD_MI, ADD_HI. */
2905 src_val = strtoul(src, &src, 0);
2906 CHECK(!src[0], EINVAL);
2908 instr->type = INSTR_ALU_ADD_MI;
2910 instr->type = INSTR_ALU_ADD_HI;
2912 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
2913 instr->alu.dst.n_bits = fdst->n_bits;
2914 instr->alu.dst.offset = fdst->offset / 8;
2915 instr->alu.src_val = (uint32_t)src_val;
/*
 * Execute the add variants. Each differs only in which operands need a
 * network/host byte-order conversion (bodies elided in this view).
 */
2920 instr_alu_add_exec(struct rte_swx_pipeline *p)
2922 struct thread *t = &p->threads[p->thread_id];
2923 struct instruction *ip = t->ip;
2925 TRACE("[Thread %2u] add\n", p->thread_id);
/* dst metadata, src header. */
2935 instr_alu_add_mh_exec(struct rte_swx_pipeline *p)
2937 struct thread *t = &p->threads[p->thread_id];
2938 struct instruction *ip = t->ip;
2940 TRACE("[Thread %2u] add (mh)\n", p->thread_id);
/* dst header, src metadata. */
2950 instr_alu_add_hm_exec(struct rte_swx_pipeline *p)
2952 struct thread *t = &p->threads[p->thread_id];
2953 struct instruction *ip = t->ip;
2955 TRACE("[Thread %2u] add (hm)\n", p->thread_id);
/* dst header, src header. */
2965 instr_alu_add_hh_exec(struct rte_swx_pipeline *p)
2967 struct thread *t = &p->threads[p->thread_id];
2968 struct instruction *ip = t->ip;
2970 TRACE("[Thread %2u] add (hh)\n", p->thread_id);
/* dst metadata, src immediate. */
2980 instr_alu_add_mi_exec(struct rte_swx_pipeline *p)
2982 struct thread *t = &p->threads[p->thread_id];
2983 struct instruction *ip = t->ip;
2985 TRACE("[Thread %2u] add (mi)\n", p->thread_id);
/* dst header, src immediate. */
2995 instr_alu_add_hi_exec(struct rte_swx_pipeline *p)
2997 struct thread *t = &p->threads[p->thread_id];
2998 struct instruction *ip = t->ip;
3000 TRACE("[Thread %2u] add (hi)\n", p->thread_id);
3009 #define RTE_SWX_INSTRUCTION_TOKENS_MAX 16
/*
 * Tokenize one instruction string, consume the optional "label :" prefix,
 * then dispatch to the per-opcode translate function which fills *instr and
 * *data. Returns 0 on success or a negative errno via CHECK.
 */
3012 instr_translate(struct rte_swx_pipeline *p,
3013 struct action *action,
3015 struct instruction *instr,
3016 struct instruction_data *data)
3018 char *tokens[RTE_SWX_INSTRUCTION_TOKENS_MAX];
3019 int n_tokens = 0, tpos = 0;
3021 /* Parse the instruction string into tokens. */
3025 token = strtok_r(string, " \t\v", &string);
3029 CHECK(n_tokens < RTE_SWX_INSTRUCTION_TOKENS_MAX, EINVAL);
3031 tokens[n_tokens] = token;
3035 CHECK(n_tokens, EINVAL);
3037 /* Handle the optional instruction label. */
3038 if ((n_tokens >= 2) && !strcmp(tokens[1], ":")) {
3039 strcpy(data->label, tokens[0]);
/* A label alone is not an instruction. */
3042 CHECK(n_tokens - tpos, EINVAL);
3045 /* Identify the instruction type. */
3046 if (!strcmp(tokens[tpos], "rx"))
3047 return instr_rx_translate(p,
3054 if (!strcmp(tokens[tpos], "tx"))
3055 return instr_tx_translate(p,
3062 if (!strcmp(tokens[tpos], "extract"))
3063 return instr_hdr_extract_translate(p,
3070 if (!strcmp(tokens[tpos], "emit"))
3071 return instr_hdr_emit_translate(p,
3078 if (!strcmp(tokens[tpos], "validate"))
3079 return instr_hdr_validate_translate(p,
3086 if (!strcmp(tokens[tpos], "invalidate"))
3087 return instr_hdr_invalidate_translate(p,
3094 if (!strcmp(tokens[tpos], "mov"))
3095 return instr_mov_translate(p,
3102 if (!strcmp(tokens[tpos], "dma"))
3103 return instr_dma_translate(p,
3110 if (!strcmp(tokens[tpos], "add"))
3111 return instr_alu_add_translate(p,
/* Count how many of the n instructions jump to the given label. */
3122 label_is_used(struct instruction_data *data, uint32_t n, const char *label)
3124 uint32_t count = 0, i;
3129 for (i = 0; i < n; i++)
3130 if (!strcmp(label, data[i].jmp_label))
/*
 * Post-translation label pass: verify label uniqueness (O(n^2) pairwise
 * compare), then record each label's user count for later optimization.
 */
3137 instr_label_check(struct instruction_data *instruction_data,
3138 uint32_t n_instructions)
3142 /* Check that all instruction labels are unique. */
3143 for (i = 0; i < n_instructions; i++) {
3144 struct instruction_data *data = &instruction_data[i];
3145 char *label = data->label;
3151 for (j = i + 1; j < n_instructions; j++)
3152 CHECK(strcmp(label, data[j].label), EINVAL);
3155 /* Get users for each instruction label. */
3156 for (i = 0; i < n_instructions; i++) {
3157 struct instruction_data *data = &instruction_data[i];
3158 char *label = data->label;
3160 data->n_users = label_is_used(instruction_data,
/*
 * Translate an array of instruction strings into instruction + metadata
 * arrays, validate labels, then attach the result either to the action (a)
 * or, when a is NULL, to the pipeline itself.
 */
3169 instruction_config(struct rte_swx_pipeline *p,
3171 const char **instructions,
3172 uint32_t n_instructions)
3174 struct instruction *instr = NULL;
3175 struct instruction_data *data = NULL;
3176 char *string = NULL;
3180 CHECK(n_instructions, EINVAL);
3181 CHECK(instructions, EINVAL);
3182 for (i = 0; i < n_instructions; i++)
3183 CHECK(instructions[i], EINVAL);
3185 /* Memory allocation. */
3186 instr = calloc(n_instructions, sizeof(struct instruction));
3192 data = calloc(n_instructions, sizeof(struct instruction_data));
3198 for (i = 0; i < n_instructions; i++) {
/* strdup: instr_translate's strtok_r mutates the string. */
3199 string = strdup(instructions[i]);
3205 err = instr_translate(p, a, string, &instr[i], &data[i]);
3212 err = instr_label_check(data, n_instructions);
3219 a->instructions = instr;
3220 a->n_instructions = n_instructions;
3222 p->instructions = instr;
3223 p->n_instructions = n_instructions;
/* One executor function per instruction opcode, indexed by enum value. */
3235 typedef void (*instr_exec_t)(struct rte_swx_pipeline *);
3237 static instr_exec_t instruction_table[] = {
3238 [INSTR_RX] = instr_rx_exec,
3239 [INSTR_TX] = instr_tx_exec,
3241 [INSTR_HDR_EXTRACT] = instr_hdr_extract_exec,
3242 [INSTR_HDR_EXTRACT2] = instr_hdr_extract2_exec,
3243 [INSTR_HDR_EXTRACT3] = instr_hdr_extract3_exec,
3244 [INSTR_HDR_EXTRACT4] = instr_hdr_extract4_exec,
3245 [INSTR_HDR_EXTRACT5] = instr_hdr_extract5_exec,
3246 [INSTR_HDR_EXTRACT6] = instr_hdr_extract6_exec,
3247 [INSTR_HDR_EXTRACT7] = instr_hdr_extract7_exec,
3248 [INSTR_HDR_EXTRACT8] = instr_hdr_extract8_exec,
3250 [INSTR_HDR_EMIT] = instr_hdr_emit_exec,
3251 [INSTR_HDR_EMIT_TX] = instr_hdr_emit_tx_exec,
3252 [INSTR_HDR_EMIT2_TX] = instr_hdr_emit2_tx_exec,
3253 [INSTR_HDR_EMIT3_TX] = instr_hdr_emit3_tx_exec,
3254 [INSTR_HDR_EMIT4_TX] = instr_hdr_emit4_tx_exec,
3255 [INSTR_HDR_EMIT5_TX] = instr_hdr_emit5_tx_exec,
3256 [INSTR_HDR_EMIT6_TX] = instr_hdr_emit6_tx_exec,
3257 [INSTR_HDR_EMIT7_TX] = instr_hdr_emit7_tx_exec,
3258 [INSTR_HDR_EMIT8_TX] = instr_hdr_emit8_tx_exec,
3260 [INSTR_HDR_VALIDATE] = instr_hdr_validate_exec,
3261 [INSTR_HDR_INVALIDATE] = instr_hdr_invalidate_exec,
3263 [INSTR_MOV] = instr_mov_exec,
3264 [INSTR_MOV_S] = instr_mov_s_exec,
3265 [INSTR_MOV_I] = instr_mov_i_exec,
3267 [INSTR_DMA_HT] = instr_dma_ht_exec,
3268 [INSTR_DMA_HT2] = instr_dma_ht2_exec,
3269 [INSTR_DMA_HT3] = instr_dma_ht3_exec,
3270 [INSTR_DMA_HT4] = instr_dma_ht4_exec,
3271 [INSTR_DMA_HT5] = instr_dma_ht5_exec,
3272 [INSTR_DMA_HT6] = instr_dma_ht6_exec,
3273 [INSTR_DMA_HT7] = instr_dma_ht7_exec,
3274 [INSTR_DMA_HT8] = instr_dma_ht8_exec,
3276 [INSTR_ALU_ADD] = instr_alu_add_exec,
3277 [INSTR_ALU_ADD_MH] = instr_alu_add_mh_exec,
3278 [INSTR_ALU_ADD_HM] = instr_alu_add_hm_exec,
3279 [INSTR_ALU_ADD_HH] = instr_alu_add_hh_exec,
3280 [INSTR_ALU_ADD_MI] = instr_alu_add_mi_exec,
3281 [INSTR_ALU_ADD_HI] = instr_alu_add_hi_exec,
/* Dispatch the current thread's instruction via the opcode table. */
3285 instr_exec(struct rte_swx_pipeline *p)
3287 struct thread *t = &p->threads[p->thread_id];
3288 struct instruction *ip = t->ip;
3289 instr_exec_t instr = instruction_table[ip->type];
/* Look up a registered action by name; NULL if not found. */
3297 static struct action *
3298 action_find(struct rte_swx_pipeline *p, const char *name)
3300 struct action *elem;
3305 TAILQ_FOREACH(elem, &p->actions, node)
3306 if (strcmp(elem->name, name) == 0)
/* Look up a field of the action's argument struct (NULL if no args). */
3312 static struct field *
3313 action_field_find(struct action *a, const char *name)
3315 return a->st ? struct_type_field_find(a->st, name) : NULL;
/* Resolve a "t.<field>" (table entry / action argument) reference. */
3318 static struct field *
3319 action_field_parse(struct action *action, const char *name)
3321 if (name[0] != 't' || name[1] != '.')
3324 return action_field_find(action, &name[2]);
/*
 * Register a new action: optional argument struct type, plus its instruction
 * list which is translated immediately via instruction_config().
 */
3328 rte_swx_pipeline_action_config(struct rte_swx_pipeline *p,
3330 const char *args_struct_type_name,
3331 const char **instructions,
3332 uint32_t n_instructions)
3334 struct struct_type *args_struct_type;
3340 CHECK_NAME(name, EINVAL);
3341 CHECK(!action_find(p, name), EEXIST);
/* Argument struct type is optional (NULL => action takes no arguments). */
3343 if (args_struct_type_name) {
3344 CHECK_NAME(args_struct_type_name, EINVAL);
3345 args_struct_type = struct_type_find(p, args_struct_type_name);
3346 CHECK(args_struct_type, EINVAL);
3348 args_struct_type = NULL;
3351 /* Node allocation. */
3352 a = calloc(1, sizeof(struct action));
3355 /* Node initialization. */
3356 strcpy(a->name, name);
3357 a->st = args_struct_type;
3358 a->id = p->n_actions;
3360 /* Instruction translation. */
3361 err = instruction_config(p, a, instructions, n_instructions);
3367 /* Node add to tailq. */
3368 TAILQ_INSERT_TAIL(&p->actions, a, node);
/*
 * Build step: flatten the action list into an array indexed by action ID so
 * the data path can fetch an action's instructions in O(1).
 */
3375 action_build(struct rte_swx_pipeline *p)
3377 struct action *action;
3379 p->action_instructions = calloc(p->n_actions,
3380 sizeof(struct instruction *));
3381 CHECK(p->action_instructions, ENOMEM);
3383 TAILQ_FOREACH(action, &p->actions, node)
3384 p->action_instructions[action->id] = action->instructions;
/* Undo action_build(). */
3390 action_build_free(struct rte_swx_pipeline *p)
3392 free(p->action_instructions);
3393 p->action_instructions = NULL;
/* Full action teardown: drain the list, freeing each action's code. */
3397 action_free(struct rte_swx_pipeline *p)
3399 action_build_free(p);
3402 struct action *action;
3404 action = TAILQ_FIRST(&p->actions);
3408 TAILQ_REMOVE(&p->actions, action, node);
3409 free(action->instructions);
/* Look up a registered table type by name; NULL if not found. */
3417 static struct table_type *
3418 table_type_find(struct rte_swx_pipeline *p, const char *name)
3420 struct table_type *elem;
3422 TAILQ_FOREACH(elem, &p->table_types, node)
3423 if (strcmp(elem->name, name) == 0)
/*
 * Pick a table type for the given match type: the recommended one when it
 * exists with the right match type, otherwise the first registered type
 * with a matching match type.
 */
3429 static struct table_type *
3430 table_type_resolve(struct rte_swx_pipeline *p,
3431 const char *recommended_type_name,
3432 enum rte_swx_table_match_type match_type)
3434 struct table_type *elem;
3436 /* Only consider the recommended type if the match type is correct. */
3437 if (recommended_type_name)
3438 TAILQ_FOREACH(elem, &p->table_types, node)
3439 if (!strcmp(elem->name, recommended_type_name) &&
3440 (elem->match_type == match_type))
3443 /* Ignore the recommended type and get the first element with this match
3446 TAILQ_FOREACH(elem, &p->table_types, node)
3447 if (elem->match_type == match_type)
/* Look up a table by name; NULL if not found. */
3453 static struct table *
3454 table_find(struct rte_swx_pipeline *p, const char *name)
3458 TAILQ_FOREACH(elem, &p->tables, node)
3459 if (strcmp(elem->name, name) == 0)
/* Look up a table by numeric ID; NULL if not found. */
3465 static struct table *
3466 table_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
3468 struct table *table = NULL;
3470 TAILQ_FOREACH(table, &p->tables, node)
3471 if (table->id == id)
/*
 * Register a table implementation (ops vtable) under a name and match type.
 * The create/lkp/free callbacks are mandatory.
 */
3478 rte_swx_pipeline_table_type_register(struct rte_swx_pipeline *p,
3480 enum rte_swx_table_match_type match_type,
3481 struct rte_swx_table_ops *ops)
3483 struct table_type *elem;
3487 CHECK_NAME(name, EINVAL);
3488 CHECK(!table_type_find(p, name), EEXIST);
3491 CHECK(ops->create, EINVAL);
3492 CHECK(ops->lkp, EINVAL);
3493 CHECK(ops->free, EINVAL);
3495 /* Node allocation. */
3496 elem = calloc(1, sizeof(struct table_type));
3497 CHECK(elem, ENOMEM);
3499 /* Node initialization. */
3500 strcpy(elem->name, name);
3501 elem->match_type = match_type;
/* Copy the ops by value: caller's struct need not outlive this call. */
3502 memcpy(&elem->ops, ops, sizeof(*ops));
3504 /* Node add to tailq. */
3505 TAILQ_INSERT_TAIL(&p->table_types, elem, node);
/*
 * Classify a table's overall match type from its fields: all-exact => EXACT;
 * a single LPM field in last position => LPM; anything else => WILDCARD.
 */
3510 static enum rte_swx_table_match_type
3511 table_match_type_resolve(struct rte_swx_match_field_params *fields,
3516 for (i = 0; i < n_fields; i++)
3517 if (fields[i].match_type != RTE_SWX_TABLE_MATCH_EXACT)
3521 return RTE_SWX_TABLE_MATCH_EXACT;
3523 if ((i == n_fields - 1) &&
3524 (fields[i].match_type == RTE_SWX_TABLE_MATCH_LPM))
3525 return RTE_SWX_TABLE_MATCH_LPM;
3527 return RTE_SWX_TABLE_MATCH_WILDCARD;
/*
 * Create a table: validate the match fields (all from one header, or all
 * from metadata, with strictly increasing offsets), validate the actions and
 * default action, resolve the table type, then allocate and link the node.
 */
3531 rte_swx_pipeline_table_config(struct rte_swx_pipeline *p,
3533 struct rte_swx_pipeline_table_params *params,
3534 const char *recommended_table_type_name,
3538 struct table_type *type;
3540 struct action *default_action;
3541 struct header *header = NULL;
3543 uint32_t offset_prev = 0, action_data_size_max = 0, i;
3547 CHECK_NAME(name, EINVAL);
3548 CHECK(!table_find(p, name), EEXIST);
3550 CHECK(params, EINVAL);
/* Match field checks. */
3553 CHECK(!params->n_fields || params->fields, EINVAL);
3554 for (i = 0; i < params->n_fields; i++) {
/* NOTE(review): "¶ms" is a mojibake of "&params" (HTML entity) introduced
 * by extraction — restore "&params->fields[i]" in the real file. */
3555 struct rte_swx_match_field_params *field = ¶ms->fields[i];
3557 struct field *hf, *mf;
3560 CHECK_NAME(field->name, EINVAL);
/* A field is either a header field or a metadata field. */
3562 hf = header_field_parse(p, field->name, &h);
3563 mf = metadata_field_parse(p, field->name);
3564 CHECK(hf || mf, EINVAL);
3566 offset = hf ? hf->offset : mf->offset;
/* First field fixes whether the key is header- or metadata-based. */
3569 is_header = hf ? 1 : 0;
3570 header = hf ? h : NULL;
3571 offset_prev = offset;
/* Later fields must come from the same header (or all metadata)... */
3576 CHECK((is_header && hf && (h->id == header->id)) ||
3577 (!is_header && mf), EINVAL);
/* ...and be listed in strictly increasing offset order. */
3579 CHECK(offset > offset_prev, EINVAL);
3580 offset_prev = offset;
3583 /* Action checks. */
3584 CHECK(params->n_actions, EINVAL);
3585 CHECK(params->action_names, EINVAL);
3586 for (i = 0; i < params->n_actions; i++) {
3587 const char *action_name = params->action_names[i];
3589 uint32_t action_data_size;
3591 CHECK(action_name, EINVAL);
3593 a = action_find(p, action_name);
/* Track the largest argument struct among this table's actions. */
3596 action_data_size = a->st ? a->st->n_bits / 8 : 0;
3597 if (action_data_size > action_data_size_max)
3598 action_data_size_max = action_data_size;
/* Default action must be one of the table's actions. */
3601 CHECK(params->default_action_name, EINVAL);
/* NOTE(review): loop bound p->n_actions indexes params->action_names[]
 * (length params->n_actions) — looks like it should be params->n_actions
 * to avoid an out-of-bounds read when p->n_actions is larger; confirm. */
3602 for (i = 0; i < p->n_actions; i++)
3603 if (!strcmp(params->action_names[i],
3604 params->default_action_name))
3606 CHECK(i < params->n_actions, EINVAL);
3607 default_action = action_find(p, params->default_action_name);
3608 CHECK((default_action->st && params->default_action_data) ||
3609 !params->default_action_data, EINVAL);
3611 /* Table type checks. */
3612 if (params->n_fields) {
3613 enum rte_swx_table_match_type match_type;
3615 match_type = table_match_type_resolve(params->fields,
3617 type = table_type_resolve(p,
3618 recommended_table_type_name,
3620 CHECK(type, EINVAL);
3625 /* Memory allocation. */
3626 t = calloc(1, sizeof(struct table));
3629 t->fields = calloc(params->n_fields, sizeof(struct match_field));
3635 t->actions = calloc(params->n_actions, sizeof(struct action *));
3642 if (action_data_size_max) {
3643 t->default_action_data = calloc(1, action_data_size_max);
3644 if (!t->default_action_data) {
3652 /* Node initialization. */
3653 strcpy(t->name, name);
3654 if (args && args[0])
3655 strcpy(t->args, args);
3658 for (i = 0; i < params->n_fields; i++) {
/* NOTE(review): same "¶ms" mojikake of "&params" as above — restore. */
3659 struct rte_swx_match_field_params *field = ¶ms->fields[i];
3660 struct match_field *f = &t->fields[i];
3662 f->match_type = field->match_type;
3663 f->field = is_header ?
3664 header_field_parse(p, field->name, NULL) :
3665 metadata_field_parse(p, field->name);
3667 t->n_fields = params->n_fields;
3668 t->is_header = is_header;
3671 for (i = 0; i < params->n_actions; i++)
3672 t->actions[i] = action_find(p, params->action_names[i]);
3673 t->default_action = default_action;
3674 if (default_action->st)
3675 memcpy(t->default_action_data,
3676 params->default_action_data,
3677 default_action->st->n_bits / 8);
3678 t->n_actions = params->n_actions;
3679 t->default_action_is_const = params->default_action_is_const;
3680 t->action_data_size_max = action_data_size_max;
3683 t->id = p->n_tables;
3685 /* Node add to tailq. */
3686 TAILQ_INSERT_TAIL(&p->tables, t, node);
/* Build the rte_swx_table_params handed to the low-level table implementation:
 * key offset/size/mask derived from the table's match fields, plus the action
 * data size and maximum key count. Caller owns the result and must release it
 * with table_params_free(). (Elided excerpt: some lines not visible.) */
3692 static struct rte_swx_table_params *
3693 table_params_get(struct table *table)
3695 struct rte_swx_table_params *params;
3696 struct field *first, *last;
3698 uint32_t key_size, key_offset, action_data_size, i;
3700 /* Memory allocation. */
3701 params = calloc(1, sizeof(struct rte_swx_table_params));
3705 /* Key offset and size. */
/* Fields are stored in increasing offset order (enforced at table-config
 * time), so the key spans [first, last]. Offsets/sizes are in bits; the
 * divisions by 8 convert to bytes. */
3706 first = table->fields[0].field;
3707 last = table->fields[table->n_fields - 1].field;
3708 key_offset = first->offset / 8;
3709 key_size = (last->offset + last->n_bits - first->offset) / 8;
3711 /* Memory allocation. */
3712 key_mask = calloc(1, key_size);
/* Set the mask bytes covered by each match field; gaps between fields stay
 * zero (don't-care). */
3719 for (i = 0; i < table->n_fields; i++) {
3720 struct field *f = table->fields[i].field;
3721 uint32_t start = (f->offset - first->offset) / 8;
3722 size_t size = f->n_bits / 8;
3724 memset(&key_mask[start], 0xFF, size);
3727 /* Action data size. */
/* The per-entry action data buffer must fit the largest action's args. */
3728 action_data_size = 0;
3729 for (i = 0; i < table->n_actions; i++) {
3730 struct action *action = table->actions[i];
3731 uint32_t ads = action->st ? action->st->n_bits / 8 : 0;
3733 if (ads > action_data_size)
3734 action_data_size = ads;
/* Fill in the output structure; key_mask ownership transfers to params. */
3738 params->match_type = table->type->match_type;
3739 params->key_size = key_size;
3740 params->key_offset = key_offset;
3741 params->key_mask0 = key_mask;
3742 params->action_data_size = action_data_size;
3743 params->n_keys_max = table->size;
/* Release a params object produced by table_params_get(), including the key
 * mask it owns. (Elided excerpt: surrounding lines not visible.) */
3749 table_params_free(struct rte_swx_table_params *params)
3754 free(params->key_mask0);
/* Allocate and populate the pipeline's per-table runtime state array: for each
 * table, create the underlying table object via its type's ops and set up the
 * default action data/ID. Returns 0 on success, negative errno via CHECK on
 * failure. (Elided excerpt: some lines not visible.) */
3759 table_state_build(struct rte_swx_pipeline *p)
3761 struct table *table;
/* One state slot per table, indexed by table->id. */
3763 p->table_state = calloc(p->n_tables,
3764 sizeof(struct rte_swx_table_state));
3765 CHECK(p->table_state, ENOMEM);
3767 TAILQ_FOREACH(table, &p->tables, node) {
3768 struct rte_swx_table_state *ts = &p->table_state[table->id];
3771 struct rte_swx_table_params *params;
3774 params = table_params_get(table);
3775 CHECK(params, ENOMEM);
/* The params object is only needed for creation; free it either way. */
3777 ts->obj = table->type->ops.create(params,
3782 table_params_free(params);
3783 CHECK(ts->obj, ENODEV);
3786 /* ts->default_action_data. */
3787 if (table->action_data_size_max) {
3788 ts->default_action_data =
3789 malloc(table->action_data_size_max);
3790 CHECK(ts->default_action_data, ENOMEM);
/* Seed the runtime copy from the build-time default action data. */
3792 memcpy(ts->default_action_data,
3793 table->default_action_data,
3794 table->action_data_size_max);
3797 /* ts->default_action_id. */
3798 ts->default_action_id = table->default_action->id;
/* Tear down everything table_state_build() created; safe to call on a
 * partially built state (NULL array is a no-op). (Elided excerpt.) */
3805 table_state_build_free(struct rte_swx_pipeline *p)
3809 if (!p->table_state)
3812 for (i = 0; i < p->n_tables; i++) {
3813 struct rte_swx_table_state *ts = &p->table_state[i];
3814 struct table *table = table_find_by_id(p, i);
/* Only destroy the table object if one was actually created. */
3817 if (table->type && ts->obj)
3818 table->type->ops.free(ts->obj);
3820 /* ts->default_action_data. */
3821 free(ts->default_action_data);
3824 free(p->table_state);
3825 p->table_state = NULL;
/* Public-facing free path for table state; currently just delegates to the
 * build-free helper. (Elided excerpt: surrounding lines not visible.) */
3829 table_state_free(struct rte_swx_pipeline *p)
3831 table_state_build_free(p);
/* Lookup stub used for tables with no match fields (no underlying table
 * object): never produces a hit and completes immediately. All parameters
 * are intentionally unused. */
3835 table_stub_lkp(void *table __rte_unused,
3836 void *mailbox __rte_unused,
3837 uint8_t **key __rte_unused,
3838 uint64_t *action_id __rte_unused,
3839 uint8_t **action_data __rte_unused,
3843 return 1; /* DONE. */
/* Build per-thread table runtime data: for every thread, allocate one
 * table_runtime slot per table and wire up the lookup function, mailbox and
 * key pointer. (Elided excerpt: some lines not visible.) */
3847 table_build(struct rte_swx_pipeline *p)
3851 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
3852 struct thread *t = &p->threads[i];
3853 struct table *table;
3855 t->tables = calloc(p->n_tables, sizeof(struct table_runtime));
3856 CHECK(t->tables, ENOMEM);
3858 TAILQ_FOREACH(table, &p->tables, node) {
3859 struct table_runtime *r = &t->tables[table->id];
/* Real table: use the type's lookup op with a per-thread mailbox
 * sized by the implementation. */
3864 size = table->type->ops.mailbox_size_get();
3867 r->func = table->type->ops.lkp;
3871 r->mailbox = calloc(1, size);
3872 CHECK(r->mailbox, ENOMEM);
/* Key is read from the header struct when the key fields live in a
 * header, otherwise from the thread's metadata struct. */
3876 r->key = table->is_header ?
3877 &t->structs[table->header->struct_id] :
3878 &t->structs[p->metadata_struct_id];
/* Keyless table: fall back to the always-miss stub. */
3880 r->func = table_stub_lkp;
/* Undo table_build(): walk every thread's table_runtime array and release the
 * per-table resources. (Elided excerpt: some lines not visible.) */
3889 table_build_free(struct rte_swx_pipeline *p)
3893 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
3894 struct thread *t = &p->threads[i];
3900 for (j = 0; j < p->n_tables; j++) {
3901 struct table_runtime *r = &t->tables[j];
/* Full teardown of the table subsystem: first the per-thread runtime data,
 * then every table node, then every registered table type. (Elided excerpt:
 * loop constructs and some frees are not visible.) */
3912 table_free(struct rte_swx_pipeline *p)
3914 table_build_free(p);
/* Drain the tables tailq, freeing each node's owned arrays. */
3920 elem = TAILQ_FIRST(&p->tables);
3924 TAILQ_REMOVE(&p->tables, elem, node);
3926 free(elem->actions);
3927 free(elem->default_action_data);
/* Drain the table-type registry the same way. */
3933 struct table_type *elem;
3935 elem = TAILQ_FIRST(&p->table_types);
3939 TAILQ_REMOVE(&p->table_types, elem, node);
/* Public API: allocate a new pipeline object and initialize all of its
 * registries (empty tailqs) and bookkeeping fields. On success *p receives
 * the new pipeline. (Elided excerpt: some lines not visible.) */
3948 rte_swx_pipeline_config(struct rte_swx_pipeline **p, int numa_node)
3950 struct rte_swx_pipeline *pipeline;
3952 /* Check input parameters. */
3955 /* Memory allocation. */
3956 pipeline = calloc(1, sizeof(struct rte_swx_pipeline));
3957 CHECK(pipeline, ENOMEM);
3959 /* Initialization. */
/* Every registry starts empty; objects are appended as the application
 * configures the pipeline. */
3960 TAILQ_INIT(&pipeline->struct_types);
3961 TAILQ_INIT(&pipeline->port_in_types);
3962 TAILQ_INIT(&pipeline->ports_in);
3963 TAILQ_INIT(&pipeline->port_out_types);
3964 TAILQ_INIT(&pipeline->ports_out);
3965 TAILQ_INIT(&pipeline->extern_types);
3966 TAILQ_INIT(&pipeline->extern_objs);
3967 TAILQ_INIT(&pipeline->extern_funcs);
3968 TAILQ_INIT(&pipeline->headers);
3969 TAILQ_INIT(&pipeline->actions);
3970 TAILQ_INIT(&pipeline->table_types);
3971 TAILQ_INIT(&pipeline->tables);
3973 pipeline->n_structs = 1; /* Struct 0 is reserved for action_data. */
3974 pipeline->numa_node = numa_node;
/* Public API: destroy a pipeline and everything it owns. (Elided excerpt:
 * only a subset of the free calls is visible here.) */
3981 rte_swx_pipeline_free(struct rte_swx_pipeline *p)
3986 free(p->instructions);
3988 table_state_free(p);
3993 extern_func_free(p);
/* Public API: translate the textual instruction list into the pipeline's
 * internal program, then reset every thread's instruction pointer so
 * execution starts from the beginning. (Elided excerpt.) */
4003 rte_swx_pipeline_instructions_config(struct rte_swx_pipeline *p,
4004 const char **instructions,
4005 uint32_t n_instructions)
/* NULL action argument = these are the pipeline-level instructions. */
4010 err = instruction_config(p, NULL, instructions, n_instructions);
4014 /* Thread instruction pointer reset. */
4015 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
4016 struct thread *t = &p->threads[i];
4018 thread_ip_reset(p, t);
/* Public API: run every subsystem's build step in dependency order. May only
 * be called once per pipeline (guarded by build_done). On any failure, the
 * error path below unwinds all build steps in reverse order; each *_build_free
 * is safe to call even if its build step never ran. (Elided excerpt: the
 * status checks/gotos between steps are not visible.) */
4025 rte_swx_pipeline_build(struct rte_swx_pipeline *p)
4030 CHECK(p->build_done == 0, EEXIST);
4032 status = port_in_build(p);
4036 status = port_out_build(p);
4040 status = struct_build(p);
4044 status = extern_obj_build(p);
4048 status = extern_func_build(p);
4052 status = header_build(p);
4056 status = metadata_build(p);
4060 status = action_build(p);
4064 status = table_build(p);
4068 status = table_state_build(p);
/* Error unwind: reverse order of the build steps above. */
4076 table_state_build_free(p);
4077 table_build_free(p);
4078 action_build_free(p);
4079 metadata_build_free(p);
4080 header_build_free(p);
4081 extern_func_build_free(p);
4082 extern_obj_build_free(p);
4083 port_out_build_free(p);
4084 port_in_build_free(p);
4085 struct_build_free(p);
/* Public API: execute n_instructions steps of the pipeline's data path.
 * (Elided excerpt: the loop body is not visible here.) */
4091 rte_swx_pipeline_run(struct rte_swx_pipeline *p, uint32_t n_instructions)
4095 for (i = 0; i < n_instructions; i++)
/* Public API: expose the pipeline's table state array to the control plane.
 * Only valid after a successful build. (Elided excerpt: return statements
 * are not visible here.) */
4103 rte_swx_pipeline_table_state_get(struct rte_swx_pipeline *p,
4104 struct rte_swx_table_state **table_state)
4106 if (!p || !table_state || !p->build_done)
4109 *table_state = p->table_state;
/* Public API: install a caller-provided table state array (e.g. after a
 * control-plane table update). Only valid after a successful build.
 * NOTE(review): the previous table_state pointer is overwritten without being
 * freed here — presumably ownership swapping is handled by the caller; the
 * function tail is not visible in this excerpt. */
4114 rte_swx_pipeline_table_state_set(struct rte_swx_pipeline *p,
4115 struct rte_swx_table_state *table_state)
4117 if (!p || !table_state || !p->build_done)
4120 p->table_state = table_state;