1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Intel Corporation
11 #include <rte_common.h>
12 #include <rte_prefetch.h>
13 #include <rte_byteorder.h>
15 #include "rte_swx_pipeline.h"
16 #include "rte_swx_ctl.h"
18 #define CHECK(condition, err_code) \
24 #define CHECK_NAME(name, err_code) \
25 CHECK((name) && (name)[0], err_code)
32 #define TRACE(...) printf(__VA_ARGS__)
37 #define ntoh64(x) rte_be_to_cpu_64(x)
38 #define hton64(x) rte_cpu_to_be_64(x)
44 char name[RTE_SWX_NAME_SIZE];
50 TAILQ_ENTRY(struct_type) node;
51 char name[RTE_SWX_NAME_SIZE];
57 TAILQ_HEAD(struct_type_tailq, struct_type);
63 TAILQ_ENTRY(port_in_type) node;
64 char name[RTE_SWX_NAME_SIZE];
65 struct rte_swx_port_in_ops ops;
68 TAILQ_HEAD(port_in_type_tailq, port_in_type);
71 TAILQ_ENTRY(port_in) node;
72 struct port_in_type *type;
77 TAILQ_HEAD(port_in_tailq, port_in);
79 struct port_in_runtime {
80 rte_swx_port_in_pkt_rx_t pkt_rx;
87 struct port_out_type {
88 TAILQ_ENTRY(port_out_type) node;
89 char name[RTE_SWX_NAME_SIZE];
90 struct rte_swx_port_out_ops ops;
93 TAILQ_HEAD(port_out_type_tailq, port_out_type);
96 TAILQ_ENTRY(port_out) node;
97 struct port_out_type *type;
102 TAILQ_HEAD(port_out_tailq, port_out);
104 struct port_out_runtime {
105 rte_swx_port_out_pkt_tx_t pkt_tx;
106 rte_swx_port_out_flush_t flush;
113 struct extern_type_member_func {
114 TAILQ_ENTRY(extern_type_member_func) node;
115 char name[RTE_SWX_NAME_SIZE];
116 rte_swx_extern_type_member_func_t func;
120 TAILQ_HEAD(extern_type_member_func_tailq, extern_type_member_func);
123 TAILQ_ENTRY(extern_type) node;
124 char name[RTE_SWX_NAME_SIZE];
125 struct struct_type *mailbox_struct_type;
126 rte_swx_extern_type_constructor_t constructor;
127 rte_swx_extern_type_destructor_t destructor;
128 struct extern_type_member_func_tailq funcs;
132 TAILQ_HEAD(extern_type_tailq, extern_type);
135 TAILQ_ENTRY(extern_obj) node;
136 char name[RTE_SWX_NAME_SIZE];
137 struct extern_type *type;
143 TAILQ_HEAD(extern_obj_tailq, extern_obj);
145 #ifndef RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX
146 #define RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX 8
149 struct extern_obj_runtime {
152 rte_swx_extern_type_member_func_t funcs[RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX];
159 TAILQ_ENTRY(extern_func) node;
160 char name[RTE_SWX_NAME_SIZE];
161 struct struct_type *mailbox_struct_type;
162 rte_swx_extern_func_t func;
167 TAILQ_HEAD(extern_func_tailq, extern_func);
169 struct extern_func_runtime {
171 rte_swx_extern_func_t func;
178 TAILQ_ENTRY(header) node;
179 char name[RTE_SWX_NAME_SIZE];
180 struct struct_type *st;
185 TAILQ_HEAD(header_tailq, header);
187 struct header_runtime {
191 struct header_out_runtime {
201 /* Packet headers are always in Network Byte Order (NBO), i.e. big endian.
202 * Packet meta-data fields are always assumed to be in Host Byte Order (HBO).
203 * Table entry fields can be in either NBO or HBO; they are assumed to be in HBO
204 * when transferred to packet meta-data and in NBO when transferred to packet
208 /* Notation conventions:
209 * -Header field: H = h.header.field (dst/src)
210 * -Meta-data field: M = m.field (dst/src)
211 * -Extern object mailbox field: E = e.field (dst/src)
212 * -Extern function mailbox field: F = f.field (dst/src)
213 * -Table action data field: T = t.field (src only)
214 * -Immediate value: I = 32-bit unsigned value (src only)
217 enum instruction_type {
224 /* extract h.header */
245 /* validate h.header */
248 /* invalidate h.header */
249 INSTR_HDR_INVALIDATE,
253 * dst = HMEF, src = HMEFTI
255 INSTR_MOV, /* dst = MEF, src = MEFT */
256 INSTR_MOV_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
257 INSTR_MOV_I, /* dst = HMEF, src = I */
259 /* dma h.header t.field
260 * memcpy(h.header, t.field, sizeof(h.header))
272 struct instr_operand {
287 uint8_t header_id[8];
288 uint8_t struct_id[8];
293 struct instr_hdr_validity {
297 struct instr_dst_src {
298 struct instr_operand dst;
300 struct instr_operand src;
307 uint8_t header_id[8];
308 uint8_t struct_id[8];
319 enum instruction_type type;
322 struct instr_hdr_validity valid;
323 struct instr_dst_src mov;
324 struct instr_dma dma;
328 struct instruction_data {
329 char label[RTE_SWX_NAME_SIZE];
330 char jmp_label[RTE_SWX_NAME_SIZE];
331 uint32_t n_users; /* user = jmp instruction to this instruction. */
339 TAILQ_ENTRY(action) node;
340 char name[RTE_SWX_NAME_SIZE];
341 struct struct_type *st;
342 struct instruction *instructions;
343 uint32_t n_instructions;
347 TAILQ_HEAD(action_tailq, action);
353 TAILQ_ENTRY(table_type) node;
354 char name[RTE_SWX_NAME_SIZE];
355 enum rte_swx_table_match_type match_type;
356 struct rte_swx_table_ops ops;
359 TAILQ_HEAD(table_type_tailq, table_type);
362 enum rte_swx_table_match_type match_type;
367 TAILQ_ENTRY(table) node;
368 char name[RTE_SWX_NAME_SIZE];
369 char args[RTE_SWX_NAME_SIZE];
370 struct table_type *type; /* NULL when n_fields == 0. */
373 struct match_field *fields;
375 int is_header; /* Only valid when n_fields > 0. */
376 struct header *header; /* Only valid when n_fields > 0. */
379 struct action **actions;
380 struct action *default_action;
381 uint8_t *default_action_data;
383 int default_action_is_const;
384 uint32_t action_data_size_max;
390 TAILQ_HEAD(table_tailq, table);
392 struct table_runtime {
393 rte_swx_table_lookup_t func;
403 struct rte_swx_pkt pkt;
409 /* Packet headers. */
410 struct header_runtime *headers; /* Extracted or generated headers. */
411 struct header_out_runtime *headers_out; /* Emitted headers. */
412 uint8_t *header_storage;
413 uint8_t *header_out_storage;
414 uint64_t valid_headers;
415 uint32_t n_headers_out;
417 /* Packet meta-data. */
421 struct table_runtime *tables;
422 struct rte_swx_table_state *table_state;
424 int hit; /* 0 = Miss, 1 = Hit. */
426 /* Extern objects and functions. */
427 struct extern_obj_runtime *extern_objs;
428 struct extern_func_runtime *extern_funcs;
431 struct instruction *ip;
432 struct instruction *ret;
435 #define MASK64_BIT_GET(mask, pos) ((mask) & (1LLU << (pos)))
436 #define MASK64_BIT_SET(mask, pos) ((mask) | (1LLU << (pos)))
437 #define MASK64_BIT_CLR(mask, pos) ((mask) & ~(1LLU << (pos)))
439 #define MOV(thread, ip) \
441 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
442 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
443 uint64_t dst64 = *dst64_ptr; \
444 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
446 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
447 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
448 uint64_t src64 = *src64_ptr; \
449 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \
450 uint64_t src = src64 & src64_mask; \
452 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
455 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
457 #define MOV_S(thread, ip) \
459 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
460 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
461 uint64_t dst64 = *dst64_ptr; \
462 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
464 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
465 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
466 uint64_t src64 = *src64_ptr; \
467 uint64_t src = ntoh64(src64) >> (64 - (ip)->mov.src.n_bits); \
469 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
478 #define MOV_I(thread, ip) \
480 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
481 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
482 uint64_t dst64 = *dst64_ptr; \
483 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
485 uint64_t src = (ip)->mov.src_val; \
487 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
490 #define METADATA_READ(thread, offset, n_bits) \
492 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
493 uint64_t m64 = *m64_ptr; \
494 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
498 #define METADATA_WRITE(thread, offset, n_bits, value) \
500 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
501 uint64_t m64 = *m64_ptr; \
502 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
504 uint64_t m_new = value; \
506 *m64_ptr = (m64 & ~m64_mask) | (m_new & m64_mask); \
509 #ifndef RTE_SWX_PIPELINE_THREADS_MAX
510 #define RTE_SWX_PIPELINE_THREADS_MAX 16
513 struct rte_swx_pipeline {
514 struct struct_type_tailq struct_types;
515 struct port_in_type_tailq port_in_types;
516 struct port_in_tailq ports_in;
517 struct port_out_type_tailq port_out_types;
518 struct port_out_tailq ports_out;
519 struct extern_type_tailq extern_types;
520 struct extern_obj_tailq extern_objs;
521 struct extern_func_tailq extern_funcs;
522 struct header_tailq headers;
523 struct struct_type *metadata_st;
524 uint32_t metadata_struct_id;
525 struct action_tailq actions;
526 struct table_type_tailq table_types;
527 struct table_tailq tables;
529 struct port_in_runtime *in;
530 struct port_out_runtime *out;
531 struct instruction **action_instructions;
532 struct rte_swx_table_state *table_state;
533 struct instruction *instructions;
534 struct thread threads[RTE_SWX_PIPELINE_THREADS_MAX];
538 uint32_t n_ports_out;
539 uint32_t n_extern_objs;
540 uint32_t n_extern_funcs;
546 uint32_t n_instructions;
554 static struct struct_type *
555 struct_type_find(struct rte_swx_pipeline *p, const char *name)
557 struct struct_type *elem;
559 TAILQ_FOREACH(elem, &p->struct_types, node)
560 if (strcmp(elem->name, name) == 0)
566 static struct field *
567 struct_type_field_find(struct struct_type *st, const char *name)
571 for (i = 0; i < st->n_fields; i++) {
572 struct field *f = &st->fields[i];
574 if (strcmp(f->name, name) == 0)
582 rte_swx_pipeline_struct_type_register(struct rte_swx_pipeline *p,
584 struct rte_swx_field_params *fields,
587 struct struct_type *st;
591 CHECK_NAME(name, EINVAL);
592 CHECK(fields, EINVAL);
593 CHECK(n_fields, EINVAL);
595 for (i = 0; i < n_fields; i++) {
596 struct rte_swx_field_params *f = &fields[i];
599 CHECK_NAME(f->name, EINVAL);
600 CHECK(f->n_bits, EINVAL);
601 CHECK(f->n_bits <= 64, EINVAL);
602 CHECK((f->n_bits & 7) == 0, EINVAL);
604 for (j = 0; j < i; j++) {
605 struct rte_swx_field_params *f_prev = &fields[j];
607 CHECK(strcmp(f->name, f_prev->name), EINVAL);
611 CHECK(!struct_type_find(p, name), EEXIST);
613 /* Node allocation. */
614 st = calloc(1, sizeof(struct struct_type));
617 st->fields = calloc(n_fields, sizeof(struct field));
623 /* Node initialization. */
624 strcpy(st->name, name);
625 for (i = 0; i < n_fields; i++) {
626 struct field *dst = &st->fields[i];
627 struct rte_swx_field_params *src = &fields[i];
629 strcpy(dst->name, src->name);
630 dst->n_bits = src->n_bits;
631 dst->offset = st->n_bits;
633 st->n_bits += src->n_bits;
635 st->n_fields = n_fields;
637 /* Node add to tailq. */
638 TAILQ_INSERT_TAIL(&p->struct_types, st, node);
644 struct_build(struct rte_swx_pipeline *p)
648 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
649 struct thread *t = &p->threads[i];
651 t->structs = calloc(p->n_structs, sizeof(uint8_t *));
652 CHECK(t->structs, ENOMEM);
659 struct_build_free(struct rte_swx_pipeline *p)
663 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
664 struct thread *t = &p->threads[i];
672 struct_free(struct rte_swx_pipeline *p)
674 struct_build_free(p);
678 struct struct_type *elem;
680 elem = TAILQ_FIRST(&p->struct_types);
684 TAILQ_REMOVE(&p->struct_types, elem, node);
693 static struct port_in_type *
694 port_in_type_find(struct rte_swx_pipeline *p, const char *name)
696 struct port_in_type *elem;
701 TAILQ_FOREACH(elem, &p->port_in_types, node)
702 if (strcmp(elem->name, name) == 0)
709 rte_swx_pipeline_port_in_type_register(struct rte_swx_pipeline *p,
711 struct rte_swx_port_in_ops *ops)
713 struct port_in_type *elem;
716 CHECK_NAME(name, EINVAL);
718 CHECK(ops->create, EINVAL);
719 CHECK(ops->free, EINVAL);
720 CHECK(ops->pkt_rx, EINVAL);
721 CHECK(ops->stats_read, EINVAL);
723 CHECK(!port_in_type_find(p, name), EEXIST);
725 /* Node allocation. */
726 elem = calloc(1, sizeof(struct port_in_type));
729 /* Node initialization. */
730 strcpy(elem->name, name);
731 memcpy(&elem->ops, ops, sizeof(*ops));
733 /* Node add to tailq. */
734 TAILQ_INSERT_TAIL(&p->port_in_types, elem, node);
739 static struct port_in *
740 port_in_find(struct rte_swx_pipeline *p, uint32_t port_id)
742 struct port_in *port;
744 TAILQ_FOREACH(port, &p->ports_in, node)
745 if (port->id == port_id)
752 rte_swx_pipeline_port_in_config(struct rte_swx_pipeline *p,
754 const char *port_type_name,
757 struct port_in_type *type = NULL;
758 struct port_in *port = NULL;
763 CHECK(!port_in_find(p, port_id), EINVAL);
765 CHECK_NAME(port_type_name, EINVAL);
766 type = port_in_type_find(p, port_type_name);
769 obj = type->ops.create(args);
772 /* Node allocation. */
773 port = calloc(1, sizeof(struct port_in));
776 /* Node initialization. */
781 /* Node add to tailq. */
782 TAILQ_INSERT_TAIL(&p->ports_in, port, node);
783 if (p->n_ports_in < port_id + 1)
784 p->n_ports_in = port_id + 1;
790 port_in_build(struct rte_swx_pipeline *p)
792 struct port_in *port;
795 CHECK(p->n_ports_in, EINVAL);
796 CHECK(rte_is_power_of_2(p->n_ports_in), EINVAL);
798 for (i = 0; i < p->n_ports_in; i++)
799 CHECK(port_in_find(p, i), EINVAL);
801 p->in = calloc(p->n_ports_in, sizeof(struct port_in_runtime));
802 CHECK(p->in, ENOMEM);
804 TAILQ_FOREACH(port, &p->ports_in, node) {
805 struct port_in_runtime *in = &p->in[port->id];
807 in->pkt_rx = port->type->ops.pkt_rx;
815 port_in_build_free(struct rte_swx_pipeline *p)
822 port_in_free(struct rte_swx_pipeline *p)
824 port_in_build_free(p);
828 struct port_in *port;
830 port = TAILQ_FIRST(&p->ports_in);
834 TAILQ_REMOVE(&p->ports_in, port, node);
835 port->type->ops.free(port->obj);
839 /* Input port types. */
841 struct port_in_type *elem;
843 elem = TAILQ_FIRST(&p->port_in_types);
847 TAILQ_REMOVE(&p->port_in_types, elem, node);
855 static struct port_out_type *
856 port_out_type_find(struct rte_swx_pipeline *p, const char *name)
858 struct port_out_type *elem;
863 TAILQ_FOREACH(elem, &p->port_out_types, node)
864 if (!strcmp(elem->name, name))
871 rte_swx_pipeline_port_out_type_register(struct rte_swx_pipeline *p,
873 struct rte_swx_port_out_ops *ops)
875 struct port_out_type *elem;
878 CHECK_NAME(name, EINVAL);
880 CHECK(ops->create, EINVAL);
881 CHECK(ops->free, EINVAL);
882 CHECK(ops->pkt_tx, EINVAL);
883 CHECK(ops->stats_read, EINVAL);
885 CHECK(!port_out_type_find(p, name), EEXIST);
887 /* Node allocation. */
888 elem = calloc(1, sizeof(struct port_out_type));
891 /* Node initialization. */
892 strcpy(elem->name, name);
893 memcpy(&elem->ops, ops, sizeof(*ops));
895 /* Node add to tailq. */
896 TAILQ_INSERT_TAIL(&p->port_out_types, elem, node);
901 static struct port_out *
902 port_out_find(struct rte_swx_pipeline *p, uint32_t port_id)
904 struct port_out *port;
906 TAILQ_FOREACH(port, &p->ports_out, node)
907 if (port->id == port_id)
914 rte_swx_pipeline_port_out_config(struct rte_swx_pipeline *p,
916 const char *port_type_name,
919 struct port_out_type *type = NULL;
920 struct port_out *port = NULL;
925 CHECK(!port_out_find(p, port_id), EINVAL);
927 CHECK_NAME(port_type_name, EINVAL);
928 type = port_out_type_find(p, port_type_name);
931 obj = type->ops.create(args);
934 /* Node allocation. */
935 port = calloc(1, sizeof(struct port_out));
938 /* Node initialization. */
943 /* Node add to tailq. */
944 TAILQ_INSERT_TAIL(&p->ports_out, port, node);
945 if (p->n_ports_out < port_id + 1)
946 p->n_ports_out = port_id + 1;
952 port_out_build(struct rte_swx_pipeline *p)
954 struct port_out *port;
957 CHECK(p->n_ports_out, EINVAL);
959 for (i = 0; i < p->n_ports_out; i++)
960 CHECK(port_out_find(p, i), EINVAL);
962 p->out = calloc(p->n_ports_out, sizeof(struct port_out_runtime));
963 CHECK(p->out, ENOMEM);
965 TAILQ_FOREACH(port, &p->ports_out, node) {
966 struct port_out_runtime *out = &p->out[port->id];
968 out->pkt_tx = port->type->ops.pkt_tx;
969 out->flush = port->type->ops.flush;
970 out->obj = port->obj;
977 port_out_build_free(struct rte_swx_pipeline *p)
984 port_out_free(struct rte_swx_pipeline *p)
986 port_out_build_free(p);
990 struct port_out *port;
992 port = TAILQ_FIRST(&p->ports_out);
996 TAILQ_REMOVE(&p->ports_out, port, node);
997 port->type->ops.free(port->obj);
1001 /* Output port types. */
1003 struct port_out_type *elem;
1005 elem = TAILQ_FIRST(&p->port_out_types);
1009 TAILQ_REMOVE(&p->port_out_types, elem, node);
1017 static struct extern_type *
1018 extern_type_find(struct rte_swx_pipeline *p, const char *name)
1020 struct extern_type *elem;
1022 TAILQ_FOREACH(elem, &p->extern_types, node)
1023 if (strcmp(elem->name, name) == 0)
1029 static struct extern_type_member_func *
1030 extern_type_member_func_find(struct extern_type *type, const char *name)
1032 struct extern_type_member_func *elem;
1034 TAILQ_FOREACH(elem, &type->funcs, node)
1035 if (strcmp(elem->name, name) == 0)
1041 static struct extern_obj *
1042 extern_obj_find(struct rte_swx_pipeline *p, const char *name)
1044 struct extern_obj *elem;
1046 TAILQ_FOREACH(elem, &p->extern_objs, node)
1047 if (strcmp(elem->name, name) == 0)
1053 static struct field *
1054 extern_obj_mailbox_field_parse(struct rte_swx_pipeline *p,
1056 struct extern_obj **object)
1058 struct extern_obj *obj;
1060 char *obj_name, *field_name;
1062 if ((name[0] != 'e') || (name[1] != '.'))
1065 obj_name = strdup(&name[2]);
1069 field_name = strchr(obj_name, '.');
1078 obj = extern_obj_find(p, obj_name);
1084 f = struct_type_field_find(obj->type->mailbox_struct_type, field_name);
1098 rte_swx_pipeline_extern_type_register(struct rte_swx_pipeline *p,
1100 const char *mailbox_struct_type_name,
1101 rte_swx_extern_type_constructor_t constructor,
1102 rte_swx_extern_type_destructor_t destructor)
1104 struct extern_type *elem;
1105 struct struct_type *mailbox_struct_type;
1109 CHECK_NAME(name, EINVAL);
1110 CHECK(!extern_type_find(p, name), EEXIST);
1112 CHECK_NAME(mailbox_struct_type_name, EINVAL);
1113 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
1114 CHECK(mailbox_struct_type, EINVAL);
1116 CHECK(constructor, EINVAL);
1117 CHECK(destructor, EINVAL);
1119 /* Node allocation. */
1120 elem = calloc(1, sizeof(struct extern_type));
1121 CHECK(elem, ENOMEM);
1123 /* Node initialization. */
1124 strcpy(elem->name, name);
1125 elem->mailbox_struct_type = mailbox_struct_type;
1126 elem->constructor = constructor;
1127 elem->destructor = destructor;
1128 TAILQ_INIT(&elem->funcs);
1130 /* Node add to tailq. */
1131 TAILQ_INSERT_TAIL(&p->extern_types, elem, node);
1137 rte_swx_pipeline_extern_type_member_func_register(struct rte_swx_pipeline *p,
1138 const char *extern_type_name,
1140 rte_swx_extern_type_member_func_t member_func)
1142 struct extern_type *type;
1143 struct extern_type_member_func *type_member;
1147 CHECK(extern_type_name, EINVAL);
1148 type = extern_type_find(p, extern_type_name);
1149 CHECK(type, EINVAL);
1150 CHECK(type->n_funcs < RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX, ENOSPC);
1152 CHECK(name, EINVAL);
1153 CHECK(!extern_type_member_func_find(type, name), EEXIST);
1155 CHECK(member_func, EINVAL);
1157 /* Node allocation. */
1158 type_member = calloc(1, sizeof(struct extern_type_member_func));
1159 CHECK(type_member, ENOMEM);
1161 /* Node initialization. */
1162 strcpy(type_member->name, name);
1163 type_member->func = member_func;
1164 type_member->id = type->n_funcs;
1166 /* Node add to tailq. */
1167 TAILQ_INSERT_TAIL(&type->funcs, type_member, node);
1174 rte_swx_pipeline_extern_object_config(struct rte_swx_pipeline *p,
1175 const char *extern_type_name,
1179 struct extern_type *type;
1180 struct extern_obj *obj;
1185 CHECK_NAME(extern_type_name, EINVAL);
1186 type = extern_type_find(p, extern_type_name);
1187 CHECK(type, EINVAL);
1189 CHECK_NAME(name, EINVAL);
1190 CHECK(!extern_obj_find(p, name), EEXIST);
1192 /* Node allocation. */
1193 obj = calloc(1, sizeof(struct extern_obj));
1196 /* Object construction. */
1197 obj_handle = type->constructor(args);
1203 /* Node initialization. */
1204 strcpy(obj->name, name);
1206 obj->obj = obj_handle;
1207 obj->struct_id = p->n_structs;
1208 obj->id = p->n_extern_objs;
1210 /* Node add to tailq. */
1211 TAILQ_INSERT_TAIL(&p->extern_objs, obj, node);
1219 extern_obj_build(struct rte_swx_pipeline *p)
1223 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1224 struct thread *t = &p->threads[i];
1225 struct extern_obj *obj;
1227 t->extern_objs = calloc(p->n_extern_objs,
1228 sizeof(struct extern_obj_runtime));
1229 CHECK(t->extern_objs, ENOMEM);
1231 TAILQ_FOREACH(obj, &p->extern_objs, node) {
1232 struct extern_obj_runtime *r =
1233 &t->extern_objs[obj->id];
1234 struct extern_type_member_func *func;
1235 uint32_t mailbox_size =
1236 obj->type->mailbox_struct_type->n_bits / 8;
1240 r->mailbox = calloc(1, mailbox_size);
1241 CHECK(r->mailbox, ENOMEM);
1243 TAILQ_FOREACH(func, &obj->type->funcs, node)
1244 r->funcs[func->id] = func->func;
1246 t->structs[obj->struct_id] = r->mailbox;
1254 extern_obj_build_free(struct rte_swx_pipeline *p)
1258 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1259 struct thread *t = &p->threads[i];
1262 if (!t->extern_objs)
1265 for (j = 0; j < p->n_extern_objs; j++) {
1266 struct extern_obj_runtime *r = &t->extern_objs[j];
1271 free(t->extern_objs);
1272 t->extern_objs = NULL;
1277 extern_obj_free(struct rte_swx_pipeline *p)
1279 extern_obj_build_free(p);
1281 /* Extern objects. */
1283 struct extern_obj *elem;
1285 elem = TAILQ_FIRST(&p->extern_objs);
1289 TAILQ_REMOVE(&p->extern_objs, elem, node);
1291 elem->type->destructor(elem->obj);
1297 struct extern_type *elem;
1299 elem = TAILQ_FIRST(&p->extern_types);
1303 TAILQ_REMOVE(&p->extern_types, elem, node);
1306 struct extern_type_member_func *func;
1308 func = TAILQ_FIRST(&elem->funcs);
1312 TAILQ_REMOVE(&elem->funcs, func, node);
1323 static struct extern_func *
1324 extern_func_find(struct rte_swx_pipeline *p, const char *name)
1326 struct extern_func *elem;
1328 TAILQ_FOREACH(elem, &p->extern_funcs, node)
1329 if (strcmp(elem->name, name) == 0)
1335 static struct field *
1336 extern_func_mailbox_field_parse(struct rte_swx_pipeline *p,
1338 struct extern_func **function)
1340 struct extern_func *func;
1342 char *func_name, *field_name;
1344 if ((name[0] != 'f') || (name[1] != '.'))
1347 func_name = strdup(&name[2]);
1351 field_name = strchr(func_name, '.');
1360 func = extern_func_find(p, func_name);
1366 f = struct_type_field_find(func->mailbox_struct_type, field_name);
1380 rte_swx_pipeline_extern_func_register(struct rte_swx_pipeline *p,
1382 const char *mailbox_struct_type_name,
1383 rte_swx_extern_func_t func)
1385 struct extern_func *f;
1386 struct struct_type *mailbox_struct_type;
1390 CHECK_NAME(name, EINVAL);
1391 CHECK(!extern_func_find(p, name), EEXIST);
1393 CHECK_NAME(mailbox_struct_type_name, EINVAL);
1394 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
1395 CHECK(mailbox_struct_type, EINVAL);
1397 CHECK(func, EINVAL);
1399 /* Node allocation. */
1400 f = calloc(1, sizeof(struct extern_func));
1401 CHECK(func, ENOMEM);
1403 /* Node initialization. */
1404 strcpy(f->name, name);
1405 f->mailbox_struct_type = mailbox_struct_type;
1407 f->struct_id = p->n_structs;
1408 f->id = p->n_extern_funcs;
1410 /* Node add to tailq. */
1411 TAILQ_INSERT_TAIL(&p->extern_funcs, f, node);
1412 p->n_extern_funcs++;
1419 extern_func_build(struct rte_swx_pipeline *p)
1423 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1424 struct thread *t = &p->threads[i];
1425 struct extern_func *func;
1427 /* Memory allocation. */
1428 t->extern_funcs = calloc(p->n_extern_funcs,
1429 sizeof(struct extern_func_runtime));
1430 CHECK(t->extern_funcs, ENOMEM);
1432 /* Extern function. */
1433 TAILQ_FOREACH(func, &p->extern_funcs, node) {
1434 struct extern_func_runtime *r =
1435 &t->extern_funcs[func->id];
1436 uint32_t mailbox_size =
1437 func->mailbox_struct_type->n_bits / 8;
1439 r->func = func->func;
1441 r->mailbox = calloc(1, mailbox_size);
1442 CHECK(r->mailbox, ENOMEM);
1444 t->structs[func->struct_id] = r->mailbox;
1452 extern_func_build_free(struct rte_swx_pipeline *p)
1456 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1457 struct thread *t = &p->threads[i];
1460 if (!t->extern_funcs)
1463 for (j = 0; j < p->n_extern_funcs; j++) {
1464 struct extern_func_runtime *r = &t->extern_funcs[j];
1469 free(t->extern_funcs);
1470 t->extern_funcs = NULL;
1475 extern_func_free(struct rte_swx_pipeline *p)
1477 extern_func_build_free(p);
1480 struct extern_func *elem;
1482 elem = TAILQ_FIRST(&p->extern_funcs);
1486 TAILQ_REMOVE(&p->extern_funcs, elem, node);
1494 static struct header *
1495 header_find(struct rte_swx_pipeline *p, const char *name)
1497 struct header *elem;
1499 TAILQ_FOREACH(elem, &p->headers, node)
1500 if (strcmp(elem->name, name) == 0)
1506 static struct header *
1507 header_parse(struct rte_swx_pipeline *p,
1510 if (name[0] != 'h' || name[1] != '.')
1513 return header_find(p, &name[2]);
1516 static struct field *
1517 header_field_parse(struct rte_swx_pipeline *p,
1519 struct header **header)
1523 char *header_name, *field_name;
1525 if ((name[0] != 'h') || (name[1] != '.'))
1528 header_name = strdup(&name[2]);
1532 field_name = strchr(header_name, '.');
1541 h = header_find(p, header_name);
1547 f = struct_type_field_find(h->st, field_name);
1561 rte_swx_pipeline_packet_header_register(struct rte_swx_pipeline *p,
1563 const char *struct_type_name)
1565 struct struct_type *st;
1567 size_t n_headers_max;
1570 CHECK_NAME(name, EINVAL);
1571 CHECK_NAME(struct_type_name, EINVAL);
1573 CHECK(!header_find(p, name), EEXIST);
1575 st = struct_type_find(p, struct_type_name);
1578 n_headers_max = RTE_SIZEOF_FIELD(struct thread, valid_headers) * 8;
1579 CHECK(p->n_headers < n_headers_max, ENOSPC);
1581 /* Node allocation. */
1582 h = calloc(1, sizeof(struct header));
1585 /* Node initialization. */
1586 strcpy(h->name, name);
1588 h->struct_id = p->n_structs;
1589 h->id = p->n_headers;
1591 /* Node add to tailq. */
1592 TAILQ_INSERT_TAIL(&p->headers, h, node);
1600 header_build(struct rte_swx_pipeline *p)
1603 uint32_t n_bytes = 0, i;
1605 TAILQ_FOREACH(h, &p->headers, node) {
1606 n_bytes += h->st->n_bits / 8;
1609 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1610 struct thread *t = &p->threads[i];
1611 uint32_t offset = 0;
1613 t->headers = calloc(p->n_headers,
1614 sizeof(struct header_runtime));
1615 CHECK(t->headers, ENOMEM);
1617 t->headers_out = calloc(p->n_headers,
1618 sizeof(struct header_out_runtime));
1619 CHECK(t->headers_out, ENOMEM);
1621 t->header_storage = calloc(1, n_bytes);
1622 CHECK(t->header_storage, ENOMEM);
1624 t->header_out_storage = calloc(1, n_bytes);
1625 CHECK(t->header_out_storage, ENOMEM);
1627 TAILQ_FOREACH(h, &p->headers, node) {
1628 uint8_t *header_storage;
1630 header_storage = &t->header_storage[offset];
1631 offset += h->st->n_bits / 8;
1633 t->headers[h->id].ptr0 = header_storage;
1634 t->structs[h->struct_id] = header_storage;
1642 header_build_free(struct rte_swx_pipeline *p)
1646 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1647 struct thread *t = &p->threads[i];
1649 free(t->headers_out);
1650 t->headers_out = NULL;
1655 free(t->header_out_storage);
1656 t->header_out_storage = NULL;
1658 free(t->header_storage);
1659 t->header_storage = NULL;
1664 header_free(struct rte_swx_pipeline *p)
1666 header_build_free(p);
1669 struct header *elem;
1671 elem = TAILQ_FIRST(&p->headers);
1675 TAILQ_REMOVE(&p->headers, elem, node);
1683 static struct field *
1684 metadata_field_parse(struct rte_swx_pipeline *p, const char *name)
1686 if (!p->metadata_st)
1689 if (name[0] != 'm' || name[1] != '.')
1692 return struct_type_field_find(p->metadata_st, &name[2]);
1696 rte_swx_pipeline_packet_metadata_register(struct rte_swx_pipeline *p,
1697 const char *struct_type_name)
1699 struct struct_type *st = NULL;
1703 CHECK_NAME(struct_type_name, EINVAL);
1704 st = struct_type_find(p, struct_type_name);
1706 CHECK(!p->metadata_st, EINVAL);
1708 p->metadata_st = st;
1709 p->metadata_struct_id = p->n_structs;
1717 metadata_build(struct rte_swx_pipeline *p)
1719 uint32_t n_bytes = p->metadata_st->n_bits / 8;
1722 /* Thread-level initialization. */
1723 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1724 struct thread *t = &p->threads[i];
1727 metadata = calloc(1, n_bytes);
1728 CHECK(metadata, ENOMEM);
1730 t->metadata = metadata;
1731 t->structs[p->metadata_struct_id] = metadata;
1738 metadata_build_free(struct rte_swx_pipeline *p)
1742 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1743 struct thread *t = &p->threads[i];
1751 metadata_free(struct rte_swx_pipeline *p)
1753 metadata_build_free(p);
1759 static struct field *
1760 action_field_parse(struct action *action, const char *name);
1762 static struct field *
1763 struct_field_parse(struct rte_swx_pipeline *p,
1764 struct action *action,
1766 uint32_t *struct_id)
1773 struct header *header;
1775 f = header_field_parse(p, name, &header);
1779 *struct_id = header->struct_id;
1785 f = metadata_field_parse(p, name);
1789 *struct_id = p->metadata_struct_id;
1798 f = action_field_parse(action, name);
1808 struct extern_obj *obj;
1810 f = extern_obj_mailbox_field_parse(p, name, &obj);
1814 *struct_id = obj->struct_id;
1820 struct extern_func *func;
1822 f = extern_func_mailbox_field_parse(p, name, &func);
1826 *struct_id = func->struct_id;
1836 pipeline_port_inc(struct rte_swx_pipeline *p)
1838 p->port_id = (p->port_id + 1) & (p->n_ports_in - 1);
1842 thread_ip_reset(struct rte_swx_pipeline *p, struct thread *t)
1844 t->ip = p->instructions;
1848 thread_ip_inc(struct rte_swx_pipeline *p);
1851 thread_ip_inc(struct rte_swx_pipeline *p)
1853 struct thread *t = &p->threads[p->thread_id];
1859 thread_ip_inc_cond(struct thread *t, int cond)
1865 thread_yield(struct rte_swx_pipeline *p)
1867 p->thread_id = (p->thread_id + 1) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
1874 instr_rx_translate(struct rte_swx_pipeline *p,
1875 struct action *action,
1878 struct instruction *instr,
1879 struct instruction_data *data __rte_unused)
1883 CHECK(!action, EINVAL);
1884 CHECK(n_tokens == 2, EINVAL);
1886 f = metadata_field_parse(p, tokens[1]);
1889 instr->type = INSTR_RX;
1890 instr->io.io.offset = f->offset / 8;
1891 instr->io.io.n_bits = f->n_bits;
1896 instr_rx_exec(struct rte_swx_pipeline *p);
1899 instr_rx_exec(struct rte_swx_pipeline *p)
1901 struct thread *t = &p->threads[p->thread_id];
1902 struct instruction *ip = t->ip;
1903 struct port_in_runtime *port = &p->in[p->port_id];
1904 struct rte_swx_pkt *pkt = &t->pkt;
1908 pkt_received = port->pkt_rx(port->obj, pkt);
1909 t->ptr = &pkt->pkt[pkt->offset];
1910 rte_prefetch0(t->ptr);
1912 TRACE("[Thread %2u] rx %s from port %u\n",
1914 pkt_received ? "1 pkt" : "0 pkts",
1918 t->valid_headers = 0;
1919 t->n_headers_out = 0;
1922 METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, p->port_id);
1925 t->table_state = p->table_state;
1928 pipeline_port_inc(p);
1929 thread_ip_inc_cond(t, pkt_received);
1937 instr_tx_translate(struct rte_swx_pipeline *p,
1938 struct action *action __rte_unused,
1941 struct instruction *instr,
1942 struct instruction_data *data __rte_unused)
1946 CHECK(n_tokens == 2, EINVAL);
1948 f = metadata_field_parse(p, tokens[1]);
1951 instr->type = INSTR_TX;
1952 instr->io.io.offset = f->offset / 8;
1953 instr->io.io.n_bits = f->n_bits;
/*
 * Flush the thread's emitted-headers list into the packet buffer just before
 * TX.  Fast paths avoid copies when the emitted headers are already
 * contiguous with the payload; the generic path gathers all emitted headers
 * into header_out_storage and prepends them in one memcpy.
 */
1958 emit_handler(struct thread *t)
1960 struct header_out_runtime *h0 = &t->headers_out[0];
1961 struct header_out_runtime *h1 = &t->headers_out[1];
1962 uint32_t offset = 0, i;
1964 /* No header change or header decapsulation. */
/* Fast path: single emitted header that already sits immediately before the
 * payload — just widen the packet to re-include it, no copy needed. */
1965 if ((t->n_headers_out == 1) &&
1966 (h0->ptr + h0->n_bytes == t->ptr)) {
1967 TRACE("Emit handler: no header change or header decap.\n");
1969 t->pkt.offset -= h0->n_bytes;
1970 t->pkt.length += h0->n_bytes;
1975 /* Header encapsulation (optionally, with prior header decasulation). */
/* Fast path: two emitted headers, the second already adjacent to the payload
 * and the first still in its original buffer — one memcpy of h0 suffices. */
1976 if ((t->n_headers_out == 2) &&
1977 (h1->ptr + h1->n_bytes == t->ptr) &&
1978 (h0->ptr == h0->ptr0)) {
1981 TRACE("Emit handler: header encapsulation.\n");
1983 offset = h0->n_bytes + h1->n_bytes;
1984 memcpy(t->ptr - offset, h0->ptr, h0->n_bytes);
1985 t->pkt.offset -= offset;
1986 t->pkt.length += offset;
1991 /* Header insertion. */
1994 /* Header extraction. */
1997 /* For any other case. */
1998 TRACE("Emit handler: complex case.\n");
/* Generic path: gather every emitted header into contiguous scratch storage,
 * then copy the whole run in front of the payload in a single memcpy. */
2000 for (i = 0; i < t->n_headers_out; i++) {
2001 struct header_out_runtime *h = &t->headers_out[i];
2003 memcpy(&t->header_out_storage[offset], h->ptr, h->n_bytes);
2004 offset += h->n_bytes;
2008 memcpy(t->ptr - offset, t->header_out_storage, offset);
2009 t->pkt.offset -= offset;
2010 t->pkt.length += offset;
/* Forward declaration for the tx executor. */
2015 instr_tx_exec(struct rte_swx_pipeline *p);
/*
 * Execute "tx": read the output port ID from metadata, flush emitted headers
 * into the packet (emit_handler, presumably called in an elided line), send
 * the packet, and restart this thread at the first instruction.
 */
2018 instr_tx_exec(struct rte_swx_pipeline *p)
2020 struct thread *t = &p->threads[p->thread_id];
2021 struct instruction *ip = t->ip;
/* Output port ID comes from the metadata field fixed at translation time. */
2022 uint64_t port_id = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
2023 struct port_out_runtime *port = &p->out[port_id];
2024 struct rte_swx_pkt *pkt = &t->pkt;
2026 TRACE("[Thread %2u]: tx 1 pkt to port %u\n",
2034 port->pkt_tx(port->obj, pkt);
/* Packet done: reset the instruction pointer for the next packet. */
2037 thread_ip_reset(p, t);
/*
 * Translate "extract h.header": parse one header from the packet.  Only valid
 * outside actions.  Slot [0] is filled; later optimization may fuse up to 8
 * consecutive extracts into one instruction.
 */
2045 instr_hdr_extract_translate(struct rte_swx_pipeline *p,
2046 struct action *action,
2049 struct instruction *instr,
2050 struct instruction_data *data __rte_unused)
2054 CHECK(!action, EINVAL);
2055 CHECK(n_tokens == 2, EINVAL);
2057 h = header_parse(p, tokens[1]);
2060 instr->type = INSTR_HDR_EXTRACT;
2061 instr->io.hdr.header_id[0] = h->id;
2062 instr->io.hdr.struct_id[0] = h->struct_id;
/* Header size in bytes; header structs are byte-aligned (n_bits / 8). */
2063 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
/* Forward declaration; shared worker for the 1..8-way fused extracts. */
2068 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract);
/*
 * Extract n_extract consecutive headers in place: point each header struct at
 * its position in the packet, mark it valid, and advance past it (pointer
 * advance presumably happens in lines elided from this excerpt).
 */
2071 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract)
2073 struct thread *t = &p->threads[p->thread_id];
2074 struct instruction *ip = t->ip;
/* Work on local copies; committed back to the thread after the loop. */
2075 uint64_t valid_headers = t->valid_headers;
2076 uint8_t *ptr = t->ptr;
2077 uint32_t offset = t->pkt.offset;
2078 uint32_t length = t->pkt.length;
2081 for (i = 0; i < n_extract; i++) {
2082 uint32_t header_id = ip->io.hdr.header_id[i];
2083 uint32_t struct_id = ip->io.hdr.struct_id[i];
2084 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
2086 TRACE("[Thread %2u]: extract header %u (%u bytes)\n",
/* Zero-copy extract: the header struct aliases the packet buffer. */
2092 t->structs[struct_id] = ptr;
2093 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
/* Commit updated validity mask and packet window back to the thread. */
2102 t->valid_headers = valid_headers;
2105 t->pkt.offset = offset;
2106 t->pkt.length = length;
/* Execute a single (non-fused) header extract. */
2111 instr_hdr_extract_exec(struct rte_swx_pipeline *p)
2113 __instr_hdr_extract_exec(p, 1);
/*
 * Fused executors: the translator collapses N back-to-back extract
 * instructions into one INSTR_HDR_EXTRACTn opcode; each trampoline below
 * simply invokes the shared worker with the fused count (2..8).
 */
2120 instr_hdr_extract2_exec(struct rte_swx_pipeline *p)
2122 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
2125 __instr_hdr_extract_exec(p, 2);
2132 instr_hdr_extract3_exec(struct rte_swx_pipeline *p)
2134 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
2137 __instr_hdr_extract_exec(p, 3);
2144 instr_hdr_extract4_exec(struct rte_swx_pipeline *p)
2146 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
2149 __instr_hdr_extract_exec(p, 4);
2156 instr_hdr_extract5_exec(struct rte_swx_pipeline *p)
2158 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
2161 __instr_hdr_extract_exec(p, 5);
2168 instr_hdr_extract6_exec(struct rte_swx_pipeline *p)
2170 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
2173 __instr_hdr_extract_exec(p, 6);
2180 instr_hdr_extract7_exec(struct rte_swx_pipeline *p)
2182 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
2185 __instr_hdr_extract_exec(p, 7);
2192 instr_hdr_extract8_exec(struct rte_swx_pipeline *p)
2194 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
2197 __instr_hdr_extract_exec(p, 8);
/*
 * Translate "emit h.header": queue one header for emission at TX time.
 * Usable inside and outside actions.  Slot [0] is filled; fusion with a
 * following tx is done by a later optimization pass.
 */
2207 instr_hdr_emit_translate(struct rte_swx_pipeline *p,
2208 struct action *action __rte_unused,
2211 struct instruction *instr,
2212 struct instruction_data *data __rte_unused)
2216 CHECK(n_tokens == 2, EINVAL);
2218 h = header_parse(p, tokens[1]);
2221 instr->type = INSTR_HDR_EMIT;
2222 instr->io.hdr.header_id[0] = h->id;
2223 instr->io.hdr.struct_id[0] = h->struct_id;
2224 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
/* Forward declaration; shared worker for the 1..8-way fused emits. */
2229 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit);
/*
 * Queue n_emit headers onto the thread's headers_out list.  Adjacent headers
 * (contiguous in memory) are coalesced into a single headers_out entry so the
 * emit handler can copy them with fewer memcpy calls.  Several branch bodies
 * of the original are elided from this excerpt.
 */
2232 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit)
2234 struct thread *t = &p->threads[p->thread_id];
2235 struct instruction *ip = t->ip;
2236 uint32_t n_headers_out = t->n_headers_out;
/* ho tracks the most recent headers_out entry (the coalescing candidate). */
2237 struct header_out_runtime *ho = &t->headers_out[n_headers_out - 1];
2238 uint8_t *ho_ptr = NULL;
2239 uint32_t ho_nbytes = 0, i;
2241 for (i = 0; i < n_emit; i++) {
2242 uint32_t header_id = ip->io.hdr.header_id[i];
2243 uint32_t struct_id = ip->io.hdr.struct_id[i];
2244 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
2246 struct header_runtime *hi = &t->headers[header_id];
2247 uint8_t *hi_ptr = t->structs[struct_id];
2249 TRACE("[Thread %2u]: emit header %u\n",
/* First emitted header: start the headers_out list at entry 0. */
2255 if (!t->n_headers_out) {
2256 ho = &t->headers_out[0];
2258 ho->ptr0 = hi->ptr0;
2262 ho_nbytes = n_bytes;
2269 ho_nbytes = ho->n_bytes;
/* Coalesce: this header directly follows the current entry in memory. */
2273 if (ho_ptr + ho_nbytes == hi_ptr) {
2274 ho_nbytes += n_bytes;
/* Otherwise close the current entry and start a new one. */
2276 ho->n_bytes = ho_nbytes;
2279 ho->ptr0 = hi->ptr0;
2283 ho_nbytes = n_bytes;
/* Commit the last open entry and the updated list length. */
2289 ho->n_bytes = ho_nbytes;
2290 t->n_headers_out = n_headers_out;
/* Execute a single (non-fused) header emit. */
2294 instr_hdr_emit_exec(struct rte_swx_pipeline *p)
2296 __instr_hdr_emit_exec(p, 1);
/*
 * Fused emit+tx executors: N emits followed by a tx are collapsed into one
 * INSTR_HDR_EMITn_TX opcode (hence "next N+1 instructions are fused" in the
 * traces).  Each trampoline runs the emit worker for its count; the tx part
 * is executed by lines elided from this excerpt.
 */
2303 instr_hdr_emit_tx_exec(struct rte_swx_pipeline *p)
2305 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
2308 __instr_hdr_emit_exec(p, 1);
2313 instr_hdr_emit2_tx_exec(struct rte_swx_pipeline *p)
2315 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
2318 __instr_hdr_emit_exec(p, 2);
2323 instr_hdr_emit3_tx_exec(struct rte_swx_pipeline *p)
2325 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
2328 __instr_hdr_emit_exec(p, 3);
2333 instr_hdr_emit4_tx_exec(struct rte_swx_pipeline *p)
2335 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
2338 __instr_hdr_emit_exec(p, 4);
2343 instr_hdr_emit5_tx_exec(struct rte_swx_pipeline *p)
2345 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
2348 __instr_hdr_emit_exec(p, 5);
2353 instr_hdr_emit6_tx_exec(struct rte_swx_pipeline *p)
2355 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
2358 __instr_hdr_emit_exec(p, 6);
2363 instr_hdr_emit7_tx_exec(struct rte_swx_pipeline *p)
2365 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
2368 __instr_hdr_emit_exec(p, 7);
2373 instr_hdr_emit8_tx_exec(struct rte_swx_pipeline *p)
2375 TRACE("[Thread %2u] *** The next 9 instructions are fused. ***\n",
2378 __instr_hdr_emit_exec(p, 8);
/* Translate "validate h.header": mark a header valid without parsing it. */
2386 instr_hdr_validate_translate(struct rte_swx_pipeline *p,
2387 struct action *action __rte_unused,
2390 struct instruction *instr,
2391 struct instruction_data *data __rte_unused)
2395 CHECK(n_tokens == 2, EINVAL);
2397 h = header_parse(p, tokens[1])
2400 instr->type = INSTR_HDR_VALIDATE;
2401 instr->valid.header_id = h->id;
/* Execute "validate": set the header's bit in the thread's validity mask. */
2406 instr_hdr_validate_exec(struct rte_swx_pipeline *p)
2408 struct thread *t = &p->threads[p->thread_id];
2409 struct instruction *ip = t->ip;
2410 uint32_t header_id = ip->valid.header_id;
2412 TRACE("[Thread %2u] validate header %u\n", p->thread_id, header_id);
2415 t->valid_headers = MASK64_BIT_SET(t->valid_headers, header_id);
/* Translate "invalidate h.header": mark a header as no longer valid. */
2425 instr_hdr_invalidate_translate(struct rte_swx_pipeline *p,
2426 struct action *action __rte_unused,
2429 struct instruction *instr,
2430 struct instruction_data *data __rte_unused)
2434 CHECK(n_tokens == 2, EINVAL);
2436 h = header_parse(p, tokens[1]);
2439 instr->type = INSTR_HDR_INVALIDATE;
2440 instr->valid.header_id = h->id;
/* Execute "invalidate": clear the header's bit in the validity mask. */
2445 instr_hdr_invalidate_exec(struct rte_swx_pipeline *p)
2447 struct thread *t = &p->threads[p->thread_id];
2448 struct instruction *ip = t->ip;
2449 uint32_t header_id = ip->valid.header_id;
2451 TRACE("[Thread %2u] invalidate header %u\n", p->thread_id, header_id);
2454 t->valid_headers = MASK64_BIT_CLR(t->valid_headers, header_id);
/*
 * Translate "mov dst src".  Three variants are selected:
 *   INSTR_MOV   — both operands same endianness (hdr/hdr or meta/meta);
 *   INSTR_MOV_S — exactly one operand is a header field (endianness swap);
 *   INSTR_MOV_I — src is an immediate integer literal.
 * Some guard lines (e.g. the branch between field-src and immediate-src) are
 * elided from this excerpt.
 */
2464 instr_mov_translate(struct rte_swx_pipeline *p,
2465 struct action *action,
2468 struct instruction *instr,
2469 struct instruction_data *data __rte_unused)
2471 char *dst = tokens[1], *src = tokens[2];
2472 struct field *fdst, *fsrc;
2473 uint32_t dst_struct_id, src_struct_id, src_val;
2475 CHECK(n_tokens == 3, EINVAL);
2477 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
2478 CHECK(fdst, EINVAL);
/* Try to resolve src as a struct field first (h.f, m.f or t.f). */
2481 fsrc = struct_field_parse(p, action, src, &src_struct_id);
2483 instr->type = INSTR_MOV;
/* Exactly one header-side operand -> byte-swapping mov variant. */
2484 if ((dst[0] == 'h' && src[0] != 'h') ||
2485 (dst[0] != 'h' && src[0] == 'h'))
2486 instr->type = INSTR_MOV_S;
2488 instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
2489 instr->mov.dst.n_bits = fdst->n_bits;
2490 instr->mov.dst.offset = fdst->offset / 8;
2491 instr->mov.src.struct_id = (uint8_t)src_struct_id;
2492 instr->mov.src.n_bits = fsrc->n_bits;
2493 instr->mov.src.offset = fsrc->offset / 8;
/* src is not a field: parse it as an immediate (base auto-detected). */
2498 src_val = strtoul(src, &src, 0);
2499 CHECK(!src[0], EINVAL);
/* NOTE(review): presumably applied only when dst is a header field (the
 * guarding "if" on the elided preceding line) — confirm against upstream. */
2502 src_val = htonl(src_val);
2504 instr->type = INSTR_MOV_I;
2505 instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
2506 instr->mov.dst.n_bits = fdst->n_bits;
2507 instr->mov.dst.offset = fdst->offset / 8;
2508 instr->mov.src_val = (uint32_t)src_val;
/* Execute INSTR_MOV: same-endianness field-to-field copy (body elided). */
2513 instr_mov_exec(struct rte_swx_pipeline *p)
2515 struct thread *t = &p->threads[p->thread_id];
2516 struct instruction *ip = t->ip;
2518 TRACE("[Thread %2u] mov\n",
/* Execute INSTR_MOV_S: field copy with endianness swap (body elided). */
2528 instr_mov_s_exec(struct rte_swx_pipeline *p)
2530 struct thread *t = &p->threads[p->thread_id];
2531 struct instruction *ip = t->ip;
2533 TRACE("[Thread %2u] mov (s)\n",
/* Execute INSTR_MOV_I: write an immediate value to a field (body elided). */
2543 instr_mov_i_exec(struct rte_swx_pipeline *p)
2545 struct thread *t = &p->threads[p->thread_id];
2546 struct instruction *ip = t->ip;
2548 TRACE("[Thread %2u] mov m.f %x\n",
/*
 * Translate "dma h.header t.field": copy a whole header's worth of bytes from
 * the action data (table entry) into a header.  Only valid inside an action
 * (CHECK(action)).  Slot [0] filled; fusion handles up to 8 DMAs.
 */
2562 instr_dma_translate(struct rte_swx_pipeline *p,
2563 struct action *action,
2566 struct instruction *instr,
2567 struct instruction_data *data __rte_unused)
2569 char *dst = tokens[1];
2570 char *src = tokens[2];
2574 CHECK(action, EINVAL);
2575 CHECK(n_tokens == 3, EINVAL);
/* dst must be a declared header; src must be a field of this action's
 * argument struct (t.field). */
2577 h = header_parse(p, dst);
2580 tf = action_field_parse(action, src);
2583 instr->type = INSTR_DMA_HT;
2584 instr->dma.dst.header_id[0] = h->id;
2585 instr->dma.dst.struct_id[0] = h->struct_id;
2586 instr->dma.n_bytes[0] = h->st->n_bits / 8;
2587 instr->dma.src.offset[0] = tf->offset / 8;
/* Forward declaration; shared worker for the 1..8-way fused DMAs. */
2593 __instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma);
/*
 * Copy n_dma headers from action data into header buffers.  An invalid
 * destination header is materialized into its backing buffer (ptr0) and
 * marked valid; a valid one is overwritten in place.
 */
2596 __instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma)
2598 struct thread *t = &p->threads[p->thread_id];
2599 struct instruction *ip = t->ip;
/* Struct 0 is reserved for the current action's data (see pipeline init). */
2600 uint8_t *action_data = t->structs[0];
2601 uint64_t valid_headers = t->valid_headers;
2604 for (i = 0; i < n_dma; i++) {
2605 uint32_t header_id = ip->dma.dst.header_id[i];
2606 uint32_t struct_id = ip->dma.dst.struct_id[i];
2607 uint32_t offset = ip->dma.src.offset[i];
2608 uint32_t n_bytes = ip->dma.n_bytes[i];
2610 struct header_runtime *h = &t->headers[header_id];
2611 uint8_t *h_ptr0 = h->ptr0;
2612 uint8_t *h_ptr = t->structs[struct_id];
/* Valid header -> overwrite in place; invalid -> write into ptr0 backing
 * storage (the other arm of this ternary is elided from this excerpt). */
2614 void *dst = MASK64_BIT_GET(valid_headers, header_id) ?
2616 void *src = &action_data[offset];
2618 TRACE("[Thread %2u] dma h.s t.f\n", p->thread_id);
2621 memcpy(dst, src, n_bytes);
2622 t->structs[struct_id] = dst;
2623 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
2626 t->valid_headers = valid_headers;
/* Execute a single (non-fused) header DMA. */
2630 instr_dma_ht_exec(struct rte_swx_pipeline *p)
2632 __instr_dma_ht_exec(p, 1);
/*
 * Fused DMA executors: N consecutive "dma" instructions are collapsed into a
 * single INSTR_DMA_HTn opcode; each trampoline calls the shared worker with
 * its fused count (2..8).
 */
2639 instr_dma_ht2_exec(struct rte_swx_pipeline *p)
2641 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
2644 __instr_dma_ht_exec(p, 2);
2651 instr_dma_ht3_exec(struct rte_swx_pipeline *p)
2653 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
2656 __instr_dma_ht_exec(p, 3);
2663 instr_dma_ht4_exec(struct rte_swx_pipeline *p)
2665 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
2668 __instr_dma_ht_exec(p, 4);
2675 instr_dma_ht5_exec(struct rte_swx_pipeline *p)
2677 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
2680 __instr_dma_ht_exec(p, 5);
2687 instr_dma_ht6_exec(struct rte_swx_pipeline *p)
2689 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
2692 __instr_dma_ht_exec(p, 6);
2699 instr_dma_ht7_exec(struct rte_swx_pipeline *p)
2701 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
2704 __instr_dma_ht_exec(p, 7);
2711 instr_dma_ht8_exec(struct rte_swx_pipeline *p)
2713 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
2716 __instr_dma_ht_exec(p, 8);
/* Hard upper bound on tokens per instruction line (opcode + operands). */
2722 #define RTE_SWX_INSTRUCTION_TOKENS_MAX 16
/*
 * Tokenize one instruction string, consume an optional "label :" prefix, then
 * dispatch on the opcode to the matching translator.  The per-opcode argument
 * lists are elided from this excerpt.
 */
2725 instr_translate(struct rte_swx_pipeline *p,
2726 struct action *action,
2728 struct instruction *instr,
2729 struct instruction_data *data)
2731 char *tokens[RTE_SWX_INSTRUCTION_TOKENS_MAX];
2732 int n_tokens = 0, tpos = 0;
2734 /* Parse the instruction string into tokens. */
/* strtok_r mutates the input string; caller owns a writable copy. */
2738 token = strtok_r(string, " \t\v", &string);
2742 CHECK(n_tokens < RTE_SWX_INSTRUCTION_TOKENS_MAX, EINVAL);
2744 tokens[n_tokens] = token;
2748 CHECK(n_tokens, EINVAL);
2750 /* Handle the optional instruction label. */
2751 if ((n_tokens >= 2) && !strcmp(tokens[1], ":")) {
2752 strcpy(data->label, tokens[0]);
/* After the label, at least one opcode token must remain. */
2755 CHECK(n_tokens - tpos, EINVAL);
2758 /* Identify the instruction type. */
2759 if (!strcmp(tokens[tpos], "rx"))
2760 return instr_rx_translate(p,
2767 if (!strcmp(tokens[tpos], "tx"))
2768 return instr_tx_translate(p,
2775 if (!strcmp(tokens[tpos], "extract"))
2776 return instr_hdr_extract_translate(p,
2783 if (!strcmp(tokens[tpos], "emit"))
2784 return instr_hdr_emit_translate(p,
2791 if (!strcmp(tokens[tpos], "validate"))
2792 return instr_hdr_validate_translate(p,
2799 if (!strcmp(tokens[tpos], "invalidate"))
2800 return instr_hdr_invalidate_translate(p,
2807 if (!strcmp(tokens[tpos], "mov"))
2808 return instr_mov_translate(p,
2815 if (!strcmp(tokens[tpos], "dma"))
2816 return instr_dma_translate(p,
/* Count how many of the n instructions jump to the given label. */
2827 label_is_used(struct instruction_data *data, uint32_t n, const char *label)
2829 uint32_t count = 0, i;
2834 for (i = 0; i < n; i++)
2835 if (!strcmp(label, data[i].jmp_label))
/*
 * Validate labels across an instruction list: every label must be unique,
 * then record per-label user counts (so unused labels can be detected).
 */
2842 instr_label_check(struct instruction_data *instruction_data,
2843 uint32_t n_instructions)
2847 /* Check that all instruction labels are unique. */
2848 for (i = 0; i < n_instructions; i++) {
2849 struct instruction_data *data = &instruction_data[i];
2850 char *label = data->label;
/* Pairwise compare against all later labels; CHECK fails on duplicate. */
2856 for (j = i + 1; j < n_instructions; j++)
2857 CHECK(strcmp(label, data[j].label), EINVAL);
2860 /* Get users for each instruction label. */
2861 for (i = 0; i < n_instructions; i++) {
2862 struct instruction_data *data = &instruction_data[i];
2863 char *label = data->label;
2865 data->n_users = label_is_used(instruction_data,
/*
 * Translate an array of instruction strings into executable form, for either
 * an action (a != NULL) or the pipeline's main program (a == NULL).  On
 * success, ownership of the instr/data arrays transfers to the action or the
 * pipeline; error-path cleanup lines are elided from this excerpt.
 */
2874 instruction_config(struct rte_swx_pipeline *p,
2876 const char **instructions,
2877 uint32_t n_instructions)
2879 struct instruction *instr = NULL;
2880 struct instruction_data *data = NULL;
2881 char *string = NULL;
2885 CHECK(n_instructions, EINVAL);
2886 CHECK(instructions, EINVAL);
2887 for (i = 0; i < n_instructions; i++)
2888 CHECK(instructions[i], EINVAL);
2890 /* Memory allocation. */
2891 instr = calloc(n_instructions, sizeof(struct instruction));
2897 data = calloc(n_instructions, sizeof(struct instruction_data));
2903 for (i = 0; i < n_instructions; i++) {
/* Translator tokenizes in place, so work on a private copy. */
2904 string = strdup(instructions[i]);
2910 err = instr_translate(p, a, string, &instr[i], &data[i]);
2917 err = instr_label_check(data, n_instructions);
/* Attach the translated program to the action or to the pipeline. */
2924 a->instructions = instr;
2925 a->n_instructions = n_instructions;
2927 p->instructions = instr;
2928 p->n_instructions = n_instructions;
/* Executor function type: one handler per instruction opcode. */
2940 typedef void (*instr_exec_t)(struct rte_swx_pipeline *);
/* Opcode -> executor dispatch table, indexed by enum instruction_type. */
2942 static instr_exec_t instruction_table[] = {
2943 [INSTR_RX] = instr_rx_exec,
2944 [INSTR_TX] = instr_tx_exec,
2946 [INSTR_HDR_EXTRACT] = instr_hdr_extract_exec,
2947 [INSTR_HDR_EXTRACT2] = instr_hdr_extract2_exec,
2948 [INSTR_HDR_EXTRACT3] = instr_hdr_extract3_exec,
2949 [INSTR_HDR_EXTRACT4] = instr_hdr_extract4_exec,
2950 [INSTR_HDR_EXTRACT5] = instr_hdr_extract5_exec,
2951 [INSTR_HDR_EXTRACT6] = instr_hdr_extract6_exec,
2952 [INSTR_HDR_EXTRACT7] = instr_hdr_extract7_exec,
2953 [INSTR_HDR_EXTRACT8] = instr_hdr_extract8_exec,
2955 [INSTR_HDR_EMIT] = instr_hdr_emit_exec,
2956 [INSTR_HDR_EMIT_TX] = instr_hdr_emit_tx_exec,
2957 [INSTR_HDR_EMIT2_TX] = instr_hdr_emit2_tx_exec,
2958 [INSTR_HDR_EMIT3_TX] = instr_hdr_emit3_tx_exec,
2959 [INSTR_HDR_EMIT4_TX] = instr_hdr_emit4_tx_exec,
2960 [INSTR_HDR_EMIT5_TX] = instr_hdr_emit5_tx_exec,
2961 [INSTR_HDR_EMIT6_TX] = instr_hdr_emit6_tx_exec,
2962 [INSTR_HDR_EMIT7_TX] = instr_hdr_emit7_tx_exec,
2963 [INSTR_HDR_EMIT8_TX] = instr_hdr_emit8_tx_exec,
2965 [INSTR_HDR_VALIDATE] = instr_hdr_validate_exec,
2966 [INSTR_HDR_INVALIDATE] = instr_hdr_invalidate_exec,
2968 [INSTR_MOV] = instr_mov_exec,
2969 [INSTR_MOV_S] = instr_mov_s_exec,
2970 [INSTR_MOV_I] = instr_mov_i_exec,
2972 [INSTR_DMA_HT] = instr_dma_ht_exec,
2973 [INSTR_DMA_HT2] = instr_dma_ht2_exec,
2974 [INSTR_DMA_HT3] = instr_dma_ht3_exec,
2975 [INSTR_DMA_HT4] = instr_dma_ht4_exec,
2976 [INSTR_DMA_HT5] = instr_dma_ht5_exec,
2977 [INSTR_DMA_HT6] = instr_dma_ht6_exec,
2978 [INSTR_DMA_HT7] = instr_dma_ht7_exec,
2979 [INSTR_DMA_HT8] = instr_dma_ht8_exec,
/* Dispatch the current thread's instruction through the opcode table. */
2983 instr_exec(struct rte_swx_pipeline *p)
2985 struct thread *t = &p->threads[p->thread_id];
2986 struct instruction *ip = t->ip;
2987 instr_exec_t instr = instruction_table[ip->type];
/* Look up an action by name; returns NULL when not found (return elided). */
2995 static struct action *
2996 action_find(struct rte_swx_pipeline *p, const char *name)
2998 struct action *elem;
3003 TAILQ_FOREACH(elem, &p->actions, node)
3004 if (strcmp(elem->name, name) == 0)
/* Find a field in the action's argument struct; NULL if no args struct. */
3010 static struct field *
3011 action_field_find(struct action *a, const char *name)
3013 return a->st ? struct_type_field_find(a->st, name) : NULL;
/* Parse a "t.field" operand: require the "t." prefix, then look up field. */
3016 static struct field *
3017 action_field_parse(struct action *action, const char *name)
3019 if (name[0] != 't' || name[1] != '.')
3022 return action_field_find(action, &name[2]);
/*
 * Public API: register a named action with an optional argument struct type
 * and translate its instruction list.  Error-path cleanup lines are elided
 * from this excerpt.
 */
3026 rte_swx_pipeline_action_config(struct rte_swx_pipeline *p,
3028 const char *args_struct_type_name,
3029 const char **instructions,
3030 uint32_t n_instructions)
3032 struct struct_type *args_struct_type;
3038 CHECK_NAME(name, EINVAL);
/* Action names must be unique within the pipeline. */
3039 CHECK(!action_find(p, name), EEXIST);
3041 if (args_struct_type_name) {
3042 CHECK_NAME(args_struct_type_name, EINVAL);
3043 args_struct_type = struct_type_find(p, args_struct_type_name);
3044 CHECK(args_struct_type, EINVAL);
3046 args_struct_type = NULL;
3049 /* Node allocation. */
3050 a = calloc(1, sizeof(struct action));
3053 /* Node initialization. */
3054 strcpy(a->name, name);
3055 a->st = args_struct_type;
/* IDs are assigned sequentially in registration order. */
3056 a->id = p->n_actions;
3058 /* Instruction translation. */
3059 err = instruction_config(p, a, instructions, n_instructions);
3065 /* Node add to tailq. */
3066 TAILQ_INSERT_TAIL(&p->actions, a, node);
/* Build step: index each action's instruction array by action ID for O(1)
 * dispatch at run time. */
3073 action_build(struct rte_swx_pipeline *p)
3075 struct action *action;
3077 p->action_instructions = calloc(p->n_actions,
3078 sizeof(struct instruction *));
3079 CHECK(p->action_instructions, ENOMEM);
3081 TAILQ_FOREACH(action, &p->actions, node)
3082 p->action_instructions[action->id] = action->instructions;
/* Undo action_build: release the per-ID instruction index. */
3088 action_build_free(struct rte_swx_pipeline *p)
3090 free(p->action_instructions);
3091 p->action_instructions = NULL;
/* Release all registered actions and their build artifacts. */
3095 action_free(struct rte_swx_pipeline *p)
3097 action_build_free(p);
3100 struct action *action;
/* Drain the tailq, freeing each action's translated instructions. */
3102 action = TAILQ_FIRST(&p->actions);
3106 TAILQ_REMOVE(&p->actions, action, node);
3107 free(action->instructions);
/* Look up a table type by name; returns NULL when absent (return elided). */
3115 static struct table_type *
3116 table_type_find(struct rte_swx_pipeline *p, const char *name)
3118 struct table_type *elem;
3120 TAILQ_FOREACH(elem, &p->table_types, node)
3121 if (strcmp(elem->name, name) == 0)
/*
 * Pick a table implementation for a match type: prefer the recommended type
 * if it exists AND has the right match type, otherwise fall back to the first
 * registered type with that match type.
 */
3127 static struct table_type *
3128 table_type_resolve(struct rte_swx_pipeline *p,
3129 const char *recommended_type_name,
3130 enum rte_swx_table_match_type match_type)
3132 struct table_type *elem;
3134 /* Only consider the recommended type if the match type is correct. */
3135 if (recommended_type_name)
3136 TAILQ_FOREACH(elem, &p->table_types, node)
3137 if (!strcmp(elem->name, recommended_type_name) &&
3138 (elem->match_type == match_type))
3141 /* Ignore the recommended type and get the first element with this match
3144 TAILQ_FOREACH(elem, &p->table_types, node)
3145 if (elem->match_type == match_type)
/* Look up a table by name; returns NULL when not found (return elided). */
3151 static struct table *
3152 table_find(struct rte_swx_pipeline *p, const char *name)
3156 TAILQ_FOREACH(elem, &p->tables, node)
3157 if (strcmp(elem->name, name) == 0)
/* Look up a table by numeric ID (IDs assigned at registration time). */
3163 static struct table *
3164 table_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
3166 struct table *table = NULL;
3168 TAILQ_FOREACH(table, &p->tables, node)
3169 if (table->id == id)
/*
 * Public API: register a table implementation (ops vtable) under a unique
 * name for a given match type.  The ops structure is copied, so the caller's
 * copy need not outlive this call.
 */
3176 rte_swx_pipeline_table_type_register(struct rte_swx_pipeline *p,
3178 enum rte_swx_table_match_type match_type,
3179 struct rte_swx_table_ops *ops)
3181 struct table_type *elem;
3185 CHECK_NAME(name, EINVAL);
3186 CHECK(!table_type_find(p, name), EEXIST);
/* create/lkp/free are mandatory callbacks for any table implementation. */
3189 CHECK(ops->create, EINVAL);
3190 CHECK(ops->lkp, EINVAL);
3191 CHECK(ops->free, EINVAL);
3193 /* Node allocation. */
3194 elem = calloc(1, sizeof(struct table_type));
3195 CHECK(elem, ENOMEM);
3197 /* Node initialization. */
3198 strcpy(elem->name, name);
3199 elem->match_type = match_type;
3200 memcpy(&elem->ops, ops, sizeof(*ops));
3202 /* Node add to tailq. */
3203 TAILQ_INSERT_TAIL(&p->table_types, elem, node);
/*
 * Derive the overall match type of a key: all-exact fields -> EXACT; a single
 * trailing LPM field after exacts -> LPM; anything else -> WILDCARD.
 */
3208 static enum rte_swx_table_match_type
3209 table_match_type_resolve(struct rte_swx_match_field_params *fields,
3214 for (i = 0; i < n_fields; i++)
3215 if (fields[i].match_type != RTE_SWX_TABLE_MATCH_EXACT)
/* Loop completed without finding a non-exact field. */
3219 return RTE_SWX_TABLE_MATCH_EXACT;
/* The one non-exact field must be last and LPM to qualify as LPM. */
3221 if ((i == n_fields - 1) &&
3222 (fields[i].match_type == RTE_SWX_TABLE_MATCH_LPM))
3223 return RTE_SWX_TABLE_MATCH_LPM;
3225 return RTE_SWX_TABLE_MATCH_WILDCARD;
/*
 * Public API: declare a table — validate its match fields (all header or all
 * metadata, strictly increasing offsets), its action list and default action,
 * resolve a table implementation, then allocate and register the table node.
 * Error-path cleanup lines are elided from this excerpt.
 */
3229 rte_swx_pipeline_table_config(struct rte_swx_pipeline *p,
3231 struct rte_swx_pipeline_table_params *params,
3232 const char *recommended_table_type_name,
3236 struct table_type *type;
3238 struct action *default_action;
3239 struct header *header = NULL;
3241 uint32_t offset_prev = 0, action_data_size_max = 0, i;
3245 CHECK_NAME(name, EINVAL);
3246 CHECK(!table_find(p, name), EEXIST);
3248 CHECK(params, EINVAL);
/* Zero-field (stub) tables are allowed; then fields may be NULL. */
3251 CHECK(!params->n_fields || params->fields, EINVAL);
3252 for (i = 0; i < params->n_fields; i++) {
3253 struct rte_swx_match_field_params *field = &params->fields[i];
3255 struct field *hf, *mf;
3258 CHECK_NAME(field->name, EINVAL);
/* Each field must resolve as either a header field or a metadata field. */
3260 hf = header_field_parse(p, field->name, &h);
3261 mf = metadata_field_parse(p, field->name);
3262 CHECK(hf || mf, EINVAL);
3264 offset = hf ? hf->offset : mf->offset;
/* First field fixes whether the key lives in a header or in metadata. */
3267 is_header = hf ? 1 : 0;
3268 header = hf ? h : NULL;
3269 offset_prev = offset;
/* Subsequent fields must come from the same header (or all metadata). */
3274 CHECK((is_header && hf && (h->id == header->id)) ||
3275 (!is_header && mf), EINVAL);
/* Fields must be listed in strictly increasing offset order. */
3277 CHECK(offset > offset_prev, EINVAL);
3278 offset_prev = offset;
3281 /* Action checks. */
3282 CHECK(params->n_actions, EINVAL);
3283 CHECK(params->action_names, EINVAL);
3284 for (i = 0; i < params->n_actions; i++) {
3285 const char *action_name = params->action_names[i];
3287 uint32_t action_data_size;
3289 CHECK(action_name, EINVAL);
3291 a = action_find(p, action_name);
/* Track the largest action-data footprint across this table's actions. */
3294 action_data_size = a->st ? a->st->n_bits / 8 : 0;
3295 if (action_data_size > action_data_size_max)
3296 action_data_size_max = action_data_size;
3299 CHECK(params->default_action_name, EINVAL);
/* NOTE(review): this loop indexes params->action_names[] but is bounded by
 * p->n_actions (total pipeline actions) while the success check below uses
 * params->n_actions — the bound should likely be params->n_actions.  Confirm
 * against the upstream DPDK fix before changing. */
3300 for (i = 0; i < p->n_actions; i++)
3301 if (!strcmp(params->action_names[i],
3302 params->default_action_name))
3304 CHECK(i < params->n_actions, EINVAL);
3305 default_action = action_find(p, params->default_action_name);
/* Default action data required iff the default action takes arguments. */
3306 CHECK((default_action->st && params->default_action_data) ||
3307 !params->default_action_data, EINVAL);
3309 /* Table type checks. */
3310 if (params->n_fields) {
3311 enum rte_swx_table_match_type match_type;
3313 match_type = table_match_type_resolve(params->fields,
3315 type = table_type_resolve(p,
3316 recommended_table_type_name,
3318 CHECK(type, EINVAL);
3323 /* Memory allocation. */
3324 t = calloc(1, sizeof(struct table));
3327 t->fields = calloc(params->n_fields, sizeof(struct match_field));
3333 t->actions = calloc(params->n_actions, sizeof(struct action *));
3340 if (action_data_size_max) {
3341 t->default_action_data = calloc(1, action_data_size_max);
3342 if (!t->default_action_data) {
3350 /* Node initialization. */
3351 strcpy(t->name, name);
3352 if (args && args[0])
3353 strcpy(t->args, args);
3356 for (i = 0; i < params->n_fields; i++) {
3357 struct rte_swx_match_field_params *field = &params->fields[i];
3358 struct match_field *f = &t->fields[i];
3360 f->match_type = field->match_type;
3361 f->field = is_header ?
3362 header_field_parse(p, field->name, NULL) :
3363 metadata_field_parse(p, field->name);
3365 t->n_fields = params->n_fields;
3366 t->is_header = is_header;
3369 for (i = 0; i < params->n_actions; i++)
3370 t->actions[i] = action_find(p, params->action_names[i]);
3371 t->default_action = default_action;
3372 if (default_action->st)
3373 memcpy(t->default_action_data,
3374 params->default_action_data,
3375 default_action->st->n_bits / 8);
3376 t->n_actions = params->n_actions;
3377 t->default_action_is_const = params->default_action_is_const;
3378 t->action_data_size_max = action_data_size_max;
/* IDs are assigned sequentially in registration order. */
3381 t->id = p->n_tables;
3383 /* Node add to tailq. */
3384 TAILQ_INSERT_TAIL(&p->tables, t, node);
/*
 * Build the low-level rte_swx_table_params for a table: compute key offset,
 * size, and byte mask from the match fields (assumed byte-aligned), plus the
 * maximum action data size.  Caller owns the returned params (free with
 * table_params_free).  Error-path lines are elided from this excerpt.
 */
3390 static struct rte_swx_table_params *
3391 table_params_get(struct table *table)
3393 struct rte_swx_table_params *params;
3394 struct field *first, *last;
3396 uint32_t key_size, key_offset, action_data_size, i;
3398 /* Memory allocation. */
3399 params = calloc(1, sizeof(struct rte_swx_table_params));
3403 /* Key offset and size. */
/* Fields are sorted by increasing offset (enforced at table config time),
 * so the key spans [first field, end of last field]. */
3404 first = table->fields[0].field;
3405 last = table->fields[table->n_fields - 1].field;
3406 key_offset = first->offset / 8;
3407 key_size = (last->offset + last->n_bits - first->offset) / 8;
3409 /* Memory allocation. */
3410 key_mask = calloc(1, key_size);
/* Enable (0xFF) only the bytes actually covered by a match field; gaps
 * between fields stay masked out. */
3417 for (i = 0; i < table->n_fields; i++) {
3418 struct field *f = table->fields[i].field;
3419 uint32_t start = (f->offset - first->offset) / 8;
3420 size_t size = f->n_bits / 8;
3422 memset(&key_mask[start], 0xFF, size);
3425 /* Action data size. */
3426 action_data_size = 0;
3427 for (i = 0; i < table->n_actions; i++) {
3428 struct action *action = table->actions[i];
3429 uint32_t ads = action->st ? action->st->n_bits / 8 : 0;
3431 if (ads > action_data_size)
3432 action_data_size = ads;
/* Ownership of key_mask transfers to params (key_mask0). */
3436 params->match_type = table->type->match_type;
3437 params->key_size = key_size;
3438 params->key_offset = key_offset;
3439 params->key_mask0 = key_mask;
3440 params->action_data_size = action_data_size;
3441 params->n_keys_max = table->size;
/* Free a params object produced by table_params_get (mask then struct). */
3447 table_params_free(struct rte_swx_table_params *params)
3452 free(params->key_mask0);
/*
 * Build step: allocate the per-table runtime state array and, for each
 * table, create the underlying table object and seed its default action data
 * and ID.  CHECK on failure leaves partial state for the caller's cleanup
 * path (table_state_build_free).
 */
3457 table_state_build(struct rte_swx_pipeline *p)
3459 struct table *table;
3461 p->table_state = calloc(p->n_tables,
3462 sizeof(struct rte_swx_table_state));
3463 CHECK(p->table_state, ENOMEM);
3465 TAILQ_FOREACH(table, &p->tables, node) {
3466 struct rte_swx_table_state *ts = &p->table_state[table->id];
3469 struct rte_swx_table_params *params;
3472 params = table_params_get(table);
3473 CHECK(params, ENOMEM);
/* The implementation's create() builds the actual lookup structure;
 * params are only needed for the duration of the call. */
3475 ts->obj = table->type->ops.create(params,
3480 table_params_free(params);
3481 CHECK(ts->obj, ENODEV);
3484 /* ts->default_action_data. */
3485 if (table->action_data_size_max) {
3486 ts->default_action_data =
3487 malloc(table->action_data_size_max);
3488 CHECK(ts->default_action_data, ENOMEM);
3490 memcpy(ts->default_action_data,
3491 table->default_action_data,
3492 table->action_data_size_max);
3495 /* ts->default_action_id. */
3496 ts->default_action_id = table->default_action->id;
/* Undo table_state_build: free each table object and its default action
 * data, then the state array itself.  Safe to call on partial builds. */
3503 table_state_build_free(struct rte_swx_pipeline *p)
3507 if (!p->table_state)
3510 for (i = 0; i < p->n_tables; i++) {
3511 struct rte_swx_table_state *ts = &p->table_state[i];
3512 struct table *table = table_find_by_id(p, i);
/* Guard against entries never created (partial build failure). */
3515 if (table->type && ts->obj)
3516 table->type->ops.free(ts->obj);
3518 /* ts->default_action_data. */
3519 free(ts->default_action_data);
3522 free(p->table_state);
3523 p->table_state = NULL;
/* Free all table runtime state (thin wrapper over the build-free path). */
3527 table_state_free(struct rte_swx_pipeline *p)
3529 table_state_build_free(p)
/* Lookup stub for keyless tables: always completes immediately with a miss
 * (all parameters unused; the default action applies). */
3533 table_stub_lkp(void *table __rte_unused,
3534 void *mailbox __rte_unused,
3535 uint8_t **key __rte_unused,
3536 uint64_t *action_id __rte_unused,
3537 uint8_t **action_data __rte_unused,
3541 return 1; /* DONE. */
/*
 * Build step: for every thread, set up per-table runtime lookup state —
 * lookup callback, mailbox, and a pointer to the struct holding the key.
 * Keyless tables get the stub lookup instead.
 */
3545 table_build(struct rte_swx_pipeline *p)
3549 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
3550 struct thread *t = &p->threads[i];
3551 struct table *table;
3553 t->tables = calloc(p->n_tables, sizeof(struct table_runtime));
3554 CHECK(t->tables, ENOMEM);
3556 TAILQ_FOREACH(table, &p->tables, node) {
3557 struct table_runtime *r = &t->tables[table->id];
/* Real table: use the implementation's lookup and a per-thread mailbox
 * sized by the implementation. */
3562 size = table->type->ops.mailbox_size_get();
3565 r->func = table->type->ops.lkp;
3569 r->mailbox = calloc(1, size);
3570 CHECK(r->mailbox, ENOMEM);
/* Key source: the table's header struct, or the thread metadata. */
3574 r->key = table->is_header ?
3575 &t->structs[table->header->struct_id] :
3576 &t->structs[p->metadata_struct_id];
/* Keyless table: stub lookup that always reports a miss. */
3578 r->func = table_stub_lkp;
/* Undo table_build: release per-thread table runtime state (mailbox frees
 * are in lines elided from this excerpt). */
3587 table_build_free(struct rte_swx_pipeline *p)
3591 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
3592 struct thread *t = &p->threads[i];
3598 for (j = 0; j < p->n_tables; j++) {
3599 struct table_runtime *r = &t->tables[j];
/* Release all tables and registered table types. */
3610 table_free(struct rte_swx_pipeline *p)
3612 table_build_free(p);
/* Drain the tables tailq, freeing each node's owned arrays. */
3618 elem = TAILQ_FIRST(&p->tables);
3622 TAILQ_REMOVE(&p->tables, elem, node);
3624 free(elem->actions);
3625 free(elem->default_action_data);
/* Then drain the table-type registry. */
3631 struct table_type *elem;
3633 elem = TAILQ_FIRST(&p->table_types);
3637 TAILQ_REMOVE(&p->table_types, elem, node);
/*
 * Public API: allocate and initialize an empty pipeline object on the given
 * NUMA node.  All registries start empty; struct ID 0 is pre-reserved for
 * action data (see n_structs below).
 */
3646 rte_swx_pipeline_config(struct rte_swx_pipeline **p, int numa_node)
3648 struct rte_swx_pipeline *pipeline;
3650 /* Check input parameters. */
3653 /* Memory allocation. */
3654 pipeline = calloc(1, sizeof(struct rte_swx_pipeline));
3655 CHECK(pipeline, ENOMEM);
3657 /* Initialization. */
3658 TAILQ_INIT(&pipeline->struct_types);
3659 TAILQ_INIT(&pipeline->port_in_types);
3660 TAILQ_INIT(&pipeline->ports_in);
3661 TAILQ_INIT(&pipeline->port_out_types);
3662 TAILQ_INIT(&pipeline->ports_out);
3663 TAILQ_INIT(&pipeline->extern_types);
3664 TAILQ_INIT(&pipeline->extern_objs);
3665 TAILQ_INIT(&pipeline->extern_funcs);
3666 TAILQ_INIT(&pipeline->headers);
3667 TAILQ_INIT(&pipeline->actions);
3668 TAILQ_INIT(&pipeline->table_types);
3669 TAILQ_INIT(&pipeline->tables);
3671 pipeline->n_structs = 1; /* Struct 0 is reserved for action_data. */
3672 pipeline->numa_node = numa_node;
/* Public API: tear down a pipeline — release instructions, table state and
 * every registry (most teardown calls are elided from this excerpt). */
3679 rte_swx_pipeline_free(struct rte_swx_pipeline *p)
3684 free(p->instructions);
3686 table_state_free(p);
3691 extern_func_free(p);
/*
 * Public API: translate the pipeline's main program (action == NULL) and
 * reset every thread's instruction pointer to its first instruction.
 */
3701 rte_swx_pipeline_instructions_config(struct rte_swx_pipeline *p,
3702 const char **instructions,
3703 uint32_t n_instructions)
3708 err = instruction_config(p, NULL, instructions, n_instructions);
3712 /* Thread instruction pointer reset. */
3713 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
3714 struct thread *t = &p->threads[i];
3716 thread_ip_reset(p, t);
/*
 * Public API: run every build stage in dependency order.  May only run once
 * per pipeline (build_done guard).  On any stage failure, the error label
 * below unwinds all stages in reverse order; *_build_free functions tolerate
 * partially built state.
 */
3723 rte_swx_pipeline_build(struct rte_swx_pipeline *p)
3728 CHECK(p->build_done == 0, EEXIST);
3730 status = port_in_build(p);
3734 status = port_out_build(p);
3738 status = struct_build(p);
3742 status = extern_obj_build(p);
3746 status = extern_func_build(p);
3750 status = header_build(p);
3754 status = metadata_build(p);
3758 status = action_build(p);
3762 status = table_build(p);
3766 status = table_state_build(p);
/* Error unwind: reverse order of the build calls above. */
3774 table_state_build_free(p);
3775 table_build_free(p);
3776 action_build_free(p);
3777 metadata_build_free(p);
3778 header_build_free(p);
3779 extern_func_build_free(p);
3780 extern_obj_build_free(p);
3781 port_out_build_free(p);
3782 port_in_build_free(p);
3783 struct_build_free(p);
/* Public API: execute n_instructions steps of the pipeline (one instruction
 * dispatch per iteration; the loop body is elided from this excerpt). */
3789 rte_swx_pipeline_run(struct rte_swx_pipeline *p, uint32_t n_instructions)
3793 for (i = 0; i < n_instructions; i++)
/* Public API: expose the table state array; only valid after build.  Returns
 * a borrowed pointer — the pipeline retains ownership. */
3801 rte_swx_pipeline_table_state_get(struct rte_swx_pipeline *p,
3802 struct rte_swx_table_state **table_state)
3804 if (!p || !table_state || !p->build_done)
3807 *table_state = p->table_state;
3812 rte_swx_pipeline_table_state_set(struct rte_swx_pipeline *p,
3813 struct rte_swx_table_state *table_state)
3815 if (!p || !table_state || !p->build_done)
3818 p->table_state = table_state;