1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Intel Corporation
10 #include <rte_common.h>
11 #include <rte_prefetch.h>
13 #include "rte_swx_pipeline.h"
14 #include "rte_swx_ctl.h"
16 #define CHECK(condition, err_code) \
22 #define CHECK_NAME(name, err_code) \
23 CHECK((name) && (name)[0], err_code)
30 #define TRACE(...) printf(__VA_ARGS__)
39 char name[RTE_SWX_NAME_SIZE];
45 TAILQ_ENTRY(struct_type) node;
46 char name[RTE_SWX_NAME_SIZE];
52 TAILQ_HEAD(struct_type_tailq, struct_type);
58 TAILQ_ENTRY(port_in_type) node;
59 char name[RTE_SWX_NAME_SIZE];
60 struct rte_swx_port_in_ops ops;
63 TAILQ_HEAD(port_in_type_tailq, port_in_type);
66 TAILQ_ENTRY(port_in) node;
67 struct port_in_type *type;
72 TAILQ_HEAD(port_in_tailq, port_in);
74 struct port_in_runtime {
75 rte_swx_port_in_pkt_rx_t pkt_rx;
82 struct port_out_type {
83 TAILQ_ENTRY(port_out_type) node;
84 char name[RTE_SWX_NAME_SIZE];
85 struct rte_swx_port_out_ops ops;
88 TAILQ_HEAD(port_out_type_tailq, port_out_type);
91 TAILQ_ENTRY(port_out) node;
92 struct port_out_type *type;
97 TAILQ_HEAD(port_out_tailq, port_out);
99 struct port_out_runtime {
100 rte_swx_port_out_pkt_tx_t pkt_tx;
101 rte_swx_port_out_flush_t flush;
108 struct extern_type_member_func {
109 TAILQ_ENTRY(extern_type_member_func) node;
110 char name[RTE_SWX_NAME_SIZE];
111 rte_swx_extern_type_member_func_t func;
115 TAILQ_HEAD(extern_type_member_func_tailq, extern_type_member_func);
118 TAILQ_ENTRY(extern_type) node;
119 char name[RTE_SWX_NAME_SIZE];
120 struct struct_type *mailbox_struct_type;
121 rte_swx_extern_type_constructor_t constructor;
122 rte_swx_extern_type_destructor_t destructor;
123 struct extern_type_member_func_tailq funcs;
127 TAILQ_HEAD(extern_type_tailq, extern_type);
130 TAILQ_ENTRY(extern_obj) node;
131 char name[RTE_SWX_NAME_SIZE];
132 struct extern_type *type;
138 TAILQ_HEAD(extern_obj_tailq, extern_obj);
140 #ifndef RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX
141 #define RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX 8
144 struct extern_obj_runtime {
147 rte_swx_extern_type_member_func_t funcs[RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX];
154 TAILQ_ENTRY(extern_func) node;
155 char name[RTE_SWX_NAME_SIZE];
156 struct struct_type *mailbox_struct_type;
157 rte_swx_extern_func_t func;
162 TAILQ_HEAD(extern_func_tailq, extern_func);
164 struct extern_func_runtime {
166 rte_swx_extern_func_t func;
173 TAILQ_ENTRY(header) node;
174 char name[RTE_SWX_NAME_SIZE];
175 struct struct_type *st;
180 TAILQ_HEAD(header_tailq, header);
182 struct header_runtime {
186 struct header_out_runtime {
196 /* Packet headers are always in Network Byte Order (NBO), i.e. big endian.
197 * Packet meta-data fields are always assumed to be in Host Byte Order (HBO).
198 * Table entry fields can be in either NBO or HBO; they are assumed to be in HBO
199 * when transferred to packet meta-data and in NBO when transferred to packet
203 /* Notation conventions:
204 * -Header field: H = h.header.field (dst/src)
205 * -Meta-data field: M = m.field (dst/src)
206 * -Extern object mailbox field: E = e.field (dst/src)
207 * -Extern function mailbox field: F = f.field (dst/src)
208 * -Table action data field: T = t.field (src only)
209 * -Immediate value: I = 32-bit unsigned value (src only)
212 enum instruction_type {
219 /* extract h.header */
240 /* validate h.header */
243 /* invalidate h.header */
244 INSTR_HDR_INVALIDATE,
255 uint8_t header_id[8];
256 uint8_t struct_id[8];
261 struct instr_hdr_validity {
266 enum instruction_type type;
269 struct instr_hdr_validity valid;
273 struct instruction_data {
274 char label[RTE_SWX_NAME_SIZE];
275 char jmp_label[RTE_SWX_NAME_SIZE];
276 uint32_t n_users; /* user = jmp instruction to this instruction. */
284 TAILQ_ENTRY(action) node;
285 char name[RTE_SWX_NAME_SIZE];
286 struct struct_type *st;
287 struct instruction *instructions;
288 uint32_t n_instructions;
292 TAILQ_HEAD(action_tailq, action);
298 TAILQ_ENTRY(table_type) node;
299 char name[RTE_SWX_NAME_SIZE];
300 enum rte_swx_table_match_type match_type;
301 struct rte_swx_table_ops ops;
304 TAILQ_HEAD(table_type_tailq, table_type);
307 enum rte_swx_table_match_type match_type;
312 TAILQ_ENTRY(table) node;
313 char name[RTE_SWX_NAME_SIZE];
314 char args[RTE_SWX_NAME_SIZE];
315 struct table_type *type; /* NULL when n_fields == 0. */
318 struct match_field *fields;
320 int is_header; /* Only valid when n_fields > 0. */
321 struct header *header; /* Only valid when n_fields > 0. */
324 struct action **actions;
325 struct action *default_action;
326 uint8_t *default_action_data;
328 int default_action_is_const;
329 uint32_t action_data_size_max;
335 TAILQ_HEAD(table_tailq, table);
337 struct table_runtime {
338 rte_swx_table_lookup_t func;
348 struct rte_swx_pkt pkt;
354 /* Packet headers. */
355 struct header_runtime *headers; /* Extracted or generated headers. */
356 struct header_out_runtime *headers_out; /* Emitted headers. */
357 uint8_t *header_storage;
358 uint8_t *header_out_storage;
359 uint64_t valid_headers;
360 uint32_t n_headers_out;
362 /* Packet meta-data. */
366 struct table_runtime *tables;
367 struct rte_swx_table_state *table_state;
369 int hit; /* 0 = Miss, 1 = Hit. */
371 /* Extern objects and functions. */
372 struct extern_obj_runtime *extern_objs;
373 struct extern_func_runtime *extern_funcs;
376 struct instruction *ip;
377 struct instruction *ret;
380 #define MASK64_BIT_GET(mask, pos) ((mask) & (1LLU << (pos)))
381 #define MASK64_BIT_SET(mask, pos) ((mask) | (1LLU << (pos)))
382 #define MASK64_BIT_CLR(mask, pos) ((mask) & ~(1LLU << (pos)))
384 #define METADATA_READ(thread, offset, n_bits) \
386 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
387 uint64_t m64 = *m64_ptr; \
388 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
392 #define METADATA_WRITE(thread, offset, n_bits, value) \
394 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
395 uint64_t m64 = *m64_ptr; \
396 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
398 uint64_t m_new = value; \
400 *m64_ptr = (m64 & ~m64_mask) | (m_new & m64_mask); \
403 #ifndef RTE_SWX_PIPELINE_THREADS_MAX
404 #define RTE_SWX_PIPELINE_THREADS_MAX 16
407 struct rte_swx_pipeline {
408 struct struct_type_tailq struct_types;
409 struct port_in_type_tailq port_in_types;
410 struct port_in_tailq ports_in;
411 struct port_out_type_tailq port_out_types;
412 struct port_out_tailq ports_out;
413 struct extern_type_tailq extern_types;
414 struct extern_obj_tailq extern_objs;
415 struct extern_func_tailq extern_funcs;
416 struct header_tailq headers;
417 struct struct_type *metadata_st;
418 uint32_t metadata_struct_id;
419 struct action_tailq actions;
420 struct table_type_tailq table_types;
421 struct table_tailq tables;
423 struct port_in_runtime *in;
424 struct port_out_runtime *out;
425 struct instruction **action_instructions;
426 struct rte_swx_table_state *table_state;
427 struct instruction *instructions;
428 struct thread threads[RTE_SWX_PIPELINE_THREADS_MAX];
432 uint32_t n_ports_out;
433 uint32_t n_extern_objs;
434 uint32_t n_extern_funcs;
440 uint32_t n_instructions;
448 static struct struct_type *
449 struct_type_find(struct rte_swx_pipeline *p, const char *name)
451 struct struct_type *elem;
453 TAILQ_FOREACH(elem, &p->struct_types, node)
454 if (strcmp(elem->name, name) == 0)
460 static struct field *
461 struct_type_field_find(struct struct_type *st, const char *name)
465 for (i = 0; i < st->n_fields; i++) {
466 struct field *f = &st->fields[i];
468 if (strcmp(f->name, name) == 0)
476 rte_swx_pipeline_struct_type_register(struct rte_swx_pipeline *p,
478 struct rte_swx_field_params *fields,
481 struct struct_type *st;
485 CHECK_NAME(name, EINVAL);
486 CHECK(fields, EINVAL);
487 CHECK(n_fields, EINVAL);
489 for (i = 0; i < n_fields; i++) {
490 struct rte_swx_field_params *f = &fields[i];
493 CHECK_NAME(f->name, EINVAL);
494 CHECK(f->n_bits, EINVAL);
495 CHECK(f->n_bits <= 64, EINVAL);
496 CHECK((f->n_bits & 7) == 0, EINVAL);
498 for (j = 0; j < i; j++) {
499 struct rte_swx_field_params *f_prev = &fields[j];
501 CHECK(strcmp(f->name, f_prev->name), EINVAL);
505 CHECK(!struct_type_find(p, name), EEXIST);
507 /* Node allocation. */
508 st = calloc(1, sizeof(struct struct_type));
511 st->fields = calloc(n_fields, sizeof(struct field));
517 /* Node initialization. */
518 strcpy(st->name, name);
519 for (i = 0; i < n_fields; i++) {
520 struct field *dst = &st->fields[i];
521 struct rte_swx_field_params *src = &fields[i];
523 strcpy(dst->name, src->name);
524 dst->n_bits = src->n_bits;
525 dst->offset = st->n_bits;
527 st->n_bits += src->n_bits;
529 st->n_fields = n_fields;
531 /* Node add to tailq. */
532 TAILQ_INSERT_TAIL(&p->struct_types, st, node);
538 struct_build(struct rte_swx_pipeline *p)
542 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
543 struct thread *t = &p->threads[i];
545 t->structs = calloc(p->n_structs, sizeof(uint8_t *));
546 CHECK(t->structs, ENOMEM);
553 struct_build_free(struct rte_swx_pipeline *p)
557 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
558 struct thread *t = &p->threads[i];
566 struct_free(struct rte_swx_pipeline *p)
568 struct_build_free(p);
572 struct struct_type *elem;
574 elem = TAILQ_FIRST(&p->struct_types);
578 TAILQ_REMOVE(&p->struct_types, elem, node);
587 static struct port_in_type *
588 port_in_type_find(struct rte_swx_pipeline *p, const char *name)
590 struct port_in_type *elem;
595 TAILQ_FOREACH(elem, &p->port_in_types, node)
596 if (strcmp(elem->name, name) == 0)
603 rte_swx_pipeline_port_in_type_register(struct rte_swx_pipeline *p,
605 struct rte_swx_port_in_ops *ops)
607 struct port_in_type *elem;
610 CHECK_NAME(name, EINVAL);
612 CHECK(ops->create, EINVAL);
613 CHECK(ops->free, EINVAL);
614 CHECK(ops->pkt_rx, EINVAL);
615 CHECK(ops->stats_read, EINVAL);
617 CHECK(!port_in_type_find(p, name), EEXIST);
619 /* Node allocation. */
620 elem = calloc(1, sizeof(struct port_in_type));
623 /* Node initialization. */
624 strcpy(elem->name, name);
625 memcpy(&elem->ops, ops, sizeof(*ops));
627 /* Node add to tailq. */
628 TAILQ_INSERT_TAIL(&p->port_in_types, elem, node);
633 static struct port_in *
634 port_in_find(struct rte_swx_pipeline *p, uint32_t port_id)
636 struct port_in *port;
638 TAILQ_FOREACH(port, &p->ports_in, node)
639 if (port->id == port_id)
646 rte_swx_pipeline_port_in_config(struct rte_swx_pipeline *p,
648 const char *port_type_name,
651 struct port_in_type *type = NULL;
652 struct port_in *port = NULL;
657 CHECK(!port_in_find(p, port_id), EINVAL);
659 CHECK_NAME(port_type_name, EINVAL);
660 type = port_in_type_find(p, port_type_name);
663 obj = type->ops.create(args);
666 /* Node allocation. */
667 port = calloc(1, sizeof(struct port_in));
670 /* Node initialization. */
675 /* Node add to tailq. */
676 TAILQ_INSERT_TAIL(&p->ports_in, port, node);
677 if (p->n_ports_in < port_id + 1)
678 p->n_ports_in = port_id + 1;
684 port_in_build(struct rte_swx_pipeline *p)
686 struct port_in *port;
689 CHECK(p->n_ports_in, EINVAL);
690 CHECK(rte_is_power_of_2(p->n_ports_in), EINVAL);
692 for (i = 0; i < p->n_ports_in; i++)
693 CHECK(port_in_find(p, i), EINVAL);
695 p->in = calloc(p->n_ports_in, sizeof(struct port_in_runtime));
696 CHECK(p->in, ENOMEM);
698 TAILQ_FOREACH(port, &p->ports_in, node) {
699 struct port_in_runtime *in = &p->in[port->id];
701 in->pkt_rx = port->type->ops.pkt_rx;
709 port_in_build_free(struct rte_swx_pipeline *p)
716 port_in_free(struct rte_swx_pipeline *p)
718 port_in_build_free(p);
722 struct port_in *port;
724 port = TAILQ_FIRST(&p->ports_in);
728 TAILQ_REMOVE(&p->ports_in, port, node);
729 port->type->ops.free(port->obj);
733 /* Input port types. */
735 struct port_in_type *elem;
737 elem = TAILQ_FIRST(&p->port_in_types);
741 TAILQ_REMOVE(&p->port_in_types, elem, node);
749 static struct port_out_type *
750 port_out_type_find(struct rte_swx_pipeline *p, const char *name)
752 struct port_out_type *elem;
757 TAILQ_FOREACH(elem, &p->port_out_types, node)
758 if (!strcmp(elem->name, name))
765 rte_swx_pipeline_port_out_type_register(struct rte_swx_pipeline *p,
767 struct rte_swx_port_out_ops *ops)
769 struct port_out_type *elem;
772 CHECK_NAME(name, EINVAL);
774 CHECK(ops->create, EINVAL);
775 CHECK(ops->free, EINVAL);
776 CHECK(ops->pkt_tx, EINVAL);
777 CHECK(ops->stats_read, EINVAL);
779 CHECK(!port_out_type_find(p, name), EEXIST);
781 /* Node allocation. */
782 elem = calloc(1, sizeof(struct port_out_type));
785 /* Node initialization. */
786 strcpy(elem->name, name);
787 memcpy(&elem->ops, ops, sizeof(*ops));
789 /* Node add to tailq. */
790 TAILQ_INSERT_TAIL(&p->port_out_types, elem, node);
795 static struct port_out *
796 port_out_find(struct rte_swx_pipeline *p, uint32_t port_id)
798 struct port_out *port;
800 TAILQ_FOREACH(port, &p->ports_out, node)
801 if (port->id == port_id)
808 rte_swx_pipeline_port_out_config(struct rte_swx_pipeline *p,
810 const char *port_type_name,
813 struct port_out_type *type = NULL;
814 struct port_out *port = NULL;
819 CHECK(!port_out_find(p, port_id), EINVAL);
821 CHECK_NAME(port_type_name, EINVAL);
822 type = port_out_type_find(p, port_type_name);
825 obj = type->ops.create(args);
828 /* Node allocation. */
829 port = calloc(1, sizeof(struct port_out));
832 /* Node initialization. */
837 /* Node add to tailq. */
838 TAILQ_INSERT_TAIL(&p->ports_out, port, node);
839 if (p->n_ports_out < port_id + 1)
840 p->n_ports_out = port_id + 1;
846 port_out_build(struct rte_swx_pipeline *p)
848 struct port_out *port;
851 CHECK(p->n_ports_out, EINVAL);
853 for (i = 0; i < p->n_ports_out; i++)
854 CHECK(port_out_find(p, i), EINVAL);
856 p->out = calloc(p->n_ports_out, sizeof(struct port_out_runtime));
857 CHECK(p->out, ENOMEM);
859 TAILQ_FOREACH(port, &p->ports_out, node) {
860 struct port_out_runtime *out = &p->out[port->id];
862 out->pkt_tx = port->type->ops.pkt_tx;
863 out->flush = port->type->ops.flush;
864 out->obj = port->obj;
871 port_out_build_free(struct rte_swx_pipeline *p)
878 port_out_free(struct rte_swx_pipeline *p)
880 port_out_build_free(p);
884 struct port_out *port;
886 port = TAILQ_FIRST(&p->ports_out);
890 TAILQ_REMOVE(&p->ports_out, port, node);
891 port->type->ops.free(port->obj);
895 /* Output port types. */
897 struct port_out_type *elem;
899 elem = TAILQ_FIRST(&p->port_out_types);
903 TAILQ_REMOVE(&p->port_out_types, elem, node);
911 static struct extern_type *
912 extern_type_find(struct rte_swx_pipeline *p, const char *name)
914 struct extern_type *elem;
916 TAILQ_FOREACH(elem, &p->extern_types, node)
917 if (strcmp(elem->name, name) == 0)
923 static struct extern_type_member_func *
924 extern_type_member_func_find(struct extern_type *type, const char *name)
926 struct extern_type_member_func *elem;
928 TAILQ_FOREACH(elem, &type->funcs, node)
929 if (strcmp(elem->name, name) == 0)
935 static struct extern_obj *
936 extern_obj_find(struct rte_swx_pipeline *p, const char *name)
938 struct extern_obj *elem;
940 TAILQ_FOREACH(elem, &p->extern_objs, node)
941 if (strcmp(elem->name, name) == 0)
948 rte_swx_pipeline_extern_type_register(struct rte_swx_pipeline *p,
950 const char *mailbox_struct_type_name,
951 rte_swx_extern_type_constructor_t constructor,
952 rte_swx_extern_type_destructor_t destructor)
954 struct extern_type *elem;
955 struct struct_type *mailbox_struct_type;
959 CHECK_NAME(name, EINVAL);
960 CHECK(!extern_type_find(p, name), EEXIST);
962 CHECK_NAME(mailbox_struct_type_name, EINVAL);
963 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
964 CHECK(mailbox_struct_type, EINVAL);
966 CHECK(constructor, EINVAL);
967 CHECK(destructor, EINVAL);
969 /* Node allocation. */
970 elem = calloc(1, sizeof(struct extern_type));
973 /* Node initialization. */
974 strcpy(elem->name, name);
975 elem->mailbox_struct_type = mailbox_struct_type;
976 elem->constructor = constructor;
977 elem->destructor = destructor;
978 TAILQ_INIT(&elem->funcs);
980 /* Node add to tailq. */
981 TAILQ_INSERT_TAIL(&p->extern_types, elem, node);
987 rte_swx_pipeline_extern_type_member_func_register(struct rte_swx_pipeline *p,
988 const char *extern_type_name,
990 rte_swx_extern_type_member_func_t member_func)
992 struct extern_type *type;
993 struct extern_type_member_func *type_member;
997 CHECK(extern_type_name, EINVAL);
998 type = extern_type_find(p, extern_type_name);
1000 CHECK(type->n_funcs < RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX, ENOSPC);
1002 CHECK(name, EINVAL);
1003 CHECK(!extern_type_member_func_find(type, name), EEXIST);
1005 CHECK(member_func, EINVAL);
1007 /* Node allocation. */
1008 type_member = calloc(1, sizeof(struct extern_type_member_func));
1009 CHECK(type_member, ENOMEM);
1011 /* Node initialization. */
1012 strcpy(type_member->name, name);
1013 type_member->func = member_func;
1014 type_member->id = type->n_funcs;
1016 /* Node add to tailq. */
1017 TAILQ_INSERT_TAIL(&type->funcs, type_member, node);
1024 rte_swx_pipeline_extern_object_config(struct rte_swx_pipeline *p,
1025 const char *extern_type_name,
1029 struct extern_type *type;
1030 struct extern_obj *obj;
1035 CHECK_NAME(extern_type_name, EINVAL);
1036 type = extern_type_find(p, extern_type_name);
1037 CHECK(type, EINVAL);
1039 CHECK_NAME(name, EINVAL);
1040 CHECK(!extern_obj_find(p, name), EEXIST);
1042 /* Node allocation. */
1043 obj = calloc(1, sizeof(struct extern_obj));
1046 /* Object construction. */
1047 obj_handle = type->constructor(args);
1053 /* Node initialization. */
1054 strcpy(obj->name, name);
1056 obj->obj = obj_handle;
1057 obj->struct_id = p->n_structs;
1058 obj->id = p->n_extern_objs;
1060 /* Node add to tailq. */
1061 TAILQ_INSERT_TAIL(&p->extern_objs, obj, node);
1069 extern_obj_build(struct rte_swx_pipeline *p)
1073 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1074 struct thread *t = &p->threads[i];
1075 struct extern_obj *obj;
1077 t->extern_objs = calloc(p->n_extern_objs,
1078 sizeof(struct extern_obj_runtime));
1079 CHECK(t->extern_objs, ENOMEM);
1081 TAILQ_FOREACH(obj, &p->extern_objs, node) {
1082 struct extern_obj_runtime *r =
1083 &t->extern_objs[obj->id];
1084 struct extern_type_member_func *func;
1085 uint32_t mailbox_size =
1086 obj->type->mailbox_struct_type->n_bits / 8;
1090 r->mailbox = calloc(1, mailbox_size);
1091 CHECK(r->mailbox, ENOMEM);
1093 TAILQ_FOREACH(func, &obj->type->funcs, node)
1094 r->funcs[func->id] = func->func;
1096 t->structs[obj->struct_id] = r->mailbox;
1104 extern_obj_build_free(struct rte_swx_pipeline *p)
1108 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1109 struct thread *t = &p->threads[i];
1112 if (!t->extern_objs)
1115 for (j = 0; j < p->n_extern_objs; j++) {
1116 struct extern_obj_runtime *r = &t->extern_objs[j];
1121 free(t->extern_objs);
1122 t->extern_objs = NULL;
1127 extern_obj_free(struct rte_swx_pipeline *p)
1129 extern_obj_build_free(p);
1131 /* Extern objects. */
1133 struct extern_obj *elem;
1135 elem = TAILQ_FIRST(&p->extern_objs);
1139 TAILQ_REMOVE(&p->extern_objs, elem, node);
1141 elem->type->destructor(elem->obj);
1147 struct extern_type *elem;
1149 elem = TAILQ_FIRST(&p->extern_types);
1153 TAILQ_REMOVE(&p->extern_types, elem, node);
1156 struct extern_type_member_func *func;
1158 func = TAILQ_FIRST(&elem->funcs);
1162 TAILQ_REMOVE(&elem->funcs, func, node);
1173 static struct extern_func *
1174 extern_func_find(struct rte_swx_pipeline *p, const char *name)
1176 struct extern_func *elem;
1178 TAILQ_FOREACH(elem, &p->extern_funcs, node)
1179 if (strcmp(elem->name, name) == 0)
1186 rte_swx_pipeline_extern_func_register(struct rte_swx_pipeline *p,
1188 const char *mailbox_struct_type_name,
1189 rte_swx_extern_func_t func)
1191 struct extern_func *f;
1192 struct struct_type *mailbox_struct_type;
1196 CHECK_NAME(name, EINVAL);
1197 CHECK(!extern_func_find(p, name), EEXIST);
1199 CHECK_NAME(mailbox_struct_type_name, EINVAL);
1200 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
1201 CHECK(mailbox_struct_type, EINVAL);
1203 CHECK(func, EINVAL);
1205 /* Node allocation. */
1206 f = calloc(1, sizeof(struct extern_func));
1207 CHECK(func, ENOMEM);
1209 /* Node initialization. */
1210 strcpy(f->name, name);
1211 f->mailbox_struct_type = mailbox_struct_type;
1213 f->struct_id = p->n_structs;
1214 f->id = p->n_extern_funcs;
1216 /* Node add to tailq. */
1217 TAILQ_INSERT_TAIL(&p->extern_funcs, f, node);
1218 p->n_extern_funcs++;
1225 extern_func_build(struct rte_swx_pipeline *p)
1229 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1230 struct thread *t = &p->threads[i];
1231 struct extern_func *func;
1233 /* Memory allocation. */
1234 t->extern_funcs = calloc(p->n_extern_funcs,
1235 sizeof(struct extern_func_runtime));
1236 CHECK(t->extern_funcs, ENOMEM);
1238 /* Extern function. */
1239 TAILQ_FOREACH(func, &p->extern_funcs, node) {
1240 struct extern_func_runtime *r =
1241 &t->extern_funcs[func->id];
1242 uint32_t mailbox_size =
1243 func->mailbox_struct_type->n_bits / 8;
1245 r->func = func->func;
1247 r->mailbox = calloc(1, mailbox_size);
1248 CHECK(r->mailbox, ENOMEM);
1250 t->structs[func->struct_id] = r->mailbox;
1258 extern_func_build_free(struct rte_swx_pipeline *p)
1262 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1263 struct thread *t = &p->threads[i];
1266 if (!t->extern_funcs)
1269 for (j = 0; j < p->n_extern_funcs; j++) {
1270 struct extern_func_runtime *r = &t->extern_funcs[j];
1275 free(t->extern_funcs);
1276 t->extern_funcs = NULL;
1281 extern_func_free(struct rte_swx_pipeline *p)
1283 extern_func_build_free(p);
1286 struct extern_func *elem;
1288 elem = TAILQ_FIRST(&p->extern_funcs);
1292 TAILQ_REMOVE(&p->extern_funcs, elem, node);
1300 static struct header *
1301 header_find(struct rte_swx_pipeline *p, const char *name)
1303 struct header *elem;
1305 TAILQ_FOREACH(elem, &p->headers, node)
1306 if (strcmp(elem->name, name) == 0)
1312 static struct header *
1313 header_parse(struct rte_swx_pipeline *p,
1316 if (name[0] != 'h' || name[1] != '.')
1319 return header_find(p, &name[2]);
1322 static struct field *
1323 header_field_parse(struct rte_swx_pipeline *p,
1325 struct header **header)
1329 char *header_name, *field_name;
1331 if ((name[0] != 'h') || (name[1] != '.'))
1334 header_name = strdup(&name[2]);
1338 field_name = strchr(header_name, '.');
1347 h = header_find(p, header_name);
1353 f = struct_type_field_find(h->st, field_name);
1367 rte_swx_pipeline_packet_header_register(struct rte_swx_pipeline *p,
1369 const char *struct_type_name)
1371 struct struct_type *st;
1373 size_t n_headers_max;
1376 CHECK_NAME(name, EINVAL);
1377 CHECK_NAME(struct_type_name, EINVAL);
1379 CHECK(!header_find(p, name), EEXIST);
1381 st = struct_type_find(p, struct_type_name);
1384 n_headers_max = RTE_SIZEOF_FIELD(struct thread, valid_headers) * 8;
1385 CHECK(p->n_headers < n_headers_max, ENOSPC);
1387 /* Node allocation. */
1388 h = calloc(1, sizeof(struct header));
1391 /* Node initialization. */
1392 strcpy(h->name, name);
1394 h->struct_id = p->n_structs;
1395 h->id = p->n_headers;
1397 /* Node add to tailq. */
1398 TAILQ_INSERT_TAIL(&p->headers, h, node);
1406 header_build(struct rte_swx_pipeline *p)
1409 uint32_t n_bytes = 0, i;
1411 TAILQ_FOREACH(h, &p->headers, node) {
1412 n_bytes += h->st->n_bits / 8;
1415 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1416 struct thread *t = &p->threads[i];
1417 uint32_t offset = 0;
1419 t->headers = calloc(p->n_headers,
1420 sizeof(struct header_runtime));
1421 CHECK(t->headers, ENOMEM);
1423 t->headers_out = calloc(p->n_headers,
1424 sizeof(struct header_out_runtime));
1425 CHECK(t->headers_out, ENOMEM);
1427 t->header_storage = calloc(1, n_bytes);
1428 CHECK(t->header_storage, ENOMEM);
1430 t->header_out_storage = calloc(1, n_bytes);
1431 CHECK(t->header_out_storage, ENOMEM);
1433 TAILQ_FOREACH(h, &p->headers, node) {
1434 uint8_t *header_storage;
1436 header_storage = &t->header_storage[offset];
1437 offset += h->st->n_bits / 8;
1439 t->headers[h->id].ptr0 = header_storage;
1440 t->structs[h->struct_id] = header_storage;
1448 header_build_free(struct rte_swx_pipeline *p)
1452 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1453 struct thread *t = &p->threads[i];
1455 free(t->headers_out);
1456 t->headers_out = NULL;
1461 free(t->header_out_storage);
1462 t->header_out_storage = NULL;
1464 free(t->header_storage);
1465 t->header_storage = NULL;
1470 header_free(struct rte_swx_pipeline *p)
1472 header_build_free(p);
1475 struct header *elem;
1477 elem = TAILQ_FIRST(&p->headers);
1481 TAILQ_REMOVE(&p->headers, elem, node);
1489 static struct field *
1490 metadata_field_parse(struct rte_swx_pipeline *p, const char *name)
1492 if (!p->metadata_st)
1495 if (name[0] != 'm' || name[1] != '.')
1498 return struct_type_field_find(p->metadata_st, &name[2]);
1502 rte_swx_pipeline_packet_metadata_register(struct rte_swx_pipeline *p,
1503 const char *struct_type_name)
1505 struct struct_type *st = NULL;
1509 CHECK_NAME(struct_type_name, EINVAL);
1510 st = struct_type_find(p, struct_type_name);
1512 CHECK(!p->metadata_st, EINVAL);
1514 p->metadata_st = st;
1515 p->metadata_struct_id = p->n_structs;
1523 metadata_build(struct rte_swx_pipeline *p)
1525 uint32_t n_bytes = p->metadata_st->n_bits / 8;
1528 /* Thread-level initialization. */
1529 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1530 struct thread *t = &p->threads[i];
1533 metadata = calloc(1, n_bytes);
1534 CHECK(metadata, ENOMEM);
1536 t->metadata = metadata;
1537 t->structs[p->metadata_struct_id] = metadata;
1544 metadata_build_free(struct rte_swx_pipeline *p)
1548 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1549 struct thread *t = &p->threads[i];
1557 metadata_free(struct rte_swx_pipeline *p)
1559 metadata_build_free(p);
1566 pipeline_port_inc(struct rte_swx_pipeline *p)
1568 p->port_id = (p->port_id + 1) & (p->n_ports_in - 1);
1572 thread_ip_reset(struct rte_swx_pipeline *p, struct thread *t)
1574 t->ip = p->instructions;
1578 thread_ip_inc(struct rte_swx_pipeline *p);
1581 thread_ip_inc(struct rte_swx_pipeline *p)
1583 struct thread *t = &p->threads[p->thread_id];
1589 thread_ip_inc_cond(struct thread *t, int cond)
1595 thread_yield(struct rte_swx_pipeline *p)
1597 p->thread_id = (p->thread_id + 1) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
1604 instr_rx_translate(struct rte_swx_pipeline *p,
1605 struct action *action,
1608 struct instruction *instr,
1609 struct instruction_data *data __rte_unused)
1613 CHECK(!action, EINVAL);
1614 CHECK(n_tokens == 2, EINVAL);
1616 f = metadata_field_parse(p, tokens[1]);
1619 instr->type = INSTR_RX;
1620 instr->io.io.offset = f->offset / 8;
1621 instr->io.io.n_bits = f->n_bits;
1626 instr_rx_exec(struct rte_swx_pipeline *p);
1629 instr_rx_exec(struct rte_swx_pipeline *p)
1631 struct thread *t = &p->threads[p->thread_id];
1632 struct instruction *ip = t->ip;
1633 struct port_in_runtime *port = &p->in[p->port_id];
1634 struct rte_swx_pkt *pkt = &t->pkt;
1638 pkt_received = port->pkt_rx(port->obj, pkt);
1639 t->ptr = &pkt->pkt[pkt->offset];
1640 rte_prefetch0(t->ptr);
1642 TRACE("[Thread %2u] rx %s from port %u\n",
1644 pkt_received ? "1 pkt" : "0 pkts",
1648 t->valid_headers = 0;
1649 t->n_headers_out = 0;
1652 METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, p->port_id);
1655 t->table_state = p->table_state;
1658 pipeline_port_inc(p);
1659 thread_ip_inc_cond(t, pkt_received);
1667 instr_tx_translate(struct rte_swx_pipeline *p,
1668 struct action *action __rte_unused,
1671 struct instruction *instr,
1672 struct instruction_data *data __rte_unused)
1676 CHECK(n_tokens == 2, EINVAL);
1678 f = metadata_field_parse(p, tokens[1]);
1681 instr->type = INSTR_TX;
1682 instr->io.io.offset = f->offset / 8;
1683 instr->io.io.n_bits = f->n_bits;
1688 emit_handler(struct thread *t)
1690 struct header_out_runtime *h0 = &t->headers_out[0];
1691 struct header_out_runtime *h1 = &t->headers_out[1];
1692 uint32_t offset = 0, i;
1694 /* No header change or header decapsulation. */
1695 if ((t->n_headers_out == 1) &&
1696 (h0->ptr + h0->n_bytes == t->ptr)) {
1697 TRACE("Emit handler: no header change or header decap.\n");
1699 t->pkt.offset -= h0->n_bytes;
1700 t->pkt.length += h0->n_bytes;
1705 /* Header encapsulation (optionally, with prior header decasulation). */
1706 if ((t->n_headers_out == 2) &&
1707 (h1->ptr + h1->n_bytes == t->ptr) &&
1708 (h0->ptr == h0->ptr0)) {
1711 TRACE("Emit handler: header encapsulation.\n");
1713 offset = h0->n_bytes + h1->n_bytes;
1714 memcpy(t->ptr - offset, h0->ptr, h0->n_bytes);
1715 t->pkt.offset -= offset;
1716 t->pkt.length += offset;
1721 /* Header insertion. */
1724 /* Header extraction. */
1727 /* For any other case. */
1728 TRACE("Emit handler: complex case.\n");
1730 for (i = 0; i < t->n_headers_out; i++) {
1731 struct header_out_runtime *h = &t->headers_out[i];
1733 memcpy(&t->header_out_storage[offset], h->ptr, h->n_bytes);
1734 offset += h->n_bytes;
1738 memcpy(t->ptr - offset, t->header_out_storage, offset);
1739 t->pkt.offset -= offset;
1740 t->pkt.length += offset;
1745 instr_tx_exec(struct rte_swx_pipeline *p);
1748 instr_tx_exec(struct rte_swx_pipeline *p)
1750 struct thread *t = &p->threads[p->thread_id];
1751 struct instruction *ip = t->ip;
1752 uint64_t port_id = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
1753 struct port_out_runtime *port = &p->out[port_id];
1754 struct rte_swx_pkt *pkt = &t->pkt;
1756 TRACE("[Thread %2u]: tx 1 pkt to port %u\n",
1764 port->pkt_tx(port->obj, pkt);
1767 thread_ip_reset(p, t);
1775 instr_hdr_extract_translate(struct rte_swx_pipeline *p,
1776 struct action *action,
1779 struct instruction *instr,
1780 struct instruction_data *data __rte_unused)
1784 CHECK(!action, EINVAL);
1785 CHECK(n_tokens == 2, EINVAL);
1787 h = header_parse(p, tokens[1]);
1790 instr->type = INSTR_HDR_EXTRACT;
1791 instr->io.hdr.header_id[0] = h->id;
1792 instr->io.hdr.struct_id[0] = h->struct_id;
1793 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
1798 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract);
1801 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract)
1803 struct thread *t = &p->threads[p->thread_id];
1804 struct instruction *ip = t->ip;
1805 uint64_t valid_headers = t->valid_headers;
1806 uint8_t *ptr = t->ptr;
1807 uint32_t offset = t->pkt.offset;
1808 uint32_t length = t->pkt.length;
1811 for (i = 0; i < n_extract; i++) {
1812 uint32_t header_id = ip->io.hdr.header_id[i];
1813 uint32_t struct_id = ip->io.hdr.struct_id[i];
1814 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
1816 TRACE("[Thread %2u]: extract header %u (%u bytes)\n",
1822 t->structs[struct_id] = ptr;
1823 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
1832 t->valid_headers = valid_headers;
1835 t->pkt.offset = offset;
1836 t->pkt.length = length;
1841 instr_hdr_extract_exec(struct rte_swx_pipeline *p)
1843 __instr_hdr_extract_exec(p, 1);
1850 instr_hdr_extract2_exec(struct rte_swx_pipeline *p)
1852 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
1855 __instr_hdr_extract_exec(p, 2);
1862 instr_hdr_extract3_exec(struct rte_swx_pipeline *p)
1864 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
1867 __instr_hdr_extract_exec(p, 3);
1874 instr_hdr_extract4_exec(struct rte_swx_pipeline *p)
1876 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
1879 __instr_hdr_extract_exec(p, 4);
1886 instr_hdr_extract5_exec(struct rte_swx_pipeline *p)
1888 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
1891 __instr_hdr_extract_exec(p, 5);
1898 instr_hdr_extract6_exec(struct rte_swx_pipeline *p)
1900 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
1903 __instr_hdr_extract_exec(p, 6);
1910 instr_hdr_extract7_exec(struct rte_swx_pipeline *p)
1912 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
1915 __instr_hdr_extract_exec(p, 7);
1922 instr_hdr_extract8_exec(struct rte_swx_pipeline *p)
1924 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
1927 __instr_hdr_extract_exec(p, 8);
/* Translate "emit <header>": expects exactly 2 tokens, resolves the header
 * name (tokens[1]) against the pipeline's registered headers, and fills the
 * instruction's header id, struct id and header byte size (n_bits / 8).
 * Returns EINVAL via CHECK on a malformed instruction. */
1937 instr_hdr_emit_translate(struct rte_swx_pipeline *p,
1938 struct action *action __rte_unused,
1941 struct instruction *instr,
1942 struct instruction_data *data __rte_unused)
1946 CHECK(n_tokens == 2, EINVAL);
1948 h = header_parse(p, tokens[1]);
1951 instr->type = INSTR_HDR_EMIT;
1952 instr->io.hdr.header_id[0] = h->id;
1953 instr->io.hdr.struct_id[0] = h->struct_id;
1954 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
/* Forward declaration of the common emit helper. */
1959 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit);
/* Common helper: emit n_emit headers into the current thread's
 * headers_out[] list. When an emitted header is laid out contiguously after
 * the previous output entry (ho_ptr + ho_nbytes == hi_ptr), it is merged
 * into that entry instead of creating a new one. */
1962 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit)
1964 struct thread *t = &p->threads[p->thread_id];
1965 struct instruction *ip = t->ip;
1966 uint32_t n_headers_out = t->n_headers_out;
/* NOTE(review): pointer to the last output entry is computed with
 * n_headers_out - 1 before the !t->n_headers_out guard below; when the list
 * is empty this addresses headers_out[-1] — confirm it is never dereferenced
 * on that path (lines hidden in this extract). */
1967 struct header_out_runtime *ho = &t->headers_out[n_headers_out - 1];
1968 uint8_t *ho_ptr = NULL;
1969 uint32_t ho_nbytes = 0, i;
1971 for (i = 0; i < n_emit; i++) {
1972 uint32_t header_id = ip->io.hdr.header_id[i];
1973 uint32_t struct_id = ip->io.hdr.struct_id[i];
1974 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
1976 struct header_runtime *hi = &t->headers[header_id];
1977 uint8_t *hi_ptr = t->structs[struct_id];
1979 TRACE("[Thread %2u]: emit header %u\n",
/* First header emitted by this thread: start the output list at entry 0. */
1985 if (!t->n_headers_out) {
1986 ho = &t->headers_out[0];
1988 ho->ptr0 = hi->ptr0;
1992 ho_nbytes = n_bytes;
1999 ho_nbytes = ho->n_bytes;
/* Contiguous with the previous output entry: extend it in place. */
2003 if (ho_ptr + ho_nbytes == hi_ptr) {
2004 ho_nbytes += n_bytes;
/* Non-contiguous: close the current entry and open a new one. */
2006 ho->n_bytes = ho_nbytes;
2009 ho->ptr0 = hi->ptr0;
2013 ho_nbytes = n_bytes;
/* Flush the accumulated byte count and entry count back to the thread. */
2019 ho->n_bytes = ho_nbytes;
2020 t->n_headers_out = n_headers_out;
/* Execute a single "emit" instruction: wrapper with n_emit = 1. */
2024 instr_hdr_emit_exec(struct rte_swx_pipeline *p)
2026 __instr_hdr_emit_exec(p, 1);
/* Fused emit+tx handlers: emit N headers then transmit. The fused count in
 * the TRACE message is N + 1 because it includes the trailing "tx"
 * instruction, while the helper is called with n_emit = N. */
2033 instr_hdr_emit_tx_exec(struct rte_swx_pipeline *p)
2035 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
2038 __instr_hdr_emit_exec(p, 1);
2043 instr_hdr_emit2_tx_exec(struct rte_swx_pipeline *p)
2045 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
2048 __instr_hdr_emit_exec(p, 2);
2053 instr_hdr_emit3_tx_exec(struct rte_swx_pipeline *p)
2055 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
2058 __instr_hdr_emit_exec(p, 3);
2063 instr_hdr_emit4_tx_exec(struct rte_swx_pipeline *p)
2065 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
2068 __instr_hdr_emit_exec(p, 4);
2073 instr_hdr_emit5_tx_exec(struct rte_swx_pipeline *p)
2075 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
2078 __instr_hdr_emit_exec(p, 5);
2083 instr_hdr_emit6_tx_exec(struct rte_swx_pipeline *p)
2085 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
2088 __instr_hdr_emit_exec(p, 6);
2093 instr_hdr_emit7_tx_exec(struct rte_swx_pipeline *p)
2095 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
2098 __instr_hdr_emit_exec(p, 7);
2103 instr_hdr_emit8_tx_exec(struct rte_swx_pipeline *p)
2105 TRACE("[Thread %2u] *** The next 9 instructions are fused. ***\n",
2108 __instr_hdr_emit_exec(p, 8);
/* Translate "validate <header>": expects exactly 2 tokens, resolves the
 * header name and records its id on the instruction. */
2116 instr_hdr_validate_translate(struct rte_swx_pipeline *p,
2117 struct action *action __rte_unused,
2120 struct instruction *instr,
2121 struct instruction_data *data __rte_unused)
2125 CHECK(n_tokens == 2, EINVAL);
2127 h = header_parse(p, tokens[1]);
2130 instr->type = INSTR_HDR_VALIDATE;
2131 instr->valid.header_id = h->id;
/* Execute "validate": set the header's bit in the thread's valid_headers
 * bit mask. */
2136 instr_hdr_validate_exec(struct rte_swx_pipeline *p)
2138 struct thread *t = &p->threads[p->thread_id];
2139 struct instruction *ip = t->ip;
2140 uint32_t header_id = ip->valid.header_id;
2142 TRACE("[Thread %2u] validate header %u\n", p->thread_id, header_id);
2145 t->valid_headers = MASK64_BIT_SET(t->valid_headers, header_id);
/* Translate "invalidate <header>": same shape as the validate translator,
 * but produces an INSTR_HDR_INVALIDATE instruction. */
2155 instr_hdr_invalidate_translate(struct rte_swx_pipeline *p,
2156 struct action *action __rte_unused,
2159 struct instruction *instr,
2160 struct instruction_data *data __rte_unused)
2164 CHECK(n_tokens == 2, EINVAL);
2166 h = header_parse(p, tokens[1]);
2169 instr->type = INSTR_HDR_INVALIDATE;
2170 instr->valid.header_id = h->id;
/* Execute "invalidate": clear the header's bit in the thread's
 * valid_headers bit mask. */
2175 instr_hdr_invalidate_exec(struct rte_swx_pipeline *p)
2177 struct thread *t = &p->threads[p->thread_id];
2178 struct instruction *ip = t->ip;
2179 uint32_t header_id = ip->valid.header_id;
2181 TRACE("[Thread %2u] invalidate header %u\n", p->thread_id, header_id);
2184 t->valid_headers = MASK64_BIT_CLR(t->valid_headers, header_id);
2190 #define RTE_SWX_INSTRUCTION_TOKENS_MAX 16
/* Translate one instruction string into a struct instruction: tokenize the
 * string in place (strtok_r), handle an optional leading "label :" pair,
 * then dispatch on the opcode token to the per-instruction translator.
 * NOTE(review): strtok_r's third argument reuses 'string' as the save
 * pointer, which mutates the caller-owned copy — callers pass a strdup'ed
 * string (see instruction_config). */
2193 instr_translate(struct rte_swx_pipeline *p,
2194 struct action *action,
2196 struct instruction *instr,
2197 struct instruction_data *data)
2199 char *tokens[RTE_SWX_INSTRUCTION_TOKENS_MAX];
2200 int n_tokens = 0, tpos = 0;
2202 /* Parse the instruction string into tokens. */
2206 token = strtok_r(string, " \t\v", &string);
2210 CHECK(n_tokens < RTE_SWX_INSTRUCTION_TOKENS_MAX, EINVAL);
2212 tokens[n_tokens] = token;
2216 CHECK(n_tokens, EINVAL);
2218 /* Handle the optional instruction label. */
2219 if ((n_tokens >= 2) && !strcmp(tokens[1], ":")) {
/* tokens[0] is the label name; the opcode starts at tokens[tpos]. */
2220 strcpy(data->label, tokens[0]);
2223 CHECK(n_tokens - tpos, EINVAL);
2226 /* Identify the instruction type. */
2227 if (!strcmp(tokens[tpos], "rx"))
2228 return instr_rx_translate(p,
2235 if (!strcmp(tokens[tpos], "tx"))
2236 return instr_tx_translate(p,
2243 if (!strcmp(tokens[tpos], "extract"))
2244 return instr_hdr_extract_translate(p,
2251 if (!strcmp(tokens[tpos], "emit"))
2252 return instr_hdr_emit_translate(p,
2259 if (!strcmp(tokens[tpos], "validate"))
2260 return instr_hdr_validate_translate(p,
2267 if (!strcmp(tokens[tpos], "invalidate"))
2268 return instr_hdr_invalidate_translate(p,
/* Count how many of the n instructions reference 'label' as their jump
 * target (jmp_label). */
2279 label_is_used(struct instruction_data *data, uint32_t n, const char *label)
2281 uint32_t count = 0, i;
2286 for (i = 0; i < n; i++)
2287 if (!strcmp(label, data[i].jmp_label))
/* Validate instruction labels: every defined label must be unique across
 * the instruction list (pairwise strcmp), then record per-label user counts
 * via label_is_used(). Returns EINVAL on a duplicate label. */
2294 instr_label_check(struct instruction_data *instruction_data,
2295 uint32_t n_instructions)
2299 /* Check that all instruction labels are unique. */
2300 for (i = 0; i < n_instructions; i++) {
2301 struct instruction_data *data = &instruction_data[i];
2302 char *label = data->label;
2308 for (j = i + 1; j < n_instructions; j++)
2309 CHECK(strcmp(label, data[j].label), EINVAL);
2312 /* Get users for each instruction label. */
2313 for (i = 0; i < n_instructions; i++) {
2314 struct instruction_data *data = &instruction_data[i];
2315 char *label = data->label;
2317 data->n_users = label_is_used(instruction_data,
/* Translate an array of instruction strings into an instruction program.
 * Allocates the instruction and instruction_data arrays, translates each
 * string (on a strdup'ed copy, since translation tokenizes in place), and
 * validates labels. On success the program is attached to the action 'a'
 * when given, otherwise to the pipeline itself.
 * NOTE(review): error-path cleanup lines are not visible in this extract —
 * confirm 'instr', 'data' and 'string' are freed on failure. */
2326 instruction_config(struct rte_swx_pipeline *p,
2328 const char **instructions,
2329 uint32_t n_instructions)
2331 struct instruction *instr = NULL;
2332 struct instruction_data *data = NULL;
2333 char *string = NULL;
2337 CHECK(n_instructions, EINVAL);
2338 CHECK(instructions, EINVAL);
2339 for (i = 0; i < n_instructions; i++)
2340 CHECK(instructions[i], EINVAL);
2342 /* Memory allocation. */
2343 instr = calloc(n_instructions, sizeof(struct instruction));
2349 data = calloc(n_instructions, sizeof(struct instruction_data));
2355 for (i = 0; i < n_instructions; i++) {
2356 string = strdup(instructions[i]);
2362 err = instr_translate(p, a, string, &instr[i], &data[i]);
2369 err = instr_label_check(data, n_instructions);
/* Attach the translated program to the action (if any) or the pipeline. */
2376 a->instructions = instr;
2377 a->n_instructions = n_instructions;
2379 p->instructions = instr;
2380 p->n_instructions = n_instructions;
2392 typedef void (*instr_exec_t)(struct rte_swx_pipeline *);
/* Jump table mapping each instruction type to its execution handler;
 * indexed by ip->type in instr_exec(). */
2394 static instr_exec_t instruction_table[] = {
2395 [INSTR_RX] = instr_rx_exec,
2396 [INSTR_TX] = instr_tx_exec,
2398 [INSTR_HDR_EXTRACT] = instr_hdr_extract_exec,
2399 [INSTR_HDR_EXTRACT2] = instr_hdr_extract2_exec,
2400 [INSTR_HDR_EXTRACT3] = instr_hdr_extract3_exec,
2401 [INSTR_HDR_EXTRACT4] = instr_hdr_extract4_exec,
2402 [INSTR_HDR_EXTRACT5] = instr_hdr_extract5_exec,
2403 [INSTR_HDR_EXTRACT6] = instr_hdr_extract6_exec,
2404 [INSTR_HDR_EXTRACT7] = instr_hdr_extract7_exec,
2405 [INSTR_HDR_EXTRACT8] = instr_hdr_extract8_exec,
2407 [INSTR_HDR_EMIT] = instr_hdr_emit_exec,
2408 [INSTR_HDR_EMIT_TX] = instr_hdr_emit_tx_exec,
2409 [INSTR_HDR_EMIT2_TX] = instr_hdr_emit2_tx_exec,
2410 [INSTR_HDR_EMIT3_TX] = instr_hdr_emit3_tx_exec,
2411 [INSTR_HDR_EMIT4_TX] = instr_hdr_emit4_tx_exec,
2412 [INSTR_HDR_EMIT5_TX] = instr_hdr_emit5_tx_exec,
2413 [INSTR_HDR_EMIT6_TX] = instr_hdr_emit6_tx_exec,
2414 [INSTR_HDR_EMIT7_TX] = instr_hdr_emit7_tx_exec,
2415 [INSTR_HDR_EMIT8_TX] = instr_hdr_emit8_tx_exec,
2417 [INSTR_HDR_VALIDATE] = instr_hdr_validate_exec,
2418 [INSTR_HDR_INVALIDATE] = instr_hdr_invalidate_exec,
/* Execute the current thread's instruction: look up the handler for the
 * instruction type in instruction_table and invoke it. */
2422 instr_exec(struct rte_swx_pipeline *p)
2424 struct thread *t = &p->threads[p->thread_id];
2425 struct instruction *ip = t->ip;
2426 instr_exec_t instr = instruction_table[ip->type];
/* Look up an action by name in the pipeline's action list; the visible code
 * returns a match via strcmp — NULL on miss presumably (return lines not in
 * this extract). */
static struct action *
2435 action_find(struct rte_swx_pipeline *p, const char *name)
2437 struct action *elem;
2442 TAILQ_FOREACH(elem, &p->actions, node)
2443 if (strcmp(elem->name, name) == 0)
/* Public API: register a new action. Validates the (unique) action name,
 * resolves the optional argument struct type, allocates and initializes the
 * action node, translates its instructions, and appends it to the
 * pipeline's action tailq. Errors are reported via CHECK (EINVAL/EEXIST). */
2450 rte_swx_pipeline_action_config(struct rte_swx_pipeline *p,
2452 const char *args_struct_type_name,
2453 const char **instructions,
2454 uint32_t n_instructions)
2456 struct struct_type *args_struct_type;
2462 CHECK_NAME(name, EINVAL);
2463 CHECK(!action_find(p, name), EEXIST);
2465 if (args_struct_type_name) {
2466 CHECK_NAME(args_struct_type_name, EINVAL);
2467 args_struct_type = struct_type_find(p, args_struct_type_name);
2468 CHECK(args_struct_type, EINVAL);
/* No argument struct: the action takes no action data. */
2470 args_struct_type = NULL;
2473 /* Node allocation. */
2474 a = calloc(1, sizeof(struct action));
2477 /* Node initialization. */
2478 strcpy(a->name, name);
2479 a->st = args_struct_type;
2480 a->id = p->n_actions;
2482 /* Instruction translation. */
2483 err = instruction_config(p, a, instructions, n_instructions);
2489 /* Node add to tailq. */
2490 TAILQ_INSERT_TAIL(&p->actions, a, node);
/* Build step: allocate the per-action instruction-pointer array and index
 * each action's translated program by action id. */
2497 action_build(struct rte_swx_pipeline *p)
2499 struct action *action;
2501 p->action_instructions = calloc(p->n_actions,
2502 sizeof(struct instruction *));
2503 CHECK(p->action_instructions, ENOMEM);
2505 TAILQ_FOREACH(action, &p->actions, node)
2506 p->action_instructions[action->id] = action->instructions;
/* Undo action_build(): release the per-action instruction index. */
2512 action_build_free(struct rte_swx_pipeline *p)
2514 free(p->action_instructions);
2515 p->action_instructions = NULL;
/* Free all actions: first undo the build step, then pop each action off
 * the tailq and free its translated instructions and the node itself. */
2519 action_free(struct rte_swx_pipeline *p)
2521 action_build_free(p);
2524 struct action *action;
2526 action = TAILQ_FIRST(&p->actions);
2530 TAILQ_REMOVE(&p->actions, action, node);
2531 free(action->instructions);
/* Look up a registered table type by name (strcmp over the table_types
 * tailq). */
static struct table_type *
2540 table_type_find(struct rte_swx_pipeline *p, const char *name)
2542 struct table_type *elem;
2544 TAILQ_FOREACH(elem, &p->table_types, node)
2545 if (strcmp(elem->name, name) == 0)
/* Pick a table type for a given match type: prefer the recommended type by
 * name if it exists AND has the right match type; otherwise fall back to
 * the first registered type with that match type. */
static struct table_type *
2552 table_type_resolve(struct rte_swx_pipeline *p,
2553 const char *recommended_type_name,
2554 enum rte_swx_table_match_type match_type)
2556 struct table_type *elem;
2558 /* Only consider the recommended type if the match type is correct. */
2559 if (recommended_type_name)
2560 TAILQ_FOREACH(elem, &p->table_types, node)
2561 if (!strcmp(elem->name, recommended_type_name) &&
2562 (elem->match_type == match_type))
2565 /* Ignore the recommended type and get the first element with this match
2568 TAILQ_FOREACH(elem, &p->table_types, node)
2569 if (elem->match_type == match_type)
/* Look up a table by name (strcmp over the tables tailq). */
static struct table *
2576 table_find(struct rte_swx_pipeline *p, const char *name)
2580 TAILQ_FOREACH(elem, &p->tables, node)
2581 if (strcmp(elem->name, name) == 0)
/* Look up a table by numeric id; iterates the same tailq comparing ids. */
static struct table *
2588 table_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
2590 struct table *table = NULL;
2592 TAILQ_FOREACH(table, &p->tables, node)
2593 if (table->id == id)
/* Public API: register a table type (name + match type + ops vtable).
 * Requires a unique name and non-NULL create/lkp/free ops; copies the ops
 * struct into a new node appended to the table_types tailq. */
2600 rte_swx_pipeline_table_type_register(struct rte_swx_pipeline *p,
2602 enum rte_swx_table_match_type match_type,
2603 struct rte_swx_table_ops *ops)
2605 struct table_type *elem;
2609 CHECK_NAME(name, EINVAL);
2610 CHECK(!table_type_find(p, name), EEXIST);
2613 CHECK(ops->create, EINVAL);
2614 CHECK(ops->lkp, EINVAL);
2615 CHECK(ops->free, EINVAL);
2617 /* Node allocation. */
2618 elem = calloc(1, sizeof(struct table_type));
2619 CHECK(elem, ENOMEM);
2621 /* Node initialization. */
2622 strcpy(elem->name, name);
2623 elem->match_type = match_type;
2624 memcpy(&elem->ops, ops, sizeof(*ops));
2626 /* Node add to tailq. */
2627 TAILQ_INSERT_TAIL(&p->table_types, elem, node);
/* Derive the overall table match type from its fields: all-exact fields
 * yield EXACT; a single trailing LPM field yields LPM; anything else is
 * WILDCARD. */
static enum rte_swx_table_match_type
2633 table_match_type_resolve(struct rte_swx_match_field_params *fields,
2638 for (i = 0; i < n_fields; i++)
2639 if (fields[i].match_type != RTE_SWX_TABLE_MATCH_EXACT)
/* Loop ran to completion: every field is an exact match. */
2643 return RTE_SWX_TABLE_MATCH_EXACT;
2645 if ((i == n_fields - 1) &&
2646 (fields[i].match_type == RTE_SWX_TABLE_MATCH_LPM))
2647 return RTE_SWX_TABLE_MATCH_LPM;
2649 return RTE_SWX_TABLE_MATCH_WILDCARD;
/* Public API: configure a table. Validates the match fields (all header
 * fields of one header, or all metadata fields, with strictly increasing
 * offsets), the action list and default action, resolves the table type,
 * then allocates and initializes the table node and appends it to the
 * tables tailq.
 * NOTE(review): several occurrences of "¶ms" below look like a mis-encoding
 * of "&params" (HTML &para; entity) introduced by extraction — confirm
 * against the original file. */
2653 rte_swx_pipeline_table_config(struct rte_swx_pipeline *p,
2655 struct rte_swx_pipeline_table_params *params,
2656 const char *recommended_table_type_name,
2660 struct table_type *type;
2662 struct action *default_action;
2663 struct header *header = NULL;
2665 uint32_t offset_prev = 0, action_data_size_max = 0, i;
2669 CHECK_NAME(name, EINVAL);
2670 CHECK(!table_find(p, name), EEXIST);
2672 CHECK(params, EINVAL);
/* Match field checks: each field must resolve to a header field or a
 * metadata field; all fields must come from the same source. */
2675 CHECK(!params->n_fields || params->fields, EINVAL);
2676 for (i = 0; i < params->n_fields; i++) {
2677 struct rte_swx_match_field_params *field = ¶ms->fields[i];
2679 struct field *hf, *mf;
2682 CHECK_NAME(field->name, EINVAL);
2684 hf = header_field_parse(p, field->name, &h);
2685 mf = metadata_field_parse(p, field->name);
2686 CHECK(hf || mf, EINVAL);
2688 offset = hf ? hf->offset : mf->offset;
/* First field fixes whether the key lives in a header or in metadata. */
2691 is_header = hf ? 1 : 0;
2692 header = hf ? h : NULL;
2693 offset_prev = offset;
/* Subsequent fields: same header (by id) or all metadata, and offsets
 * strictly increasing. */
2698 CHECK((is_header && hf && (h->id == header->id)) ||
2699 (!is_header && mf), EINVAL);
2701 CHECK(offset > offset_prev, EINVAL);
2702 offset_prev = offset;
2705 /* Action checks. */
2706 CHECK(params->n_actions, EINVAL);
2707 CHECK(params->action_names, EINVAL);
2708 for (i = 0; i < params->n_actions; i++) {
2709 const char *action_name = params->action_names[i];
2711 uint32_t action_data_size;
2713 CHECK(action_name, EINVAL);
2715 a = action_find(p, action_name);
/* Track the largest action-data size across this table's actions. */
2718 action_data_size = a->st ? a->st->n_bits / 8 : 0;
2719 if (action_data_size > action_data_size_max)
2720 action_data_size_max = action_data_size;
2723 CHECK(params->default_action_name, EINVAL);
/* NOTE(review): loop bound is p->n_actions but it indexes
 * params->action_names[] and the success check below uses
 * params->n_actions — if p->n_actions > params->n_actions this reads past
 * the caller's array. Confirm the bound should be params->n_actions. */
2724 for (i = 0; i < p->n_actions; i++)
2725 if (!strcmp(params->action_names[i],
2726 params->default_action_name))
2728 CHECK(i < params->n_actions, EINVAL);
2729 default_action = action_find(p, params->default_action_name);
/* Default action data is only meaningful when the action has an arg
 * struct. */
2730 CHECK((default_action->st && params->default_action_data) ||
2731 !params->default_action_data, EINVAL);
2733 /* Table type checks. */
2734 if (params->n_fields) {
2735 enum rte_swx_table_match_type match_type;
2737 match_type = table_match_type_resolve(params->fields,
2739 type = table_type_resolve(p,
2740 recommended_table_type_name,
2742 CHECK(type, EINVAL);
2747 /* Memory allocation. */
2748 t = calloc(1, sizeof(struct table));
2751 t->fields = calloc(params->n_fields, sizeof(struct match_field));
2757 t->actions = calloc(params->n_actions, sizeof(struct action *));
2764 if (action_data_size_max) {
2765 t->default_action_data = calloc(1, action_data_size_max);
2766 if (!t->default_action_data) {
2774 /* Node initialization. */
2775 strcpy(t->name, name);
2776 if (args && args[0])
2777 strcpy(t->args, args);
2780 for (i = 0; i < params->n_fields; i++) {
2781 struct rte_swx_match_field_params *field = ¶ms->fields[i];
2782 struct match_field *f = &t->fields[i];
2784 f->match_type = field->match_type;
/* Re-resolve each field name into the header or metadata struct. */
2785 f->field = is_header ?
2786 header_field_parse(p, field->name, NULL) :
2787 metadata_field_parse(p, field->name);
2789 t->n_fields = params->n_fields;
2790 t->is_header = is_header;
2793 for (i = 0; i < params->n_actions; i++)
2794 t->actions[i] = action_find(p, params->action_names[i]);
2795 t->default_action = default_action;
2796 if (default_action->st)
2797 memcpy(t->default_action_data,
2798 params->default_action_data,
2799 default_action->st->n_bits / 8);
2800 t->n_actions = params->n_actions;
2801 t->default_action_is_const = params->default_action_is_const;
2802 t->action_data_size_max = action_data_size_max;
2805 t->id = p->n_tables;
2807 /* Node add to tailq. */
2808 TAILQ_INSERT_TAIL(&p->tables, t, node);
/* Build the low-level rte_swx_table_params for a table: compute key offset
 * and size from the first/last match fields, build a byte-granular key mask
 * covering each field, and take the largest action-data size. Returns a
 * heap-allocated params struct (caller frees via table_params_free), or
 * NULL on allocation failure (error lines hidden in this extract). */
static struct rte_swx_table_params *
2815 table_params_get(struct table *table)
2817 struct rte_swx_table_params *params;
2818 struct field *first, *last;
2820 uint32_t key_size, key_offset, action_data_size, i;
2822 /* Memory allocation. */
2823 params = calloc(1, sizeof(struct rte_swx_table_params));
2827 /* Key offset and size. */
2828 first = table->fields[0].field;
2829 last = table->fields[table->n_fields - 1].field;
/* Field offsets/sizes are in bits; convert to bytes. */
2830 key_offset = first->offset / 8;
2831 key_size = (last->offset + last->n_bits - first->offset) / 8;
2833 /* Memory allocation. */
2834 key_mask = calloc(1, key_size);
/* Set the mask bytes covered by each match field; gaps between fields
 * stay zero. */
2841 for (i = 0; i < table->n_fields; i++) {
2842 struct field *f = table->fields[i].field;
2843 uint32_t start = (f->offset - first->offset) / 8;
2844 size_t size = f->n_bits / 8;
2846 memset(&key_mask[start], 0xFF, size);
2849 /* Action data size. */
2850 action_data_size = 0;
2851 for (i = 0; i < table->n_actions; i++) {
2852 struct action *action = table->actions[i];
2853 uint32_t ads = action->st ? action->st->n_bits / 8 : 0;
2855 if (ads > action_data_size)
2856 action_data_size = ads;
/* Fill in the output params. */
2860 params->match_type = table->type->match_type;
2861 params->key_size = key_size;
2862 params->key_offset = key_offset;
2863 params->key_mask0 = key_mask;
2864 params->action_data_size = action_data_size;
2865 params->n_keys_max = table->size;
/* Free a params struct produced by table_params_get(), including its owned
 * key mask. */
2871 table_params_free(struct rte_swx_table_params *params)
2876 free(params->key_mask0);
/* Build step: allocate the table_state array and, for each table, create
 * the underlying table object via its type's create op and copy in the
 * default action data and id.
 * NOTE(review): CHECK-based early returns here leave partially-built state
 * behind; presumably the caller (rte_swx_pipeline_build) frees it on the
 * error path — confirm. */
2881 table_state_build(struct rte_swx_pipeline *p)
2883 struct table *table;
2885 p->table_state = calloc(p->n_tables,
2886 sizeof(struct rte_swx_table_state));
2887 CHECK(p->table_state, ENOMEM);
2889 TAILQ_FOREACH(table, &p->tables, node) {
2890 struct rte_swx_table_state *ts = &p->table_state[table->id];
2893 struct rte_swx_table_params *params;
2896 params = table_params_get(table);
2897 CHECK(params, ENOMEM);
2899 ts->obj = table->type->ops.create(params,
/* The params are only needed for create(); release them either way. */
2904 table_params_free(params);
2905 CHECK(ts->obj, ENODEV);
2908 /* ts->default_action_data. */
2909 if (table->action_data_size_max) {
2910 ts->default_action_data =
2911 malloc(table->action_data_size_max);
2912 CHECK(ts->default_action_data, ENOMEM);
2914 memcpy(ts->default_action_data,
2915 table->default_action_data,
2916 table->action_data_size_max);
2919 /* ts->default_action_id. */
2920 ts->default_action_id = table->default_action->id;
/* Undo table_state_build(): free each table object via its type's free op
 * and each default-action-data buffer, then the state array itself.
 * Safe to call when table_state was never allocated. */
2927 table_state_build_free(struct rte_swx_pipeline *p)
2931 if (!p->table_state)
2934 for (i = 0; i < p->n_tables; i++) {
2935 struct rte_swx_table_state *ts = &p->table_state[i];
2936 struct table *table = table_find_by_id(p, i);
2939 if (table->type && ts->obj)
2940 table->type->ops.free(ts->obj);
2942 /* ts->default_action_data. */
2943 free(ts->default_action_data);
2946 free(p->table_state);
2947 p->table_state = NULL;
/* Free table state: currently just delegates to the build-free helper. */
2951 table_state_free(struct rte_swx_pipeline *p)
2953 table_state_build_free(p);
/* Stub lookup used for tables with no match fields: ignores all arguments
 * and immediately reports the lookup as complete (miss). */
2957 table_stub_lkp(void *table __rte_unused,
2958 void *mailbox __rte_unused,
2959 uint8_t **key __rte_unused,
2960 uint64_t *action_id __rte_unused,
2961 uint8_t **action_data __rte_unused,
2965 return 1; /* DONE. */
/* Build step: for every thread, allocate its per-table runtime array and
 * wire each entry with the table type's lookup function, a mailbox sized by
 * the type's mailbox_size_get op, and a pointer to the key source (header
 * struct or metadata struct). Tables without a type get the stub lookup. */
2969 table_build(struct rte_swx_pipeline *p)
2973 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2974 struct thread *t = &p->threads[i];
2975 struct table *table;
2977 t->tables = calloc(p->n_tables, sizeof(struct table_runtime));
2978 CHECK(t->tables, ENOMEM);
2980 TAILQ_FOREACH(table, &p->tables, node) {
2981 struct table_runtime *r = &t->tables[table->id];
2986 size = table->type->ops.mailbox_size_get();
2989 r->func = table->type->ops.lkp;
2993 r->mailbox = calloc(1, size);
2994 CHECK(r->mailbox, ENOMEM);
/* Key source: the header's struct for header-keyed tables, the
 * metadata struct otherwise. */
2998 r->key = table->is_header ?
2999 &t->structs[table->header->struct_id] :
3000 &t->structs[p->metadata_struct_id];
/* No table type (e.g. no match fields): use the stub lookup. */
3002 r->func = table_stub_lkp;
/* Undo table_build(): walk every thread's table runtime entries (freeing
 * per-entry resources; some lines hidden in this extract). */
3011 table_build_free(struct rte_swx_pipeline *p)
3015 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
3016 struct thread *t = &p->threads[i];
3022 for (j = 0; j < p->n_tables; j++) {
3023 struct table_runtime *r = &t->tables[j];
/* Free all tables and table types: undo the build step, pop and free each
 * table node (its actions array and default action data), then pop and
 * free each registered table type. */
3034 table_free(struct rte_swx_pipeline *p)
3036 table_build_free(p);
3042 elem = TAILQ_FIRST(&p->tables);
3046 TAILQ_REMOVE(&p->tables, elem, node);
3048 free(elem->actions);
3049 free(elem->default_action_data);
/* Second phase: drain the table_types tailq. */
3055 struct table_type *elem;
3057 elem = TAILQ_FIRST(&p->table_types);
3061 TAILQ_REMOVE(&p->table_types, elem, node);
/* Public API: allocate and zero-initialize a pipeline, init all its tailqs,
 * reserve struct id 0 for action data, and record the NUMA node hint. */
3070 rte_swx_pipeline_config(struct rte_swx_pipeline **p, int numa_node)
3072 struct rte_swx_pipeline *pipeline;
3074 /* Check input parameters. */
3077 /* Memory allocation. */
3078 pipeline = calloc(1, sizeof(struct rte_swx_pipeline));
3079 CHECK(pipeline, ENOMEM);
3081 /* Initialization. */
3082 TAILQ_INIT(&pipeline->struct_types);
3083 TAILQ_INIT(&pipeline->port_in_types);
3084 TAILQ_INIT(&pipeline->ports_in);
3085 TAILQ_INIT(&pipeline->port_out_types);
3086 TAILQ_INIT(&pipeline->ports_out);
3087 TAILQ_INIT(&pipeline->extern_types);
3088 TAILQ_INIT(&pipeline->extern_objs);
3089 TAILQ_INIT(&pipeline->extern_funcs);
3090 TAILQ_INIT(&pipeline->headers);
3091 TAILQ_INIT(&pipeline->actions);
3092 TAILQ_INIT(&pipeline->table_types);
3093 TAILQ_INIT(&pipeline->tables);
3095 pipeline->n_structs = 1; /* Struct 0 is reserved for action_data. */
3096 pipeline->numa_node = numa_node;
/* Public API: tear down a pipeline — free its instruction program, table
 * state, extern functions, and (in lines hidden here) the remaining
 * resources. */
3103 rte_swx_pipeline_free(struct rte_swx_pipeline *p)
3108 free(p->instructions);
3110 table_state_free(p);
3115 extern_func_free(p);
/* Public API: install the pipeline-level instruction program (no enclosing
 * action), then reset every thread's instruction pointer to the new
 * program's start. */
3125 rte_swx_pipeline_instructions_config(struct rte_swx_pipeline *p,
3126 const char **instructions,
3127 uint32_t n_instructions)
3132 err = instruction_config(p, NULL, instructions, n_instructions);
3136 /* Thread instruction pointer reset. */
3137 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
3138 struct thread *t = &p->threads[i];
3140 thread_ip_reset(p, t);
/* Public API: run all build steps in dependency order (ports, structs,
 * externs, headers, metadata, actions, tables, table state). Refuses to
 * build twice (EEXIST). On any step failing, the error path below unwinds
 * every build step in reverse order. */
3147 rte_swx_pipeline_build(struct rte_swx_pipeline *p)
3152 CHECK(p->build_done == 0, EEXIST);
3154 status = port_in_build(p);
3158 status = port_out_build(p);
3162 status = struct_build(p);
3166 status = extern_obj_build(p);
3170 status = extern_func_build(p);
3174 status = header_build(p);
3178 status = metadata_build(p);
3182 status = action_build(p);
3186 status = table_build(p);
3190 status = table_state_build(p);
/* Error path: undo every build step in reverse order. */
3198 table_state_build_free(p);
3199 table_build_free(p);
3200 action_build_free(p);
3201 metadata_build_free(p);
3202 header_build_free(p);
3203 extern_func_build_free(p);
3204 extern_obj_build_free(p);
3205 port_out_build_free(p);
3206 port_in_build_free(p);
3207 struct_build_free(p);
/* Public API: execute n_instructions steps of the pipeline (one loop
 * iteration per instruction quantum). */
3213 rte_swx_pipeline_run(struct rte_swx_pipeline *p, uint32_t n_instructions)
3217 for (i = 0; i < n_instructions; i++)
/* Public API: expose the internal table state array. Only valid after a
 * successful build; rejects NULL arguments. */
3225 rte_swx_pipeline_table_state_get(struct rte_swx_pipeline *p,
3226 struct rte_swx_table_state **table_state)
3228 if (!p || !table_state || !p->build_done)
3231 *table_state = p->table_state;
3236 rte_swx_pipeline_table_state_set(struct rte_swx_pipeline *p,
3237 struct rte_swx_table_state *table_state)
3239 if (!p || !table_state || !p->build_done)
3242 p->table_state = table_state;