1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Intel Corporation
10 #include <rte_common.h>
11 #include <rte_prefetch.h>
13 #include "rte_swx_pipeline.h"
14 #include "rte_swx_ctl.h"
/* Argument-validation helpers. CHECK() tests a condition and (in the elided
 * continuation of the macro) bails out with -err_code on failure.
 * CHECK_NAME() builds on CHECK() to reject NULL or empty-string names.
 * NOTE(review): macro bodies are truncated in this excerpt -- confirm the
 * exact failure action against the full file.
 */
16 #define CHECK(condition, err_code) \
22 #define CHECK_NAME(name, err_code) \
23 CHECK((name) && (name)[0], err_code)
/* Debug tracing; maps to printf here. Presumably gated by a build-time
 * flag in the elided lines -- TODO confirm. */
30 #define TRACE(...) printf(__VA_ARGS__)
/* Fragments of the pipeline object model. Each compile-time object (struct
 * type, port type, port, extern type/object/function, header) is a named
 * node kept on a per-category TAILQ inside struct rte_swx_pipeline; each
 * also has a flat *_runtime counterpart used on the datapath.
 * NOTE(review): many struct members are elided in this excerpt.
 */
39 char name[RTE_SWX_NAME_SIZE];
/* struct_type: a named, registered packet/metadata structure layout. */
45 TAILQ_ENTRY(struct_type) node;
46 char name[RTE_SWX_NAME_SIZE];
52 TAILQ_HEAD(struct_type_tailq, struct_type);
/* port_in_type: named set of input-port callbacks (ops). */
58 TAILQ_ENTRY(port_in_type) node;
59 char name[RTE_SWX_NAME_SIZE];
60 struct rte_swx_port_in_ops ops;
63 TAILQ_HEAD(port_in_type_tailq, port_in_type);
/* port_in: one configured input port instance of a port_in_type. */
66 TAILQ_ENTRY(port_in) node;
67 struct port_in_type *type;
72 TAILQ_HEAD(port_in_tailq, port_in);
/* port_in_runtime: datapath view -- just the rx callback (+ elided obj). */
74 struct port_in_runtime {
75 rte_swx_port_in_pkt_rx_t pkt_rx;
/* Output-port mirror of the input-port types above. */
82 struct port_out_type {
83 TAILQ_ENTRY(port_out_type) node;
84 char name[RTE_SWX_NAME_SIZE];
85 struct rte_swx_port_out_ops ops;
88 TAILQ_HEAD(port_out_type_tailq, port_out_type);
91 TAILQ_ENTRY(port_out) node;
92 struct port_out_type *type;
97 TAILQ_HEAD(port_out_tailq, port_out);
99 struct port_out_runtime {
100 rte_swx_port_out_pkt_tx_t pkt_tx;
101 rte_swx_port_out_flush_t flush;
/* extern_type_member_func: one named member function of an extern type. */
108 struct extern_type_member_func {
109 TAILQ_ENTRY(extern_type_member_func) node;
110 char name[RTE_SWX_NAME_SIZE];
111 rte_swx_extern_type_member_func_t func;
115 TAILQ_HEAD(extern_type_member_func_tailq, extern_type_member_func);
/* extern_type: named extern object class with ctor/dtor, a mailbox struct
 * layout used to pass arguments, and its member-function list. */
118 TAILQ_ENTRY(extern_type) node;
119 char name[RTE_SWX_NAME_SIZE];
120 struct struct_type *mailbox_struct_type;
121 rte_swx_extern_type_constructor_t constructor;
122 rte_swx_extern_type_destructor_t destructor;
123 struct extern_type_member_func_tailq funcs;
127 TAILQ_HEAD(extern_type_tailq, extern_type);
/* extern_obj: one named instance of an extern_type. */
130 TAILQ_ENTRY(extern_obj) node;
131 char name[RTE_SWX_NAME_SIZE];
132 struct extern_type *type;
138 TAILQ_HEAD(extern_obj_tailq, extern_obj);
140 #ifndef RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX
141 #define RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX 8
/* extern_obj_runtime: per-thread mailbox + flat member-func dispatch table
 * indexed by member-function id (see extern_obj_build). */
144 struct extern_obj_runtime {
147 rte_swx_extern_type_member_func_t funcs[RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX];
/* extern_func: a free-standing extern function with its own mailbox type. */
154 TAILQ_ENTRY(extern_func) node;
155 char name[RTE_SWX_NAME_SIZE];
156 struct struct_type *mailbox_struct_type;
157 rte_swx_extern_func_t func;
162 TAILQ_HEAD(extern_func_tailq, extern_func);
164 struct extern_func_runtime {
166 rte_swx_extern_func_t func;
/* header: a named packet header whose layout is a registered struct_type. */
173 TAILQ_ENTRY(header) node;
174 char name[RTE_SWX_NAME_SIZE];
175 struct struct_type *st;
180 TAILQ_HEAD(header_tailq, header);
182 struct header_runtime {
186 struct header_out_runtime {
196 /* Packet headers are always in Network Byte Order (NBO), i.e. big endian.
197 * Packet meta-data fields are always assumed to be in Host Byte Order (HBO).
198 * Table entry fields can be in either NBO or HBO; they are assumed to be in HBO
199 * when transferred to packet meta-data and in NBO when transferred to packet
203 /* Notation conventions:
204 * -Header field: H = h.header.field (dst/src)
205 * -Meta-data field: M = m.field (dst/src)
206 * -Extern object mailbox field: E = e.field (dst/src)
207 * -Extern function mailbox field: F = f.field (dst/src)
208 * -Table action data field: T = t.field (src only)
209 * -Immediate value: I = 32-bit unsigned value (src only)
/* Instruction set and per-thread state. The instruction structs below are
 * fragments; the hdr arrays are sized 8 to match the widest fused
 * extract/emit variants (see instr_hdr_extract8_exec et al.). */
212 enum instruction_type {
219 /* extract h.header */
249 uint8_t header_id[8];
250 uint8_t struct_id[8];
256 enum instruction_type type;
/* instruction_data: translation-time metadata (labels / jump targets),
 * kept separate from the executable struct instruction. */
262 struct instruction_data {
263 char label[RTE_SWX_NAME_SIZE];
264 char jmp_label[RTE_SWX_NAME_SIZE];
265 uint32_t n_users; /* user = jmp instruction to this instruction. */
/* action: named instruction sequence with an argument struct layout. */
273 TAILQ_ENTRY(action) node;
274 char name[RTE_SWX_NAME_SIZE];
275 struct struct_type *st;
276 struct instruction *instructions;
277 uint32_t n_instructions;
281 TAILQ_HEAD(action_tailq, action);
/* table_type: named table implementation (ops) for a given match type. */
287 TAILQ_ENTRY(table_type) node;
288 char name[RTE_SWX_NAME_SIZE];
289 enum rte_swx_table_match_type match_type;
290 struct rte_swx_table_ops ops;
293 TAILQ_HEAD(table_type_tailq, table_type);
296 enum rte_swx_table_match_type match_type;
/* table: match fields + allowed actions + default action configuration. */
301 TAILQ_ENTRY(table) node;
302 char name[RTE_SWX_NAME_SIZE];
303 char args[RTE_SWX_NAME_SIZE];
304 struct table_type *type; /* NULL when n_fields == 0. */
307 struct match_field *fields;
309 int is_header; /* Only valid when n_fields > 0. */
310 struct header *header; /* Only valid when n_fields > 0. */
313 struct action **actions;
314 struct action *default_action;
315 uint8_t *default_action_data;
317 int default_action_is_const;
318 uint32_t action_data_size_max;
324 TAILQ_HEAD(table_tailq, table);
326 struct table_runtime {
327 rte_swx_table_lookup_t func;
/* struct thread (fragment): complete per-thread datapath state -- current
 * packet, header storage, metadata, table state and instruction pointer. */
337 struct rte_swx_pkt pkt;
343 /* Packet headers. */
344 struct header_runtime *headers; /* Extracted or generated headers. */
345 struct header_out_runtime *headers_out; /* Emitted headers. */
346 uint8_t *header_storage;
347 uint8_t *header_out_storage;
348 uint64_t valid_headers;
349 uint32_t n_headers_out;
351 /* Packet meta-data. */
355 struct table_runtime *tables;
356 struct rte_swx_table_state *table_state;
358 int hit; /* 0 = Miss, 1 = Hit. */
360 /* Extern objects and functions. */
361 struct extern_obj_runtime *extern_objs;
362 struct extern_func_runtime *extern_funcs;
365 struct instruction *ip;
366 struct instruction *ret;
/* 64-bit validity-mask helpers; pos must be < 64 (shift by >= width is UB). */
369 #define MASK64_BIT_GET(mask, pos) ((mask) & (1LLU << (pos)))
370 #define MASK64_BIT_SET(mask, pos) ((mask) | (1LLU << (pos)))
371 #define MASK64_BIT_CLR(mask, pos) ((mask) & ~(1LLU << (pos)))
/* Read/write an n_bits-wide metadata field (n_bits in 1..64) at a byte
 * offset through a uint64_t access; the write merges under m64_mask so
 * neighboring fields are preserved. */
373 #define METADATA_READ(thread, offset, n_bits) \
375 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
376 uint64_t m64 = *m64_ptr; \
377 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
381 #define METADATA_WRITE(thread, offset, n_bits, value) \
383 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
384 uint64_t m64 = *m64_ptr; \
385 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
387 uint64_t m_new = value; \
389 *m64_ptr = (m64 & ~m64_mask) | (m_new & m64_mask); \
392 #ifndef RTE_SWX_PIPELINE_THREADS_MAX
393 #define RTE_SWX_PIPELINE_THREADS_MAX 16
/* Top-level pipeline object: configuration-time object lists (tailqs),
 * build products for the datapath (in/out runtime arrays, compiled
 * instructions, table state) and the per-thread state array.
 * NOTE(review): several counters/members are elided in this excerpt. */
396 struct rte_swx_pipeline {
397 struct struct_type_tailq struct_types;
398 struct port_in_type_tailq port_in_types;
399 struct port_in_tailq ports_in;
400 struct port_out_type_tailq port_out_types;
401 struct port_out_tailq ports_out;
402 struct extern_type_tailq extern_types;
403 struct extern_obj_tailq extern_objs;
404 struct extern_func_tailq extern_funcs;
405 struct header_tailq headers;
406 struct struct_type *metadata_st;
407 uint32_t metadata_struct_id;
408 struct action_tailq actions;
409 struct table_type_tailq table_types;
410 struct table_tailq tables;
/* Build-time products consumed by the run-time. */
412 struct port_in_runtime *in;
413 struct port_out_runtime *out;
414 struct instruction **action_instructions;
415 struct rte_swx_table_state *table_state;
416 struct instruction *instructions;
417 struct thread threads[RTE_SWX_PIPELINE_THREADS_MAX];
421 uint32_t n_ports_out;
422 uint32_t n_extern_objs;
423 uint32_t n_extern_funcs;
429 uint32_t n_instructions;
/* Linear lookup of a registered struct type by name; returns NULL when not
 * found (return statements elided in this excerpt). */
437 static struct struct_type *
438 struct_type_find(struct rte_swx_pipeline *p, const char *name)
440 struct struct_type *elem;
442 TAILQ_FOREACH(elem, &p->struct_types, node)
443 if (strcmp(elem->name, name) == 0)
/* Linear lookup of a field by name within one struct type. */
449 static struct field *
450 struct_type_field_find(struct struct_type *st, const char *name)
454 for (i = 0; i < st->n_fields; i++) {
455 struct field *f = &st->fields[i];
457 if (strcmp(f->name, name) == 0)
/* Register a new struct type: validate the field list (each field named,
 * 1..64 bits, byte-aligned, no duplicate names), allocate the node, copy
 * the field layout while accumulating bit offsets, and append to the
 * struct_types tailq. */
465 rte_swx_pipeline_struct_type_register(struct rte_swx_pipeline *p,
467 struct rte_swx_field_params *fields,
470 struct struct_type *st;
474 CHECK_NAME(name, EINVAL);
475 CHECK(fields, EINVAL);
476 CHECK(n_fields, EINVAL);
478 for (i = 0; i < n_fields; i++) {
479 struct rte_swx_field_params *f = &fields[i];
482 CHECK_NAME(f->name, EINVAL);
483 CHECK(f->n_bits, EINVAL);
484 CHECK(f->n_bits <= 64, EINVAL);
/* Byte-aligned fields only: n_bits must be a multiple of 8. */
485 CHECK((f->n_bits & 7) == 0, EINVAL);
/* Reject duplicate field names within the same struct type. */
487 for (j = 0; j < i; j++) {
488 struct rte_swx_field_params *f_prev = &fields[j];
490 CHECK(strcmp(f->name, f_prev->name), EINVAL);
494 CHECK(!struct_type_find(p, name), EEXIST);
496 /* Node allocation. */
497 st = calloc(1, sizeof(struct struct_type));
500 st->fields = calloc(n_fields, sizeof(struct field));
506 /* Node initialization. */
507 strcpy(st->name, name);
508 for (i = 0; i < n_fields; i++) {
509 struct field *dst = &st->fields[i];
510 struct rte_swx_field_params *src = &fields[i];
512 strcpy(dst->name, src->name);
513 dst->n_bits = src->n_bits;
/* Field offset = running total of bits registered so far. */
514 dst->offset = st->n_bits;
516 st->n_bits += src->n_bits;
518 st->n_fields = n_fields;
520 /* Node add to tailq. */
521 TAILQ_INSERT_TAIL(&p->struct_types, st, node);
/* Build step: give every thread a flat array of n_structs data pointers;
 * later build steps (headers, metadata, externs) fill the slots in. */
527 struct_build(struct rte_swx_pipeline *p)
531 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
532 struct thread *t = &p->threads[i];
534 t->structs = calloc(p->n_structs, sizeof(uint8_t *));
535 CHECK(t->structs, ENOMEM);
/* Undo struct_build: free the per-thread struct pointer arrays. */
542 struct_build_free(struct rte_swx_pipeline *p)
546 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
547 struct thread *t = &p->threads[i];
/* Full teardown: build products first, then every registered struct_type
 * node (pop-from-head loop; body partially elided). */
555 struct_free(struct rte_swx_pipeline *p)
557 struct_build_free(p);
561 struct struct_type *elem;
563 elem = TAILQ_FIRST(&p->struct_types);
567 TAILQ_REMOVE(&p->struct_types, elem, node);
/* Input-port type lookup by name (NULL when absent). */
576 static struct port_in_type *
577 port_in_type_find(struct rte_swx_pipeline *p, const char *name)
579 struct port_in_type *elem;
584 TAILQ_FOREACH(elem, &p->port_in_types, node)
585 if (strcmp(elem->name, name) == 0)
/* Register an input-port type: every ops callback is mandatory; the ops
 * struct is copied by value into the node. */
592 rte_swx_pipeline_port_in_type_register(struct rte_swx_pipeline *p,
594 struct rte_swx_port_in_ops *ops)
596 struct port_in_type *elem;
599 CHECK_NAME(name, EINVAL);
601 CHECK(ops->create, EINVAL);
602 CHECK(ops->free, EINVAL);
603 CHECK(ops->pkt_rx, EINVAL);
604 CHECK(ops->stats_read, EINVAL);
606 CHECK(!port_in_type_find(p, name), EEXIST);
608 /* Node allocation. */
609 elem = calloc(1, sizeof(struct port_in_type));
612 /* Node initialization. */
613 strcpy(elem->name, name);
614 memcpy(&elem->ops, ops, sizeof(*ops));
616 /* Node add to tailq. */
617 TAILQ_INSERT_TAIL(&p->port_in_types, elem, node);
/* Input-port lookup by numeric id. */
622 static struct port_in *
623 port_in_find(struct rte_swx_pipeline *p, uint32_t port_id)
625 struct port_in *port;
627 TAILQ_FOREACH(port, &p->ports_in, node)
628 if (port->id == port_id)
/* Configure one input port: instantiate the underlying port object via the
 * type's create() callback, then record the node and grow n_ports_in so it
 * covers the highest port id seen. */
635 rte_swx_pipeline_port_in_config(struct rte_swx_pipeline *p,
637 const char *port_type_name,
640 struct port_in_type *type = NULL;
641 struct port_in *port = NULL;
646 CHECK(!port_in_find(p, port_id), EINVAL);
648 CHECK_NAME(port_type_name, EINVAL);
649 type = port_in_type_find(p, port_type_name);
652 obj = type->ops.create(args);
655 /* Node allocation. */
656 port = calloc(1, sizeof(struct port_in));
659 /* Node initialization. */
664 /* Node add to tailq. */
665 TAILQ_INSERT_TAIL(&p->ports_in, port, node);
666 if (p->n_ports_in < port_id + 1)
667 p->n_ports_in = port_id + 1;
/* Build step: require a dense, power-of-2 set of input ports (the port
 * counter wraps with a mask in pipeline_port_inc), then flatten rx
 * callbacks into the p->in runtime array indexed by port id. */
673 port_in_build(struct rte_swx_pipeline *p)
675 struct port_in *port;
678 CHECK(p->n_ports_in, EINVAL);
679 CHECK(rte_is_power_of_2(p->n_ports_in), EINVAL);
681 for (i = 0; i < p->n_ports_in; i++)
682 CHECK(port_in_find(p, i), EINVAL);
684 p->in = calloc(p->n_ports_in, sizeof(struct port_in_runtime));
685 CHECK(p->in, ENOMEM);
687 TAILQ_FOREACH(port, &p->ports_in, node) {
688 struct port_in_runtime *in = &p->in[port->id];
690 in->pkt_rx = port->type->ops.pkt_rx;
698 port_in_build_free(struct rte_swx_pipeline *p)
/* Teardown: build products, then each port (freeing the underlying port
 * object via its type's free()), then the port types. */
705 port_in_free(struct rte_swx_pipeline *p)
707 port_in_build_free(p);
711 struct port_in *port;
713 port = TAILQ_FIRST(&p->ports_in);
717 TAILQ_REMOVE(&p->ports_in, port, node);
718 port->type->ops.free(port->obj);
722 /* Input port types. */
724 struct port_in_type *elem;
726 elem = TAILQ_FIRST(&p->port_in_types);
730 TAILQ_REMOVE(&p->port_in_types, elem, node);
/* Output-port side: mirrors the input-port code above, with the additions
 * of a flush callback and no power-of-2 requirement on the port count. */
738 static struct port_out_type *
739 port_out_type_find(struct rte_swx_pipeline *p, const char *name)
741 struct port_out_type *elem;
746 TAILQ_FOREACH(elem, &p->port_out_types, node)
747 if (!strcmp(elem->name, name))
/* Register an output-port type; all ops callbacks mandatory. */
754 rte_swx_pipeline_port_out_type_register(struct rte_swx_pipeline *p,
756 struct rte_swx_port_out_ops *ops)
758 struct port_out_type *elem;
761 CHECK_NAME(name, EINVAL);
763 CHECK(ops->create, EINVAL);
764 CHECK(ops->free, EINVAL);
765 CHECK(ops->pkt_tx, EINVAL);
766 CHECK(ops->stats_read, EINVAL);
768 CHECK(!port_out_type_find(p, name), EEXIST);
770 /* Node allocation. */
771 elem = calloc(1, sizeof(struct port_out_type));
774 /* Node initialization. */
775 strcpy(elem->name, name);
776 memcpy(&elem->ops, ops, sizeof(*ops));
778 /* Node add to tailq. */
779 TAILQ_INSERT_TAIL(&p->port_out_types, elem, node);
784 static struct port_out *
785 port_out_find(struct rte_swx_pipeline *p, uint32_t port_id)
787 struct port_out *port;
789 TAILQ_FOREACH(port, &p->ports_out, node)
790 if (port->id == port_id)
/* Configure one output port; grows n_ports_out to cover the new id. */
797 rte_swx_pipeline_port_out_config(struct rte_swx_pipeline *p,
799 const char *port_type_name,
802 struct port_out_type *type = NULL;
803 struct port_out *port = NULL;
808 CHECK(!port_out_find(p, port_id), EINVAL);
810 CHECK_NAME(port_type_name, EINVAL);
811 type = port_out_type_find(p, port_type_name);
814 obj = type->ops.create(args);
817 /* Node allocation. */
818 port = calloc(1, sizeof(struct port_out));
821 /* Node initialization. */
826 /* Node add to tailq. */
827 TAILQ_INSERT_TAIL(&p->ports_out, port, node);
828 if (p->n_ports_out < port_id + 1)
829 p->n_ports_out = port_id + 1;
/* Build step: dense id space required; flatten tx/flush callbacks and the
 * port object into the p->out runtime array. */
835 port_out_build(struct rte_swx_pipeline *p)
837 struct port_out *port;
840 CHECK(p->n_ports_out, EINVAL);
842 for (i = 0; i < p->n_ports_out; i++)
843 CHECK(port_out_find(p, i), EINVAL);
845 p->out = calloc(p->n_ports_out, sizeof(struct port_out_runtime));
846 CHECK(p->out, ENOMEM);
848 TAILQ_FOREACH(port, &p->ports_out, node) {
849 struct port_out_runtime *out = &p->out[port->id];
851 out->pkt_tx = port->type->ops.pkt_tx;
852 out->flush = port->type->ops.flush;
853 out->obj = port->obj;
860 port_out_build_free(struct rte_swx_pipeline *p)
/* Teardown mirrors port_in_free: runtime array, ports, then port types. */
867 port_out_free(struct rte_swx_pipeline *p)
869 port_out_build_free(p);
873 struct port_out *port;
875 port = TAILQ_FIRST(&p->ports_out);
879 TAILQ_REMOVE(&p->ports_out, port, node);
880 port->type->ops.free(port->obj);
884 /* Output port types. */
886 struct port_out_type *elem;
888 elem = TAILQ_FIRST(&p->port_out_types);
892 TAILQ_REMOVE(&p->port_out_types, elem, node);
/* Name lookups for extern types, their member functions, and extern object
 * instances; all simple linear scans returning NULL on miss. */
900 static struct extern_type *
901 extern_type_find(struct rte_swx_pipeline *p, const char *name)
903 struct extern_type *elem;
905 TAILQ_FOREACH(elem, &p->extern_types, node)
906 if (strcmp(elem->name, name) == 0)
912 static struct extern_type_member_func *
913 extern_type_member_func_find(struct extern_type *type, const char *name)
915 struct extern_type_member_func *elem;
917 TAILQ_FOREACH(elem, &type->funcs, node)
918 if (strcmp(elem->name, name) == 0)
924 static struct extern_obj *
925 extern_obj_find(struct rte_swx_pipeline *p, const char *name)
927 struct extern_obj *elem;
929 TAILQ_FOREACH(elem, &p->extern_objs, node)
930 if (strcmp(elem->name, name) == 0)
/* Register an extern type: mailbox struct type must already be registered;
 * constructor and destructor are mandatory; member-function list starts
 * empty and is populated by the member-func register call below. */
937 rte_swx_pipeline_extern_type_register(struct rte_swx_pipeline *p,
939 const char *mailbox_struct_type_name,
940 rte_swx_extern_type_constructor_t constructor,
941 rte_swx_extern_type_destructor_t destructor)
943 struct extern_type *elem;
944 struct struct_type *mailbox_struct_type;
948 CHECK_NAME(name, EINVAL);
949 CHECK(!extern_type_find(p, name), EEXIST);
951 CHECK_NAME(mailbox_struct_type_name, EINVAL);
952 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
953 CHECK(mailbox_struct_type, EINVAL);
955 CHECK(constructor, EINVAL);
956 CHECK(destructor, EINVAL);
958 /* Node allocation. */
959 elem = calloc(1, sizeof(struct extern_type));
962 /* Node initialization. */
963 strcpy(elem->name, name);
964 elem->mailbox_struct_type = mailbox_struct_type;
965 elem->constructor = constructor;
966 elem->destructor = destructor;
967 TAILQ_INIT(&elem->funcs);
969 /* Node add to tailq. */
970 TAILQ_INSERT_TAIL(&p->extern_types, elem, node);
976 rte_swx_pipeline_extern_type_member_func_register(struct rte_swx_pipeline *p,
977 const char *extern_type_name,
979 rte_swx_extern_type_member_func_t member_func)
981 struct extern_type *type;
982 struct extern_type_member_func *type_member;
986 CHECK(extern_type_name, EINVAL);
987 type = extern_type_find(p, extern_type_name);
989 CHECK(type->n_funcs < RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX, ENOSPC);
992 CHECK(!extern_type_member_func_find(type, name), EEXIST);
994 CHECK(member_func, EINVAL);
996 /* Node allocation. */
997 type_member = calloc(1, sizeof(struct extern_type_member_func));
998 CHECK(type_member, ENOMEM);
1000 /* Node initialization. */
1001 strcpy(type_member->name, name);
1002 type_member->func = member_func;
1003 type_member->id = type->n_funcs;
1005 /* Node add to tailq. */
1006 TAILQ_INSERT_TAIL(&type->funcs, type_member, node);
/* Instantiate an extern object: run the type's constructor with the given
 * args, then record the node. Each object also reserves a struct slot
 * (p->n_structs) for its per-thread mailbox. */
1013 rte_swx_pipeline_extern_object_config(struct rte_swx_pipeline *p,
1014 const char *extern_type_name,
1018 struct extern_type *type;
1019 struct extern_obj *obj;
1024 CHECK_NAME(extern_type_name, EINVAL);
1025 type = extern_type_find(p, extern_type_name);
1026 CHECK(type, EINVAL);
1028 CHECK_NAME(name, EINVAL);
1029 CHECK(!extern_obj_find(p, name), EEXIST);
1031 /* Node allocation. */
1032 obj = calloc(1, sizeof(struct extern_obj));
1035 /* Object construction. */
1036 obj_handle = type->constructor(args);
1042 /* Node initialization. */
1043 strcpy(obj->name, name);
1045 obj->obj = obj_handle;
1046 obj->struct_id = p->n_structs;
1047 obj->id = p->n_extern_objs;
1049 /* Node add to tailq. */
1050 TAILQ_INSERT_TAIL(&p->extern_objs, obj, node);
/* Build step: per thread, allocate the extern-object runtime array, give
 * each object a zeroed mailbox sized from its mailbox struct type, flatten
 * the member functions into the id-indexed dispatch table, and publish the
 * mailbox pointer in the thread's struct slot. */
1058 extern_obj_build(struct rte_swx_pipeline *p)
1062 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1063 struct thread *t = &p->threads[i];
1064 struct extern_obj *obj;
1066 t->extern_objs = calloc(p->n_extern_objs,
1067 sizeof(struct extern_obj_runtime));
1068 CHECK(t->extern_objs, ENOMEM);
1070 TAILQ_FOREACH(obj, &p->extern_objs, node) {
1071 struct extern_obj_runtime *r =
1072 &t->extern_objs[obj->id];
1073 struct extern_type_member_func *func;
1074 uint32_t mailbox_size =
1075 obj->type->mailbox_struct_type->n_bits / 8;
1079 r->mailbox = calloc(1, mailbox_size);
1080 CHECK(r->mailbox, ENOMEM);
1082 TAILQ_FOREACH(func, &obj->type->funcs, node)
1083 r->funcs[func->id] = func->func;
1085 t->structs[obj->struct_id] = r->mailbox;
/* Undo extern_obj_build: free per-object mailboxes, then the arrays. */
1093 extern_obj_build_free(struct rte_swx_pipeline *p)
1097 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1098 struct thread *t = &p->threads[i];
1101 if (!t->extern_objs)
1104 for (j = 0; j < p->n_extern_objs; j++) {
1105 struct extern_obj_runtime *r = &t->extern_objs[j];
1110 free(t->extern_objs);
1111 t->extern_objs = NULL;
/* Full teardown: build products, then objects (running each type's
 * destructor), then extern types together with their member-func lists. */
1116 extern_obj_free(struct rte_swx_pipeline *p)
1118 extern_obj_build_free(p);
1120 /* Extern objects. */
1122 struct extern_obj *elem;
1124 elem = TAILQ_FIRST(&p->extern_objs);
1128 TAILQ_REMOVE(&p->extern_objs, elem, node);
1130 elem->type->destructor(elem->obj);
1136 struct extern_type *elem;
1138 elem = TAILQ_FIRST(&p->extern_types);
1142 TAILQ_REMOVE(&p->extern_types, elem, node);
1145 struct extern_type_member_func *func;
1147 func = TAILQ_FIRST(&elem->funcs);
1151 TAILQ_REMOVE(&elem->funcs, func, node);
/* Extern-function lookup by name; NULL on miss. */
1162 static struct extern_func *
1163 extern_func_find(struct rte_swx_pipeline *p, const char *name)
1165 struct extern_func *elem;
1167 TAILQ_FOREACH(elem, &p->extern_funcs, node)
1168 if (strcmp(elem->name, name) == 0)
1175 rte_swx_pipeline_extern_func_register(struct rte_swx_pipeline *p,
1177 const char *mailbox_struct_type_name,
1178 rte_swx_extern_func_t func)
1180 struct extern_func *f;
1181 struct struct_type *mailbox_struct_type;
1185 CHECK_NAME(name, EINVAL);
1186 CHECK(!extern_func_find(p, name), EEXIST);
1188 CHECK_NAME(mailbox_struct_type_name, EINVAL);
1189 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
1190 CHECK(mailbox_struct_type, EINVAL);
1192 CHECK(func, EINVAL);
1194 /* Node allocation. */
1195 f = calloc(1, sizeof(struct extern_func));
1196 CHECK(func, ENOMEM);
1198 /* Node initialization. */
1199 strcpy(f->name, name);
1200 f->mailbox_struct_type = mailbox_struct_type;
1202 f->struct_id = p->n_structs;
1203 f->id = p->n_extern_funcs;
1205 /* Node add to tailq. */
1206 TAILQ_INSERT_TAIL(&p->extern_funcs, f, node);
1207 p->n_extern_funcs++;
/* Build step: per thread, allocate the extern-func runtime array and, for
 * each registered function, copy the function pointer, allocate a zeroed
 * mailbox sized from its mailbox struct type, and publish the mailbox in
 * the thread's struct slot. */
1214 extern_func_build(struct rte_swx_pipeline *p)
1218 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1219 struct thread *t = &p->threads[i];
1220 struct extern_func *func;
1222 /* Memory allocation. */
1223 t->extern_funcs = calloc(p->n_extern_funcs,
1224 sizeof(struct extern_func_runtime));
1225 CHECK(t->extern_funcs, ENOMEM);
1227 /* Extern function. */
1228 TAILQ_FOREACH(func, &p->extern_funcs, node) {
1229 struct extern_func_runtime *r =
1230 &t->extern_funcs[func->id];
1231 uint32_t mailbox_size =
1232 func->mailbox_struct_type->n_bits / 8;
1234 r->func = func->func;
1236 r->mailbox = calloc(1, mailbox_size);
1237 CHECK(r->mailbox, ENOMEM);
1239 t->structs[func->struct_id] = r->mailbox;
/* Undo extern_func_build: mailboxes first, then the arrays. */
1247 extern_func_build_free(struct rte_swx_pipeline *p)
1251 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1252 struct thread *t = &p->threads[i];
1255 if (!t->extern_funcs)
1258 for (j = 0; j < p->n_extern_funcs; j++) {
1259 struct extern_func_runtime *r = &t->extern_funcs[j];
1264 free(t->extern_funcs);
1265 t->extern_funcs = NULL;
/* Full teardown: build products, then the registered nodes. */
1270 extern_func_free(struct rte_swx_pipeline *p)
1272 extern_func_build_free(p);
1275 struct extern_func *elem;
1277 elem = TAILQ_FIRST(&p->extern_funcs);
1281 TAILQ_REMOVE(&p->extern_funcs, elem, node);
/* Header lookup by name; NULL on miss. */
1289 static struct header *
1290 header_find(struct rte_swx_pipeline *p, const char *name)
1292 struct header *elem;
1294 TAILQ_FOREACH(elem, &p->headers, node)
1295 if (strcmp(elem->name, name) == 0)
/* Parse an instruction operand of the form "h.<header>"; returns the
 * header node or NULL when the prefix or the name does not match. */
1301 static struct header *
1302 header_parse(struct rte_swx_pipeline *p,
1305 if (name[0] != 'h' || name[1] != '.')
1308 return header_find(p, &name[2]);
/* Parse "h.<header>.<field>" into (header, field). The header part is
 * isolated by duplicating the string and splitting at the first '.'
 * (cleanup of the strdup'ed copy happens in elided lines). */
1311 static struct field *
1312 header_field_parse(struct rte_swx_pipeline *p,
1314 struct header **header)
1318 char *header_name, *field_name;
1320 if ((name[0] != 'h') || (name[1] != '.'))
1323 header_name = strdup(&name[2]);
1327 field_name = strchr(header_name, '.');
1336 h = header_find(p, header_name);
1342 f = struct_type_field_find(h->st, field_name);
/* Register a packet header over an already-registered struct type. The
 * header count is capped by the bit width of thread.valid_headers, since
 * each header owns one bit of that validity mask. Each header also
 * reserves a struct slot (p->n_structs). */
1356 rte_swx_pipeline_packet_header_register(struct rte_swx_pipeline *p,
1358 const char *struct_type_name)
1360 struct struct_type *st;
1362 size_t n_headers_max;
1365 CHECK_NAME(name, EINVAL);
1366 CHECK_NAME(struct_type_name, EINVAL);
1368 CHECK(!header_find(p, name), EEXIST);
1370 st = struct_type_find(p, struct_type_name);
1373 n_headers_max = RTE_SIZEOF_FIELD(struct thread, valid_headers) * 8;
1374 CHECK(p->n_headers < n_headers_max, ENOSPC);
1376 /* Node allocation. */
1377 h = calloc(1, sizeof(struct header));
1380 /* Node initialization. */
1381 strcpy(h->name, name);
1383 h->struct_id = p->n_structs;
1384 h->id = p->n_headers;
1386 /* Node add to tailq. */
1387 TAILQ_INSERT_TAIL(&p->headers, h, node);
/* Build step: size one contiguous storage area as the sum of all header
 * byte sizes, then per thread allocate header runtime arrays plus separate
 * in/out storage, and carve per-header slices out of header_storage at
 * cumulative offsets, publishing each slice in the thread's struct slot. */
1395 header_build(struct rte_swx_pipeline *p)
1398 uint32_t n_bytes = 0, i;
1400 TAILQ_FOREACH(h, &p->headers, node) {
1401 n_bytes += h->st->n_bits / 8;
1404 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1405 struct thread *t = &p->threads[i];
1406 uint32_t offset = 0;
1408 t->headers = calloc(p->n_headers,
1409 sizeof(struct header_runtime));
1410 CHECK(t->headers, ENOMEM);
1412 t->headers_out = calloc(p->n_headers,
1413 sizeof(struct header_out_runtime));
1414 CHECK(t->headers_out, ENOMEM);
1416 t->header_storage = calloc(1, n_bytes);
1417 CHECK(t->header_storage, ENOMEM);
1419 t->header_out_storage = calloc(1, n_bytes);
1420 CHECK(t->header_out_storage, ENOMEM);
1422 TAILQ_FOREACH(h, &p->headers, node) {
1423 uint8_t *header_storage;
1425 header_storage = &t->header_storage[offset];
1426 offset += h->st->n_bits / 8;
/* ptr0 = this header's home slice; used by the emit path to detect
 * whether a header still lives in its original storage. */
1428 t->headers[h->id].ptr0 = header_storage;
1429 t->structs[h->struct_id] = header_storage;
/* Undo header_build: runtime arrays and both storage areas, per thread. */
1437 header_build_free(struct rte_swx_pipeline *p)
1441 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1442 struct thread *t = &p->threads[i];
1444 free(t->headers_out);
1445 t->headers_out = NULL;
1450 free(t->header_out_storage);
1451 t->header_out_storage = NULL;
1453 free(t->header_storage);
1454 t->header_storage = NULL;
/* Full teardown: build products, then the registered header nodes. */
1459 header_free(struct rte_swx_pipeline *p)
1461 header_build_free(p);
1464 struct header *elem;
1466 elem = TAILQ_FIRST(&p->headers);
1470 TAILQ_REMOVE(&p->headers, elem, node);
/* Parse an instruction operand of the form "m.<field>" against the single
 * registered metadata struct type; NULL when unset or not matching. */
1478 static struct field *
1479 metadata_field_parse(struct rte_swx_pipeline *p, const char *name)
1481 if (!p->metadata_st)
1484 if (name[0] != 'm' || name[1] != '.')
1487 return struct_type_field_find(p->metadata_st, &name[2]);
/* Register THE metadata struct type (only one allowed per pipeline); it
 * also reserves a struct slot for the per-thread metadata buffer. */
1491 rte_swx_pipeline_packet_metadata_register(struct rte_swx_pipeline *p,
1492 const char *struct_type_name)
1494 struct struct_type *st = NULL;
1498 CHECK_NAME(struct_type_name, EINVAL);
1499 st = struct_type_find(p, struct_type_name);
1501 CHECK(!p->metadata_st, EINVAL);
1503 p->metadata_st = st;
1504 p->metadata_struct_id = p->n_structs;
/* Build step: per thread, allocate a zeroed metadata buffer sized from
 * the metadata struct type and publish it in the thread's struct slot. */
1512 metadata_build(struct rte_swx_pipeline *p)
1514 uint32_t n_bytes = p->metadata_st->n_bits / 8;
1517 /* Thread-level initialization. */
1518 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1519 struct thread *t = &p->threads[i];
1522 metadata = calloc(1, n_bytes);
1523 CHECK(metadata, ENOMEM);
1525 t->metadata = metadata;
1526 t->structs[p->metadata_struct_id] = metadata;
1533 metadata_build_free(struct rte_swx_pipeline *p)
1537 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1538 struct thread *t = &p->threads[i];
1546 metadata_free(struct rte_swx_pipeline *p)
1548 metadata_build_free(p);
/* Round-robin input-port advance; relies on n_ports_in being a power of 2
 * (enforced in port_in_build) so the AND implements the wrap-around. */
1555 pipeline_port_inc(struct rte_swx_pipeline *p)
1557 p->port_id = (p->port_id + 1) & (p->n_ports_in - 1);
/* Instruction-pointer helpers for the current thread. */
1561 thread_ip_reset(struct rte_swx_pipeline *p, struct thread *t)
1563 t->ip = p->instructions;
1567 thread_ip_inc(struct rte_swx_pipeline *p);
1570 thread_ip_inc(struct rte_swx_pipeline *p)
1572 struct thread *t = &p->threads[p->thread_id];
1578 thread_ip_inc_cond(struct thread *t, int cond)
/* Round-robin thread advance; THREADS_MAX is a power of 2 (16). */
1584 thread_yield(struct rte_swx_pipeline *p)
1586 p->thread_id = (p->thread_id + 1) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
/* Translate "rx m.field": only valid outside actions; the metadata field
 * receives the input port id at run time (offset stored in bytes). */
1593 instr_rx_translate(struct rte_swx_pipeline *p,
1594 struct action *action,
1597 struct instruction *instr,
1598 struct instruction_data *data __rte_unused)
1602 CHECK(!action, EINVAL);
1603 CHECK(n_tokens == 2, EINVAL);
1605 f = metadata_field_parse(p, tokens[1]);
1608 instr->type = INSTR_RX;
1609 instr->io.io.offset = f->offset / 8;
1610 instr->io.io.n_bits = f->n_bits;
/* Execute rx: poll the current input port for one packet, reset per-packet
 * state (valid headers, emitted headers), write the port id into metadata,
 * snapshot the table state, then advance to the next port and advance the
 * instruction pointer only when a packet was actually received. */
1615 instr_rx_exec(struct rte_swx_pipeline *p);
1618 instr_rx_exec(struct rte_swx_pipeline *p)
1620 struct thread *t = &p->threads[p->thread_id];
1621 struct instruction *ip = t->ip;
1622 struct port_in_runtime *port = &p->in[p->port_id];
1623 struct rte_swx_pkt *pkt = &t->pkt;
1627 pkt_received = port->pkt_rx(port->obj, pkt);
1628 t->ptr = &pkt->pkt[pkt->offset];
1629 rte_prefetch0(t->ptr);
1631 TRACE("[Thread %2u] rx %s from port %u\n",
1633 pkt_received ? "1 pkt" : "0 pkts",
1637 t->valid_headers = 0;
1638 t->n_headers_out = 0;
1641 METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, p->port_id);
1644 t->table_state = p->table_state;
1647 pipeline_port_inc(p);
1648 thread_ip_inc_cond(t, pkt_received);
/* Translate "tx m.field": the metadata field supplies the output port id
 * at run time. */
1656 instr_tx_translate(struct rte_swx_pipeline *p,
1657 struct action *action __rte_unused,
1660 struct instruction *instr,
1661 struct instruction_data *data __rte_unused)
1665 CHECK(n_tokens == 2, EINVAL);
1667 f = metadata_field_parse(p, tokens[1]);
1670 instr->type = INSTR_TX;
1671 instr->io.io.offset = f->offset / 8;
1672 instr->io.io.n_bits = f->n_bits;
/* Write the emitted headers back in front of the payload before tx.
 * Fast paths: (1) a single emitted header that already sits immediately
 * before the payload -- just extend the packet over it; (2) exactly two
 * emitted headers where the second is in place and the first is still in
 * its home storage -- one memcpy. Everything else falls through to the
 * generic gather loop at the bottom. */
1677 emit_handler(struct thread *t)
1679 struct header_out_runtime *h0 = &t->headers_out[0];
1680 struct header_out_runtime *h1 = &t->headers_out[1];
1681 uint32_t offset = 0, i;
1683 /* No header change or header decapsulation. */
1684 if ((t->n_headers_out == 1) &&
1685 (h0->ptr + h0->n_bytes == t->ptr)) {
1686 TRACE("Emit handler: no header change or header decap.\n");
1688 t->pkt.offset -= h0->n_bytes;
1689 t->pkt.length += h0->n_bytes;
1694 /* Header encapsulation (optionally, with prior header decapsulation). */
1695 if ((t->n_headers_out == 2) &&
1696 (h1->ptr + h1->n_bytes == t->ptr) &&
1697 (h0->ptr == h0->ptr0)) {
1700 TRACE("Emit handler: header encapsulation.\n");
1702 offset = h0->n_bytes + h1->n_bytes;
1703 memcpy(t->ptr - offset, h0->ptr, h0->n_bytes);
1704 t->pkt.offset -= offset;
1705 t->pkt.length += offset;
1710 /* Header insertion. */
1713 /* Header extraction. */
1716 /* For any other case. */
1717 TRACE("Emit handler: complex case.\n");
/* Generic path: gather all emitted headers into header_out_storage, then
 * copy the concatenation in front of the payload in one move. */
1719 for (i = 0; i < t->n_headers_out; i++) {
1720 struct header_out_runtime *h = &t->headers_out[i];
1722 memcpy(&t->header_out_storage[offset], h->ptr, h->n_bytes);
1723 offset += h->n_bytes;
1727 memcpy(t->ptr - offset, t->header_out_storage, offset);
1728 t->pkt.offset -= offset;
1729 t->pkt.length += offset;
/* Execute tx: read the port id from metadata, finalize the headers via
 * emit_handler (call elided in this excerpt -- presumably between the
 * TRACE and pkt_tx), send the packet, and reset the IP for the next one. */
1734 instr_tx_exec(struct rte_swx_pipeline *p);
1737 instr_tx_exec(struct rte_swx_pipeline *p)
1739 struct thread *t = &p->threads[p->thread_id];
1740 struct instruction *ip = t->ip;
1741 uint64_t port_id = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
1742 struct port_out_runtime *port = &p->out[port_id];
1743 struct rte_swx_pkt *pkt = &t->pkt;
1745 TRACE("[Thread %2u]: tx 1 pkt to port %u\n",
1753 port->pkt_tx(port->obj, pkt);
1756 thread_ip_reset(p, t);
/* Translate "extract h.header": only valid outside actions; records the
 * header id, its struct slot and its byte size in operand slot 0 (fused
 * multi-extract variants fill further slots during optimization). */
1764 instr_hdr_extract_translate(struct rte_swx_pipeline *p,
1765 struct action *action,
1768 struct instruction *instr,
1769 struct instruction_data *data __rte_unused)
1773 CHECK(!action, EINVAL);
1774 CHECK(n_tokens == 2, EINVAL);
1776 h = header_parse(p, tokens[1]);
1779 instr->type = INSTR_HDR_EXTRACT;
1780 instr->io.hdr.header_id[0] = h->id;
1781 instr->io.hdr.struct_id[0] = h->struct_id;
1782 instr->io.hdr.n_bytes[0] = h->st->n_bytes / 8;
/* Shared worker for 1..8 fused extracts: for each header, point its struct
 * slot straight into the packet (zero-copy) and set its validity bit;
 * pointer/offset/length advancing is in elided lines. */
1787 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract);
1790 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract)
1792 struct thread *t = &p->threads[p->thread_id];
1793 struct instruction *ip = t->ip;
1794 uint64_t valid_headers = t->valid_headers;
1795 uint8_t *ptr = t->ptr;
1796 uint32_t offset = t->pkt.offset;
1797 uint32_t length = t->pkt.length;
1800 for (i = 0; i < n_extract; i++) {
1801 uint32_t header_id = ip->io.hdr.header_id[i];
1802 uint32_t struct_id = ip->io.hdr.struct_id[i];
1803 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
1805 TRACE("[Thread %2u]: extract header %u (%u bytes)\n",
1811 t->structs[struct_id] = ptr;
1812 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
1821 t->valid_headers = valid_headers;
1824 t->pkt.offset = offset;
1825 t->pkt.length = length;
/* Thin wrappers: one per fusion width, all delegating to the worker. */
1830 instr_hdr_extract_exec(struct rte_swx_pipeline *p)
1832 __instr_hdr_extract_exec(p, 1);
1839 instr_hdr_extract2_exec(struct rte_swx_pipeline *p)
1841 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
1844 __instr_hdr_extract_exec(p, 2);
1851 instr_hdr_extract3_exec(struct rte_swx_pipeline *p)
1853 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
1856 __instr_hdr_extract_exec(p, 3);
1863 instr_hdr_extract4_exec(struct rte_swx_pipeline *p)
1865 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
1868 __instr_hdr_extract_exec(p, 4);
1875 instr_hdr_extract5_exec(struct rte_swx_pipeline *p)
1877 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
1880 __instr_hdr_extract_exec(p, 5);
1887 instr_hdr_extract6_exec(struct rte_swx_pipeline *p)
1889 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
1892 __instr_hdr_extract_exec(p, 6);
1899 instr_hdr_extract7_exec(struct rte_swx_pipeline *p)
1901 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
1904 __instr_hdr_extract_exec(p, 7);
1911 instr_hdr_extract8_exec(struct rte_swx_pipeline *p)
1913 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
1916 __instr_hdr_extract_exec(p, 8);
/*
 * Translate an "emit h.header" instruction string into an INSTR_HDR_EMIT
 * opcode. Expects exactly two tokens ("emit" + header name); resolves the
 * header and records its id, struct slot and byte size in emit slot 0.
 * Returns 0 on success, negative errno-style code via CHECK on failure.
 */
1926 instr_hdr_emit_translate(struct rte_swx_pipeline *p,
1927 struct action *action __rte_unused,
1930 struct instruction *instr,
1931 struct instruction_data *data __rte_unused)
1935 CHECK(n_tokens == 2, EINVAL);
/* tokens[1] is the header name; header_parse fails (NULL) if unknown. */
1937 h = header_parse(p, tokens[1]);
1940 instr->type = INSTR_HDR_EMIT;
1941 instr->io.hdr.header_id[0] = h->id;
1942 instr->io.hdr.struct_id[0] = h->struct_id;
/* Header sizes are byte-aligned here: bits / 8. */
1943 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
/*
 * Common emit routine: append n_emit headers to the thread's outgoing
 * header list (t->headers_out). Adjacent headers that are contiguous in
 * memory are coalesced into a single headers_out entry to minimize the
 * number of copies at TX time.
 * NOTE(review): several lines of this function are elided in this view;
 * comments below describe only the visible logic.
 */
1948 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit);
1951 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit)
1953 struct thread *t = &p->threads[p->thread_id];
1954 struct instruction *ip = t->ip;
1955 uint32_t n_headers_out = t->n_headers_out;
/* ho starts at the last already-emitted entry (candidate for merging). */
1956 struct header_out_runtime *ho = &t->headers_out[n_headers_out - 1];
1957 uint8_t *ho_ptr = NULL;
1958 uint32_t ho_nbytes = 0, i;
1960 for (i = 0; i < n_emit; i++) {
1961 uint32_t header_id = ip->io.hdr.header_id[i];
1962 uint32_t struct_id = ip->io.hdr.struct_id[i];
1963 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
1965 struct header_runtime *hi = &t->headers[header_id];
1966 uint8_t *hi_ptr = t->structs[struct_id];
1968 TRACE("[Thread %2u]: emit header %u\n",
/* First emitted header: start a fresh headers_out[0] entry. */
1974 if (!t->n_headers_out) {
1975 ho = &t->headers_out[0];
1977 ho->ptr0 = hi->ptr0;
1981 ho_nbytes = n_bytes;
1988 ho_nbytes = ho->n_bytes;
/* Contiguous with previous entry: extend it instead of adding a new one. */
1992 if (ho_ptr + ho_nbytes == hi_ptr) {
1993 ho_nbytes += n_bytes;
/* Not contiguous: close current entry and open a new one. */
1995 ho->n_bytes = ho_nbytes;
1998 ho->ptr0 = hi->ptr0;
2002 ho_nbytes = n_bytes;
/* Flush accumulated size and publish updated out-header count. */
2008 ho->n_bytes = ho_nbytes;
2009 t->n_headers_out = n_headers_out;
/*
 * Per-count wrappers over the common emit routine. instr_hdr_emit_exec
 * emits one header; the emitK_tx variants are fused "K emits + 1 tx"
 * super-instructions, hence the trace reports K+1 fused instructions while
 * only K headers are emitted (the tx part is handled after the emit call,
 * elided in this view).
 */
2013 instr_hdr_emit_exec(struct rte_swx_pipeline *p)
2015 __instr_hdr_emit_exec(p, 1);
/* emit + tx fused: 2 instructions, 1 header emitted. */
2022 instr_hdr_emit_tx_exec(struct rte_swx_pipeline *p)
2024 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
2027 __instr_hdr_emit_exec(p, 1);
/* 2 emits + tx fused: 3 instructions, 2 headers emitted. */
2032 instr_hdr_emit2_tx_exec(struct rte_swx_pipeline *p)
2034 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
2037 __instr_hdr_emit_exec(p, 2);
/* 3 emits + tx fused. */
2042 instr_hdr_emit3_tx_exec(struct rte_swx_pipeline *p)
2044 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
2047 __instr_hdr_emit_exec(p, 3);
/* 4 emits + tx fused. */
2052 instr_hdr_emit4_tx_exec(struct rte_swx_pipeline *p)
2054 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
2057 __instr_hdr_emit_exec(p, 4);
/* 5 emits + tx fused. */
2062 instr_hdr_emit5_tx_exec(struct rte_swx_pipeline *p)
2064 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
2067 __instr_hdr_emit_exec(p, 5);
/* 6 emits + tx fused. */
2072 instr_hdr_emit6_tx_exec(struct rte_swx_pipeline *p)
2074 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
2077 __instr_hdr_emit_exec(p, 6);
/* 7 emits + tx fused. */
2082 instr_hdr_emit7_tx_exec(struct rte_swx_pipeline *p)
2084 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
2087 __instr_hdr_emit_exec(p, 7);
/* 8 emits + tx fused. */
2092 instr_hdr_emit8_tx_exec(struct rte_swx_pipeline *p)
2094 TRACE("[Thread %2u] *** The next 9 instructions are fused. ***\n",
2097 __instr_hdr_emit_exec(p, 8);
/* Upper bound on whitespace-separated tokens per instruction string. */
2101 #define RTE_SWX_INSTRUCTION_TOKENS_MAX 16
/*
 * Translate one instruction string into an (instruction, instruction_data)
 * pair: tokenize, strip an optional "label :" prefix, then dispatch to the
 * per-mnemonic translator ("rx", "tx", "extract", "emit", ...).
 * Returns 0 on success, negative errno-style code on parse failure.
 */
2104 instr_translate(struct rte_swx_pipeline *p,
2105 struct action *action,
2107 struct instruction *instr,
2108 struct instruction_data *data)
2110 char *tokens[RTE_SWX_INSTRUCTION_TOKENS_MAX];
/* tpos: index of the first token after the optional label prefix. */
2111 int n_tokens = 0, tpos = 0;
2113 /* Parse the instruction string into tokens. */
/* strtok_r mutates "string" in place; caller owns/frees the buffer. */
2117 token = strtok_r(string, " \t\v", &string);
2121 CHECK(n_tokens < RTE_SWX_INSTRUCTION_TOKENS_MAX, EINVAL);
2123 tokens[n_tokens] = token;
2127 CHECK(n_tokens, EINVAL);
2129 /* Handle the optional instruction label. */
2130 if ((n_tokens >= 2) && !strcmp(tokens[1], ":")) {
2131 strcpy(data->label, tokens[0]);
/* A label alone is not an instruction: tokens must remain after it. */
2134 CHECK(n_tokens - tpos, EINVAL);
2137 /* Identify the instruction type. */
2138 if (!strcmp(tokens[tpos], "rx"))
2139 return instr_rx_translate(p,
2146 if (!strcmp(tokens[tpos], "tx"))
2147 return instr_tx_translate(p,
2154 if (!strcmp(tokens[tpos], "extract"))
2155 return instr_hdr_extract_translate(p,
2162 if (!strcmp(tokens[tpos], "emit"))
2163 return instr_hdr_emit_translate(p,
/*
 * Count how many of the n instructions jump to the given label
 * (i.e. have jmp_label equal to it). Used to detect unused labels.
 */
2174 label_is_used(struct instruction_data *data, uint32_t n, const char *label)
2176 uint32_t count = 0, i;
2181 for (i = 0; i < n; i++)
2182 if (!strcmp(label, data[i].jmp_label))
/*
 * Validate instruction labels: every label must be unique across the
 * instruction array; then record, per instruction, how many jump
 * instructions target its label (n_users).
 * Returns 0 on success, -EINVAL on duplicate labels.
 */
2189 instr_label_check(struct instruction_data *instruction_data,
2190 uint32_t n_instructions)
2194 /* Check that all instruction labels are unique. */
2195 for (i = 0; i < n_instructions; i++) {
2196 struct instruction_data *data = &instruction_data[i];
2197 char *label = data->label;
/* Pairwise comparison against all later labels; strcmp != 0 required. */
2203 for (j = i + 1; j < n_instructions; j++)
2204 CHECK(strcmp(label, data[j].label), EINVAL);
2207 /* Get users for each instruction label. */
2208 for (i = 0; i < n_instructions; i++) {
2209 struct instruction_data *data = &instruction_data[i];
2210 char *label = data->label;
2212 data->n_users = label_is_used(instruction_data,
/*
 * Translate an array of instruction strings into executable instruction
 * and instruction_data arrays. Used both for action bodies (a != NULL)
 * and for the pipeline-level program (a == NULL — see
 * rte_swx_pipeline_instructions_config). On success, ownership of the
 * instr array transfers to the action or the pipeline.
 * NOTE(review): error-path cleanup lines are elided in this view;
 * presumably instr/data/string are freed on failure — confirm upstream.
 */
2221 instruction_config(struct rte_swx_pipeline *p,
2223 const char **instructions,
2224 uint32_t n_instructions)
2226 struct instruction *instr = NULL;
2227 struct instruction_data *data = NULL;
2228 char *string = NULL;
2232 CHECK(n_instructions, EINVAL);
2233 CHECK(instructions, EINVAL);
2234 for (i = 0; i < n_instructions; i++)
2235 CHECK(instructions[i], EINVAL);
2237 /* Memory allocation. */
2238 instr = calloc(n_instructions, sizeof(struct instruction));
2244 data = calloc(n_instructions, sizeof(struct instruction_data));
2250 for (i = 0; i < n_instructions; i++) {
/* strdup: instr_translate tokenizes in place, so work on a copy. */
2251 string = strdup(instructions[i]);
2257 err = instr_translate(p, a, string, &instr[i], &data[i]);
2264 err = instr_label_check(data, n_instructions);
/* Publish: action-scoped when a is set, pipeline-scoped otherwise
 * (the guarding if (a) / else lines are elided here). */
2271 a->instructions = instr;
2272 a->n_instructions = n_instructions;
2274 p->instructions = instr;
2275 p->n_instructions = n_instructions;
/* Signature of an instruction executor: operates on the pipeline's
 * current thread state. */
2287 typedef void (*instr_exec_t)(struct rte_swx_pipeline *);
/*
 * Dispatch table indexed by enum instruction type. Designated
 * initializers keep each slot aligned with its INSTR_* opcode; unlisted
 * opcodes are implicitly NULL.
 */
2289 static instr_exec_t instruction_table[] = {
2290 [INSTR_RX] = instr_rx_exec,
2291 [INSTR_TX] = instr_tx_exec,
2293 [INSTR_HDR_EXTRACT] = instr_hdr_extract_exec,
2294 [INSTR_HDR_EXTRACT2] = instr_hdr_extract2_exec,
2295 [INSTR_HDR_EXTRACT3] = instr_hdr_extract3_exec,
2296 [INSTR_HDR_EXTRACT4] = instr_hdr_extract4_exec,
2297 [INSTR_HDR_EXTRACT5] = instr_hdr_extract5_exec,
2298 [INSTR_HDR_EXTRACT6] = instr_hdr_extract6_exec,
2299 [INSTR_HDR_EXTRACT7] = instr_hdr_extract7_exec,
2300 [INSTR_HDR_EXTRACT8] = instr_hdr_extract8_exec,
2302 [INSTR_HDR_EMIT] = instr_hdr_emit_exec,
2303 [INSTR_HDR_EMIT_TX] = instr_hdr_emit_tx_exec,
2304 [INSTR_HDR_EMIT2_TX] = instr_hdr_emit2_tx_exec,
2305 [INSTR_HDR_EMIT3_TX] = instr_hdr_emit3_tx_exec,
2306 [INSTR_HDR_EMIT4_TX] = instr_hdr_emit4_tx_exec,
2307 [INSTR_HDR_EMIT5_TX] = instr_hdr_emit5_tx_exec,
2308 [INSTR_HDR_EMIT6_TX] = instr_hdr_emit6_tx_exec,
2309 [INSTR_HDR_EMIT7_TX] = instr_hdr_emit7_tx_exec,
2310 [INSTR_HDR_EMIT8_TX] = instr_hdr_emit8_tx_exec,
/*
 * Execute the current thread's next instruction by dispatching through
 * instruction_table on the instruction's type. Assumes ip->type maps to
 * a non-NULL table slot (guaranteed by successful translation).
 */
2314 instr_exec(struct rte_swx_pipeline *p)
2316 struct thread *t = &p->threads[p->thread_id];
2317 struct instruction *ip = t->ip;
2318 instr_exec_t instr = instruction_table[ip->type];
/*
 * Look up an action by name in the pipeline's action list.
 * Returns the action, or NULL when not found (NULL-return lines elided).
 */
static struct action *
2327 action_find(struct rte_swx_pipeline *p, const char *name)
2329 struct action *elem;
2334 TAILQ_FOREACH(elem, &p->actions, node)
2335 if (strcmp(elem->name, name) == 0)
/*
 * Public API: register a named action with an optional argument struct
 * type and an instruction program. The action name must be unique; the
 * argument struct type, when named, must already be registered. On
 * success the action is appended to the pipeline's action list and
 * assigned the next sequential id.
 */
2342 rte_swx_pipeline_action_config(struct rte_swx_pipeline *p,
2344 const char *args_struct_type_name,
2345 const char **instructions,
2346 uint32_t n_instructions)
2348 struct struct_type *args_struct_type;
2354 CHECK_NAME(name, EINVAL);
2355 CHECK(!action_find(p, name), EEXIST);
/* Argument struct type is optional: NULL name means no action args. */
2357 if (args_struct_type_name) {
2358 CHECK_NAME(args_struct_type_name, EINVAL);
2359 args_struct_type = struct_type_find(p, args_struct_type_name);
2360 CHECK(args_struct_type, EINVAL);
2362 args_struct_type = NULL;
2365 /* Node allocation. */
2366 a = calloc(1, sizeof(struct action));
2369 /* Node initialization. */
/* name length was validated by CHECK_NAME against RTE_SWX_NAME_SIZE
 * (bound check elided in this view) — strcpy is safe under that check. */
2370 strcpy(a->name, name);
2371 a->st = args_struct_type;
2372 a->id = p->n_actions;
2374 /* Instruction translation. */
2375 err = instruction_config(p, a, instructions, n_instructions);
2381 /* Node add to tailq. */
2382 TAILQ_INSERT_TAIL(&p->actions, a, node);
/*
 * Build step: create the id-indexed action_instructions lookup array so
 * the data path can reach an action's program in O(1) by action id.
 */
2389 action_build(struct rte_swx_pipeline *p)
2391 struct action *action;
2393 p->action_instructions = calloc(p->n_actions,
2394 sizeof(struct instruction *));
2395 CHECK(p->action_instructions, ENOMEM);
2397 TAILQ_FOREACH(action, &p->actions, node)
2398 p->action_instructions[action->id] = action->instructions;
/* Undo action_build: release the lookup array and reset the pointer so a
 * later rebuild (or double free) is safe. */
2404 action_build_free(struct rte_swx_pipeline *p)
2406 free(p->action_instructions);
2407 p->action_instructions = NULL;
/*
 * Release all action resources: first the build-time lookup array, then
 * every action node (popping the tailq head in a loop — loop construct
 * elided in this view), including each action's instruction array.
 */
2411 action_free(struct rte_swx_pipeline *p)
2413 action_build_free(p);
2416 struct action *action;
2418 action = TAILQ_FIRST(&p->actions);
2422 TAILQ_REMOVE(&p->actions, action, node);
2423 free(action->instructions);
/*
 * Look up a registered table type by name.
 * Returns the table type, or NULL when not found (elided).
 */
static struct table_type *
2432 table_type_find(struct rte_swx_pipeline *p, const char *name)
2434 struct table_type *elem;
2436 TAILQ_FOREACH(elem, &p->table_types, node)
2437 if (strcmp(elem->name, name) == 0)
/*
 * Pick a table type implementation for the given match type. The caller's
 * recommended type name is honored only if that type exists AND has the
 * required match type; otherwise fall back to the first registered type
 * with a matching match type. Returns NULL when none qualifies (elided).
 */
static struct table_type *
2444 table_type_resolve(struct rte_swx_pipeline *p,
2445 const char *recommended_type_name,
2446 enum rte_swx_table_match_type match_type)
2448 struct table_type *elem;
2450 /* Only consider the recommended type if the match type is correct. */
2451 if (recommended_type_name)
2452 TAILQ_FOREACH(elem, &p->table_types, node)
2453 if (!strcmp(elem->name, recommended_type_name) &&
2454 (elem->match_type == match_type))
2457 /* Ignore the recommended type and get the first element with this match
2460 TAILQ_FOREACH(elem, &p->table_types, node)
2461 if (elem->match_type == match_type)
/* Look up a table by name; NULL when not found (elided). */
static struct table *
2468 table_find(struct rte_swx_pipeline *p, const char *name)
2472 TAILQ_FOREACH(elem, &p->tables, node)
2473 if (strcmp(elem->name, name) == 0)
/* Look up a table by numeric id (ids are assigned sequentially at config
 * time); relies on TAILQ_FOREACH leaving the cursor NULL when exhausted. */
static struct table *
2480 table_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
2482 struct table *table = NULL;
2484 TAILQ_FOREACH(table, &p->tables, node)
2485 if (table->id == id)
/*
 * Public API: register a table type implementation (name, match type,
 * ops vtable). The mandatory ops are create/lkp/free; the ops struct is
 * copied by value so the caller's copy need not outlive the call.
 */
2492 rte_swx_pipeline_table_type_register(struct rte_swx_pipeline *p,
2494 enum rte_swx_table_match_type match_type,
2495 struct rte_swx_table_ops *ops)
2497 struct table_type *elem;
2501 CHECK_NAME(name, EINVAL);
2502 CHECK(!table_type_find(p, name), EEXIST);
2505 CHECK(ops->create, EINVAL);
2506 CHECK(ops->lkp, EINVAL);
2507 CHECK(ops->free, EINVAL);
2509 /* Node allocation. */
2510 elem = calloc(1, sizeof(struct table_type));
2511 CHECK(elem, ENOMEM);
2513 /* Node initialization. */
2514 strcpy(elem->name, name);
2515 elem->match_type = match_type;
/* Deep copy of the ops vtable into the node. */
2516 memcpy(&elem->ops, ops, sizeof(*ops));
2518 /* Node add to tailq. */
2519 TAILQ_INSERT_TAIL(&p->table_types, elem, node);
/*
 * Classify a table's overall match type from its fields: all-exact =>
 * EXACT; exactly one non-exact field which is the LAST field and is LPM
 * => LPM; anything else => WILDCARD. "i" holds the index of the first
 * non-exact field after the scan loop.
 */
2524 static enum rte_swx_table_match_type
2525 table_match_type_resolve(struct rte_swx_match_field_params *fields,
2530 for (i = 0; i < n_fields; i++)
2531 if (fields[i].match_type != RTE_SWX_TABLE_MATCH_EXACT)
2535 return RTE_SWX_TABLE_MATCH_EXACT;
2537 if ((i == n_fields - 1) &&
2538 (fields[i].match_type == RTE_SWX_TABLE_MATCH_LPM))
2539 return RTE_SWX_TABLE_MATCH_LPM;
2541 return RTE_SWX_TABLE_MATCH_WILDCARD;
/*
 * Public API: configure a match-action table — validate match fields
 * (all header fields of one header, or all metadata fields, in strictly
 * increasing offset order), validate the action set and default action,
 * resolve the table type, then allocate and populate the table node.
 * NOTE(review): error-path cleanup and some statements are elided in
 * this view; comments describe visible logic only.
 */
2545 rte_swx_pipeline_table_config(struct rte_swx_pipeline *p,
2547 struct rte_swx_pipeline_table_params *params,
2548 const char *recommended_table_type_name,
2552 struct table_type *type;
2554 struct action *default_action;
2555 struct header *header = NULL;
2557 uint32_t offset_prev = 0, action_data_size_max = 0, i;
2561 CHECK_NAME(name, EINVAL);
2562 CHECK(!table_find(p, name), EEXIST);
2564 CHECK(params, EINVAL);
/* Match field checks: a keyless table (n_fields == 0) is permitted. */
2567 CHECK(!params->n_fields || params->fields, EINVAL);
2568 for (i = 0; i < params->n_fields; i++) {
/* BUG(review): "¶ms" is mojibake for "&params" — encoding corruption
 * in this copy of the file; restore "&params->fields[i]". */
2569 struct rte_swx_match_field_params *field = ¶ms->fields[i];
2571 struct field *hf, *mf;
2574 CHECK_NAME(field->name, EINVAL);
/* Each field name must resolve as either a header field or a metadata
 * field; mixing is rejected below. */
2576 hf = header_field_parse(p, field->name, &h);
2577 mf = metadata_field_parse(p, field->name);
2578 CHECK(hf || mf, EINVAL);
2580 offset = hf ? hf->offset : mf->offset;
/* First field fixes the key domain (header vs metadata) and, for
 * headers, which header all remaining fields must belong to. */
2583 is_header = hf ? 1 : 0;
2584 header = hf ? h : NULL;
2585 offset_prev = offset;
2590 CHECK((is_header && hf && (h->id == header->id)) ||
2591 (!is_header && mf), EINVAL);
/* Fields must be listed in strictly increasing offset order. */
2593 CHECK(offset > offset_prev, EINVAL);
2594 offset_prev = offset;
2597 /* Action checks. */
2598 CHECK(params->n_actions, EINVAL);
2599 CHECK(params->action_names, EINVAL);
2600 for (i = 0; i < params->n_actions; i++) {
2601 const char *action_name = params->action_names[i];
2603 uint32_t action_data_size;
2605 CHECK(action_name, EINVAL);
2607 a = action_find(p, action_name);
/* Track the largest per-action data size for buffer allocation. */
2610 action_data_size = a->st ? a->st->n_bits / 8 : 0;
2611 if (action_data_size > action_data_size_max)
2612 action_data_size_max = action_data_size;
2615 CHECK(params->default_action_name, EINVAL);
/* BUG(review): loop bound should be params->n_actions, not
 * p->n_actions — when the pipeline has more actions than this table,
 * params->action_names[i] is read out of bounds. Fix:
 *   for (i = 0; i < params->n_actions; i++) */
2616 for (i = 0; i < p->n_actions; i++)
2617 if (!strcmp(params->action_names[i],
2618 params->default_action_name))
2620 CHECK(i < params->n_actions, EINVAL);
2621 default_action = action_find(p, params->default_action_name);
/* Default action data is only meaningful when the default action has an
 * argument struct. NOTE(review): this also permits st set with NULL
 * data, yet the memcpy below copies from params->default_action_data
 * whenever default_action->st is set — confirm upstream intent. */
2622 CHECK((default_action->st && params->default_action_data) ||
2623 !params->default_action_data, EINVAL);
2625 /* Table type checks. */
2626 if (params->n_fields) {
2627 enum rte_swx_table_match_type match_type;
2629 match_type = table_match_type_resolve(params->fields,
2631 type = table_type_resolve(p,
2632 recommended_table_type_name,
2634 CHECK(type, EINVAL);
2639 /* Memory allocation. */
2640 t = calloc(1, sizeof(struct table));
2643 t->fields = calloc(params->n_fields, sizeof(struct match_field));
2649 t->actions = calloc(params->n_actions, sizeof(struct action *));
2656 if (action_data_size_max) {
2657 t->default_action_data = calloc(1, action_data_size_max);
2658 if (!t->default_action_data) {
2666 /* Node initialization. */
2667 strcpy(t->name, name);
2668 if (args && args[0])
2669 strcpy(t->args, args);
2672 for (i = 0; i < params->n_fields; i++) {
/* BUG(review): same "¶ms" mojibake as above — should be
 * "&params->fields[i]". */
2673 struct rte_swx_match_field_params *field = ¶ms->fields[i];
2674 struct match_field *f = &t->fields[i];
2676 f->match_type = field->match_type;
/* Re-resolve the field pointer in the domain fixed earlier. */
2677 f->field = is_header ?
2678 header_field_parse(p, field->name, NULL) :
2679 metadata_field_parse(p, field->name);
2681 t->n_fields = params->n_fields;
2682 t->is_header = is_header;
2685 for (i = 0; i < params->n_actions; i++)
2686 t->actions[i] = action_find(p, params->action_names[i]);
2687 t->default_action = default_action;
2688 if (default_action->st)
2689 memcpy(t->default_action_data,
2690 params->default_action_data,
2691 default_action->st->n_bits / 8);
2692 t->n_actions = params->n_actions;
2693 t->default_action_is_const = params->default_action_is_const;
2694 t->action_data_size_max = action_data_size_max;
2697 t->id = p->n_tables;
2699 /* Node add to tailq. */
2700 TAILQ_INSERT_TAIL(&p->tables, t, node);
/*
 * Build the rte_swx_table_params handed to the table type's create()
 * op: key window (offset + size spanning first..last match field), a
 * byte mask selecting the participating fields within that window, the
 * worst-case action data size, and the table capacity.
 * Returns NULL on allocation failure (elided). The caller frees the
 * result with table_params_free.
 */
2706 static struct rte_swx_table_params *
2707 table_params_get(struct table *table)
2709 struct rte_swx_table_params *params;
2710 struct field *first, *last;
2712 uint32_t key_size, key_offset, action_data_size, i;
2714 /* Memory allocation. */
2715 params = calloc(1, sizeof(struct rte_swx_table_params));
2719 /* Key offset and size. */
/* Assumes at least one match field; keyless tables are handled by the
 * stub lookup path and presumably never reach here — confirm callers. */
2720 first = table->fields[0].field;
2721 last = table->fields[table->n_fields - 1].field;
/* Offsets/sizes are in bits; converted to bytes here (byte-aligned
 * fields assumed — TODO confirm). */
2722 key_offset = first->offset / 8;
2723 key_size = (last->offset + last->n_bits - first->offset) / 8;
2725 /* Memory allocation. */
2726 key_mask = calloc(1, key_size);
/* Set mask bytes to 0xFF over each field's span; gaps between fields
 * stay zero (ignored by the match). */
2733 for (i = 0; i < table->n_fields; i++) {
2734 struct field *f = table->fields[i].field;
2735 uint32_t start = (f->offset - first->offset) / 8;
2736 size_t size = f->n_bits / 8;
2738 memset(&key_mask[start], 0xFF, size);
2741 /* Action data size. */
2742 action_data_size = 0;
2743 for (i = 0; i < table->n_actions; i++) {
2744 struct action *action = table->actions[i];
2745 uint32_t ads = action->st ? action->st->n_bits / 8 : 0;
2747 if (ads > action_data_size)
2748 action_data_size = ads;
/* Fill in the params (ownership of key_mask transfers to params). */
2752 params->match_type = table->type->match_type;
2753 params->key_size = key_size;
2754 params->key_offset = key_offset;
2755 params->key_mask0 = key_mask;
2756 params->action_data_size = action_data_size;
2757 params->n_keys_max = table->size;
/* Release a params object produced by table_params_get, including the
 * key mask it owns (NULL guard and free(params) elided in this view). */
2763 table_params_free(struct rte_swx_table_params *params)
2768 free(params->key_mask0);
/*
 * Build step: allocate the per-table runtime state array and, for each
 * table with a backing type, create the table object via its create()
 * op; also snapshot the configured default action data and id.
 * NOTE(review): on CHECK failure this returns early and relies on the
 * caller (rte_swx_pipeline_build error path) invoking
 * table_state_build_free for cleanup.
 */
2773 table_state_build(struct rte_swx_pipeline *p)
2775 struct table *table;
2777 p->table_state = calloc(p->n_tables,
2778 sizeof(struct rte_swx_table_state));
2779 CHECK(p->table_state, ENOMEM);
2781 TAILQ_FOREACH(table, &p->tables, node) {
2782 struct rte_swx_table_state *ts = &p->table_state[table->id];
2785 struct rte_swx_table_params *params;
2788 params = table_params_get(table);
2789 CHECK(params, ENOMEM);
2791 ts->obj = table->type->ops.create(params,
/* params are consumed by create(); free our copy unconditionally. */
2796 table_params_free(params);
2797 CHECK(ts->obj, ENODEV);
2800 /* ts->default_action_data. */
2801 if (table->action_data_size_max) {
2802 ts->default_action_data =
2803 malloc(table->action_data_size_max);
2804 CHECK(ts->default_action_data, ENOMEM);
2806 memcpy(ts->default_action_data,
2807 table->default_action_data,
2808 table->action_data_size_max);
2811 /* ts->default_action_id. */
2812 ts->default_action_id = table->default_action->id;
/*
 * Undo table_state_build: destroy each created table object through its
 * type's free() op, release the default action data copies, then the
 * state array itself. Safe to call on a partially built (or never
 * built) state, thanks to the NULL checks.
 */
2819 table_state_build_free(struct rte_swx_pipeline *p)
2823 if (!p->table_state)
2826 for (i = 0; i < p->n_tables; i++) {
2827 struct rte_swx_table_state *ts = &p->table_state[i];
2828 struct table *table = table_find_by_id(p, i);
/* Only tables with a backing type created an object. */
2831 if (table->type && ts->obj)
2832 table->type->ops.free(ts->obj);
2834 /* ts->default_action_data. */
/* free(NULL) is a no-op, so no guard needed. */
2835 free(ts->default_action_data);
2838 free(p->table_state);
2839 p->table_state = NULL;
/* Full teardown of table state currently delegates entirely to the
 * build-free routine. */
2843 table_state_free(struct rte_swx_pipeline *p)
2845 table_state_build_free(p);
/*
 * Lookup stub for keyless tables (no match fields / no table type):
 * ignores all inputs and immediately reports the lookup as complete
 * (return 1 == DONE, lookup miss — the miss outputs are elided here).
 */
2849 table_stub_lkp(void *table __rte_unused,
2850 void *mailbox __rte_unused,
2851 uint8_t **key __rte_unused,
2852 uint64_t *action_id __rte_unused,
2853 uint8_t **action_data __rte_unused,
2857 return 1; /* DONE. */
/*
 * Build step: for every thread, allocate its table_runtime array and
 * wire each table's lookup function, per-thread mailbox (sized by the
 * type's mailbox_size_get op) and key pointer. Tables without a type
 * get the stub lookup instead.
 */
2861 table_build(struct rte_swx_pipeline *p)
2865 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2866 struct thread *t = &p->threads[i];
2867 struct table *table;
2869 t->tables = calloc(p->n_tables, sizeof(struct table_runtime));
2870 CHECK(t->tables, ENOMEM);
2872 TAILQ_FOREACH(table, &p->tables, node) {
2873 struct table_runtime *r = &t->tables[table->id];
/* Typed table: real lookup op + per-thread mailbox. */
2878 size = table->type->ops.mailbox_size_get();
2881 r->func = table->type->ops.lkp;
2885 r->mailbox = calloc(1, size);
2886 CHECK(r->mailbox, ENOMEM);
/* Key points at the header struct slot or at the thread metadata,
 * depending on the table's key domain. */
2890 r->key = table->is_header ?
2891 &t->structs[table->header->struct_id] :
2892 &t->structs[p->metadata_struct_id];
/* Untyped (keyless) table: stub lookup, no mailbox/key needed. */
2894 r->func = table_stub_lkp;
/*
 * Undo table_build: per thread, release each table_runtime's resources
 * (mailbox free is elided in this view) and the tables array itself.
 */
2903 table_build_free(struct rte_swx_pipeline *p)
2907 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2908 struct thread *t = &p->threads[i];
2914 for (j = 0; j < p->n_tables; j++) {
2915 struct table_runtime *r = &t->tables[j];
/*
 * Release all table resources: build-time per-thread state first, then
 * every table node (actions array, default action data), then every
 * registered table type node (the pop-loop constructs are elided here).
 */
2926 table_free(struct rte_swx_pipeline *p)
2928 table_build_free(p);
2934 elem = TAILQ_FIRST(&p->tables);
2938 TAILQ_REMOVE(&p->tables, elem, node);
2940 free(elem->actions);
2941 free(elem->default_action_data);
/* Table types. */
2947 struct table_type *elem;
2949 elem = TAILQ_FIRST(&p->table_types);
2953 TAILQ_REMOVE(&p->table_types, elem, node);
/*
 * Public API: allocate and initialize an empty pipeline object —
 * initialize every object tailq, reserve struct id 0 for action data,
 * and record the requested NUMA node. Returns the pipeline via *p
 * (elided) and 0 on success, -ENOMEM on allocation failure.
 */
2962 rte_swx_pipeline_config(struct rte_swx_pipeline **p, int numa_node)
2964 struct rte_swx_pipeline *pipeline;
2966 /* Check input parameters. */
2969 /* Memory allocation. */
2970 pipeline = calloc(1, sizeof(struct rte_swx_pipeline));
2971 CHECK(pipeline, ENOMEM);
2973 /* Initialization. */
2974 TAILQ_INIT(&pipeline->struct_types);
2975 TAILQ_INIT(&pipeline->port_in_types);
2976 TAILQ_INIT(&pipeline->ports_in);
2977 TAILQ_INIT(&pipeline->port_out_types);
2978 TAILQ_INIT(&pipeline->ports_out);
2979 TAILQ_INIT(&pipeline->extern_types);
2980 TAILQ_INIT(&pipeline->extern_objs);
2981 TAILQ_INIT(&pipeline->extern_funcs);
2982 TAILQ_INIT(&pipeline->headers);
2983 TAILQ_INIT(&pipeline->actions);
2984 TAILQ_INIT(&pipeline->table_types);
2985 TAILQ_INIT(&pipeline->tables);
2987 pipeline->n_structs = 1; /* Struct 0 is reserved for action_data. */
2988 pipeline->numa_node = numa_node;
/*
 * Public API: destroy a pipeline and everything it owns. Frees the
 * pipeline-level instruction program, table state, extern functions and
 * the remaining subsystems (most teardown calls elided in this view).
 */
2995 rte_swx_pipeline_free(struct rte_swx_pipeline *p)
3000 free(p->instructions);
3002 table_state_free(p);
3007 extern_func_free(p);
/*
 * Public API: install the pipeline-level instruction program (action
 * argument is NULL, so instruction_config stores into the pipeline),
 * then reset every thread's instruction pointer to the new program.
 */
3017 rte_swx_pipeline_instructions_config(struct rte_swx_pipeline *p,
3018 const char **instructions,
3019 uint32_t n_instructions)
3024 err = instruction_config(p, NULL, instructions, n_instructions);
3028 /* Thread instruction pointer reset. */
3029 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
3030 struct thread *t = &p->threads[i];
3032 thread_ip_reset(p, t);
/*
 * Public API: one-shot build of all pipeline subsystems in dependency
 * order (ports, structs, externs, headers, metadata, actions, tables,
 * table state). May only run once (build_done guard). The trailing
 * labels form a classic goto-cleanup ladder: on failure, every build
 * step completed so far is torn down in reverse order.
 */
3039 rte_swx_pipeline_build(struct rte_swx_pipeline *p)
3044 CHECK(p->build_done == 0, EEXIST);
3046 status = port_in_build(p);
3050 status = port_out_build(p);
3054 status = struct_build(p);
3058 status = extern_obj_build(p);
3062 status = extern_func_build(p);
3066 status = header_build(p);
3070 status = metadata_build(p);
3074 status = action_build(p);
3078 status = table_build(p);
3082 status = table_state_build(p);
/* Error path: reverse-order teardown of everything built above. */
3090 table_state_build_free(p);
3091 table_build_free(p);
3092 action_build_free(p);
3093 metadata_build_free(p);
3094 header_build_free(p);
3095 extern_func_build_free(p);
3096 extern_obj_build_free(p);
3097 port_out_build_free(p);
3098 port_in_build_free(p);
3099 struct_build_free(p);
/*
 * Public API: run the pipeline for a quantum of n_instructions
 * instruction executions (the per-iteration instr_exec call is elided
 * in this view). Intended to be called repeatedly from the data-path
 * loop.
 */
3105 rte_swx_pipeline_run(struct rte_swx_pipeline *p, uint32_t n_instructions)
3109 for (i = 0; i < n_instructions; i++)
/*
 * Public API: expose the pipeline's table state array (valid only after
 * a successful build). Returns it by reference; the pipeline retains
 * ownership. Fails (elided return) on NULL args or unbuilt pipeline.
 */
3117 rte_swx_pipeline_table_state_get(struct rte_swx_pipeline *p,
3118 struct rte_swx_table_state **table_state)
3120 if (!p || !table_state || !p->build_done)
3123 *table_state = p->table_state;
3128 rte_swx_pipeline_table_state_set(struct rte_swx_pipeline *p,
3129 struct rte_swx_table_state *table_state)
3131 if (!p || !table_state || !p->build_done)
3134 p->table_state = table_state;