1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Intel Corporation
11 #include <rte_common.h>
12 #include <rte_prefetch.h>
13 #include <rte_byteorder.h>
15 #include "rte_swx_pipeline.h"
16 #include "rte_swx_ctl.h"
/* Argument-validation helper used throughout this file: evaluates
 * <condition> and bails out of the enclosing function with <err_code>
 * on failure (macro body elided in this sampled listing — confirm
 * exact return convention against the full source).
 */
18 #define CHECK(condition, err_code) \
24 #define CHECK_NAME(name, err_code) \
25 CHECK((name) && (name)[0], err_code)
/* Debug trace hook; forwards to printf here (surrounding #ifdef guard
 * lines elided from this listing).
 */
32 #define TRACE(...) printf(__VA_ARGS__)
/* 64-bit network<->host byte-order conversion shorthands. */
37 #define ntoh64(x) rte_be_to_cpu_64(x)
38 #define hton64(x) rte_cpu_to_be_64(x)
44 char name[RTE_SWX_NAME_SIZE];
50 TAILQ_ENTRY(struct_type) node;
51 char name[RTE_SWX_NAME_SIZE];
57 TAILQ_HEAD(struct_type_tailq, struct_type);
63 TAILQ_ENTRY(port_in_type) node;
64 char name[RTE_SWX_NAME_SIZE];
65 struct rte_swx_port_in_ops ops;
68 TAILQ_HEAD(port_in_type_tailq, port_in_type);
71 TAILQ_ENTRY(port_in) node;
72 struct port_in_type *type;
77 TAILQ_HEAD(port_in_tailq, port_in);
79 struct port_in_runtime {
80 rte_swx_port_in_pkt_rx_t pkt_rx;
87 struct port_out_type {
88 TAILQ_ENTRY(port_out_type) node;
89 char name[RTE_SWX_NAME_SIZE];
90 struct rte_swx_port_out_ops ops;
93 TAILQ_HEAD(port_out_type_tailq, port_out_type);
96 TAILQ_ENTRY(port_out) node;
97 struct port_out_type *type;
102 TAILQ_HEAD(port_out_tailq, port_out);
104 struct port_out_runtime {
105 rte_swx_port_out_pkt_tx_t pkt_tx;
106 rte_swx_port_out_flush_t flush;
113 struct extern_type_member_func {
114 TAILQ_ENTRY(extern_type_member_func) node;
115 char name[RTE_SWX_NAME_SIZE];
116 rte_swx_extern_type_member_func_t func;
120 TAILQ_HEAD(extern_type_member_func_tailq, extern_type_member_func);
123 TAILQ_ENTRY(extern_type) node;
124 char name[RTE_SWX_NAME_SIZE];
125 struct struct_type *mailbox_struct_type;
126 rte_swx_extern_type_constructor_t constructor;
127 rte_swx_extern_type_destructor_t destructor;
128 struct extern_type_member_func_tailq funcs;
132 TAILQ_HEAD(extern_type_tailq, extern_type);
135 TAILQ_ENTRY(extern_obj) node;
136 char name[RTE_SWX_NAME_SIZE];
137 struct extern_type *type;
143 TAILQ_HEAD(extern_obj_tailq, extern_obj);
145 #ifndef RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX
146 #define RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX 8
149 struct extern_obj_runtime {
152 rte_swx_extern_type_member_func_t funcs[RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX];
159 TAILQ_ENTRY(extern_func) node;
160 char name[RTE_SWX_NAME_SIZE];
161 struct struct_type *mailbox_struct_type;
162 rte_swx_extern_func_t func;
167 TAILQ_HEAD(extern_func_tailq, extern_func);
169 struct extern_func_runtime {
171 rte_swx_extern_func_t func;
178 TAILQ_ENTRY(header) node;
179 char name[RTE_SWX_NAME_SIZE];
180 struct struct_type *st;
185 TAILQ_HEAD(header_tailq, header);
187 struct header_runtime {
191 struct header_out_runtime {
201 /* Packet headers are always in Network Byte Order (NBO), i.e. big endian.
202 * Packet meta-data fields are always assumed to be in Host Byte Order (HBO).
203 * Table entry fields can be in either NBO or HBO; they are assumed to be in HBO
204 * when transferred to packet meta-data and in NBO when transferred to packet
208 /* Notation conventions:
209 * -Header field: H = h.header.field (dst/src)
210 * -Meta-data field: M = m.field (dst/src)
211 * -Extern object mailbox field: E = e.field (dst/src)
212 * -Extern function mailbox field: F = f.field (dst/src)
213 * -Table action data field: T = t.field (src only)
214 * -Immediate value: I = 32-bit unsigned value (src only)
217 enum instruction_type {
224 /* extract h.header */
245 /* validate h.header */
248 /* invalidate h.header */
249 INSTR_HDR_INVALIDATE,
253 * dst = HMEF, src = HMEFTI
255 INSTR_MOV, /* dst = MEF, src = MEFT */
256 INSTR_MOV_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
257 INSTR_MOV_I, /* dst = HMEF, src = I */
260 struct instr_operand {
275 uint8_t header_id[8];
276 uint8_t struct_id[8];
281 struct instr_hdr_validity {
285 struct instr_dst_src {
286 struct instr_operand dst;
288 struct instr_operand src;
294 enum instruction_type type;
297 struct instr_hdr_validity valid;
298 struct instr_dst_src mov;
302 struct instruction_data {
303 char label[RTE_SWX_NAME_SIZE];
304 char jmp_label[RTE_SWX_NAME_SIZE];
305 uint32_t n_users; /* user = jmp instruction to this instruction. */
313 TAILQ_ENTRY(action) node;
314 char name[RTE_SWX_NAME_SIZE];
315 struct struct_type *st;
316 struct instruction *instructions;
317 uint32_t n_instructions;
321 TAILQ_HEAD(action_tailq, action);
327 TAILQ_ENTRY(table_type) node;
328 char name[RTE_SWX_NAME_SIZE];
329 enum rte_swx_table_match_type match_type;
330 struct rte_swx_table_ops ops;
333 TAILQ_HEAD(table_type_tailq, table_type);
336 enum rte_swx_table_match_type match_type;
341 TAILQ_ENTRY(table) node;
342 char name[RTE_SWX_NAME_SIZE];
343 char args[RTE_SWX_NAME_SIZE];
344 struct table_type *type; /* NULL when n_fields == 0. */
347 struct match_field *fields;
349 int is_header; /* Only valid when n_fields > 0. */
350 struct header *header; /* Only valid when n_fields > 0. */
353 struct action **actions;
354 struct action *default_action;
355 uint8_t *default_action_data;
357 int default_action_is_const;
358 uint32_t action_data_size_max;
364 TAILQ_HEAD(table_tailq, table);
366 struct table_runtime {
367 rte_swx_table_lookup_t func;
377 struct rte_swx_pkt pkt;
383 /* Packet headers. */
384 struct header_runtime *headers; /* Extracted or generated headers. */
385 struct header_out_runtime *headers_out; /* Emitted headers. */
386 uint8_t *header_storage;
387 uint8_t *header_out_storage;
388 uint64_t valid_headers;
389 uint32_t n_headers_out;
391 /* Packet meta-data. */
395 struct table_runtime *tables;
396 struct rte_swx_table_state *table_state;
398 int hit; /* 0 = Miss, 1 = Hit. */
400 /* Extern objects and functions. */
401 struct extern_obj_runtime *extern_objs;
402 struct extern_func_runtime *extern_funcs;
405 struct instruction *ip;
406 struct instruction *ret;
/* Bit helpers on a 64-bit mask: test, set and clear bit <pos>.
 * 1LLU keeps the shift unsigned; behavior is undefined for pos >= 64,
 * so callers must guarantee pos is in [0, 63].
 */
409 #define MASK64_BIT_GET(mask, pos) ((mask) & (1LLU << (pos)))
410 #define MASK64_BIT_SET(mask, pos) ((mask) | (1LLU << (pos)))
411 #define MASK64_BIT_CLR(mask, pos) ((mask) & ~(1LLU << (pos)))
413 #define MOV(thread, ip) \
415 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
416 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
417 uint64_t dst64 = *dst64_ptr; \
418 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
420 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
421 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
422 uint64_t src64 = *src64_ptr; \
423 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \
424 uint64_t src = src64 & src64_mask; \
426 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
429 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
431 #define MOV_S(thread, ip) \
433 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
434 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
435 uint64_t dst64 = *dst64_ptr; \
436 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
438 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
439 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
440 uint64_t src64 = *src64_ptr; \
441 uint64_t src = ntoh64(src64) >> (64 - (ip)->mov.src.n_bits); \
443 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
452 #define MOV_I(thread, ip) \
454 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
455 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
456 uint64_t dst64 = *dst64_ptr; \
457 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
459 uint64_t src = (ip)->mov.src_val; \
461 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
/* Read an n_bits-wide field stored in the low bits of the 64-bit word
 * at metadata[offset] (HBO). The mask shift is undefined for
 * n_bits == 0; field registration enforces 1 <= n_bits <= 64.
 * NOTE(review): the uint64_t* cast assumes metadata[offset] is
 * suitably aligned — TODO confirm the layout guarantees this.
 */
464 #define METADATA_READ(thread, offset, n_bits) \
466 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
467 uint64_t m64 = *m64_ptr; \
468 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
/* Read-modify-write the same field: only the low n_bits of <value>
 * are stored; the neighboring bits of the 64-bit word are preserved.
 */
472 #define METADATA_WRITE(thread, offset, n_bits, value) \
474 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
475 uint64_t m64 = *m64_ptr; \
476 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
478 uint64_t m_new = value; \
480 *m64_ptr = (m64 & ~m64_mask) | (m_new & m64_mask); \
483 #ifndef RTE_SWX_PIPELINE_THREADS_MAX
484 #define RTE_SWX_PIPELINE_THREADS_MAX 16
487 struct rte_swx_pipeline {
488 struct struct_type_tailq struct_types;
489 struct port_in_type_tailq port_in_types;
490 struct port_in_tailq ports_in;
491 struct port_out_type_tailq port_out_types;
492 struct port_out_tailq ports_out;
493 struct extern_type_tailq extern_types;
494 struct extern_obj_tailq extern_objs;
495 struct extern_func_tailq extern_funcs;
496 struct header_tailq headers;
497 struct struct_type *metadata_st;
498 uint32_t metadata_struct_id;
499 struct action_tailq actions;
500 struct table_type_tailq table_types;
501 struct table_tailq tables;
503 struct port_in_runtime *in;
504 struct port_out_runtime *out;
505 struct instruction **action_instructions;
506 struct rte_swx_table_state *table_state;
507 struct instruction *instructions;
508 struct thread threads[RTE_SWX_PIPELINE_THREADS_MAX];
512 uint32_t n_ports_out;
513 uint32_t n_extern_objs;
514 uint32_t n_extern_funcs;
520 uint32_t n_instructions;
/* Linear lookup of a registered struct type by exact name; used both
 * for duplicate detection at registration time and for resolving
 * mailbox/metadata struct references. (Interior lines of this
 * function are elided in this sampled listing.)
 */
528 static struct struct_type *
529 struct_type_find(struct rte_swx_pipeline *p, const char *name)
531 struct struct_type *elem;
533 TAILQ_FOREACH(elem, &p->struct_types, node)
534 if (strcmp(elem->name, name) == 0)
540 static struct field *
541 struct_type_field_find(struct struct_type *st, const char *name)
545 for (i = 0; i < st->n_fields; i++) {
546 struct field *f = &st->fields[i];
548 if (strcmp(f->name, name) == 0)
/* Register a new struct type: validate the field list (non-empty
 * names, 1..64 bits each, byte-multiple sizes, no duplicate field
 * names), reject duplicate type names, then allocate the node and
 * compute per-field bit offsets as a running sum.
 * NOTE(review): CHECK_NAME only rejects NULL/empty strings; the
 * strcpy calls below assume strlen(name) and strlen(f->name) fit in
 * RTE_SWX_NAME_SIZE — TODO confirm length is bounded by the caller.
 */
556 rte_swx_pipeline_struct_type_register(struct rte_swx_pipeline *p,
558 struct rte_swx_field_params *fields,
561 struct struct_type *st;
565 CHECK_NAME(name, EINVAL);
566 CHECK(fields, EINVAL);
567 CHECK(n_fields, EINVAL);
569 for (i = 0; i < n_fields; i++) {
570 struct rte_swx_field_params *f = &fields[i];
573 CHECK_NAME(f->name, EINVAL);
574 CHECK(f->n_bits, EINVAL);
575 CHECK(f->n_bits <= 64, EINVAL);
576 CHECK((f->n_bits & 7) == 0, EINVAL);
/* Reject duplicate field names within the same struct. */
578 for (j = 0; j < i; j++) {
579 struct rte_swx_field_params *f_prev = &fields[j];
581 CHECK(strcmp(f->name, f_prev->name), EINVAL);
585 CHECK(!struct_type_find(p, name), EEXIST);
587 /* Node allocation. */
588 st = calloc(1, sizeof(struct struct_type));
591 st->fields = calloc(n_fields, sizeof(struct field));
597 /* Node initialization. */
598 strcpy(st->name, name);
/* Each field's offset is the running bit total of preceding fields. */
599 for (i = 0; i < n_fields; i++) {
600 struct field *dst = &st->fields[i];
601 struct rte_swx_field_params *src = &fields[i];
603 strcpy(dst->name, src->name);
604 dst->n_bits = src->n_bits;
605 dst->offset = st->n_bits;
607 st->n_bits += src->n_bits;
609 st->n_fields = n_fields;
611 /* Node add to tailq. */
612 TAILQ_INSERT_TAIL(&p->struct_types, st, node);
618 struct_build(struct rte_swx_pipeline *p)
622 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
623 struct thread *t = &p->threads[i];
625 t->structs = calloc(p->n_structs, sizeof(uint8_t *));
626 CHECK(t->structs, ENOMEM);
633 struct_build_free(struct rte_swx_pipeline *p)
637 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
638 struct thread *t = &p->threads[i];
646 struct_free(struct rte_swx_pipeline *p)
648 struct_build_free(p);
652 struct struct_type *elem;
654 elem = TAILQ_FIRST(&p->struct_types);
658 TAILQ_REMOVE(&p->struct_types, elem, node);
667 static struct port_in_type *
668 port_in_type_find(struct rte_swx_pipeline *p, const char *name)
670 struct port_in_type *elem;
675 TAILQ_FOREACH(elem, &p->port_in_types, node)
676 if (strcmp(elem->name, name) == 0)
683 rte_swx_pipeline_port_in_type_register(struct rte_swx_pipeline *p,
685 struct rte_swx_port_in_ops *ops)
687 struct port_in_type *elem;
690 CHECK_NAME(name, EINVAL);
692 CHECK(ops->create, EINVAL);
693 CHECK(ops->free, EINVAL);
694 CHECK(ops->pkt_rx, EINVAL);
695 CHECK(ops->stats_read, EINVAL);
697 CHECK(!port_in_type_find(p, name), EEXIST);
699 /* Node allocation. */
700 elem = calloc(1, sizeof(struct port_in_type));
703 /* Node initialization. */
704 strcpy(elem->name, name);
705 memcpy(&elem->ops, ops, sizeof(*ops));
707 /* Node add to tailq. */
708 TAILQ_INSERT_TAIL(&p->port_in_types, elem, node);
713 static struct port_in *
714 port_in_find(struct rte_swx_pipeline *p, uint32_t port_id)
716 struct port_in *port;
718 TAILQ_FOREACH(port, &p->ports_in, node)
719 if (port->id == port_id)
726 rte_swx_pipeline_port_in_config(struct rte_swx_pipeline *p,
728 const char *port_type_name,
731 struct port_in_type *type = NULL;
732 struct port_in *port = NULL;
737 CHECK(!port_in_find(p, port_id), EINVAL);
739 CHECK_NAME(port_type_name, EINVAL);
740 type = port_in_type_find(p, port_type_name);
743 obj = type->ops.create(args);
746 /* Node allocation. */
747 port = calloc(1, sizeof(struct port_in));
750 /* Node initialization. */
755 /* Node add to tailq. */
756 TAILQ_INSERT_TAIL(&p->ports_in, port, node);
757 if (p->n_ports_in < port_id + 1)
758 p->n_ports_in = port_id + 1;
/* Build-time validation and runtime table construction for input
 * ports: requires at least one port, a power-of-two port count (the
 * round-robin advance in pipeline_port_inc() masks with
 * n_ports_in - 1), and a dense id range 0..n_ports_in-1, then caches
 * each port's rx callback in the flat p->in[] runtime array.
 */
764 port_in_build(struct rte_swx_pipeline *p)
766 struct port_in *port;
769 CHECK(p->n_ports_in, EINVAL);
770 CHECK(rte_is_power_of_2(p->n_ports_in), EINVAL);
/* Every id in [0, n_ports_in) must have been configured. */
772 for (i = 0; i < p->n_ports_in; i++)
773 CHECK(port_in_find(p, i), EINVAL);
775 p->in = calloc(p->n_ports_in, sizeof(struct port_in_runtime));
776 CHECK(p->in, ENOMEM);
778 TAILQ_FOREACH(port, &p->ports_in, node) {
779 struct port_in_runtime *in = &p->in[port->id];
781 in->pkt_rx = port->type->ops.pkt_rx;
789 port_in_build_free(struct rte_swx_pipeline *p)
796 port_in_free(struct rte_swx_pipeline *p)
798 port_in_build_free(p);
802 struct port_in *port;
804 port = TAILQ_FIRST(&p->ports_in);
808 TAILQ_REMOVE(&p->ports_in, port, node);
809 port->type->ops.free(port->obj);
813 /* Input port types. */
815 struct port_in_type *elem;
817 elem = TAILQ_FIRST(&p->port_in_types);
821 TAILQ_REMOVE(&p->port_in_types, elem, node);
829 static struct port_out_type *
830 port_out_type_find(struct rte_swx_pipeline *p, const char *name)
832 struct port_out_type *elem;
837 TAILQ_FOREACH(elem, &p->port_out_types, node)
838 if (!strcmp(elem->name, name))
845 rte_swx_pipeline_port_out_type_register(struct rte_swx_pipeline *p,
847 struct rte_swx_port_out_ops *ops)
849 struct port_out_type *elem;
852 CHECK_NAME(name, EINVAL);
854 CHECK(ops->create, EINVAL);
855 CHECK(ops->free, EINVAL);
856 CHECK(ops->pkt_tx, EINVAL);
857 CHECK(ops->stats_read, EINVAL);
859 CHECK(!port_out_type_find(p, name), EEXIST);
861 /* Node allocation. */
862 elem = calloc(1, sizeof(struct port_out_type));
865 /* Node initialization. */
866 strcpy(elem->name, name);
867 memcpy(&elem->ops, ops, sizeof(*ops));
869 /* Node add to tailq. */
870 TAILQ_INSERT_TAIL(&p->port_out_types, elem, node);
875 static struct port_out *
876 port_out_find(struct rte_swx_pipeline *p, uint32_t port_id)
878 struct port_out *port;
880 TAILQ_FOREACH(port, &p->ports_out, node)
881 if (port->id == port_id)
888 rte_swx_pipeline_port_out_config(struct rte_swx_pipeline *p,
890 const char *port_type_name,
893 struct port_out_type *type = NULL;
894 struct port_out *port = NULL;
899 CHECK(!port_out_find(p, port_id), EINVAL);
901 CHECK_NAME(port_type_name, EINVAL);
902 type = port_out_type_find(p, port_type_name);
905 obj = type->ops.create(args);
908 /* Node allocation. */
909 port = calloc(1, sizeof(struct port_out));
912 /* Node initialization. */
917 /* Node add to tailq. */
918 TAILQ_INSERT_TAIL(&p->ports_out, port, node);
919 if (p->n_ports_out < port_id + 1)
920 p->n_ports_out = port_id + 1;
926 port_out_build(struct rte_swx_pipeline *p)
928 struct port_out *port;
931 CHECK(p->n_ports_out, EINVAL);
933 for (i = 0; i < p->n_ports_out; i++)
934 CHECK(port_out_find(p, i), EINVAL);
936 p->out = calloc(p->n_ports_out, sizeof(struct port_out_runtime));
937 CHECK(p->out, ENOMEM);
939 TAILQ_FOREACH(port, &p->ports_out, node) {
940 struct port_out_runtime *out = &p->out[port->id];
942 out->pkt_tx = port->type->ops.pkt_tx;
943 out->flush = port->type->ops.flush;
944 out->obj = port->obj;
951 port_out_build_free(struct rte_swx_pipeline *p)
958 port_out_free(struct rte_swx_pipeline *p)
960 port_out_build_free(p);
964 struct port_out *port;
966 port = TAILQ_FIRST(&p->ports_out);
970 TAILQ_REMOVE(&p->ports_out, port, node);
971 port->type->ops.free(port->obj);
975 /* Output port types. */
977 struct port_out_type *elem;
979 elem = TAILQ_FIRST(&p->port_out_types);
983 TAILQ_REMOVE(&p->port_out_types, elem, node);
991 static struct extern_type *
992 extern_type_find(struct rte_swx_pipeline *p, const char *name)
994 struct extern_type *elem;
996 TAILQ_FOREACH(elem, &p->extern_types, node)
997 if (strcmp(elem->name, name) == 0)
1003 static struct extern_type_member_func *
1004 extern_type_member_func_find(struct extern_type *type, const char *name)
1006 struct extern_type_member_func *elem;
1008 TAILQ_FOREACH(elem, &type->funcs, node)
1009 if (strcmp(elem->name, name) == 0)
1015 static struct extern_obj *
1016 extern_obj_find(struct rte_swx_pipeline *p, const char *name)
1018 struct extern_obj *elem;
1020 TAILQ_FOREACH(elem, &p->extern_objs, node)
1021 if (strcmp(elem->name, name) == 0)
1027 static struct field *
1028 extern_obj_mailbox_field_parse(struct rte_swx_pipeline *p,
1030 struct extern_obj **object)
1032 struct extern_obj *obj;
1034 char *obj_name, *field_name;
1036 if ((name[0] != 'e') || (name[1] != '.'))
1039 obj_name = strdup(&name[2]);
1043 field_name = strchr(obj_name, '.');
1052 obj = extern_obj_find(p, obj_name);
1058 f = struct_type_field_find(obj->type->mailbox_struct_type, field_name);
1072 rte_swx_pipeline_extern_type_register(struct rte_swx_pipeline *p,
1074 const char *mailbox_struct_type_name,
1075 rte_swx_extern_type_constructor_t constructor,
1076 rte_swx_extern_type_destructor_t destructor)
1078 struct extern_type *elem;
1079 struct struct_type *mailbox_struct_type;
1083 CHECK_NAME(name, EINVAL);
1084 CHECK(!extern_type_find(p, name), EEXIST);
1086 CHECK_NAME(mailbox_struct_type_name, EINVAL);
1087 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
1088 CHECK(mailbox_struct_type, EINVAL);
1090 CHECK(constructor, EINVAL);
1091 CHECK(destructor, EINVAL);
1093 /* Node allocation. */
1094 elem = calloc(1, sizeof(struct extern_type));
1095 CHECK(elem, ENOMEM);
1097 /* Node initialization. */
1098 strcpy(elem->name, name);
1099 elem->mailbox_struct_type = mailbox_struct_type;
1100 elem->constructor = constructor;
1101 elem->destructor = destructor;
1102 TAILQ_INIT(&elem->funcs);
1104 /* Node add to tailq. */
1105 TAILQ_INSERT_TAIL(&p->extern_types, elem, node);
1111 rte_swx_pipeline_extern_type_member_func_register(struct rte_swx_pipeline *p,
1112 const char *extern_type_name,
1114 rte_swx_extern_type_member_func_t member_func)
1116 struct extern_type *type;
1117 struct extern_type_member_func *type_member;
1121 CHECK(extern_type_name, EINVAL);
1122 type = extern_type_find(p, extern_type_name);
1123 CHECK(type, EINVAL);
1124 CHECK(type->n_funcs < RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX, ENOSPC);
1126 CHECK(name, EINVAL);
1127 CHECK(!extern_type_member_func_find(type, name), EEXIST);
1129 CHECK(member_func, EINVAL);
1131 /* Node allocation. */
1132 type_member = calloc(1, sizeof(struct extern_type_member_func));
1133 CHECK(type_member, ENOMEM);
1135 /* Node initialization. */
1136 strcpy(type_member->name, name);
1137 type_member->func = member_func;
1138 type_member->id = type->n_funcs;
1140 /* Node add to tailq. */
1141 TAILQ_INSERT_TAIL(&type->funcs, type_member, node);
1148 rte_swx_pipeline_extern_object_config(struct rte_swx_pipeline *p,
1149 const char *extern_type_name,
1153 struct extern_type *type;
1154 struct extern_obj *obj;
1159 CHECK_NAME(extern_type_name, EINVAL);
1160 type = extern_type_find(p, extern_type_name);
1161 CHECK(type, EINVAL);
1163 CHECK_NAME(name, EINVAL);
1164 CHECK(!extern_obj_find(p, name), EEXIST);
1166 /* Node allocation. */
1167 obj = calloc(1, sizeof(struct extern_obj));
1170 /* Object construction. */
1171 obj_handle = type->constructor(args);
1177 /* Node initialization. */
1178 strcpy(obj->name, name);
1180 obj->obj = obj_handle;
1181 obj->struct_id = p->n_structs;
1182 obj->id = p->n_extern_objs;
1184 /* Node add to tailq. */
1185 TAILQ_INSERT_TAIL(&p->extern_objs, obj, node);
1193 extern_obj_build(struct rte_swx_pipeline *p)
1197 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1198 struct thread *t = &p->threads[i];
1199 struct extern_obj *obj;
1201 t->extern_objs = calloc(p->n_extern_objs,
1202 sizeof(struct extern_obj_runtime));
1203 CHECK(t->extern_objs, ENOMEM);
1205 TAILQ_FOREACH(obj, &p->extern_objs, node) {
1206 struct extern_obj_runtime *r =
1207 &t->extern_objs[obj->id];
1208 struct extern_type_member_func *func;
1209 uint32_t mailbox_size =
1210 obj->type->mailbox_struct_type->n_bits / 8;
1214 r->mailbox = calloc(1, mailbox_size);
1215 CHECK(r->mailbox, ENOMEM);
1217 TAILQ_FOREACH(func, &obj->type->funcs, node)
1218 r->funcs[func->id] = func->func;
1220 t->structs[obj->struct_id] = r->mailbox;
1228 extern_obj_build_free(struct rte_swx_pipeline *p)
1232 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1233 struct thread *t = &p->threads[i];
1236 if (!t->extern_objs)
1239 for (j = 0; j < p->n_extern_objs; j++) {
1240 struct extern_obj_runtime *r = &t->extern_objs[j];
1245 free(t->extern_objs);
1246 t->extern_objs = NULL;
1251 extern_obj_free(struct rte_swx_pipeline *p)
1253 extern_obj_build_free(p);
1255 /* Extern objects. */
1257 struct extern_obj *elem;
1259 elem = TAILQ_FIRST(&p->extern_objs);
1263 TAILQ_REMOVE(&p->extern_objs, elem, node);
1265 elem->type->destructor(elem->obj);
1271 struct extern_type *elem;
1273 elem = TAILQ_FIRST(&p->extern_types);
1277 TAILQ_REMOVE(&p->extern_types, elem, node);
1280 struct extern_type_member_func *func;
1282 func = TAILQ_FIRST(&elem->funcs);
1286 TAILQ_REMOVE(&elem->funcs, func, node);
1297 static struct extern_func *
1298 extern_func_find(struct rte_swx_pipeline *p, const char *name)
1300 struct extern_func *elem;
1302 TAILQ_FOREACH(elem, &p->extern_funcs, node)
1303 if (strcmp(elem->name, name) == 0)
1309 static struct field *
1310 extern_func_mailbox_field_parse(struct rte_swx_pipeline *p,
1312 struct extern_func **function)
1314 struct extern_func *func;
1316 char *func_name, *field_name;
1318 if ((name[0] != 'f') || (name[1] != '.'))
1321 func_name = strdup(&name[2]);
1325 field_name = strchr(func_name, '.');
1334 func = extern_func_find(p, func_name);
1340 f = struct_type_field_find(func->mailbox_struct_type, field_name);
/* Register an extern function: validate name uniqueness, resolve the
 * mailbox struct type, then allocate and enqueue the node, assigning
 * it the next struct slot and function id.
 */
1354 rte_swx_pipeline_extern_func_register(struct rte_swx_pipeline *p,
1356 const char *mailbox_struct_type_name,
1357 rte_swx_extern_func_t func)
1359 struct extern_func *f;
1360 struct struct_type *mailbox_struct_type;
1364 CHECK_NAME(name, EINVAL);
1365 CHECK(!extern_func_find(p, name), EEXIST);
1367 CHECK_NAME(mailbox_struct_type_name, EINVAL);
1368 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
1369 CHECK(mailbox_struct_type, EINVAL);
1371 CHECK(func, EINVAL);
1373 /* Node allocation. */
1374 f = calloc(1, sizeof(struct extern_func));
/* BUG(review): this checks 'func', which was already validated
 * non-NULL above, instead of the calloc result 'f'. An allocation
 * failure goes undetected and 'f' is dereferenced below (NULL
 * dereference). Should be CHECK(f, ENOMEM).
 */
1375 CHECK(func, ENOMEM);
1377 /* Node initialization. */
1378 strcpy(f->name, name);
1379 f->mailbox_struct_type = mailbox_struct_type;
1381 f->struct_id = p->n_structs;
1382 f->id = p->n_extern_funcs;
1384 /* Node add to tailq. */
1385 TAILQ_INSERT_TAIL(&p->extern_funcs, f, node);
1386 p->n_extern_funcs++;
1393 extern_func_build(struct rte_swx_pipeline *p)
1397 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1398 struct thread *t = &p->threads[i];
1399 struct extern_func *func;
1401 /* Memory allocation. */
1402 t->extern_funcs = calloc(p->n_extern_funcs,
1403 sizeof(struct extern_func_runtime));
1404 CHECK(t->extern_funcs, ENOMEM);
1406 /* Extern function. */
1407 TAILQ_FOREACH(func, &p->extern_funcs, node) {
1408 struct extern_func_runtime *r =
1409 &t->extern_funcs[func->id];
1410 uint32_t mailbox_size =
1411 func->mailbox_struct_type->n_bits / 8;
1413 r->func = func->func;
1415 r->mailbox = calloc(1, mailbox_size);
1416 CHECK(r->mailbox, ENOMEM);
1418 t->structs[func->struct_id] = r->mailbox;
1426 extern_func_build_free(struct rte_swx_pipeline *p)
1430 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1431 struct thread *t = &p->threads[i];
1434 if (!t->extern_funcs)
1437 for (j = 0; j < p->n_extern_funcs; j++) {
1438 struct extern_func_runtime *r = &t->extern_funcs[j];
1443 free(t->extern_funcs);
1444 t->extern_funcs = NULL;
1449 extern_func_free(struct rte_swx_pipeline *p)
1451 extern_func_build_free(p);
1454 struct extern_func *elem;
1456 elem = TAILQ_FIRST(&p->extern_funcs);
1460 TAILQ_REMOVE(&p->extern_funcs, elem, node);
1468 static struct header *
1469 header_find(struct rte_swx_pipeline *p, const char *name)
1471 struct header *elem;
1473 TAILQ_FOREACH(elem, &p->headers, node)
1474 if (strcmp(elem->name, name) == 0)
1480 static struct header *
1481 header_parse(struct rte_swx_pipeline *p,
1484 if (name[0] != 'h' || name[1] != '.')
1487 return header_find(p, &name[2]);
1490 static struct field *
1491 header_field_parse(struct rte_swx_pipeline *p,
1493 struct header **header)
1497 char *header_name, *field_name;
1499 if ((name[0] != 'h') || (name[1] != '.'))
1502 header_name = strdup(&name[2]);
1506 field_name = strchr(header_name, '.');
1515 h = header_find(p, header_name);
1521 f = struct_type_field_find(h->st, field_name);
1535 rte_swx_pipeline_packet_header_register(struct rte_swx_pipeline *p,
1537 const char *struct_type_name)
1539 struct struct_type *st;
1541 size_t n_headers_max;
1544 CHECK_NAME(name, EINVAL);
1545 CHECK_NAME(struct_type_name, EINVAL);
1547 CHECK(!header_find(p, name), EEXIST);
1549 st = struct_type_find(p, struct_type_name);
1552 n_headers_max = RTE_SIZEOF_FIELD(struct thread, valid_headers) * 8;
1553 CHECK(p->n_headers < n_headers_max, ENOSPC);
1555 /* Node allocation. */
1556 h = calloc(1, sizeof(struct header));
1559 /* Node initialization. */
1560 strcpy(h->name, name);
1562 h->struct_id = p->n_structs;
1563 h->id = p->n_headers;
1565 /* Node add to tailq. */
1566 TAILQ_INSERT_TAIL(&p->headers, h, node);
1574 header_build(struct rte_swx_pipeline *p)
1577 uint32_t n_bytes = 0, i;
1579 TAILQ_FOREACH(h, &p->headers, node) {
1580 n_bytes += h->st->n_bits / 8;
1583 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1584 struct thread *t = &p->threads[i];
1585 uint32_t offset = 0;
1587 t->headers = calloc(p->n_headers,
1588 sizeof(struct header_runtime));
1589 CHECK(t->headers, ENOMEM);
1591 t->headers_out = calloc(p->n_headers,
1592 sizeof(struct header_out_runtime));
1593 CHECK(t->headers_out, ENOMEM);
1595 t->header_storage = calloc(1, n_bytes);
1596 CHECK(t->header_storage, ENOMEM);
1598 t->header_out_storage = calloc(1, n_bytes);
1599 CHECK(t->header_out_storage, ENOMEM);
1601 TAILQ_FOREACH(h, &p->headers, node) {
1602 uint8_t *header_storage;
1604 header_storage = &t->header_storage[offset];
1605 offset += h->st->n_bits / 8;
1607 t->headers[h->id].ptr0 = header_storage;
1608 t->structs[h->struct_id] = header_storage;
1616 header_build_free(struct rte_swx_pipeline *p)
1620 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1621 struct thread *t = &p->threads[i];
1623 free(t->headers_out);
1624 t->headers_out = NULL;
1629 free(t->header_out_storage);
1630 t->header_out_storage = NULL;
1632 free(t->header_storage);
1633 t->header_storage = NULL;
1638 header_free(struct rte_swx_pipeline *p)
1640 header_build_free(p);
1643 struct header *elem;
1645 elem = TAILQ_FIRST(&p->headers);
1649 TAILQ_REMOVE(&p->headers, elem, node);
1657 static struct field *
1658 metadata_field_parse(struct rte_swx_pipeline *p, const char *name)
1660 if (!p->metadata_st)
1663 if (name[0] != 'm' || name[1] != '.')
1666 return struct_type_field_find(p->metadata_st, &name[2]);
1670 rte_swx_pipeline_packet_metadata_register(struct rte_swx_pipeline *p,
1671 const char *struct_type_name)
1673 struct struct_type *st = NULL;
1677 CHECK_NAME(struct_type_name, EINVAL);
1678 st = struct_type_find(p, struct_type_name);
1680 CHECK(!p->metadata_st, EINVAL);
1682 p->metadata_st = st;
1683 p->metadata_struct_id = p->n_structs;
1691 metadata_build(struct rte_swx_pipeline *p)
1693 uint32_t n_bytes = p->metadata_st->n_bits / 8;
1696 /* Thread-level initialization. */
1697 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1698 struct thread *t = &p->threads[i];
1701 metadata = calloc(1, n_bytes);
1702 CHECK(metadata, ENOMEM);
1704 t->metadata = metadata;
1705 t->structs[p->metadata_struct_id] = metadata;
1712 metadata_build_free(struct rte_swx_pipeline *p)
1716 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1717 struct thread *t = &p->threads[i];
1725 metadata_free(struct rte_swx_pipeline *p)
1727 metadata_build_free(p);
1733 static struct field *
1734 action_field_parse(struct action *action, const char *name);
1736 static struct field *
1737 struct_field_parse(struct rte_swx_pipeline *p,
1738 struct action *action,
1740 uint32_t *struct_id)
1747 struct header *header;
1749 f = header_field_parse(p, name, &header);
1753 *struct_id = header->struct_id;
1759 f = metadata_field_parse(p, name);
1763 *struct_id = p->metadata_struct_id;
1772 f = action_field_parse(action, name);
1782 struct extern_obj *obj;
1784 f = extern_obj_mailbox_field_parse(p, name, &obj);
1788 *struct_id = obj->struct_id;
1794 struct extern_func *func;
1796 f = extern_func_mailbox_field_parse(p, name, &func);
1800 *struct_id = func->struct_id;
/* Advance the current RX port in round-robin order. The bitwise AND
 * wrap is correct only because port_in_build() enforces that
 * n_ports_in is a power of two.
 */
1810 pipeline_port_inc(struct rte_swx_pipeline *p)
1812 p->port_id = (p->port_id + 1) & (p->n_ports_in - 1);
1816 thread_ip_reset(struct rte_swx_pipeline *p, struct thread *t)
1818 t->ip = p->instructions;
1822 thread_ip_inc(struct rte_swx_pipeline *p);
1825 thread_ip_inc(struct rte_swx_pipeline *p)
1827 struct thread *t = &p->threads[p->thread_id];
1833 thread_ip_inc_cond(struct thread *t, int cond)
1839 thread_yield(struct rte_swx_pipeline *p)
1841 p->thread_id = (p->thread_id + 1) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
1848 instr_rx_translate(struct rte_swx_pipeline *p,
1849 struct action *action,
1852 struct instruction *instr,
1853 struct instruction_data *data __rte_unused)
1857 CHECK(!action, EINVAL);
1858 CHECK(n_tokens == 2, EINVAL);
1860 f = metadata_field_parse(p, tokens[1]);
1863 instr->type = INSTR_RX;
1864 instr->io.io.offset = f->offset / 8;
1865 instr->io.io.n_bits = f->n_bits;
1870 instr_rx_exec(struct rte_swx_pipeline *p);
1873 instr_rx_exec(struct rte_swx_pipeline *p)
1875 struct thread *t = &p->threads[p->thread_id];
1876 struct instruction *ip = t->ip;
1877 struct port_in_runtime *port = &p->in[p->port_id];
1878 struct rte_swx_pkt *pkt = &t->pkt;
1882 pkt_received = port->pkt_rx(port->obj, pkt);
1883 t->ptr = &pkt->pkt[pkt->offset];
1884 rte_prefetch0(t->ptr);
1886 TRACE("[Thread %2u] rx %s from port %u\n",
1888 pkt_received ? "1 pkt" : "0 pkts",
1892 t->valid_headers = 0;
1893 t->n_headers_out = 0;
1896 METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, p->port_id);
1899 t->table_state = p->table_state;
1902 pipeline_port_inc(p);
1903 thread_ip_inc_cond(t, pkt_received);
1911 instr_tx_translate(struct rte_swx_pipeline *p,
1912 struct action *action __rte_unused,
1915 struct instruction *instr,
1916 struct instruction_data *data __rte_unused)
1920 CHECK(n_tokens == 2, EINVAL);
1922 f = metadata_field_parse(p, tokens[1]);
1925 instr->type = INSTR_TX;
1926 instr->io.io.offset = f->offset / 8;
1927 instr->io.io.n_bits = f->n_bits;
/* Prepend the emitted headers to the packet at TX time, choosing the
 * cheapest strategy: (1) single contiguous header already adjacent to
 * the payload -> just adjust offset/length, no copy; (2) two-header
 * encapsulation -> one memcpy of the outer header; otherwise fall
 * through to the general path that gathers all emitted headers into
 * header_out_storage and copies them in front of the payload.
 */
1932 emit_handler(struct thread *t)
1934 struct header_out_runtime *h0 = &t->headers_out[0];
1935 struct header_out_runtime *h1 = &t->headers_out[1];
1936 uint32_t offset = 0, i;
1938 /* No header change or header decapsulation. */
1939 if ((t->n_headers_out == 1) &&
1940 (h0->ptr + h0->n_bytes == t->ptr)) {
1941 TRACE("Emit handler: no header change or header decap.\n");
1943 t->pkt.offset -= h0->n_bytes;
1944 t->pkt.length += h0->n_bytes;
1949 /* Header encapsulation (optionally, with prior header decapsulation). */
1950 if ((t->n_headers_out == 2) &&
1951 (h1->ptr + h1->n_bytes == t->ptr) &&
1952 (h0->ptr == h0->ptr0)) {
1955 TRACE("Emit handler: header encapsulation.\n");
1957 offset = h0->n_bytes + h1->n_bytes;
1958 memcpy(t->ptr - offset, h0->ptr, h0->n_bytes);
1959 t->pkt.offset -= offset;
1960 t->pkt.length += offset;
1965 /* Header insertion. */
1968 /* Header extraction. */
1971 /* For any other case. */
1972 TRACE("Emit handler: complex case.\n");
/* Gather all emitted headers contiguously, then one copy to front. */
1974 for (i = 0; i < t->n_headers_out; i++) {
1975 struct header_out_runtime *h = &t->headers_out[i];
1977 memcpy(&t->header_out_storage[offset], h->ptr, h->n_bytes);
1978 offset += h->n_bytes;
1982 memcpy(t->ptr - offset, t->header_out_storage, offset);
1983 t->pkt.offset -= offset;
1984 t->pkt.length += offset;
/* Forward declaration: tx instruction executor. */
1989 instr_tx_exec(struct rte_swx_pipeline *p);
/*
 * tx m.f: read the output port ID from the meta-data operand, fold the
 * emitted headers back into the packet and hand it to that output port.
 */
1992 instr_tx_exec(struct rte_swx_pipeline *p)
1994 struct thread *t = &p->threads[p->thread_id];
1995 struct instruction *ip = t->ip;
1996 uint64_t port_id = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
/* NOTE(review): port_id comes straight from packet meta-data and is not
 * range-checked here — presumably guaranteed by the program; confirm. */
1997 struct port_out_runtime *port = &p->out[port_id];
1998 struct rte_swx_pkt *pkt = &t->pkt;
2000 TRACE("[Thread %2u]: tx 1 pkt to port %u\n",
2008 port->pkt_tx(port->obj, pkt);
/* Packet done: restart this thread at the first instruction (rx). */
2011 thread_ip_reset(p, t);
/*
 * extract h.header: translate header extraction; parser-only (not valid
 * inside an action), exactly one header operand.
 */
2019 instr_hdr_extract_translate(struct rte_swx_pipeline *p,
2020 struct action *action,
2023 struct instruction *instr,
2024 struct instruction_data *data __rte_unused)
/* Extraction is a parser-only instruction. */
2028 CHECK(!action, EINVAL);
2029 CHECK(n_tokens == 2, EINVAL);
2031 h = header_parse(p, tokens[1]);
2034 instr->type = INSTR_HDR_EXTRACT;
/* Slot 0 of the fusable operand arrays; consecutive extracts may later
 * be fused into INSTR_HDR_EXTRACT2..8 filling the next slots. */
2035 instr->io.hdr.header_id[0] = h->id;
2036 instr->io.hdr.struct_id[0] = h->struct_id;
2037 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
/* Forward declaration: common executor for 1..8 fused header extracts. */
2042 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract);
/*
 * Extract n_extract consecutive headers in place: each header struct
 * pointer is aimed directly at the packet bytes (zero copy), the header
 * is marked valid, and packet offset/length are updated accordingly.
 */
2045 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract)
2047 struct thread *t = &p->threads[p->thread_id];
2048 struct instruction *ip = t->ip;
2049 uint64_t valid_headers = t->valid_headers;
2050 uint8_t *ptr = t->ptr;
2051 uint32_t offset = t->pkt.offset;
2052 uint32_t length = t->pkt.length;
2055 for (i = 0; i < n_extract; i++) {
2056 uint32_t header_id = ip->io.hdr.header_id[i];
2057 uint32_t struct_id = ip->io.hdr.struct_id[i];
2058 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
2060 TRACE("[Thread %2u]: extract header %u (%u bytes)\n",
/* Headers: point the header struct at the packet, mark it valid. */
2066 t->structs[struct_id] = ptr;
2067 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
/* Write back thread state accumulated across the whole batch. */
2076 t->valid_headers = valid_headers;
2079 t->pkt.offset = offset;
2080 t->pkt.length = length;
/* extract: single header. */
2085 instr_hdr_extract_exec(struct rte_swx_pipeline *p)
2087 __instr_hdr_extract_exec(p, 1);
/* extract x2..x8: fused forms produced by the instruction optimizer;
 * each consumes 2..8 consecutive extract instructions in one step. */
2094 instr_hdr_extract2_exec(struct rte_swx_pipeline *p)
2096 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
2099 __instr_hdr_extract_exec(p, 2);
2106 instr_hdr_extract3_exec(struct rte_swx_pipeline *p)
2108 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
2111 __instr_hdr_extract_exec(p, 3);
2118 instr_hdr_extract4_exec(struct rte_swx_pipeline *p)
2120 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
2123 __instr_hdr_extract_exec(p, 4);
2130 instr_hdr_extract5_exec(struct rte_swx_pipeline *p)
2132 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
2135 __instr_hdr_extract_exec(p, 5);
2142 instr_hdr_extract6_exec(struct rte_swx_pipeline *p)
2144 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
2147 __instr_hdr_extract_exec(p, 6);
2154 instr_hdr_extract7_exec(struct rte_swx_pipeline *p)
2156 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
2159 __instr_hdr_extract_exec(p, 7);
2166 instr_hdr_extract8_exec(struct rte_swx_pipeline *p)
2168 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
2171 __instr_hdr_extract_exec(p, 8);
/*
 * emit h.header: translate header emit; exactly one header operand
 * (action parameter unused, so valid in any context).
 */
2181 instr_hdr_emit_translate(struct rte_swx_pipeline *p,
2182 struct action *action __rte_unused,
2185 struct instruction *instr,
2186 struct instruction_data *data __rte_unused)
2190 CHECK(n_tokens == 2, EINVAL);
2192 h = header_parse(p, tokens[1]);
2195 instr->type = INSTR_HDR_EMIT;
/* Slot 0 of the fusable operand arrays (see INSTR_HDR_EMIT2..8_TX). */
2196 instr->io.hdr.header_id[0] = h->id;
2197 instr->io.hdr.struct_id[0] = h->struct_id;
2198 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
/* Forward declaration: common executor for 1..8 fused header emits. */
2203 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit);
/*
 * Queue n_emit headers for transmission. Headers contiguous in memory
 * are merged into the current headers_out[] entry; non-contiguous ones
 * open a new entry. The actual packet rewrite is deferred to
 * emit_handler() at tx time.
 */
2206 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit)
2208 struct thread *t = &p->threads[p->thread_id];
2209 struct instruction *ip = t->ip;
2210 uint32_t n_headers_out = t->n_headers_out;
/* NOTE(review): when n_headers_out is 0 this forms &headers_out[-1];
 * ho is re-aimed at headers_out[0] in the loop before any use — confirm
 * that every path re-points it before dereferencing. */
2211 struct header_out_runtime *ho = &t->headers_out[n_headers_out - 1];
2212 uint8_t *ho_ptr = NULL;
2213 uint32_t ho_nbytes = 0, i;
2215 for (i = 0; i < n_emit; i++) {
2216 uint32_t header_id = ip->io.hdr.header_id[i];
2217 uint32_t struct_id = ip->io.hdr.struct_id[i];
2218 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
2220 struct header_runtime *hi = &t->headers[header_id];
2221 uint8_t *hi_ptr = t->structs[struct_id];
2223 TRACE("[Thread %2u]: emit header %u\n",
/* First emitted header: open headers_out[0]. */
2229 if (!t->n_headers_out) {
2230 ho = &t->headers_out[0];
2232 ho->ptr0 = hi->ptr0;
2236 ho_nbytes = n_bytes;
2243 ho_nbytes = ho->n_bytes;
/* Header contiguous with the previous one: extend the current entry;
 * otherwise commit it and open a new one. */
2247 if (ho_ptr + ho_nbytes == hi_ptr) {
2248 ho_nbytes += n_bytes;
2250 ho->n_bytes = ho_nbytes;
2253 ho->ptr0 = hi->ptr0;
2257 ho_nbytes = n_bytes;
/* Commit the last (still open) entry and the updated entry count. */
2263 ho->n_bytes = ho_nbytes;
2264 t->n_headers_out = n_headers_out;
/* emit: queue a single header. */
2268 instr_hdr_emit_exec(struct rte_swx_pipeline *p)
2270 __instr_hdr_emit_exec(p, 1);
/* emit .. emit + tx fused: queue 1..8 headers, then transmit in the
 * same step (the fused tx follows the emit call in each body). */
2277 instr_hdr_emit_tx_exec(struct rte_swx_pipeline *p)
2279 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
2282 __instr_hdr_emit_exec(p, 1);
2287 instr_hdr_emit2_tx_exec(struct rte_swx_pipeline *p)
2289 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
2292 __instr_hdr_emit_exec(p, 2);
2297 instr_hdr_emit3_tx_exec(struct rte_swx_pipeline *p)
2299 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
2302 __instr_hdr_emit_exec(p, 3);
2307 instr_hdr_emit4_tx_exec(struct rte_swx_pipeline *p)
2309 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
2312 __instr_hdr_emit_exec(p, 4);
2317 instr_hdr_emit5_tx_exec(struct rte_swx_pipeline *p)
2319 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
2322 __instr_hdr_emit_exec(p, 5);
2327 instr_hdr_emit6_tx_exec(struct rte_swx_pipeline *p)
2329 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
2332 __instr_hdr_emit_exec(p, 6);
2337 instr_hdr_emit7_tx_exec(struct rte_swx_pipeline *p)
2339 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
2342 __instr_hdr_emit_exec(p, 7);
2347 instr_hdr_emit8_tx_exec(struct rte_swx_pipeline *p)
2349 TRACE("[Thread %2u] *** The next 9 instructions are fused. ***\n",
2352 __instr_hdr_emit_exec(p, 8);
/* validate h.header: translate; exactly one header operand. */
2360 instr_hdr_validate_translate(struct rte_swx_pipeline *p,
2361 struct action *action __rte_unused,
2364 struct instruction *instr,
2365 struct instruction_data *data __rte_unused)
2369 CHECK(n_tokens == 2, EINVAL);
2371 h = header_parse(p, tokens[1]);
2374 instr->type = INSTR_HDR_VALIDATE;
2375 instr->valid.header_id = h->id;
/* validate: set the header's bit in the thread's valid-header mask. */
2380 instr_hdr_validate_exec(struct rte_swx_pipeline *p)
2382 struct thread *t = &p->threads[p->thread_id];
2383 struct instruction *ip = t->ip;
2384 uint32_t header_id = ip->valid.header_id;
2386 TRACE("[Thread %2u] validate header %u\n", p->thread_id, header_id);
2389 t->valid_headers = MASK64_BIT_SET(t->valid_headers, header_id);
/* invalidate h.header: translate; exactly one header operand. */
2399 instr_hdr_invalidate_translate(struct rte_swx_pipeline *p,
2400 struct action *action __rte_unused,
2403 struct instruction *instr,
2404 struct instruction_data *data __rte_unused)
2408 CHECK(n_tokens == 2, EINVAL);
2410 h = header_parse(p, tokens[1]);
2413 instr->type = INSTR_HDR_INVALIDATE;
2414 instr->valid.header_id = h->id;
/* invalidate: clear the header's bit in the valid-header mask. */
2419 instr_hdr_invalidate_exec(struct rte_swx_pipeline *p)
2421 struct thread *t = &p->threads[p->thread_id];
2422 struct instruction *ip = t->ip;
2423 uint32_t header_id = ip->valid.header_id;
2425 TRACE("[Thread %2u] invalidate header %u\n", p->thread_id, header_id);
2428 t->valid_headers = MASK64_BIT_CLR(t->valid_headers, header_id);
/*
 * mov dst src: translate a field move. Variants selected here:
 *   INSTR_MOV    dst and src in the same endianness domain;
 *   INSTR_MOV_S  exactly one of dst/src is a header field (network byte
 *                order), so a run-time byte swap is needed;
 *   INSTR_MOV_I  src is an immediate integer constant.
 */
2438 instr_mov_translate(struct rte_swx_pipeline *p,
2439 struct action *action,
2442 struct instruction *instr,
2443 struct instruction_data *data __rte_unused)
2445 char *dst = tokens[1], *src = tokens[2];
2446 struct field *fdst, *fsrc;
2447 uint32_t dst_struct_id, src_struct_id, src_val;
2449 CHECK(n_tokens == 3, EINVAL);
/* Destination must be a writable header/meta field, never an action arg
 * or immediate (hence the NULL action argument). */
2451 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
2452 CHECK(fdst, EINVAL);
/* MOV or MOV_S: source parses as a field. */
2455 fsrc = struct_field_parse(p, action, src, &src_struct_id);
2457 instr->type = INSTR_MOV;
/* Exactly one side lives in a header => byte-swapping move. */
2458 if ((dst[0] == 'h' && src[0] != 'h') ||
2459 (dst[0] != 'h' && src[0] == 'h'))
2460 instr->type = INSTR_MOV_S;
2462 instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
2463 instr->mov.dst.n_bits = fdst->n_bits;
2464 instr->mov.dst.offset = fdst->offset / 8;
2465 instr->mov.src.struct_id = (uint8_t)src_struct_id;
2466 instr->mov.src.n_bits = fsrc->n_bits;
2467 instr->mov.src.offset = fsrc->offset / 8;
/* MOV_I: source is an immediate; strtoul must consume the whole token. */
2472 src_val = strtoul(src, &src, 0);
2473 CHECK(!src[0], EINVAL);
/* NOTE(review): the immediate is pre-swapped with 32-bit htonl() here —
 * assumes destination header fields of at most 32 bits; confirm wider
 * fields are rejected elsewhere. */
2476 src_val = htonl(src_val);
2478 instr->type = INSTR_MOV_I;
2479 instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
2480 instr->mov.dst.n_bits = fdst->n_bits;
2481 instr->mov.dst.offset = fdst->offset / 8;
2482 instr->mov.src_val = (uint32_t)src_val;
/* mov: copy between same-endianness fields (no byte swap). */
2487 instr_mov_exec(struct rte_swx_pipeline *p)
2489 struct thread *t = &p->threads[p->thread_id];
2490 struct instruction *ip = t->ip;
2492 TRACE("[Thread %2u] mov\n",
/* mov (s): copy with byte-order swap (header <-> meta/action field). */
2502 instr_mov_s_exec(struct rte_swx_pipeline *p)
2504 struct thread *t = &p->threads[p->thread_id];
2505 struct instruction *ip = t->ip;
2507 TRACE("[Thread %2u] mov (s)\n",
/* mov (i): store an immediate constant into a field. */
2517 instr_mov_i_exec(struct rte_swx_pipeline *p)
2519 struct thread *t = &p->threads[p->thread_id];
2520 struct instruction *ip = t->ip;
2522 TRACE("[Thread %2u] mov m.f %x\n",
2532 #define RTE_SWX_INSTRUCTION_TOKENS_MAX 16
/*
 * Parse one instruction string: tokenize on whitespace, strip an
 * optional "LABEL :" prefix, then dispatch to the per-mnemonic
 * translator with the remaining tokens.
 */
2535 instr_translate(struct rte_swx_pipeline *p,
2536 struct action *action,
2538 struct instruction *instr,
2539 struct instruction_data *data)
2541 char *tokens[RTE_SWX_INSTRUCTION_TOKENS_MAX];
2542 int n_tokens = 0, tpos = 0;
2544 /* Parse the instruction string into tokens. */
2548 token = strtok_r(string, " \t\v", &string);
2552 CHECK(n_tokens < RTE_SWX_INSTRUCTION_TOKENS_MAX, EINVAL);
2554 tokens[n_tokens] = token;
2558 CHECK(n_tokens, EINVAL);
2560 /* Handle the optional instruction label. */
2561 if ((n_tokens >= 2) && !strcmp(tokens[1], ":")) {
2562 strcpy(data->label, tokens[0]);
/* tpos points past the label (if any); at least the mnemonic remains. */
2565 CHECK(n_tokens - tpos, EINVAL);
2568 /* Identify the instruction type. */
2569 if (!strcmp(tokens[tpos], "rx"))
2570 return instr_rx_translate(p,
2577 if (!strcmp(tokens[tpos], "tx"))
2578 return instr_tx_translate(p,
2585 if (!strcmp(tokens[tpos], "extract"))
2586 return instr_hdr_extract_translate(p,
2593 if (!strcmp(tokens[tpos], "emit"))
2594 return instr_hdr_emit_translate(p,
2601 if (!strcmp(tokens[tpos], "validate"))
2602 return instr_hdr_validate_translate(p,
2609 if (!strcmp(tokens[tpos], "invalidate"))
2610 return instr_hdr_invalidate_translate(p,
2617 if (!strcmp(tokens[tpos], "mov"))
2618 return instr_mov_translate(p,
/* Count how many instructions jump to the given label. */
2629 label_is_used(struct instruction_data *data, uint32_t n, const char *label)
2631 uint32_t count = 0, i;
2636 for (i = 0; i < n; i++)
2637 if (!strcmp(label, data[i].jmp_label))
/*
 * Validate labels across a translated program: all labels must be
 * unique, and each label's user count is recorded for later passes.
 */
2644 instr_label_check(struct instruction_data *instruction_data,
2645 uint32_t n_instructions)
2649 /* Check that all instruction labels are unique. */
2650 for (i = 0; i < n_instructions; i++) {
2651 struct instruction_data *data = &instruction_data[i];
2652 char *label = data->label;
2658 for (j = i + 1; j < n_instructions; j++)
2659 CHECK(strcmp(label, data[j].label), EINVAL);
2662 /* Get users for each instruction label. */
2663 for (i = 0; i < n_instructions; i++) {
2664 struct instruction_data *data = &instruction_data[i];
2665 char *label = data->label;
2667 data->n_users = label_is_used(instruction_data,
/*
 * Translate an array of instruction strings and attach the result either
 * to action a (when non-NULL) or to the pipeline entry program.
 */
2676 instruction_config(struct rte_swx_pipeline *p,
2678 const char **instructions,
2679 uint32_t n_instructions)
2681 struct instruction *instr = NULL;
2682 struct instruction_data *data = NULL;
2683 char *string = NULL;
2687 CHECK(n_instructions, EINVAL);
2688 CHECK(instructions, EINVAL);
2689 for (i = 0; i < n_instructions; i++)
2690 CHECK(instructions[i], EINVAL);
2692 /* Memory allocation. */
2693 instr = calloc(n_instructions, sizeof(struct instruction));
2699 data = calloc(n_instructions, sizeof(struct instruction_data));
2705 for (i = 0; i < n_instructions; i++) {
/* strtok_r() inside instr_translate() modifies its input: use a copy. */
2706 string = strdup(instructions[i]);
2712 err = instr_translate(p, a, string, &instr[i], &data[i]);
2719 err = instr_label_check(data, n_instructions);
/* Commit: ownership of instr transfers to the action or the pipeline. */
2726 a->instructions = instr;
2727 a->n_instructions = n_instructions;
2729 p->instructions = instr;
2730 p->n_instructions = n_instructions;
/* Instruction executor type and opcode -> executor dispatch table. */
2742 typedef void (*instr_exec_t)(struct rte_swx_pipeline *);
2744 static instr_exec_t instruction_table[] = {
2745 [INSTR_RX] = instr_rx_exec,
2746 [INSTR_TX] = instr_tx_exec,
2748 [INSTR_HDR_EXTRACT] = instr_hdr_extract_exec,
2749 [INSTR_HDR_EXTRACT2] = instr_hdr_extract2_exec,
2750 [INSTR_HDR_EXTRACT3] = instr_hdr_extract3_exec,
2751 [INSTR_HDR_EXTRACT4] = instr_hdr_extract4_exec,
2752 [INSTR_HDR_EXTRACT5] = instr_hdr_extract5_exec,
2753 [INSTR_HDR_EXTRACT6] = instr_hdr_extract6_exec,
2754 [INSTR_HDR_EXTRACT7] = instr_hdr_extract7_exec,
2755 [INSTR_HDR_EXTRACT8] = instr_hdr_extract8_exec,
2757 [INSTR_HDR_EMIT] = instr_hdr_emit_exec,
2758 [INSTR_HDR_EMIT_TX] = instr_hdr_emit_tx_exec,
2759 [INSTR_HDR_EMIT2_TX] = instr_hdr_emit2_tx_exec,
2760 [INSTR_HDR_EMIT3_TX] = instr_hdr_emit3_tx_exec,
2761 [INSTR_HDR_EMIT4_TX] = instr_hdr_emit4_tx_exec,
2762 [INSTR_HDR_EMIT5_TX] = instr_hdr_emit5_tx_exec,
2763 [INSTR_HDR_EMIT6_TX] = instr_hdr_emit6_tx_exec,
2764 [INSTR_HDR_EMIT7_TX] = instr_hdr_emit7_tx_exec,
2765 [INSTR_HDR_EMIT8_TX] = instr_hdr_emit8_tx_exec,
2767 [INSTR_HDR_VALIDATE] = instr_hdr_validate_exec,
2768 [INSTR_HDR_INVALIDATE] = instr_hdr_invalidate_exec,
2770 [INSTR_MOV] = instr_mov_exec,
2771 [INSTR_MOV_S] = instr_mov_s_exec,
2772 [INSTR_MOV_I] = instr_mov_i_exec,
/* Execute the current instruction of the current thread via dispatch. */
2776 instr_exec(struct rte_swx_pipeline *p)
2778 struct thread *t = &p->threads[p->thread_id];
2779 struct instruction *ip = t->ip;
2780 instr_exec_t instr = instruction_table[ip->type];
/* Look up an action by name; NULL when not found. */
2788 static struct action *
2789 action_find(struct rte_swx_pipeline *p, const char *name)
2791 struct action *elem;
2796 TAILQ_FOREACH(elem, &p->actions, node)
2797 if (strcmp(elem->name, name) == 0)
/* Find a field in the action's argument struct (if the action has one). */
2803 static struct field *
2804 action_field_find(struct action *a, const char *name)
2806 return a->st ? struct_type_field_find(a->st, name) : NULL;
/* Parse a "t.FIELD" action-argument reference into its field. */
2809 static struct field *
2810 action_field_parse(struct action *action, const char *name)
2812 if (name[0] != 't' || name[1] != '.')
2815 return action_field_find(action, &name[2]);
/*
 * Register an action: validate the name and (optional) argument struct
 * type, translate its instructions, then add it to the action list.
 */
2819 rte_swx_pipeline_action_config(struct rte_swx_pipeline *p,
2821 const char *args_struct_type_name,
2822 const char **instructions,
2823 uint32_t n_instructions)
2825 struct struct_type *args_struct_type;
2831 CHECK_NAME(name, EINVAL);
2832 CHECK(!action_find(p, name), EEXIST);
/* Argument struct type is optional: actions may take no arguments. */
2834 if (args_struct_type_name) {
2835 CHECK_NAME(args_struct_type_name, EINVAL);
2836 args_struct_type = struct_type_find(p, args_struct_type_name);
2837 CHECK(args_struct_type, EINVAL);
2839 args_struct_type = NULL;
2842 /* Node allocation. */
2843 a = calloc(1, sizeof(struct action));
2846 /* Node initialization. */
2847 strcpy(a->name, name);
2848 a->st = args_struct_type;
2849 a->id = p->n_actions;
2851 /* Instruction translation. */
2852 err = instruction_config(p, a, instructions, n_instructions);
2858 /* Node add to tailq. */
2859 TAILQ_INSERT_TAIL(&p->actions, a, node);
/* Build: per-action instruction pointer array indexed by action ID. */
2866 action_build(struct rte_swx_pipeline *p)
2868 struct action *action;
2870 p->action_instructions = calloc(p->n_actions,
2871 sizeof(struct instruction *));
2872 CHECK(p->action_instructions, ENOMEM);
2874 TAILQ_FOREACH(action, &p->actions, node)
2875 p->action_instructions[action->id] = action->instructions;
/* Undo action_build(). */
2881 action_build_free(struct rte_swx_pipeline *p)
2883 free(p->action_instructions);
2884 p->action_instructions = NULL;
/* Free all registered actions and their translated instructions. */
2888 action_free(struct rte_swx_pipeline *p)
2890 action_build_free(p);
2893 struct action *action;
2895 action = TAILQ_FIRST(&p->actions);
2899 TAILQ_REMOVE(&p->actions, action, node);
2900 free(action->instructions);
/* Look up a table type by name; NULL when not found. */
2908 static struct table_type *
2909 table_type_find(struct rte_swx_pipeline *p, const char *name)
2911 struct table_type *elem;
2913 TAILQ_FOREACH(elem, &p->table_types, node)
2914 if (strcmp(elem->name, name) == 0)
/*
 * Pick a table type for the given match type: prefer the recommended
 * type when its match type fits, otherwise fall back to the first
 * registered type with the right match type.
 */
2920 static struct table_type *
2921 table_type_resolve(struct rte_swx_pipeline *p,
2922 const char *recommended_type_name,
2923 enum rte_swx_table_match_type match_type)
2925 struct table_type *elem;
2927 /* Only consider the recommended type if the match type is correct. */
2928 if (recommended_type_name)
2929 TAILQ_FOREACH(elem, &p->table_types, node)
2930 if (!strcmp(elem->name, recommended_type_name) &&
2931 (elem->match_type == match_type))
2934 /* Ignore the recommended type and get the first element with this match
2937 TAILQ_FOREACH(elem, &p->table_types, node)
2938 if (elem->match_type == match_type)
/* Look up a table by name; NULL when not found. */
2944 static struct table *
2945 table_find(struct rte_swx_pipeline *p, const char *name)
2949 TAILQ_FOREACH(elem, &p->tables, node)
2950 if (strcmp(elem->name, name) == 0)
/* Look up a table by numeric ID; NULL when not found. */
2956 static struct table *
2957 table_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
2959 struct table *table = NULL;
2961 TAILQ_FOREACH(table, &p->tables, node)
2962 if (table->id == id)
/*
 * Register a table implementation: the ops vector must provide at least
 * the create, lookup (lkp) and free callbacks.
 */
2969 rte_swx_pipeline_table_type_register(struct rte_swx_pipeline *p,
2971 enum rte_swx_table_match_type match_type,
2972 struct rte_swx_table_ops *ops)
2974 struct table_type *elem;
2978 CHECK_NAME(name, EINVAL);
2979 CHECK(!table_type_find(p, name), EEXIST);
2982 CHECK(ops->create, EINVAL);
2983 CHECK(ops->lkp, EINVAL);
2984 CHECK(ops->free, EINVAL);
2986 /* Node allocation. */
2987 elem = calloc(1, sizeof(struct table_type));
2988 CHECK(elem, ENOMEM);
2990 /* Node initialization: copy the ops by value. */
2991 strcpy(elem->name, name);
2992 elem->match_type = match_type;
2993 memcpy(&elem->ops, ops, sizeof(*ops));
2995 /* Node add to tailq. */
2996 TAILQ_INSERT_TAIL(&p->table_types, elem, node);
/*
 * Derive the table match type from its field list: all fields exact =>
 * EXACT; exact fields with a single trailing LPM field => LPM; anything
 * else => WILDCARD.
 */
3001 static enum rte_swx_table_match_type
3002 table_match_type_resolve(struct rte_swx_match_field_params *fields,
3007 for (i = 0; i < n_fields; i++)
3008 if (fields[i].match_type != RTE_SWX_TABLE_MATCH_EXACT)
3012 return RTE_SWX_TABLE_MATCH_EXACT;
/* i is the index of the first non-exact field after the loop. */
3014 if ((i == n_fields - 1) &&
3015 (fields[i].match_type == RTE_SWX_TABLE_MATCH_LPM))
3016 return RTE_SWX_TABLE_MATCH_LPM;
3018 return RTE_SWX_TABLE_MATCH_WILDCARD;
/*
 * Add a match-action table to the pipeline: validate match fields (all
 * from one header or all from meta-data, increasing offsets), validate
 * the action list and default action, resolve the table type, then
 * allocate and register the table node.
 */
3022 rte_swx_pipeline_table_config(struct rte_swx_pipeline *p,
3024 struct rte_swx_pipeline_table_params *params,
3025 const char *recommended_table_type_name,
3029 struct table_type *type;
3031 struct action *default_action;
3032 struct header *header = NULL;
3034 uint32_t offset_prev = 0, action_data_size_max = 0, i;
3038 CHECK_NAME(name, EINVAL);
3039 CHECK(!table_find(p, name), EEXIST);
3041 CHECK(params, EINVAL);
/* Match field checks: a keyless table (n_fields == 0) is allowed. */
3044 CHECK(!params->n_fields || params->fields, EINVAL);
3045 for (i = 0; i < params->n_fields; i++) {
3046 struct rte_swx_match_field_params *field = &params->fields[i];
3048 struct field *hf, *mf;
3051 CHECK_NAME(field->name, EINVAL);
/* Each field must resolve as a header field or a meta-data field. */
3053 hf = header_field_parse(p, field->name, &h);
3054 mf = metadata_field_parse(p, field->name);
3055 CHECK(hf || mf, EINVAL);
3057 offset = hf ? hf->offset : mf->offset;
/* First field fixes the field domain (header vs meta-data). */
3060 is_header = hf ? 1 : 0;
3061 header = hf ? h : NULL;
3062 offset_prev = offset;
/* All further fields must stay in the same header/meta domain and be
 * listed in strictly increasing offset order. */
3067 CHECK((is_header && hf && (h->id == header->id)) ||
3068 (!is_header && mf), EINVAL);
3070 CHECK(offset > offset_prev, EINVAL);
3071 offset_prev = offset;
3074 /* Action checks. */
3075 CHECK(params->n_actions, EINVAL);
3076 CHECK(params->action_names, EINVAL);
3077 for (i = 0; i < params->n_actions; i++) {
3078 const char *action_name = params->action_names[i];
3080 uint32_t action_data_size;
3082 CHECK(action_name, EINVAL);
3084 a = action_find(p, action_name);
3087 action_data_size = a->st ? a->st->n_bits / 8 : 0;
3088 if (action_data_size > action_data_size_max)
3089 action_data_size_max = action_data_size;
3092 CHECK(params->default_action_name, EINVAL);
/* Fix: search this table's own action list (params->n_actions entries).
 * The previous bound p->n_actions could walk past the end of
 * params->action_names[] (out-of-bounds read) and break the membership
 * check against params->n_actions below. */
3093 for (i = 0; i < params->n_actions; i++)
3094 if (!strcmp(params->action_names[i],
3095 params->default_action_name))
3097 CHECK(i < params->n_actions, EINVAL);
3098 default_action = action_find(p, params->default_action_name);
/* Fix: default action data must be supplied if and only if the default
 * action has arguments. The old check let "args but no data" through,
 * which later reached memcpy() from a NULL source pointer. */
3099 CHECK((default_action->st && params->default_action_data) ||
3100 (!default_action->st && !params->default_action_data), EINVAL);
3102 /* Table type checks. */
3103 if (params->n_fields) {
3104 enum rte_swx_table_match_type match_type;
3106 match_type = table_match_type_resolve(params->fields,
3108 type = table_type_resolve(p,
3109 recommended_table_type_name,
3111 CHECK(type, EINVAL);
3116 /* Memory allocation. */
3117 t = calloc(1, sizeof(struct table));
3120 t->fields = calloc(params->n_fields, sizeof(struct match_field));
3126 t->actions = calloc(params->n_actions, sizeof(struct action *));
3133 if (action_data_size_max) {
3134 t->default_action_data = calloc(1, action_data_size_max);
3135 if (!t->default_action_data) {
3143 /* Node initialization. */
3144 strcpy(t->name, name);
3145 if (args && args[0])
3146 strcpy(t->args, args);
3149 for (i = 0; i < params->n_fields; i++) {
3150 struct rte_swx_match_field_params *field = &params->fields[i];
3151 struct match_field *f = &t->fields[i];
3153 f->match_type = field->match_type;
/* Re-resolve each match field into the domain established above. */
3154 f->field = is_header ?
3155 header_field_parse(p, field->name, NULL) :
3156 metadata_field_parse(p, field->name);
3158 t->n_fields = params->n_fields;
3159 t->is_header = is_header;
3162 for (i = 0; i < params->n_actions; i++)
3163 t->actions[i] = action_find(p, params->action_names[i]);
3164 t->default_action = default_action;
/* Guarded by the CHECK above: data is non-NULL whenever st is set. */
3165 if (default_action->st)
3166 memcpy(t->default_action_data,
3167 params->default_action_data,
3168 default_action->st->n_bits / 8);
3169 t->n_actions = params->n_actions;
3170 t->default_action_is_const = params->default_action_is_const;
3171 t->action_data_size_max = action_data_size_max;
3174 t->id = p->n_tables;
3176 /* Node add to tailq. */
3177 TAILQ_INSERT_TAIL(&p->tables, t, node);
/*
 * Build the low-level rte_swx_table creation parameters for one table:
 * key offset/size spanning first..last match field, a byte mask that
 * selects the match fields, and the maximum action data size.
 */
3183 static struct rte_swx_table_params *
3184 table_params_get(struct table *table)
3186 struct rte_swx_table_params *params;
3187 struct field *first, *last;
3189 uint32_t key_size, key_offset, action_data_size, i;
3191 /* Memory allocation. */
3192 params = calloc(1, sizeof(struct rte_swx_table_params));
3196 /* Key offset and size. */
3197 first = table->fields[0].field;
3198 last = table->fields[table->n_fields - 1].field;
3199 key_offset = first->offset / 8;
3200 key_size = (last->offset + last->n_bits - first->offset) / 8;
3202 /* Memory allocation. */
3203 key_mask = calloc(1, key_size);
/* Mark the bytes of each match field; gaps between fields stay zero
 * (masked out of the lookup). */
3210 for (i = 0; i < table->n_fields; i++) {
3211 struct field *f = table->fields[i].field;
3212 uint32_t start = (f->offset - first->offset) / 8;
3213 size_t size = f->n_bits / 8;
3215 memset(&key_mask[start], 0xFF, size);
3218 /* Action data size. */
3219 action_data_size = 0;
3220 for (i = 0; i < table->n_actions; i++) {
3221 struct action *action = table->actions[i];
3222 uint32_t ads = action->st ? action->st->n_bits / 8 : 0;
3224 if (ads > action_data_size)
3225 action_data_size = ads;
/* Fill in: key_mask ownership transfers to params (key_mask0). */
3229 params->match_type = table->type->match_type;
3230 params->key_size = key_size;
3231 params->key_offset = key_offset;
3232 params->key_mask0 = key_mask;
3233 params->action_data_size = action_data_size;
3234 params->n_keys_max = table->size;
/* Free a params object produced by table_params_get(). */
3240 table_params_free(struct rte_swx_table_params *params)
3245 free(params->key_mask0);
/*
 * Build: create the run-time table state array (one entry per table):
 * the table object plus the default action data and ID.
 */
3250 table_state_build(struct rte_swx_pipeline *p)
3252 struct table *table;
3254 p->table_state = calloc(p->n_tables,
3255 sizeof(struct rte_swx_table_state));
3256 CHECK(p->table_state, ENOMEM);
3258 TAILQ_FOREACH(table, &p->tables, node) {
3259 struct rte_swx_table_state *ts = &p->table_state[table->id];
3262 struct rte_swx_table_params *params;
3265 params = table_params_get(table);
3266 CHECK(params, ENOMEM);
3268 ts->obj = table->type->ops.create(params,
/* params (incl. its key mask) are only needed during create(). */
3273 table_params_free(params);
3274 CHECK(ts->obj, ENODEV);
3277 /* ts->default_action_data. */
3278 if (table->action_data_size_max) {
3279 ts->default_action_data =
3280 malloc(table->action_data_size_max);
3281 CHECK(ts->default_action_data, ENOMEM);
3283 memcpy(ts->default_action_data,
3284 table->default_action_data,
3285 table->action_data_size_max);
3288 /* ts->default_action_id. */
3289 ts->default_action_id = table->default_action->id;
/* Undo table_state_build(): free table objects and default action data. */
3296 table_state_build_free(struct rte_swx_pipeline *p)
3300 if (!p->table_state)
3303 for (i = 0; i < p->n_tables; i++) {
3304 struct rte_swx_table_state *ts = &p->table_state[i];
3305 struct table *table = table_find_by_id(p, i);
3308 if (table->type && ts->obj)
3309 table->type->ops.free(ts->obj);
3311 /* ts->default_action_data. */
3312 free(ts->default_action_data);
3315 free(p->table_state);
3316 p->table_state = NULL;
/* Free the table state (wrapper over table_state_build_free()). */
3320 table_state_free(struct rte_swx_pipeline *p)
3322 table_state_build_free(p);
/* Lookup stub for keyless tables: completes immediately with no match. */
3326 table_stub_lkp(void *table __rte_unused,
3327 void *mailbox __rte_unused,
3328 uint8_t **key __rte_unused,
3329 uint64_t *action_id __rte_unused,
3330 uint8_t **action_data __rte_unused,
3334 return 1; /* DONE. */
/*
 * Build: per-thread table runtime (lookup callback, mailbox, key
 * pointer). Tables without a type (keyless) get the stub lookup.
 */
3338 table_build(struct rte_swx_pipeline *p)
3342 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
3343 struct thread *t = &p->threads[i];
3344 struct table *table;
3346 t->tables = calloc(p->n_tables, sizeof(struct table_runtime));
3347 CHECK(t->tables, ENOMEM);
3349 TAILQ_FOREACH(table, &p->tables, node) {
3350 struct table_runtime *r = &t->tables[table->id];
3355 size = table->type->ops.mailbox_size_get();
3358 r->func = table->type->ops.lkp;
3362 r->mailbox = calloc(1, size);
3363 CHECK(r->mailbox, ENOMEM);
/* Key points at the match header struct or the meta-data struct. */
3367 r->key = table->is_header ?
3368 &t->structs[table->header->struct_id] :
3369 &t->structs[p->metadata_struct_id];
3371 r->func = table_stub_lkp;
/* Undo table_build(): free per-thread table runtimes and mailboxes. */
3380 table_build_free(struct rte_swx_pipeline *p)
3384 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
3385 struct thread *t = &p->threads[i];
3391 for (j = 0; j < p->n_tables; j++) {
3392 struct table_runtime *r = &t->tables[j];
/* Free all tables, then all registered table types. */
3403 table_free(struct rte_swx_pipeline *p)
3405 table_build_free(p);
3411 elem = TAILQ_FIRST(&p->tables);
3415 TAILQ_REMOVE(&p->tables, elem, node);
3417 free(elem->actions);
3418 free(elem->default_action_data);
/* Table types. */
3424 struct table_type *elem;
3426 elem = TAILQ_FIRST(&p->table_types);
3430 TAILQ_REMOVE(&p->table_types, elem, node);
/*
 * Create an empty pipeline object on the given NUMA node and initialize
 * all its object lists.
 */
3439 rte_swx_pipeline_config(struct rte_swx_pipeline **p, int numa_node)
3441 struct rte_swx_pipeline *pipeline;
3443 /* Check input parameters. */
3446 /* Memory allocation. */
3447 pipeline = calloc(1, sizeof(struct rte_swx_pipeline));
3448 CHECK(pipeline, ENOMEM);
3450 /* Initialization. */
3451 TAILQ_INIT(&pipeline->struct_types);
3452 TAILQ_INIT(&pipeline->port_in_types);
3453 TAILQ_INIT(&pipeline->ports_in);
3454 TAILQ_INIT(&pipeline->port_out_types);
3455 TAILQ_INIT(&pipeline->ports_out);
3456 TAILQ_INIT(&pipeline->extern_types);
3457 TAILQ_INIT(&pipeline->extern_objs);
3458 TAILQ_INIT(&pipeline->extern_funcs);
3459 TAILQ_INIT(&pipeline->headers);
3460 TAILQ_INIT(&pipeline->actions);
3461 TAILQ_INIT(&pipeline->table_types);
3462 TAILQ_INIT(&pipeline->tables);
3464 pipeline->n_structs = 1; /* Struct 0 is reserved for action_data. */
3465 pipeline->numa_node = numa_node;
/* Free a pipeline and everything it owns. */
3472 rte_swx_pipeline_free(struct rte_swx_pipeline *p)
3477 free(p->instructions);
3479 table_state_free(p);
3484 extern_func_free(p);
/*
 * Install the pipeline-level instruction program and point every
 * thread's instruction pointer at its start.
 */
3494 rte_swx_pipeline_instructions_config(struct rte_swx_pipeline *p,
3495 const char **instructions,
3496 uint32_t n_instructions)
/* NULL action => this is the pipeline entry program. */
3501 err = instruction_config(p, NULL, instructions, n_instructions);
3505 /* Thread instruction pointer reset. */
3506 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
3507 struct thread *t = &p->threads[i];
3509 thread_ip_reset(p, t);
/*
 * Build the pipeline: freeze configuration and construct all run-time
 * data structures; on any failure, tear down everything built so far.
 * Can only be done once (build_done guard).
 */
3516 rte_swx_pipeline_build(struct rte_swx_pipeline *p)
3521 CHECK(p->build_done == 0, EEXIST);
3523 status = port_in_build(p);
3527 status = port_out_build(p);
3531 status = struct_build(p);
3535 status = extern_obj_build(p);
3539 status = extern_func_build(p);
3543 status = header_build(p);
3547 status = metadata_build(p);
3551 status = action_build(p);
3555 status = table_build(p);
3559 status = table_state_build(p);
/* Error path: free in reverse build order. */
3567 table_state_build_free(p);
3568 table_build_free(p);
3569 action_build_free(p);
3570 metadata_build_free(p);
3571 header_build_free(p);
3572 extern_func_build_free(p);
3573 extern_obj_build_free(p);
3574 port_out_build_free(p);
3575 port_in_build_free(p);
3576 struct_build_free(p);
/* Data-plane fast path: execute n_instructions pipeline steps. */
3582 rte_swx_pipeline_run(struct rte_swx_pipeline *p, uint32_t n_instructions)
3586 for (i = 0; i < n_instructions; i++)
/* Expose the run-time table state array (valid only after build). */
3594 rte_swx_pipeline_table_state_get(struct rte_swx_pipeline *p,
3595 struct rte_swx_table_state **table_state)
3597 if (!p || !table_state || !p->build_done)
3600 *table_state = p->table_state;
3605 rte_swx_pipeline_table_state_set(struct rte_swx_pipeline *p,
3606 struct rte_swx_table_state *table_state)
3608 if (!p || !table_state || !p->build_done)
3611 p->table_state = table_state;