1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Intel Corporation
11 #include <rte_common.h>
12 #include <rte_prefetch.h>
13 #include <rte_byteorder.h>
15 #include "rte_swx_pipeline.h"
16 #include "rte_swx_ctl.h"
18 #define CHECK(condition, err_code) \
24 #define CHECK_NAME(name, err_code) \
25 CHECK((name) && (name)[0], err_code)
32 #define TRACE(...) printf(__VA_ARGS__)
37 #define ntoh64(x) rte_be_to_cpu_64(x)
38 #define hton64(x) rte_cpu_to_be_64(x)
44 char name[RTE_SWX_NAME_SIZE];
50 TAILQ_ENTRY(struct_type) node;
51 char name[RTE_SWX_NAME_SIZE];
57 TAILQ_HEAD(struct_type_tailq, struct_type);
63 TAILQ_ENTRY(port_in_type) node;
64 char name[RTE_SWX_NAME_SIZE];
65 struct rte_swx_port_in_ops ops;
68 TAILQ_HEAD(port_in_type_tailq, port_in_type);
71 TAILQ_ENTRY(port_in) node;
72 struct port_in_type *type;
77 TAILQ_HEAD(port_in_tailq, port_in);
79 struct port_in_runtime {
80 rte_swx_port_in_pkt_rx_t pkt_rx;
87 struct port_out_type {
88 TAILQ_ENTRY(port_out_type) node;
89 char name[RTE_SWX_NAME_SIZE];
90 struct rte_swx_port_out_ops ops;
93 TAILQ_HEAD(port_out_type_tailq, port_out_type);
96 TAILQ_ENTRY(port_out) node;
97 struct port_out_type *type;
102 TAILQ_HEAD(port_out_tailq, port_out);
104 struct port_out_runtime {
105 rte_swx_port_out_pkt_tx_t pkt_tx;
106 rte_swx_port_out_flush_t flush;
113 struct extern_type_member_func {
114 TAILQ_ENTRY(extern_type_member_func) node;
115 char name[RTE_SWX_NAME_SIZE];
116 rte_swx_extern_type_member_func_t func;
120 TAILQ_HEAD(extern_type_member_func_tailq, extern_type_member_func);
123 TAILQ_ENTRY(extern_type) node;
124 char name[RTE_SWX_NAME_SIZE];
125 struct struct_type *mailbox_struct_type;
126 rte_swx_extern_type_constructor_t constructor;
127 rte_swx_extern_type_destructor_t destructor;
128 struct extern_type_member_func_tailq funcs;
132 TAILQ_HEAD(extern_type_tailq, extern_type);
135 TAILQ_ENTRY(extern_obj) node;
136 char name[RTE_SWX_NAME_SIZE];
137 struct extern_type *type;
143 TAILQ_HEAD(extern_obj_tailq, extern_obj);
145 #ifndef RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX
146 #define RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX 8
149 struct extern_obj_runtime {
152 rte_swx_extern_type_member_func_t funcs[RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX];
159 TAILQ_ENTRY(extern_func) node;
160 char name[RTE_SWX_NAME_SIZE];
161 struct struct_type *mailbox_struct_type;
162 rte_swx_extern_func_t func;
167 TAILQ_HEAD(extern_func_tailq, extern_func);
169 struct extern_func_runtime {
171 rte_swx_extern_func_t func;
178 TAILQ_ENTRY(header) node;
179 char name[RTE_SWX_NAME_SIZE];
180 struct struct_type *st;
185 TAILQ_HEAD(header_tailq, header);
187 struct header_runtime {
191 struct header_out_runtime {
201 /* Packet headers are always in Network Byte Order (NBO), i.e. big endian.
202 * Packet meta-data fields are always assumed to be in Host Byte Order (HBO).
203 * Table entry fields can be in either NBO or HBO; they are assumed to be in HBO
204 * when transferred to packet meta-data and in NBO when transferred to packet
208 /* Notation conventions:
209 * -Header field: H = h.header.field (dst/src)
210 * -Meta-data field: M = m.field (dst/src)
211 * -Extern object mailbox field: E = e.field (dst/src)
212 * -Extern function mailbox field: F = f.field (dst/src)
213 * -Table action data field: T = t.field (src only)
214 * -Immediate value: I = 32-bit unsigned value (src only)
217 enum instruction_type {
224 /* extract h.header */
245 /* validate h.header */
248 /* invalidate h.header */
249 INSTR_HDR_INVALIDATE,
253 * dst = HMEF, src = HMEFTI
255 INSTR_MOV, /* dst = MEF, src = MEFT */
256 INSTR_MOV_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
257 INSTR_MOV_I, /* dst = HMEF, src = I */
259 /* dma h.header t.field
260 * memcpy(h.header, t.field, sizeof(h.header))
273 * dst = HMEF, src = HMEFTI
275 INSTR_ALU_ADD, /* dst = MEF, src = MEF */
276 INSTR_ALU_ADD_MH, /* dst = MEF, src = H */
277 INSTR_ALU_ADD_HM, /* dst = H, src = MEF */
278 INSTR_ALU_ADD_HH, /* dst = H, src = H */
279 INSTR_ALU_ADD_MI, /* dst = MEF, src = I */
280 INSTR_ALU_ADD_HI, /* dst = H, src = I */
284 * dst = HMEF, src = HMEFTI
286 INSTR_ALU_SUB, /* dst = MEF, src = MEF */
287 INSTR_ALU_SUB_MH, /* dst = MEF, src = H */
288 INSTR_ALU_SUB_HM, /* dst = H, src = MEF */
289 INSTR_ALU_SUB_HH, /* dst = H, src = H */
290 INSTR_ALU_SUB_MI, /* dst = MEF, src = I */
291 INSTR_ALU_SUB_HI, /* dst = H, src = I */
294 * dst = dst '+ src[0:1] '+ src[2:3] + ...
295 * dst = H, src = {H, h.header}
297 INSTR_ALU_CKADD_FIELD, /* src = H */
298 INSTR_ALU_CKADD_STRUCT20, /* src = h.header, with sizeof(header) = 20 */
299 INSTR_ALU_CKADD_STRUCT, /* src = h.header, with any sizeof(header) */
305 INSTR_ALU_CKSUB_FIELD,
309 * dst = HMEF, src = HMEFTI
311 INSTR_ALU_AND, /* dst = MEF, src = MEFT */
312 INSTR_ALU_AND_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
313 INSTR_ALU_AND_I, /* dst = HMEF, src = I */
316 struct instr_operand {
331 uint8_t header_id[8];
332 uint8_t struct_id[8];
337 struct instr_hdr_validity {
341 struct instr_dst_src {
342 struct instr_operand dst;
344 struct instr_operand src;
351 uint8_t header_id[8];
352 uint8_t struct_id[8];
363 enum instruction_type type;
366 struct instr_hdr_validity valid;
367 struct instr_dst_src mov;
368 struct instr_dma dma;
369 struct instr_dst_src alu;
373 struct instruction_data {
374 char label[RTE_SWX_NAME_SIZE];
375 char jmp_label[RTE_SWX_NAME_SIZE];
376 uint32_t n_users; /* user = jmp instruction to this instruction. */
384 TAILQ_ENTRY(action) node;
385 char name[RTE_SWX_NAME_SIZE];
386 struct struct_type *st;
387 struct instruction *instructions;
388 uint32_t n_instructions;
392 TAILQ_HEAD(action_tailq, action);
398 TAILQ_ENTRY(table_type) node;
399 char name[RTE_SWX_NAME_SIZE];
400 enum rte_swx_table_match_type match_type;
401 struct rte_swx_table_ops ops;
404 TAILQ_HEAD(table_type_tailq, table_type);
407 enum rte_swx_table_match_type match_type;
412 TAILQ_ENTRY(table) node;
413 char name[RTE_SWX_NAME_SIZE];
414 char args[RTE_SWX_NAME_SIZE];
415 struct table_type *type; /* NULL when n_fields == 0. */
418 struct match_field *fields;
420 int is_header; /* Only valid when n_fields > 0. */
421 struct header *header; /* Only valid when n_fields > 0. */
424 struct action **actions;
425 struct action *default_action;
426 uint8_t *default_action_data;
428 int default_action_is_const;
429 uint32_t action_data_size_max;
435 TAILQ_HEAD(table_tailq, table);
437 struct table_runtime {
438 rte_swx_table_lookup_t func;
448 struct rte_swx_pkt pkt;
454 /* Packet headers. */
455 struct header_runtime *headers; /* Extracted or generated headers. */
456 struct header_out_runtime *headers_out; /* Emitted headers. */
457 uint8_t *header_storage;
458 uint8_t *header_out_storage;
459 uint64_t valid_headers;
460 uint32_t n_headers_out;
462 /* Packet meta-data. */
466 struct table_runtime *tables;
467 struct rte_swx_table_state *table_state;
469 int hit; /* 0 = Miss, 1 = Hit. */
471 /* Extern objects and functions. */
472 struct extern_obj_runtime *extern_objs;
473 struct extern_func_runtime *extern_funcs;
476 struct instruction *ip;
477 struct instruction *ret;
480 #define MASK64_BIT_GET(mask, pos) ((mask) & (1LLU << (pos)))
481 #define MASK64_BIT_SET(mask, pos) ((mask) | (1LLU << (pos)))
482 #define MASK64_BIT_CLR(mask, pos) ((mask) & ~(1LLU << (pos)))
484 #define ALU(thread, ip, operator) \
486 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
487 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
488 uint64_t dst64 = *dst64_ptr; \
489 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
490 uint64_t dst = dst64 & dst64_mask; \
492 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
493 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
494 uint64_t src64 = *src64_ptr; \
495 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
496 uint64_t src = src64 & src64_mask; \
498 uint64_t result = dst operator src; \
500 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
503 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
505 #define ALU_S(thread, ip, operator) \
507 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
508 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
509 uint64_t dst64 = *dst64_ptr; \
510 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
511 uint64_t dst = dst64 & dst64_mask; \
513 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
514 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
515 uint64_t src64 = *src64_ptr; \
516 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
518 uint64_t result = dst operator src; \
520 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
525 #define ALU_HM(thread, ip, operator) \
527 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
528 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
529 uint64_t dst64 = *dst64_ptr; \
530 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
531 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
533 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
534 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
535 uint64_t src64 = *src64_ptr; \
536 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
537 uint64_t src = src64 & src64_mask; \
539 uint64_t result = dst operator src; \
540 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
542 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
545 #define ALU_HH(thread, ip, operator) \
547 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
548 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
549 uint64_t dst64 = *dst64_ptr; \
550 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
551 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
553 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
554 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
555 uint64_t src64 = *src64_ptr; \
556 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
558 uint64_t result = dst operator src; \
559 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
561 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
573 #define ALU_I(thread, ip, operator) \
575 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
576 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
577 uint64_t dst64 = *dst64_ptr; \
578 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
579 uint64_t dst = dst64 & dst64_mask; \
581 uint64_t src = (ip)->alu.src_val; \
583 uint64_t result = dst operator src; \
585 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
590 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
592 #define ALU_HI(thread, ip, operator) \
594 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
595 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
596 uint64_t dst64 = *dst64_ptr; \
597 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
598 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
600 uint64_t src = (ip)->alu.src_val; \
602 uint64_t result = dst operator src; \
603 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
605 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
614 #define MOV(thread, ip) \
616 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
617 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
618 uint64_t dst64 = *dst64_ptr; \
619 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
621 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
622 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
623 uint64_t src64 = *src64_ptr; \
624 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \
625 uint64_t src = src64 & src64_mask; \
627 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
630 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
632 #define MOV_S(thread, ip) \
634 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
635 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
636 uint64_t dst64 = *dst64_ptr; \
637 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
639 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
640 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
641 uint64_t src64 = *src64_ptr; \
642 uint64_t src = ntoh64(src64) >> (64 - (ip)->mov.src.n_bits); \
644 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
653 #define MOV_I(thread, ip) \
655 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
656 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
657 uint64_t dst64 = *dst64_ptr; \
658 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
660 uint64_t src = (ip)->mov.src_val; \
662 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
665 #define METADATA_READ(thread, offset, n_bits) \
667 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
668 uint64_t m64 = *m64_ptr; \
669 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
673 #define METADATA_WRITE(thread, offset, n_bits, value) \
675 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
676 uint64_t m64 = *m64_ptr; \
677 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
679 uint64_t m_new = value; \
681 *m64_ptr = (m64 & ~m64_mask) | (m_new & m64_mask); \
684 #ifndef RTE_SWX_PIPELINE_THREADS_MAX
685 #define RTE_SWX_PIPELINE_THREADS_MAX 16
688 struct rte_swx_pipeline {
689 struct struct_type_tailq struct_types;
690 struct port_in_type_tailq port_in_types;
691 struct port_in_tailq ports_in;
692 struct port_out_type_tailq port_out_types;
693 struct port_out_tailq ports_out;
694 struct extern_type_tailq extern_types;
695 struct extern_obj_tailq extern_objs;
696 struct extern_func_tailq extern_funcs;
697 struct header_tailq headers;
698 struct struct_type *metadata_st;
699 uint32_t metadata_struct_id;
700 struct action_tailq actions;
701 struct table_type_tailq table_types;
702 struct table_tailq tables;
704 struct port_in_runtime *in;
705 struct port_out_runtime *out;
706 struct instruction **action_instructions;
707 struct rte_swx_table_state *table_state;
708 struct instruction *instructions;
709 struct thread threads[RTE_SWX_PIPELINE_THREADS_MAX];
713 uint32_t n_ports_out;
714 uint32_t n_extern_objs;
715 uint32_t n_extern_funcs;
721 uint32_t n_instructions;
729 static struct struct_type *
730 struct_type_find(struct rte_swx_pipeline *p, const char *name)
732 struct struct_type *elem;
734 TAILQ_FOREACH(elem, &p->struct_types, node)
735 if (strcmp(elem->name, name) == 0)
741 static struct field *
742 struct_type_field_find(struct struct_type *st, const char *name)
746 for (i = 0; i < st->n_fields; i++) {
747 struct field *f = &st->fields[i];
749 if (strcmp(f->name, name) == 0)
757 rte_swx_pipeline_struct_type_register(struct rte_swx_pipeline *p,
759 struct rte_swx_field_params *fields,
762 struct struct_type *st;
766 CHECK_NAME(name, EINVAL);
767 CHECK(fields, EINVAL);
768 CHECK(n_fields, EINVAL);
770 for (i = 0; i < n_fields; i++) {
771 struct rte_swx_field_params *f = &fields[i];
774 CHECK_NAME(f->name, EINVAL);
775 CHECK(f->n_bits, EINVAL);
776 CHECK(f->n_bits <= 64, EINVAL);
777 CHECK((f->n_bits & 7) == 0, EINVAL);
779 for (j = 0; j < i; j++) {
780 struct rte_swx_field_params *f_prev = &fields[j];
782 CHECK(strcmp(f->name, f_prev->name), EINVAL);
786 CHECK(!struct_type_find(p, name), EEXIST);
788 /* Node allocation. */
789 st = calloc(1, sizeof(struct struct_type));
792 st->fields = calloc(n_fields, sizeof(struct field));
798 /* Node initialization. */
799 strcpy(st->name, name);
800 for (i = 0; i < n_fields; i++) {
801 struct field *dst = &st->fields[i];
802 struct rte_swx_field_params *src = &fields[i];
804 strcpy(dst->name, src->name);
805 dst->n_bits = src->n_bits;
806 dst->offset = st->n_bits;
808 st->n_bits += src->n_bits;
810 st->n_fields = n_fields;
812 /* Node add to tailq. */
813 TAILQ_INSERT_TAIL(&p->struct_types, st, node);
819 struct_build(struct rte_swx_pipeline *p)
823 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
824 struct thread *t = &p->threads[i];
826 t->structs = calloc(p->n_structs, sizeof(uint8_t *));
827 CHECK(t->structs, ENOMEM);
834 struct_build_free(struct rte_swx_pipeline *p)
838 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
839 struct thread *t = &p->threads[i];
847 struct_free(struct rte_swx_pipeline *p)
849 struct_build_free(p);
853 struct struct_type *elem;
855 elem = TAILQ_FIRST(&p->struct_types);
859 TAILQ_REMOVE(&p->struct_types, elem, node);
868 static struct port_in_type *
869 port_in_type_find(struct rte_swx_pipeline *p, const char *name)
871 struct port_in_type *elem;
876 TAILQ_FOREACH(elem, &p->port_in_types, node)
877 if (strcmp(elem->name, name) == 0)
884 rte_swx_pipeline_port_in_type_register(struct rte_swx_pipeline *p,
886 struct rte_swx_port_in_ops *ops)
888 struct port_in_type *elem;
891 CHECK_NAME(name, EINVAL);
893 CHECK(ops->create, EINVAL);
894 CHECK(ops->free, EINVAL);
895 CHECK(ops->pkt_rx, EINVAL);
896 CHECK(ops->stats_read, EINVAL);
898 CHECK(!port_in_type_find(p, name), EEXIST);
900 /* Node allocation. */
901 elem = calloc(1, sizeof(struct port_in_type));
904 /* Node initialization. */
905 strcpy(elem->name, name);
906 memcpy(&elem->ops, ops, sizeof(*ops));
908 /* Node add to tailq. */
909 TAILQ_INSERT_TAIL(&p->port_in_types, elem, node);
914 static struct port_in *
915 port_in_find(struct rte_swx_pipeline *p, uint32_t port_id)
917 struct port_in *port;
919 TAILQ_FOREACH(port, &p->ports_in, node)
920 if (port->id == port_id)
927 rte_swx_pipeline_port_in_config(struct rte_swx_pipeline *p,
929 const char *port_type_name,
932 struct port_in_type *type = NULL;
933 struct port_in *port = NULL;
938 CHECK(!port_in_find(p, port_id), EINVAL);
940 CHECK_NAME(port_type_name, EINVAL);
941 type = port_in_type_find(p, port_type_name);
944 obj = type->ops.create(args);
947 /* Node allocation. */
948 port = calloc(1, sizeof(struct port_in));
951 /* Node initialization. */
956 /* Node add to tailq. */
957 TAILQ_INSERT_TAIL(&p->ports_in, port, node);
958 if (p->n_ports_in < port_id + 1)
959 p->n_ports_in = port_id + 1;
965 port_in_build(struct rte_swx_pipeline *p)
967 struct port_in *port;
970 CHECK(p->n_ports_in, EINVAL);
971 CHECK(rte_is_power_of_2(p->n_ports_in), EINVAL);
973 for (i = 0; i < p->n_ports_in; i++)
974 CHECK(port_in_find(p, i), EINVAL);
976 p->in = calloc(p->n_ports_in, sizeof(struct port_in_runtime));
977 CHECK(p->in, ENOMEM);
979 TAILQ_FOREACH(port, &p->ports_in, node) {
980 struct port_in_runtime *in = &p->in[port->id];
982 in->pkt_rx = port->type->ops.pkt_rx;
990 port_in_build_free(struct rte_swx_pipeline *p)
997 port_in_free(struct rte_swx_pipeline *p)
999 port_in_build_free(p);
1003 struct port_in *port;
1005 port = TAILQ_FIRST(&p->ports_in);
1009 TAILQ_REMOVE(&p->ports_in, port, node);
1010 port->type->ops.free(port->obj);
1014 /* Input port types. */
1016 struct port_in_type *elem;
1018 elem = TAILQ_FIRST(&p->port_in_types);
1022 TAILQ_REMOVE(&p->port_in_types, elem, node);
1030 static struct port_out_type *
1031 port_out_type_find(struct rte_swx_pipeline *p, const char *name)
1033 struct port_out_type *elem;
1038 TAILQ_FOREACH(elem, &p->port_out_types, node)
1039 if (!strcmp(elem->name, name))
1046 rte_swx_pipeline_port_out_type_register(struct rte_swx_pipeline *p,
1048 struct rte_swx_port_out_ops *ops)
1050 struct port_out_type *elem;
1053 CHECK_NAME(name, EINVAL);
1055 CHECK(ops->create, EINVAL);
1056 CHECK(ops->free, EINVAL);
1057 CHECK(ops->pkt_tx, EINVAL);
1058 CHECK(ops->stats_read, EINVAL);
1060 CHECK(!port_out_type_find(p, name), EEXIST);
1062 /* Node allocation. */
1063 elem = calloc(1, sizeof(struct port_out_type));
1064 CHECK(elem, ENOMEM);
1066 /* Node initialization. */
1067 strcpy(elem->name, name);
1068 memcpy(&elem->ops, ops, sizeof(*ops));
1070 /* Node add to tailq. */
1071 TAILQ_INSERT_TAIL(&p->port_out_types, elem, node);
1076 static struct port_out *
1077 port_out_find(struct rte_swx_pipeline *p, uint32_t port_id)
1079 struct port_out *port;
1081 TAILQ_FOREACH(port, &p->ports_out, node)
1082 if (port->id == port_id)
1089 rte_swx_pipeline_port_out_config(struct rte_swx_pipeline *p,
1091 const char *port_type_name,
1094 struct port_out_type *type = NULL;
1095 struct port_out *port = NULL;
1100 CHECK(!port_out_find(p, port_id), EINVAL);
1102 CHECK_NAME(port_type_name, EINVAL);
1103 type = port_out_type_find(p, port_type_name);
1104 CHECK(type, EINVAL);
1106 obj = type->ops.create(args);
1109 /* Node allocation. */
1110 port = calloc(1, sizeof(struct port_out));
1111 CHECK(port, ENOMEM);
1113 /* Node initialization. */
1118 /* Node add to tailq. */
1119 TAILQ_INSERT_TAIL(&p->ports_out, port, node);
1120 if (p->n_ports_out < port_id + 1)
1121 p->n_ports_out = port_id + 1;
1127 port_out_build(struct rte_swx_pipeline *p)
1129 struct port_out *port;
1132 CHECK(p->n_ports_out, EINVAL);
1134 for (i = 0; i < p->n_ports_out; i++)
1135 CHECK(port_out_find(p, i), EINVAL);
1137 p->out = calloc(p->n_ports_out, sizeof(struct port_out_runtime));
1138 CHECK(p->out, ENOMEM);
1140 TAILQ_FOREACH(port, &p->ports_out, node) {
1141 struct port_out_runtime *out = &p->out[port->id];
1143 out->pkt_tx = port->type->ops.pkt_tx;
1144 out->flush = port->type->ops.flush;
1145 out->obj = port->obj;
1152 port_out_build_free(struct rte_swx_pipeline *p)
1159 port_out_free(struct rte_swx_pipeline *p)
1161 port_out_build_free(p);
1165 struct port_out *port;
1167 port = TAILQ_FIRST(&p->ports_out);
1171 TAILQ_REMOVE(&p->ports_out, port, node);
1172 port->type->ops.free(port->obj);
1176 /* Output port types. */
1178 struct port_out_type *elem;
1180 elem = TAILQ_FIRST(&p->port_out_types);
1184 TAILQ_REMOVE(&p->port_out_types, elem, node);
1192 static struct extern_type *
1193 extern_type_find(struct rte_swx_pipeline *p, const char *name)
1195 struct extern_type *elem;
1197 TAILQ_FOREACH(elem, &p->extern_types, node)
1198 if (strcmp(elem->name, name) == 0)
1204 static struct extern_type_member_func *
1205 extern_type_member_func_find(struct extern_type *type, const char *name)
1207 struct extern_type_member_func *elem;
1209 TAILQ_FOREACH(elem, &type->funcs, node)
1210 if (strcmp(elem->name, name) == 0)
1216 static struct extern_obj *
1217 extern_obj_find(struct rte_swx_pipeline *p, const char *name)
1219 struct extern_obj *elem;
1221 TAILQ_FOREACH(elem, &p->extern_objs, node)
1222 if (strcmp(elem->name, name) == 0)
1228 static struct field *
1229 extern_obj_mailbox_field_parse(struct rte_swx_pipeline *p,
1231 struct extern_obj **object)
1233 struct extern_obj *obj;
1235 char *obj_name, *field_name;
1237 if ((name[0] != 'e') || (name[1] != '.'))
1240 obj_name = strdup(&name[2]);
1244 field_name = strchr(obj_name, '.');
1253 obj = extern_obj_find(p, obj_name);
1259 f = struct_type_field_find(obj->type->mailbox_struct_type, field_name);
1273 rte_swx_pipeline_extern_type_register(struct rte_swx_pipeline *p,
1275 const char *mailbox_struct_type_name,
1276 rte_swx_extern_type_constructor_t constructor,
1277 rte_swx_extern_type_destructor_t destructor)
1279 struct extern_type *elem;
1280 struct struct_type *mailbox_struct_type;
1284 CHECK_NAME(name, EINVAL);
1285 CHECK(!extern_type_find(p, name), EEXIST);
1287 CHECK_NAME(mailbox_struct_type_name, EINVAL);
1288 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
1289 CHECK(mailbox_struct_type, EINVAL);
1291 CHECK(constructor, EINVAL);
1292 CHECK(destructor, EINVAL);
1294 /* Node allocation. */
1295 elem = calloc(1, sizeof(struct extern_type));
1296 CHECK(elem, ENOMEM);
1298 /* Node initialization. */
1299 strcpy(elem->name, name);
1300 elem->mailbox_struct_type = mailbox_struct_type;
1301 elem->constructor = constructor;
1302 elem->destructor = destructor;
1303 TAILQ_INIT(&elem->funcs);
1305 /* Node add to tailq. */
1306 TAILQ_INSERT_TAIL(&p->extern_types, elem, node);
1312 rte_swx_pipeline_extern_type_member_func_register(struct rte_swx_pipeline *p,
1313 const char *extern_type_name,
1315 rte_swx_extern_type_member_func_t member_func)
1317 struct extern_type *type;
1318 struct extern_type_member_func *type_member;
1322 CHECK(extern_type_name, EINVAL);
1323 type = extern_type_find(p, extern_type_name);
1324 CHECK(type, EINVAL);
1325 CHECK(type->n_funcs < RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX, ENOSPC);
1327 CHECK(name, EINVAL);
1328 CHECK(!extern_type_member_func_find(type, name), EEXIST);
1330 CHECK(member_func, EINVAL);
1332 /* Node allocation. */
1333 type_member = calloc(1, sizeof(struct extern_type_member_func));
1334 CHECK(type_member, ENOMEM);
1336 /* Node initialization. */
1337 strcpy(type_member->name, name);
1338 type_member->func = member_func;
1339 type_member->id = type->n_funcs;
1341 /* Node add to tailq. */
1342 TAILQ_INSERT_TAIL(&type->funcs, type_member, node);
1349 rte_swx_pipeline_extern_object_config(struct rte_swx_pipeline *p,
1350 const char *extern_type_name,
1354 struct extern_type *type;
1355 struct extern_obj *obj;
1360 CHECK_NAME(extern_type_name, EINVAL);
1361 type = extern_type_find(p, extern_type_name);
1362 CHECK(type, EINVAL);
1364 CHECK_NAME(name, EINVAL);
1365 CHECK(!extern_obj_find(p, name), EEXIST);
1367 /* Node allocation. */
1368 obj = calloc(1, sizeof(struct extern_obj));
1371 /* Object construction. */
1372 obj_handle = type->constructor(args);
1378 /* Node initialization. */
1379 strcpy(obj->name, name);
1381 obj->obj = obj_handle;
1382 obj->struct_id = p->n_structs;
1383 obj->id = p->n_extern_objs;
1385 /* Node add to tailq. */
1386 TAILQ_INSERT_TAIL(&p->extern_objs, obj, node);
1394 extern_obj_build(struct rte_swx_pipeline *p)
1398 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1399 struct thread *t = &p->threads[i];
1400 struct extern_obj *obj;
1402 t->extern_objs = calloc(p->n_extern_objs,
1403 sizeof(struct extern_obj_runtime));
1404 CHECK(t->extern_objs, ENOMEM);
1406 TAILQ_FOREACH(obj, &p->extern_objs, node) {
1407 struct extern_obj_runtime *r =
1408 &t->extern_objs[obj->id];
1409 struct extern_type_member_func *func;
1410 uint32_t mailbox_size =
1411 obj->type->mailbox_struct_type->n_bits / 8;
1415 r->mailbox = calloc(1, mailbox_size);
1416 CHECK(r->mailbox, ENOMEM);
1418 TAILQ_FOREACH(func, &obj->type->funcs, node)
1419 r->funcs[func->id] = func->func;
1421 t->structs[obj->struct_id] = r->mailbox;
1429 extern_obj_build_free(struct rte_swx_pipeline *p)
1433 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1434 struct thread *t = &p->threads[i];
1437 if (!t->extern_objs)
1440 for (j = 0; j < p->n_extern_objs; j++) {
1441 struct extern_obj_runtime *r = &t->extern_objs[j];
1446 free(t->extern_objs);
1447 t->extern_objs = NULL;
1452 extern_obj_free(struct rte_swx_pipeline *p)
1454 extern_obj_build_free(p);
1456 /* Extern objects. */
1458 struct extern_obj *elem;
1460 elem = TAILQ_FIRST(&p->extern_objs);
1464 TAILQ_REMOVE(&p->extern_objs, elem, node);
1466 elem->type->destructor(elem->obj);
1472 struct extern_type *elem;
1474 elem = TAILQ_FIRST(&p->extern_types);
1478 TAILQ_REMOVE(&p->extern_types, elem, node);
1481 struct extern_type_member_func *func;
1483 func = TAILQ_FIRST(&elem->funcs);
1487 TAILQ_REMOVE(&elem->funcs, func, node);
1498 static struct extern_func *
1499 extern_func_find(struct rte_swx_pipeline *p, const char *name)
1501 struct extern_func *elem;
1503 TAILQ_FOREACH(elem, &p->extern_funcs, node)
1504 if (strcmp(elem->name, name) == 0)
1510 static struct field *
1511 extern_func_mailbox_field_parse(struct rte_swx_pipeline *p,
1513 struct extern_func **function)
1515 struct extern_func *func;
1517 char *func_name, *field_name;
1519 if ((name[0] != 'f') || (name[1] != '.'))
1522 func_name = strdup(&name[2]);
1526 field_name = strchr(func_name, '.');
1535 func = extern_func_find(p, func_name);
1541 f = struct_type_field_find(func->mailbox_struct_type, field_name);
1555 rte_swx_pipeline_extern_func_register(struct rte_swx_pipeline *p,
1557 const char *mailbox_struct_type_name,
1558 rte_swx_extern_func_t func)
1560 struct extern_func *f;
1561 struct struct_type *mailbox_struct_type;
1565 CHECK_NAME(name, EINVAL);
1566 CHECK(!extern_func_find(p, name), EEXIST);
1568 CHECK_NAME(mailbox_struct_type_name, EINVAL);
1569 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
1570 CHECK(mailbox_struct_type, EINVAL);
1572 CHECK(func, EINVAL);
1574 /* Node allocation. */
1575 f = calloc(1, sizeof(struct extern_func));
1576 CHECK(func, ENOMEM);
1578 /* Node initialization. */
1579 strcpy(f->name, name);
1580 f->mailbox_struct_type = mailbox_struct_type;
1582 f->struct_id = p->n_structs;
1583 f->id = p->n_extern_funcs;
1585 /* Node add to tailq. */
1586 TAILQ_INSERT_TAIL(&p->extern_funcs, f, node);
1587 p->n_extern_funcs++;
1594 extern_func_build(struct rte_swx_pipeline *p)
1598 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1599 struct thread *t = &p->threads[i];
1600 struct extern_func *func;
1602 /* Memory allocation. */
1603 t->extern_funcs = calloc(p->n_extern_funcs,
1604 sizeof(struct extern_func_runtime));
1605 CHECK(t->extern_funcs, ENOMEM);
1607 /* Extern function. */
1608 TAILQ_FOREACH(func, &p->extern_funcs, node) {
1609 struct extern_func_runtime *r =
1610 &t->extern_funcs[func->id];
1611 uint32_t mailbox_size =
1612 func->mailbox_struct_type->n_bits / 8;
1614 r->func = func->func;
1616 r->mailbox = calloc(1, mailbox_size);
1617 CHECK(r->mailbox, ENOMEM);
1619 t->structs[func->struct_id] = r->mailbox;
1627 extern_func_build_free(struct rte_swx_pipeline *p)
1631 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1632 struct thread *t = &p->threads[i];
1635 if (!t->extern_funcs)
1638 for (j = 0; j < p->n_extern_funcs; j++) {
1639 struct extern_func_runtime *r = &t->extern_funcs[j];
1644 free(t->extern_funcs);
1645 t->extern_funcs = NULL;
1650 extern_func_free(struct rte_swx_pipeline *p)
1652 extern_func_build_free(p);
1655 struct extern_func *elem;
1657 elem = TAILQ_FIRST(&p->extern_funcs);
1661 TAILQ_REMOVE(&p->extern_funcs, elem, node);
1669 static struct header *
1670 header_find(struct rte_swx_pipeline *p, const char *name)
1672 struct header *elem;
1674 TAILQ_FOREACH(elem, &p->headers, node)
1675 if (strcmp(elem->name, name) == 0)
1681 static struct header *
1682 header_parse(struct rte_swx_pipeline *p,
1685 if (name[0] != 'h' || name[1] != '.')
1688 return header_find(p, &name[2]);
1691 static struct field *
1692 header_field_parse(struct rte_swx_pipeline *p,
1694 struct header **header)
1698 char *header_name, *field_name;
1700 if ((name[0] != 'h') || (name[1] != '.'))
1703 header_name = strdup(&name[2]);
1707 field_name = strchr(header_name, '.');
1716 h = header_find(p, header_name);
1722 f = struct_type_field_find(h->st, field_name);
1736 rte_swx_pipeline_packet_header_register(struct rte_swx_pipeline *p,
1738 const char *struct_type_name)
1740 struct struct_type *st;
1742 size_t n_headers_max;
1745 CHECK_NAME(name, EINVAL);
1746 CHECK_NAME(struct_type_name, EINVAL);
1748 CHECK(!header_find(p, name), EEXIST);
1750 st = struct_type_find(p, struct_type_name);
1753 n_headers_max = RTE_SIZEOF_FIELD(struct thread, valid_headers) * 8;
1754 CHECK(p->n_headers < n_headers_max, ENOSPC);
1756 /* Node allocation. */
1757 h = calloc(1, sizeof(struct header));
1760 /* Node initialization. */
1761 strcpy(h->name, name);
1763 h->struct_id = p->n_structs;
1764 h->id = p->n_headers;
1766 /* Node add to tailq. */
1767 TAILQ_INSERT_TAIL(&p->headers, h, node);
/* Build step: size the aggregate header storage and allocate, per thread,
 * the header runtime arrays plus two flat byte buffers (parsed headers and
 * emitted headers). Each header is then given a fixed slice of the parsed
 * storage, recorded both as its ptr0 and as its structs[] base pointer. */
1775 header_build(struct rte_swx_pipeline *p)
1778 uint32_t n_bytes = 0, i;
/* Total bytes needed to hold every registered header back to back. */
1780 TAILQ_FOREACH(h, &p->headers, node) {
1781 n_bytes += h->st->n_bits / 8;
1784 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1785 struct thread *t = &p->threads[i];
1786 uint32_t offset = 0;
1788 t->headers = calloc(p->n_headers,
1789 sizeof(struct header_runtime));
1790 CHECK(t->headers, ENOMEM);
1792 t->headers_out = calloc(p->n_headers,
1793 sizeof(struct header_out_runtime));
1794 CHECK(t->headers_out, ENOMEM);
1796 t->header_storage = calloc(1, n_bytes);
1797 CHECK(t->header_storage, ENOMEM);
1799 t->header_out_storage = calloc(1, n_bytes);
1800 CHECK(t->header_out_storage, ENOMEM);
/* Carve the flat storage into per-header slices in tailq order. */
1802 TAILQ_FOREACH(h, &p->headers, node) {
1803 uint8_t *header_storage;
1805 header_storage = &t->header_storage[offset];
1806 offset += h->st->n_bits / 8;
1808 t->headers[h->id].ptr0 = header_storage;
1809 t->structs[h->struct_id] = header_storage;
/* Undo header_build: release the per-thread runtime arrays and the flat
 * header storage buffers, NULLing each pointer to keep the free idempotent. */
1817 header_build_free(struct rte_swx_pipeline *p)
1821 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1822 struct thread *t = &p->threads[i];
1824 free(t->headers_out);
1825 t->headers_out = NULL;
1830 free(t->header_out_storage);
1831 t->header_out_storage = NULL;
1833 free(t->header_storage);
1834 t->header_storage = NULL;
/* Full header teardown: free build-time state, then drain and free every
 * node of the headers tailq. */
1839 header_free(struct rte_swx_pipeline *p)
1841 header_build_free(p);
1844 struct header *elem;
1846 elem = TAILQ_FIRST(&p->headers);
1850 TAILQ_REMOVE(&p->headers, elem, node);
/* Resolve an operand of the form "m.<field>" against the registered metadata
 * struct type. Fails when no metadata struct has been registered or the token
 * lacks the "m." prefix. */
1858 static struct field *
1859 metadata_field_parse(struct rte_swx_pipeline *p, const char *name)
1861 if (!p->metadata_st)
1864 if (name[0] != 'm' || name[1] != '.')
1867 return struct_type_field_find(p->metadata_st, &name[2]);
/* Public API: bind the (single) packet metadata struct type for the pipeline.
 * Registration is one-shot: a second call fails via the !p->metadata_st
 * check. Records the struct-table slot the metadata will occupy. */
1871 rte_swx_pipeline_packet_metadata_register(struct rte_swx_pipeline *p,
1872 const char *struct_type_name)
1874 struct struct_type *st = NULL;
1878 CHECK_NAME(struct_type_name, EINVAL);
1879 st = struct_type_find(p, struct_type_name);
1881 CHECK(!p->metadata_st, EINVAL);
1883 p->metadata_st = st;
1884 p->metadata_struct_id = p->n_structs;
/* Build step: allocate one zeroed metadata buffer per thread and publish it
 * both as t->metadata and in the structs[] pointer table at the slot
 * reserved at registration time. */
1892 metadata_build(struct rte_swx_pipeline *p)
1894 uint32_t n_bytes = p->metadata_st->n_bits / 8;
1897 /* Thread-level initialization. */
1898 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1899 struct thread *t = &p->threads[i];
1902 metadata = calloc(1, n_bytes);
1903 CHECK(metadata, ENOMEM);
1905 t->metadata = metadata;
1906 t->structs[p->metadata_struct_id] = metadata;
/* Undo metadata_build: iterate all threads and release each thread's
 * metadata buffer (the free itself is on lines elided from this view). */
1913 metadata_build_free(struct rte_swx_pipeline *p)
1917 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1918 struct thread *t = &p->threads[i];
/* Full metadata teardown: currently just delegates to the build-free step. */
1926 metadata_free(struct rte_swx_pipeline *p)
1928 metadata_build_free(p);
/* Forward declaration: action_field_parse is defined later in the file. */
1934 static struct field *
1935 action_field_parse(struct action *action, const char *name);
/* Generic operand resolver: dispatches on the operand prefix and tries, in
 * order, header fields, metadata fields, action-data fields, extern-object
 * mailbox fields and extern-function mailbox fields. On success *struct_id
 * receives the structs[] slot that owns the field. (The per-case prefix
 * switch lines are elided from this view.) */
1937 static struct field *
1938 struct_field_parse(struct rte_swx_pipeline *p,
1939 struct action *action,
1941 uint32_t *struct_id)
1948 struct header *header;
1950 f = header_field_parse(p, name, &header);
1954 *struct_id = header->struct_id;
1960 f = metadata_field_parse(p, name);
1964 *struct_id = p->metadata_struct_id;
/* Action-data ("t.") fields only make sense inside an action context. */
1973 f = action_field_parse(action, name);
1983 struct extern_obj *obj;
1985 f = extern_obj_mailbox_field_parse(p, name, &obj);
1989 *struct_id = obj->struct_id;
1995 struct extern_func *func;
1997 f = extern_func_mailbox_field_parse(p, name, &func);
2001 *struct_id = func->struct_id;
/* Advance the round-robin input port selector. The mask-based wrap only
 * implements modulo when n_ports_in is a power of two — an invariant that
 * must be enforced at pipeline build time. */
2011 pipeline_port_inc(struct rte_swx_pipeline *p)
2013 p->port_id = (p->port_id + 1) & (p->n_ports_in - 1);
/* Rewind a thread's instruction pointer to the first pipeline instruction. */
2017 thread_ip_reset(struct rte_swx_pipeline *p, struct thread *t)
2019 t->ip = p->instructions;
/* Advance the current thread's instruction pointer by one instruction. */
2023 thread_ip_inc(struct rte_swx_pipeline *p);
2026 thread_ip_inc(struct rte_swx_pipeline *p)
2028 struct thread *t = &p->threads[p->thread_id];
2034 thread_ip_inc_cond(struct thread *t, int cond)
/* Switch execution to the next thread, wrapping at THREADS_MAX. The mask
 * trick relies on RTE_SWX_PIPELINE_THREADS_MAX being a power of two. */
2040 thread_yield(struct rte_swx_pipeline *p)
2042 p->thread_id = (p->thread_id + 1) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
/* Translate "rx m.<field>": forbidden inside actions, takes exactly one
 * operand, which must be a metadata field; the field's byte offset and bit
 * width are captured so rx_exec can record the input port id there. */
2049 instr_rx_translate(struct rte_swx_pipeline *p,
2050 struct action *action,
2053 struct instruction *instr,
2054 struct instruction_data *data __rte_unused)
2058 CHECK(!action, EINVAL);
2059 CHECK(n_tokens == 2, EINVAL);
2061 f = metadata_field_parse(p, tokens[1]);
2064 instr->type = INSTR_RX;
2065 instr->io.io.offset = f->offset / 8;
2066 instr->io.io.n_bits = f->n_bits;
/* Execute rx: poll the current input port for one packet, prime the packet
 * data pointer, clear the per-packet header state, record the port id in
 * the designated metadata field, snapshot the table state, then move to the
 * next input port. The ip only advances when a packet was received, so an
 * empty poll retries rx on the next pass. */
2071 instr_rx_exec(struct rte_swx_pipeline *p);
2074 instr_rx_exec(struct rte_swx_pipeline *p)
2076 struct thread *t = &p->threads[p->thread_id];
2077 struct instruction *ip = t->ip;
2078 struct port_in_runtime *port = &p->in[p->port_id];
2079 struct rte_swx_pkt *pkt = &t->pkt;
2083 pkt_received = port->pkt_rx(port->obj, pkt);
/* Point at the first parsed byte and warm the cache line. */
2084 t->ptr = &pkt->pkt[pkt->offset];
2085 rte_prefetch0(t->ptr);
2087 TRACE("[Thread %2u] rx %s from port %u\n",
2089 pkt_received ? "1 pkt" : "0 pkts",
/* Fresh packet: no headers parsed, no headers queued for emission. */
2093 t->valid_headers = 0;
2094 t->n_headers_out = 0;
2097 METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, p->port_id);
2100 t->table_state = p->table_state;
2103 pipeline_port_inc(p);
2104 thread_ip_inc_cond(t, pkt_received);
/* Translate "tx m.<field>": one operand naming the metadata field that holds
 * the output port id; its byte offset and bit width are stored for tx_exec. */
2112 instr_tx_translate(struct rte_swx_pipeline *p,
2113 struct action *action __rte_unused,
2116 struct instruction *instr,
2117 struct instruction_data *data __rte_unused)
2121 CHECK(n_tokens == 2, EINVAL);
2123 f = metadata_field_parse(p, tokens[1]);
2126 instr->type = INSTR_TX;
2127 instr->io.io.offset = f->offset / 8;
2128 instr->io.io.n_bits = f->n_bits;
/* Materialize the queued "emit" headers back into the packet buffer before
 * transmission. Two fast paths are detected first, then a generic fallback:
 * 1) exactly one emitted header that already sits immediately before the
 *    packet data — only the packet offset/length need adjusting;
 * 2) two emitted headers where the second abuts the packet data and the
 *    first lives in its original storage — copy the first in front;
 * 3) otherwise gather all emitted headers into header_out_storage and copy
 *    the concatenation in front of the packet data in one shot. */
2133 emit_handler(struct thread *t)
2135 struct header_out_runtime *h0 = &t->headers_out[0];
2136 struct header_out_runtime *h1 = &t->headers_out[1];
2137 uint32_t offset = 0, i;
2139 /* No header change or header decapsulation. */
2140 if ((t->n_headers_out == 1) &&
2141 (h0->ptr + h0->n_bytes == t->ptr)) {
2142 TRACE("Emit handler: no header change or header decap.\n");
/* Headers are contiguous with the payload: expose them by moving the
 * packet window left, no copy needed. */
2144 t->pkt.offset -= h0->n_bytes;
2145 t->pkt.length += h0->n_bytes;
2150 /* Header encapsulation (optionally, with prior header decasulation). */
2151 if ((t->n_headers_out == 2) &&
2152 (h1->ptr + h1->n_bytes == t->ptr) &&
2153 (h0->ptr == h0->ptr0)) {
2156 TRACE("Emit handler: header encapsulation.\n");
/* h1 is already in place; copy h0 directly in front of it. */
2158 offset = h0->n_bytes + h1->n_bytes;
2159 memcpy(t->ptr - offset, h0->ptr, h0->n_bytes);
2160 t->pkt.offset -= offset;
2161 t->pkt.length += offset;
2166 /* Header insertion. */
2169 /* Header extraction. */
2172 /* For any other case. */
2173 TRACE("Emit handler: complex case.\n");
/* Gather pass: concatenate all emitted headers into the scratch buffer. */
2175 for (i = 0; i < t->n_headers_out; i++) {
2176 struct header_out_runtime *h = &t->headers_out[i];
2178 memcpy(&t->header_out_storage[offset], h->ptr, h->n_bytes);
2179 offset += h->n_bytes;
/* Scatter pass: single copy of the concatenation in front of the packet. */
2183 memcpy(t->ptr - offset, t->header_out_storage, offset);
2184 t->pkt.offset -= offset;
2185 t->pkt.length += offset;
/* Execute tx: read the output port id from the metadata field designated at
 * translation time, hand the packet to that port's tx callback, then reset
 * the instruction pointer so the thread starts over at rx. */
2190 instr_tx_exec(struct rte_swx_pipeline *p);
2193 instr_tx_exec(struct rte_swx_pipeline *p)
2195 struct thread *t = &p->threads[p->thread_id];
2196 struct instruction *ip = t->ip;
2197 uint64_t port_id = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
/* No bounds check here: port_id is trusted to be < n_ports_out. */
2198 struct port_out_runtime *port = &p->out[port_id];
2199 struct rte_swx_pkt *pkt = &t->pkt;
2201 TRACE("[Thread %2u]: tx 1 pkt to port %u\n",
2209 port->pkt_tx(port->obj, pkt);
2212 thread_ip_reset(p, t);
/* Translate "extract h.<header>": forbidden inside actions; records the
 * header's id, structs[] slot and byte size in operand slot 0 (later fused
 * extract instructions fill slots 1..7). */
2220 instr_hdr_extract_translate(struct rte_swx_pipeline *p,
2221 struct action *action,
2224 struct instruction *instr,
2225 struct instruction_data *data __rte_unused)
2229 CHECK(!action, EINVAL);
2230 CHECK(n_tokens == 2, EINVAL);
2232 h = header_parse(p, tokens[1]);
2235 instr->type = INSTR_HDR_EXTRACT;
2236 instr->io.hdr.header_id[0] = h->id;
2237 instr->io.hdr.struct_id[0] = h->struct_id;
2238 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
/* Common body for the (possibly fused) extract instructions: peel n_extract
 * headers off the front of the packet in place. Each header's structs[] slot
 * is pointed at the current packet position (zero-copy parse) and its
 * valid_headers bit is set; the walking pointer/offset/length updates occur
 * on lines elided from this view. State is written back once at the end. */
2243 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract);
2246 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract)
2248 struct thread *t = &p->threads[p->thread_id];
2249 struct instruction *ip = t->ip;
/* Work on local copies; commit to the thread state after the loop. */
2250 uint64_t valid_headers = t->valid_headers;
2251 uint8_t *ptr = t->ptr;
2252 uint32_t offset = t->pkt.offset;
2253 uint32_t length = t->pkt.length;
2256 for (i = 0; i < n_extract; i++) {
2257 uint32_t header_id = ip->io.hdr.header_id[i];
2258 uint32_t struct_id = ip->io.hdr.struct_id[i];
2259 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
2261 TRACE("[Thread %2u]: extract header %u (%u bytes)\n",
/* Zero-copy: the header "lives" inside the packet buffer. */
2267 t->structs[struct_id] = ptr;
2268 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
2277 t->valid_headers = valid_headers;
2280 t->pkt.offset = offset;
2281 t->pkt.length = length;
/* Single (non-fused) extract: delegate to the common body with count 1. */
2286 instr_hdr_extract_exec(struct rte_swx_pipeline *p)
2288 __instr_hdr_extract_exec(p, 1);
/* Fused extract handlers: at build time, runs of 2..8 consecutive extract
 * instructions are merged into one of these, each delegating to the common
 * body with the matching count. */
2295 instr_hdr_extract2_exec(struct rte_swx_pipeline *p)
2297 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
2300 __instr_hdr_extract_exec(p, 2);
2307 instr_hdr_extract3_exec(struct rte_swx_pipeline *p)
2309 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
2312 __instr_hdr_extract_exec(p, 3);
2319 instr_hdr_extract4_exec(struct rte_swx_pipeline *p)
2321 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
2324 __instr_hdr_extract_exec(p, 4);
2331 instr_hdr_extract5_exec(struct rte_swx_pipeline *p)
2333 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
2336 __instr_hdr_extract_exec(p, 5);
2343 instr_hdr_extract6_exec(struct rte_swx_pipeline *p)
2345 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
2348 __instr_hdr_extract_exec(p, 6);
2355 instr_hdr_extract7_exec(struct rte_swx_pipeline *p)
2357 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
2360 __instr_hdr_extract_exec(p, 7);
2367 instr_hdr_extract8_exec(struct rte_swx_pipeline *p)
2369 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
2372 __instr_hdr_extract_exec(p, 8);
/* Translate "emit h.<header>": allowed in any context; records the header's
 * id, structs[] slot and byte size in operand slot 0 for the emit engine. */
2382 instr_hdr_emit_translate(struct rte_swx_pipeline *p,
2383 struct action *action __rte_unused,
2386 struct instruction *instr,
2387 struct instruction_data *data __rte_unused)
2391 CHECK(n_tokens == 2, EINVAL);
2393 h = header_parse(p, tokens[1]);
2396 instr->type = INSTR_HDR_EMIT;
2397 instr->io.hdr.header_id[0] = h->id;
2398 instr->io.hdr.struct_id[0] = h->struct_id;
2399 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
/* Common body for the (possibly fused) emit instructions: append n_emit
 * headers to the thread's headers_out[] queue. Adjacent headers that are
 * contiguous in memory are coalesced into the current headers_out entry by
 * growing its byte count; otherwise a new entry is started. The decision
 * branches sit on lines elided from this view; the final entry's byte count
 * and the queue length are committed at the end. */
2404 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit);
2407 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit)
2409 struct thread *t = &p->threads[p->thread_id];
2410 struct instruction *ip = t->ip;
2411 uint32_t n_headers_out = t->n_headers_out;
/* ho tracks the output entry currently being grown (last queue slot). */
2412 struct header_out_runtime *ho = &t->headers_out[n_headers_out - 1];
2413 uint8_t *ho_ptr = NULL;
2414 uint32_t ho_nbytes = 0, i;
2416 for (i = 0; i < n_emit; i++) {
2417 uint32_t header_id = ip->io.hdr.header_id[i];
2418 uint32_t struct_id = ip->io.hdr.struct_id[i];
2419 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
2421 struct header_runtime *hi = &t->headers[header_id];
/* hi_ptr is where the header's bytes currently live (packet or storage). */
2422 uint8_t *hi_ptr = t->structs[struct_id];
2424 TRACE("[Thread %2u]: emit header %u\n",
/* First emitted header of this packet: open queue slot 0. */
2430 if (!t->n_headers_out) {
2431 ho = &t->headers_out[0];
2433 ho->ptr0 = hi->ptr0;
2437 ho_nbytes = n_bytes;
2444 ho_nbytes = ho->n_bytes;
/* Contiguous with the current entry: coalesce by extending it. */
2448 if (ho_ptr + ho_nbytes == hi_ptr) {
2449 ho_nbytes += n_bytes;
/* Not contiguous: close the current entry and start a new one. */
2451 ho->n_bytes = ho_nbytes;
2454 ho->ptr0 = hi->ptr0;
2458 ho_nbytes = n_bytes;
/* Commit the in-progress entry and the updated queue length. */
2464 ho->n_bytes = ho_nbytes;
2465 t->n_headers_out = n_headers_out;
/* Single (non-fused) emit: delegate to the common body with count 1. */
2469 instr_hdr_emit_exec(struct rte_swx_pipeline *p)
2471 __instr_hdr_emit_exec(p, 1);
/* Fused emit+tx handlers: a run of 1..8 emit instructions followed by tx is
 * merged into one of these. Each emits the queued headers via the common
 * body (count = number of fused emits) and then transmits; the tx tail is on
 * lines elided from this view. Note the TRACE counts include the tx, hence
 * "next N+1 instructions". */
2478 instr_hdr_emit_tx_exec(struct rte_swx_pipeline *p)
2480 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
2483 __instr_hdr_emit_exec(p, 1);
2488 instr_hdr_emit2_tx_exec(struct rte_swx_pipeline *p)
2490 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
2493 __instr_hdr_emit_exec(p, 2);
2498 instr_hdr_emit3_tx_exec(struct rte_swx_pipeline *p)
2500 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
2503 __instr_hdr_emit_exec(p, 3);
2508 instr_hdr_emit4_tx_exec(struct rte_swx_pipeline *p)
2510 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
2513 __instr_hdr_emit_exec(p, 4);
2518 instr_hdr_emit5_tx_exec(struct rte_swx_pipeline *p)
2520 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
2523 __instr_hdr_emit_exec(p, 5);
2528 instr_hdr_emit6_tx_exec(struct rte_swx_pipeline *p)
2530 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
2533 __instr_hdr_emit_exec(p, 6);
2538 instr_hdr_emit7_tx_exec(struct rte_swx_pipeline *p)
2540 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
2543 __instr_hdr_emit_exec(p, 7);
2548 instr_hdr_emit8_tx_exec(struct rte_swx_pipeline *p)
2550 TRACE("[Thread %2u] *** The next 9 instructions are fused. ***\n",
2553 __instr_hdr_emit_exec(p, 8);
/* Translate "validate h.<header>": one header operand; only the header id is
 * needed since execution just sets a bit in the valid_headers mask. */
2561 instr_hdr_validate_translate(struct rte_swx_pipeline *p,
2562 struct action *action __rte_unused,
2565 struct instruction *instr,
2566 struct instruction_data *data __rte_unused)
2570 CHECK(n_tokens == 2, EINVAL);
2572 h = header_parse(p, tokens[1]);
2575 instr->type = INSTR_HDR_VALIDATE;
2576 instr->valid.header_id = h->id;
/* Execute validate: mark the header present by setting its bit in the
 * 64-bit valid_headers mask. */
2581 instr_hdr_validate_exec(struct rte_swx_pipeline *p)
2583 struct thread *t = &p->threads[p->thread_id];
2584 struct instruction *ip = t->ip;
2585 uint32_t header_id = ip->valid.header_id;
2587 TRACE("[Thread %2u] validate header %u\n", p->thread_id, header_id);
2590 t->valid_headers = MASK64_BIT_SET(t->valid_headers, header_id);
/* Translate "invalidate h.<header>": mirror of validate; only the header id
 * is recorded. */
2600 instr_hdr_invalidate_translate(struct rte_swx_pipeline *p,
2601 struct action *action __rte_unused,
2604 struct instruction *instr,
2605 struct instruction_data *data __rte_unused)
2609 CHECK(n_tokens == 2, EINVAL);
2611 h = header_parse(p, tokens[1]);
2614 instr->type = INSTR_HDR_INVALIDATE;
2615 instr->valid.header_id = h->id;
/* Execute invalidate: clear the header's bit in the valid_headers mask. */
2620 instr_hdr_invalidate_exec(struct rte_swx_pipeline *p)
2622 struct thread *t = &p->threads[p->thread_id];
2623 struct instruction *ip = t->ip;
2624 uint32_t header_id = ip->valid.header_id;
2626 TRACE("[Thread %2u] invalidate header %u\n", p->thread_id, header_id);
2629 t->valid_headers = MASK64_BIT_CLR(t->valid_headers, header_id);
/* Translate "mov dst src". Two encodings:
 * - field source: plain MOV, upgraded to MOV_S (byte-swapping variant) when
 *   exactly one operand is a header field, i.e. network vs host byte order
 *   differs between the two sides;
 * - immediate source: MOV_I, parsed with strtoul (base auto-detected); the
 *   htonl conversion applies on a branch whose condition is elided here —
 *   presumably when dst is a header field; confirm against upstream. */
2639 instr_mov_translate(struct rte_swx_pipeline *p,
2640 struct action *action,
2643 struct instruction *instr,
2644 struct instruction_data *data __rte_unused)
2646 char *dst = tokens[1], *src = tokens[2];
2647 struct field *fdst, *fsrc;
2648 uint32_t dst_struct_id, src_struct_id, src_val;
2650 CHECK(n_tokens == 3, EINVAL);
/* Destination must be a header or metadata field (no action data). */
2652 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
2653 CHECK(fdst, EINVAL);
2656 fsrc = struct_field_parse(p, action, src, &src_struct_id);
2658 instr->type = INSTR_MOV;
/* Mixed header/non-header operands need the endianness-swapping variant. */
2659 if ((dst[0] == 'h' && src[0] != 'h') ||
2660 (dst[0] != 'h' && src[0] == 'h'))
2661 instr->type = INSTR_MOV_S;
2663 instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
2664 instr->mov.dst.n_bits = fdst->n_bits;
2665 instr->mov.dst.offset = fdst->offset / 8;
2666 instr->mov.src.struct_id = (uint8_t)src_struct_id;
2667 instr->mov.src.n_bits = fsrc->n_bits;
2668 instr->mov.src.offset = fsrc->offset / 8;
/* Immediate form: the whole token must parse as a number. */
2673 src_val = strtoul(src, &src, 0);
2674 CHECK(!src[0], EINVAL);
2677 src_val = htonl(src_val);
2679 instr->type = INSTR_MOV_I;
2680 instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
2681 instr->mov.dst.n_bits = fdst->n_bits;
2682 instr->mov.dst.offset = fdst->offset / 8;
2683 instr->mov.src_val = (uint32_t)src_val;
/* mov execution handlers: plain (same byte order), _s (byte-swapping, one
 * header operand) and _i (immediate source). The actual data movement is
 * performed by macros on lines elided from this view. */
2688 instr_mov_exec(struct rte_swx_pipeline *p)
2690 struct thread *t = &p->threads[p->thread_id];
2691 struct instruction *ip = t->ip;
2693 TRACE("[Thread %2u] mov\n",
2703 instr_mov_s_exec(struct rte_swx_pipeline *p)
2705 struct thread *t = &p->threads[p->thread_id];
2706 struct instruction *ip = t->ip;
2708 TRACE("[Thread %2u] mov (s)\n",
2718 instr_mov_i_exec(struct rte_swx_pipeline *p)
2720 struct thread *t = &p->threads[p->thread_id];
2721 struct instruction *ip = t->ip;
2723 TRACE("[Thread %2u] mov m.f %x\n",
/* Translate "dma h.<header> t.<field>": action-only instruction that copies
 * a whole header's worth of bytes from the action (table) data into the
 * header. Slot 0 records the destination header and the source byte offset
 * within the action data (fused variants fill slots 1..7). */
2737 instr_dma_translate(struct rte_swx_pipeline *p,
2738 struct action *action,
2741 struct instruction *instr,
2742 struct instruction_data *data __rte_unused)
2744 char *dst = tokens[1];
2745 char *src = tokens[2];
/* dma is only meaningful inside an action (needs action data). */
2749 CHECK(action, EINVAL);
2750 CHECK(n_tokens == 3, EINVAL);
2752 h = header_parse(p, dst);
2755 tf = action_field_parse(action, src);
2758 instr->type = INSTR_DMA_HT;
2759 instr->dma.dst.header_id[0] = h->id;
2760 instr->dma.dst.struct_id[0] = h->struct_id;
2761 instr->dma.n_bytes[0] = h->st->n_bits / 8;
2762 instr->dma.src.offset[0] = tf->offset / 8;
/* Common body for the (possibly fused) dma instructions: copy n_dma blocks
 * from the action data (structs[0]) into their destination headers. A header
 * that is already valid is written in place; an invalid one is written into
 * its original ptr0 storage, its structs[] slot re-pointed there, and its
 * valid bit set — i.e. dma both fills and validates the header. */
2768 __instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma);
2771 __instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma)
2773 struct thread *t = &p->threads[p->thread_id];
2774 struct instruction *ip = t->ip;
/* structs[0] is reserved for the current action's data. */
2775 uint8_t *action_data = t->structs[0];
2776 uint64_t valid_headers = t->valid_headers;
2779 for (i = 0; i < n_dma; i++) {
2780 uint32_t header_id = ip->dma.dst.header_id[i];
2781 uint32_t struct_id = ip->dma.dst.struct_id[i];
2782 uint32_t offset = ip->dma.src.offset[i];
2783 uint32_t n_bytes = ip->dma.n_bytes[i];
2785 struct header_runtime *h = &t->headers[header_id];
2786 uint8_t *h_ptr0 = h->ptr0;
2787 uint8_t *h_ptr = t->structs[struct_id];
/* Valid header: overwrite where it lives; invalid: use its home storage. */
2789 void *dst = MASK64_BIT_GET(valid_headers, header_id) ?
2791 void *src = &action_data[offset];
2793 TRACE("[Thread %2u] dma h.s t.f\n", p->thread_id);
2796 memcpy(dst, src, n_bytes);
2797 t->structs[struct_id] = dst;
2798 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
2801 t->valid_headers = valid_headers;
/* dma execution handlers: the single form plus fused variants for runs of
 * 2..8 consecutive dma instructions, all delegating to the common body. */
2805 instr_dma_ht_exec(struct rte_swx_pipeline *p)
2807 __instr_dma_ht_exec(p, 1);
2814 instr_dma_ht2_exec(struct rte_swx_pipeline *p)
2816 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
2819 __instr_dma_ht_exec(p, 2);
2826 instr_dma_ht3_exec(struct rte_swx_pipeline *p)
2828 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
2831 __instr_dma_ht_exec(p, 3);
2838 instr_dma_ht4_exec(struct rte_swx_pipeline *p)
2840 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
2843 __instr_dma_ht_exec(p, 4);
2850 instr_dma_ht5_exec(struct rte_swx_pipeline *p)
2852 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
2855 __instr_dma_ht_exec(p, 5);
2862 instr_dma_ht6_exec(struct rte_swx_pipeline *p)
2864 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
2867 __instr_dma_ht_exec(p, 6);
2874 instr_dma_ht7_exec(struct rte_swx_pipeline *p)
2876 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
2879 __instr_dma_ht_exec(p, 7);
2886 instr_dma_ht8_exec(struct rte_swx_pipeline *p)
2888 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
2891 __instr_dma_ht_exec(p, 8);
/* Translate "add dst src". Field sources select an operand-class-specific
 * opcode (HM = header dst / metadata src, MH, HH — these variants handle the
 * byte-order conversions); immediate sources select ADD_MI/ADD_HI per the
 * destination class (selection branches partly elided from this view). */
2901 instr_alu_add_translate(struct rte_swx_pipeline *p,
2902 struct action *action,
2905 struct instruction *instr,
2906 struct instruction_data *data __rte_unused)
2908 char *dst = tokens[1], *src = tokens[2];
2909 struct field *fdst, *fsrc;
2910 uint32_t dst_struct_id, src_struct_id, src_val;
2912 CHECK(n_tokens == 3, EINVAL);
/* Destination must be a header or metadata field (no action data). */
2914 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
2915 CHECK(fdst, EINVAL);
2917 /* ADD, ADD_HM, ADD_MH, ADD_HH. */
2918 fsrc = struct_field_parse(p, action, src, &src_struct_id);
2920 instr->type = INSTR_ALU_ADD;
2921 if (dst[0] == 'h' && src[0] == 'm')
2922 instr->type = INSTR_ALU_ADD_HM;
2923 if (dst[0] == 'm' && src[0] == 'h')
2924 instr->type = INSTR_ALU_ADD_MH;
2925 if (dst[0] == 'h' && src[0] == 'h')
2926 instr->type = INSTR_ALU_ADD_HH;
2928 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
2929 instr->alu.dst.n_bits = fdst->n_bits;
2930 instr->alu.dst.offset = fdst->offset / 8;
2931 instr->alu.src.struct_id = (uint8_t)src_struct_id;
2932 instr->alu.src.n_bits = fsrc->n_bits;
2933 instr->alu.src.offset = fsrc->offset / 8;
2937 /* ADD_MI, ADD_HI. */
2938 src_val = strtoul(src, &src, 0);
2939 CHECK(!src[0], EINVAL);
2941 instr->type = INSTR_ALU_ADD_MI;
2943 instr->type = INSTR_ALU_ADD_HI;
2945 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
2946 instr->alu.dst.n_bits = fdst->n_bits;
2947 instr->alu.dst.offset = fdst->offset / 8;
2948 instr->alu.src_val = (uint32_t)src_val;
/* Translate "sub dst src". Structurally identical to add: operand-class
 * variants (HM/MH/HH) for field sources, SUB_MI/SUB_HI for immediates. */
2953 instr_alu_sub_translate(struct rte_swx_pipeline *p,
2954 struct action *action,
2957 struct instruction *instr,
2958 struct instruction_data *data __rte_unused)
2960 char *dst = tokens[1], *src = tokens[2];
2961 struct field *fdst, *fsrc;
2962 uint32_t dst_struct_id, src_struct_id, src_val;
2964 CHECK(n_tokens == 3, EINVAL);
2966 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
2967 CHECK(fdst, EINVAL);
2969 /* SUB, SUB_HM, SUB_MH, SUB_HH. */
2970 fsrc = struct_field_parse(p, action, src, &src_struct_id);
2972 instr->type = INSTR_ALU_SUB;
2973 if (dst[0] == 'h' && src[0] == 'm')
2974 instr->type = INSTR_ALU_SUB_HM;
2975 if (dst[0] == 'm' && src[0] == 'h')
2976 instr->type = INSTR_ALU_SUB_MH;
2977 if (dst[0] == 'h' && src[0] == 'h')
2978 instr->type = INSTR_ALU_SUB_HH;
2980 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
2981 instr->alu.dst.n_bits = fdst->n_bits;
2982 instr->alu.dst.offset = fdst->offset / 8;
2983 instr->alu.src.struct_id = (uint8_t)src_struct_id;
2984 instr->alu.src.n_bits = fsrc->n_bits;
2985 instr->alu.src.offset = fsrc->offset / 8;
2989 /* SUB_MI, SUB_HI. */
2990 src_val = strtoul(src, &src, 0);
2991 CHECK(!src[0], EINVAL);
2993 instr->type = INSTR_ALU_SUB_MI;
2995 instr->type = INSTR_ALU_SUB_HI;
2997 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
2998 instr->alu.dst.n_bits = fdst->n_bits;
2999 instr->alu.dst.offset = fdst->offset / 8;
3000 instr->alu.src_val = (uint32_t)src_val;
/* Translate "ckadd dst src" (incremental checksum add, RFC 1624 style).
 * dst must be a 16-bit header field (the checksum). src is either another
 * header field (CKADD_FIELD) or a whole header, in which case the struct
 * form is selected — with a 20-byte (IPv4-header-sized) fast path. */
3005 instr_alu_ckadd_translate(struct rte_swx_pipeline *p,
3006 struct action *action __rte_unused,
3009 struct instruction *instr,
3010 struct instruction_data *data __rte_unused)
3012 char *dst = tokens[1], *src = tokens[2];
3013 struct header *hdst, *hsrc;
3014 struct field *fdst, *fsrc;
3016 CHECK(n_tokens == 3, EINVAL);
/* Checksum destination is always a 16-bit header field. */
3018 fdst = header_field_parse(p, dst, &hdst);
3019 CHECK(fdst && (fdst->n_bits == 16), EINVAL);
3022 fsrc = header_field_parse(p, src, &hsrc);
3024 instr->type = INSTR_ALU_CKADD_FIELD;
3025 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
3026 instr->alu.dst.n_bits = fdst->n_bits;
3027 instr->alu.dst.offset = fdst->offset / 8;
3028 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
3029 instr->alu.src.n_bits = fsrc->n_bits;
3030 instr->alu.src.offset = fsrc->offset / 8;
3034 /* CKADD_STRUCT, CKADD_STRUCT20. */
3035 hsrc = header_parse(p, src);
3036 CHECK(hsrc, EINVAL);
3038 instr->type = INSTR_ALU_CKADD_STRUCT;
/* 20-byte headers (e.g. IPv4 without options) get a dedicated opcode. */
3039 if ((hsrc->st->n_bits / 8) == 20)
3040 instr->type = INSTR_ALU_CKADD_STRUCT20;
3042 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
3043 instr->alu.dst.n_bits = fdst->n_bits;
3044 instr->alu.dst.offset = fdst->offset / 8;
3045 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
/* For the struct forms, n_bits carries the whole header size. */
3046 instr->alu.src.n_bits = hsrc->st->n_bits;
3047 instr->alu.src.offset = 0; /* Unused. */
/* Translate "cksub dst src" (incremental checksum subtract): dst must be a
 * 16-bit header field and src a header field; only the field form exists. */
3052 instr_alu_cksub_translate(struct rte_swx_pipeline *p,
3053 struct action *action __rte_unused,
3056 struct instruction *instr,
3057 struct instruction_data *data __rte_unused)
3059 char *dst = tokens[1], *src = tokens[2];
3060 struct header *hdst, *hsrc;
3061 struct field *fdst, *fsrc;
3063 CHECK(n_tokens == 3, EINVAL);
3065 fdst = header_field_parse(p, dst, &hdst);
3066 CHECK(fdst && (fdst->n_bits == 16), EINVAL);
3068 fsrc = header_field_parse(p, src, &hsrc);
3069 CHECK(fsrc, EINVAL);
3071 instr->type = INSTR_ALU_CKSUB_FIELD;
3072 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
3073 instr->alu.dst.n_bits = fdst->n_bits;
3074 instr->alu.dst.offset = fdst->offset / 8;
3075 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
3076 instr->alu.src.n_bits = fsrc->n_bits;
3077 instr->alu.src.offset = fsrc->offset / 8;
/* Translate "and dst src". Field sources use AND, or AND_S when exactly one
 * operand is a header field (byte-swapping variant); immediate sources use
 * AND_I, with the htonl conversion on a branch elided from this view —
 * presumably when dst is a header field; confirm against upstream. */
3082 instr_alu_and_translate(struct rte_swx_pipeline *p,
3083 struct action *action,
3086 struct instruction *instr,
3087 struct instruction_data *data __rte_unused)
3089 char *dst = tokens[1], *src = tokens[2];
3090 struct field *fdst, *fsrc;
3091 uint32_t dst_struct_id, src_struct_id, src_val;
3093 CHECK(n_tokens == 3, EINVAL);
3095 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3096 CHECK(fdst, EINVAL);
3099 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3101 instr->type = INSTR_ALU_AND;
/* Mixed header/non-header operands need the endianness-swapping variant. */
3102 if ((dst[0] == 'h' && src[0] != 'h') ||
3103 (dst[0] != 'h' && src[0] == 'h'))
3104 instr->type = INSTR_ALU_AND_S;
3106 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3107 instr->alu.dst.n_bits = fdst->n_bits;
3108 instr->alu.dst.offset = fdst->offset / 8;
3109 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3110 instr->alu.src.n_bits = fsrc->n_bits;
3111 instr->alu.src.offset = fsrc->offset / 8;
3116 src_val = strtoul(src, &src, 0);
3117 CHECK(!src[0], EINVAL);
3120 src_val = htonl(src_val);
3122 instr->type = INSTR_ALU_AND_I;
3123 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3124 instr->alu.dst.n_bits = fdst->n_bits;
3125 instr->alu.dst.offset = fdst->offset / 8;
3126 instr->alu.src_val = (uint32_t)src_val;
/* ALU execution handlers for add/sub/and. One handler per operand-class
 * variant selected at translation time (plain, mh, hm, hh, mi, hi for
 * add/sub; plain, s, i for and). Each fetches the current instruction and
 * performs the operation via macros on lines elided from this view. */
3131 instr_alu_add_exec(struct rte_swx_pipeline *p)
3133 struct thread *t = &p->threads[p->thread_id];
3134 struct instruction *ip = t->ip;
3136 TRACE("[Thread %2u] add\n", p->thread_id);
3146 instr_alu_add_mh_exec(struct rte_swx_pipeline *p)
3148 struct thread *t = &p->threads[p->thread_id];
3149 struct instruction *ip = t->ip;
3151 TRACE("[Thread %2u] add (mh)\n", p->thread_id);
3161 instr_alu_add_hm_exec(struct rte_swx_pipeline *p)
3163 struct thread *t = &p->threads[p->thread_id];
3164 struct instruction *ip = t->ip;
3166 TRACE("[Thread %2u] add (hm)\n", p->thread_id);
3176 instr_alu_add_hh_exec(struct rte_swx_pipeline *p)
3178 struct thread *t = &p->threads[p->thread_id];
3179 struct instruction *ip = t->ip;
3181 TRACE("[Thread %2u] add (hh)\n", p->thread_id);
3191 instr_alu_add_mi_exec(struct rte_swx_pipeline *p)
3193 struct thread *t = &p->threads[p->thread_id];
3194 struct instruction *ip = t->ip;
3196 TRACE("[Thread %2u] add (mi)\n", p->thread_id);
3206 instr_alu_add_hi_exec(struct rte_swx_pipeline *p)
3208 struct thread *t = &p->threads[p->thread_id];
3209 struct instruction *ip = t->ip;
3211 TRACE("[Thread %2u] add (hi)\n", p->thread_id);
3221 instr_alu_sub_exec(struct rte_swx_pipeline *p)
3223 struct thread *t = &p->threads[p->thread_id];
3224 struct instruction *ip = t->ip;
3226 TRACE("[Thread %2u] sub\n", p->thread_id);
3236 instr_alu_sub_mh_exec(struct rte_swx_pipeline *p)
3238 struct thread *t = &p->threads[p->thread_id];
3239 struct instruction *ip = t->ip;
3241 TRACE("[Thread %2u] sub (mh)\n", p->thread_id);
3251 instr_alu_sub_hm_exec(struct rte_swx_pipeline *p)
3253 struct thread *t = &p->threads[p->thread_id];
3254 struct instruction *ip = t->ip;
3256 TRACE("[Thread %2u] sub (hm)\n", p->thread_id);
3266 instr_alu_sub_hh_exec(struct rte_swx_pipeline *p)
3268 struct thread *t = &p->threads[p->thread_id];
3269 struct instruction *ip = t->ip;
3271 TRACE("[Thread %2u] sub (hh)\n", p->thread_id);
3281 instr_alu_sub_mi_exec(struct rte_swx_pipeline *p)
3283 struct thread *t = &p->threads[p->thread_id];
3284 struct instruction *ip = t->ip;
3286 TRACE("[Thread %2u] sub (mi)\n", p->thread_id);
3296 instr_alu_sub_hi_exec(struct rte_swx_pipeline *p)
3298 struct thread *t = &p->threads[p->thread_id];
3299 struct instruction *ip = t->ip;
3301 TRACE("[Thread %2u] sub (hi)\n", p->thread_id);
3311 instr_alu_and_exec(struct rte_swx_pipeline *p)
3313 struct thread *t = &p->threads[p->thread_id];
3314 struct instruction *ip = t->ip;
3316 TRACE("[Thread %2u] and\n", p->thread_id);
3326 instr_alu_and_s_exec(struct rte_swx_pipeline *p)
3328 struct thread *t = &p->threads[p->thread_id];
3329 struct instruction *ip = t->ip;
3331 TRACE("[Thread %2u] and (s)\n", p->thread_id);
3341 instr_alu_and_i_exec(struct rte_swx_pipeline *p)
3343 struct thread *t = &p->threads[p->thread_id];
3344 struct instruction *ip = t->ip;
3346 TRACE("[Thread %2u] and (i)\n", p->thread_id);
/* Execute ckadd (field form): fold a source field into a 16-bit one's
 * complement checksum. The 64-bit source read is masked down to the field's
 * bit width, split into two 32-bit halves and added, then the sum is folded
 * back into 16 bits in three carry-propagation steps whose worst-case bit
 * widths are proven in the step comments (RFC 1071-style folding). */
3356 instr_alu_ckadd_field_exec(struct rte_swx_pipeline *p)
3358 struct thread *t = &p->threads[p->thread_id];
3359 struct instruction *ip = t->ip;
3360 uint8_t *dst_struct, *src_struct;
3361 uint16_t *dst16_ptr, dst;
3362 uint64_t *src64_ptr, src64, src64_mask, src;
3365 TRACE("[Thread %2u] ckadd (field)\n", p->thread_id);
3368 dst_struct = t->structs[ip->alu.dst.struct_id];
3369 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
3372 src_struct = t->structs[ip->alu.src.struct_id];
3373 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
/* Keep only the field's n_bits out of the 64-bit load. */
3375 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
3376 src = src64 & src64_mask;
3381 /* The first input (r) is a 16-bit number. The second and the third
3382 * inputs are 32-bit numbers. In the worst case scenario, the sum of the
3383 * three numbers (output r) is a 34-bit number.
3385 r += (src >> 32) + (src & 0xFFFFFFFF);
3387 /* The first input is a 16-bit number. The second input is an 18-bit
3388 * number. In the worst case scenario, the sum of the two numbers is a
3391 r = (r & 0xFFFF) + (r >> 16);
3393 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
3394 * a 3-bit number (0 .. 7). Their sum is a 17-bit number (0 .. 0x10006).
3396 r = (r & 0xFFFF) + (r >> 16);
3398 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
3399 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
3400 * 0x10006), the output r is (0 .. 7). So no carry bit can be generated,
3401 * therefore the output r is always a 16-bit number.
3403 r = (r & 0xFFFF) + (r >> 16);
3408 *dst16_ptr = (uint16_t)r;
/* Execute cksub (field form): remove a source field's contribution from a
 * 16-bit one's complement checksum. Underflow is avoided by first adding a
 * 36-bit multiple of the 0xFFFF modulus to the minuend (see the in-line
 * proof), then the result is folded back to 16 bits as in ckadd. */
3415 instr_alu_cksub_field_exec(struct rte_swx_pipeline *p)
3417 struct thread *t = &p->threads[p->thread_id];
3418 struct instruction *ip = t->ip;
3419 uint8_t *dst_struct, *src_struct;
3420 uint16_t *dst16_ptr, dst;
3421 uint64_t *src64_ptr, src64, src64_mask, src;
3424 TRACE("[Thread %2u] cksub (field)\n", p->thread_id);
3427 dst_struct = t->structs[ip->alu.dst.struct_id];
3428 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
3431 src_struct = t->structs[ip->alu.src.struct_id];
3432 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
/* Keep only the field's n_bits out of the 64-bit load. */
3434 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
3435 src = src64 & src64_mask;
3440 /* Subtraction in 1's complement arithmetic (i.e. a '- b) is the same as
3441 * the following sequence of operations in 2's complement arithmetic:
3442 * a '- b = (a - b) % 0xFFFF.
3444 * In order to prevent an underflow for the below subtraction, in which
3445 * a 33-bit number (the subtrahend) is taken out of a 16-bit number (the
3446 * minuend), we first add a multiple of the 0xFFFF modulus to the
3447 * minuend. The number we add to the minuend needs to be a 34-bit number
3448 * or higher, so for readability reasons we picked the 36-bit multiple.
3449 * We are effectively turning the 16-bit minuend into a 36-bit number:
3450 * (a - b) % 0xFFFF = (a + 0xFFFF00000 - b) % 0xFFFF.
3452 r += 0xFFFF00000ULL; /* The output r is a 36-bit number. */
3454 /* A 33-bit number is subtracted from a 36-bit number (the input r). The
3455 * result (the output r) is a 36-bit number.
3457 r -= (src >> 32) + (src & 0xFFFFFFFF);
3459 /* The first input is a 16-bit number. The second input is a 20-bit
3460 * number. Their sum is a 21-bit number.
3462 r = (r & 0xFFFF) + (r >> 16);
3464 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
3465 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1001E).
3467 r = (r & 0xFFFF) + (r >> 16);
3469 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
3470 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
3471 * 0x1001E), the output r is (0 .. 31). So no carry bit can be
3472 * generated, therefore the output r is always a 16-bit number.
3474 r = (r & 0xFFFF) + (r >> 16);
3479 *dst16_ptr = (uint16_t)r;
/* Execute ckadd (20-byte struct fast path): compute the checksum of a
 * 20-byte header (e.g. IPv4 without options) with a fully unrolled sum of
 * its five 32-bit words split across two accumulators, then fold to 16 bits.
 * The final ternary maps a zero result to 0xFFFF (both encode zero in one's
 * complement; 0xFFFF is the on-the-wire convention). */
3486 instr_alu_ckadd_struct20_exec(struct rte_swx_pipeline *p)
3488 struct thread *t = &p->threads[p->thread_id];
3489 struct instruction *ip = t->ip;
3490 uint8_t *dst_struct, *src_struct;
3491 uint16_t *dst16_ptr;
3492 uint32_t *src32_ptr;
3495 TRACE("[Thread %2u] ckadd (struct of 20 bytes)\n", p->thread_id);
3498 dst_struct = t->structs[ip->alu.dst.struct_id];
3499 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
3501 src_struct = t->structs[ip->alu.src.struct_id];
3502 src32_ptr = (uint32_t *)&src_struct[0];
/* Unrolled sum of the five 32-bit words; two accumulators to shorten the
 * dependency chain. */
3504 r0 = src32_ptr[0]; /* r0 is a 32-bit number. */
3505 r1 = src32_ptr[1]; /* r1 is a 32-bit number. */
3506 r0 += src32_ptr[2]; /* The output r0 is a 33-bit number. */
3507 r1 += src32_ptr[3]; /* The output r1 is a 33-bit number. */
3508 r0 += r1 + src32_ptr[4]; /* The output r0 is a 35-bit number. */
3510 /* The first input is a 16-bit number. The second input is a 19-bit
3511 * number. Their sum is a 20-bit number.
3513 r0 = (r0 & 0xFFFF) + (r0 >> 16);
3515 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
3516 * a 4-bit number (0 .. 15). The sum is a 17-bit number (0 .. 0x1000E).
3518 r0 = (r0 & 0xFFFF) + (r0 >> 16);
3520 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
3521 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
3522 * 0x1000E), the output r is (0 .. 15). So no carry bit can be
3523 * generated, therefore the output r is always a 16-bit number.
3525 r0 = (r0 & 0xFFFF) + (r0 >> 16);
/* Zero has two one's complement encodings; use 0xFFFF on the wire. */
3528 r0 = r0 ? r0 : 0xFFFF;
3530 *dst16_ptr = (uint16_t)r0;
/* Generic ckadd instruction for a source struct of arbitrary (32-bit aligned)
 * size: compute the 16-bit one's complement sum over all 32-bit words of the
 * source struct and store it into the 16-bit destination field.
 * NOTE(review): lines are elided in this view (loop body, final complement,
 * function close) — confirm against the full file.
 */
3537 instr_alu_ckadd_struct_exec(struct rte_swx_pipeline *p)
3539 struct thread *t = &p->threads[p->thread_id];
3540 struct instruction *ip = t->ip;
3541 uint8_t *dst_struct, *src_struct;
3542 uint16_t *dst16_ptr;
3543 uint32_t *src32_ptr;
3547 TRACE("[Thread %2u] ckadd (struct)\n", p->thread_id);
/* Resolve the destination 16-bit checksum field and the source struct base. */
3550 dst_struct = t->structs[ip->alu.dst.struct_id];
3551 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
3553 src_struct = t->structs[ip->alu.src.struct_id];
3554 src32_ptr = (uint32_t *)&src_struct[0];
/* NOTE(review): the arithmetic in the next comment looks internally
 * inconsistent — 8 32-bit words cover 32 bytes, not 256 (256 bytes would be
 * 64 = 2^6 words). Confirm the real header size bound against the full file.
 */
3556 /* The max number of 32-bit words in a 256-byte header is 8 = 2^3.
3557 * Therefore, in the worst case scenario, a 35-bit number is added to a
3558 * 16-bit number (the input r), so the output r is a 36-bit number.
/* Accumulate one 32-bit word per iteration (src.n_bits is a multiple of 32). */
3560 for (i = 0; i < ip->alu.src.n_bits / 32; i++, src32_ptr++)
/* Fold the wide sum down to 16 bits by repeatedly adding the carries
 * (r >> 16) back into the low 16 bits (RFC 1071 style end-around carry).
 */
3563 /* The first input is a 16-bit number. The second input is a 20-bit
3564 * number. Their sum is a 21-bit number.
3566 r = (r & 0xFFFF) + (r >> 16);
3568 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
3569 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1001E).
3571 r = (r & 0xFFFF) + (r >> 16);
3573 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
3574 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
3575 * 0x1001E), the output r is (0 .. 31). So no carry bit can be
3576 * generated, therefore the output r is always a 16-bit number.
3578 r = (r & 0xFFFF) + (r >> 16);
3583 *dst16_ptr = (uint16_t)r;
/* Upper bound on the number of whitespace-separated tokens in one
 * instruction string.
 */
3589 #define RTE_SWX_INSTRUCTION_TOKENS_MAX 16
/* Translate a single textual instruction into its binary form (*instr) plus
 * per-instruction metadata (*data, e.g. label). "action" is the owning action
 * or NULL when translating the pipeline-level program. "string" is mutated by
 * the tokenizer, so callers must pass a private copy. Returns 0 on success or
 * a negative errno via the CHECK() macro.
 */
3592 instr_translate(struct rte_swx_pipeline *p,
3593 struct action *action,
3595 struct instruction *instr,
3596 struct instruction_data *data)
3598 char *tokens[RTE_SWX_INSTRUCTION_TOKENS_MAX];
3599 int n_tokens = 0, tpos = 0;
3601 /* Parse the instruction string into tokens. */
/* Note: "string" doubles as the strtok_r() save pointer, so each call
 * resumes tokenizing from where the previous one stopped.
 */
3605 token = strtok_r(string, " \t\v", &string);
3609 CHECK(n_tokens < RTE_SWX_INSTRUCTION_TOKENS_MAX, EINVAL);
3611 tokens[n_tokens] = token;
3615 CHECK(n_tokens, EINVAL);
3617 /* Handle the optional instruction label. */
/* A "LABEL : opcode ..." prefix stores the label into *data and advances
 * tpos past the label and the colon; the remaining tokens must still
 * contain an opcode.
 */
3618 if ((n_tokens >= 2) && !strcmp(tokens[1], ":")) {
3619 strcpy(data->label, tokens[0]);
3622 CHECK(n_tokens - tpos, EINVAL);
3625 /* Identify the instruction type. */
/* Dispatch on the opcode token to the per-instruction translate helper,
 * which consumes the operand tokens (argument lists elided in this view).
 */
3626 if (!strcmp(tokens[tpos], "rx"))
3627 return instr_rx_translate(p,
3634 if (!strcmp(tokens[tpos], "tx"))
3635 return instr_tx_translate(p,
3642 if (!strcmp(tokens[tpos], "extract"))
3643 return instr_hdr_extract_translate(p,
3650 if (!strcmp(tokens[tpos], "emit"))
3651 return instr_hdr_emit_translate(p,
3658 if (!strcmp(tokens[tpos], "validate"))
3659 return instr_hdr_validate_translate(p,
3666 if (!strcmp(tokens[tpos], "invalidate"))
3667 return instr_hdr_invalidate_translate(p,
3674 if (!strcmp(tokens[tpos], "mov"))
3675 return instr_mov_translate(p,
3682 if (!strcmp(tokens[tpos], "dma"))
3683 return instr_dma_translate(p,
3690 if (!strcmp(tokens[tpos], "add"))
3691 return instr_alu_add_translate(p,
3698 if (!strcmp(tokens[tpos], "sub"))
3699 return instr_alu_sub_translate(p,
3706 if (!strcmp(tokens[tpos], "ckadd"))
3707 return instr_alu_ckadd_translate(p,
3714 if (!strcmp(tokens[tpos], "cksub"))
3715 return instr_alu_cksub_translate(p,
3722 if (!strcmp(tokens[tpos], "and"))
3723 return instr_alu_and_translate(p,
/* Count how many of the n instructions in "data" jump to "label", by
 * comparing against each instruction's jmp_label. The accumulation/return of
 * "count" is elided in this view.
 */
3734 label_is_used(struct instruction_data *data, uint32_t n, const char *label)
3736 uint32_t count = 0, i;
3741 for (i = 0; i < n; i++)
3742 if (!strcmp(label, data[i].jmp_label))
/* Validate the labels of a translated instruction list: every non-empty
 * label must be unique, and each label records how many instructions jump to
 * it (n_users). Returns 0 on success, negative errno via CHECK() otherwise.
 */
3749 instr_label_check(struct instruction_data *instruction_data,
3750 uint32_t n_instructions)
3754 /* Check that all instruction labels are unique. */
/* O(n^2) pairwise comparison; fine for the small per-action programs
 * this handles.
 */
3755 for (i = 0; i < n_instructions; i++) {
3756 struct instruction_data *data = &instruction_data[i];
3757 char *label = data->label;
3763 for (j = i + 1; j < n_instructions; j++)
3764 CHECK(strcmp(label, data[j].label), EINVAL);
3767 /* Get users for each instruction label. */
3768 for (i = 0; i < n_instructions; i++) {
3769 struct instruction_data *data = &instruction_data[i];
3770 char *label = data->label;
3772 data->n_users = label_is_used(instruction_data,
/* Translate an array of instruction strings into a binary instruction array
 * and attach it to either an action ("a" non-NULL) or to the pipeline-level
 * program (p->instructions). On success the instr array ownership moves to
 * the action/pipeline; error-path cleanup is elided in this view.
 */
3781 instruction_config(struct rte_swx_pipeline *p,
3783 const char **instructions,
3784 uint32_t n_instructions)
3786 struct instruction *instr = NULL;
3787 struct instruction_data *data = NULL;
3788 char *string = NULL;
/* Argument checks: a non-empty, fully populated instruction array. */
3792 CHECK(n_instructions, EINVAL);
3793 CHECK(instructions, EINVAL);
3794 for (i = 0; i < n_instructions; i++)
3795 CHECK(instructions[i], EINVAL);
3797 /* Memory allocation. */
3798 instr = calloc(n_instructions, sizeof(struct instruction));
3804 data = calloc(n_instructions, sizeof(struct instruction_data));
/* Translate each instruction from a private strdup() copy, since the
 * tokenizer in instr_translate() mutates its input string.
 */
3810 for (i = 0; i < n_instructions; i++) {
3811 string = strdup(instructions[i]);
3817 err = instr_translate(p, a, string, &instr[i], &data[i]);
/* Label uniqueness/usage validation over the whole program. */
3824 err = instr_label_check(data, n_instructions);
/* Attach the translated program to its owner. */
3831 a->instructions = instr;
3832 a->n_instructions = n_instructions;
3834 p->instructions = instr;
3835 p->n_instructions = n_instructions;
/* Signature of an instruction handler: executes one instruction on the
 * current thread of the pipeline.
 */
3847 typedef void (*instr_exec_t)(struct rte_swx_pipeline *);
/* Jump table mapping the instruction type (ip->type) to its handler; indexed
 * directly by instr_exec(). The numbered EXTRACT/EMIT/DMA variants are the
 * specialized fast paths for 2..8 consecutive headers.
 */
3849 static instr_exec_t instruction_table[] = {
3850 [INSTR_RX] = instr_rx_exec,
3851 [INSTR_TX] = instr_tx_exec,
3853 [INSTR_HDR_EXTRACT] = instr_hdr_extract_exec,
3854 [INSTR_HDR_EXTRACT2] = instr_hdr_extract2_exec,
3855 [INSTR_HDR_EXTRACT3] = instr_hdr_extract3_exec,
3856 [INSTR_HDR_EXTRACT4] = instr_hdr_extract4_exec,
3857 [INSTR_HDR_EXTRACT5] = instr_hdr_extract5_exec,
3858 [INSTR_HDR_EXTRACT6] = instr_hdr_extract6_exec,
3859 [INSTR_HDR_EXTRACT7] = instr_hdr_extract7_exec,
3860 [INSTR_HDR_EXTRACT8] = instr_hdr_extract8_exec,
3862 [INSTR_HDR_EMIT] = instr_hdr_emit_exec,
3863 [INSTR_HDR_EMIT_TX] = instr_hdr_emit_tx_exec,
3864 [INSTR_HDR_EMIT2_TX] = instr_hdr_emit2_tx_exec,
3865 [INSTR_HDR_EMIT3_TX] = instr_hdr_emit3_tx_exec,
3866 [INSTR_HDR_EMIT4_TX] = instr_hdr_emit4_tx_exec,
3867 [INSTR_HDR_EMIT5_TX] = instr_hdr_emit5_tx_exec,
3868 [INSTR_HDR_EMIT6_TX] = instr_hdr_emit6_tx_exec,
3869 [INSTR_HDR_EMIT7_TX] = instr_hdr_emit7_tx_exec,
3870 [INSTR_HDR_EMIT8_TX] = instr_hdr_emit8_tx_exec,
3872 [INSTR_HDR_VALIDATE] = instr_hdr_validate_exec,
3873 [INSTR_HDR_INVALIDATE] = instr_hdr_invalidate_exec,
/* MOV variants: _S = big-endian swap between header/meta fields, _I =
 * immediate operand (suffix semantics per the translate helpers, elided
 * in this view).
 */
3875 [INSTR_MOV] = instr_mov_exec,
3876 [INSTR_MOV_S] = instr_mov_s_exec,
3877 [INSTR_MOV_I] = instr_mov_i_exec,
3879 [INSTR_DMA_HT] = instr_dma_ht_exec,
3880 [INSTR_DMA_HT2] = instr_dma_ht2_exec,
3881 [INSTR_DMA_HT3] = instr_dma_ht3_exec,
3882 [INSTR_DMA_HT4] = instr_dma_ht4_exec,
3883 [INSTR_DMA_HT5] = instr_dma_ht5_exec,
3884 [INSTR_DMA_HT6] = instr_dma_ht6_exec,
3885 [INSTR_DMA_HT7] = instr_dma_ht7_exec,
3886 [INSTR_DMA_HT8] = instr_dma_ht8_exec,
/* ALU variants: M = metadata operand, H = header operand, I = immediate. */
3888 [INSTR_ALU_ADD] = instr_alu_add_exec,
3889 [INSTR_ALU_ADD_MH] = instr_alu_add_mh_exec,
3890 [INSTR_ALU_ADD_HM] = instr_alu_add_hm_exec,
3891 [INSTR_ALU_ADD_HH] = instr_alu_add_hh_exec,
3892 [INSTR_ALU_ADD_MI] = instr_alu_add_mi_exec,
3893 [INSTR_ALU_ADD_HI] = instr_alu_add_hi_exec,
3895 [INSTR_ALU_SUB] = instr_alu_sub_exec,
3896 [INSTR_ALU_SUB_MH] = instr_alu_sub_mh_exec,
3897 [INSTR_ALU_SUB_HM] = instr_alu_sub_hm_exec,
3898 [INSTR_ALU_SUB_HH] = instr_alu_sub_hh_exec,
3899 [INSTR_ALU_SUB_MI] = instr_alu_sub_mi_exec,
3900 [INSTR_ALU_SUB_HI] = instr_alu_sub_hi_exec,
3902 [INSTR_ALU_CKADD_FIELD] = instr_alu_ckadd_field_exec,
3903 [INSTR_ALU_CKADD_STRUCT] = instr_alu_ckadd_struct_exec,
3904 [INSTR_ALU_CKADD_STRUCT20] = instr_alu_ckadd_struct20_exec,
3905 [INSTR_ALU_CKSUB_FIELD] = instr_alu_cksub_field_exec,
3907 [INSTR_ALU_AND] = instr_alu_and_exec,
3908 [INSTR_ALU_AND_S] = instr_alu_and_s_exec,
3909 [INSTR_ALU_AND_I] = instr_alu_and_i_exec,
/* Execute the current instruction of the current thread by dispatching
 * through instruction_table on the instruction type.
 */
3913 instr_exec(struct rte_swx_pipeline *p)
3915 struct thread *t = &p->threads[p->thread_id];
3916 struct instruction *ip = t->ip;
3917 instr_exec_t instr = instruction_table[ip->type];
/* Look up an action by name in the pipeline's action list; returns the
 * matching element or NULL (return statements elided in this view).
 */
3925 static struct action *
3926 action_find(struct rte_swx_pipeline *p, const char *name)
3928 struct action *elem;
3933 TAILQ_FOREACH(elem, &p->actions, node)
3934 if (strcmp(elem->name, name) == 0)
/* Find a field by name in the action's argument struct type; NULL when the
 * action has no argument struct or the field does not exist.
 */
3940 static struct field *
3941 action_field_find(struct action *a, const char *name)
3943 return a->st ? struct_type_field_find(a->st, name) : NULL;
/* Parse an action-argument reference of the form "t.FIELD": require the "t."
 * prefix, then resolve FIELD in the action's argument struct. The failure
 * return for a missing prefix is elided in this view.
 */
3946 static struct field *
3947 action_field_parse(struct action *action, const char *name)
3949 if (name[0] != 't' || name[1] != '.')
3952 return action_field_find(action, &name[2]);
/* Public API: register a new action with the pipeline. The action has a
 * unique name, an optional argument struct type (previously registered by
 * name), and a program given as instruction strings that is translated here.
 * Returns 0 on success; EINVAL/EEXIST/ENOMEM via CHECK() otherwise.
 */
3956 rte_swx_pipeline_action_config(struct rte_swx_pipeline *p,
3958 const char *args_struct_type_name,
3959 const char **instructions,
3960 uint32_t n_instructions)
3962 struct struct_type *args_struct_type;
3968 CHECK_NAME(name, EINVAL);
3969 CHECK(!action_find(p, name), EEXIST);
/* The argument struct type is optional: NULL name means a no-argument
 * action.
 */
3971 if (args_struct_type_name) {
3972 CHECK_NAME(args_struct_type_name, EINVAL);
3973 args_struct_type = struct_type_find(p, args_struct_type_name);
3974 CHECK(args_struct_type, EINVAL);
3976 args_struct_type = NULL;
3979 /* Node allocation. */
3980 a = calloc(1, sizeof(struct action));
3983 /* Node initialization. */
3984 strcpy(a->name, name);
3985 a->st = args_struct_type;
/* IDs are assigned in registration order; used later as array index. */
3986 a->id = p->n_actions;
3988 /* Instruction translation. */
3989 err = instruction_config(p, a, instructions, n_instructions);
3995 /* Node add to tailq. */
3996 TAILQ_INSERT_TAIL(&p->actions, a, node);
/* Build step: flatten the action list into an array of instruction pointers
 * indexed by action ID, for O(1) dispatch at run-time.
 */
4003 action_build(struct rte_swx_pipeline *p)
4005 struct action *action;
4007 p->action_instructions = calloc(p->n_actions,
4008 sizeof(struct instruction *));
4009 CHECK(p->action_instructions, ENOMEM);
4011 TAILQ_FOREACH(action, &p->actions, node)
4012 p->action_instructions[action->id] = action->instructions;
/* Undo action_build(): release the per-ID instruction pointer array. */
4018 action_build_free(struct rte_swx_pipeline *p)
4020 free(p->action_instructions);
4021 p->action_instructions = NULL;
/* Free all actions: first the build-time artifacts, then each registered
 * action node (loop structure partially elided in this view), including its
 * translated instruction array.
 */
4025 action_free(struct rte_swx_pipeline *p)
4027 action_build_free(p);
4030 struct action *action;
4032 action = TAILQ_FIRST(&p->actions);
4036 TAILQ_REMOVE(&p->actions, action, node);
4037 free(action->instructions);
/* Look up a table type by name; returns the matching element or NULL
 * (return statements elided in this view).
 */
4045 static struct table_type *
4046 table_type_find(struct rte_swx_pipeline *p, const char *name)
4048 struct table_type *elem;
4050 TAILQ_FOREACH(elem, &p->table_types, node)
4051 if (strcmp(elem->name, name) == 0)
/* Pick the table type to use for a given match type: prefer the recommended
 * type when it exists AND has the right match type, otherwise fall back to
 * the first registered type with that match type. Returns NULL when nothing
 * fits (return paths elided in this view).
 */
4057 static struct table_type *
4058 table_type_resolve(struct rte_swx_pipeline *p,
4059 const char *recommended_type_name,
4060 enum rte_swx_table_match_type match_type)
4062 struct table_type *elem;
4064 /* Only consider the recommended type if the match type is correct. */
4065 if (recommended_type_name)
4066 TAILQ_FOREACH(elem, &p->table_types, node)
4067 if (!strcmp(elem->name, recommended_type_name) &&
4068 (elem->match_type == match_type))
4071 /* Ignore the recommended type and get the first element with this match
4074 TAILQ_FOREACH(elem, &p->table_types, node)
4075 if (elem->match_type == match_type)
/* Look up a table by name; returns the matching element or NULL (return
 * statements elided in this view).
 */
4081 static struct table *
4082 table_find(struct rte_swx_pipeline *p, const char *name)
4086 TAILQ_FOREACH(elem, &p->tables, node)
4087 if (strcmp(elem->name, name) == 0)
/* Look up a table by numeric ID; returns the matching element or, with
 * TAILQ_FOREACH exhausted, NULL (return statements elided in this view).
 */
4093 static struct table *
4094 table_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
4096 struct table *table = NULL;
4098 TAILQ_FOREACH(table, &p->tables, node)
4099 if (table->id == id)
/* Public API: register a table implementation (type) under a unique name.
 * The ops vector must provide at least create/lkp/free; the ops struct is
 * copied, so the caller's copy may go away. Returns 0 on success;
 * EINVAL/EEXIST/ENOMEM via CHECK() otherwise.
 */
4106 rte_swx_pipeline_table_type_register(struct rte_swx_pipeline *p,
4108 enum rte_swx_table_match_type match_type,
4109 struct rte_swx_table_ops *ops)
4111 struct table_type *elem;
4115 CHECK_NAME(name, EINVAL);
4116 CHECK(!table_type_find(p, name), EEXIST);
/* Mandatory callbacks of the table implementation. */
4119 CHECK(ops->create, EINVAL);
4120 CHECK(ops->lkp, EINVAL);
4121 CHECK(ops->free, EINVAL);
4123 /* Node allocation. */
4124 elem = calloc(1, sizeof(struct table_type));
4125 CHECK(elem, ENOMEM);
4127 /* Node initialization. */
4128 strcpy(elem->name, name);
4129 elem->match_type = match_type;
4130 memcpy(&elem->ops, ops, sizeof(*ops));
4132 /* Node add to tailq. */
4133 TAILQ_INSERT_TAIL(&p->table_types, elem, node);
/* Derive the overall table match type from the per-field match types:
 * EXACT when every field is exact-match; LPM when exactly the last field is
 * non-exact and it is LPM; WILDCARD in all other mixed cases. The loop
 * scans for the first non-exact field (break elided in this view), leaving
 * i at its index.
 */
4138 static enum rte_swx_table_match_type
4139 table_match_type_resolve(struct rte_swx_match_field_params *fields,
4144 for (i = 0; i < n_fields; i++)
4145 if (fields[i].match_type != RTE_SWX_TABLE_MATCH_EXACT)
4149 return RTE_SWX_TABLE_MATCH_EXACT;
4151 if ((i == n_fields - 1) &&
4152 (fields[i].match_type == RTE_SWX_TABLE_MATCH_LPM))
4153 return RTE_SWX_TABLE_MATCH_LPM;
4155 return RTE_SWX_TABLE_MATCH_WILDCARD;
/* Public API: register a table with the pipeline. Validates the match fields
 * (all header fields from a single header, or all metadata fields, in
 * strictly increasing offset order), the action list (every name must be a
 * registered action), and the default action (must be one of this table's
 * actions; default action data only allowed when the action has an argument
 * struct). Then resolves the table type, allocates and initializes the table
 * node, and appends it to the pipeline's table list. Returns 0 on success;
 * EINVAL/EEXIST/ENOMEM via CHECK() otherwise.
 */
4159 rte_swx_pipeline_table_config(struct rte_swx_pipeline *p,
4161 struct rte_swx_pipeline_table_params *params,
4162 const char *recommended_table_type_name,
4166 struct table_type *type;
4168 struct action *default_action;
4169 struct header *header = NULL;
4171 uint32_t offset_prev = 0, action_data_size_max = 0, i;
4175 CHECK_NAME(name, EINVAL);
4176 CHECK(!table_find(p, name), EEXIST);
4178 CHECK(params, EINVAL);
/* Match field checks: each field must resolve as either a header field
 * (hf) or a metadata field (mf); the first field fixes whether the key
 * lives in a header or in metadata.
 */
4181 CHECK(!params->n_fields || params->fields, EINVAL);
4182 for (i = 0; i < params->n_fields; i++) {
4183 struct rte_swx_match_field_params *field = &params->fields[i];
4185 struct field *hf, *mf;
4188 CHECK_NAME(field->name, EINVAL);
4190 hf = header_field_parse(p, field->name, &h);
4191 mf = metadata_field_parse(p, field->name);
4192 CHECK(hf || mf, EINVAL);
4194 offset = hf ? hf->offset : mf->offset;
/* First field: record key location (header vs metadata). */
4197 is_header = hf ? 1 : 0;
4198 header = hf ? h : NULL;
4199 offset_prev = offset;
/* Subsequent fields: same header (or all metadata) and strictly
 * increasing offsets, so the key is a contiguous ordered span.
 */
4204 CHECK((is_header && hf && (h->id == header->id)) ||
4205 (!is_header && mf), EINVAL);
4207 CHECK(offset > offset_prev, EINVAL);
4208 offset_prev = offset;
4211 /* Action checks. */
4212 CHECK(params->n_actions, EINVAL);
4213 CHECK(params->action_names, EINVAL);
4214 for (i = 0; i < params->n_actions; i++) {
4215 const char *action_name = params->action_names[i];
4217 uint32_t action_data_size;
4219 CHECK(action_name, EINVAL);
4221 a = action_find(p, action_name);
/* Track the largest action argument struct; it sizes the default
 * action data buffer and the run-time action data.
 */
4224 action_data_size = a->st ? a->st->n_bits / 8 : 0;
4225 if (action_data_size > action_data_size_max)
4226 action_data_size_max = action_data_size;
/* Default action checks: the name must appear in this table's own action
 * list. Fix: bound the search by params->n_actions (the size of the
 * params->action_names array) instead of p->n_actions (the number of
 * actions registered with the whole pipeline), which could read past the
 * end of the caller-supplied array.
 */
4229 CHECK(params->default_action_name, EINVAL);
4230 for (i = 0; i < params->n_actions; i++)
4231 if (!strcmp(params->action_names[i],
4232 params->default_action_name))
4234 CHECK(i < params->n_actions, EINVAL);
4235 default_action = action_find(p, params->default_action_name);
/* Default action data is mandatory iff the default action takes args. */
4236 CHECK((default_action->st && params->default_action_data) ||
4237 !params->default_action_data, EINVAL);
4239 /* Table type checks. */
4240 if (params->n_fields) {
4241 enum rte_swx_table_match_type match_type;
4243 match_type = table_match_type_resolve(params->fields,
4245 type = table_type_resolve(p,
4246 recommended_table_type_name,
4248 CHECK(type, EINVAL);
4253 /* Memory allocation. */
4254 t = calloc(1, sizeof(struct table));
4257 t->fields = calloc(params->n_fields, sizeof(struct match_field));
4263 t->actions = calloc(params->n_actions, sizeof(struct action *));
4270 if (action_data_size_max) {
4271 t->default_action_data = calloc(1, action_data_size_max);
4272 if (!t->default_action_data) {
4280 /* Node initialization. */
4281 strcpy(t->name, name);
4282 if (args && args[0])
4283 strcpy(t->args, args);
/* Re-resolve each match field into the table's field descriptors. */
4286 for (i = 0; i < params->n_fields; i++) {
4287 struct rte_swx_match_field_params *field = &params->fields[i];
4288 struct match_field *f = &t->fields[i];
4290 f->match_type = field->match_type;
4291 f->field = is_header ?
4292 header_field_parse(p, field->name, NULL) :
4293 metadata_field_parse(p, field->name);
4295 t->n_fields = params->n_fields;
4296 t->is_header = is_header;
4299 for (i = 0; i < params->n_actions; i++)
4300 t->actions[i] = action_find(p, params->action_names[i]);
4301 t->default_action = default_action;
/* Snapshot the caller's default action data into the table node. */
4302 if (default_action->st)
4303 memcpy(t->default_action_data,
4304 params->default_action_data,
4305 default_action->st->n_bits / 8);
4306 t->n_actions = params->n_actions;
4307 t->default_action_is_const = params->default_action_is_const;
4308 t->action_data_size_max = action_data_size_max;
/* IDs are assigned in registration order; used later as array index. */
4311 t->id = p->n_tables;
4313 /* Node add to tailq. */
4314 TAILQ_INSERT_TAIL(&p->tables, t, node);
/* Convert a table node into the generic rte_swx_table_params expected by the
 * table implementation: compute the key byte offset/size from the first and
 * last match fields, build a byte-granular key mask covering the match
 * fields, and size the action data to the largest action argument struct.
 * Caller owns the returned params (free with table_params_free()); error
 * paths are elided in this view.
 */
4320 static struct rte_swx_table_params *
4321 table_params_get(struct table *table)
4323 struct rte_swx_table_params *params;
4324 struct field *first, *last;
4326 uint32_t key_size, key_offset, action_data_size, i;
4328 /* Memory allocation. */
4329 params = calloc(1, sizeof(struct rte_swx_table_params));
4333 /* Key offset and size. */
/* Fields were validated to be in increasing offset order, so the key is
 * the [first, last] byte span. Offsets/sizes are in bits, hence the /8
 * (assumes byte-aligned fields — TODO confirm against field validation).
 */
4334 first = table->fields[0].field;
4335 last = table->fields[table->n_fields - 1].field;
4336 key_offset = first->offset / 8;
4337 key_size = (last->offset + last->n_bits - first->offset) / 8;
4339 /* Memory allocation. */
4340 key_mask = calloc(1, key_size);
/* Set mask bytes to 0xFF over each match field; gap bytes between fields
 * stay zero (don't-care).
 */
4347 for (i = 0; i < table->n_fields; i++) {
4348 struct field *f = table->fields[i].field;
4349 uint32_t start = (f->offset - first->offset) / 8;
4350 size_t size = f->n_bits / 8;
4352 memset(&key_mask[start], 0xFF, size);
4355 /* Action data size. */
4356 action_data_size = 0;
4357 for (i = 0; i < table->n_actions; i++) {
4358 struct action *action = table->actions[i];
4359 uint32_t ads = action->st ? action->st->n_bits / 8 : 0;
4361 if (ads > action_data_size)
4362 action_data_size = ads;
/* Fill in the output params; key_mask ownership moves into params. */
4366 params->match_type = table->type->match_type;
4367 params->key_size = key_size;
4368 params->key_offset = key_offset;
4369 params->key_mask0 = key_mask;
4370 params->action_data_size = action_data_size;
4371 params->n_keys_max = table->size;
/* Free a params object produced by table_params_get(), including the key
 * mask it owns.
 */
4377 table_params_free(struct rte_swx_table_params *params)
4382 free(params->key_mask0);
/* Build step: allocate the per-table run-time state array (indexed by table
 * ID) and, for each table with a real implementation, create the underlying
 * table object and snapshot the default action data and ID. On any CHECK()
 * failure the caller (rte_swx_pipeline_build) rolls back via
 * table_state_build_free().
 */
4387 table_state_build(struct rte_swx_pipeline *p)
4389 struct table *table;
4391 p->table_state = calloc(p->n_tables,
4392 sizeof(struct rte_swx_table_state));
4393 CHECK(p->table_state, ENOMEM);
4395 TAILQ_FOREACH(table, &p->tables, node) {
4396 struct rte_swx_table_state *ts = &p->table_state[table->id];
4399 struct rte_swx_table_params *params;
/* Translate the table node into implementation-level parameters. */
4402 params = table_params_get(table);
4403 CHECK(params, ENOMEM);
/* Instantiate the table implementation (create args partially
 * elided in this view); params are only needed for creation.
 */
4405 ts->obj = table->type->ops.create(params,
4410 table_params_free(params);
4411 CHECK(ts->obj, ENODEV);
4414 /* ts->default_action_data. */
4415 if (table->action_data_size_max) {
4416 ts->default_action_data =
4417 malloc(table->action_data_size_max);
4418 CHECK(ts->default_action_data, ENOMEM);
4420 memcpy(ts->default_action_data,
4421 table->default_action_data,
4422 table->action_data_size_max);
4425 /* ts->default_action_id. */
4426 ts->default_action_id = table->default_action->id;
/* Undo table_state_build(): free each table's implementation object and
 * default action data, then the state array itself. Safe to call when the
 * build never ran (table_state is NULL) or only partially completed.
 */
4433 table_state_build_free(struct rte_swx_pipeline *p)
4437 if (!p->table_state)
4440 for (i = 0; i < p->n_tables; i++) {
4441 struct rte_swx_table_state *ts = &p->table_state[i];
4442 struct table *table = table_find_by_id(p, i);
/* Only tables with a real implementation own a table object. */
4445 if (table->type && ts->obj)
4446 table->type->ops.free(ts->obj);
4448 /* ts->default_action_data. */
4449 free(ts->default_action_data);
4452 free(p->table_state);
4453 p->table_state = NULL;
/* Free all table state; currently identical to the build-free step. */
4457 table_state_free(struct rte_swx_pipeline *p)
4459 table_state_build_free(p)
/* Lookup stub installed for tables with no match fields (no implementation):
 * completes immediately with no match. Trailing parameter(s) and the miss
 * reporting are elided in this view.
 */
4463 table_stub_lkp(void *table __rte_unused,
4464 void *mailbox __rte_unused,
4465 uint8_t **key __rte_unused,
4466 uint64_t *action_id __rte_unused,
4467 uint8_t **action_data __rte_unused,
4471 return 1; /* DONE. */
/* Build step: for every thread, allocate the per-table run-time array and
 * wire each entry with its lookup function, a private mailbox sized by the
 * implementation, and a pointer to the struct holding the key (the match
 * header or the metadata struct). Tables without an implementation get the
 * no-op table_stub_lkp.
 */
4475 table_build(struct rte_swx_pipeline *p)
4479 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
4480 struct thread *t = &p->threads[i];
4481 struct table *table;
4483 t->tables = calloc(p->n_tables, sizeof(struct table_runtime));
4484 CHECK(t->tables, ENOMEM);
4486 TAILQ_FOREACH(table, &p->tables, node) {
4487 struct table_runtime *r = &t->tables[table->id];
/* Mailbox size is implementation-defined per table type. */
4492 size = table->type->ops.mailbox_size_get();
4495 r->func = table->type->ops.lkp;
4499 r->mailbox = calloc(1, size);
4500 CHECK(r->mailbox, ENOMEM);
/* The key lives either in the match header's struct or in the
 * thread's metadata struct.
 */
4504 r->key = table->is_header ?
4505 &t->structs[table->header->struct_id] :
4506 &t->structs[p->metadata_struct_id];
/* No match fields: install the stub lookup. */
4508 r->func = table_stub_lkp;
/* Undo table_build(): release each thread's per-table run-time entries
 * (mailbox frees elided in this view) and the array itself.
 */
4517 table_build_free(struct rte_swx_pipeline *p)
4521 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
4522 struct thread *t = &p->threads[i];
4528 for (j = 0; j < p->n_tables; j++) {
4529 struct table_runtime *r = &t->tables[j];
/* Free all tables and table types: first the build-time artifacts, then each
 * table node (with its action pointer array and default action data), then
 * each registered table type. Loop structure is partially elided in this
 * view.
 */
4540 table_free(struct rte_swx_pipeline *p)
4542 table_build_free(p);
4548 elem = TAILQ_FIRST(&p->tables);
4552 TAILQ_REMOVE(&p->tables, elem, node);
4554 free(elem->actions);
4555 free(elem->default_action_data);
/* Table types. */
4561 struct table_type *elem;
4563 elem = TAILQ_FIRST(&p->table_types);
4567 TAILQ_REMOVE(&p->table_types, elem, node);
/* Public API: allocate and initialize an empty pipeline object on the given
 * NUMA node and return it through *p. All registry lists start empty; the
 * caller then registers types/ports/actions/tables and finally calls
 * rte_swx_pipeline_build().
 */
4576 rte_swx_pipeline_config(struct rte_swx_pipeline **p, int numa_node)
4578 struct rte_swx_pipeline *pipeline;
4580 /* Check input parameters. */
4583 /* Memory allocation. */
4584 pipeline = calloc(1, sizeof(struct rte_swx_pipeline));
4585 CHECK(pipeline, ENOMEM);
4587 /* Initialization. */
4588 TAILQ_INIT(&pipeline->struct_types);
4589 TAILQ_INIT(&pipeline->port_in_types);
4590 TAILQ_INIT(&pipeline->ports_in);
4591 TAILQ_INIT(&pipeline->port_out_types);
4592 TAILQ_INIT(&pipeline->ports_out);
4593 TAILQ_INIT(&pipeline->extern_types);
4594 TAILQ_INIT(&pipeline->extern_objs);
4595 TAILQ_INIT(&pipeline->extern_funcs);
4596 TAILQ_INIT(&pipeline->headers);
4597 TAILQ_INIT(&pipeline->actions);
4598 TAILQ_INIT(&pipeline->table_types);
4599 TAILQ_INIT(&pipeline->tables);
4601 pipeline->n_structs = 1; /* Struct 0 is reserved for action_data. */
4602 pipeline->numa_node = numa_node;
/* Public API: destroy a pipeline and everything it owns (instructions, table
 * state, externs, ...; the remaining free calls are elided in this view).
 */
4609 rte_swx_pipeline_free(struct rte_swx_pipeline *p)
4614 free(p->instructions);
4616 table_state_free(p);
4621 extern_func_free(p);
/* Public API: set the pipeline-level instruction program (action == NULL in
 * instruction_config) and reset every thread's instruction pointer to the
 * start of the new program.
 */
4631 rte_swx_pipeline_instructions_config(struct rte_swx_pipeline *p,
4632 const char **instructions,
4633 uint32_t n_instructions)
4638 err = instruction_config(p, NULL, instructions, n_instructions);
4642 /* Thread instruction pointer reset. */
4643 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
4644 struct thread *t = &p->threads[i];
4646 thread_ip_reset(p, t);
/* Public API: one-shot build of all run-time data structures, in dependency
 * order (ports, structs, externs, headers, metadata, actions, tables, table
 * state). May only run once per pipeline (build_done guard). On failure,
 * the error path below unwinds every build step in reverse order, so the
 * pipeline is left in its pre-build state.
 */
4653 rte_swx_pipeline_build(struct rte_swx_pipeline *p)
4658 CHECK(p->build_done == 0, EEXIST);
4660 status = port_in_build(p);
4664 status = port_out_build(p);
4668 status = struct_build(p);
4672 status = extern_obj_build(p);
4676 status = extern_func_build(p);
4680 status = header_build(p);
4684 status = metadata_build(p);
4688 status = action_build(p);
4692 status = table_build(p);
4696 status = table_state_build(p);
/* Error path: roll back all build steps in reverse order. */
4704 table_state_build_free(p);
4705 table_build_free(p);
4706 action_build_free(p);
4707 metadata_build_free(p);
4708 header_build_free(p);
4709 extern_func_build_free(p);
4710 extern_obj_build_free(p);
4711 port_out_build_free(p);
4712 port_in_build_free(p);
4713 struct_build_free(p);
/* Public API: execute up to n_instructions instructions of the pipeline
 * program (loop body elided in this view).
 */
4719 rte_swx_pipeline_run(struct rte_swx_pipeline *p, uint32_t n_instructions)
4723 for (i = 0; i < n_instructions; i++)
/* Public API: expose the pipeline's table state array (valid only after a
 * successful build). Error return value is elided in this view.
 */
4731 rte_swx_pipeline_table_state_get(struct rte_swx_pipeline *p,
4732 struct rte_swx_table_state **table_state)
4734 if (!p || !table_state || !p->build_done)
4737 *table_state = p->table_state;
4742 rte_swx_pipeline_table_state_set(struct rte_swx_pipeline *p,
4743 struct rte_swx_table_state *table_state)
4745 if (!p || !table_state || !p->build_done)
4748 p->table_state = table_state;