1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Intel Corporation
11 #include <rte_common.h>
12 #include <rte_prefetch.h>
13 #include <rte_byteorder.h>
15 #include "rte_swx_pipeline.h"
16 #include "rte_swx_ctl.h"
18 #define CHECK(condition, err_code) \
24 #define CHECK_NAME(name, err_code) \
25 CHECK((name) && (name)[0], err_code)
32 #define TRACE(...) printf(__VA_ARGS__)
37 #define ntoh64(x) rte_be_to_cpu_64(x)
38 #define hton64(x) rte_cpu_to_be_64(x)
44 char name[RTE_SWX_NAME_SIZE];
50 TAILQ_ENTRY(struct_type) node;
51 char name[RTE_SWX_NAME_SIZE];
57 TAILQ_HEAD(struct_type_tailq, struct_type);
63 TAILQ_ENTRY(port_in_type) node;
64 char name[RTE_SWX_NAME_SIZE];
65 struct rte_swx_port_in_ops ops;
68 TAILQ_HEAD(port_in_type_tailq, port_in_type);
71 TAILQ_ENTRY(port_in) node;
72 struct port_in_type *type;
77 TAILQ_HEAD(port_in_tailq, port_in);
79 struct port_in_runtime {
80 rte_swx_port_in_pkt_rx_t pkt_rx;
87 struct port_out_type {
88 TAILQ_ENTRY(port_out_type) node;
89 char name[RTE_SWX_NAME_SIZE];
90 struct rte_swx_port_out_ops ops;
93 TAILQ_HEAD(port_out_type_tailq, port_out_type);
96 TAILQ_ENTRY(port_out) node;
97 struct port_out_type *type;
102 TAILQ_HEAD(port_out_tailq, port_out);
104 struct port_out_runtime {
105 rte_swx_port_out_pkt_tx_t pkt_tx;
106 rte_swx_port_out_flush_t flush;
113 struct extern_type_member_func {
114 TAILQ_ENTRY(extern_type_member_func) node;
115 char name[RTE_SWX_NAME_SIZE];
116 rte_swx_extern_type_member_func_t func;
120 TAILQ_HEAD(extern_type_member_func_tailq, extern_type_member_func);
123 TAILQ_ENTRY(extern_type) node;
124 char name[RTE_SWX_NAME_SIZE];
125 struct struct_type *mailbox_struct_type;
126 rte_swx_extern_type_constructor_t constructor;
127 rte_swx_extern_type_destructor_t destructor;
128 struct extern_type_member_func_tailq funcs;
132 TAILQ_HEAD(extern_type_tailq, extern_type);
135 TAILQ_ENTRY(extern_obj) node;
136 char name[RTE_SWX_NAME_SIZE];
137 struct extern_type *type;
143 TAILQ_HEAD(extern_obj_tailq, extern_obj);
145 #ifndef RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX
146 #define RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX 8
149 struct extern_obj_runtime {
152 rte_swx_extern_type_member_func_t funcs[RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX];
159 TAILQ_ENTRY(extern_func) node;
160 char name[RTE_SWX_NAME_SIZE];
161 struct struct_type *mailbox_struct_type;
162 rte_swx_extern_func_t func;
167 TAILQ_HEAD(extern_func_tailq, extern_func);
169 struct extern_func_runtime {
171 rte_swx_extern_func_t func;
178 TAILQ_ENTRY(header) node;
179 char name[RTE_SWX_NAME_SIZE];
180 struct struct_type *st;
185 TAILQ_HEAD(header_tailq, header);
187 struct header_runtime {
191 struct header_out_runtime {
201 /* Packet headers are always in Network Byte Order (NBO), i.e. big endian.
202 * Packet meta-data fields are always assumed to be in Host Byte Order (HBO).
203 * Table entry fields can be in either NBO or HBO; they are assumed to be in HBO
204 * when transferred to packet meta-data and in NBO when transferred to packet
208 /* Notation conventions:
209 * -Header field: H = h.header.field (dst/src)
210 * -Meta-data field: M = m.field (dst/src)
211 * -Extern object mailbox field: E = e.field (dst/src)
212 * -Extern function mailbox field: F = f.field (dst/src)
213 * -Table action data field: T = t.field (src only)
214 * -Immediate value: I = 32-bit unsigned value (src only)
217 enum instruction_type {
224 /* extract h.header */
245 /* validate h.header */
248 /* invalidate h.header */
249 INSTR_HDR_INVALIDATE,
253 * dst = HMEF, src = HMEFTI
255 INSTR_MOV, /* dst = MEF, src = MEFT */
256 INSTR_MOV_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
257 INSTR_MOV_I, /* dst = HMEF, src = I */
259 /* dma h.header t.field
260 * memcpy(h.header, t.field, sizeof(h.header))
273 * dst = HMEF, src = HMEFTI
275 INSTR_ALU_ADD, /* dst = MEF, src = MEF */
276 INSTR_ALU_ADD_MH, /* dst = MEF, src = H */
277 INSTR_ALU_ADD_HM, /* dst = H, src = MEF */
278 INSTR_ALU_ADD_HH, /* dst = H, src = H */
279 INSTR_ALU_ADD_MI, /* dst = MEF, src = I */
280 INSTR_ALU_ADD_HI, /* dst = H, src = I */
284 * dst = HMEF, src = HMEFTI
286 INSTR_ALU_SUB, /* dst = MEF, src = MEF */
287 INSTR_ALU_SUB_MH, /* dst = MEF, src = H */
288 INSTR_ALU_SUB_HM, /* dst = H, src = MEF */
289 INSTR_ALU_SUB_HH, /* dst = H, src = H */
290 INSTR_ALU_SUB_MI, /* dst = MEF, src = I */
291 INSTR_ALU_SUB_HI, /* dst = H, src = I */
294 * dst = dst '+ src[0:1] '+ src[2:3] + ...
295 * dst = H, src = {H, h.header}
297 INSTR_ALU_CKADD_FIELD, /* src = H */
298 INSTR_ALU_CKADD_STRUCT20, /* src = h.header, with sizeof(header) = 20 */
299 INSTR_ALU_CKADD_STRUCT, /* src = h.hdeader, with any sizeof(header) */
305 INSTR_ALU_CKSUB_FIELD,
309 * dst = HMEF, src = HMEFTI
311 INSTR_ALU_AND, /* dst = MEF, src = MEFT */
312 INSTR_ALU_AND_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
313 INSTR_ALU_AND_I, /* dst = HMEF, src = I */
317 * dst = HMEF, src = HMEFTI
319 INSTR_ALU_OR, /* dst = MEF, src = MEFT */
320 INSTR_ALU_OR_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
321 INSTR_ALU_OR_I, /* dst = HMEF, src = I */
324 struct instr_operand {
339 uint8_t header_id[8];
340 uint8_t struct_id[8];
345 struct instr_hdr_validity {
349 struct instr_dst_src {
350 struct instr_operand dst;
352 struct instr_operand src;
359 uint8_t header_id[8];
360 uint8_t struct_id[8];
371 enum instruction_type type;
374 struct instr_hdr_validity valid;
375 struct instr_dst_src mov;
376 struct instr_dma dma;
377 struct instr_dst_src alu;
381 struct instruction_data {
382 char label[RTE_SWX_NAME_SIZE];
383 char jmp_label[RTE_SWX_NAME_SIZE];
384 uint32_t n_users; /* user = jmp instruction to this instruction. */
392 TAILQ_ENTRY(action) node;
393 char name[RTE_SWX_NAME_SIZE];
394 struct struct_type *st;
395 struct instruction *instructions;
396 uint32_t n_instructions;
400 TAILQ_HEAD(action_tailq, action);
406 TAILQ_ENTRY(table_type) node;
407 char name[RTE_SWX_NAME_SIZE];
408 enum rte_swx_table_match_type match_type;
409 struct rte_swx_table_ops ops;
412 TAILQ_HEAD(table_type_tailq, table_type);
415 enum rte_swx_table_match_type match_type;
420 TAILQ_ENTRY(table) node;
421 char name[RTE_SWX_NAME_SIZE];
422 char args[RTE_SWX_NAME_SIZE];
423 struct table_type *type; /* NULL when n_fields == 0. */
426 struct match_field *fields;
428 int is_header; /* Only valid when n_fields > 0. */
429 struct header *header; /* Only valid when n_fields > 0. */
432 struct action **actions;
433 struct action *default_action;
434 uint8_t *default_action_data;
436 int default_action_is_const;
437 uint32_t action_data_size_max;
443 TAILQ_HEAD(table_tailq, table);
445 struct table_runtime {
446 rte_swx_table_lookup_t func;
456 struct rte_swx_pkt pkt;
462 /* Packet headers. */
463 struct header_runtime *headers; /* Extracted or generated headers. */
464 struct header_out_runtime *headers_out; /* Emitted headers. */
465 uint8_t *header_storage;
466 uint8_t *header_out_storage;
467 uint64_t valid_headers;
468 uint32_t n_headers_out;
470 /* Packet meta-data. */
474 struct table_runtime *tables;
475 struct rte_swx_table_state *table_state;
477 int hit; /* 0 = Miss, 1 = Hit. */
479 /* Extern objects and functions. */
480 struct extern_obj_runtime *extern_objs;
481 struct extern_func_runtime *extern_funcs;
484 struct instruction *ip;
485 struct instruction *ret;
488 #define MASK64_BIT_GET(mask, pos) ((mask) & (1LLU << (pos)))
489 #define MASK64_BIT_SET(mask, pos) ((mask) | (1LLU << (pos)))
490 #define MASK64_BIT_CLR(mask, pos) ((mask) & ~(1LLU << (pos)))
492 #define ALU(thread, ip, operator) \
494 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
495 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
496 uint64_t dst64 = *dst64_ptr; \
497 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
498 uint64_t dst = dst64 & dst64_mask; \
500 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
501 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
502 uint64_t src64 = *src64_ptr; \
503 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
504 uint64_t src = src64 & src64_mask; \
506 uint64_t result = dst operator src; \
508 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
511 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
513 #define ALU_S(thread, ip, operator) \
515 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
516 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
517 uint64_t dst64 = *dst64_ptr; \
518 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
519 uint64_t dst = dst64 & dst64_mask; \
521 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
522 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
523 uint64_t src64 = *src64_ptr; \
524 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
526 uint64_t result = dst operator src; \
528 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
533 #define ALU_HM(thread, ip, operator) \
535 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
536 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
537 uint64_t dst64 = *dst64_ptr; \
538 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
539 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
541 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
542 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
543 uint64_t src64 = *src64_ptr; \
544 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
545 uint64_t src = src64 & src64_mask; \
547 uint64_t result = dst operator src; \
548 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
550 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
553 #define ALU_HH(thread, ip, operator) \
555 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
556 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
557 uint64_t dst64 = *dst64_ptr; \
558 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
559 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
561 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
562 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
563 uint64_t src64 = *src64_ptr; \
564 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
566 uint64_t result = dst operator src; \
567 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
569 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
581 #define ALU_I(thread, ip, operator) \
583 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
584 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
585 uint64_t dst64 = *dst64_ptr; \
586 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
587 uint64_t dst = dst64 & dst64_mask; \
589 uint64_t src = (ip)->alu.src_val; \
591 uint64_t result = dst operator src; \
593 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
598 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
600 #define ALU_HI(thread, ip, operator) \
602 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
603 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
604 uint64_t dst64 = *dst64_ptr; \
605 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
606 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
608 uint64_t src = (ip)->alu.src_val; \
610 uint64_t result = dst operator src; \
611 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
613 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
622 #define MOV(thread, ip) \
624 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
625 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
626 uint64_t dst64 = *dst64_ptr; \
627 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
629 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
630 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
631 uint64_t src64 = *src64_ptr; \
632 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \
633 uint64_t src = src64 & src64_mask; \
635 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
638 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
640 #define MOV_S(thread, ip) \
642 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
643 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
644 uint64_t dst64 = *dst64_ptr; \
645 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
647 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
648 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
649 uint64_t src64 = *src64_ptr; \
650 uint64_t src = ntoh64(src64) >> (64 - (ip)->mov.src.n_bits); \
652 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
661 #define MOV_I(thread, ip) \
663 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
664 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
665 uint64_t dst64 = *dst64_ptr; \
666 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
668 uint64_t src = (ip)->mov.src_val; \
670 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
673 #define METADATA_READ(thread, offset, n_bits) \
675 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
676 uint64_t m64 = *m64_ptr; \
677 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
681 #define METADATA_WRITE(thread, offset, n_bits, value) \
683 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
684 uint64_t m64 = *m64_ptr; \
685 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
687 uint64_t m_new = value; \
689 *m64_ptr = (m64 & ~m64_mask) | (m_new & m64_mask); \
692 #ifndef RTE_SWX_PIPELINE_THREADS_MAX
693 #define RTE_SWX_PIPELINE_THREADS_MAX 16
696 struct rte_swx_pipeline {
697 struct struct_type_tailq struct_types;
698 struct port_in_type_tailq port_in_types;
699 struct port_in_tailq ports_in;
700 struct port_out_type_tailq port_out_types;
701 struct port_out_tailq ports_out;
702 struct extern_type_tailq extern_types;
703 struct extern_obj_tailq extern_objs;
704 struct extern_func_tailq extern_funcs;
705 struct header_tailq headers;
706 struct struct_type *metadata_st;
707 uint32_t metadata_struct_id;
708 struct action_tailq actions;
709 struct table_type_tailq table_types;
710 struct table_tailq tables;
712 struct port_in_runtime *in;
713 struct port_out_runtime *out;
714 struct instruction **action_instructions;
715 struct rte_swx_table_state *table_state;
716 struct instruction *instructions;
717 struct thread threads[RTE_SWX_PIPELINE_THREADS_MAX];
721 uint32_t n_ports_out;
722 uint32_t n_extern_objs;
723 uint32_t n_extern_funcs;
729 uint32_t n_instructions;
737 static struct struct_type *
738 struct_type_find(struct rte_swx_pipeline *p, const char *name)
740 struct struct_type *elem;
742 TAILQ_FOREACH(elem, &p->struct_types, node)
743 if (strcmp(elem->name, name) == 0)
749 static struct field *
750 struct_type_field_find(struct struct_type *st, const char *name)
754 for (i = 0; i < st->n_fields; i++) {
755 struct field *f = &st->fields[i];
757 if (strcmp(f->name, name) == 0)
765 rte_swx_pipeline_struct_type_register(struct rte_swx_pipeline *p,
767 struct rte_swx_field_params *fields,
770 struct struct_type *st;
774 CHECK_NAME(name, EINVAL);
775 CHECK(fields, EINVAL);
776 CHECK(n_fields, EINVAL);
778 for (i = 0; i < n_fields; i++) {
779 struct rte_swx_field_params *f = &fields[i];
782 CHECK_NAME(f->name, EINVAL);
783 CHECK(f->n_bits, EINVAL);
784 CHECK(f->n_bits <= 64, EINVAL);
785 CHECK((f->n_bits & 7) == 0, EINVAL);
787 for (j = 0; j < i; j++) {
788 struct rte_swx_field_params *f_prev = &fields[j];
790 CHECK(strcmp(f->name, f_prev->name), EINVAL);
794 CHECK(!struct_type_find(p, name), EEXIST);
796 /* Node allocation. */
797 st = calloc(1, sizeof(struct struct_type));
800 st->fields = calloc(n_fields, sizeof(struct field));
806 /* Node initialization. */
807 strcpy(st->name, name);
808 for (i = 0; i < n_fields; i++) {
809 struct field *dst = &st->fields[i];
810 struct rte_swx_field_params *src = &fields[i];
812 strcpy(dst->name, src->name);
813 dst->n_bits = src->n_bits;
814 dst->offset = st->n_bits;
816 st->n_bits += src->n_bits;
818 st->n_fields = n_fields;
820 /* Node add to tailq. */
821 TAILQ_INSERT_TAIL(&p->struct_types, st, node);
827 struct_build(struct rte_swx_pipeline *p)
831 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
832 struct thread *t = &p->threads[i];
834 t->structs = calloc(p->n_structs, sizeof(uint8_t *));
835 CHECK(t->structs, ENOMEM);
842 struct_build_free(struct rte_swx_pipeline *p)
846 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
847 struct thread *t = &p->threads[i];
855 struct_free(struct rte_swx_pipeline *p)
857 struct_build_free(p);
861 struct struct_type *elem;
863 elem = TAILQ_FIRST(&p->struct_types);
867 TAILQ_REMOVE(&p->struct_types, elem, node);
876 static struct port_in_type *
877 port_in_type_find(struct rte_swx_pipeline *p, const char *name)
879 struct port_in_type *elem;
884 TAILQ_FOREACH(elem, &p->port_in_types, node)
885 if (strcmp(elem->name, name) == 0)
892 rte_swx_pipeline_port_in_type_register(struct rte_swx_pipeline *p,
894 struct rte_swx_port_in_ops *ops)
896 struct port_in_type *elem;
899 CHECK_NAME(name, EINVAL);
901 CHECK(ops->create, EINVAL);
902 CHECK(ops->free, EINVAL);
903 CHECK(ops->pkt_rx, EINVAL);
904 CHECK(ops->stats_read, EINVAL);
906 CHECK(!port_in_type_find(p, name), EEXIST);
908 /* Node allocation. */
909 elem = calloc(1, sizeof(struct port_in_type));
912 /* Node initialization. */
913 strcpy(elem->name, name);
914 memcpy(&elem->ops, ops, sizeof(*ops));
916 /* Node add to tailq. */
917 TAILQ_INSERT_TAIL(&p->port_in_types, elem, node);
922 static struct port_in *
923 port_in_find(struct rte_swx_pipeline *p, uint32_t port_id)
925 struct port_in *port;
927 TAILQ_FOREACH(port, &p->ports_in, node)
928 if (port->id == port_id)
935 rte_swx_pipeline_port_in_config(struct rte_swx_pipeline *p,
937 const char *port_type_name,
940 struct port_in_type *type = NULL;
941 struct port_in *port = NULL;
946 CHECK(!port_in_find(p, port_id), EINVAL);
948 CHECK_NAME(port_type_name, EINVAL);
949 type = port_in_type_find(p, port_type_name);
952 obj = type->ops.create(args);
955 /* Node allocation. */
956 port = calloc(1, sizeof(struct port_in));
959 /* Node initialization. */
964 /* Node add to tailq. */
965 TAILQ_INSERT_TAIL(&p->ports_in, port, node);
966 if (p->n_ports_in < port_id + 1)
967 p->n_ports_in = port_id + 1;
973 port_in_build(struct rte_swx_pipeline *p)
975 struct port_in *port;
978 CHECK(p->n_ports_in, EINVAL);
979 CHECK(rte_is_power_of_2(p->n_ports_in), EINVAL);
981 for (i = 0; i < p->n_ports_in; i++)
982 CHECK(port_in_find(p, i), EINVAL);
984 p->in = calloc(p->n_ports_in, sizeof(struct port_in_runtime));
985 CHECK(p->in, ENOMEM);
987 TAILQ_FOREACH(port, &p->ports_in, node) {
988 struct port_in_runtime *in = &p->in[port->id];
990 in->pkt_rx = port->type->ops.pkt_rx;
998 port_in_build_free(struct rte_swx_pipeline *p)
1005 port_in_free(struct rte_swx_pipeline *p)
1007 port_in_build_free(p);
1011 struct port_in *port;
1013 port = TAILQ_FIRST(&p->ports_in);
1017 TAILQ_REMOVE(&p->ports_in, port, node);
1018 port->type->ops.free(port->obj);
1022 /* Input port types. */
1024 struct port_in_type *elem;
1026 elem = TAILQ_FIRST(&p->port_in_types);
1030 TAILQ_REMOVE(&p->port_in_types, elem, node);
1038 static struct port_out_type *
1039 port_out_type_find(struct rte_swx_pipeline *p, const char *name)
1041 struct port_out_type *elem;
1046 TAILQ_FOREACH(elem, &p->port_out_types, node)
1047 if (!strcmp(elem->name, name))
1054 rte_swx_pipeline_port_out_type_register(struct rte_swx_pipeline *p,
1056 struct rte_swx_port_out_ops *ops)
1058 struct port_out_type *elem;
1061 CHECK_NAME(name, EINVAL);
1063 CHECK(ops->create, EINVAL);
1064 CHECK(ops->free, EINVAL);
1065 CHECK(ops->pkt_tx, EINVAL);
1066 CHECK(ops->stats_read, EINVAL);
1068 CHECK(!port_out_type_find(p, name), EEXIST);
1070 /* Node allocation. */
1071 elem = calloc(1, sizeof(struct port_out_type));
1072 CHECK(elem, ENOMEM);
1074 /* Node initialization. */
1075 strcpy(elem->name, name);
1076 memcpy(&elem->ops, ops, sizeof(*ops));
1078 /* Node add to tailq. */
1079 TAILQ_INSERT_TAIL(&p->port_out_types, elem, node);
1084 static struct port_out *
1085 port_out_find(struct rte_swx_pipeline *p, uint32_t port_id)
1087 struct port_out *port;
1089 TAILQ_FOREACH(port, &p->ports_out, node)
1090 if (port->id == port_id)
1097 rte_swx_pipeline_port_out_config(struct rte_swx_pipeline *p,
1099 const char *port_type_name,
1102 struct port_out_type *type = NULL;
1103 struct port_out *port = NULL;
1108 CHECK(!port_out_find(p, port_id), EINVAL);
1110 CHECK_NAME(port_type_name, EINVAL);
1111 type = port_out_type_find(p, port_type_name);
1112 CHECK(type, EINVAL);
1114 obj = type->ops.create(args);
1117 /* Node allocation. */
1118 port = calloc(1, sizeof(struct port_out));
1119 CHECK(port, ENOMEM);
1121 /* Node initialization. */
1126 /* Node add to tailq. */
1127 TAILQ_INSERT_TAIL(&p->ports_out, port, node);
1128 if (p->n_ports_out < port_id + 1)
1129 p->n_ports_out = port_id + 1;
1135 port_out_build(struct rte_swx_pipeline *p)
1137 struct port_out *port;
1140 CHECK(p->n_ports_out, EINVAL);
1142 for (i = 0; i < p->n_ports_out; i++)
1143 CHECK(port_out_find(p, i), EINVAL);
1145 p->out = calloc(p->n_ports_out, sizeof(struct port_out_runtime));
1146 CHECK(p->out, ENOMEM);
1148 TAILQ_FOREACH(port, &p->ports_out, node) {
1149 struct port_out_runtime *out = &p->out[port->id];
1151 out->pkt_tx = port->type->ops.pkt_tx;
1152 out->flush = port->type->ops.flush;
1153 out->obj = port->obj;
1160 port_out_build_free(struct rte_swx_pipeline *p)
1167 port_out_free(struct rte_swx_pipeline *p)
1169 port_out_build_free(p);
1173 struct port_out *port;
1175 port = TAILQ_FIRST(&p->ports_out);
1179 TAILQ_REMOVE(&p->ports_out, port, node);
1180 port->type->ops.free(port->obj);
1184 /* Output port types. */
1186 struct port_out_type *elem;
1188 elem = TAILQ_FIRST(&p->port_out_types);
1192 TAILQ_REMOVE(&p->port_out_types, elem, node);
1200 static struct extern_type *
1201 extern_type_find(struct rte_swx_pipeline *p, const char *name)
1203 struct extern_type *elem;
1205 TAILQ_FOREACH(elem, &p->extern_types, node)
1206 if (strcmp(elem->name, name) == 0)
1212 static struct extern_type_member_func *
1213 extern_type_member_func_find(struct extern_type *type, const char *name)
1215 struct extern_type_member_func *elem;
1217 TAILQ_FOREACH(elem, &type->funcs, node)
1218 if (strcmp(elem->name, name) == 0)
1224 static struct extern_obj *
1225 extern_obj_find(struct rte_swx_pipeline *p, const char *name)
1227 struct extern_obj *elem;
1229 TAILQ_FOREACH(elem, &p->extern_objs, node)
1230 if (strcmp(elem->name, name) == 0)
1236 static struct field *
1237 extern_obj_mailbox_field_parse(struct rte_swx_pipeline *p,
1239 struct extern_obj **object)
1241 struct extern_obj *obj;
1243 char *obj_name, *field_name;
1245 if ((name[0] != 'e') || (name[1] != '.'))
1248 obj_name = strdup(&name[2]);
1252 field_name = strchr(obj_name, '.');
1261 obj = extern_obj_find(p, obj_name);
1267 f = struct_type_field_find(obj->type->mailbox_struct_type, field_name);
1281 rte_swx_pipeline_extern_type_register(struct rte_swx_pipeline *p,
1283 const char *mailbox_struct_type_name,
1284 rte_swx_extern_type_constructor_t constructor,
1285 rte_swx_extern_type_destructor_t destructor)
1287 struct extern_type *elem;
1288 struct struct_type *mailbox_struct_type;
1292 CHECK_NAME(name, EINVAL);
1293 CHECK(!extern_type_find(p, name), EEXIST);
1295 CHECK_NAME(mailbox_struct_type_name, EINVAL);
1296 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
1297 CHECK(mailbox_struct_type, EINVAL);
1299 CHECK(constructor, EINVAL);
1300 CHECK(destructor, EINVAL);
1302 /* Node allocation. */
1303 elem = calloc(1, sizeof(struct extern_type));
1304 CHECK(elem, ENOMEM);
1306 /* Node initialization. */
1307 strcpy(elem->name, name);
1308 elem->mailbox_struct_type = mailbox_struct_type;
1309 elem->constructor = constructor;
1310 elem->destructor = destructor;
1311 TAILQ_INIT(&elem->funcs);
1313 /* Node add to tailq. */
1314 TAILQ_INSERT_TAIL(&p->extern_types, elem, node);
1320 rte_swx_pipeline_extern_type_member_func_register(struct rte_swx_pipeline *p,
1321 const char *extern_type_name,
1323 rte_swx_extern_type_member_func_t member_func)
1325 struct extern_type *type;
1326 struct extern_type_member_func *type_member;
1330 CHECK(extern_type_name, EINVAL);
1331 type = extern_type_find(p, extern_type_name);
1332 CHECK(type, EINVAL);
1333 CHECK(type->n_funcs < RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX, ENOSPC);
1335 CHECK(name, EINVAL);
1336 CHECK(!extern_type_member_func_find(type, name), EEXIST);
1338 CHECK(member_func, EINVAL);
1340 /* Node allocation. */
1341 type_member = calloc(1, sizeof(struct extern_type_member_func));
1342 CHECK(type_member, ENOMEM);
1344 /* Node initialization. */
1345 strcpy(type_member->name, name);
1346 type_member->func = member_func;
1347 type_member->id = type->n_funcs;
1349 /* Node add to tailq. */
1350 TAILQ_INSERT_TAIL(&type->funcs, type_member, node);
1357 rte_swx_pipeline_extern_object_config(struct rte_swx_pipeline *p,
1358 const char *extern_type_name,
1362 struct extern_type *type;
1363 struct extern_obj *obj;
1368 CHECK_NAME(extern_type_name, EINVAL);
1369 type = extern_type_find(p, extern_type_name);
1370 CHECK(type, EINVAL);
1372 CHECK_NAME(name, EINVAL);
1373 CHECK(!extern_obj_find(p, name), EEXIST);
1375 /* Node allocation. */
1376 obj = calloc(1, sizeof(struct extern_obj));
1379 /* Object construction. */
1380 obj_handle = type->constructor(args);
1386 /* Node initialization. */
1387 strcpy(obj->name, name);
1389 obj->obj = obj_handle;
1390 obj->struct_id = p->n_structs;
1391 obj->id = p->n_extern_objs;
1393 /* Node add to tailq. */
1394 TAILQ_INSERT_TAIL(&p->extern_objs, obj, node);
1402 extern_obj_build(struct rte_swx_pipeline *p)
1406 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1407 struct thread *t = &p->threads[i];
1408 struct extern_obj *obj;
1410 t->extern_objs = calloc(p->n_extern_objs,
1411 sizeof(struct extern_obj_runtime));
1412 CHECK(t->extern_objs, ENOMEM);
1414 TAILQ_FOREACH(obj, &p->extern_objs, node) {
1415 struct extern_obj_runtime *r =
1416 &t->extern_objs[obj->id];
1417 struct extern_type_member_func *func;
1418 uint32_t mailbox_size =
1419 obj->type->mailbox_struct_type->n_bits / 8;
1423 r->mailbox = calloc(1, mailbox_size);
1424 CHECK(r->mailbox, ENOMEM);
1426 TAILQ_FOREACH(func, &obj->type->funcs, node)
1427 r->funcs[func->id] = func->func;
1429 t->structs[obj->struct_id] = r->mailbox;
1437 extern_obj_build_free(struct rte_swx_pipeline *p)
1441 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1442 struct thread *t = &p->threads[i];
1445 if (!t->extern_objs)
1448 for (j = 0; j < p->n_extern_objs; j++) {
1449 struct extern_obj_runtime *r = &t->extern_objs[j];
1454 free(t->extern_objs);
1455 t->extern_objs = NULL;
1460 extern_obj_free(struct rte_swx_pipeline *p)
1462 extern_obj_build_free(p);
1464 /* Extern objects. */
1466 struct extern_obj *elem;
1468 elem = TAILQ_FIRST(&p->extern_objs);
1472 TAILQ_REMOVE(&p->extern_objs, elem, node);
1474 elem->type->destructor(elem->obj);
1480 struct extern_type *elem;
1482 elem = TAILQ_FIRST(&p->extern_types);
1486 TAILQ_REMOVE(&p->extern_types, elem, node);
1489 struct extern_type_member_func *func;
1491 func = TAILQ_FIRST(&elem->funcs);
1495 TAILQ_REMOVE(&elem->funcs, func, node);
1506 static struct extern_func *
1507 extern_func_find(struct rte_swx_pipeline *p, const char *name)
1509 struct extern_func *elem;
1511 TAILQ_FOREACH(elem, &p->extern_funcs, node)
1512 if (strcmp(elem->name, name) == 0)
1518 static struct field *
1519 extern_func_mailbox_field_parse(struct rte_swx_pipeline *p,
1521 struct extern_func **function)
1523 struct extern_func *func;
1525 char *func_name, *field_name;
1527 if ((name[0] != 'f') || (name[1] != '.'))
1530 func_name = strdup(&name[2]);
1534 field_name = strchr(func_name, '.');
1543 func = extern_func_find(p, func_name);
1549 f = struct_type_field_find(func->mailbox_struct_type, field_name);
/* Public API: register a stand-alone extern function with its mailbox
 * struct type. The function claims the next struct id (for its mailbox)
 * and the next extern function id, then joins the extern_funcs tailq. */
1563 rte_swx_pipeline_extern_func_register(struct rte_swx_pipeline *p,
1565 const char *mailbox_struct_type_name,
1566 rte_swx_extern_func_t func)
1568 struct extern_func *f;
1569 struct struct_type *mailbox_struct_type;
1573 CHECK_NAME(name, EINVAL);
1574 CHECK(!extern_func_find(p, name), EEXIST);
1576 CHECK_NAME(mailbox_struct_type_name, EINVAL);
1577 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
1578 CHECK(mailbox_struct_type, EINVAL);
1580 CHECK(func, EINVAL);
1582 /* Node allocation. */
1583 f = calloc(1, sizeof(struct extern_func));
/* Bug fix: validate the freshly allocated node (f), not the function
 * pointer (func) that was already checked above. The original
 * CHECK(func, ENOMEM) could never fail here, so a calloc() failure went
 * undetected and the strcpy() below would dereference a NULL pointer. */
1584 CHECK(f, ENOMEM);
1586 /* Node initialization. */
1587 strcpy(f->name, name);
1588 f->mailbox_struct_type = mailbox_struct_type;
1590 f->struct_id = p->n_structs;
1591 f->id = p->n_extern_funcs;
1593 /* Node add to tailq. */
1594 TAILQ_INSERT_TAIL(&p->extern_funcs, f, node);
1595 p->n_extern_funcs++;
1602 extern_func_build(struct rte_swx_pipeline *p)
1606 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1607 struct thread *t = &p->threads[i];
1608 struct extern_func *func;
1610 /* Memory allocation. */
1611 t->extern_funcs = calloc(p->n_extern_funcs,
1612 sizeof(struct extern_func_runtime));
1613 CHECK(t->extern_funcs, ENOMEM);
1615 /* Extern function. */
1616 TAILQ_FOREACH(func, &p->extern_funcs, node) {
1617 struct extern_func_runtime *r =
1618 &t->extern_funcs[func->id];
1619 uint32_t mailbox_size =
1620 func->mailbox_struct_type->n_bits / 8;
1622 r->func = func->func;
1624 r->mailbox = calloc(1, mailbox_size);
1625 CHECK(r->mailbox, ENOMEM);
1627 t->structs[func->struct_id] = r->mailbox;
1635 extern_func_build_free(struct rte_swx_pipeline *p)
1639 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1640 struct thread *t = &p->threads[i];
1643 if (!t->extern_funcs)
1646 for (j = 0; j < p->n_extern_funcs; j++) {
1647 struct extern_func_runtime *r = &t->extern_funcs[j];
1652 free(t->extern_funcs);
1653 t->extern_funcs = NULL;
1658 extern_func_free(struct rte_swx_pipeline *p)
1660 extern_func_build_free(p);
1663 struct extern_func *elem;
1665 elem = TAILQ_FIRST(&p->extern_funcs);
1669 TAILQ_REMOVE(&p->extern_funcs, elem, node);
1677 static struct header *
1678 header_find(struct rte_swx_pipeline *p, const char *name)
1680 struct header *elem;
1682 TAILQ_FOREACH(elem, &p->headers, node)
1683 if (strcmp(elem->name, name) == 0)
1689 static struct header *
1690 header_parse(struct rte_swx_pipeline *p,
1693 if (name[0] != 'h' || name[1] != '.')
1696 return header_find(p, &name[2]);
1699 static struct field *
1700 header_field_parse(struct rte_swx_pipeline *p,
1702 struct header **header)
1706 char *header_name, *field_name;
1708 if ((name[0] != 'h') || (name[1] != '.'))
1711 header_name = strdup(&name[2]);
1715 field_name = strchr(header_name, '.');
1724 h = header_find(p, header_name);
1730 f = struct_type_field_find(h->st, field_name);
1744 rte_swx_pipeline_packet_header_register(struct rte_swx_pipeline *p,
1746 const char *struct_type_name)
1748 struct struct_type *st;
1750 size_t n_headers_max;
1753 CHECK_NAME(name, EINVAL);
1754 CHECK_NAME(struct_type_name, EINVAL);
1756 CHECK(!header_find(p, name), EEXIST);
1758 st = struct_type_find(p, struct_type_name);
1761 n_headers_max = RTE_SIZEOF_FIELD(struct thread, valid_headers) * 8;
1762 CHECK(p->n_headers < n_headers_max, ENOSPC);
1764 /* Node allocation. */
1765 h = calloc(1, sizeof(struct header));
1768 /* Node initialization. */
1769 strcpy(h->name, name);
1771 h->struct_id = p->n_structs;
1772 h->id = p->n_headers;
1774 /* Node add to tailq. */
1775 TAILQ_INSERT_TAIL(&p->headers, h, node);
/*
 * Build step for headers: size the total header storage, then allocate the
 * per-thread runtime arrays and carve each header's ptr0 out of the shared
 * per-thread storage buffer. CHECK() returns ENOMEM on allocation failure.
 */
1783 header_build(struct rte_swx_pipeline *p)
1786 uint32_t n_bytes = 0, i;
/* Total byte size of all registered headers. */
1788 TAILQ_FOREACH(h, &p->headers, node) {
1789 n_bytes += h->st->n_bits / 8;
1792 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1793 struct thread *t = &p->threads[i];
1794 uint32_t offset = 0;
1796 t->headers = calloc(p->n_headers,
1797 sizeof(struct header_runtime));
1798 CHECK(t->headers, ENOMEM);
1800 t->headers_out = calloc(p->n_headers,
1801 sizeof(struct header_out_runtime));
1802 CHECK(t->headers_out, ENOMEM);
1804 t->header_storage = calloc(1, n_bytes);
1805 CHECK(t->header_storage, ENOMEM);
1807 t->header_out_storage = calloc(1, n_bytes);
1808 CHECK(t->header_out_storage, ENOMEM);
/* Assign each header a fixed slice of the thread's header storage. */
1810 TAILQ_FOREACH(h, &p->headers, node) {
1811 uint8_t *header_storage;
1813 header_storage = &t->header_storage[offset];
1814 offset += h->st->n_bits / 8;
1816 t->headers[h->id].ptr0 = header_storage;
1817 t->structs[h->struct_id] = header_storage;
/*
 * Undo header_build(): free per-thread header runtime arrays and storage.
 * free(NULL) is safe, so this is callable on a partially-built pipeline.
 */
1825 header_build_free(struct rte_swx_pipeline *p)
1829 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1830 struct thread *t = &p->threads[i];
1832 free(t->headers_out);
1833 t->headers_out = NULL;
1838 free(t->header_out_storage);
1839 t->header_out_storage = NULL;
1841 free(t->header_storage);
1842 t->header_storage = NULL;
/* Free runtime state, then drain the headers tailq (loop lines elided). */
1847 header_free(struct rte_swx_pipeline *p)
1849 header_build_free(p);
1852 struct header *elem;
1854 elem = TAILQ_FIRST(&p->headers);
1858 TAILQ_REMOVE(&p->headers, elem, node);
/*
 * Parse an "m.<field>" token against the registered metadata struct type.
 * Fails if no metadata struct has been registered yet.
 */
1866 static struct field *
1867 metadata_field_parse(struct rte_swx_pipeline *p, const char *name)
1869 if (!p->metadata_st)
1872 if (name[0] != 'm' || name[1] != '.')
1875 return struct_type_field_find(p->metadata_st, &name[2]);
/*
 * Public API: register the (single) packet metadata struct type.
 * Only one registration is allowed per pipeline.
 */
1879 rte_swx_pipeline_packet_metadata_register(struct rte_swx_pipeline *p,
1880 const char *struct_type_name)
1882 struct struct_type *st = NULL;
1886 CHECK_NAME(struct_type_name, EINVAL);
1887 st = struct_type_find(p, struct_type_name);
/* Reject a second registration. */
1889 CHECK(!p->metadata_st, EINVAL);
1891 p->metadata_st = st;
1892 p->metadata_struct_id = p->n_structs;
/* Build step: allocate one zeroed metadata buffer per thread. */
1900 metadata_build(struct rte_swx_pipeline *p)
1902 uint32_t n_bytes = p->metadata_st->n_bits / 8;
1905 /* Thread-level initialization. */
1906 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1907 struct thread *t = &p->threads[i];
1910 metadata = calloc(1, n_bytes);
1911 CHECK(metadata, ENOMEM);
1913 t->metadata = metadata;
1914 t->structs[p->metadata_struct_id] = metadata;
/* Free per-thread metadata buffers (free/NULL statements elided here). */
1921 metadata_build_free(struct rte_swx_pipeline *p)
1925 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1926 struct thread *t = &p->threads[i];
/* Top-level metadata teardown. */
1934 metadata_free(struct rte_swx_pipeline *p)
1936 metadata_build_free(p);
/* Forward declaration: action argument field lookup (defined later). */
1942 static struct field *
1943 action_field_parse(struct action *action, const char *name);
/*
 * Generic operand parser: dispatch on the token prefix and resolve it to a
 * field plus the struct_id of its container. Visible cases: header field
 * ("h."), metadata field ("m."), action argument, extern object mailbox and
 * extern function mailbox fields. The dispatch conditions themselves are
 * elided from this listing — TODO confirm prefix handling against full source.
 */
1945 static struct field *
1946 struct_field_parse(struct rte_swx_pipeline *p,
1947 struct action *action,
1949 uint32_t *struct_id)
1956 struct header *header;
1958 f = header_field_parse(p, name, &header);
1962 *struct_id = header->struct_id;
1968 f = metadata_field_parse(p, name);
1972 *struct_id = p->metadata_struct_id;
1981 f = action_field_parse(action, name);
1991 struct extern_obj *obj;
1993 f = extern_obj_mailbox_field_parse(p, name, &obj);
1997 *struct_id = obj->struct_id;
2003 struct extern_func *func;
2005 f = extern_func_mailbox_field_parse(p, name, &func);
2009 *struct_id = func->struct_id;
/*
 * Round-robin input port selector. The bitwise AND wrap requires n_ports_in
 * to be a power of two.
 */
2019 pipeline_port_inc(struct rte_swx_pipeline *p)
2021 p->port_id = (p->port_id + 1) & (p->n_ports_in - 1);
/* Reset a thread's instruction pointer to the start of the program. */
2025 thread_ip_reset(struct rte_swx_pipeline *p, struct thread *t)
2027 t->ip = p->instructions;
2031 thread_ip_inc(struct rte_swx_pipeline *p);
/* Advance the current thread's instruction pointer (increment line elided). */
2034 thread_ip_inc(struct rte_swx_pipeline *p)
2036 struct thread *t = &p->threads[p->thread_id];
/* Conditionally advance: used e.g. to stall rx until a packet arrives. */
2042 thread_ip_inc_cond(struct thread *t, int cond)
/* Switch to the next thread; THREADS_MAX must be a power of two for the mask. */
2048 thread_yield(struct rte_swx_pipeline *p)
2050 p->thread_id = (p->thread_id + 1) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
/*
 * Translate "rx m.<field>": only valid outside actions, takes exactly one
 * operand — the metadata field that receives the input port ID.
 */
2057 instr_rx_translate(struct rte_swx_pipeline *p,
2058 struct action *action,
2061 struct instruction *instr,
2062 struct instruction_data *data __rte_unused)
2066 CHECK(!action, EINVAL);
2067 CHECK(n_tokens == 2, EINVAL);
2069 f = metadata_field_parse(p, tokens[1])
2072 instr->type = INSTR_RX;
/* Byte offset/width of the metadata field written with the port ID. */
2073 instr->io.io.offset = f->offset / 8;
2074 instr->io.io.n_bits = f->n_bits;
2079 instr_rx_exec(struct rte_swx_pipeline *p);
/*
 * Execute rx: poll the current input port, point t->ptr at the packet's
 * first byte, reset header state, record the port ID into metadata, snapshot
 * the table state, then advance the port and (only if a packet was received)
 * the instruction pointer — otherwise the thread retries rx.
 */
2082 instr_rx_exec(struct rte_swx_pipeline *p)
2084 struct thread *t = &p->threads[p->thread_id];
2085 struct instruction *ip = t->ip;
2086 struct port_in_runtime *port = &p->in[p->port_id];
2087 struct rte_swx_pkt *pkt = &t->pkt;
2091 pkt_received = port->pkt_rx(port->obj, pkt);
2092 t->ptr = &pkt->pkt[pkt->offset];
2093 rte_prefetch0(t->ptr);
2095 TRACE("[Thread %2u] rx %s from port %u\n",
2097 pkt_received ? "1 pkt" : "0 pkts",
/* Fresh packet: no headers extracted or emitted yet. */
2101 t->valid_headers = 0;
2102 t->n_headers_out = 0;
2105 METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, p->port_id);
2108 t->table_state = p->table_state;
2111 pipeline_port_inc(p);
2112 thread_ip_inc_cond(t, pkt_received);
/*
 * Translate "tx m.<field>": one operand — the metadata field holding the
 * output port ID. Unlike rx, tx is also permitted inside actions.
 */
2120 instr_tx_translate(struct rte_swx_pipeline *p,
2121 struct action *action __rte_unused,
2124 struct instruction *instr,
2125 struct instruction_data *data __rte_unused)
2129 CHECK(n_tokens == 2, EINVAL);
2131 f = metadata_field_parse(p, tokens[1]);
2134 instr->type = INSTR_TX;
2135 instr->io.io.offset = f->offset / 8;
2136 instr->io.io.n_bits = f->n_bits;
/*
 * Materialize the emitted headers in front of the packet payload before tx.
 * Fast paths avoid copying when the emitted headers already sit contiguously
 * in the packet buffer; the general path gathers all emitted headers into
 * header_out_storage and copies them in with a single memcpy.
 */
2141 emit_handler(struct thread *t)
2143 struct header_out_runtime *h0 = &t->headers_out[0];
2144 struct header_out_runtime *h1 = &t->headers_out[1];
2145 uint32_t offset = 0, i;
2147 /* No header change or header decapsulation. */
2148 if ((t->n_headers_out == 1) &&
2149 (h0->ptr + h0->n_bytes == t->ptr)) {
2150 TRACE("Emit handler: no header change or header decap.\n");
/* Headers are already in place: just widen the packet window. */
2152 t->pkt.offset -= h0->n_bytes;
2153 t->pkt.length += h0->n_bytes;
2158 /* Header encapsulation (optionally, with prior header decasulation). */
2159 if ((t->n_headers_out == 2) &&
2160 (h1->ptr + h1->n_bytes == t->ptr) &&
2161 (h0->ptr == h0->ptr0)) {
2164 TRACE("Emit handler: header encapsulation.\n");
/* Copy only the new outer header; the inner one is already in the buffer. */
2166 offset = h0->n_bytes + h1->n_bytes;
2167 memcpy(t->ptr - offset, h0->ptr, h0->n_bytes);
2168 t->pkt.offset -= offset;
2169 t->pkt.length += offset;
2174 /* Header insertion. */
2177 /* Header extraction. */
2180 /* For any other case. */
2181 TRACE("Emit handler: complex case.\n");
/* Gather all emitted headers into scratch storage, then one bulk copy. */
2183 for (i = 0; i < t->n_headers_out; i++) {
2184 struct header_out_runtime *h = &t->headers_out[i];
2186 memcpy(&t->header_out_storage[offset], h->ptr, h->n_bytes);
2187 offset += h->n_bytes;
2191 memcpy(t->ptr - offset, t->header_out_storage, offset);
2192 t->pkt.offset -= offset;
2193 t->pkt.length += offset;
2198 instr_tx_exec(struct rte_swx_pipeline *p);
/*
 * Execute tx: read the output port ID from metadata, finalize headers via
 * emit_handler (call elided from this listing — confirm), hand the packet to
 * the port, and restart the thread's program.
 */
2201 instr_tx_exec(struct rte_swx_pipeline *p)
2203 struct thread *t = &p->threads[p->thread_id];
2204 struct instruction *ip = t->ip;
2205 uint64_t port_id = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
2206 struct port_out_runtime *port = &p->out[port_id];
2207 struct rte_swx_pkt *pkt = &t->pkt;
2209 TRACE("[Thread %2u]: tx 1 pkt to port %u\n",
2217 port->pkt_tx(port->obj, pkt);
2220 thread_ip_reset(p, t);
/*
 * Translate "extract h.<header>": only valid outside actions; records the
 * header's ID, struct slot and byte size into instruction slot 0 (later
 * instruction fusion may fill slots 1..7).
 */
2228 instr_hdr_extract_translate(struct rte_swx_pipeline *p,
2229 struct action *action,
2232 struct instruction *instr,
2233 struct instruction_data *data __rte_unused)
2237 CHECK(!action, EINVAL);
2238 CHECK(n_tokens == 2, EINVAL);
2240 h = header_parse(p, tokens[1]);
2243 instr->type = INSTR_HDR_EXTRACT;
2244 instr->io.hdr.header_id[0] = h->id;
2245 instr->io.hdr.struct_id[0] = h->struct_id;
2246 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
2251 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract);
/*
 * Shared body for the extract family: peel n_extract headers off the front
 * of the packet in order, pointing each header's struct slot directly into
 * the packet buffer (zero copy) and setting its valid bit. The per-iteration
 * ptr/offset/length updates are elided from this listing.
 */
2254 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract)
2256 struct thread *t = &p->threads[p->thread_id];
2257 struct instruction *ip = t->ip;
2258 uint64_t valid_headers = t->valid_headers;
2259 uint8_t *ptr = t->ptr;
2260 uint32_t offset = t->pkt.offset;
2261 uint32_t length = t->pkt.length;
2264 for (i = 0; i < n_extract; i++) {
2265 uint32_t header_id = ip->io.hdr.header_id[i];
2266 uint32_t struct_id = ip->io.hdr.struct_id[i];
2267 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
2269 TRACE("[Thread %2u]: extract header %u (%u bytes)\n",
/* Zero-copy: the header lives in the packet buffer itself. */
2275 t->structs[struct_id] = ptr;
2276 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
2285 t->valid_headers = valid_headers;
2288 t->pkt.offset = offset;
2289 t->pkt.length = length;
/* Single extract. */
2294 instr_hdr_extract_exec(struct rte_swx_pipeline *p)
2296 __instr_hdr_extract_exec(p, 1);
/* Fused variants: N consecutive extract instructions executed as one. */
2303 instr_hdr_extract2_exec(struct rte_swx_pipeline *p)
2305 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
2308 __instr_hdr_extract_exec(p, 2);
2315 instr_hdr_extract3_exec(struct rte_swx_pipeline *p)
2317 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
2320 __instr_hdr_extract_exec(p, 3);
2327 instr_hdr_extract4_exec(struct rte_swx_pipeline *p)
2329 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
2332 __instr_hdr_extract_exec(p, 4);
2339 instr_hdr_extract5_exec(struct rte_swx_pipeline *p)
2341 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
2344 __instr_hdr_extract_exec(p, 5);
2351 instr_hdr_extract6_exec(struct rte_swx_pipeline *p)
2353 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
2356 __instr_hdr_extract_exec(p, 6);
2363 instr_hdr_extract7_exec(struct rte_swx_pipeline *p)
2365 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
2368 __instr_hdr_extract_exec(p, 7);
2375 instr_hdr_extract8_exec(struct rte_swx_pipeline *p)
2377 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
2380 __instr_hdr_extract_exec(p, 8);
/*
 * Translate "emit h.<header>": allowed in actions too; fills instruction
 * slot 0 with the header's ID, struct slot and byte size.
 */
2390 instr_hdr_emit_translate(struct rte_swx_pipeline *p,
2391 struct action *action __rte_unused,
2394 struct instruction *instr,
2395 struct instruction_data *data __rte_unused)
2399 CHECK(n_tokens == 2, EINVAL);
2401 h = header_parse(p, tokens[1]);
2404 instr->type = INSTR_HDR_EMIT;
2405 instr->io.hdr.header_id[0] = h->id;
2406 instr->io.hdr.struct_id[0] = h->struct_id;
2407 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
2412 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit);
/*
 * Shared body for the emit family: append n_emit headers to the out-header
 * list. Contiguous headers are coalesced into the current headers_out entry
 * (the "ho_ptr + ho_nbytes == hi_ptr" test); otherwise a new entry is opened.
 * Several branch/assignment lines are elided from this listing.
 */
2415 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit)
2417 struct thread *t = &p->threads[p->thread_id];
2418 struct instruction *ip = t->ip;
2419 uint32_t n_headers_out = t->n_headers_out;
2420 struct header_out_runtime *ho = &t->headers_out[n_headers_out - 1];
2421 uint8_t *ho_ptr = NULL;
2422 uint32_t ho_nbytes = 0, i;
2424 for (i = 0; i < n_emit; i++) {
2425 uint32_t header_id = ip->io.hdr.header_id[i];
2426 uint32_t struct_id = ip->io.hdr.struct_id[i];
2427 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
2429 struct header_runtime *hi = &t->headers[header_id];
2430 uint8_t *hi_ptr = t->structs[struct_id];
2432 TRACE("[Thread %2u]: emit header %u\n",
/* First emitted header: open out-entry 0. */
2438 if (!t->n_headers_out) {
2439 ho = &t->headers_out[0];
2441 ho->ptr0 = hi->ptr0;
2445 ho_nbytes = n_bytes;
2452 ho_nbytes = ho->n_bytes;
/* Contiguous with the current out-entry: extend it in place. */
2456 if (ho_ptr + ho_nbytes == hi_ptr) {
2457 ho_nbytes += n_bytes;
/* Non-contiguous: close the current entry, open a new one. */
2459 ho->n_bytes = ho_nbytes;
2462 ho->ptr0 = hi->ptr0;
2466 ho_nbytes = n_bytes;
2472 ho->n_bytes = ho_nbytes;
2473 t->n_headers_out = n_headers_out;
/* Single emit. */
2477 instr_hdr_emit_exec(struct rte_swx_pipeline *p)
2479 __instr_hdr_emit_exec(p, 1);
/*
 * Fused emit+tx variants: "next N+1 instructions" = N emits followed by one
 * tx; each emits N headers then performs the tx (tx call lines elided).
 */
2486 instr_hdr_emit_tx_exec(struct rte_swx_pipeline *p)
2488 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
2491 __instr_hdr_emit_exec(p, 1);
2496 instr_hdr_emit2_tx_exec(struct rte_swx_pipeline *p)
2498 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
2501 __instr_hdr_emit_exec(p, 2);
2506 instr_hdr_emit3_tx_exec(struct rte_swx_pipeline *p)
2508 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
2511 __instr_hdr_emit_exec(p, 3);
2516 instr_hdr_emit4_tx_exec(struct rte_swx_pipeline *p)
2518 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
2521 __instr_hdr_emit_exec(p, 4);
2526 instr_hdr_emit5_tx_exec(struct rte_swx_pipeline *p)
2528 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
2531 __instr_hdr_emit_exec(p, 5);
2536 instr_hdr_emit6_tx_exec(struct rte_swx_pipeline *p)
2538 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
2541 __instr_hdr_emit_exec(p, 6);
2546 instr_hdr_emit7_tx_exec(struct rte_swx_pipeline *p)
2548 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
2551 __instr_hdr_emit_exec(p, 7);
2556 instr_hdr_emit8_tx_exec(struct rte_swx_pipeline *p)
2558 TRACE("[Thread %2u] *** The next 9 instructions are fused. ***\n",
2561 __instr_hdr_emit_exec(p, 8);
/* Translate "validate h.<header>": record the target header ID. */
2569 instr_hdr_validate_translate(struct rte_swx_pipeline *p,
2570 struct action *action __rte_unused,
2573 struct instruction *instr,
2574 struct instruction_data *data __rte_unused)
2578 CHECK(n_tokens == 2, EINVAL);
2580 h = header_parse(p, tokens[1]);
2583 instr->type = INSTR_HDR_VALIDATE;
2584 instr->valid.header_id = h->id;
/* Execute validate: set the header's bit in the valid_headers mask. */
2589 instr_hdr_validate_exec(struct rte_swx_pipeline *p)
2591 struct thread *t = &p->threads[p->thread_id];
2592 struct instruction *ip = t->ip;
2593 uint32_t header_id = ip->valid.header_id;
2595 TRACE("[Thread %2u] validate header %u\n", p->thread_id, header_id);
2598 t->valid_headers = MASK64_BIT_SET(t->valid_headers, header_id);
/* Translate "invalidate h.<header>": record the target header ID. */
2608 instr_hdr_invalidate_translate(struct rte_swx_pipeline *p,
2609 struct action *action __rte_unused,
2612 struct instruction *instr,
2613 struct instruction_data *data __rte_unused)
2617 CHECK(n_tokens == 2, EINVAL);
2619 h = header_parse(p, tokens[1]);
2622 instr->type = INSTR_HDR_INVALIDATE;
2623 instr->valid.header_id = h->id;
/* Execute invalidate: clear the header's bit in the valid_headers mask. */
2628 instr_hdr_invalidate_exec(struct rte_swx_pipeline *p)
2630 struct thread *t = &p->threads[p->thread_id];
2631 struct instruction *ip = t->ip;
2632 uint32_t header_id = ip->valid.header_id;
2634 TRACE("[Thread %2u] invalidate header %u\n", p->thread_id, header_id);
2637 t->valid_headers = MASK64_BIT_CLR(t->valid_headers, header_id);
/*
 * Translate "mov dst src". dst must be a struct field; src is either a
 * struct field (MOV, or MOV_S when exactly one operand is a header field and
 * byte-order swap is needed) or an immediate (MOV_I).
 */
2647 instr_mov_translate(struct rte_swx_pipeline *p,
2648 struct action *action,
2651 struct instruction *instr,
2652 struct instruction_data *data __rte_unused)
2654 char *dst = tokens[1], *src = tokens[2];
2655 struct field *fdst, *fsrc;
2656 uint32_t dst_struct_id, src_struct_id, src_val;
2658 CHECK(n_tokens == 3, EINVAL);
2660 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
2661 CHECK(fdst, EINVAL);
2664 fsrc = struct_field_parse(p, action, src, &src_struct_id);
2666 instr->type = INSTR_MOV;
/* Mixed header/non-header operands need the byte-swapping variant. */
2667 if ((dst[0] == 'h' && src[0] != 'h') ||
2668 (dst[0] != 'h' && src[0] == 'h'))
2669 instr->type = INSTR_MOV_S;
2671 instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
2672 instr->mov.dst.n_bits = fdst->n_bits;
2673 instr->mov.dst.offset = fdst->offset / 8;
2674 instr->mov.src.struct_id = (uint8_t)src_struct_id;
2675 instr->mov.src.n_bits = fsrc->n_bits;
2676 instr->mov.src.offset = fsrc->offset / 8;
/* Immediate source: must consume the whole token. */
2681 src_val = strtoul(src, &src, 0);
2682 CHECK(!src[0], EINVAL);
/* Presumably only when dst is a header field — condition line elided. */
2685 src_val = htonl(src_val);
2687 instr->type = INSTR_MOV_I;
2688 instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
2689 instr->mov.dst.n_bits = fdst->n_bits;
2690 instr->mov.dst.offset = fdst->offset / 8;
2691 instr->mov.src_val = (uint32_t)src_val;
/* Execute MOV (same byte order); macro body elided from this listing. */
2696 instr_mov_exec(struct rte_swx_pipeline *p)
2698 struct thread *t = &p->threads[p->thread_id];
2699 struct instruction *ip = t->ip;
2701 TRACE("[Thread %2u] mov\n",
/* Execute MOV_S (byte-order swapping variant). */
2711 instr_mov_s_exec(struct rte_swx_pipeline *p)
2713 struct thread *t = &p->threads[p->thread_id];
2714 struct instruction *ip = t->ip;
2716 TRACE("[Thread %2u] mov (s)\n",
/* Execute MOV_I (immediate source). */
2726 instr_mov_i_exec(struct rte_swx_pipeline *p)
2728 struct thread *t = &p->threads[p->thread_id];
2729 struct instruction *ip = t->ip;
2731 TRACE("[Thread %2u] mov m.f %x\n",
/*
 * Translate "dma h.<header> t.<field>": action-only; bulk-copies a whole
 * header struct out of the action (table-entry) data.
 */
2745 instr_dma_translate(struct rte_swx_pipeline *p,
2746 struct action *action,
2749 struct instruction *instr,
2750 struct instruction_data *data __rte_unused)
2752 char *dst = tokens[1];
2753 char *src = tokens[2];
2757 CHECK(action, EINVAL);
2758 CHECK(n_tokens == 3, EINVAL);
2760 h = header_parse(p, dst);
2763 tf = action_field_parse(action, src);
2766 instr->type = INSTR_DMA_HT;
2767 instr->dma.dst.header_id[0] = h->id;
2768 instr->dma.dst.struct_id[0] = h->struct_id;
2769 instr->dma.n_bytes[0] = h->st->n_bits / 8;
2770 instr->dma.src.offset[0] = tf->offset / 8;
2776 __instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma);
/*
 * Shared body for the dma family: copy n_dma headers from action data into
 * header storage. If a header is not currently valid, the copy targets its
 * ptr0 backing storage and the header is marked valid.
 */
2779 __instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma)
2781 struct thread *t = &p->threads[p->thread_id];
2782 struct instruction *ip = t->ip;
/* structs[0] holds the current action's data. */
2783 uint8_t *action_data = t->structs[0];
2784 uint64_t valid_headers = t->valid_headers;
2787 for (i = 0; i < n_dma; i++) {
2788 uint32_t header_id = ip->dma.dst.header_id[i];
2789 uint32_t struct_id = ip->dma.dst.struct_id[i];
2790 uint32_t offset = ip->dma.src.offset[i];
2791 uint32_t n_bytes = ip->dma.n_bytes[i];
2793 struct header_runtime *h = &t->headers[header_id];
2794 uint8_t *h_ptr0 = h->ptr0;
2795 uint8_t *h_ptr = t->structs[struct_id];
/* Valid header: overwrite in place; invalid: write into backing storage. */
2797 void *dst = MASK64_BIT_GET(valid_headers, header_id) ?
2799 void *src = &action_data[offset];
2801 TRACE("[Thread %2u] dma h.s t.f\n", p->thread_id);
2804 memcpy(dst, src, n_bytes);
2805 t->structs[struct_id] = dst;
2806 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
2809 t->valid_headers = valid_headers;
/* Single dma. */
2813 instr_dma_ht_exec(struct rte_swx_pipeline *p)
2815 __instr_dma_ht_exec(p, 1);
/* Fused variants: N consecutive dma instructions executed as one. */
2822 instr_dma_ht2_exec(struct rte_swx_pipeline *p)
2824 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
2827 __instr_dma_ht_exec(p, 2);
2834 instr_dma_ht3_exec(struct rte_swx_pipeline *p)
2836 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
2839 __instr_dma_ht_exec(p, 3);
2846 instr_dma_ht4_exec(struct rte_swx_pipeline *p)
2848 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
2851 __instr_dma_ht_exec(p, 4);
2858 instr_dma_ht5_exec(struct rte_swx_pipeline *p)
2860 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
2863 __instr_dma_ht_exec(p, 5);
2870 instr_dma_ht6_exec(struct rte_swx_pipeline *p)
2872 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
2875 __instr_dma_ht_exec(p, 6);
2882 instr_dma_ht7_exec(struct rte_swx_pipeline *p)
2884 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
2887 __instr_dma_ht_exec(p, 7);
2894 instr_dma_ht8_exec(struct rte_swx_pipeline *p)
2896 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
2899 __instr_dma_ht_exec(p, 8);
/*
 * Translate "add dst src". Variant selection by operand kind: plain ADD,
 * ADD_HM (header += meta), ADD_MH, ADD_HH, or immediate ADD_MI/ADD_HI when
 * src parses as a number.
 */
2909 instr_alu_add_translate(struct rte_swx_pipeline *p,
2910 struct action *action,
2913 struct instruction *instr,
2914 struct instruction_data *data __rte_unused)
2916 char *dst = tokens[1], *src = tokens[2];
2917 struct field *fdst, *fsrc;
2918 uint32_t dst_struct_id, src_struct_id, src_val;
2920 CHECK(n_tokens == 3, EINVAL);
2922 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
2923 CHECK(fdst, EINVAL);
2925 /* ADD, ADD_HM, ADD_MH, ADD_HH. */
2926 fsrc = struct_field_parse(p, action, src, &src_struct_id);
2928 instr->type = INSTR_ALU_ADD;
2929 if (dst[0] == 'h' && src[0] == 'm')
2930 instr->type = INSTR_ALU_ADD_HM;
2931 if (dst[0] == 'm' && src[0] == 'h')
2932 instr->type = INSTR_ALU_ADD_MH;
2933 if (dst[0] == 'h' && src[0] == 'h')
2934 instr->type = INSTR_ALU_ADD_HH;
2936 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
2937 instr->alu.dst.n_bits = fdst->n_bits;
2938 instr->alu.dst.offset = fdst->offset / 8;
2939 instr->alu.src.struct_id = (uint8_t)src_struct_id;
2940 instr->alu.src.n_bits = fsrc->n_bits;
2941 instr->alu.src.offset = fsrc->offset / 8;
2945 /* ADD_MI, ADD_HI. */
2946 src_val = strtoul(src, &src, 0);
2947 CHECK(!src[0], EINVAL);
2949 instr->type = INSTR_ALU_ADD_MI;
2951 instr->type = INSTR_ALU_ADD_HI;
2953 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
2954 instr->alu.dst.n_bits = fdst->n_bits;
2955 instr->alu.dst.offset = fdst->offset / 8;
2956 instr->alu.src_val = (uint32_t)src_val;
/*
 * Translate "sub dst src": mirror image of add translate with the SUB
 * instruction variants.
 */
2961 instr_alu_sub_translate(struct rte_swx_pipeline *p,
2962 struct action *action,
2965 struct instruction *instr,
2966 struct instruction_data *data __rte_unused)
2968 char *dst = tokens[1], *src = tokens[2];
2969 struct field *fdst, *fsrc;
2970 uint32_t dst_struct_id, src_struct_id, src_val;
2972 CHECK(n_tokens == 3, EINVAL);
2974 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
2975 CHECK(fdst, EINVAL);
2977 /* SUB, SUB_HM, SUB_MH, SUB_HH. */
2978 fsrc = struct_field_parse(p, action, src, &src_struct_id);
2980 instr->type = INSTR_ALU_SUB;
2981 if (dst[0] == 'h' && src[0] == 'm')
2982 instr->type = INSTR_ALU_SUB_HM;
2983 if (dst[0] == 'm' && src[0] == 'h')
2984 instr->type = INSTR_ALU_SUB_MH;
2985 if (dst[0] == 'h' && src[0] == 'h')
2986 instr->type = INSTR_ALU_SUB_HH;
2988 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
2989 instr->alu.dst.n_bits = fdst->n_bits;
2990 instr->alu.dst.offset = fdst->offset / 8;
2991 instr->alu.src.struct_id = (uint8_t)src_struct_id;
2992 instr->alu.src.n_bits = fsrc->n_bits;
2993 instr->alu.src.offset = fsrc->offset / 8;
2997 /* SUB_MI, SUB_HI. */
2998 src_val = strtoul(src, &src, 0);
2999 CHECK(!src[0], EINVAL);
3001 instr->type = INSTR_ALU_SUB_MI;
3003 instr->type = INSTR_ALU_SUB_HI;
3005 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3006 instr->alu.dst.n_bits = fdst->n_bits;
3007 instr->alu.dst.offset = fdst->offset / 8;
3008 instr->alu.src_val = (uint32_t)src_val;
/*
 * Translate "ckadd dst src" (checksum add). dst must be a 16-bit header
 * field. src is either another header field (CKADD_FIELD) or a whole header
 * (CKADD_STRUCT, specialized to CKADD_STRUCT20 for 20-byte structs, i.e. an
 * IPv4 header without options).
 */
3013 instr_alu_ckadd_translate(struct rte_swx_pipeline *p,
3014 struct action *action __rte_unused,
3017 struct instruction *instr,
3018 struct instruction_data *data __rte_unused)
3020 char *dst = tokens[1], *src = tokens[2];
3021 struct header *hdst, *hsrc;
3022 struct field *fdst, *fsrc;
3024 CHECK(n_tokens == 3, EINVAL);
/* Checksum destination must be a 16-bit header field. */
3026 fdst = header_field_parse(p, dst, &hdst);
3027 CHECK(fdst && (fdst->n_bits == 16), EINVAL);
3030 fsrc = header_field_parse(p, src, &hsrc);
3032 instr->type = INSTR_ALU_CKADD_FIELD;
3033 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
3034 instr->alu.dst.n_bits = fdst->n_bits;
3035 instr->alu.dst.offset = fdst->offset / 8;
3036 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
3037 instr->alu.src.n_bits = fsrc->n_bits;
3038 instr->alu.src.offset = fsrc->offset / 8;
3042 /* CKADD_STRUCT, CKADD_STRUCT20. */
3043 hsrc = header_parse(p, src);
3044 CHECK(hsrc, EINVAL);
3046 instr->type = INSTR_ALU_CKADD_STRUCT;
3047 if ((hsrc->st->n_bits / 8) == 20)
3048 instr->type = INSTR_ALU_CKADD_STRUCT20;
3050 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
3051 instr->alu.dst.n_bits = fdst->n_bits;
3052 instr->alu.dst.offset = fdst->offset / 8;
3053 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
3054 instr->alu.src.n_bits = hsrc->st->n_bits;
3055 instr->alu.src.offset = 0; /* Unused. */
/*
 * Translate "cksub dst src" (checksum subtract): both operands must be
 * header fields; dst must be 16 bits wide.
 */
3060 instr_alu_cksub_translate(struct rte_swx_pipeline *p,
3061 struct action *action __rte_unused,
3064 struct instruction *instr,
3065 struct instruction_data *data __rte_unused)
3067 char *dst = tokens[1], *src = tokens[2];
3068 struct header *hdst, *hsrc;
3069 struct field *fdst, *fsrc;
3071 CHECK(n_tokens == 3, EINVAL);
3073 fdst = header_field_parse(p, dst, &hdst);
3074 CHECK(fdst && (fdst->n_bits == 16), EINVAL);
3076 fsrc = header_field_parse(p, src, &hsrc);
3077 CHECK(fsrc, EINVAL);
3079 instr->type = INSTR_ALU_CKSUB_FIELD;
3080 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
3081 instr->alu.dst.n_bits = fdst->n_bits;
3082 instr->alu.dst.offset = fdst->offset / 8;
3083 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
3084 instr->alu.src.n_bits = fsrc->n_bits;
3085 instr->alu.src.offset = fsrc->offset / 8;
/*
 * Translate "and dst src": AND (same byte order), AND_S when exactly one
 * operand is a header field, or AND_I for an immediate source.
 */
3090 instr_alu_and_translate(struct rte_swx_pipeline *p,
3091 struct action *action,
3094 struct instruction *instr,
3095 struct instruction_data *data __rte_unused)
3097 char *dst = tokens[1], *src = tokens[2];
3098 struct field *fdst, *fsrc;
3099 uint32_t dst_struct_id, src_struct_id, src_val;
3101 CHECK(n_tokens == 3, EINVAL);
3103 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3104 CHECK(fdst, EINVAL);
3107 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3109 instr->type = INSTR_ALU_AND;
/* Mixed header/non-header operands need the byte-swapping variant. */
3110 if ((dst[0] == 'h' && src[0] != 'h') ||
3111 (dst[0] != 'h' && src[0] == 'h'))
3112 instr->type = INSTR_ALU_AND_S;
3114 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3115 instr->alu.dst.n_bits = fdst->n_bits;
3116 instr->alu.dst.offset = fdst->offset / 8;
3117 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3118 instr->alu.src.n_bits = fsrc->n_bits;
3119 instr->alu.src.offset = fsrc->offset / 8;
/* Immediate source: must consume the whole token. */
3124 src_val = strtoul(src, &src, 0);
3125 CHECK(!src[0], EINVAL);
/* Presumably only when dst is a header field — condition line elided. */
3128 src_val = htonl(src_val);
3130 instr->type = INSTR_ALU_AND_I;
3131 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3132 instr->alu.dst.n_bits = fdst->n_bits;
3133 instr->alu.dst.offset = fdst->offset / 8;
3134 instr->alu.src_val = (uint32_t)src_val;
/* Translate "or dst src": same structure as and translate (OR/OR_S/OR_I). */
3139 instr_alu_or_translate(struct rte_swx_pipeline *p,
3140 struct action *action,
3143 struct instruction *instr,
3144 struct instruction_data *data __rte_unused)
3146 char *dst = tokens[1], *src = tokens[2];
3147 struct field *fdst, *fsrc;
3148 uint32_t dst_struct_id, src_struct_id, src_val;
3150 CHECK(n_tokens == 3, EINVAL);
3152 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3153 CHECK(fdst, EINVAL);
3156 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3158 instr->type = INSTR_ALU_OR;
3159 if ((dst[0] == 'h' && src[0] != 'h') ||
3160 (dst[0] != 'h' && src[0] == 'h'))
3161 instr->type = INSTR_ALU_OR_S;
3163 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3164 instr->alu.dst.n_bits = fdst->n_bits;
3165 instr->alu.dst.offset = fdst->offset / 8;
3166 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3167 instr->alu.src.n_bits = fsrc->n_bits;
3168 instr->alu.src.offset = fsrc->offset / 8;
3173 src_val = strtoul(src, &src, 0);
3174 CHECK(!src[0], EINVAL);
3177 src_val = htonl(src_val);
3179 instr->type = INSTR_ALU_OR_I;
3180 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3181 instr->alu.dst.n_bits = fdst->n_bits;
3182 instr->alu.dst.offset = fdst->offset / 8;
3183 instr->alu.src_val = (uint32_t)src_val;
/*
 * Execution handlers for the add/sub/and/or variants. Each reads the
 * instruction operands and applies the operation; the actual ALU macro
 * invocations are elided from this listing, so only the TRACE and operand
 * setup lines are visible. Suffixes: mh = meta dst / header src, hm = header
 * dst / meta src, hh = both headers, mi/hi = immediate src, s = byte-swap,
 * i = immediate.
 */
3188 instr_alu_add_exec(struct rte_swx_pipeline *p)
3190 struct thread *t = &p->threads[p->thread_id];
3191 struct instruction *ip = t->ip;
3193 TRACE("[Thread %2u] add\n", p->thread_id);
3203 instr_alu_add_mh_exec(struct rte_swx_pipeline *p)
3205 struct thread *t = &p->threads[p->thread_id];
3206 struct instruction *ip = t->ip;
3208 TRACE("[Thread %2u] add (mh)\n", p->thread_id);
3218 instr_alu_add_hm_exec(struct rte_swx_pipeline *p)
3220 struct thread *t = &p->threads[p->thread_id];
3221 struct instruction *ip = t->ip;
3223 TRACE("[Thread %2u] add (hm)\n", p->thread_id);
3233 instr_alu_add_hh_exec(struct rte_swx_pipeline *p)
3235 struct thread *t = &p->threads[p->thread_id];
3236 struct instruction *ip = t->ip;
3238 TRACE("[Thread %2u] add (hh)\n", p->thread_id);
3248 instr_alu_add_mi_exec(struct rte_swx_pipeline *p)
3250 struct thread *t = &p->threads[p->thread_id];
3251 struct instruction *ip = t->ip;
3253 TRACE("[Thread %2u] add (mi)\n", p->thread_id);
3263 instr_alu_add_hi_exec(struct rte_swx_pipeline *p)
3265 struct thread *t = &p->threads[p->thread_id];
3266 struct instruction *ip = t->ip;
3268 TRACE("[Thread %2u] add (hi)\n", p->thread_id);
3278 instr_alu_sub_exec(struct rte_swx_pipeline *p)
3280 struct thread *t = &p->threads[p->thread_id];
3281 struct instruction *ip = t->ip;
3283 TRACE("[Thread %2u] sub\n", p->thread_id);
3293 instr_alu_sub_mh_exec(struct rte_swx_pipeline *p)
3295 struct thread *t = &p->threads[p->thread_id];
3296 struct instruction *ip = t->ip;
3298 TRACE("[Thread %2u] sub (mh)\n", p->thread_id);
3308 instr_alu_sub_hm_exec(struct rte_swx_pipeline *p)
3310 struct thread *t = &p->threads[p->thread_id];
3311 struct instruction *ip = t->ip;
3313 TRACE("[Thread %2u] sub (hm)\n", p->thread_id);
3323 instr_alu_sub_hh_exec(struct rte_swx_pipeline *p)
3325 struct thread *t = &p->threads[p->thread_id];
3326 struct instruction *ip = t->ip;
3328 TRACE("[Thread %2u] sub (hh)\n", p->thread_id);
3338 instr_alu_sub_mi_exec(struct rte_swx_pipeline *p)
3340 struct thread *t = &p->threads[p->thread_id];
3341 struct instruction *ip = t->ip;
3343 TRACE("[Thread %2u] sub (mi)\n", p->thread_id);
3353 instr_alu_sub_hi_exec(struct rte_swx_pipeline *p)
3355 struct thread *t = &p->threads[p->thread_id];
3356 struct instruction *ip = t->ip;
3358 TRACE("[Thread %2u] sub (hi)\n", p->thread_id);
3368 instr_alu_and_exec(struct rte_swx_pipeline *p)
3370 struct thread *t = &p->threads[p->thread_id];
3371 struct instruction *ip = t->ip;
3373 TRACE("[Thread %2u] and\n", p->thread_id);
3383 instr_alu_and_s_exec(struct rte_swx_pipeline *p)
3385 struct thread *t = &p->threads[p->thread_id];
3386 struct instruction *ip = t->ip;
3388 TRACE("[Thread %2u] and (s)\n", p->thread_id);
3398 instr_alu_and_i_exec(struct rte_swx_pipeline *p)
3400 struct thread *t = &p->threads[p->thread_id];
3401 struct instruction *ip = t->ip;
3403 TRACE("[Thread %2u] and (i)\n", p->thread_id);
3413 instr_alu_or_exec(struct rte_swx_pipeline *p)
3415 struct thread *t = &p->threads[p->thread_id];
3416 struct instruction *ip = t->ip;
3418 TRACE("[Thread %2u] or\n", p->thread_id);
3428 instr_alu_or_s_exec(struct rte_swx_pipeline *p)
3430 struct thread *t = &p->threads[p->thread_id];
3431 struct instruction *ip = t->ip;
3433 TRACE("[Thread %2u] or (s)\n", p->thread_id);
3443 instr_alu_or_i_exec(struct rte_swx_pipeline *p)
3445 struct thread *t = &p->threads[p->thread_id];
3446 struct instruction *ip = t->ip;
3448 TRACE("[Thread %2u] or (i)\n", p->thread_id);
/*
 * Execute ckadd (field): incrementally fold a source field into a 16-bit
 * one's-complement Internet checksum (RFC 1071 style). The source is read as
 * a 64-bit word, masked to its declared width, and folded in three rounds of
 * end-around carry so the result fits in 16 bits with no residual carry.
 * Lines loading src64 and the initial r are elided from this listing.
 */
3458 instr_alu_ckadd_field_exec(struct rte_swx_pipeline *p)
3460 struct thread *t = &p->threads[p->thread_id];
3461 struct instruction *ip = t->ip;
3462 uint8_t *dst_struct, *src_struct;
3463 uint16_t *dst16_ptr, dst;
3464 uint64_t *src64_ptr, src64, src64_mask, src;
3467 TRACE("[Thread %2u] ckadd (field)\n", p->thread_id);
3470 dst_struct = t->structs[ip->alu.dst.struct_id];
3471 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
3474 src_struct = t->structs[ip->alu.src.struct_id];
3475 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
/* Mask away bits beyond the field's declared width. */
3477 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
3478 src = src64 & src64_mask;
3483 /* The first input (r) is a 16-bit number. The second and the third
3484 * inputs are 32-bit numbers. In the worst case scenario, the sum of the
3485 * three numbers (output r) is a 34-bit number.
3487 r += (src >> 32) + (src & 0xFFFFFFFF);
3489 /* The first input is a 16-bit number. The second input is an 18-bit
3490 * number. In the worst case scenario, the sum of the two numbers is a
3493 r = (r & 0xFFFF) + (r >> 16);
3495 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
3496 * a 3-bit number (0 .. 7). Their sum is a 17-bit number (0 .. 0x10006).
3498 r = (r & 0xFFFF) + (r >> 16);
3500 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
3501 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
3502 * 0x10006), the output r is (0 .. 7). So no carry bit can be generated,
3503 * therefore the output r is always a 16-bit number.
3505 r = (r & 0xFFFF) + (r >> 16);
3510 *dst16_ptr = (uint16_t)r;
/*
 * Execute cksub (field): remove a source field's contribution from a 16-bit
 * one's-complement checksum. A large multiple of the 0xFFFF modulus is added
 * first so the subtraction cannot underflow; end-around carry folding then
 * reduces the result back to 16 bits.
 */
3517 instr_alu_cksub_field_exec(struct rte_swx_pipeline *p)
3519 struct thread *t = &p->threads[p->thread_id];
3520 struct instruction *ip = t->ip;
3521 uint8_t *dst_struct, *src_struct;
3522 uint16_t *dst16_ptr, dst;
3523 uint64_t *src64_ptr, src64, src64_mask, src;
3526 TRACE("[Thread %2u] cksub (field)\n", p->thread_id);
3529 dst_struct = t->structs[ip->alu.dst.struct_id];
3530 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
3533 src_struct = t->structs[ip->alu.src.struct_id];
3534 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
3536 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
3537 src = src64 & src64_mask;
3542 /* Subtraction in 1's complement arithmetic (i.e. a '- b) is the same as
3543 * the following sequence of operations in 2's complement arithmetic:
3544 * a '- b = (a - b) % 0xFFFF.
3546 * In order to prevent an underflow for the below subtraction, in which
3547 * a 33-bit number (the subtrahend) is taken out of a 16-bit number (the
3548 * minuend), we first add a multiple of the 0xFFFF modulus to the
3549 * minuend. The number we add to the minuend needs to be a 34-bit number
3550 * or higher, so for readability reasons we picked the 36-bit multiple.
3551 * We are effectively turning the 16-bit minuend into a 36-bit number:
3552 * (a - b) % 0xFFFF = (a + 0xFFFF00000 - b) % 0xFFFF.
3554 r += 0xFFFF00000ULL; /* The output r is a 36-bit number. */
3556 /* A 33-bit number is subtracted from a 36-bit number (the input r). The
3557 * result (the output r) is a 36-bit number.
3559 r -= (src >> 32) + (src & 0xFFFFFFFF);
3561 /* The first input is a 16-bit number. The second input is a 20-bit
3562 * number. Their sum is a 21-bit number.
3564 r = (r & 0xFFFF) + (r >> 16);
3566 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
3567 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1001E).
3569 r = (r & 0xFFFF) + (r >> 16);
3571 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
3572 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
3573 * 0x1001E), the output r is (0 .. 31). So no carry bit can be
3574 * generated, therefore the output r is always a 16-bit number.
3576 r = (r & 0xFFFF) + (r >> 16);
3581 *dst16_ptr = (uint16_t)r;
/* Execute the "ckadd" instruction specialized for a 20-byte header (the
 * IPv4 header without options): compute the 16-bit 1's complement checksum
 * over five 32-bit words using two parallel accumulators, then fold.
 */
3588 instr_alu_ckadd_struct20_exec(struct rte_swx_pipeline *p)
3590 struct thread *t = &p->threads[p->thread_id];
3591 struct instruction *ip = t->ip;
3592 uint8_t *dst_struct, *src_struct;
3593 uint16_t *dst16_ptr;
3594 uint32_t *src32_ptr;
3597 TRACE("[Thread %2u] ckadd (struct of 20 bytes)\n", p->thread_id);
/* Destination: 16-bit checksum field. */
3600 dst_struct = t->structs[ip->alu.dst.struct_id];
3601 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
/* Source: the 20-byte header, read as five 32-bit words. */
3603 src_struct = t->structs[ip->alu.src.struct_id];
3604 src32_ptr = (uint32_t *)&src_struct[0];
/* Two independent accumulators (r0, r1) shorten the dependency chain. */
3606 r0 = src32_ptr[0]; /* r0 is a 32-bit number. */
3607 r1 = src32_ptr[1]; /* r1 is a 32-bit number. */
3608 r0 += src32_ptr[2]; /* The output r0 is a 33-bit number. */
3609 r1 += src32_ptr[3]; /* The output r1 is a 33-bit number. */
3610 r0 += r1 + src32_ptr[4]; /* The output r0 is a 35-bit number. */
3612 /* The first input is a 16-bit number. The second input is a 19-bit
3613 * number. Their sum is a 20-bit number.
3615 r0 = (r0 & 0xFFFF) + (r0 >> 16);
3617 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
3618 * a 4-bit number (0 .. 15). The sum is a 17-bit number (0 .. 0x1000E).
3620 r0 = (r0 & 0xFFFF) + (r0 >> 16);
3622 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
3623 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
3624 * 0x1000E), the output r is (0 .. 15). So no carry bit can be
3625 * generated, therefore the output r is always a 16-bit number.
3627 r0 = (r0 & 0xFFFF) + (r0 >> 16);
/* Checksum convention: the all-zeros result is transmitted as 0xFFFF. */
3630 r0 = r0 ? r0 : 0xFFFF;
3632 *dst16_ptr = (uint16_t)r0;
/* Execute the generic "ckadd" instruction over a whole header struct:
 * sum the header as 32-bit words, then fold to a 16-bit 1's complement
 * checksum and store it into the destination field.
 */
3639 instr_alu_ckadd_struct_exec(struct rte_swx_pipeline *p)
3641 struct thread *t = &p->threads[p->thread_id];
3642 struct instruction *ip = t->ip;
3643 uint8_t *dst_struct, *src_struct;
3644 uint16_t *dst16_ptr;
3645 uint32_t *src32_ptr;
3649 TRACE("[Thread %2u] ckadd (struct)\n", p->thread_id);
/* Destination: 16-bit checksum field. */
3652 dst_struct = t->structs[ip->alu.dst.struct_id];
3653 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
/* Source: the header, read from offset 0 as an array of 32-bit words. */
3655 src_struct = t->structs[ip->alu.src.struct_id];
3656 src32_ptr = (uint32_t *)&src_struct[0];
3658 /* The max number of 32-bit words in a 32-byte header is 8 = 2^3.
3659 * Therefore, in the worst case scenario, a 35-bit number is added to a
3660 * 16-bit number (the input r), so the output r is 36-bit number.
3662 for (i = 0; i < ip->alu.src.n_bits / 32; i++, src32_ptr++)
3665 /* The first input is a 16-bit number. The second input is a 20-bit
3666 * number. Their sum is a 21-bit number.
3668 r = (r & 0xFFFF) + (r >> 16);
3670 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
3671 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1001E).
3673 r = (r & 0xFFFF) + (r >> 16);
3675 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
3676 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
3677 * 0x1001E), the output r is (0 .. 31). So no carry bit can be
3678 * generated, therefore the output r is always a 16-bit number.
3680 r = (r & 0xFFFF) + (r >> 16);
/* Store the folded 16-bit result back into the checksum field. */
3685 *dst16_ptr = (uint16_t)r;
3691 #define RTE_SWX_INSTRUCTION_TOKENS_MAX 16
/* Translate one instruction string into its binary form (struct instruction)
 * plus per-instruction metadata (struct instruction_data): tokenize the
 * string in place, strip an optional leading "label :" pair, then dispatch
 * to the per-opcode translate function based on the first remaining token.
 * Returns 0 on success or a negative errno-style code on failure.
 */
3694 instr_translate(struct rte_swx_pipeline *p,
3695 struct action *action,
3697 struct instruction *instr,
3698 struct instruction_data *data)
3700 char *tokens[RTE_SWX_INSTRUCTION_TOKENS_MAX];
3701 int n_tokens = 0, tpos = 0;
3703 /* Parse the instruction string into tokens. */
/* strtok_r mutates the caller-supplied string; the caller passes a copy. */
3707 token = strtok_r(string, " \t\v", &string);
3711 CHECK(n_tokens < RTE_SWX_INSTRUCTION_TOKENS_MAX, EINVAL);
3713 tokens[n_tokens] = token;
3717 CHECK(n_tokens, EINVAL);
3719 /* Handle the optional instruction label. */
3720 if ((n_tokens >= 2) && !strcmp(tokens[1], ":")) {
3721 strcpy(data->label, tokens[0]);
/* After consuming "label :", at least one token (the opcode) must remain. */
3724 CHECK(n_tokens - tpos, EINVAL);
3727 /* Identify the instruction type. */
3728 if (!strcmp(tokens[tpos], "rx"))
3729 return instr_rx_translate(p,
3736 if (!strcmp(tokens[tpos], "tx"))
3737 return instr_tx_translate(p,
3744 if (!strcmp(tokens[tpos], "extract"))
3745 return instr_hdr_extract_translate(p,
3752 if (!strcmp(tokens[tpos], "emit"))
3753 return instr_hdr_emit_translate(p,
3760 if (!strcmp(tokens[tpos], "validate"))
3761 return instr_hdr_validate_translate(p,
3768 if (!strcmp(tokens[tpos], "invalidate"))
3769 return instr_hdr_invalidate_translate(p,
3776 if (!strcmp(tokens[tpos], "mov"))
3777 return instr_mov_translate(p,
3784 if (!strcmp(tokens[tpos], "dma"))
3785 return instr_dma_translate(p,
3792 if (!strcmp(tokens[tpos], "add"))
3793 return instr_alu_add_translate(p,
3800 if (!strcmp(tokens[tpos], "sub"))
3801 return instr_alu_sub_translate(p,
3808 if (!strcmp(tokens[tpos], "ckadd"))
3809 return instr_alu_ckadd_translate(p,
3816 if (!strcmp(tokens[tpos], "cksub"))
3817 return instr_alu_cksub_translate(p,
3824 if (!strcmp(tokens[tpos], "and"))
3825 return instr_alu_and_translate(p,
3832 if (!strcmp(tokens[tpos], "or"))
3833 return instr_alu_or_translate(p,
/* Count how many of the n instructions jump to the given label (i.e. have
 * a matching jmp_label). Used to compute the per-label user count.
 */
3844 label_is_used(struct instruction_data *data, uint32_t n, const char *label)
3846 uint32_t count = 0, i;
3851 for (i = 0; i < n; i++)
3852 if (!strcmp(label, data[i].jmp_label))
/* Validate instruction labels: every non-empty label must be unique across
 * the instruction array; then record, for each labelled instruction, how
 * many instructions jump to it (n_users).
 */
3859 instr_label_check(struct instruction_data *instruction_data,
3860 uint32_t n_instructions)
3864 /* Check that all instruction labels are unique. */
3865 for (i = 0; i < n_instructions; i++) {
3866 struct instruction_data *data = &instruction_data[i];
3867 char *label = data->label;
/* Compare against every later instruction; duplicates are rejected. */
3873 for (j = i + 1; j < n_instructions; j++)
3874 CHECK(strcmp(label, data[j].label), EINVAL);
3877 /* Get users for each instruction label. */
3878 for (i = 0; i < n_instructions; i++) {
3879 struct instruction_data *data = &instruction_data[i];
3880 char *label = data->label;
3882 data->n_users = label_is_used(instruction_data,
/* Translate an array of instruction strings into binary instructions and
 * attach them either to an action (a != NULL) or to the pipeline's main
 * instruction sequence (a == NULL). Each input string is duplicated before
 * translation because instr_translate tokenizes it in place.
 */
3891 instruction_config(struct rte_swx_pipeline *p,
3893 const char **instructions,
3894 uint32_t n_instructions)
3896 struct instruction *instr = NULL;
3897 struct instruction_data *data = NULL;
3898 char *string = NULL;
3902 CHECK(n_instructions, EINVAL);
3903 CHECK(instructions, EINVAL);
3904 for (i = 0; i < n_instructions; i++)
3905 CHECK(instructions[i], EINVAL);
3907 /* Memory allocation. */
3908 instr = calloc(n_instructions, sizeof(struct instruction));
3914 data = calloc(n_instructions, sizeof(struct instruction_data));
/* Translate each instruction from its (mutable) string copy. */
3920 for (i = 0; i < n_instructions; i++) {
3921 string = strdup(instructions[i]);
3927 err = instr_translate(p, a, string, &instr[i], &data[i]);
/* Label uniqueness / user-count validation over the whole sequence. */
3934 err = instr_label_check(data, n_instructions);
/* Ownership of the instr array transfers to the action or the pipeline. */
3941 a->instructions = instr;
3942 a->n_instructions = n_instructions;
3944 p->instructions = instr;
3945 p->n_instructions = n_instructions;
3957 typedef void (*instr_exec_t)(struct rte_swx_pipeline *);
/* Dispatch table: maps each instruction opcode (enum value used as the
 * designated-initializer index) to its execution handler. instr_exec()
 * indexes this table with ip->type on every instruction.
 */
3959 static instr_exec_t instruction_table[] = {
3960 [INSTR_RX] = instr_rx_exec,
3961 [INSTR_TX] = instr_tx_exec,
3963 [INSTR_HDR_EXTRACT] = instr_hdr_extract_exec,
3964 [INSTR_HDR_EXTRACT2] = instr_hdr_extract2_exec,
3965 [INSTR_HDR_EXTRACT3] = instr_hdr_extract3_exec,
3966 [INSTR_HDR_EXTRACT4] = instr_hdr_extract4_exec,
3967 [INSTR_HDR_EXTRACT5] = instr_hdr_extract5_exec,
3968 [INSTR_HDR_EXTRACT6] = instr_hdr_extract6_exec,
3969 [INSTR_HDR_EXTRACT7] = instr_hdr_extract7_exec,
3970 [INSTR_HDR_EXTRACT8] = instr_hdr_extract8_exec,
3972 [INSTR_HDR_EMIT] = instr_hdr_emit_exec,
3973 [INSTR_HDR_EMIT_TX] = instr_hdr_emit_tx_exec,
3974 [INSTR_HDR_EMIT2_TX] = instr_hdr_emit2_tx_exec,
3975 [INSTR_HDR_EMIT3_TX] = instr_hdr_emit3_tx_exec,
3976 [INSTR_HDR_EMIT4_TX] = instr_hdr_emit4_tx_exec,
3977 [INSTR_HDR_EMIT5_TX] = instr_hdr_emit5_tx_exec,
3978 [INSTR_HDR_EMIT6_TX] = instr_hdr_emit6_tx_exec,
3979 [INSTR_HDR_EMIT7_TX] = instr_hdr_emit7_tx_exec,
3980 [INSTR_HDR_EMIT8_TX] = instr_hdr_emit8_tx_exec,
3982 [INSTR_HDR_VALIDATE] = instr_hdr_validate_exec,
3983 [INSTR_HDR_INVALIDATE] = instr_hdr_invalidate_exec,
3985 [INSTR_MOV] = instr_mov_exec,
3986 [INSTR_MOV_S] = instr_mov_s_exec,
3987 [INSTR_MOV_I] = instr_mov_i_exec,
3989 [INSTR_DMA_HT] = instr_dma_ht_exec,
3990 [INSTR_DMA_HT2] = instr_dma_ht2_exec,
3991 [INSTR_DMA_HT3] = instr_dma_ht3_exec,
3992 [INSTR_DMA_HT4] = instr_dma_ht4_exec,
3993 [INSTR_DMA_HT5] = instr_dma_ht5_exec,
3994 [INSTR_DMA_HT6] = instr_dma_ht6_exec,
3995 [INSTR_DMA_HT7] = instr_dma_ht7_exec,
3996 [INSTR_DMA_HT8] = instr_dma_ht8_exec,
3998 [INSTR_ALU_ADD] = instr_alu_add_exec,
3999 [INSTR_ALU_ADD_MH] = instr_alu_add_mh_exec,
4000 [INSTR_ALU_ADD_HM] = instr_alu_add_hm_exec,
4001 [INSTR_ALU_ADD_HH] = instr_alu_add_hh_exec,
4002 [INSTR_ALU_ADD_MI] = instr_alu_add_mi_exec,
4003 [INSTR_ALU_ADD_HI] = instr_alu_add_hi_exec,
4005 [INSTR_ALU_SUB] = instr_alu_sub_exec,
4006 [INSTR_ALU_SUB_MH] = instr_alu_sub_mh_exec,
4007 [INSTR_ALU_SUB_HM] = instr_alu_sub_hm_exec,
4008 [INSTR_ALU_SUB_HH] = instr_alu_sub_hh_exec,
4009 [INSTR_ALU_SUB_MI] = instr_alu_sub_mi_exec,
4010 [INSTR_ALU_SUB_HI] = instr_alu_sub_hi_exec,
4012 [INSTR_ALU_CKADD_FIELD] = instr_alu_ckadd_field_exec,
4013 [INSTR_ALU_CKADD_STRUCT] = instr_alu_ckadd_struct_exec,
4014 [INSTR_ALU_CKADD_STRUCT20] = instr_alu_ckadd_struct20_exec,
4015 [INSTR_ALU_CKSUB_FIELD] = instr_alu_cksub_field_exec,
4017 [INSTR_ALU_AND] = instr_alu_and_exec,
4018 [INSTR_ALU_AND_S] = instr_alu_and_s_exec,
4019 [INSTR_ALU_AND_I] = instr_alu_and_i_exec,
4021 [INSTR_ALU_OR] = instr_alu_or_exec,
4022 [INSTR_ALU_OR_S] = instr_alu_or_s_exec,
4023 [INSTR_ALU_OR_I] = instr_alu_or_i_exec,
/* Execute the current thread's next instruction by dispatching through
 * instruction_table on the instruction's opcode.
 */
4027 instr_exec(struct rte_swx_pipeline *p)
4029 struct thread *t = &p->threads[p->thread_id];
4030 struct instruction *ip = t->ip;
4031 instr_exec_t instr = instruction_table[ip->type];
/* Look up an action by name in the pipeline's action list; returns the
 * matching element or NULL when not found.
 */
static struct action *
4040 action_find(struct rte_swx_pipeline *p, const char *name)
4042 struct action *elem;
4047 TAILQ_FOREACH(elem, &p->actions, node)
4048 if (strcmp(elem->name, name) == 0)
/* Find a field by name in the action's argument struct type, or NULL when
 * the action takes no arguments (a->st == NULL) or the field is absent.
 */
static struct field *
4055 action_field_find(struct action *a, const char *name)
4057 return a->st ? struct_type_field_find(a->st, name) : NULL;
/* Parse an action-argument reference of the form "t.<field>" and resolve
 * the field within the action's argument struct; NULL on bad prefix.
 */
static struct field *
4061 action_field_parse(struct action *action, const char *name)
4063 if (name[0] != 't' || name[1] != '.')
4066 return action_field_find(action, &name[2]);
/* Public API: register a new action with the pipeline. The action name must
 * be unique; the optional argument struct type must already be registered.
 * The instruction strings are translated immediately; on success the action
 * is appended to the pipeline's action list and assigned the next action id.
 */
4070 rte_swx_pipeline_action_config(struct rte_swx_pipeline *p,
4072 const char *args_struct_type_name,
4073 const char **instructions,
4074 uint32_t n_instructions)
4076 struct struct_type *args_struct_type;
4082 CHECK_NAME(name, EINVAL);
4083 CHECK(!action_find(p, name), EEXIST);
4085 if (args_struct_type_name) {
4086 CHECK_NAME(args_struct_type_name, EINVAL);
4087 args_struct_type = struct_type_find(p, args_struct_type_name);
4088 CHECK(args_struct_type, EINVAL);
4090 args_struct_type = NULL;
4093 /* Node allocation. */
4094 a = calloc(1, sizeof(struct action));
4097 /* Node initialization. */
4098 strcpy(a->name, name);
4099 a->st = args_struct_type;
4100 a->id = p->n_actions;
4102 /* Instruction translation. */
4103 err = instruction_config(p, a, instructions, n_instructions);
4109 /* Node add to tailq. */
4110 TAILQ_INSERT_TAIL(&p->actions, a, node);
/* Build step: create the per-pipeline array that maps action id to its
 * translated instruction sequence, for O(1) dispatch at run time.
 */
4117 action_build(struct rte_swx_pipeline *p)
4119 struct action *action;
4121 p->action_instructions = calloc(p->n_actions,
4122 sizeof(struct instruction *));
4123 CHECK(p->action_instructions, ENOMEM);
4125 TAILQ_FOREACH(action, &p->actions, node)
4126 p->action_instructions[action->id] = action->instructions;
/* Undo action_build: release the action-id -> instructions lookup array. */
4132 action_build_free(struct rte_swx_pipeline *p)
4134 free(p->action_instructions);
4135 p->action_instructions = NULL;
/* Free all registered actions: first the build artifacts, then each action
 * node (and its translated instructions) popped off the tailq.
 */
4139 action_free(struct rte_swx_pipeline *p)
4141 action_build_free(p)
4144 struct action *action;
4146 action = TAILQ_FIRST(&p->actions);
4150 TAILQ_REMOVE(&p->actions, action, node);
4151 free(action->instructions);
/* Look up a registered table type by exact name; NULL when not found. */
static struct table_type *
4160 table_type_find(struct rte_swx_pipeline *p, const char *name)
4162 struct table_type *elem;
4164 TAILQ_FOREACH(elem, &p->table_types, node)
4165 if (strcmp(elem->name, name) == 0)
/* Pick a table type for the given match type: prefer the recommended type
 * (by name) when it exists AND its match type agrees; otherwise fall back
 * to the first registered type with the required match type.
 */
static struct table_type *
4172 table_type_resolve(struct rte_swx_pipeline *p,
4173 const char *recommended_type_name,
4174 enum rte_swx_table_match_type match_type)
4176 struct table_type *elem;
4178 /* Only consider the recommended type if the match type is correct. */
4179 if (recommended_type_name)
4180 TAILQ_FOREACH(elem, &p->table_types, node)
4181 if (!strcmp(elem->name, recommended_type_name) &&
4182 (elem->match_type == match_type))
4185 /* Ignore the recommended type and get the first element with this match
4188 TAILQ_FOREACH(elem, &p->table_types, node)
4189 if (elem->match_type == match_type)
/* Look up a table by exact name in the pipeline's table list. */
static struct table *
4196 table_find(struct rte_swx_pipeline *p, const char *name)
4200 TAILQ_FOREACH(elem, &p->tables, node)
4201 if (strcmp(elem->name, name) == 0)
/* Look up a table by its numeric id (assigned at table_config time). */
static struct table *
4208 table_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
4210 struct table *table = NULL;
4212 TAILQ_FOREACH(table, &p->tables, node)
4213 if (table->id == id)
/* Public API: register a table implementation (create/lookup/free ops) under
 * a unique name and match type. The ops struct is copied by value, so the
 * caller's copy need not outlive this call.
 */
4220 rte_swx_pipeline_table_type_register(struct rte_swx_pipeline *p,
4222 enum rte_swx_table_match_type match_type,
4223 struct rte_swx_table_ops *ops)
4225 struct table_type *elem;
4229 CHECK_NAME(name, EINVAL);
4230 CHECK(!table_type_find(p, name), EEXIST);
/* create, lkp and free are mandatory callbacks. */
4233 CHECK(ops->create, EINVAL);
4234 CHECK(ops->lkp, EINVAL);
4235 CHECK(ops->free, EINVAL);
4237 /* Node allocation. */
4238 elem = calloc(1, sizeof(struct table_type));
4239 CHECK(elem, ENOMEM);
4241 /* Node initialization. */
4242 strcpy(elem->name, name);
4243 elem->match_type = match_type;
4244 memcpy(&elem->ops, ops, sizeof(*ops));
4246 /* Node add to tailq. */
4247 TAILQ_INSERT_TAIL(&p->table_types, elem, node);
/* Derive the overall table match type from its match fields: EXACT when all
 * fields are exact-match; LPM when only the last field deviates and it is
 * LPM; WILDCARD otherwise.
 */
static enum rte_swx_table_match_type
4253 table_match_type_resolve(struct rte_swx_match_field_params *fields,
4258 for (i = 0; i < n_fields; i++)
4259 if (fields[i].match_type != RTE_SWX_TABLE_MATCH_EXACT)
4263 return RTE_SWX_TABLE_MATCH_EXACT;
4265 if ((i == n_fields - 1) &&
4266 (fields[i].match_type == RTE_SWX_TABLE_MATCH_LPM))
4267 return RTE_SWX_TABLE_MATCH_LPM;
4269 return RTE_SWX_TABLE_MATCH_WILDCARD;
/* Public API: configure a new match-action table. Validates the match
 * fields (all header or all metadata, strictly increasing offsets), the
 * action list and the default action, resolves the table type from the
 * fields' match types, then allocates and links the table node.
 * Returns 0 on success, negative errno-style code on failure.
 */
4273 rte_swx_pipeline_table_config(struct rte_swx_pipeline *p,
4275 struct rte_swx_pipeline_table_params *params,
4276 const char *recommended_table_type_name,
4280 struct table_type *type;
4282 struct action *default_action;
4283 struct header *header = NULL;
4285 uint32_t offset_prev = 0, action_data_size_max = 0, i;
4289 CHECK_NAME(name, EINVAL);
4290 CHECK(!table_find(p, name), EEXIST);
4292 CHECK(params, EINVAL);
/* Match field checks: each field must resolve to a header field or a
 * metadata field; all fields must come from the same space (and, for
 * header fields, the same header), with strictly increasing offsets.
 */
4295 CHECK(!params->n_fields || params->fields, EINVAL);
4296 for (i = 0; i < params->n_fields; i++) {
4297 struct rte_swx_match_field_params *field = &params->fields[i];
4299 struct field *hf, *mf;
4302 CHECK_NAME(field->name, EINVAL);
4304 hf = header_field_parse(p, field->name, &h);
4305 mf = metadata_field_parse(p, field->name);
4306 CHECK(hf || mf, EINVAL);
4308 offset = hf ? hf->offset : mf->offset;
4311 is_header = hf ? 1 : 0;
4312 header = hf ? h : NULL;
4313 offset_prev = offset;
4318 CHECK((is_header && hf && (h->id == header->id)) ||
4319 (!is_header && mf), EINVAL);
4321 CHECK(offset > offset_prev, EINVAL);
4322 offset_prev = offset;
4325 /* Action checks. */
4326 CHECK(params->n_actions, EINVAL);
4327 CHECK(params->action_names, EINVAL);
4328 for (i = 0; i < params->n_actions; i++) {
4329 const char *action_name = params->action_names[i];
4331 uint32_t action_data_size;
4333 CHECK(action_name, EINVAL);
4335 a = action_find(p, action_name);
/* Track the largest action argument struct; it sizes the per-entry
 * action data buffers.
 */
4338 action_data_size = a->st ? a->st->n_bits / 8 : 0;
4339 if (action_data_size > action_data_size_max)
4340 action_data_size_max = action_data_size;
/* The default action must be one of this table's actions. The search is
 * bounded by params->n_actions (NOT p->n_actions): the latter would read
 * past the end of params->action_names whenever the pipeline has more
 * actions than this table, and could let an absent default action pass.
 */
4343 CHECK(params->default_action_name, EINVAL);
4344 for (i = 0; i < params->n_actions; i++)
4345 if (!strcmp(params->action_names[i],
4346 params->default_action_name))
4348 CHECK(i < params->n_actions, EINVAL);
4349 default_action = action_find(p, params->default_action_name);
/* Default action data is only meaningful for actions with arguments. */
4350 CHECK((default_action->st && params->default_action_data) ||
4351 !params->default_action_data, EINVAL);
4353 /* Table type checks. */
4354 if (params->n_fields) {
4355 enum rte_swx_table_match_type match_type;
4357 match_type = table_match_type_resolve(params->fields,
4359 type = table_type_resolve(p,
4360 recommended_table_type_name,
4362 CHECK(type, EINVAL);
4367 /* Memory allocation. */
4368 t = calloc(1, sizeof(struct table));
4371 t->fields = calloc(params->n_fields, sizeof(struct match_field));
4377 t->actions = calloc(params->n_actions, sizeof(struct action *));
4384 if (action_data_size_max) {
4385 t->default_action_data = calloc(1, action_data_size_max);
4386 if (!t->default_action_data) {
4394 /* Node initialization. */
4395 strcpy(t->name, name);
4396 if (args && args[0])
4397 strcpy(t->args, args);
4400 for (i = 0; i < params->n_fields; i++) {
4401 struct rte_swx_match_field_params *field = &params->fields[i];
4402 struct match_field *f = &t->fields[i];
4404 f->match_type = field->match_type;
4405 f->field = is_header ?
4406 header_field_parse(p, field->name, NULL) :
4407 metadata_field_parse(p, field->name);
4409 t->n_fields = params->n_fields;
4410 t->is_header = is_header;
4413 for (i = 0; i < params->n_actions; i++)
4414 t->actions[i] = action_find(p, params->action_names[i]);
4415 t->default_action = default_action;
4416 if (default_action->st)
4417 memcpy(t->default_action_data,
4418 params->default_action_data,
4419 default_action->st->n_bits / 8);
4420 t->n_actions = params->n_actions;
4421 t->default_action_is_const = params->default_action_is_const;
4422 t->action_data_size_max = action_data_size_max;
4425 t->id = p->n_tables;
4427 /* Node add to tailq. */
4428 TAILQ_INSERT_TAIL(&p->tables, t, node);
/* Build the low-level rte_swx_table_params for a table: the key is the byte
 * span from the first to the last match field, the key mask has 0xFF bytes
 * over each field (gaps between fields stay zero), and the action data size
 * is the largest action argument struct. Caller frees via table_params_free.
 */
static struct rte_swx_table_params *
4435 table_params_get(struct table *table)
4437 struct rte_swx_table_params *params;
4438 struct field *first, *last;
4440 uint32_t key_size, key_offset, action_data_size, i;
4442 /* Memory allocation. */
4443 params = calloc(1, sizeof(struct rte_swx_table_params));
4447 /* Key offset and size. */
/* Field offsets/bit widths are in bits; convert to bytes here. */
4448 first = table->fields[0].field;
4449 last = table->fields[table->n_fields - 1].field;
4450 key_offset = first->offset / 8;
4451 key_size = (last->offset + last->n_bits - first->offset) / 8;
4453 /* Memory allocation. */
4454 key_mask = calloc(1, key_size);
/* Set the mask bytes covered by each match field to all-ones. */
4461 for (i = 0; i < table->n_fields; i++) {
4462 struct field *f = table->fields[i].field;
4463 uint32_t start = (f->offset - first->offset) / 8;
4464 size_t size = f->n_bits / 8;
4466 memset(&key_mask[start], 0xFF, size);
4469 /* Action data size. */
4470 action_data_size = 0;
4471 for (i = 0; i < table->n_actions; i++) {
4472 struct action *action = table->actions[i];
4473 uint32_t ads = action->st ? action->st->n_bits / 8 : 0;
4475 if (ads > action_data_size)
4476 action_data_size = ads;
/* Fill in the parameter block; key_mask ownership moves into params. */
4480 params->match_type = table->type->match_type;
4481 params->key_size = key_size;
4482 params->key_offset = key_offset;
4483 params->key_mask0 = key_mask;
4484 params->action_data_size = action_data_size;
4485 params->n_keys_max = table->size;
/* Release a parameter block produced by table_params_get (mask + struct). */
4491 table_params_free(struct rte_swx_table_params *params)
4496 free(params->key_mask0);
/* Build step: allocate the per-table runtime state array and, for each
 * table, create the underlying table object via its type's create op and
 * install the default action id and a private copy of its action data.
 */
4501 table_state_build(struct rte_swx_pipeline *p)
4503 struct table *table;
4505 p->table_state = calloc(p->n_tables,
4506 sizeof(struct rte_swx_table_state));
4507 CHECK(p->table_state, ENOMEM);
4509 TAILQ_FOREACH(table, &p->tables, node) {
4510 struct rte_swx_table_state *ts = &p->table_state[table->id];
4513 struct rte_swx_table_params *params;
4516 params = table_params_get(table);
4517 CHECK(params, ENOMEM);
4519 ts->obj = table->type->ops.create(params,
/* params is only needed during create; free it regardless of outcome. */
4524 table_params_free(params);
4525 CHECK(ts->obj, ENODEV);
4528 /* ts->default_action_data. */
4529 if (table->action_data_size_max) {
4530 ts->default_action_data =
4531 malloc(table->action_data_size_max);
4532 CHECK(ts->default_action_data, ENOMEM);
4534 memcpy(ts->default_action_data,
4535 table->default_action_data,
4536 table->action_data_size_max);
4539 /* ts->default_action_id. */
4540 ts->default_action_id = table->default_action->id;
/* Undo table_state_build: free each table object through its type's free
 * op, each default action data copy, then the state array itself. Safe to
 * call when the state was never built (p->table_state == NULL).
 */
4547 table_state_build_free(struct rte_swx_pipeline *p)
4551 if (!p->table_state)
4554 for (i = 0; i < p->n_tables; i++) {
4555 struct rte_swx_table_state *ts = &p->table_state[i];
4556 struct table *table = table_find_by_id(p, i);
/* Only tables with a real type own a created table object. */
4559 if (table->type && ts->obj)
4560 table->type->ops.free(ts->obj);
4562 /* ts->default_action_data. */
4563 free(ts->default_action_data);
4566 free(p->table_state);
4567 p->table_state = NULL;
/* Free all table runtime state (thin wrapper over table_state_build_free). */
4571 table_state_free(struct rte_swx_pipeline *p)
4573 table_state_build_free(p)
/* Lookup stub installed for tables with no match fields (no table type):
 * performs no lookup and immediately reports completion (lookup miss path),
 * so the default action is applied.
 */
4577 table_stub_lkp(void *table __rte_unused,
4578 void *mailbox __rte_unused,
4579 uint8_t **key __rte_unused,
4580 uint64_t *action_id __rte_unused,
4581 uint8_t **action_data __rte_unused,
4585 return 1; /* DONE. */
/* Build step: for every thread, allocate its per-table runtime array and
 * wire up each table's lookup function, mailbox and key pointer (pointing
 * into the header struct or the metadata struct, per the table's fields).
 * Tables without a type get the table_stub_lkp no-op lookup.
 */
4589 table_build(struct rte_swx_pipeline *p)
4593 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
4594 struct thread *t = &p->threads[i];
4595 struct table *table;
4597 t->tables = calloc(p->n_tables, sizeof(struct table_runtime));
4598 CHECK(t->tables, ENOMEM);
4600 TAILQ_FOREACH(table, &p->tables, node) {
4601 struct table_runtime *r = &t->tables[table->id];
/* Real table: per-thread mailbox sized by the table type. */
4606 size = table->type->ops.mailbox_size_get();
4609 r->func = table->type->ops.lkp;
4613 r->mailbox = calloc(1, size);
4614 CHECK(r->mailbox, ENOMEM);
/* Key source: the owning header's struct or the shared metadata. */
4618 r->key = table->is_header ?
4619 &t->structs[table->header->struct_id] :
4620 &t->structs[p->metadata_struct_id];
/* Field-less table: stub lookup, no mailbox/key needed. */
4622 r->func = table_stub_lkp;
/* Undo table_build: release each thread's per-table runtime resources. */
4631 table_build_free(struct rte_swx_pipeline *p)
4635 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
4636 struct thread *t = &p->threads[i];
4642 for (j = 0; j < p->n_tables; j++) {
4643 struct table_runtime *r = &t->tables[j];
/* Free all tables and all registered table types: build artifacts first,
 * then each table node (with its fields/actions/default data), then each
 * table type node, all popped off their tailqs.
 */
4654 table_free(struct rte_swx_pipeline *p)
4656 table_build_free(p)
4662 elem = TAILQ_FIRST(&p->tables);
4666 TAILQ_REMOVE(&p->tables, elem, node);
4668 free(elem->actions);
4669 free(elem->default_action_data);
/* Table types. */
4675 struct table_type *elem;
4677 elem = TAILQ_FIRST(&p->table_types);
4681 TAILQ_REMOVE(&p->table_types, elem, node);
/* Public API: allocate and zero-initialize a pipeline object, init all of
 * its registry tailqs, reserve struct id 0 for action data and record the
 * NUMA node hint. On success *p receives the new pipeline.
 */
4690 rte_swx_pipeline_config(struct rte_swx_pipeline **p, int numa_node)
4692 struct rte_swx_pipeline *pipeline;
4694 /* Check input parameters. */
4697 /* Memory allocation. */
4698 pipeline = calloc(1, sizeof(struct rte_swx_pipeline));
4699 CHECK(pipeline, ENOMEM);
4701 /* Initialization. */
4702 TAILQ_INIT(&pipeline->struct_types);
4703 TAILQ_INIT(&pipeline->port_in_types);
4704 TAILQ_INIT(&pipeline->ports_in);
4705 TAILQ_INIT(&pipeline->port_out_types);
4706 TAILQ_INIT(&pipeline->ports_out);
4707 TAILQ_INIT(&pipeline->extern_types);
4708 TAILQ_INIT(&pipeline->extern_objs);
4709 TAILQ_INIT(&pipeline->extern_funcs);
4710 TAILQ_INIT(&pipeline->headers);
4711 TAILQ_INIT(&pipeline->actions);
4712 TAILQ_INIT(&pipeline->table_types);
4713 TAILQ_INIT(&pipeline->tables);
4715 pipeline->n_structs = 1; /* Struct 0 is reserved for action_data. */
4716 pipeline->numa_node = numa_node;
/* Public API: tear down a pipeline — main instruction sequence, table
 * state, and registered objects (extern funcs, etc.) in reverse order of
 * construction.
 */
4723 rte_swx_pipeline_free(struct rte_swx_pipeline *p)
4728 free(p->instructions);
4730 table_state_free(p);
4735 extern_func_free(p);
/* Public API: set the pipeline's main (non-action) instruction sequence,
 * then reset every thread's instruction pointer to the start of it.
 */
4745 rte_swx_pipeline_instructions_config(struct rte_swx_pipeline *p,
4746 const char **instructions,
4747 uint32_t n_instructions)
/* action == NULL attaches the instructions to the pipeline itself. */
4752 err = instruction_config(p, NULL, instructions, n_instructions);
4756 /* Thread instruction pointer reset. */
4757 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
4758 struct thread *t = &p->threads[i];
4760 thread_ip_reset(p, t);
/* Public API: one-shot build of all pipeline runtime data structures, in
 * dependency order (ports, structs, externs, headers, metadata, actions,
 * tables, table state). Build may only run once; on any failure all build
 * steps completed so far are rolled back in reverse order.
 */
4767 rte_swx_pipeline_build(struct rte_swx_pipeline *p)
4772 CHECK(p->build_done == 0, EEXIST);
4774 status = port_in_build(p);
4778 status = port_out_build(p);
4782 status = struct_build(p);
4786 status = extern_obj_build(p);
4790 status = extern_func_build(p);
4794 status = header_build(p);
4798 status = metadata_build(p);
4802 status = action_build(p);
4806 status = table_build(p);
4810 status = table_state_build(p);
/* Error rollback: undo every build step in reverse order. */
4818 table_state_build_free(p);
4819 table_build_free(p);
4820 action_build_free(p);
4821 metadata_build_free(p);
4822 header_build_free(p);
4823 extern_func_build_free(p);
4824 extern_obj_build_free(p);
4825 port_out_build_free(p);
4826 port_in_build_free(p);
4827 struct_build_free(p);
/* Public API: run the pipeline for a quantum of n_instructions. */
4833 rte_swx_pipeline_run(struct rte_swx_pipeline *p, uint32_t n_instructions)
4837 for (i = 0; i < n_instructions; i++)
/* Public API: expose the pipeline's table state array to the control plane.
 * Only valid after a successful build; the pipeline retains ownership.
 */
4845 rte_swx_pipeline_table_state_get(struct rte_swx_pipeline *p,
4846 struct rte_swx_table_state **table_state)
4848 if (!p || !table_state || !p->build_done)
4851 *table_state = p->table_state;
4856 rte_swx_pipeline_table_state_set(struct rte_swx_pipeline *p,
4857 struct rte_swx_table_state *table_state)
4859 if (!p || !table_state || !p->build_done)
4862 p->table_state = table_state;