1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Intel Corporation
11 #include <rte_common.h>
12 #include <rte_prefetch.h>
13 #include <rte_byteorder.h>
15 #include "rte_swx_pipeline.h"
16 #include "rte_swx_ctl.h"
/* Argument-validation helpers: bail out of the enclosing function with
 * -err_code when the condition does not hold (body elided in this listing).
 */
18 #define CHECK(condition, err_code) \
/* A valid name is a non-NULL, non-empty string. */
24 #define CHECK_NAME(name, err_code) \
25 CHECK((name) && (name)[0], err_code)
/* Debug trace hook; maps to printf in this build configuration. */
32 #define TRACE(...) printf(__VA_ARGS__)
/* 64-bit network <-> host byte-order conversion shorthands. */
37 #define ntoh64(x) rte_be_to_cpu_64(x)
38 #define hton64(x) rte_cpu_to_be_64(x)
/* Field of a struct type: name (owning struct_type elided in this view). */
44 char name[RTE_SWX_NAME_SIZE];
/* Struct type: named list of fields; linked into the pipeline's tailq. */
50 TAILQ_ENTRY(struct_type) node;
51 char name[RTE_SWX_NAME_SIZE];
57 TAILQ_HEAD(struct_type_tailq, struct_type);
/* Input port type: named set of rte_swx input port callbacks. */
63 TAILQ_ENTRY(port_in_type) node;
64 char name[RTE_SWX_NAME_SIZE];
65 struct rte_swx_port_in_ops ops;
68 TAILQ_HEAD(port_in_type_tailq, port_in_type);
/* Input port instance: points back to its type. */
71 TAILQ_ENTRY(port_in) node;
72 struct port_in_type *type;
77 TAILQ_HEAD(port_in_tailq, port_in);
/* Flattened per-port RX state used on the data path. */
79 struct port_in_runtime {
80 rte_swx_port_in_pkt_rx_t pkt_rx;
/* Output port type: named set of rte_swx output port callbacks. */
87 struct port_out_type {
88 TAILQ_ENTRY(port_out_type) node;
89 char name[RTE_SWX_NAME_SIZE];
90 struct rte_swx_port_out_ops ops;
93 TAILQ_HEAD(port_out_type_tailq, port_out_type);
/* Output port instance: points back to its type. */
96 TAILQ_ENTRY(port_out) node;
97 struct port_out_type *type;
102 TAILQ_HEAD(port_out_tailq, port_out);
/* Flattened per-port TX state used on the data path. */
104 struct port_out_runtime {
105 rte_swx_port_out_pkt_tx_t pkt_tx;
106 rte_swx_port_out_flush_t flush;
/* Member function of an extern object type. */
113 struct extern_type_member_func {
114 TAILQ_ENTRY(extern_type_member_func) node;
115 char name[RTE_SWX_NAME_SIZE];
116 rte_swx_extern_type_member_func_t func;
120 TAILQ_HEAD(extern_type_member_func_tailq, extern_type_member_func);
/* Extern object type: mailbox struct type, ctor/dtor, member functions. */
123 TAILQ_ENTRY(extern_type) node;
124 char name[RTE_SWX_NAME_SIZE];
125 struct struct_type *mailbox_struct_type;
126 rte_swx_extern_type_constructor_t constructor;
127 rte_swx_extern_type_destructor_t destructor;
128 struct extern_type_member_func_tailq funcs;
132 TAILQ_HEAD(extern_type_tailq, extern_type);
/* Extern object instance of a given extern type. */
135 TAILQ_ENTRY(extern_obj) node;
136 char name[RTE_SWX_NAME_SIZE];
137 struct extern_type *type;
143 TAILQ_HEAD(extern_obj_tailq, extern_obj);
/* Upper bound on member functions per extern type; overridable at build. */
145 #ifndef RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX
146 #define RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX 8
/* Per-thread runtime state of an extern object: func table (and mailbox,
 * elided in this view), indexed by member function ID.
 */
149 struct extern_obj_runtime {
152 rte_swx_extern_type_member_func_t funcs[RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX];
/* Extern (free-standing) function with its mailbox struct type. */
159 TAILQ_ENTRY(extern_func) node;
160 char name[RTE_SWX_NAME_SIZE];
161 struct struct_type *mailbox_struct_type;
162 rte_swx_extern_func_t func;
167 TAILQ_HEAD(extern_func_tailq, extern_func);
/* Per-thread runtime state of an extern function. */
169 struct extern_func_runtime {
171 rte_swx_extern_func_t func;
/* Packet header declaration: name plus its struct type. */
178 TAILQ_ENTRY(header) node;
179 char name[RTE_SWX_NAME_SIZE];
180 struct struct_type *st;
185 TAILQ_HEAD(header_tailq, header);
/* Per-thread storage for an extracted/generated header (fields elided). */
187 struct header_runtime {
/* Per-thread storage for an emitted header (fields elided). */
191 struct header_out_runtime {
201 /* Packet headers are always in Network Byte Order (NBO), i.e. big endian.
202 * Packet meta-data fields are always assumed to be in Host Byte Order (HBO).
203 * Table entry fields can be in either NBO or HBO; they are assumed to be in HBO
204 * when transferred to packet meta-data and in NBO when transferred to packet
208 /* Notation conventions:
209 * -Header field: H = h.header.field (dst/src)
210 * -Meta-data field: M = m.field (dst/src)
211 * -Extern object mailbox field: E = e.field (dst/src)
212 * -Extern function mailbox field: F = f.field (dst/src)
213 * -Table action data field: T = t.field (src only)
214 * -Immediate value: I = 32-bit unsigned value (src only)
/* Pipeline instruction opcodes. The suffix encodes the operand kinds per the
 * notation conventions above (H/M/E/F/T/I); _S variants handle the NBO<->HBO
 * byte-swap case, _I variants take an immediate source.
 */
217 enum instruction_type {
224 /* extract h.header */
245 /* validate h.header */
248 /* invalidate h.header */
249 INSTR_HDR_INVALIDATE,
253 * dst = HMEF, src = HMEFTI
255 INSTR_MOV, /* dst = MEF, src = MEFT */
256 INSTR_MOV_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
257 INSTR_MOV_I, /* dst = HMEF, src = I */
259 /* dma h.header t.field
260 * memcpy(h.header, t.field, sizeof(h.header))
273 * dst = HMEF, src = HMEFTI
275 INSTR_ALU_ADD, /* dst = MEF, src = MEF */
276 INSTR_ALU_ADD_MH, /* dst = MEF, src = H */
277 INSTR_ALU_ADD_HM, /* dst = H, src = MEF */
278 INSTR_ALU_ADD_HH, /* dst = H, src = H */
279 INSTR_ALU_ADD_MI, /* dst = MEF, src = I */
280 INSTR_ALU_ADD_HI, /* dst = H, src = I */
284 * dst = HMEF, src = HMEFTI
286 INSTR_ALU_SUB, /* dst = MEF, src = MEF */
287 INSTR_ALU_SUB_MH, /* dst = MEF, src = H */
288 INSTR_ALU_SUB_HM, /* dst = H, src = MEF */
289 INSTR_ALU_SUB_HH, /* dst = H, src = H */
290 INSTR_ALU_SUB_MI, /* dst = MEF, src = I */
291 INSTR_ALU_SUB_HI, /* dst = H, src = I */
294 * dst = dst '+ src[0:1] '+ src[2:3] + ...
295 * dst = H, src = {H, h.header}
297 INSTR_ALU_CKADD_FIELD, /* src = H */
298 INSTR_ALU_CKADD_STRUCT20, /* src = h.header, with sizeof(header) = 20 */
299 INSTR_ALU_CKADD_STRUCT, /* src = h.header, with any sizeof(header) */
305 INSTR_ALU_CKSUB_FIELD,
309 * dst = HMEF, src = HMEFTI
311 INSTR_ALU_AND, /* dst = MEF, src = MEFT */
312 INSTR_ALU_AND_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
313 INSTR_ALU_AND_I, /* dst = HMEF, src = I */
317 * dst = HMEF, src = HMEFTI
319 INSTR_ALU_OR, /* dst = MEF, src = MEFT */
320 INSTR_ALU_OR_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
321 INSTR_ALU_OR_I, /* dst = HMEF, src = I */
325 * dst = HMEF, src = HMEFTI
327 INSTR_ALU_XOR, /* dst = MEF, src = MEFT */
328 INSTR_ALU_XOR_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
329 INSTR_ALU_XOR_I, /* dst = HMEF, src = I */
333 * dst = HMEF, src = HMEFTI
335 INSTR_ALU_SHL, /* dst = MEF, src = MEF */
336 INSTR_ALU_SHL_MH, /* dst = MEF, src = H */
337 INSTR_ALU_SHL_HM, /* dst = H, src = MEF */
338 INSTR_ALU_SHL_HH, /* dst = H, src = H */
339 INSTR_ALU_SHL_MI, /* dst = MEF, src = I */
340 INSTR_ALU_SHL_HI, /* dst = H, src = I */
344 * dst = HMEF, src = HMEFTI
346 INSTR_ALU_SHR, /* dst = MEF, src = MEF */
347 INSTR_ALU_SHR_MH, /* dst = MEF, src = H */
348 INSTR_ALU_SHR_HM, /* dst = H, src = MEF */
349 INSTR_ALU_SHR_HH, /* dst = H, src = H */
350 INSTR_ALU_SHR_MI, /* dst = MEF, src = I */
351 INSTR_ALU_SHR_HI, /* dst = H, src = I */
/* One instruction operand: struct selector + field location (fields elided). */
357 struct instr_operand {
/* Per-header/struct ID arrays; 8 slots match the DMA instruction width. */
372 uint8_t header_id[8];
373 uint8_t struct_id[8];
/* Operand layout of the header validate/invalidate instructions. */
378 struct instr_hdr_validity {
/* Generic dst/src operand pair shared by mov and ALU instructions. */
386 struct instr_dst_src {
387 struct instr_operand dst;
389 struct instr_operand src;
396 uint8_t header_id[8];
397 uint8_t struct_id[8];
/* Decoded instruction: opcode plus per-opcode operand union. */
408 enum instruction_type type;
411 struct instr_hdr_validity valid;
412 struct instr_dst_src mov;
413 struct instr_dma dma;
414 struct instr_dst_src alu;
415 struct instr_table table;
/* Translation-time metadata kept next to each instruction. */
419 struct instruction_data {
420 char label[RTE_SWX_NAME_SIZE];
421 char jmp_label[RTE_SWX_NAME_SIZE];
422 uint32_t n_users; /* user = jmp instruction to this instruction. */
/* Action: named instruction sequence with its action-data struct type. */
430 TAILQ_ENTRY(action) node;
431 char name[RTE_SWX_NAME_SIZE];
432 struct struct_type *st;
433 struct instruction *instructions;
434 uint32_t n_instructions;
438 TAILQ_HEAD(action_tailq, action);
/* Table type: named match type + table operations. */
444 TAILQ_ENTRY(table_type) node;
445 char name[RTE_SWX_NAME_SIZE];
446 enum rte_swx_table_match_type match_type;
447 struct rte_swx_table_ops ops;
450 TAILQ_HEAD(table_type_tailq, table_type);
/* Match field descriptor (owning struct elided in this view). */
453 enum rte_swx_table_match_type match_type;
/* Table: match fields, allowed actions, and default action. */
458 TAILQ_ENTRY(table) node;
459 char name[RTE_SWX_NAME_SIZE];
460 char args[RTE_SWX_NAME_SIZE];
461 struct table_type *type; /* NULL when n_fields == 0. */
464 struct match_field *fields;
466 int is_header; /* Only valid when n_fields > 0. */
467 struct header *header; /* Only valid when n_fields > 0. */
470 struct action **actions;
471 struct action *default_action;
472 uint8_t *default_action_data;
474 int default_action_is_const;
475 uint32_t action_data_size_max;
481 TAILQ_HEAD(table_tailq, table);
/* Per-thread flattened table lookup state. */
483 struct table_runtime {
484 rte_swx_table_lookup_t func;
/* Per-thread packet processing context (struct thread; header elided). */
494 struct rte_swx_pkt pkt;
500 /* Packet headers. */
501 struct header_runtime *headers; /* Extracted or generated headers. */
502 struct header_out_runtime *headers_out; /* Emitted headers. */
503 uint8_t *header_storage;
504 uint8_t *header_out_storage;
505 uint64_t valid_headers;
506 uint32_t n_headers_out;
508 /* Packet meta-data. */
512 struct table_runtime *tables;
513 struct rte_swx_table_state *table_state;
515 int hit; /* 0 = Miss, 1 = Hit. */
517 /* Extern objects and functions. */
518 struct extern_obj_runtime *extern_objs;
519 struct extern_func_runtime *extern_funcs;
/* Instruction pointer and return address for action calls. */
522 struct instruction *ip;
523 struct instruction *ret;
/* Single-bit operations on the 64-bit header validity mask. */
526 #define MASK64_BIT_GET(mask, pos) ((mask) & (1LLU << (pos)))
527 #define MASK64_BIT_SET(mask, pos) ((mask) | (1LLU << (pos)))
528 #define MASK64_BIT_CLR(mask, pos) ((mask) & ~(1LLU << (pos)))
/* ALU dst = dst op src, both operands in host byte order (HBO).
 * Reads/writes 64-bit words and masks down to the operand's n_bits;
 * only the dst bits are updated, surrounding bits are preserved.
 * NOTE(review): assumes dst.n_bits/src.n_bits are in [1, 64] — a shift by
 * 64 would be UB; presumably guaranteed by instruction translation.
 */
530 #define ALU(thread, ip, operator) \
532 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
533 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
534 uint64_t dst64 = *dst64_ptr; \
535 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
536 uint64_t dst = dst64 & dst64_mask; \
538 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
539 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
540 uint64_t src64 = *src64_ptr; \
541 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
542 uint64_t src = src64 & src64_mask; \
544 uint64_t result = dst operator src; \
546 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
/* Little-endian hosts need an explicit byte swap for NBO header operands. */
549 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* ALU with HBO dst and NBO (header) src: src is byte-swapped and
 * right-aligned before the operation.
 */
551 #define ALU_S(thread, ip, operator) \
553 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
554 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
555 uint64_t dst64 = *dst64_ptr; \
556 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
557 uint64_t dst = dst64 & dst64_mask; \
559 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
560 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
561 uint64_t src64 = *src64_ptr; \
562 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
564 uint64_t result = dst operator src; \
566 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
/* ALU with NBO (header) dst and HBO src: dst is swapped to HBO for the
 * operation, then the result is shifted back into place and swapped to NBO.
 */
571 #define ALU_HM(thread, ip, operator) \
573 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
574 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
575 uint64_t dst64 = *dst64_ptr; \
576 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
577 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
579 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
580 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
581 uint64_t src64 = *src64_ptr; \
582 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
583 uint64_t src = src64 & src64_mask; \
585 uint64_t result = dst operator src; \
586 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
588 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
/* ALU with both dst and src in NBO (header fields). */
591 #define ALU_HH(thread, ip, operator) \
593 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
594 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
595 uint64_t dst64 = *dst64_ptr; \
596 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
597 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
599 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
600 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
601 uint64_t src64 = *src64_ptr; \
602 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
604 uint64_t result = dst operator src; \
605 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
607 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
/* ALU with HBO dst and immediate src (src_val stored in the instruction). */
619 #define ALU_I(thread, ip, operator) \
621 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
622 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
623 uint64_t dst64 = *dst64_ptr; \
624 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
625 uint64_t dst = dst64 & dst64_mask; \
627 uint64_t src = (ip)->alu.src_val; \
629 uint64_t result = dst operator src; \
631 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
/* Byte-swapping immediate variant is only needed on little-endian hosts. */
636 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* ALU with NBO (header) dst and immediate src. */
638 #define ALU_HI(thread, ip, operator) \
640 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
641 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
642 uint64_t dst64 = *dst64_ptr; \
643 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
644 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
646 uint64_t src = (ip)->alu.src_val; \
648 uint64_t result = dst operator src; \
649 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
651 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
/* Move src field into dst field, both HBO; only dst bits are overwritten. */
660 #define MOV(thread, ip) \
662 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
663 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
664 uint64_t dst64 = *dst64_ptr; \
665 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
667 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
668 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
669 uint64_t src64 = *src64_ptr; \
670 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \
671 uint64_t src = src64 & src64_mask; \
673 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
/* Byte-swapping move variant, only needed on little-endian hosts. */
676 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* Move with an NBO (header) operand: src is swapped and right-aligned. */
678 #define MOV_S(thread, ip) \
680 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
681 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
682 uint64_t dst64 = *dst64_ptr; \
683 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
685 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
686 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
687 uint64_t src64 = *src64_ptr; \
688 uint64_t src = ntoh64(src64) >> (64 - (ip)->mov.src.n_bits); \
690 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
/* Move an immediate value (stored in the instruction) into dst. */
699 #define MOV_I(thread, ip) \
701 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
702 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
703 uint64_t dst64 = *dst64_ptr; \
704 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
706 uint64_t src = (ip)->mov.src_val; \
708 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
/* Read n_bits of packet meta-data (HBO) at the given byte offset. */
711 #define METADATA_READ(thread, offset, n_bits) \
713 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
714 uint64_t m64 = *m64_ptr; \
715 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
/* Write n_bits of packet meta-data, preserving surrounding bits. */
719 #define METADATA_WRITE(thread, offset, n_bits, value) \
721 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
722 uint64_t m64 = *m64_ptr; \
723 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
725 uint64_t m_new = value; \
727 *m64_ptr = (m64 & ~m64_mask) | (m_new & m64_mask); \
/* Number of per-pipeline packet-processing threads; overridable at build. */
730 #ifndef RTE_SWX_PIPELINE_THREADS_MAX
731 #define RTE_SWX_PIPELINE_THREADS_MAX 16
/* Top-level pipeline object: registration-time tailqs plus the flattened
 * runtime arrays produced by the build step.
 */
734 struct rte_swx_pipeline {
735 struct struct_type_tailq struct_types;
736 struct port_in_type_tailq port_in_types;
737 struct port_in_tailq ports_in;
738 struct port_out_type_tailq port_out_types;
739 struct port_out_tailq ports_out;
740 struct extern_type_tailq extern_types;
741 struct extern_obj_tailq extern_objs;
742 struct extern_func_tailq extern_funcs;
743 struct header_tailq headers;
744 struct struct_type *metadata_st;
745 uint32_t metadata_struct_id;
746 struct action_tailq actions;
747 struct table_type_tailq table_types;
748 struct table_tailq tables;
/* Built (data-path) state. */
750 struct port_in_runtime *in;
751 struct port_out_runtime *out;
752 struct instruction **action_instructions;
753 struct rte_swx_table_state *table_state;
754 struct instruction *instructions;
755 struct thread threads[RTE_SWX_PIPELINE_THREADS_MAX];
/* Element counts for the arrays above. */
759 uint32_t n_ports_out;
760 uint32_t n_extern_objs;
761 uint32_t n_extern_funcs;
767 uint32_t n_instructions;
/* Look up a struct type by name; NULL-return path elided in this listing. */
775 static struct struct_type *
776 struct_type_find(struct rte_swx_pipeline *p, const char *name)
778 struct struct_type *elem;
780 TAILQ_FOREACH(elem, &p->struct_types, node)
781 if (strcmp(elem->name, name) == 0)
/* Look up a field by name within a struct type (linear scan). */
787 static struct field *
788 struct_type_field_find(struct struct_type *st, const char *name)
792 for (i = 0; i < st->n_fields; i++) {
793 struct field *f = &st->fields[i];
795 if (strcmp(f->name, name) == 0)
/* Register a new struct type: validate the field list (unique names,
 * 1..64 bits each, byte-aligned), allocate the node, compute per-field
 * bit offsets, and append it to the pipeline's struct_types tailq.
 * Returns 0 on success or a negative errno via CHECK on failure.
 */
803 rte_swx_pipeline_struct_type_register(struct rte_swx_pipeline *p,
805 struct rte_swx_field_params *fields,
808 struct struct_type *st;
812 CHECK_NAME(name, EINVAL);
813 CHECK(fields, EINVAL);
814 CHECK(n_fields, EINVAL);
816 for (i = 0; i < n_fields; i++) {
817 struct rte_swx_field_params *f = &fields[i];
820 CHECK_NAME(f->name, EINVAL);
821 CHECK(f->n_bits, EINVAL);
822 CHECK(f->n_bits <= 64, EINVAL);
823 CHECK((f->n_bits & 7) == 0, EINVAL);
/* Reject duplicate field names. */
825 for (j = 0; j < i; j++) {
826 struct rte_swx_field_params *f_prev = &fields[j];
828 CHECK(strcmp(f->name, f_prev->name), EINVAL);
832 CHECK(!struct_type_find(p, name), EEXIST);
834 /* Node allocation. */
835 st = calloc(1, sizeof(struct struct_type));
838 st->fields = calloc(n_fields, sizeof(struct field));
844 /* Node initialization. */
845 strcpy(st->name, name);
846 for (i = 0; i < n_fields; i++) {
847 struct field *dst = &st->fields[i];
848 struct rte_swx_field_params *src = &fields[i];
850 strcpy(dst->name, src->name);
851 dst->n_bits = src->n_bits;
852 dst->offset = st->n_bits; /* Bit offset = running total so far. */
854 st->n_bits += src->n_bits;
856 st->n_fields = n_fields;
858 /* Node add to tailq. */
859 TAILQ_INSERT_TAIL(&p->struct_types, st, node);
/* Build step: allocate each thread's per-struct pointer table. */
865 struct_build(struct rte_swx_pipeline *p)
869 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
870 struct thread *t = &p->threads[i];
872 t->structs = calloc(p->n_structs, sizeof(uint8_t *));
873 CHECK(t->structs, ENOMEM);
/* Undo struct_build for all threads (free elided in this listing). */
880 struct_build_free(struct rte_swx_pipeline *p)
884 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
885 struct thread *t = &p->threads[i];
/* Free the built state and drain the struct_types tailq. */
893 struct_free(struct rte_swx_pipeline *p)
895 struct_build_free(p);
899 struct struct_type *elem;
901 elem = TAILQ_FIRST(&p->struct_types);
905 TAILQ_REMOVE(&p->struct_types, elem, node);
/* Look up an input port type by name. */
914 static struct port_in_type *
915 port_in_type_find(struct rte_swx_pipeline *p, const char *name)
917 struct port_in_type *elem;
922 TAILQ_FOREACH(elem, &p->port_in_types, node)
923 if (strcmp(elem->name, name) == 0)
/* Register an input port type: all four callbacks are mandatory. */
930 rte_swx_pipeline_port_in_type_register(struct rte_swx_pipeline *p,
932 struct rte_swx_port_in_ops *ops)
934 struct port_in_type *elem;
937 CHECK_NAME(name, EINVAL);
939 CHECK(ops->create, EINVAL);
940 CHECK(ops->free, EINVAL);
941 CHECK(ops->pkt_rx, EINVAL);
942 CHECK(ops->stats_read, EINVAL);
944 CHECK(!port_in_type_find(p, name), EEXIST);
946 /* Node allocation. */
947 elem = calloc(1, sizeof(struct port_in_type));
950 /* Node initialization. */
951 strcpy(elem->name, name);
952 memcpy(&elem->ops, ops, sizeof(*ops));
954 /* Node add to tailq. */
955 TAILQ_INSERT_TAIL(&p->port_in_types, elem, node);
/* Look up an input port instance by numeric ID. */
960 static struct port_in *
961 port_in_find(struct rte_swx_pipeline *p, uint32_t port_id)
963 struct port_in *port;
965 TAILQ_FOREACH(port, &p->ports_in, node)
966 if (port->id == port_id)
/* Create an input port of a registered type at the given port ID. */
973 rte_swx_pipeline_port_in_config(struct rte_swx_pipeline *p,
975 const char *port_type_name,
978 struct port_in_type *type = NULL;
979 struct port_in *port = NULL;
984 CHECK(!port_in_find(p, port_id), EINVAL);
986 CHECK_NAME(port_type_name, EINVAL);
987 type = port_in_type_find(p, port_type_name);
/* Instantiate the low-level port object from its args string. */
990 obj = type->ops.create(args);
993 /* Node allocation. */
994 port = calloc(1, sizeof(struct port_in));
997 /* Node initialization. */
1002 /* Node add to tailq. */
1003 TAILQ_INSERT_TAIL(&p->ports_in, port, node);
/* Track the highest port ID + 1 as the port count. */
1004 if (p->n_ports_in < port_id + 1)
1005 p->n_ports_in = port_id + 1;
/* Build step: require a power-of-2, gap-free set of input ports and
 * flatten them into the p->in runtime array indexed by port ID.
 */
1011 port_in_build(struct rte_swx_pipeline *p)
1013 struct port_in *port;
1016 CHECK(p->n_ports_in, EINVAL);
1017 CHECK(rte_is_power_of_2(p->n_ports_in), EINVAL);
/* Every ID in [0, n_ports_in) must be configured. */
1019 for (i = 0; i < p->n_ports_in; i++)
1020 CHECK(port_in_find(p, i), EINVAL);
1022 p->in = calloc(p->n_ports_in, sizeof(struct port_in_runtime));
1023 CHECK(p->in, ENOMEM);
1025 TAILQ_FOREACH(port, &p->ports_in, node) {
1026 struct port_in_runtime *in = &p->in[port->id];
1028 in->pkt_rx = port->type->ops.pkt_rx;
1029 in->obj = port->obj;
/* Undo port_in_build (free elided in this listing). */
1036 port_in_build_free(struct rte_swx_pipeline *p)
/* Free built state, then drain port instances and port types,
 * releasing each low-level port object through its type's free op.
 */
1043 port_in_free(struct rte_swx_pipeline *p)
1045 port_in_build_free(p);
1049 struct port_in *port;
1051 port = TAILQ_FIRST(&p->ports_in);
1055 TAILQ_REMOVE(&p->ports_in, port, node);
1056 port->type->ops.free(port->obj);
1060 /* Input port types. */
1062 struct port_in_type *elem;
1064 elem = TAILQ_FIRST(&p->port_in_types);
1068 TAILQ_REMOVE(&p->port_in_types, elem, node);
/* Look up an output port type by name. */
1076 static struct port_out_type *
1077 port_out_type_find(struct rte_swx_pipeline *p, const char *name)
1079 struct port_out_type *elem;
1084 TAILQ_FOREACH(elem, &p->port_out_types, node)
1085 if (!strcmp(elem->name, name))
/* Register an output port type: all four callbacks are mandatory. */
1092 rte_swx_pipeline_port_out_type_register(struct rte_swx_pipeline *p,
1094 struct rte_swx_port_out_ops *ops)
1096 struct port_out_type *elem;
1099 CHECK_NAME(name, EINVAL);
1101 CHECK(ops->create, EINVAL);
1102 CHECK(ops->free, EINVAL);
1103 CHECK(ops->pkt_tx, EINVAL);
1104 CHECK(ops->stats_read, EINVAL);
1106 CHECK(!port_out_type_find(p, name), EEXIST);
1108 /* Node allocation. */
1109 elem = calloc(1, sizeof(struct port_out_type));
1110 CHECK(elem, ENOMEM);
1112 /* Node initialization. */
1113 strcpy(elem->name, name);
1114 memcpy(&elem->ops, ops, sizeof(*ops));
1116 /* Node add to tailq. */
1117 TAILQ_INSERT_TAIL(&p->port_out_types, elem, node);
/* Look up an output port instance by numeric ID. */
1122 static struct port_out *
1123 port_out_find(struct rte_swx_pipeline *p, uint32_t port_id)
1125 struct port_out *port;
1127 TAILQ_FOREACH(port, &p->ports_out, node)
1128 if (port->id == port_id)
/* Create an output port of a registered type at the given port ID. */
1135 rte_swx_pipeline_port_out_config(struct rte_swx_pipeline *p,
1137 const char *port_type_name,
1140 struct port_out_type *type = NULL;
1141 struct port_out *port = NULL;
1146 CHECK(!port_out_find(p, port_id), EINVAL);
1148 CHECK_NAME(port_type_name, EINVAL);
1149 type = port_out_type_find(p, port_type_name);
1150 CHECK(type, EINVAL);
/* Instantiate the low-level port object from its args string. */
1152 obj = type->ops.create(args);
1155 /* Node allocation. */
1156 port = calloc(1, sizeof(struct port_out));
1157 CHECK(port, ENOMEM);
1159 /* Node initialization. */
1164 /* Node add to tailq. */
1165 TAILQ_INSERT_TAIL(&p->ports_out, port, node);
/* Track the highest port ID + 1 as the port count. */
1166 if (p->n_ports_out < port_id + 1)
1167 p->n_ports_out = port_id + 1;
/* Build step: require a gap-free set of output ports and flatten them
 * into the p->out runtime array indexed by port ID. Unlike input ports,
 * the count is not required to be a power of 2 here.
 */
1173 port_out_build(struct rte_swx_pipeline *p)
1175 struct port_out *port;
1178 CHECK(p->n_ports_out, EINVAL);
1180 for (i = 0; i < p->n_ports_out; i++)
1181 CHECK(port_out_find(p, i), EINVAL);
1183 p->out = calloc(p->n_ports_out, sizeof(struct port_out_runtime));
1184 CHECK(p->out, ENOMEM);
1186 TAILQ_FOREACH(port, &p->ports_out, node) {
1187 struct port_out_runtime *out = &p->out[port->id];
1189 out->pkt_tx = port->type->ops.pkt_tx;
1190 out->flush = port->type->ops.flush;
1191 out->obj = port->obj;
/* Undo port_out_build (free elided in this listing). */
1198 port_out_build_free(struct rte_swx_pipeline *p)
/* Free built state, then drain port instances and port types. */
1205 port_out_free(struct rte_swx_pipeline *p)
1207 port_out_build_free(p);
1211 struct port_out *port;
1213 port = TAILQ_FIRST(&p->ports_out);
1217 TAILQ_REMOVE(&p->ports_out, port, node);
1218 port->type->ops.free(port->obj);
1222 /* Output port types. */
1224 struct port_out_type *elem;
1226 elem = TAILQ_FIRST(&p->port_out_types);
1230 TAILQ_REMOVE(&p->port_out_types, elem, node);
/* Look up an extern type by name. */
1238 static struct extern_type *
1239 extern_type_find(struct rte_swx_pipeline *p, const char *name)
1241 struct extern_type *elem;
1243 TAILQ_FOREACH(elem, &p->extern_types, node)
1244 if (strcmp(elem->name, name) == 0)
/* Look up a member function by name within an extern type. */
1250 static struct extern_type_member_func *
1251 extern_type_member_func_find(struct extern_type *type, const char *name)
1253 struct extern_type_member_func *elem;
1255 TAILQ_FOREACH(elem, &type->funcs, node)
1256 if (strcmp(elem->name, name) == 0)
/* Look up an extern object instance by name. */
1262 static struct extern_obj *
1263 extern_obj_find(struct rte_swx_pipeline *p, const char *name)
1265 struct extern_obj *elem;
1267 TAILQ_FOREACH(elem, &p->extern_objs, node)
1268 if (strcmp(elem->name, name) == 0)
/* Parse "e.<obj>.<field>" into the extern object and its mailbox field.
 * On success also reports the object through *object.
 */
1274 static struct field *
1275 extern_obj_mailbox_field_parse(struct rte_swx_pipeline *p,
1277 struct extern_obj **object)
1279 struct extern_obj *obj;
1281 char *obj_name, *field_name;
1283 if ((name[0] != 'e') || (name[1] != '.'))
/* Work on a copy since the name is split in place at the dot. */
1286 obj_name = strdup(&name[2]);
1290 field_name = strchr(obj_name, '.');
1299 obj = extern_obj_find(p, obj_name);
1305 f = struct_type_field_find(obj->type->mailbox_struct_type, field_name);
/* Register an extern object type: name, mailbox struct type, and both
 * constructor and destructor are mandatory.
 */
1319 rte_swx_pipeline_extern_type_register(struct rte_swx_pipeline *p,
1321 const char *mailbox_struct_type_name,
1322 rte_swx_extern_type_constructor_t constructor,
1323 rte_swx_extern_type_destructor_t destructor)
1325 struct extern_type *elem;
1326 struct struct_type *mailbox_struct_type;
1330 CHECK_NAME(name, EINVAL);
1331 CHECK(!extern_type_find(p, name), EEXIST);
1333 CHECK_NAME(mailbox_struct_type_name, EINVAL);
1334 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
1335 CHECK(mailbox_struct_type, EINVAL);
1337 CHECK(constructor, EINVAL);
1338 CHECK(destructor, EINVAL);
1340 /* Node allocation. */
1341 elem = calloc(1, sizeof(struct extern_type));
1342 CHECK(elem, ENOMEM);
1344 /* Node initialization. */
1345 strcpy(elem->name, name);
1346 elem->mailbox_struct_type = mailbox_struct_type;
1347 elem->constructor = constructor;
1348 elem->destructor = destructor;
1349 TAILQ_INIT(&elem->funcs);
1351 /* Node add to tailq. */
1352 TAILQ_INSERT_TAIL(&p->extern_types, elem, node);
/* Register a member function on a previously registered extern type;
 * bounded by RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX per type.
 */
1358 rte_swx_pipeline_extern_type_member_func_register(struct rte_swx_pipeline *p,
1359 const char *extern_type_name,
1361 rte_swx_extern_type_member_func_t member_func)
1363 struct extern_type *type;
1364 struct extern_type_member_func *type_member;
1368 CHECK(extern_type_name, EINVAL);
1369 type = extern_type_find(p, extern_type_name);
1370 CHECK(type, EINVAL);
1371 CHECK(type->n_funcs < RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX, ENOSPC);
1373 CHECK(name, EINVAL);
1374 CHECK(!extern_type_member_func_find(type, name), EEXIST);
1376 CHECK(member_func, EINVAL);
1378 /* Node allocation. */
1379 type_member = calloc(1, sizeof(struct extern_type_member_func));
1380 CHECK(type_member, ENOMEM);
1382 /* Node initialization. */
1383 strcpy(type_member->name, name);
1384 type_member->func = member_func;
1385 type_member->id = type->n_funcs; /* IDs assigned in registration order. */
1387 /* Node add to tailq. */
1388 TAILQ_INSERT_TAIL(&type->funcs, type_member, node);
/* Instantiate an extern object of a registered type: run the type's
 * constructor on the args string and register the instance by name.
 */
1395 rte_swx_pipeline_extern_object_config(struct rte_swx_pipeline *p,
1396 const char *extern_type_name,
1400 struct extern_type *type;
1401 struct extern_obj *obj;
1406 CHECK_NAME(extern_type_name, EINVAL);
1407 type = extern_type_find(p, extern_type_name);
1408 CHECK(type, EINVAL);
1410 CHECK_NAME(name, EINVAL);
1411 CHECK(!extern_obj_find(p, name), EEXIST);
1413 /* Node allocation. */
1414 obj = calloc(1, sizeof(struct extern_obj));
1417 /* Object construction. */
1418 obj_handle = type->constructor(args);
1424 /* Node initialization. */
1425 strcpy(obj->name, name);
1427 obj->obj = obj_handle;
/* Each mailbox gets its own slot in the per-thread structs table. */
1428 obj->struct_id = p->n_structs;
1429 obj->id = p->n_extern_objs;
1431 /* Node add to tailq. */
1432 TAILQ_INSERT_TAIL(&p->extern_objs, obj, node);
/* Build step: per thread, allocate extern-object runtime state, its
 * mailbox, and wire the member function table and structs slot.
 */
1440 extern_obj_build(struct rte_swx_pipeline *p)
1444 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1445 struct thread *t = &p->threads[i];
1446 struct extern_obj *obj;
1448 t->extern_objs = calloc(p->n_extern_objs,
1449 sizeof(struct extern_obj_runtime));
1450 CHECK(t->extern_objs, ENOMEM);
1452 TAILQ_FOREACH(obj, &p->extern_objs, node) {
1453 struct extern_obj_runtime *r =
1454 &t->extern_objs[obj->id];
1455 struct extern_type_member_func *func;
1456 uint32_t mailbox_size =
1457 obj->type->mailbox_struct_type->n_bits / 8;
1461 r->mailbox = calloc(1, mailbox_size);
1462 CHECK(r->mailbox, ENOMEM);
1464 TAILQ_FOREACH(func, &obj->type->funcs, node)
1465 r->funcs[func->id] = func->func;
1467 t->structs[obj->struct_id] = r->mailbox;
/* Undo extern_obj_build: free each thread's mailboxes and runtime array. */
1475 extern_obj_build_free(struct rte_swx_pipeline *p)
1479 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1480 struct thread *t = &p->threads[i];
1483 if (!t->extern_objs)
1486 for (j = 0; j < p->n_extern_objs; j++) {
1487 struct extern_obj_runtime *r = &t->extern_objs[j];
1492 free(t->extern_objs);
1493 t->extern_objs = NULL;
/* Free built state, then drain extern objects (running each type's
 * destructor) and extern types (including their member functions).
 */
1498 extern_obj_free(struct rte_swx_pipeline *p)
1500 extern_obj_build_free(p);
1502 /* Extern objects. */
1504 struct extern_obj *elem;
1506 elem = TAILQ_FIRST(&p->extern_objs);
1510 TAILQ_REMOVE(&p->extern_objs, elem, node);
1512 elem->type->destructor(elem->obj);
1518 struct extern_type *elem;
1520 elem = TAILQ_FIRST(&p->extern_types);
1524 TAILQ_REMOVE(&p->extern_types, elem, node);
1527 struct extern_type_member_func *func;
1529 func = TAILQ_FIRST(&elem->funcs);
1533 TAILQ_REMOVE(&elem->funcs, func, node);
/* Look up an extern function by name. */
1544 static struct extern_func *
1545 extern_func_find(struct rte_swx_pipeline *p, const char *name)
1547 struct extern_func *elem;
1549 TAILQ_FOREACH(elem, &p->extern_funcs, node)
1550 if (strcmp(elem->name, name) == 0)
/* Parse "f.<func>.<field>" into the extern function and its mailbox
 * field; mirrors extern_obj_mailbox_field_parse.
 */
1556 static struct field *
1557 extern_func_mailbox_field_parse(struct rte_swx_pipeline *p,
1559 struct extern_func **function)
1561 struct extern_func *func;
1563 char *func_name, *field_name;
1565 if ((name[0] != 'f') || (name[1] != '.'))
/* Work on a copy since the name is split in place at the dot. */
1568 func_name = strdup(&name[2]);
1572 field_name = strchr(func_name, '.');
1581 func = extern_func_find(p, func_name);
1587 f = struct_type_field_find(func->mailbox_struct_type, field_name);
/* Register an extern function: validate name/mailbox type/function pointer,
 * allocate the node, assign its struct slot and ID, and append it to the
 * pipeline's extern_funcs tailq.
 *
 * Bug fix: the post-calloc check used to test 'func' (the function pointer
 * argument, already validated above) instead of 'f' (the allocation), so an
 * allocation failure went undetected and strcpy(f->name, ...) would
 * dereference NULL. Now checks 'f'.
 */
1601 rte_swx_pipeline_extern_func_register(struct rte_swx_pipeline *p,
1603 const char *mailbox_struct_type_name,
1604 rte_swx_extern_func_t func)
1606 struct extern_func *f;
1607 struct struct_type *mailbox_struct_type;
1611 CHECK_NAME(name, EINVAL);
1612 CHECK(!extern_func_find(p, name), EEXIST);
1614 CHECK_NAME(mailbox_struct_type_name, EINVAL);
1615 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
1616 CHECK(mailbox_struct_type, EINVAL);
1618 CHECK(func, EINVAL);
1620 /* Node allocation. */
1621 f = calloc(1, sizeof(struct extern_func));
1622 CHECK(f, ENOMEM);
1624 /* Node initialization. */
1625 strcpy(f->name, name);
1626 f->mailbox_struct_type = mailbox_struct_type;
/* The mailbox gets its own slot in the per-thread structs table. */
1628 f->struct_id = p->n_structs;
1629 f->id = p->n_extern_funcs;
1631 /* Node add to tailq. */
1632 TAILQ_INSERT_TAIL(&p->extern_funcs, f, node);
1633 p->n_extern_funcs++;
/* Build step: per thread, allocate extern-function runtime state, its
 * mailbox, and wire the function pointer and structs slot.
 */
1640 extern_func_build(struct rte_swx_pipeline *p)
1644 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1645 struct thread *t = &p->threads[i];
1646 struct extern_func *func;
1648 /* Memory allocation. */
1649 t->extern_funcs = calloc(p->n_extern_funcs,
1650 sizeof(struct extern_func_runtime));
1651 CHECK(t->extern_funcs, ENOMEM);
1653 /* Extern function. */
1654 TAILQ_FOREACH(func, &p->extern_funcs, node) {
1655 struct extern_func_runtime *r =
1656 &t->extern_funcs[func->id];
1657 uint32_t mailbox_size =
1658 func->mailbox_struct_type->n_bits / 8;
1660 r->func = func->func;
1662 r->mailbox = calloc(1, mailbox_size);
1663 CHECK(r->mailbox, ENOMEM);
1665 t->structs[func->struct_id] = r->mailbox;
/* Undo extern_func_build: free each thread's mailboxes and array. */
1673 extern_func_build_free(struct rte_swx_pipeline *p)
1677 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1678 struct thread *t = &p->threads[i];
1681 if (!t->extern_funcs)
1684 for (j = 0; j < p->n_extern_funcs; j++) {
1685 struct extern_func_runtime *r = &t->extern_funcs[j];
1690 free(t->extern_funcs);
1691 t->extern_funcs = NULL;
/* Free built state, then drain the extern_funcs tailq. */
1696 extern_func_free(struct rte_swx_pipeline *p)
1698 extern_func_build_free(p);
1701 struct extern_func *elem;
1703 elem = TAILQ_FIRST(&p->extern_funcs);
1707 TAILQ_REMOVE(&p->extern_funcs, elem, node);
/* Look up a header by name. */
1715 static struct header *
1716 header_find(struct rte_swx_pipeline *p, const char *name)
1718 struct header *elem;
1720 TAILQ_FOREACH(elem, &p->headers, node)
1721 if (strcmp(elem->name, name) == 0)
/* Parse "h.<header>" and return the named header (or NULL). */
1727 static struct header *
1728 header_parse(struct rte_swx_pipeline *p,
1731 if (name[0] != 'h' || name[1] != '.')
1734 return header_find(p, &name[2]);
/* Parse "h.<header>.<field>" into the header and its struct field;
 * mirrors extern_obj_mailbox_field_parse.
 */
1737 static struct field *
1738 header_field_parse(struct rte_swx_pipeline *p,
1740 struct header **header)
1744 char *header_name, *field_name;
1746 if ((name[0] != 'h') || (name[1] != '.'))
/* Work on a copy since the name is split in place at the dot. */
1749 header_name = strdup(&name[2]);
1753 field_name = strchr(header_name, '.');
1762 h = header_find(p, header_name);
1768 f = struct_type_field_find(h->st, field_name);
1782 rte_swx_pipeline_packet_header_register(struct rte_swx_pipeline *p,
1784 const char *struct_type_name)
/* Public API: register a packet header of a previously registered struct
 * type under a unique name. Fails with EEXIST on duplicate names and with
 * ENOSPC once the valid_headers bitmask runs out of bits.
 */
1786 struct struct_type *st;
1788 size_t n_headers_max;
1791 CHECK_NAME(name, EINVAL);
1792 CHECK_NAME(struct_type_name, EINVAL);
1794 CHECK(!header_find(p, name), EEXIST);
1796 st = struct_type_find(p, struct_type_name);
/* One bit per header in thread->valid_headers caps the header count. */
1799 n_headers_max = RTE_SIZEOF_FIELD(struct thread, valid_headers) * 8;
1800 CHECK(p->n_headers < n_headers_max, ENOSPC);
1802 /* Node allocation. */
1803 h = calloc(1, sizeof(struct header));
1806 /* Node initialization. */
1807 strcpy(h->name, name)
/* Header gets the next struct slot and the next header id. */;
1809 h->struct_id = p->n_structs;
1810 h->id = p->n_headers;
1812 /* Node add to tailq. */
1813 TAILQ_INSERT_TAIL(&p->headers, h, node);
1821 header_build(struct rte_swx_pipeline *p)
/* Build step: size the total header storage (sum of all header byte sizes),
 * then allocate per-thread runtime arrays plus in/out header storage, and
 * carve each header's default slice (ptr0) out of the storage area.
 */
1824 uint32_t n_bytes = 0, i;
/* First pass: total storage needed for one copy of every header. */
1826 TAILQ_FOREACH(h, &p->headers, node) {
1827 n_bytes += h->st->n_bits / 8;
1830 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1831 struct thread *t = &p->threads[i];
1832 uint32_t offset = 0;
1834 t->headers = calloc(p->n_headers,
1835 sizeof(struct header_runtime));
1836 CHECK(t->headers, ENOMEM);
1838 t->headers_out = calloc(p->n_headers,
1839 sizeof(struct header_out_runtime));
1840 CHECK(t->headers_out, ENOMEM);
1842 t->header_storage = calloc(1, n_bytes);
1843 CHECK(t->header_storage, ENOMEM);
1845 t->header_out_storage = calloc(1, n_bytes);
1846 CHECK(t->header_out_storage, ENOMEM);
/* Second pass: assign each header its contiguous slice of the storage. */
1848 TAILQ_FOREACH(h, &p->headers, node) {
1849 uint8_t *header_storage;
1851 header_storage = &t->header_storage[offset];
1852 offset += h->st->n_bits / 8;
/* ptr0 is the header's home slot; structs[] initially points there too. */
1854 t->headers[h->id].ptr0 = header_storage;
1855 t->structs[h->struct_id] = header_storage;
1863 header_build_free(struct rte_swx_pipeline *p)
/* Undo header_build(): free each thread's header runtime arrays and the
 * backing storage. Pointers are NULLed so a second pass is a no-op.
 */
1867 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1868 struct thread *t = &p->threads[i];
1870 free(t->headers_out);
1871 t->headers_out = NULL;
1876 free(t->header_out_storage);
1877 t->header_out_storage = NULL;
1879 free(t->header_storage);
1880 t->header_storage = NULL;
1885 header_free(struct rte_swx_pipeline *p)
/* Full teardown: per-thread runtime state first, then every registered
 * header node popped from the tailq.
 */
1887 header_build_free(p);
1890 struct header *elem;
1892 elem = TAILQ_FIRST(&p->headers);
1896 TAILQ_REMOVE(&p->headers, elem, node);
1904 static struct field *
1905 metadata_field_parse(struct rte_swx_pipeline *p, const char *name)
/* Parse an "m.<field>" token against the registered metadata struct type.
 * Returns NULL if no metadata struct is registered or the prefix is wrong.
 */
1907 if (!p->metadata_st)
1910 if (name[0] != 'm' || name[1] != '.')
1913 return struct_type_field_find(p->metadata_st, &name[2]);
1917 rte_swx_pipeline_packet_metadata_register(struct rte_swx_pipeline *p,
1918 const char *struct_type_name)
/* Public API: bind the pipeline's (single) packet metadata to an already
 * registered struct type. May only be done once (EINVAL on repeat).
 */
1920 struct struct_type *st = NULL;
1924 CHECK_NAME(struct_type_name, EINVAL);
1925 st = struct_type_find(p, struct_type_name);
/* Metadata can be registered exactly once. */
1927 CHECK(!p->metadata_st, EINVAL);
1929 p->metadata_st = st;
/* Metadata occupies the next struct slot. */
1930 p->metadata_struct_id = p->n_structs;
1938 metadata_build(struct rte_swx_pipeline *p)
/* Build step: allocate one zeroed metadata buffer per thread and publish
 * it in the thread's structs[] array under the metadata struct id.
 */
1940 uint32_t n_bytes = p->metadata_st->n_bits / 8;
1943 /* Thread-level initialization. */
1944 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1945 struct thread *t = &p->threads[i];
1948 metadata = calloc(1, n_bytes);
1949 CHECK(metadata, ENOMEM);
1951 t->metadata = metadata;
1952 t->structs[p->metadata_struct_id] = metadata;
1959 metadata_build_free(struct rte_swx_pipeline *p)
/* Undo metadata_build(): free each thread's metadata buffer
 * (free call elided in this view).
 */
1963 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1964 struct thread *t = &p->threads[i];
1972 metadata_free(struct rte_swx_pipeline *p)
/* Metadata teardown currently reduces to freeing the build-time state. */
1974 metadata_build_free(p);
1980 static struct field *
1981 action_field_parse(struct action *action, const char *name);
1983 static struct field *
1984 struct_field_parse(struct rte_swx_pipeline *p,
1985 struct action *action,
1987 uint32_t *struct_id)
/* Dispatch a field reference by its prefix: header field ("h."), metadata
 * field ("m."), action argument, extern object mailbox, or extern function
 * mailbox. On success reports the owning struct id through *struct_id.
 * The prefix switch itself is elided in this view; each case below is one
 * of its branches.
 */
1994 struct header *header;
1996 f = header_field_parse(p, name, &header);
2000 *struct_id = header->struct_id;
2006 f = metadata_field_parse(p, name);
2010 *struct_id = p->metadata_struct_id;
/* Action arguments live in struct slot 0 (the action data). */
2019 f = action_field_parse(action, name);
2029 struct extern_obj *obj;
2031 f = extern_obj_mailbox_field_parse(p, name, &obj);
2035 *struct_id = obj->struct_id;
2041 struct extern_func *func;
2043 f = extern_func_mailbox_field_parse(p, name, &func);
2047 *struct_id = func->struct_id;
2057 pipeline_port_inc(struct rte_swx_pipeline *p)
/* Advance to the next input port round-robin. The bitwise AND wrap-around
 * relies on n_ports_in being a power of two (enforced at registration).
 */
2059 p->port_id = (p->port_id + 1) & (p->n_ports_in - 1);
2063 thread_ip_reset(struct rte_swx_pipeline *p, struct thread *t)
/* Rewind the thread's instruction pointer to the pipeline's first instruction. */
2065 t->ip = p->instructions;
2069 thread_ip_action_call(struct rte_swx_pipeline *p,
/* Jump the thread's instruction pointer to the first instruction of the
 * given action (return address handling elided in this view).
 */
2074 t->ip = p->action_instructions[action_id];
2078 thread_ip_inc(struct rte_swx_pipeline *p);
2081 thread_ip_inc(struct rte_swx_pipeline *p)
/* Advance the current thread's instruction pointer by one instruction
 * (the increment itself is elided in this view).
 */
2083 struct thread *t = &p->threads[p->thread_id];
/* Conditionally advance the instruction pointer: branchless increment by
 * the boolean cond (body elided in this view).
 */
2089 thread_ip_inc_cond(struct thread *t, int cond)
2095 thread_yield(struct rte_swx_pipeline *p)
/* Cooperative scheduling: switch to the next thread round-robin. The AND
 * wrap-around relies on RTE_SWX_PIPELINE_THREADS_MAX being a power of two.
 */
2097 p->thread_id = (p->thread_id + 1) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
2104 instr_rx_translate(struct rte_swx_pipeline *p,
2105 struct action *action,
2108 struct instruction *instr,
2109 struct instruction_data *data __rte_unused)
/* Translate "rx m.<field>": only valid outside actions, takes exactly one
 * operand — the metadata field that will receive the input port id.
 */
2113 CHECK(!action, EINVAL);
2114 CHECK(n_tokens == 2, EINVAL);
2116 f = metadata_field_parse(p, tokens[1]);
2119 instr->type = INSTR_RX;
/* Store byte offset and bit width of the destination metadata field. */
2120 instr->io.io.offset = f->offset / 8;
2121 instr->io.io.n_bits = f->n_bits;
2126 instr_rx_exec(struct rte_swx_pipeline *p);
2129 instr_rx_exec(struct rte_swx_pipeline *p)
/* Execute "rx": poll the current input port for one packet, reset the
 * per-packet header state, record the port id into metadata, snapshot the
 * table state, then advance the port round-robin. The instruction pointer
 * only advances when a packet was actually received.
 */
2131 struct thread *t = &p->threads[p->thread_id];
2132 struct instruction *ip = t->ip;
2133 struct port_in_runtime *port = &p->in[p->port_id];
2134 struct rte_swx_pkt *pkt = &t->pkt;
2138 pkt_received = port->pkt_rx(port->obj, pkt);
/* Point at the first unparsed byte and warm the cache early. */
2139 t->ptr = &pkt->pkt[pkt->offset];
2140 rte_prefetch0(t->ptr);
2142 TRACE("[Thread %2u] rx %s from port %u\n",
2144 pkt_received ? "1 pkt" : "0 pkts",
/* Fresh packet: no headers parsed, no headers queued for emission. */
2148 t->valid_headers = 0;
2149 t->n_headers_out = 0;
2152 METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, p->port_id);
/* Snapshot table state so the packet sees a consistent view. */
2155 t->table_state = p->table_state;
2158 pipeline_port_inc(p);
2159 thread_ip_inc_cond(t, pkt_received);
2167 instr_tx_translate(struct rte_swx_pipeline *p,
2168 struct action *action __rte_unused,
2171 struct instruction *instr,
2172 struct instruction_data *data __rte_unused)
/* Translate "tx m.<field>": one operand, the metadata field holding the
 * output port id to transmit on.
 */
2176 CHECK(n_tokens == 2, EINVAL);
2178 f = metadata_field_parse(p, tokens[1]);
2181 instr->type = INSTR_TX;
2182 instr->io.io.offset = f->offset / 8;
2183 instr->io.io.n_bits = f->n_bits;
2188 emit_handler(struct thread *t)
/* Materialize the queued headers_out[] in front of the packet payload
 * before transmission. Fast paths avoid copying when the emitted headers
 * are already contiguous with the payload; the general path gathers all
 * headers into header_out_storage and prepends them in one copy.
 */
2190 struct header_out_runtime *h0 = &t->headers_out[0];
2191 struct header_out_runtime *h1 = &t->headers_out[1];
2192 uint32_t offset = 0, i;
2194 /* No header change or header decapsulation. */
/* Fast path: single header that already sits immediately before the
 * payload — just extend the packet over it, zero copies.
 */
2195 if ((t->n_headers_out == 1) &&
2196 (h0->ptr + h0->n_bytes == t->ptr)) {
2197 TRACE("Emit handler: no header change or header decap.\n");
2199 t->pkt.offset -= h0->n_bytes;
2200 t->pkt.length += h0->n_bytes;
2205 /* Header encapsulation (optionally, with prior header decasulation). */
/* Fast path: two headers where the second is contiguous with the payload
 * and the first still lives in its home slot — one copy of h0 suffices.
 */
2206 if ((t->n_headers_out == 2) &&
2207 (h1->ptr + h1->n_bytes == t->ptr) &&
2208 (h0->ptr == h0->ptr0)) {
2211 TRACE("Emit handler: header encapsulation.\n");
2213 offset = h0->n_bytes + h1->n_bytes;
2214 memcpy(t->ptr - offset, h0->ptr, h0->n_bytes);
2215 t->pkt.offset -= offset;
2216 t->pkt.length += offset;
2221 /* Header insertion. */
2224 /* Header extraction. */
2227 /* For any other case. */
2228 TRACE("Emit handler: complex case.\n");
/* General path: gather every queued header into contiguous scratch
 * storage, then prepend the whole blob to the payload.
 */
2230 for (i = 0; i < t->n_headers_out; i++) {
2231 struct header_out_runtime *h = &t->headers_out[i];
2233 memcpy(&t->header_out_storage[offset], h->ptr, h->n_bytes);
2234 offset += h->n_bytes;
2238 memcpy(t->ptr - offset, t->header_out_storage, offset);
2239 t->pkt.offset -= offset;
2240 t->pkt.length += offset;
2245 instr_tx_exec(struct rte_swx_pipeline *p);
2248 instr_tx_exec(struct rte_swx_pipeline *p)
/* Execute "tx": read the output port id from metadata, flush the queued
 * headers into the packet (emit_handler, elided call in this view), hand
 * the packet to the port, then restart the thread at the first instruction.
 */
2250 struct thread *t = &p->threads[p->thread_id];
2251 struct instruction *ip = t->ip;
2252 uint64_t port_id = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
2253 struct port_out_runtime *port = &p->out[port_id];
2254 struct rte_swx_pkt *pkt = &t->pkt;
2256 TRACE("[Thread %2u]: tx 1 pkt to port %u\n",
2264 port->pkt_tx(port->obj, pkt);
/* Packet done: next iteration starts from the top of the program. */
2267 thread_ip_reset(p, t);
2275 instr_hdr_extract_translate(struct rte_swx_pipeline *p,
2276 struct action *action,
2279 struct instruction *instr,
2280 struct instruction_data *data __rte_unused)
/* Translate "extract h.<header>": only valid outside actions; records the
 * header's id, struct slot and byte size into slot 0 of the (fusable)
 * extract instruction.
 */
2284 CHECK(!action, EINVAL);
2285 CHECK(n_tokens == 2, EINVAL);
2287 h = header_parse(p, tokens[1]);
2290 instr->type = INSTR_HDR_EXTRACT;
2291 instr->io.hdr.header_id[0] = h->id;
2292 instr->io.hdr.struct_id[0] = h->struct_id;
2293 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
2298 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract);
2301 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract)
/* Common body for the (fused) extract instructions: peel n_extract headers
 * off the front of the packet in place — each header's struct pointer is
 * aimed directly at the packet bytes (zero copy) and its valid bit is set.
 * Pointer/offset/length advancing per header is elided in this view.
 */
2303 struct thread *t = &p->threads[p->thread_id];
2304 struct instruction *ip = t->ip;
2305 uint64_t valid_headers = t->valid_headers;
2306 uint8_t *ptr = t->ptr;
2307 uint32_t offset = t->pkt.offset;
2308 uint32_t length = t->pkt.length;
2311 for (i = 0; i < n_extract; i++) {
2312 uint32_t header_id = ip->io.hdr.header_id[i];
2313 uint32_t struct_id = ip->io.hdr.struct_id[i];
2314 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
2316 TRACE("[Thread %2u]: extract header %u (%u bytes)\n",
/* Zero copy: the header aliases the packet buffer directly. */
2322 t->structs[struct_id] = ptr;
2323 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
/* Write back the locally accumulated state. */
2332 t->valid_headers = valid_headers;
2335 t->pkt.offset = offset;
2336 t->pkt.length = length;
2341 instr_hdr_extract_exec(struct rte_swx_pipeline *p)
/* Single-header extract: thin wrapper over the common body. */
2343 __instr_hdr_extract_exec(p, 1);
2350 instr_hdr_extract2_exec(struct rte_swx_pipeline *p)
/* Fused variant: 2 consecutive extract instructions executed as one. */
2352 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
2355 __instr_hdr_extract_exec(p, 2);
2362 instr_hdr_extract3_exec(struct rte_swx_pipeline *p)
/* Fused variant: 3 consecutive extract instructions executed as one. */
2364 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
2367 __instr_hdr_extract_exec(p, 3);
2374 instr_hdr_extract4_exec(struct rte_swx_pipeline *p)
/* Fused variant: 4 consecutive extract instructions executed as one. */
2376 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
2379 __instr_hdr_extract_exec(p, 4);
2386 instr_hdr_extract5_exec(struct rte_swx_pipeline *p)
/* Fused variant: 5 consecutive extract instructions executed as one. */
2388 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
2391 __instr_hdr_extract_exec(p, 5);
2398 instr_hdr_extract6_exec(struct rte_swx_pipeline *p)
/* Fused variant: 6 consecutive extract instructions executed as one. */
2400 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
2403 __instr_hdr_extract_exec(p, 6);
2410 instr_hdr_extract7_exec(struct rte_swx_pipeline *p)
/* Fused variant: 7 consecutive extract instructions executed as one. */
2412 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
2415 __instr_hdr_extract_exec(p, 7);
2422 instr_hdr_extract8_exec(struct rte_swx_pipeline *p)
/* Fused variant: 8 consecutive extract instructions executed as one. */
2424 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
2427 __instr_hdr_extract_exec(p, 8);
2437 instr_hdr_emit_translate(struct rte_swx_pipeline *p,
2438 struct action *action __rte_unused,
2441 struct instruction *instr,
2442 struct instruction_data *data __rte_unused)
/* Translate "emit h.<header>": records the header's id, struct slot and
 * byte size into slot 0 of the (fusable) emit instruction. Allowed both
 * inside and outside actions.
 */
2446 CHECK(n_tokens == 2, EINVAL);
2448 h = header_parse(p, tokens[1]);
2451 instr->type = INSTR_HDR_EMIT;
2452 instr->io.hdr.header_id[0] = h->id;
2453 instr->io.hdr.struct_id[0] = h->struct_id;
2454 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
2459 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit);
2462 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit)
/* Common body for the (fused) emit instructions: queue n_emit headers into
 * the thread's headers_out[] list. Consecutive headers that are contiguous
 * in memory are coalesced into a single headers_out entry so emit_handler
 * can copy them in one go. Several branches are elided in this view.
 */
2464 struct thread *t = &p->threads[p->thread_id];
2465 struct instruction *ip = t->ip;
2466 uint32_t n_headers_out = t->n_headers_out;
/* ho tracks the entry currently being extended (last queued one). */
2467 struct header_out_runtime *ho = &t->headers_out[n_headers_out - 1];
2468 uint8_t *ho_ptr = NULL;
2469 uint32_t ho_nbytes = 0, i;
2471 for (i = 0; i < n_emit; i++) {
2472 uint32_t header_id = ip->io.hdr.header_id[i];
2473 uint32_t struct_id = ip->io.hdr.struct_id[i];
2474 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
2476 struct header_runtime *hi = &t->headers[header_id];
2477 uint8_t *hi_ptr = t->structs[struct_id];
2479 TRACE("[Thread %2u]: emit header %u\n",
/* First emitted header: start the headers_out[] list. */
2485 if (!t->n_headers_out) {
2486 ho = &t->headers_out[0];
2488 ho->ptr0 = hi->ptr0;
2492 ho_nbytes = n_bytes;
2499 ho_nbytes = ho->n_bytes;
/* Contiguous with the previous entry: coalesce instead of appending. */
2503 if (ho_ptr + ho_nbytes == hi_ptr) {
2504 ho_nbytes += n_bytes;
2506 ho->n_bytes = ho_nbytes;
/* Non-contiguous: open a new headers_out entry for this header. */
2509 ho->ptr0 = hi->ptr0;
2513 ho_nbytes = n_bytes;
/* Flush the locally accumulated size/count back to the thread. */
2519 ho->n_bytes = ho_nbytes;
2520 t->n_headers_out = n_headers_out;
2524 instr_hdr_emit_exec(struct rte_swx_pipeline *p)
/* Single-header emit: thin wrapper over the common body. */
2526 __instr_hdr_emit_exec(p, 1);
2533 instr_hdr_emit_tx_exec(struct rte_swx_pipeline *p)
/* Fused emit+tx: 1 emit followed by tx (2 instructions) executed as one;
 * the tx part is elided in this view.
 */
2535 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
2538 __instr_hdr_emit_exec(p, 1);
2543 instr_hdr_emit2_tx_exec(struct rte_swx_pipeline *p)
/* Fused 2x emit + tx (3 instructions) executed as one. */
2545 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
2548 __instr_hdr_emit_exec(p, 2);
2553 instr_hdr_emit3_tx_exec(struct rte_swx_pipeline *p)
/* Fused 3x emit + tx (4 instructions) executed as one. */
2555 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
2558 __instr_hdr_emit_exec(p, 3);
2563 instr_hdr_emit4_tx_exec(struct rte_swx_pipeline *p)
/* Fused 4x emit + tx (5 instructions) executed as one. */
2565 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
2568 __instr_hdr_emit_exec(p, 4);
2573 instr_hdr_emit5_tx_exec(struct rte_swx_pipeline *p)
/* Fused 5x emit + tx (6 instructions) executed as one. */
2575 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
2578 __instr_hdr_emit_exec(p, 5);
2583 instr_hdr_emit6_tx_exec(struct rte_swx_pipeline *p)
/* Fused 6x emit + tx (7 instructions) executed as one. */
2585 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
2588 __instr_hdr_emit_exec(p, 6);
2593 instr_hdr_emit7_tx_exec(struct rte_swx_pipeline *p)
/* Fused 7x emit + tx (8 instructions) executed as one. */
2595 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
2598 __instr_hdr_emit_exec(p, 7);
2603 instr_hdr_emit8_tx_exec(struct rte_swx_pipeline *p)
/* Fused 8x emit + tx (9 instructions) executed as one. */
2605 TRACE("[Thread %2u] *** The next 9 instructions are fused. ***\n",
2608 __instr_hdr_emit_exec(p, 8);
2616 instr_hdr_validate_translate(struct rte_swx_pipeline *p,
2617 struct action *action __rte_unused,
2620 struct instruction *instr,
2621 struct instruction_data *data __rte_unused)
/* Translate "validate h.<header>": mark-valid instruction, stores only the
 * target header id.
 */
2625 CHECK(n_tokens == 2, EINVAL);
2627 h = header_parse(p, tokens[1]);
2630 instr->type = INSTR_HDR_VALIDATE;
2631 instr->valid.header_id = h->id;
2636 instr_hdr_validate_exec(struct rte_swx_pipeline *p)
/* Execute "validate": set the header's bit in the valid_headers mask. */
2638 struct thread *t = &p->threads[p->thread_id];
2639 struct instruction *ip = t->ip;
2640 uint32_t header_id = ip->valid.header_id;
2642 TRACE("[Thread %2u] validate header %u\n", p->thread_id, header_id);
2645 t->valid_headers = MASK64_BIT_SET(t->valid_headers, header_id);
2655 instr_hdr_invalidate_translate(struct rte_swx_pipeline *p,
2656 struct action *action __rte_unused,
2659 struct instruction *instr,
2660 struct instruction_data *data __rte_unused)
/* Translate "invalidate h.<header>": mark-invalid instruction, stores only
 * the target header id.
 */
2664 CHECK(n_tokens == 2, EINVAL);
2666 h = header_parse(p, tokens[1]);
2669 instr->type = INSTR_HDR_INVALIDATE;
2670 instr->valid.header_id = h->id;
2675 instr_hdr_invalidate_exec(struct rte_swx_pipeline *p)
/* Execute "invalidate": clear the header's bit in the valid_headers mask. */
2677 struct thread *t = &p->threads[p->thread_id];
2678 struct instruction *ip = t->ip;
2679 uint32_t header_id = ip->valid.header_id;
2681 TRACE("[Thread %2u] invalidate header %u\n", p->thread_id, header_id);
2684 t->valid_headers = MASK64_BIT_CLR(t->valid_headers, header_id);
2693 static struct table *
2694 table_find(struct rte_swx_pipeline *p, const char *name);
2697 instr_table_translate(struct rte_swx_pipeline *p,
2698 struct action *action,
2701 struct instruction *instr,
2702 struct instruction_data *data __rte_unused)
/* Translate "table <name>": only valid outside actions; resolves the table
 * name and stores its id.
 */
2706 CHECK(!action, EINVAL);
2707 CHECK(n_tokens == 2, EINVAL);
2709 t = table_find(p, tokens[1]);
2712 instr->type = INSTR_TABLE;
2713 instr->table.table_id = t->id;
2718 instr_table_exec(struct rte_swx_pipeline *p)
/* Execute "table": run the table lookup. If the lookup is not finalized
 * yet, the thread yields (branch partly elided in this view); otherwise
 * pick the matched action (or the table's default action on miss), publish
 * the action data as struct slot 0, and jump into the action's code.
 */
2720 struct thread *t = &p->threads[p->thread_id];
2721 struct instruction *ip = t->ip;
2722 uint32_t table_id = ip->table.table_id;
2723 struct rte_swx_table_state *ts = &t->table_state[table_id];
2724 struct table_runtime *table = &t->tables[table_id];
2726 uint8_t *action_data;
2730 done = table->func(ts->obj,
2738 TRACE("[Thread %2u] table %u (not finalized)\n",
/* Miss falls back to the table's configured default action. */
2746 action_id = hit ? action_id : ts->default_action_id;
2747 action_data = hit ? action_data : ts->default_action_data;
2749 TRACE("[Thread %2u] table %u (%s, action %u)\n",
2752 hit ? "hit" : "miss",
2753 (uint32_t)action_id);
2755 t->action_id = action_id;
/* Struct slot 0 is reserved for the current action's data. */
2756 t->structs[0] = action_data;
2760 thread_ip_action_call(p, t, action_id);
2767 instr_mov_translate(struct rte_swx_pipeline *p,
2768 struct action *action,
2771 struct instruction *instr,
2772 struct instruction_data *data __rte_unused)
/* Translate "mov <dst> <src>". Three encodings:
 *   INSTR_MOV   - both operands same endianness domain;
 *   INSTR_MOV_S - exactly one operand is a header field, so a network/host
 *                 byte swap is needed at run time;
 *   INSTR_MOV_I - src is an immediate constant (parsed with strtoul).
 * The branch between field-src and immediate-src is elided in this view.
 */
2774 char *dst = tokens[1], *src = tokens[2];
2775 struct field *fdst, *fsrc;
2776 uint32_t dst_struct_id, src_struct_id, src_val;
2778 CHECK(n_tokens == 3, EINVAL);
/* Destination may not be an action argument (action arg. is NULL here). */
2780 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
2781 CHECK(fdst, EINVAL);
2784 fsrc = struct_field_parse(p, action, src, &src_struct_id);
2786 instr->type = INSTR_MOV;
/* Mixed header/non-header operands need the byte-swapping variant. */
2787 if ((dst[0] == 'h' && src[0] != 'h') ||
2788 (dst[0] != 'h' && src[0] == 'h'))
2789 instr->type = INSTR_MOV_S;
2791 instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
2792 instr->mov.dst.n_bits = fdst->n_bits;
2793 instr->mov.dst.offset = fdst->offset / 8;
2794 instr->mov.src.struct_id = (uint8_t)src_struct_id;
2795 instr->mov.src.n_bits = fsrc->n_bits;
2796 instr->mov.src.offset = fsrc->offset / 8;
/* Immediate source: base-0 strtoul accepts decimal, hex and octal; the
 * whole token must be consumed for the constant to be valid.
 */
2801 src_val = strtoul(src, &src, 0);
2802 CHECK(!src[0], EINVAL);
/* Header destinations store immediates in network byte order. */
2805 src_val = htonl(src_val);
2807 instr->type = INSTR_MOV_I;
2808 instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
2809 instr->mov.dst.n_bits = fdst->n_bits;
2810 instr->mov.dst.offset = fdst->offset / 8;
2811 instr->mov.src_val = (uint32_t)src_val;
2816 instr_mov_exec(struct rte_swx_pipeline *p)
/* Execute plain "mov" (no byte swap); the data move macro is elided in
 * this view.
 */
2818 struct thread *t = &p->threads[p->thread_id];
2819 struct instruction *ip = t->ip;
2821 TRACE("[Thread %2u] mov\n",
2831 instr_mov_s_exec(struct rte_swx_pipeline *p)
/* Execute "mov" with byte swap (one header-field operand); the data move
 * macro is elided in this view.
 */
2833 struct thread *t = &p->threads[p->thread_id];
2834 struct instruction *ip = t->ip;
2836 TRACE("[Thread %2u] mov (s)\n",
2846 instr_mov_i_exec(struct rte_swx_pipeline *p)
/* Execute "mov" with an immediate source; the data move macro is elided
 * in this view.
 */
2848 struct thread *t = &p->threads[p->thread_id];
2849 struct instruction *ip = t->ip;
2851 TRACE("[Thread %2u] mov m.f %x\n",
2865 instr_dma_translate(struct rte_swx_pipeline *p,
2866 struct action *action,
2869 struct instruction *instr,
2870 struct instruction_data *data __rte_unused)
/* Translate "dma h.<header> t.<field>": only valid inside an action; bulk
 * copies a whole header's worth of bytes from the action data (table entry)
 * into the header. Slot 0 of the fusable DMA instruction is filled in.
 */
2872 char *dst = tokens[1];
2873 char *src = tokens[2];
2877 CHECK(action, EINVAL);
2878 CHECK(n_tokens == 3, EINVAL);
2880 h = header_parse(p, dst);
2883 tf = action_field_parse(action, src);
2886 instr->type = INSTR_DMA_HT;
2887 instr->dma.dst.header_id[0] = h->id;
2888 instr->dma.dst.struct_id[0] = h->struct_id;
/* Copy length is the full header size. */
2889 instr->dma.n_bytes[0] = h->st->n_bits / 8;
2890 instr->dma.src.offset[0] = tf->offset / 8;
2896 __instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma);
2899 __instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma)
/* Common body for the (fused) DMA instructions: copy n_dma header-sized
 * blocks from the current action data into their headers. A header that is
 * not yet valid is written into its home slot (ptr0) and marked valid.
 */
2901 struct thread *t = &p->threads[p->thread_id];
2902 struct instruction *ip = t->ip;
/* Struct slot 0 holds the current action's data (set by table exec). */
2903 uint8_t *action_data = t->structs[0];
2904 uint64_t valid_headers = t->valid_headers;
2907 for (i = 0; i < n_dma; i++) {
2908 uint32_t header_id = ip->dma.dst.header_id[i];
2909 uint32_t struct_id = ip->dma.dst.struct_id[i];
2910 uint32_t offset = ip->dma.src.offset[i];
2911 uint32_t n_bytes = ip->dma.n_bytes[i];
2913 struct header_runtime *h = &t->headers[header_id];
2914 uint8_t *h_ptr0 = h->ptr0;
2915 uint8_t *h_ptr = t->structs[struct_id];
/* Valid header: overwrite in place; invalid: write to the home slot. */
2917 void *dst = MASK64_BIT_GET(valid_headers, header_id) ?
2919 void *src = &action_data[offset];
2921 TRACE("[Thread %2u] dma h.s t.f\n", p->thread_id);
2924 memcpy(dst, src, n_bytes);
2925 t->structs[struct_id] = dst;
2926 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
2929 t->valid_headers = valid_headers;
2933 instr_dma_ht_exec(struct rte_swx_pipeline *p)
/* Single DMA: thin wrapper over the common body. */
2935 __instr_dma_ht_exec(p, 1);
2942 instr_dma_ht2_exec(struct rte_swx_pipeline *p)
/* Fused variant: 2 consecutive DMA instructions executed as one. */
2944 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
2947 __instr_dma_ht_exec(p, 2);
2954 instr_dma_ht3_exec(struct rte_swx_pipeline *p)
/* Fused variant: 3 consecutive DMA instructions executed as one. */
2956 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
2959 __instr_dma_ht_exec(p, 3);
2966 instr_dma_ht4_exec(struct rte_swx_pipeline *p)
/* Fused variant: 4 consecutive DMA instructions executed as one. */
2968 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
2971 __instr_dma_ht_exec(p, 4);
2978 instr_dma_ht5_exec(struct rte_swx_pipeline *p)
/* Fused variant: 5 consecutive DMA instructions executed as one. */
2980 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
2983 __instr_dma_ht_exec(p, 5);
2990 instr_dma_ht6_exec(struct rte_swx_pipeline *p)
/* Fused variant: 6 consecutive DMA instructions executed as one. */
2992 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
2995 __instr_dma_ht_exec(p, 6);
3002 instr_dma_ht7_exec(struct rte_swx_pipeline *p)
/* Fused variant: 7 consecutive DMA instructions executed as one. */
3004 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
3007 __instr_dma_ht_exec(p, 7);
3014 instr_dma_ht8_exec(struct rte_swx_pipeline *p)
/* Fused variant: 8 consecutive DMA instructions executed as one. */
3016 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
3019 __instr_dma_ht_exec(p, 8);
3029 instr_alu_add_translate(struct rte_swx_pipeline *p,
3030 struct action *action,
3033 struct instruction *instr,
3034 struct instruction_data *data __rte_unused)
/* Translate "add <dst> <src>". The opcode encodes the operand endianness
 * domains: HM (header dst / metadata src), MH, HH, plus MI/HI for an
 * immediate source. The field-vs-immediate branch is elided in this view.
 */
3036 char *dst = tokens[1], *src = tokens[2];
3037 struct field *fdst, *fsrc;
3038 uint32_t dst_struct_id, src_struct_id, src_val;
3040 CHECK(n_tokens == 3, EINVAL);
3042 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3043 CHECK(fdst, EINVAL);
3045 /* ADD, ADD_HM, ADD_MH, ADD_HH. */
3046 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3048 instr->type = INSTR_ALU_ADD;
3049 if (dst[0] == 'h' && src[0] == 'm')
3050 instr->type = INSTR_ALU_ADD_HM;
3051 if (dst[0] == 'm' && src[0] == 'h')
3052 instr->type = INSTR_ALU_ADD_MH;
3053 if (dst[0] == 'h' && src[0] == 'h')
3054 instr->type = INSTR_ALU_ADD_HH;
3056 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3057 instr->alu.dst.n_bits = fdst->n_bits;
3058 instr->alu.dst.offset = fdst->offset / 8;
3059 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3060 instr->alu.src.n_bits = fsrc->n_bits;
3061 instr->alu.src.offset = fsrc->offset / 8;
3065 /* ADD_MI, ADD_HI. */
/* Base-0 strtoul: decimal, hex or octal; whole token must be consumed. */
3066 src_val = strtoul(src, &src, 0);
3067 CHECK(!src[0], EINVAL);
3069 instr->type = INSTR_ALU_ADD_MI;
3071 instr->type = INSTR_ALU_ADD_HI;
3073 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3074 instr->alu.dst.n_bits = fdst->n_bits;
3075 instr->alu.dst.offset = fdst->offset / 8;
3076 instr->alu.src_val = (uint32_t)src_val;
3081 instr_alu_sub_translate(struct rte_swx_pipeline *p,
3082 struct action *action,
3085 struct instruction *instr,
3086 struct instruction_data *data __rte_unused)
/* Translate "sub <dst> <src>": same operand-domain encoding scheme as add
 * (HM/MH/HH plus MI/HI immediates).
 */
3088 char *dst = tokens[1], *src = tokens[2];
3089 struct field *fdst, *fsrc;
3090 uint32_t dst_struct_id, src_struct_id, src_val;
3092 CHECK(n_tokens == 3, EINVAL);
3094 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3095 CHECK(fdst, EINVAL);
3097 /* SUB, SUB_HM, SUB_MH, SUB_HH. */
3098 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3100 instr->type = INSTR_ALU_SUB;
3101 if (dst[0] == 'h' && src[0] == 'm')
3102 instr->type = INSTR_ALU_SUB_HM;
3103 if (dst[0] == 'm' && src[0] == 'h')
3104 instr->type = INSTR_ALU_SUB_MH;
3105 if (dst[0] == 'h' && src[0] == 'h')
3106 instr->type = INSTR_ALU_SUB_HH;
3108 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3109 instr->alu.dst.n_bits = fdst->n_bits;
3110 instr->alu.dst.offset = fdst->offset / 8;
3111 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3112 instr->alu.src.n_bits = fsrc->n_bits;
3113 instr->alu.src.offset = fsrc->offset / 8;
3117 /* SUB_MI, SUB_HI. */
3118 src_val = strtoul(src, &src, 0);
3119 CHECK(!src[0], EINVAL);
3121 instr->type = INSTR_ALU_SUB_MI;
3123 instr->type = INSTR_ALU_SUB_HI;
3125 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3126 instr->alu.dst.n_bits = fdst->n_bits;
3127 instr->alu.dst.offset = fdst->offset / 8;
3128 instr->alu.src_val = (uint32_t)src_val;
3133 instr_alu_ckadd_translate(struct rte_swx_pipeline *p,
3134 struct action *action __rte_unused,
3137 struct instruction *instr,
3138 struct instruction_data *data __rte_unused)
/* Translate "ckadd <dst> <src>" (incremental Internet checksum add).
 * The destination must be a 16-bit header field (the checksum itself).
 * The source is either another header field (CKADD_FIELD) or a whole
 * header (CKADD_STRUCT); a 20-byte source struct gets the specialized
 * CKADD_STRUCT20 opcode — presumably the IPv4 header without options.
 */
3140 char *dst = tokens[1], *src = tokens[2];
3141 struct header *hdst, *hsrc;
3142 struct field *fdst, *fsrc;
3144 CHECK(n_tokens == 3, EINVAL);
3146 fdst = header_field_parse(p, dst, &hdst);
/* Checksum fields are 16 bits by definition (RFC 1071). */
3147 CHECK(fdst && (fdst->n_bits == 16), EINVAL);
3150 fsrc = header_field_parse(p, src, &hsrc);
3152 instr->type = INSTR_ALU_CKADD_FIELD;
3153 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
3154 instr->alu.dst.n_bits = fdst->n_bits;
3155 instr->alu.dst.offset = fdst->offset / 8;
3156 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
3157 instr->alu.src.n_bits = fsrc->n_bits;
3158 instr->alu.src.offset = fsrc->offset / 8;
3162 /* CKADD_STRUCT, CKADD_STRUCT20. */
3163 hsrc = header_parse(p, src);
3164 CHECK(hsrc, EINVAL);
3166 instr->type = INSTR_ALU_CKADD_STRUCT;
3167 if ((hsrc->st->n_bits / 8) == 20)
3168 instr->type = INSTR_ALU_CKADD_STRUCT20;
3170 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
3171 instr->alu.dst.n_bits = fdst->n_bits;
3172 instr->alu.dst.offset = fdst->offset / 8;
3173 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
/* Whole-struct source: width is the header size, offset is unused. */
3174 instr->alu.src.n_bits = hsrc->st->n_bits;
3175 instr->alu.src.offset = 0; /* Unused. */
3180 instr_alu_cksub_translate(struct rte_swx_pipeline *p,
3181 struct action *action __rte_unused,
3184 struct instruction *instr,
3185 struct instruction_data *data __rte_unused)
/* Translate "cksub <dst> <src>" (incremental Internet checksum subtract).
 * Destination must be a 16-bit header field; source is a header field.
 */
3187 char *dst = tokens[1], *src = tokens[2];
3188 struct header *hdst, *hsrc;
3189 struct field *fdst, *fsrc;
3191 CHECK(n_tokens == 3, EINVAL);
3193 fdst = header_field_parse(p, dst, &hdst);
3194 CHECK(fdst && (fdst->n_bits == 16), EINVAL);
3196 fsrc = header_field_parse(p, src, &hsrc);
3197 CHECK(fsrc, EINVAL);
3199 instr->type = INSTR_ALU_CKSUB_FIELD;
3200 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
3201 instr->alu.dst.n_bits = fdst->n_bits;
3202 instr->alu.dst.offset = fdst->offset / 8;
3203 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
3204 instr->alu.src.n_bits = fsrc->n_bits;
3205 instr->alu.src.offset = fsrc->offset / 8;
3210 instr_alu_shl_translate(struct rte_swx_pipeline *p,
3211 struct action *action,
3214 struct instruction *instr,
3215 struct instruction_data *data __rte_unused)
/* Translate "shl <dst> <src>": left shift, same operand-domain encoding
 * scheme as add (HM/MH/HH plus MI/HI immediates).
 */
3217 char *dst = tokens[1], *src = tokens[2];
3218 struct field *fdst, *fsrc;
3219 uint32_t dst_struct_id, src_struct_id, src_val;
3221 CHECK(n_tokens == 3, EINVAL);
3223 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3224 CHECK(fdst, EINVAL);
3226 /* SHL, SHL_HM, SHL_MH, SHL_HH. */
3227 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3229 instr->type = INSTR_ALU_SHL;
3230 if (dst[0] == 'h' && src[0] == 'm')
3231 instr->type = INSTR_ALU_SHL_HM;
3232 if (dst[0] == 'm' && src[0] == 'h')
3233 instr->type = INSTR_ALU_SHL_MH;
3234 if (dst[0] == 'h' && src[0] == 'h')
3235 instr->type = INSTR_ALU_SHL_HH;
3237 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3238 instr->alu.dst.n_bits = fdst->n_bits;
3239 instr->alu.dst.offset = fdst->offset / 8;
3240 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3241 instr->alu.src.n_bits = fsrc->n_bits;
3242 instr->alu.src.offset = fsrc->offset / 8;
3246 /* SHL_MI, SHL_HI. */
3247 src_val = strtoul(src, &src, 0);
3248 CHECK(!src[0], EINVAL);
3250 instr->type = INSTR_ALU_SHL_MI;
3252 instr->type = INSTR_ALU_SHL_HI;
3254 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3255 instr->alu.dst.n_bits = fdst->n_bits;
3256 instr->alu.dst.offset = fdst->offset / 8;
3257 instr->alu.src_val = (uint32_t)src_val;
3262 instr_alu_shr_translate(struct rte_swx_pipeline *p,
3263 struct action *action,
3266 struct instruction *instr,
3267 struct instruction_data *data __rte_unused)
/* Translate "shr <dst> <src>": right shift, same operand-domain encoding
 * scheme as add (HM/MH/HH plus MI/HI immediates).
 */
3269 char *dst = tokens[1], *src = tokens[2];
3270 struct field *fdst, *fsrc;
3271 uint32_t dst_struct_id, src_struct_id, src_val;
3273 CHECK(n_tokens == 3, EINVAL);
3275 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3276 CHECK(fdst, EINVAL);
3278 /* SHR, SHR_HM, SHR_MH, SHR_HH. */
3279 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3281 instr->type = INSTR_ALU_SHR;
3282 if (dst[0] == 'h' && src[0] == 'm')
3283 instr->type = INSTR_ALU_SHR_HM;
3284 if (dst[0] == 'm' && src[0] == 'h')
3285 instr->type = INSTR_ALU_SHR_MH;
3286 if (dst[0] == 'h' && src[0] == 'h')
3287 instr->type = INSTR_ALU_SHR_HH;
3289 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3290 instr->alu.dst.n_bits = fdst->n_bits;
3291 instr->alu.dst.offset = fdst->offset / 8;
3292 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3293 instr->alu.src.n_bits = fsrc->n_bits;
3294 instr->alu.src.offset = fsrc->offset / 8;
3298 /* SHR_MI, SHR_HI. */
3299 src_val = strtoul(src, &src, 0);
3300 CHECK(!src[0], EINVAL);
3302 instr->type = INSTR_ALU_SHR_MI;
3304 instr->type = INSTR_ALU_SHR_HI;
3306 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3307 instr->alu.dst.n_bits = fdst->n_bits;
3308 instr->alu.dst.offset = fdst->offset / 8;
3309 instr->alu.src_val = (uint32_t)src_val;
3314 instr_alu_and_translate(struct rte_swx_pipeline *p,
3315 struct action *action,
3318 struct instruction *instr,
3319 struct instruction_data *data __rte_unused)
/* Translate "and <dst> <src>". Bitwise AND is endianness-symmetric, so
 * unlike add/sub only two field variants exist: plain AND and AND_S (byte
 * swap needed when exactly one operand is a header field), plus AND_I for
 * an immediate source.
 */
3321 char *dst = tokens[1], *src = tokens[2];
3322 struct field *fdst, *fsrc;
3323 uint32_t dst_struct_id, src_struct_id, src_val;
3325 CHECK(n_tokens == 3, EINVAL);
3327 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3328 CHECK(fdst, EINVAL);
3331 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3333 instr->type = INSTR_ALU_AND;
3334 if ((dst[0] == 'h' && src[0] != 'h') ||
3335 (dst[0] != 'h' && src[0] == 'h'))
3336 instr->type = INSTR_ALU_AND_S;
3338 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3339 instr->alu.dst.n_bits = fdst->n_bits;
3340 instr->alu.dst.offset = fdst->offset / 8;
3341 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3342 instr->alu.src.n_bits = fsrc->n_bits;
3343 instr->alu.src.offset = fsrc->offset / 8;
3348 src_val = strtoul(src, &src, 0);
3349 CHECK(!src[0], EINVAL);
/* Header destinations store immediates in network byte order. */
3352 src_val = htonl(src_val);
3354 instr->type = INSTR_ALU_AND_I;
3355 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3356 instr->alu.dst.n_bits = fdst->n_bits;
3357 instr->alu.dst.offset = fdst->offset / 8;
3358 instr->alu.src_val = (uint32_t)src_val;
3363 instr_alu_or_translate(struct rte_swx_pipeline *p,
3364 struct action *action,
3367 struct instruction *instr,
3368 struct instruction_data *data __rte_unused)
/* Translate "or <dst> <src>": same shape as "and" — OR, OR_S (mixed
 * header/non-header operands), and OR_I for an immediate source.
 */
3370 char *dst = tokens[1], *src = tokens[2];
3371 struct field *fdst, *fsrc;
3372 uint32_t dst_struct_id, src_struct_id, src_val;
3374 CHECK(n_tokens == 3, EINVAL);
3376 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3377 CHECK(fdst, EINVAL);
3380 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3382 instr->type = INSTR_ALU_OR;
3383 if ((dst[0] == 'h' && src[0] != 'h') ||
3384 (dst[0] != 'h' && src[0] == 'h'))
3385 instr->type = INSTR_ALU_OR_S;
3387 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3388 instr->alu.dst.n_bits = fdst->n_bits;
3389 instr->alu.dst.offset = fdst->offset / 8;
3390 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3391 instr->alu.src.n_bits = fsrc->n_bits;
3392 instr->alu.src.offset = fsrc->offset / 8;
3397 src_val = strtoul(src, &src, 0);
3398 CHECK(!src[0], EINVAL);
/* Header destinations store immediates in network byte order. */
3401 src_val = htonl(src_val);
3403 instr->type = INSTR_ALU_OR_I;
3404 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3405 instr->alu.dst.n_bits = fdst->n_bits;
3406 instr->alu.dst.offset = fdst->offset / 8;
3407 instr->alu.src_val = (uint32_t)src_val;
3412 instr_alu_xor_translate(struct rte_swx_pipeline *p,
3413 struct action *action,
3416 struct instruction *instr,
3417 struct instruction_data *data __rte_unused)
/* Translate "xor <dst> <src>": same shape as "and"/"or" — XOR, XOR_S
 * (mixed header/non-header operands), and XOR_I for an immediate source.
 */
3419 char *dst = tokens[1], *src = tokens[2];
3420 struct field *fdst, *fsrc;
3421 uint32_t dst_struct_id, src_struct_id, src_val;
3423 CHECK(n_tokens == 3, EINVAL);
3425 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3426 CHECK(fdst, EINVAL);
3429 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3431 instr->type = INSTR_ALU_XOR;
3432 if ((dst[0] == 'h' && src[0] != 'h') ||
3433 (dst[0] != 'h' && src[0] == 'h'))
3434 instr->type = INSTR_ALU_XOR_S;
3436 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3437 instr->alu.dst.n_bits = fdst->n_bits;
3438 instr->alu.dst.offset = fdst->offset / 8;
3439 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3440 instr->alu.src.n_bits = fsrc->n_bits;
3441 instr->alu.src.offset = fsrc->offset / 8;
3446 src_val = strtoul(src, &src, 0);
3447 CHECK(!src[0], EINVAL);
/* Header destinations store immediates in network byte order. */
3450 src_val = htonl(src_val);
3452 instr->type = INSTR_ALU_XOR_I;
3453 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3454 instr->alu.dst.n_bits = fdst->n_bits;
3455 instr->alu.dst.offset = fdst->offset / 8;
3456 instr->alu.src_val = (uint32_t)src_val;
3461 instr_alu_add_exec(struct rte_swx_pipeline *p)
3463 struct thread *t = &p->threads[p->thread_id];
3464 struct instruction *ip = t->ip;
3466 TRACE("[Thread %2u] add\n", p->thread_id);
3476 instr_alu_add_mh_exec(struct rte_swx_pipeline *p)
3478 struct thread *t = &p->threads[p->thread_id];
3479 struct instruction *ip = t->ip;
3481 TRACE("[Thread %2u] add (mh)\n", p->thread_id);
3491 instr_alu_add_hm_exec(struct rte_swx_pipeline *p)
3493 struct thread *t = &p->threads[p->thread_id];
3494 struct instruction *ip = t->ip;
3496 TRACE("[Thread %2u] add (hm)\n", p->thread_id);
3506 instr_alu_add_hh_exec(struct rte_swx_pipeline *p)
3508 struct thread *t = &p->threads[p->thread_id];
3509 struct instruction *ip = t->ip;
3511 TRACE("[Thread %2u] add (hh)\n", p->thread_id);
3521 instr_alu_add_mi_exec(struct rte_swx_pipeline *p)
3523 struct thread *t = &p->threads[p->thread_id];
3524 struct instruction *ip = t->ip;
3526 TRACE("[Thread %2u] add (mi)\n", p->thread_id);
3536 instr_alu_add_hi_exec(struct rte_swx_pipeline *p)
3538 struct thread *t = &p->threads[p->thread_id];
3539 struct instruction *ip = t->ip;
3541 TRACE("[Thread %2u] add (hi)\n", p->thread_id);
3551 instr_alu_sub_exec(struct rte_swx_pipeline *p)
3553 struct thread *t = &p->threads[p->thread_id];
3554 struct instruction *ip = t->ip;
3556 TRACE("[Thread %2u] sub\n", p->thread_id);
3566 instr_alu_sub_mh_exec(struct rte_swx_pipeline *p)
3568 struct thread *t = &p->threads[p->thread_id];
3569 struct instruction *ip = t->ip;
3571 TRACE("[Thread %2u] sub (mh)\n", p->thread_id);
3581 instr_alu_sub_hm_exec(struct rte_swx_pipeline *p)
3583 struct thread *t = &p->threads[p->thread_id];
3584 struct instruction *ip = t->ip;
3586 TRACE("[Thread %2u] sub (hm)\n", p->thread_id);
3596 instr_alu_sub_hh_exec(struct rte_swx_pipeline *p)
3598 struct thread *t = &p->threads[p->thread_id];
3599 struct instruction *ip = t->ip;
3601 TRACE("[Thread %2u] sub (hh)\n", p->thread_id);
3611 instr_alu_sub_mi_exec(struct rte_swx_pipeline *p)
3613 struct thread *t = &p->threads[p->thread_id];
3614 struct instruction *ip = t->ip;
3616 TRACE("[Thread %2u] sub (mi)\n", p->thread_id);
3626 instr_alu_sub_hi_exec(struct rte_swx_pipeline *p)
3628 struct thread *t = &p->threads[p->thread_id];
3629 struct instruction *ip = t->ip;
3631 TRACE("[Thread %2u] sub (hi)\n", p->thread_id);
3641 instr_alu_shl_exec(struct rte_swx_pipeline *p)
3643 struct thread *t = &p->threads[p->thread_id];
3644 struct instruction *ip = t->ip;
3646 TRACE("[Thread %2u] shl\n", p->thread_id);
3656 instr_alu_shl_mh_exec(struct rte_swx_pipeline *p)
3658 struct thread *t = &p->threads[p->thread_id];
3659 struct instruction *ip = t->ip;
3661 TRACE("[Thread %2u] shl (mh)\n", p->thread_id);
3671 instr_alu_shl_hm_exec(struct rte_swx_pipeline *p)
3673 struct thread *t = &p->threads[p->thread_id];
3674 struct instruction *ip = t->ip;
3676 TRACE("[Thread %2u] shl (hm)\n", p->thread_id);
3686 instr_alu_shl_hh_exec(struct rte_swx_pipeline *p)
3688 struct thread *t = &p->threads[p->thread_id];
3689 struct instruction *ip = t->ip;
3691 TRACE("[Thread %2u] shl (hh)\n", p->thread_id);
3701 instr_alu_shl_mi_exec(struct rte_swx_pipeline *p)
3703 struct thread *t = &p->threads[p->thread_id];
3704 struct instruction *ip = t->ip;
3706 TRACE("[Thread %2u] shl (mi)\n", p->thread_id);
3716 instr_alu_shl_hi_exec(struct rte_swx_pipeline *p)
3718 struct thread *t = &p->threads[p->thread_id];
3719 struct instruction *ip = t->ip;
3721 TRACE("[Thread %2u] shl (hi)\n", p->thread_id);
3731 instr_alu_shr_exec(struct rte_swx_pipeline *p)
3733 struct thread *t = &p->threads[p->thread_id];
3734 struct instruction *ip = t->ip;
3736 TRACE("[Thread %2u] shr\n", p->thread_id);
3746 instr_alu_shr_mh_exec(struct rte_swx_pipeline *p)
3748 struct thread *t = &p->threads[p->thread_id];
3749 struct instruction *ip = t->ip;
3751 TRACE("[Thread %2u] shr (mh)\n", p->thread_id);
3761 instr_alu_shr_hm_exec(struct rte_swx_pipeline *p)
3763 struct thread *t = &p->threads[p->thread_id];
3764 struct instruction *ip = t->ip;
3766 TRACE("[Thread %2u] shr (hm)\n", p->thread_id);
3776 instr_alu_shr_hh_exec(struct rte_swx_pipeline *p)
3778 struct thread *t = &p->threads[p->thread_id];
3779 struct instruction *ip = t->ip;
3781 TRACE("[Thread %2u] shr (hh)\n", p->thread_id);
3791 instr_alu_shr_mi_exec(struct rte_swx_pipeline *p)
3793 struct thread *t = &p->threads[p->thread_id];
3794 struct instruction *ip = t->ip;
3796 TRACE("[Thread %2u] shr (mi)\n", p->thread_id);
3806 instr_alu_shr_hi_exec(struct rte_swx_pipeline *p)
3808 struct thread *t = &p->threads[p->thread_id];
3809 struct instruction *ip = t->ip;
3811 TRACE("[Thread %2u] shr (hi)\n", p->thread_id);
3821 instr_alu_and_exec(struct rte_swx_pipeline *p)
3823 struct thread *t = &p->threads[p->thread_id];
3824 struct instruction *ip = t->ip;
3826 TRACE("[Thread %2u] and\n", p->thread_id);
3836 instr_alu_and_s_exec(struct rte_swx_pipeline *p)
3838 struct thread *t = &p->threads[p->thread_id];
3839 struct instruction *ip = t->ip;
3841 TRACE("[Thread %2u] and (s)\n", p->thread_id);
3851 instr_alu_and_i_exec(struct rte_swx_pipeline *p)
3853 struct thread *t = &p->threads[p->thread_id];
3854 struct instruction *ip = t->ip;
3856 TRACE("[Thread %2u] and (i)\n", p->thread_id);
3866 instr_alu_or_exec(struct rte_swx_pipeline *p)
3868 struct thread *t = &p->threads[p->thread_id];
3869 struct instruction *ip = t->ip;
3871 TRACE("[Thread %2u] or\n", p->thread_id);
3881 instr_alu_or_s_exec(struct rte_swx_pipeline *p)
3883 struct thread *t = &p->threads[p->thread_id];
3884 struct instruction *ip = t->ip;
3886 TRACE("[Thread %2u] or (s)\n", p->thread_id);
3896 instr_alu_or_i_exec(struct rte_swx_pipeline *p)
3898 struct thread *t = &p->threads[p->thread_id];
3899 struct instruction *ip = t->ip;
3901 TRACE("[Thread %2u] or (i)\n", p->thread_id);
3911 instr_alu_xor_exec(struct rte_swx_pipeline *p)
3913 struct thread *t = &p->threads[p->thread_id];
3914 struct instruction *ip = t->ip;
3916 TRACE("[Thread %2u] xor\n", p->thread_id);
3926 instr_alu_xor_s_exec(struct rte_swx_pipeline *p)
3928 struct thread *t = &p->threads[p->thread_id];
3929 struct instruction *ip = t->ip;
3931 TRACE("[Thread %2u] xor (s)\n", p->thread_id);
3941 instr_alu_xor_i_exec(struct rte_swx_pipeline *p)
3943 struct thread *t = &p->threads[p->thread_id];
3944 struct instruction *ip = t->ip;
3946 TRACE("[Thread %2u] xor (i)\n", p->thread_id);
3956 instr_alu_ckadd_field_exec(struct rte_swx_pipeline *p)
3958 struct thread *t = &p->threads[p->thread_id];
3959 struct instruction *ip = t->ip;
3960 uint8_t *dst_struct, *src_struct;
3961 uint16_t *dst16_ptr, dst;
3962 uint64_t *src64_ptr, src64, src64_mask, src;
3965 TRACE("[Thread %2u] ckadd (field)\n", p->thread_id);
3968 dst_struct = t->structs[ip->alu.dst.struct_id];
3969 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
3972 src_struct = t->structs[ip->alu.src.struct_id];
3973 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
3975 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
3976 src = src64 & src64_mask;
3981 /* The first input (r) is a 16-bit number. The second and the third
3982 * inputs are 32-bit numbers. In the worst case scenario, the sum of the
3983 * three numbers (output r) is a 34-bit number.
3985 r += (src >> 32) + (src & 0xFFFFFFFF);
3987 /* The first input is a 16-bit number. The second input is an 18-bit
3988 * number. In the worst case scenario, the sum of the two numbers is a
3991 r = (r & 0xFFFF) + (r >> 16);
3993 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
3994 * a 3-bit number (0 .. 7). Their sum is a 17-bit number (0 .. 0x10006).
3996 r = (r & 0xFFFF) + (r >> 16);
3998 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
3999 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
4000 * 0x10006), the output r is (0 .. 7). So no carry bit can be generated,
4001 * therefore the output r is always a 16-bit number.
4003 r = (r & 0xFFFF) + (r >> 16);
4008 *dst16_ptr = (uint16_t)r;
4015 instr_alu_cksub_field_exec(struct rte_swx_pipeline *p)
4017 struct thread *t = &p->threads[p->thread_id];
4018 struct instruction *ip = t->ip;
4019 uint8_t *dst_struct, *src_struct;
4020 uint16_t *dst16_ptr, dst;
4021 uint64_t *src64_ptr, src64, src64_mask, src;
4024 TRACE("[Thread %2u] cksub (field)\n", p->thread_id);
4027 dst_struct = t->structs[ip->alu.dst.struct_id];
4028 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
4031 src_struct = t->structs[ip->alu.src.struct_id];
4032 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
4034 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
4035 src = src64 & src64_mask;
4040 /* Subtraction in 1's complement arithmetic (i.e. a '- b) is the same as
4041 * the following sequence of operations in 2's complement arithmetic:
4042 * a '- b = (a - b) % 0xFFFF.
4044 * In order to prevent an underflow for the below subtraction, in which
4045 * a 33-bit number (the subtrahend) is taken out of a 16-bit number (the
4046 * minuend), we first add a multiple of the 0xFFFF modulus to the
4047 * minuend. The number we add to the minuend needs to be a 34-bit number
4048 * or higher, so for readability reasons we picked the 36-bit multiple.
4049 * We are effectively turning the 16-bit minuend into a 36-bit number:
4050 * (a - b) % 0xFFFF = (a + 0xFFFF00000 - b) % 0xFFFF.
4052 r += 0xFFFF00000ULL; /* The output r is a 36-bit number. */
4054 /* A 33-bit number is subtracted from a 36-bit number (the input r). The
4055 * result (the output r) is a 36-bit number.
4057 r -= (src >> 32) + (src & 0xFFFFFFFF);
4059 /* The first input is a 16-bit number. The second input is a 20-bit
4060 * number. Their sum is a 21-bit number.
4062 r = (r & 0xFFFF) + (r >> 16);
4064 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
4065 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1001E).
4067 r = (r & 0xFFFF) + (r >> 16);
4069 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
4070 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
4071 * 0x1001E), the output r is (0 .. 31). So no carry bit can be
4072 * generated, therefore the output r is always a 16-bit number.
4074 r = (r & 0xFFFF) + (r >> 16);
4079 *dst16_ptr = (uint16_t)r;
4086 instr_alu_ckadd_struct20_exec(struct rte_swx_pipeline *p)
4088 struct thread *t = &p->threads[p->thread_id];
4089 struct instruction *ip = t->ip;
4090 uint8_t *dst_struct, *src_struct;
4091 uint16_t *dst16_ptr;
4092 uint32_t *src32_ptr;
4095 TRACE("[Thread %2u] ckadd (struct of 20 bytes)\n", p->thread_id);
4098 dst_struct = t->structs[ip->alu.dst.struct_id];
4099 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
4101 src_struct = t->structs[ip->alu.src.struct_id];
4102 src32_ptr = (uint32_t *)&src_struct[0];
4104 r0 = src32_ptr[0]; /* r0 is a 32-bit number. */
4105 r1 = src32_ptr[1]; /* r1 is a 32-bit number. */
4106 r0 += src32_ptr[2]; /* The output r0 is a 33-bit number. */
4107 r1 += src32_ptr[3]; /* The output r1 is a 33-bit number. */
4108 r0 += r1 + src32_ptr[4]; /* The output r0 is a 35-bit number. */
4110 /* The first input is a 16-bit number. The second input is a 19-bit
4111 * number. Their sum is a 20-bit number.
4113 r0 = (r0 & 0xFFFF) + (r0 >> 16);
4115 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
4116 * a 4-bit number (0 .. 15). The sum is a 17-bit number (0 .. 0x1000E).
4118 r0 = (r0 & 0xFFFF) + (r0 >> 16);
4120 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
4121 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
4122 * 0x1000E), the output r is (0 .. 15). So no carry bit can be
4123 * generated, therefore the output r is always a 16-bit number.
4125 r0 = (r0 & 0xFFFF) + (r0 >> 16);
4128 r0 = r0 ? r0 : 0xFFFF;
4130 *dst16_ptr = (uint16_t)r0;
4137 instr_alu_ckadd_struct_exec(struct rte_swx_pipeline *p)
4139 struct thread *t = &p->threads[p->thread_id];
4140 struct instruction *ip = t->ip;
4141 uint8_t *dst_struct, *src_struct;
4142 uint16_t *dst16_ptr;
4143 uint32_t *src32_ptr;
4147 TRACE("[Thread %2u] ckadd (struct)\n", p->thread_id);
4150 dst_struct = t->structs[ip->alu.dst.struct_id];
4151 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
4153 src_struct = t->structs[ip->alu.src.struct_id];
4154 src32_ptr = (uint32_t *)&src_struct[0];
4156 /* The max number of 32-bit words in a 256-byte header is 8 = 2^3.
4157 * Therefore, in the worst case scenario, a 35-bit number is added to a
4158 * 16-bit number (the input r), so the output r is 36-bit number.
4160 for (i = 0; i < ip->alu.src.n_bits / 32; i++, src32_ptr++)
4163 /* The first input is a 16-bit number. The second input is a 20-bit
4164 * number. Their sum is a 21-bit number.
4166 r = (r & 0xFFFF) + (r >> 16);
4168 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
4169 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1000E).
4171 r = (r & 0xFFFF) + (r >> 16);
4173 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
4174 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
4175 * 0x1001E), the output r is (0 .. 31). So no carry bit can be
4176 * generated, therefore the output r is always a 16-bit number.
4178 r = (r & 0xFFFF) + (r >> 16);
4183 *dst16_ptr = (uint16_t)r;
4189 #define RTE_SWX_INSTRUCTION_TOKENS_MAX 16
4192 instr_translate(struct rte_swx_pipeline *p,
4193 struct action *action,
4195 struct instruction *instr,
4196 struct instruction_data *data)
4198 char *tokens[RTE_SWX_INSTRUCTION_TOKENS_MAX];
4199 int n_tokens = 0, tpos = 0;
4201 /* Parse the instruction string into tokens. */
4205 token = strtok_r(string, " \t\v", &string);
4209 CHECK(n_tokens < RTE_SWX_INSTRUCTION_TOKENS_MAX, EINVAL);
4211 tokens[n_tokens] = token;
4215 CHECK(n_tokens, EINVAL);
4217 /* Handle the optional instruction label. */
4218 if ((n_tokens >= 2) && !strcmp(tokens[1], ":")) {
4219 strcpy(data->label, tokens[0]);
4222 CHECK(n_tokens - tpos, EINVAL);
4225 /* Identify the instruction type. */
4226 if (!strcmp(tokens[tpos], "rx"))
4227 return instr_rx_translate(p,
4234 if (!strcmp(tokens[tpos], "tx"))
4235 return instr_tx_translate(p,
4242 if (!strcmp(tokens[tpos], "extract"))
4243 return instr_hdr_extract_translate(p,
4250 if (!strcmp(tokens[tpos], "emit"))
4251 return instr_hdr_emit_translate(p,
4258 if (!strcmp(tokens[tpos], "validate"))
4259 return instr_hdr_validate_translate(p,
4266 if (!strcmp(tokens[tpos], "invalidate"))
4267 return instr_hdr_invalidate_translate(p,
4274 if (!strcmp(tokens[tpos], "mov"))
4275 return instr_mov_translate(p,
4282 if (!strcmp(tokens[tpos], "dma"))
4283 return instr_dma_translate(p,
4290 if (!strcmp(tokens[tpos], "add"))
4291 return instr_alu_add_translate(p,
4298 if (!strcmp(tokens[tpos], "sub"))
4299 return instr_alu_sub_translate(p,
4306 if (!strcmp(tokens[tpos], "ckadd"))
4307 return instr_alu_ckadd_translate(p,
4314 if (!strcmp(tokens[tpos], "cksub"))
4315 return instr_alu_cksub_translate(p,
4322 if (!strcmp(tokens[tpos], "and"))
4323 return instr_alu_and_translate(p,
4330 if (!strcmp(tokens[tpos], "or"))
4331 return instr_alu_or_translate(p,
4338 if (!strcmp(tokens[tpos], "xor"))
4339 return instr_alu_xor_translate(p,
4346 if (!strcmp(tokens[tpos], "shl"))
4347 return instr_alu_shl_translate(p,
4354 if (!strcmp(tokens[tpos], "shr"))
4355 return instr_alu_shr_translate(p,
4362 if (!strcmp(tokens[tpos], "table"))
4363 return instr_table_translate(p,
4374 label_is_used(struct instruction_data *data, uint32_t n, const char *label)
4376 uint32_t count = 0, i;
4381 for (i = 0; i < n; i++)
4382 if (!strcmp(label, data[i].jmp_label))
4389 instr_label_check(struct instruction_data *instruction_data,
4390 uint32_t n_instructions)
4394 /* Check that all instruction labels are unique. */
4395 for (i = 0; i < n_instructions; i++) {
4396 struct instruction_data *data = &instruction_data[i];
4397 char *label = data->label;
4403 for (j = i + 1; j < n_instructions; j++)
4404 CHECK(strcmp(label, data[j].label), EINVAL);
4407 /* Get users for each instruction label. */
4408 for (i = 0; i < n_instructions; i++) {
4409 struct instruction_data *data = &instruction_data[i];
4410 char *label = data->label;
4412 data->n_users = label_is_used(instruction_data,
4421 instruction_config(struct rte_swx_pipeline *p,
4423 const char **instructions,
4424 uint32_t n_instructions)
4426 struct instruction *instr = NULL;
4427 struct instruction_data *data = NULL;
4428 char *string = NULL;
4432 CHECK(n_instructions, EINVAL);
4433 CHECK(instructions, EINVAL);
4434 for (i = 0; i < n_instructions; i++)
4435 CHECK(instructions[i], EINVAL);
4437 /* Memory allocation. */
4438 instr = calloc(n_instructions, sizeof(struct instruction));
4444 data = calloc(n_instructions, sizeof(struct instruction_data));
4450 for (i = 0; i < n_instructions; i++) {
4451 string = strdup(instructions[i]);
4457 err = instr_translate(p, a, string, &instr[i], &data[i]);
4464 err = instr_label_check(data, n_instructions);
4471 a->instructions = instr;
4472 a->n_instructions = n_instructions;
4474 p->instructions = instr;
4475 p->n_instructions = n_instructions;
4487 typedef void (*instr_exec_t)(struct rte_swx_pipeline *);
4489 static instr_exec_t instruction_table[] = {
4490 [INSTR_RX] = instr_rx_exec,
4491 [INSTR_TX] = instr_tx_exec,
4493 [INSTR_HDR_EXTRACT] = instr_hdr_extract_exec,
4494 [INSTR_HDR_EXTRACT2] = instr_hdr_extract2_exec,
4495 [INSTR_HDR_EXTRACT3] = instr_hdr_extract3_exec,
4496 [INSTR_HDR_EXTRACT4] = instr_hdr_extract4_exec,
4497 [INSTR_HDR_EXTRACT5] = instr_hdr_extract5_exec,
4498 [INSTR_HDR_EXTRACT6] = instr_hdr_extract6_exec,
4499 [INSTR_HDR_EXTRACT7] = instr_hdr_extract7_exec,
4500 [INSTR_HDR_EXTRACT8] = instr_hdr_extract8_exec,
4502 [INSTR_HDR_EMIT] = instr_hdr_emit_exec,
4503 [INSTR_HDR_EMIT_TX] = instr_hdr_emit_tx_exec,
4504 [INSTR_HDR_EMIT2_TX] = instr_hdr_emit2_tx_exec,
4505 [INSTR_HDR_EMIT3_TX] = instr_hdr_emit3_tx_exec,
4506 [INSTR_HDR_EMIT4_TX] = instr_hdr_emit4_tx_exec,
4507 [INSTR_HDR_EMIT5_TX] = instr_hdr_emit5_tx_exec,
4508 [INSTR_HDR_EMIT6_TX] = instr_hdr_emit6_tx_exec,
4509 [INSTR_HDR_EMIT7_TX] = instr_hdr_emit7_tx_exec,
4510 [INSTR_HDR_EMIT8_TX] = instr_hdr_emit8_tx_exec,
4512 [INSTR_HDR_VALIDATE] = instr_hdr_validate_exec,
4513 [INSTR_HDR_INVALIDATE] = instr_hdr_invalidate_exec,
4515 [INSTR_MOV] = instr_mov_exec,
4516 [INSTR_MOV_S] = instr_mov_s_exec,
4517 [INSTR_MOV_I] = instr_mov_i_exec,
4519 [INSTR_DMA_HT] = instr_dma_ht_exec,
4520 [INSTR_DMA_HT2] = instr_dma_ht2_exec,
4521 [INSTR_DMA_HT3] = instr_dma_ht3_exec,
4522 [INSTR_DMA_HT4] = instr_dma_ht4_exec,
4523 [INSTR_DMA_HT5] = instr_dma_ht5_exec,
4524 [INSTR_DMA_HT6] = instr_dma_ht6_exec,
4525 [INSTR_DMA_HT7] = instr_dma_ht7_exec,
4526 [INSTR_DMA_HT8] = instr_dma_ht8_exec,
4528 [INSTR_ALU_ADD] = instr_alu_add_exec,
4529 [INSTR_ALU_ADD_MH] = instr_alu_add_mh_exec,
4530 [INSTR_ALU_ADD_HM] = instr_alu_add_hm_exec,
4531 [INSTR_ALU_ADD_HH] = instr_alu_add_hh_exec,
4532 [INSTR_ALU_ADD_MI] = instr_alu_add_mi_exec,
4533 [INSTR_ALU_ADD_HI] = instr_alu_add_hi_exec,
4535 [INSTR_ALU_SUB] = instr_alu_sub_exec,
4536 [INSTR_ALU_SUB_MH] = instr_alu_sub_mh_exec,
4537 [INSTR_ALU_SUB_HM] = instr_alu_sub_hm_exec,
4538 [INSTR_ALU_SUB_HH] = instr_alu_sub_hh_exec,
4539 [INSTR_ALU_SUB_MI] = instr_alu_sub_mi_exec,
4540 [INSTR_ALU_SUB_HI] = instr_alu_sub_hi_exec,
4542 [INSTR_ALU_CKADD_FIELD] = instr_alu_ckadd_field_exec,
4543 [INSTR_ALU_CKADD_STRUCT] = instr_alu_ckadd_struct_exec,
4544 [INSTR_ALU_CKADD_STRUCT20] = instr_alu_ckadd_struct20_exec,
4545 [INSTR_ALU_CKSUB_FIELD] = instr_alu_cksub_field_exec,
4547 [INSTR_ALU_AND] = instr_alu_and_exec,
4548 [INSTR_ALU_AND_S] = instr_alu_and_s_exec,
4549 [INSTR_ALU_AND_I] = instr_alu_and_i_exec,
4551 [INSTR_ALU_OR] = instr_alu_or_exec,
4552 [INSTR_ALU_OR_S] = instr_alu_or_s_exec,
4553 [INSTR_ALU_OR_I] = instr_alu_or_i_exec,
4555 [INSTR_ALU_XOR] = instr_alu_xor_exec,
4556 [INSTR_ALU_XOR_S] = instr_alu_xor_s_exec,
4557 [INSTR_ALU_XOR_I] = instr_alu_xor_i_exec,
4559 [INSTR_ALU_SHL] = instr_alu_shl_exec,
4560 [INSTR_ALU_SHL_MH] = instr_alu_shl_mh_exec,
4561 [INSTR_ALU_SHL_HM] = instr_alu_shl_hm_exec,
4562 [INSTR_ALU_SHL_HH] = instr_alu_shl_hh_exec,
4563 [INSTR_ALU_SHL_MI] = instr_alu_shl_mi_exec,
4564 [INSTR_ALU_SHL_HI] = instr_alu_shl_hi_exec,
4566 [INSTR_ALU_SHR] = instr_alu_shr_exec,
4567 [INSTR_ALU_SHR_MH] = instr_alu_shr_mh_exec,
4568 [INSTR_ALU_SHR_HM] = instr_alu_shr_hm_exec,
4569 [INSTR_ALU_SHR_HH] = instr_alu_shr_hh_exec,
4570 [INSTR_ALU_SHR_MI] = instr_alu_shr_mi_exec,
4571 [INSTR_ALU_SHR_HI] = instr_alu_shr_hi_exec,
4573 [INSTR_TABLE] = instr_table_exec,
4577 instr_exec(struct rte_swx_pipeline *p)
4579 struct thread *t = &p->threads[p->thread_id];
4580 struct instruction *ip = t->ip;
4581 instr_exec_t instr = instruction_table[ip->type];
4589 static struct action *
4590 action_find(struct rte_swx_pipeline *p, const char *name)
4592 struct action *elem;
4597 TAILQ_FOREACH(elem, &p->actions, node)
4598 if (strcmp(elem->name, name) == 0)
4604 static struct field *
4605 action_field_find(struct action *a, const char *name)
4607 return a->st ? struct_type_field_find(a->st, name) : NULL;
4610 static struct field *
4611 action_field_parse(struct action *action, const char *name)
4613 if (name[0] != 't' || name[1] != '.')
4616 return action_field_find(action, &name[2]);
4620 rte_swx_pipeline_action_config(struct rte_swx_pipeline *p,
4622 const char *args_struct_type_name,
4623 const char **instructions,
4624 uint32_t n_instructions)
4626 struct struct_type *args_struct_type;
4632 CHECK_NAME(name, EINVAL);
4633 CHECK(!action_find(p, name), EEXIST);
4635 if (args_struct_type_name) {
4636 CHECK_NAME(args_struct_type_name, EINVAL);
4637 args_struct_type = struct_type_find(p, args_struct_type_name);
4638 CHECK(args_struct_type, EINVAL);
4640 args_struct_type = NULL;
4643 /* Node allocation. */
4644 a = calloc(1, sizeof(struct action));
4647 /* Node initialization. */
4648 strcpy(a->name, name);
4649 a->st = args_struct_type;
4650 a->id = p->n_actions;
4652 /* Instruction translation. */
4653 err = instruction_config(p, a, instructions, n_instructions);
4659 /* Node add to tailq. */
4660 TAILQ_INSERT_TAIL(&p->actions, a, node);
4667 action_build(struct rte_swx_pipeline *p)
4669 struct action *action;
4671 p->action_instructions = calloc(p->n_actions,
4672 sizeof(struct instruction *));
4673 CHECK(p->action_instructions, ENOMEM);
4675 TAILQ_FOREACH(action, &p->actions, node)
4676 p->action_instructions[action->id] = action->instructions;
4682 action_build_free(struct rte_swx_pipeline *p)
4684 free(p->action_instructions);
4685 p->action_instructions = NULL;
4689 action_free(struct rte_swx_pipeline *p)
4691 action_build_free(p);
4694 struct action *action;
4696 action = TAILQ_FIRST(&p->actions);
4700 TAILQ_REMOVE(&p->actions, action, node);
4701 free(action->instructions);
4709 static struct table_type *
4710 table_type_find(struct rte_swx_pipeline *p, const char *name)
4712 struct table_type *elem;
4714 TAILQ_FOREACH(elem, &p->table_types, node)
4715 if (strcmp(elem->name, name) == 0)
4721 static struct table_type *
4722 table_type_resolve(struct rte_swx_pipeline *p,
4723 const char *recommended_type_name,
4724 enum rte_swx_table_match_type match_type)
4726 struct table_type *elem;
4728 /* Only consider the recommended type if the match type is correct. */
4729 if (recommended_type_name)
4730 TAILQ_FOREACH(elem, &p->table_types, node)
4731 if (!strcmp(elem->name, recommended_type_name) &&
4732 (elem->match_type == match_type))
4735 /* Ignore the recommended type and get the first element with this match
4738 TAILQ_FOREACH(elem, &p->table_types, node)
4739 if (elem->match_type == match_type)
4745 static struct table *
4746 table_find(struct rte_swx_pipeline *p, const char *name)
4750 TAILQ_FOREACH(elem, &p->tables, node)
4751 if (strcmp(elem->name, name) == 0)
4757 static struct table *
4758 table_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
4760 struct table *table = NULL;
4762 TAILQ_FOREACH(table, &p->tables, node)
4763 if (table->id == id)
4770 rte_swx_pipeline_table_type_register(struct rte_swx_pipeline *p,
4772 enum rte_swx_table_match_type match_type,
4773 struct rte_swx_table_ops *ops)
4775 struct table_type *elem;
4779 CHECK_NAME(name, EINVAL);
4780 CHECK(!table_type_find(p, name), EEXIST);
4783 CHECK(ops->create, EINVAL);
4784 CHECK(ops->lkp, EINVAL);
4785 CHECK(ops->free, EINVAL);
4787 /* Node allocation. */
4788 elem = calloc(1, sizeof(struct table_type));
4789 CHECK(elem, ENOMEM);
4791 /* Node initialization. */
4792 strcpy(elem->name, name);
4793 elem->match_type = match_type;
4794 memcpy(&elem->ops, ops, sizeof(*ops));
4796 /* Node add to tailq. */
4797 TAILQ_INSERT_TAIL(&p->table_types, elem, node);
4802 static enum rte_swx_table_match_type
4803 table_match_type_resolve(struct rte_swx_match_field_params *fields,
4808 for (i = 0; i < n_fields; i++)
4809 if (fields[i].match_type != RTE_SWX_TABLE_MATCH_EXACT)
4813 return RTE_SWX_TABLE_MATCH_EXACT;
4815 if ((i == n_fields - 1) &&
4816 (fields[i].match_type == RTE_SWX_TABLE_MATCH_LPM))
4817 return RTE_SWX_TABLE_MATCH_LPM;
4819 return RTE_SWX_TABLE_MATCH_WILDCARD;
4823 rte_swx_pipeline_table_config(struct rte_swx_pipeline *p,
4825 struct rte_swx_pipeline_table_params *params,
4826 const char *recommended_table_type_name,
4830 struct table_type *type;
4832 struct action *default_action;
4833 struct header *header = NULL;
4835 uint32_t offset_prev = 0, action_data_size_max = 0, i;
4839 CHECK_NAME(name, EINVAL);
4840 CHECK(!table_find(p, name), EEXIST);
4842 CHECK(params, EINVAL);
4845 CHECK(!params->n_fields || params->fields, EINVAL);
4846 for (i = 0; i < params->n_fields; i++) {
4847 struct rte_swx_match_field_params *field = ¶ms->fields[i];
4849 struct field *hf, *mf;
4852 CHECK_NAME(field->name, EINVAL);
4854 hf = header_field_parse(p, field->name, &h);
4855 mf = metadata_field_parse(p, field->name);
4856 CHECK(hf || mf, EINVAL);
4858 offset = hf ? hf->offset : mf->offset;
4861 is_header = hf ? 1 : 0;
4862 header = hf ? h : NULL;
4863 offset_prev = offset;
4868 CHECK((is_header && hf && (h->id == header->id)) ||
4869 (!is_header && mf), EINVAL);
4871 CHECK(offset > offset_prev, EINVAL);
4872 offset_prev = offset;
4875 /* Action checks. */
4876 CHECK(params->n_actions, EINVAL);
4877 CHECK(params->action_names, EINVAL);
4878 for (i = 0; i < params->n_actions; i++) {
4879 const char *action_name = params->action_names[i];
4881 uint32_t action_data_size;
4883 CHECK(action_name, EINVAL);
4885 a = action_find(p, action_name);
4888 action_data_size = a->st ? a->st->n_bits / 8 : 0;
4889 if (action_data_size > action_data_size_max)
4890 action_data_size_max = action_data_size;
4893 CHECK(params->default_action_name, EINVAL);
4894 for (i = 0; i < p->n_actions; i++)
4895 if (!strcmp(params->action_names[i],
4896 params->default_action_name))
4898 CHECK(i < params->n_actions, EINVAL);
4899 default_action = action_find(p, params->default_action_name);
4900 CHECK((default_action->st && params->default_action_data) ||
4901 !params->default_action_data, EINVAL);
4903 /* Table type checks. */
4904 if (params->n_fields) {
4905 enum rte_swx_table_match_type match_type;
4907 match_type = table_match_type_resolve(params->fields,
4909 type = table_type_resolve(p,
4910 recommended_table_type_name,
4912 CHECK(type, EINVAL);
4917 /* Memory allocation. */
4918 t = calloc(1, sizeof(struct table));
4921 t->fields = calloc(params->n_fields, sizeof(struct match_field));
4927 t->actions = calloc(params->n_actions, sizeof(struct action *));
4934 if (action_data_size_max) {
4935 t->default_action_data = calloc(1, action_data_size_max);
4936 if (!t->default_action_data) {
4944 /* Node initialization. */
4945 strcpy(t->name, name);
4946 if (args && args[0])
4947 strcpy(t->args, args);
4950 for (i = 0; i < params->n_fields; i++) {
4951 struct rte_swx_match_field_params *field = ¶ms->fields[i];
4952 struct match_field *f = &t->fields[i];
4954 f->match_type = field->match_type;
4955 f->field = is_header ?
4956 header_field_parse(p, field->name, NULL) :
4957 metadata_field_parse(p, field->name);
4959 t->n_fields = params->n_fields;
4960 t->is_header = is_header;
4963 for (i = 0; i < params->n_actions; i++)
4964 t->actions[i] = action_find(p, params->action_names[i]);
4965 t->default_action = default_action;
4966 if (default_action->st)
4967 memcpy(t->default_action_data,
4968 params->default_action_data,
4969 default_action->st->n_bits / 8);
4970 t->n_actions = params->n_actions;
4971 t->default_action_is_const = params->default_action_is_const;
4972 t->action_data_size_max = action_data_size_max;
4975 t->id = p->n_tables;
4977 /* Node add to tailq. */
4978 TAILQ_INSERT_TAIL(&p->tables, t, node);
4984 static struct rte_swx_table_params *
4985 table_params_get(struct table *table)
4987 struct rte_swx_table_params *params;
4988 struct field *first, *last;
4990 uint32_t key_size, key_offset, action_data_size, i;
4992 /* Memory allocation. */
4993 params = calloc(1, sizeof(struct rte_swx_table_params));
4997 /* Key offset and size. */
4998 first = table->fields[0].field;
4999 last = table->fields[table->n_fields - 1].field;
5000 key_offset = first->offset / 8;
5001 key_size = (last->offset + last->n_bits - first->offset) / 8;
5003 /* Memory allocation. */
5004 key_mask = calloc(1, key_size);
5011 for (i = 0; i < table->n_fields; i++) {
5012 struct field *f = table->fields[i].field;
5013 uint32_t start = (f->offset - first->offset) / 8;
5014 size_t size = f->n_bits / 8;
5016 memset(&key_mask[start], 0xFF, size);
5019 /* Action data size. */
5020 action_data_size = 0;
5021 for (i = 0; i < table->n_actions; i++) {
5022 struct action *action = table->actions[i];
5023 uint32_t ads = action->st ? action->st->n_bits / 8 : 0;
5025 if (ads > action_data_size)
5026 action_data_size = ads;
5030 params->match_type = table->type->match_type;
5031 params->key_size = key_size;
5032 params->key_offset = key_offset;
5033 params->key_mask0 = key_mask;
5034 params->action_data_size = action_data_size;
5035 params->n_keys_max = table->size;
5041 table_params_free(struct rte_swx_table_params *params)
5046 free(params->key_mask0);
5051 table_state_build(struct rte_swx_pipeline *p)
5053 struct table *table;
5055 p->table_state = calloc(p->n_tables,
5056 sizeof(struct rte_swx_table_state));
5057 CHECK(p->table_state, ENOMEM);
5059 TAILQ_FOREACH(table, &p->tables, node) {
5060 struct rte_swx_table_state *ts = &p->table_state[table->id];
5063 struct rte_swx_table_params *params;
5066 params = table_params_get(table);
5067 CHECK(params, ENOMEM);
5069 ts->obj = table->type->ops.create(params,
5074 table_params_free(params);
5075 CHECK(ts->obj, ENODEV);
5078 /* ts->default_action_data. */
5079 if (table->action_data_size_max) {
5080 ts->default_action_data =
5081 malloc(table->action_data_size_max);
5082 CHECK(ts->default_action_data, ENOMEM);
5084 memcpy(ts->default_action_data,
5085 table->default_action_data,
5086 table->action_data_size_max);
5089 /* ts->default_action_id. */
5090 ts->default_action_id = table->default_action->id;
5097 table_state_build_free(struct rte_swx_pipeline *p)
5101 if (!p->table_state)
5104 for (i = 0; i < p->n_tables; i++) {
5105 struct rte_swx_table_state *ts = &p->table_state[i];
5106 struct table *table = table_find_by_id(p, i);
5109 if (table->type && ts->obj)
5110 table->type->ops.free(ts->obj);
5112 /* ts->default_action_data. */
5113 free(ts->default_action_data);
5116 free(p->table_state);
5117 p->table_state = NULL;
5121 table_state_free(struct rte_swx_pipeline *p)
5123 table_state_build_free(p);
5127 table_stub_lkp(void *table __rte_unused,
5128 void *mailbox __rte_unused,
5129 uint8_t **key __rte_unused,
5130 uint64_t *action_id __rte_unused,
5131 uint8_t **action_data __rte_unused,
5135 return 1; /* DONE. */
/* Build the per-thread table runtime: for each of the
 * RTE_SWX_PIPELINE_THREADS_MAX threads, allocate a table_runtime array
 * (one entry per table) and wire up each entry's lookup function, mailbox
 * and key pointer. Error paths rely on CHECK() (body truncated in this
 * view); partial allocations are reclaimed by table_build_free(). */
5139 table_build(struct rte_swx_pipeline *p)
5143 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
5144 struct thread *t = &p->threads[i];
5145 struct table *table;
5147 t->tables = calloc(p->n_tables, sizeof(struct table_runtime));
5148 CHECK(t->tables, ENOMEM);
5150 TAILQ_FOREACH(table, &p->tables, node) {
5151 struct table_runtime *r = &t->tables[table->id];
/* Real table: per-thread mailbox sized by the table type,
 * lookup dispatched through the type's lkp op. The branch
 * structure for stub tables is on elided lines. */
5156 size = table->type->ops.mailbox_size_get();
5159 r->func = table->type->ops.lkp;
5163 r->mailbox = calloc(1, size);
5164 CHECK(r->mailbox, ENOMEM);
/* Key source: either the matched header's struct or the
 * thread's metadata struct, per the table definition. */
5168 r->key = table->is_header ?
5169 &t->structs[table->header->struct_id] :
5170 &t->structs[p->metadata_struct_id];
/* Stub table: no mailbox/key, lookup always misses. */
5172 r->func = table_stub_lkp;
/* Free the per-thread table runtime arrays allocated by table_build().
 * The per-entry mailbox free is on lines elided from this view —
 * NOTE(review): confirm r->mailbox is released inside the inner loop. */
5181 table_build_free(struct rte_swx_pipeline *p)
5185 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
5186 struct thread *t = &p->threads[i];
5192 for (j = 0; j < p->n_tables; j++) {
5193 struct table_runtime *r = &t->tables[j];
/* Destroy all build-time table structures: first the per-thread runtime
 * (table_build_free), then every table on p->tables (with its actions
 * array and default action data), then every registered table type.
 * Uses the usual TAILQ_FIRST/TAILQ_REMOVE drain pattern; loop framing
 * lines are elided from this view. */
5204 table_free(struct rte_swx_pipeline *p)
5206 table_build_free(p);
/* Drain the table list, freeing per-table allocations. */
5212 elem = TAILQ_FIRST(&p->tables);
5216 TAILQ_REMOVE(&p->tables, elem, node);
5218 free(elem->actions);
5219 free(elem->default_action_data);
/* Drain the table type list. */
5225 struct table_type *elem;
5227 elem = TAILQ_FIRST(&p->table_types);
5231 TAILQ_REMOVE(&p->table_types, elem, node);
/* Public API: allocate and initialize an empty pipeline object.
 *
 * @param p          Output: on success, set to the new pipeline (the
 *                   store through *p is on a line elided from this view).
 * @param numa_node  NUMA node hint recorded on the pipeline; its use by
 *                   later allocations is not visible here.
 *
 * Initializes every registry TAILQ to empty so subsequent registration
 * calls can append safely. Returns via CHECK() on allocation failure
 * (macro body truncated in this view). */
5240 rte_swx_pipeline_config(struct rte_swx_pipeline **p, int numa_node)
5242 struct rte_swx_pipeline *pipeline;
5244 /* Check input parameters. */
5247 /* Memory allocation. */
5248 pipeline = calloc(1, sizeof(struct rte_swx_pipeline));
5249 CHECK(pipeline, ENOMEM);
5251 /* Initialization. */
5252 TAILQ_INIT(&pipeline->struct_types);
5253 TAILQ_INIT(&pipeline->port_in_types);
5254 TAILQ_INIT(&pipeline->ports_in);
5255 TAILQ_INIT(&pipeline->port_out_types);
5256 TAILQ_INIT(&pipeline->ports_out);
5257 TAILQ_INIT(&pipeline->extern_types);
5258 TAILQ_INIT(&pipeline->extern_objs);
5259 TAILQ_INIT(&pipeline->extern_funcs);
5260 TAILQ_INIT(&pipeline->headers);
5261 TAILQ_INIT(&pipeline->actions);
5262 TAILQ_INIT(&pipeline->table_types);
5263 TAILQ_INIT(&pipeline->tables);
5265 pipeline->n_structs = 1; /* Struct 0 is reserved for action_data. */
5266 pipeline->numa_node = numa_node;
/* Public API: destroy a pipeline and everything it owns. Frees the
 * compiled instruction array, the table state, and the extern function
 * registry; the remaining subsystem teardown calls (tables, actions,
 * headers, ports, ...) are on lines elided from this view. */
5273 rte_swx_pipeline_free(struct rte_swx_pipeline *p)
5278 free(p->instructions);
5280 table_state_free(p);
5285 extern_func_free(p);
/* Public API: translate the textual instruction list into the pipeline's
 * executable program (instruction_config with a NULL action context means
 * these are the pipeline-level instructions, not an action's), then reset
 * every thread's instruction pointer so execution starts at the beginning
 * of the new program. Error propagation from instruction_config is on
 * lines elided from this view. */
5295 rte_swx_pipeline_instructions_config(struct rte_swx_pipeline *p,
5296 const char **instructions,
5297 uint32_t n_instructions)
5302 err = instruction_config(p, NULL, instructions, n_instructions);
5306 /* Thread instruction pointer reset. */
5307 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
5308 struct thread *t = &p->threads[i];
5310 thread_ip_reset(p, t);
/* Public API: one-shot build of all pipeline subsystems, in dependency
 * order (ports, structs, externs, headers, metadata, actions, tables,
 * table state). Build is allowed only once per pipeline (EEXIST guard).
 * On any stage failure, control reaches the error label below (status
 * checks/gotos between stages are on elided lines) and ALL stages are
 * unbuilt in strict reverse order — each *_build_free() tolerates a
 * stage that never ran. */
5317 rte_swx_pipeline_build(struct rte_swx_pipeline *p)
5322 CHECK(p->build_done == 0, EEXIST);
5324 status = port_in_build(p);
5328 status = port_out_build(p);
5332 status = struct_build(p);
5336 status = extern_obj_build(p);
5340 status = extern_func_build(p);
5344 status = header_build(p);
5348 status = metadata_build(p);
5352 status = action_build(p);
5356 status = table_build(p);
5360 status = table_state_build(p);
/* Error path: unwind every stage in reverse build order. */
5368 table_state_build_free(p);
5369 table_build_free(p);
5370 action_build_free(p);
5371 metadata_build_free(p);
5372 header_build_free(p);
5373 extern_func_build_free(p);
5374 extern_obj_build_free(p);
5375 port_out_build_free(p);
5376 port_in_build_free(p);
5377 struct_build_free(p);
/* Public API: execute n_instructions steps of the pipeline's data path.
 * The per-iteration instruction dispatch is on a line elided from this
 * view. Hot path: no parameter validation here by design. */
5383 rte_swx_pipeline_run(struct rte_swx_pipeline *p, uint32_t n_instructions)
5387 for (i = 0; i < n_instructions; i++)
/* Public API: expose the pipeline's table state array to the control
 * plane. Only valid after rte_swx_pipeline_build() (build_done guard);
 * the returned pointer aliases internal state — the pipeline retains
 * ownership. The error return on the guard is on an elided line. */
5395 rte_swx_pipeline_table_state_get(struct rte_swx_pipeline *p,
5396 struct rte_swx_table_state **table_state)
5398 if (!p || !table_state || !p->build_done)
5401 *table_state = p->table_state;
5406 rte_swx_pipeline_table_state_set(struct rte_swx_pipeline *p,
5407 struct rte_swx_table_state *table_state)
5409 if (!p || !table_state || !p->build_done)
5412 p->table_state = table_state;