/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Marvell.
 */
#include <rte_graph.h>
#include <rte_graph_worker.h>

#include "pkt_cls_priv.h"
#include "node_private.h"
11 /* Next node for each ptype, default is '0' is "pkt_drop" */
12 static const uint8_t p_nxt[256] __rte_cache_aligned = {
13 [RTE_PTYPE_L3_IPV4] = PKT_CLS_NEXT_IP4_LOOKUP,
15 [RTE_PTYPE_L3_IPV4_EXT] = PKT_CLS_NEXT_IP4_LOOKUP,
17 [RTE_PTYPE_L3_IPV4_EXT_UNKNOWN] = PKT_CLS_NEXT_IP4_LOOKUP,
19 [RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L2_ETHER] =
20 PKT_CLS_NEXT_IP4_LOOKUP,
22 [RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L2_ETHER] =
23 PKT_CLS_NEXT_IP4_LOOKUP,
25 [RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L2_ETHER] =
26 PKT_CLS_NEXT_IP4_LOOKUP,
30 pkt_cls_node_process(struct rte_graph *graph, struct rte_node *node,
31 void **objs, uint16_t nb_objs)
33 struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3, **pkts;
34 uint8_t l0, l1, l2, l3, last_type;
35 uint16_t next_index, n_left_from;
36 uint16_t held = 0, last_spec = 0;
37 struct pkt_cls_node_ctx *ctx;
38 void **to_next, **from;
41 pkts = (struct rte_mbuf **)objs;
43 n_left_from = nb_objs;
45 for (i = OBJS_PER_CLINE; i < RTE_GRAPH_BURST_SIZE; i += OBJS_PER_CLINE)
46 rte_prefetch0(&objs[i]);
48 #if RTE_GRAPH_BURST_SIZE > 64
49 for (i = 0; i < 4 && i < n_left_from; i++)
50 rte_prefetch0(pkts[i]);
53 ctx = (struct pkt_cls_node_ctx *)node->ctx;
54 last_type = ctx->l2l3_type;
55 next_index = p_nxt[last_type];
57 /* Get stream for the speculated next node */
58 to_next = rte_node_next_stream_get(graph, node,
60 while (n_left_from >= 4) {
61 #if RTE_GRAPH_BURST_SIZE > 64
62 if (likely(n_left_from > 7)) {
63 rte_prefetch0(pkts[4]);
64 rte_prefetch0(pkts[5]);
65 rte_prefetch0(pkts[6]);
66 rte_prefetch0(pkts[7]);
77 l0 = mbuf0->packet_type &
78 (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
79 l1 = mbuf1->packet_type &
80 (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
81 l2 = mbuf2->packet_type &
82 (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
83 l3 = mbuf3->packet_type &
84 (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
86 /* Check if they are destined to same
87 * next node based on l2l3 packet type.
89 uint8_t fix_spec = (last_type ^ l0) | (last_type ^ l1) |
90 (last_type ^ l2) | (last_type ^ l3);
92 if (unlikely(fix_spec)) {
93 /* Copy things successfully speculated till now */
94 rte_memcpy(to_next, from,
95 last_spec * sizeof(from[0]));
102 if (p_nxt[l0] == next_index) {
103 to_next[0] = from[0];
107 rte_node_enqueue_x1(graph, node,
112 if (p_nxt[l1] == next_index) {
113 to_next[0] = from[1];
117 rte_node_enqueue_x1(graph, node,
122 if (p_nxt[l2] == next_index) {
123 to_next[0] = from[2];
127 rte_node_enqueue_x1(graph, node,
132 if (p_nxt[l3] == next_index) {
133 to_next[0] = from[3];
137 rte_node_enqueue_x1(graph, node,
141 /* Update speculated ptype */
142 if ((last_type != l3) && (l2 == l3) &&
143 (next_index != p_nxt[l3])) {
144 /* Put the current stream for
147 rte_node_next_stream_put(graph, node,
152 /* Get next stream for new ltype */
153 next_index = p_nxt[l3];
155 to_next = rte_node_next_stream_get(graph, node,
158 } else if (next_index == p_nxt[l3]) {
168 while (n_left_from > 0) {
174 l0 = mbuf0->packet_type &
175 (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
176 if (unlikely((l0 != last_type) &&
177 (p_nxt[l0] != next_index))) {
178 /* Copy things successfully speculated till now */
179 rte_memcpy(to_next, from,
180 last_spec * sizeof(from[0]));
182 to_next += last_spec;
186 rte_node_enqueue_x1(graph, node,
194 /* !!! Home run !!! */
195 if (likely(last_spec == nb_objs)) {
196 rte_node_next_stream_move(graph, node, next_index);
201 /* Copy things successfully speculated till now */
202 rte_memcpy(to_next, from, last_spec * sizeof(from[0]));
203 rte_node_next_stream_put(graph, node, next_index, held);
205 ctx->l2l3_type = last_type;
209 /* Packet Classification Node */
210 struct rte_node_register pkt_cls_node = {
211 .process = pkt_cls_node_process,
214 .nb_edges = PKT_CLS_NEXT_MAX,
216 /* Pkt drop node starts at '0' */
217 [PKT_CLS_NEXT_PKT_DROP] = "pkt_drop",
218 [PKT_CLS_NEXT_IP4_LOOKUP] = "ip4_lookup",
221 RTE_NODE_REGISTER(pkt_cls_node);