/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2020 Marvell International Ltd.
 */
#include <errno.h>
#include <string.h>

#include <rte_debug.h>
#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_graph.h>
#include <rte_graph_worker.h>
#include <rte_mbuf.h>
#include <rte_prefetch.h>

#include "ethdev_rx_priv.h"
#include "node_private.h"
14 static struct ethdev_rx_node_main ethdev_rx_main;
16 static __rte_always_inline uint16_t
17 ethdev_rx_node_process_inline(struct rte_graph *graph, struct rte_node *node,
18 ethdev_rx_node_ctx_t *ctx)
20 uint16_t count, next_index;
24 queue = ctx->queue_id;
25 next_index = ctx->cls_next;
27 /* Get pkts from port */
28 count = rte_eth_rx_burst(port, queue, (struct rte_mbuf **)node->objs,
29 RTE_GRAPH_BURST_SIZE);
34 /* Enqueue to next node */
35 rte_node_next_stream_move(graph, node, next_index);
40 static __rte_always_inline uint16_t
41 ethdev_rx_node_process(struct rte_graph *graph, struct rte_node *node,
42 void **objs, uint16_t cnt)
44 ethdev_rx_node_ctx_t *ctx = (ethdev_rx_node_ctx_t *)node->ctx;
50 n_pkts = ethdev_rx_node_process_inline(graph, node, ctx);
54 static inline uint32_t
55 l3_ptype(uint16_t etype, uint32_t ptype)
57 ptype = ptype & ~RTE_PTYPE_L3_MASK;
58 if (etype == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4))
59 ptype |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
60 else if (etype == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6))
61 ptype |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
65 /* Callback for soft ptype parsing */
67 eth_pkt_parse_cb(uint16_t port, uint16_t queue, struct rte_mbuf **mbufs,
68 uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
70 struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3;
71 struct rte_ether_hdr *eth_hdr;
72 uint16_t etype, n_left;
73 struct rte_mbuf **pkts;
77 RTE_SET_USED(max_pkts);
78 RTE_SET_USED(user_param);
82 while (n_left >= 12) {
84 /* Prefetch next-next mbufs */
85 rte_prefetch0(pkts[8]);
86 rte_prefetch0(pkts[9]);
87 rte_prefetch0(pkts[10]);
88 rte_prefetch0(pkts[11]);
90 /* Prefetch next mbuf data */
92 rte_pktmbuf_mtod(pkts[4], struct rte_ether_hdr *));
94 rte_pktmbuf_mtod(pkts[5], struct rte_ether_hdr *));
96 rte_pktmbuf_mtod(pkts[6], struct rte_ether_hdr *));
98 rte_pktmbuf_mtod(pkts[7], struct rte_ether_hdr *));
107 /* Extract ptype of mbuf0 */
108 eth_hdr = rte_pktmbuf_mtod(mbuf0, struct rte_ether_hdr *);
109 etype = eth_hdr->ether_type;
110 mbuf0->packet_type = l3_ptype(etype, 0);
112 /* Extract ptype of mbuf1 */
113 eth_hdr = rte_pktmbuf_mtod(mbuf1, struct rte_ether_hdr *);
114 etype = eth_hdr->ether_type;
115 mbuf1->packet_type = l3_ptype(etype, 0);
117 /* Extract ptype of mbuf2 */
118 eth_hdr = rte_pktmbuf_mtod(mbuf2, struct rte_ether_hdr *);
119 etype = eth_hdr->ether_type;
120 mbuf2->packet_type = l3_ptype(etype, 0);
122 /* Extract ptype of mbuf3 */
123 eth_hdr = rte_pktmbuf_mtod(mbuf3, struct rte_ether_hdr *);
124 etype = eth_hdr->ether_type;
125 mbuf3->packet_type = l3_ptype(etype, 0);
134 /* Extract ptype of mbuf0 */
135 eth_hdr = rte_pktmbuf_mtod(mbuf0, struct rte_ether_hdr *);
136 etype = eth_hdr->ether_type;
137 mbuf0->packet_type = l3_ptype(etype, 0);
143 #define MAX_PTYPES 16
145 ethdev_ptype_setup(uint16_t port, uint16_t queue)
147 uint8_t l3_ipv4 = 0, l3_ipv6 = 0;
148 uint32_t ptypes[MAX_PTYPES];
151 /* Check IPv4 & IPv6 ptype support */
152 rc = rte_eth_dev_get_supported_ptypes(port, RTE_PTYPE_L3_MASK, ptypes,
154 for (i = 0; i < rc; i++) {
155 if (ptypes[i] & RTE_PTYPE_L3_IPV4)
157 if (ptypes[i] & RTE_PTYPE_L3_IPV6)
161 if (!l3_ipv4 || !l3_ipv6) {
162 node_info("ethdev_rx",
163 "Enabling ptype callback for required ptypes on port %u\n",
166 if (!rte_eth_add_rx_callback(port, queue, eth_pkt_parse_cb,
168 node_err("ethdev_rx",
169 "Failed to add rx ptype cb: port=%d, queue=%d\n",
179 ethdev_rx_node_init(const struct rte_graph *graph, struct rte_node *node)
181 ethdev_rx_node_ctx_t *ctx = (ethdev_rx_node_ctx_t *)node->ctx;
182 ethdev_rx_node_elem_t *elem = ethdev_rx_main.head;
187 if (elem->nid == node->id) {
188 /* Update node specific context */
189 memcpy(ctx, &elem->ctx, sizeof(ethdev_rx_node_ctx_t));
195 RTE_VERIFY(elem != NULL);
197 ctx->cls_next = ETHDEV_RX_NEXT_PKT_CLS;
199 /* Check and setup ptype */
200 return ethdev_ptype_setup(ctx->port_id, ctx->queue_id);
203 struct ethdev_rx_node_main *
204 ethdev_rx_get_node_data_get(void)
206 return ðdev_rx_main;
209 static struct rte_node_register ethdev_rx_node_base = {
210 .process = ethdev_rx_node_process,
211 .flags = RTE_NODE_SOURCE_F,
214 .init = ethdev_rx_node_init,
216 .nb_edges = ETHDEV_RX_NEXT_MAX,
218 /* Default pkt classification node */
219 [ETHDEV_RX_NEXT_PKT_CLS] = "pkt_cls",
220 [ETHDEV_RX_NEXT_IP4_LOOKUP] = "ip4_lookup",
224 struct rte_node_register *
225 ethdev_rx_node_get(void)
227 return ðdev_rx_node_base;
230 RTE_NODE_REGISTER(ethdev_rx_node_base);