/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2020 Marvell International Ltd.
 */

#include <rte_debug.h>
#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_graph.h>
#include <rte_graph_worker.h>
#include <rte_mbuf.h>

#include "ethdev_rx_priv.h"
#include "node_private.h"
static struct ethdev_rx_node_main ethdev_rx_main;

static __rte_always_inline uint16_t
ethdev_rx_node_process_inline(struct rte_graph *graph, struct rte_node *node,
			      ethdev_rx_node_ctx_t *ctx)
{
	uint16_t count, next_index;
	uint16_t port, queue;

	port = ctx->port_id;
	queue = ctx->queue_id;
	next_index = ctx->cls_next;

	/* Get pkts from port */
	count = rte_eth_rx_burst(port, queue, (struct rte_mbuf **)node->objs,
				 RTE_GRAPH_BURST_SIZE);
	if (!count)
		return 0;

	/* Publish the burst as this node's output stream */
	node->idx = count;

	/* Enqueue to next node */
	rte_node_next_stream_move(graph, node, next_index);

	return count;
}
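
/*
 * Note: this is a source node (RTE_NODE_SOURCE_F), so the graph walker
 * invokes it with no input stream; objs/cnt below are therefore unused and
 * the node produces its own stream by filling node->objs in the rx burst.
 */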
static __rte_always_inline uint16_t
ethdev_rx_node_process(struct rte_graph *graph, struct rte_node *node,
		       void **objs, uint16_t cnt)
{
	ethdev_rx_node_ctx_t *ctx = (ethdev_rx_node_ctx_t *)node->ctx;
	uint16_t n_pkts = 0;

	RTE_SET_USED(objs);
	RTE_SET_USED(cnt);

	n_pkts = ethdev_rx_node_process_inline(graph, node, ctx);
	return n_pkts;
}

static inline uint32_t
l3_ptype(uint16_t etype, uint32_t ptype)
{
	ptype = ptype & ~RTE_PTYPE_L3_MASK;
	if (etype == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4))
		ptype |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
	else if (etype == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6))
		ptype |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
	return ptype;
}
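
/*
 * Example (illustrative): on a little-endian CPU an IPv4 frame carries
 * ether_type bytes 0x08 0x00, read as 0x0008 in host order; since the
 * comparison above is against rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4), passing
 * that raw value to l3_ptype() yields RTE_PTYPE_L3_IPV4_EXT_UNKNOWN.
 */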

/* Callback for soft ptype parsing */
static uint16_t
eth_pkt_parse_cb(uint16_t port, uint16_t queue, struct rte_mbuf **mbufs,
		 uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
{
	struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3;
	struct rte_ether_hdr *eth_hdr;
	uint16_t etype, n_left;
	struct rte_mbuf **pkts;

	RTE_SET_USED(port);
	RTE_SET_USED(queue);
	RTE_SET_USED(max_pkts);
	RTE_SET_USED(user_param);

	pkts = mbufs;
	n_left = nb_pkts;
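
	/*
	 * Software pipeline: each iteration classifies pkts[0..3], so
	 * prefetch the mbuf structs two iterations ahead (pkts[8..11]) and
	 * the packet headers one iteration ahead (pkts[4..7]).
	 */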
	while (n_left >= 12) {

		/* Prefetch next-next mbufs */
		rte_prefetch0(pkts[8]);
		rte_prefetch0(pkts[9]);
		rte_prefetch0(pkts[10]);
		rte_prefetch0(pkts[11]);

		/* Prefetch next mbuf data */
		rte_prefetch0(
			rte_pktmbuf_mtod(pkts[4], struct rte_ether_hdr *));
		rte_prefetch0(
			rte_pktmbuf_mtod(pkts[5], struct rte_ether_hdr *));
		rte_prefetch0(
			rte_pktmbuf_mtod(pkts[6], struct rte_ether_hdr *));
		rte_prefetch0(
			rte_pktmbuf_mtod(pkts[7], struct rte_ether_hdr *));

		mbuf0 = pkts[0];
		mbuf1 = pkts[1];
		mbuf2 = pkts[2];
		mbuf3 = pkts[3];

		pkts += 4;
		n_left -= 4;

		/* Extract ptype of mbuf0 */
		eth_hdr = rte_pktmbuf_mtod(mbuf0, struct rte_ether_hdr *);
		etype = eth_hdr->ether_type;
		mbuf0->packet_type = l3_ptype(etype, 0);

		/* Extract ptype of mbuf1 */
		eth_hdr = rte_pktmbuf_mtod(mbuf1, struct rte_ether_hdr *);
		etype = eth_hdr->ether_type;
		mbuf1->packet_type = l3_ptype(etype, 0);

		/* Extract ptype of mbuf2 */
		eth_hdr = rte_pktmbuf_mtod(mbuf2, struct rte_ether_hdr *);
		etype = eth_hdr->ether_type;
		mbuf2->packet_type = l3_ptype(etype, 0);

		/* Extract ptype of mbuf3 */
		eth_hdr = rte_pktmbuf_mtod(mbuf3, struct rte_ether_hdr *);
		etype = eth_hdr->ether_type;
		mbuf3->packet_type = l3_ptype(etype, 0);
	}

	/* Handle the remaining (fewer than 12) packets one at a time */
	while (n_left > 0) {
		mbuf0 = pkts[0];

		pkts += 1;
		n_left -= 1;

		/* Extract ptype of mbuf0 */
		eth_hdr = rte_pktmbuf_mtod(mbuf0, struct rte_ether_hdr *);
		etype = eth_hdr->ether_type;
		mbuf0->packet_type = l3_ptype(etype, 0);
	}

	return nb_pkts;
}
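
/*
 * Note: this software fallback fills in only the outer L3 type
 * (RTE_PTYPE_L3_IPV4_EXT_UNKNOWN / RTE_PTYPE_L3_IPV6_EXT_UNKNOWN) and leaves
 * the rest of packet_type zero, which is the part the default pkt_cls next
 * node keys on.
 */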

#define MAX_PTYPES 16
static int
ethdev_ptype_setup(uint16_t port, uint16_t queue)
{
	uint8_t l3_ipv4 = 0, l3_ipv6 = 0;
	uint32_t ptypes[MAX_PTYPES];
	int i, rc;

	/* Check IPv4 & IPv6 ptype support */
	rc = rte_eth_dev_get_supported_ptypes(port, RTE_PTYPE_L3_MASK, ptypes,
					      MAX_PTYPES);
	for (i = 0; i < rc; i++) {
		if (ptypes[i] & RTE_PTYPE_L3_IPV4)
			l3_ipv4 = 1;
		if (ptypes[i] & RTE_PTYPE_L3_IPV6)
			l3_ipv6 = 1;
	}

	if (!l3_ipv4 || !l3_ipv6) {
		node_info("ethdev_rx",
			  "Enabling ptype callback for required ptypes on port %u\n",
			  port);

		if (!rte_eth_add_rx_callback(port, queue, eth_pkt_parse_cb,
					     NULL)) {
			node_err("ethdev_rx",
				 "Failed to add rx ptype cb: port=%d, queue=%d\n",
				 port, queue);
			return -EINVAL;
		}
	}

	return 0;
}

static int
ethdev_rx_node_init(const struct rte_graph *graph, struct rte_node *node)
{
	ethdev_rx_node_ctx_t *ctx = (ethdev_rx_node_ctx_t *)node->ctx;
	ethdev_rx_node_elem_t *elem = ethdev_rx_main.head;

	RTE_SET_USED(graph);

	while (elem) {
		if (elem->nid == node->id) {
			/* Update node specific context */
			memcpy(ctx, &elem->ctx, sizeof(ethdev_rx_node_ctx_t));
			break;
		}
		elem = elem->next;
	}

	RTE_VERIFY(elem != NULL);

	ctx->cls_next = ETHDEV_RX_NEXT_PKT_CLS;

	/* Check and setup ptype */
	return ethdev_ptype_setup(ctx->port_id, ctx->queue_id);
}

struct ethdev_rx_node_main *
ethdev_rx_get_node_data_get(void)
{
	return &ethdev_rx_main;
}

static struct rte_node_register ethdev_rx_node_base = {
	.process = ethdev_rx_node_process,
	.flags = RTE_NODE_SOURCE_F,
	.name = "ethdev_rx",

	.init = ethdev_rx_node_init,

	.nb_edges = ETHDEV_RX_NEXT_MAX,
	.next_nodes = {
		/* Default pkt classification node */
		[ETHDEV_RX_NEXT_PKT_CLS] = "pkt_cls",
		[ETHDEV_RX_NEXT_IP4_LOOKUP] = "ip4_lookup",
	},
};

struct rte_node_register *
ethdev_rx_node_get(void)
{
	return &ethdev_rx_node_base;
}

RTE_NODE_REGISTER(ethdev_rx_node_base);
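
/*
 * Usage note: applications normally do not drive this node directly. The
 * library control path clones "ethdev_rx" once per configured (port, queue)
 * pair and records each pairing via ethdev_rx_get_node_data_get(), so that
 * ethdev_rx_node_init() can bind every clone to its queue at graph create
 * time.
 */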