/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2020 Marvell International Ltd.
 */
6 #include <rte_ethdev.h>
9 #include <rte_graph_worker.h>
12 #include "ethdev_rx_priv.h"
13 #include "node_private.h"
/* Head of the per-node rx configuration list; populated by the application
 * through the pointer returned by ethdev_rx_get_node_data_get(). */
static struct ethdev_rx_node_main ethdev_rx_main;
17 static __rte_always_inline uint16_t
18 ethdev_rx_node_process_inline(struct rte_graph *graph, struct rte_node *node,
19 uint16_t port, uint16_t queue)
21 uint16_t count, next_index = ETHDEV_RX_NEXT_IP4_LOOKUP;
23 /* Get pkts from port */
24 count = rte_eth_rx_burst(port, queue, (struct rte_mbuf **)node->objs,
25 RTE_GRAPH_BURST_SIZE);
30 /* Enqueue to next node */
31 rte_node_next_stream_move(graph, node, next_index);
36 static __rte_always_inline uint16_t
37 ethdev_rx_node_process(struct rte_graph *graph, struct rte_node *node,
38 void **objs, uint16_t cnt)
40 ethdev_rx_node_ctx_t *ctx = (ethdev_rx_node_ctx_t *)node->ctx;
46 n_pkts = ethdev_rx_node_process_inline(graph, node, ctx->port_id,
51 static inline uint32_t
52 l3_ptype(uint16_t etype, uint32_t ptype)
54 ptype = ptype & ~RTE_PTYPE_L3_MASK;
55 if (etype == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4))
56 ptype |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
57 else if (etype == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6))
58 ptype |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
62 /* Callback for soft ptype parsing */
64 eth_pkt_parse_cb(uint16_t port, uint16_t queue, struct rte_mbuf **mbufs,
65 uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
67 struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3;
68 struct rte_ether_hdr *eth_hdr;
69 uint16_t etype, n_left;
70 struct rte_mbuf **pkts;
74 RTE_SET_USED(max_pkts);
75 RTE_SET_USED(user_param);
79 while (n_left >= 12) {
81 /* Prefetch next-next mbufs */
82 rte_prefetch0(pkts[8]);
83 rte_prefetch0(pkts[9]);
84 rte_prefetch0(pkts[10]);
85 rte_prefetch0(pkts[11]);
87 /* Prefetch next mbuf data */
89 rte_pktmbuf_mtod(pkts[4], struct rte_ether_hdr *));
91 rte_pktmbuf_mtod(pkts[5], struct rte_ether_hdr *));
93 rte_pktmbuf_mtod(pkts[6], struct rte_ether_hdr *));
95 rte_pktmbuf_mtod(pkts[7], struct rte_ether_hdr *));
104 /* Extract ptype of mbuf0 */
105 eth_hdr = rte_pktmbuf_mtod(mbuf0, struct rte_ether_hdr *);
106 etype = eth_hdr->ether_type;
107 mbuf0->packet_type = l3_ptype(etype, 0);
109 /* Extract ptype of mbuf1 */
110 eth_hdr = rte_pktmbuf_mtod(mbuf1, struct rte_ether_hdr *);
111 etype = eth_hdr->ether_type;
112 mbuf1->packet_type = l3_ptype(etype, 0);
114 /* Extract ptype of mbuf2 */
115 eth_hdr = rte_pktmbuf_mtod(mbuf2, struct rte_ether_hdr *);
116 etype = eth_hdr->ether_type;
117 mbuf2->packet_type = l3_ptype(etype, 0);
119 /* Extract ptype of mbuf3 */
120 eth_hdr = rte_pktmbuf_mtod(mbuf3, struct rte_ether_hdr *);
121 etype = eth_hdr->ether_type;
122 mbuf3->packet_type = l3_ptype(etype, 0);
131 /* Extract ptype of mbuf0 */
132 eth_hdr = rte_pktmbuf_mtod(mbuf0, struct rte_ether_hdr *);
133 etype = eth_hdr->ether_type;
134 mbuf0->packet_type = l3_ptype(etype, 0);
140 #define MAX_PTYPES 16
142 ethdev_ptype_setup(uint16_t port, uint16_t queue)
144 uint8_t l3_ipv4 = 0, l3_ipv6 = 0;
145 uint32_t ptypes[MAX_PTYPES];
148 /* Check IPv4 & IPv6 ptype support */
149 rc = rte_eth_dev_get_supported_ptypes(port, RTE_PTYPE_L3_MASK, ptypes,
151 for (i = 0; i < rc; i++) {
152 if (ptypes[i] & RTE_PTYPE_L3_IPV4)
154 if (ptypes[i] & RTE_PTYPE_L3_IPV6)
158 if (!l3_ipv4 || !l3_ipv6) {
159 node_info("ethdev_rx",
160 "Enabling ptype callback for required ptypes on port %u\n",
163 if (!rte_eth_add_rx_callback(port, queue, eth_pkt_parse_cb,
165 node_err("ethdev_rx",
166 "Failed to add rx ptype cb: port=%d, queue=%d\n",
176 ethdev_rx_node_init(const struct rte_graph *graph, struct rte_node *node)
178 ethdev_rx_node_ctx_t *ctx = (ethdev_rx_node_ctx_t *)node->ctx;
179 ethdev_rx_node_elem_t *elem = ethdev_rx_main.head;
184 if (elem->nid == node->id) {
185 /* Update node specific context */
186 memcpy(ctx, &elem->ctx, sizeof(ethdev_rx_node_ctx_t));
192 RTE_VERIFY(elem != NULL);
194 /* Check and setup ptype */
195 return ethdev_ptype_setup(ctx->port_id, ctx->queue_id);
198 struct ethdev_rx_node_main *
199 ethdev_rx_get_node_data_get(void)
201 return ðdev_rx_main;
204 static struct rte_node_register ethdev_rx_node_base = {
205 .process = ethdev_rx_node_process,
206 .flags = RTE_NODE_SOURCE_F,
209 .init = ethdev_rx_node_init,
211 .nb_edges = ETHDEV_RX_NEXT_MAX,
212 .next_nodes = {[ETHDEV_RX_NEXT_IP4_LOOKUP] = "ip4_lookup"},
215 struct rte_node_register *
216 ethdev_rx_node_get(void)
218 return ðdev_rx_node_base;
/* Register the node with the graph framework at library load time */
RTE_NODE_REGISTER(ethdev_rx_node_base);