5cc73659835468bd92c111c98da427a6febcb2da
[dpdk.git] lib/librte_node/ethdev_rx.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2020 Marvell International Ltd.
 */

#include <rte_debug.h>
#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_graph.h>
#include <rte_graph_worker.h>
#include <rte_mbuf.h>

#include "ethdev_rx_priv.h"
#include "node_private.h"

static struct ethdev_rx_node_main ethdev_rx_main;

static __rte_always_inline uint16_t
ethdev_rx_node_process_inline(struct rte_graph *graph, struct rte_node *node,
                              uint16_t port, uint16_t queue)
{
        uint16_t count, next_index = ETHDEV_RX_NEXT_IP4_LOOKUP;

        /* Get pkts from port */
        count = rte_eth_rx_burst(port, queue, (struct rte_mbuf **)node->objs,
                                 RTE_GRAPH_BURST_SIZE);

        if (!count)
                return 0;
        node->idx = count;
        /* Enqueue to next node */
        rte_node_next_stream_move(graph, node, next_index);

        return count;
}

/*
 * Node process callback. The objs/cnt arguments are unused: this is a source
 * node, so the port and queue to poll come from the per-node context filled
 * in at init time.
 */
static __rte_always_inline uint16_t
ethdev_rx_node_process(struct rte_graph *graph, struct rte_node *node,
                       void **objs, uint16_t cnt)
{
        ethdev_rx_node_ctx_t *ctx = (ethdev_rx_node_ctx_t *)node->ctx;
        uint16_t n_pkts = 0;

        RTE_SET_USED(objs);
        RTE_SET_USED(cnt);

        n_pkts = ethdev_rx_node_process_inline(graph, node, ctx->port_id,
                                               ctx->queue_id);
        return n_pkts;
}

/* Derive the mbuf L3 packet type from the Ethernet type field */
static inline uint32_t
l3_ptype(uint16_t etype, uint32_t ptype)
{
        ptype = ptype & ~RTE_PTYPE_L3_MASK;
        if (etype == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4))
                ptype |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
        else if (etype == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6))
                ptype |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
        return ptype;
}

/* Callback for soft ptype parsing */
static uint16_t
eth_pkt_parse_cb(uint16_t port, uint16_t queue, struct rte_mbuf **mbufs,
                 uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
{
        struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3;
        struct rte_ether_hdr *eth_hdr;
        uint16_t etype, n_left;
        struct rte_mbuf **pkts;

        RTE_SET_USED(port);
        RTE_SET_USED(queue);
        RTE_SET_USED(max_pkts);
        RTE_SET_USED(user_param);

        pkts = mbufs;
        n_left = nb_pkts;
        while (n_left >= 12) {

                /* Prefetch next-next mbufs */
                rte_prefetch0(pkts[8]);
                rte_prefetch0(pkts[9]);
                rte_prefetch0(pkts[10]);
                rte_prefetch0(pkts[11]);

                /* Prefetch next mbuf data */
                rte_prefetch0(
                        rte_pktmbuf_mtod(pkts[4], struct rte_ether_hdr *));
                rte_prefetch0(
                        rte_pktmbuf_mtod(pkts[5], struct rte_ether_hdr *));
                rte_prefetch0(
                        rte_pktmbuf_mtod(pkts[6], struct rte_ether_hdr *));
                rte_prefetch0(
                        rte_pktmbuf_mtod(pkts[7], struct rte_ether_hdr *));

                mbuf0 = pkts[0];
                mbuf1 = pkts[1];
                mbuf2 = pkts[2];
                mbuf3 = pkts[3];
                pkts += 4;
                n_left -= 4;

                /* Extract ptype of mbuf0 */
                eth_hdr = rte_pktmbuf_mtod(mbuf0, struct rte_ether_hdr *);
                etype = eth_hdr->ether_type;
                mbuf0->packet_type = l3_ptype(etype, 0);

                /* Extract ptype of mbuf1 */
                eth_hdr = rte_pktmbuf_mtod(mbuf1, struct rte_ether_hdr *);
                etype = eth_hdr->ether_type;
                mbuf1->packet_type = l3_ptype(etype, 0);

                /* Extract ptype of mbuf2 */
                eth_hdr = rte_pktmbuf_mtod(mbuf2, struct rte_ether_hdr *);
                etype = eth_hdr->ether_type;
                mbuf2->packet_type = l3_ptype(etype, 0);

                /* Extract ptype of mbuf3 */
                eth_hdr = rte_pktmbuf_mtod(mbuf3, struct rte_ether_hdr *);
                etype = eth_hdr->ether_type;
                mbuf3->packet_type = l3_ptype(etype, 0);
        }

        while (n_left > 0) {
                mbuf0 = pkts[0];

                pkts += 1;
                n_left -= 1;

                /* Extract ptype of mbuf0 */
                eth_hdr = rte_pktmbuf_mtod(mbuf0, struct rte_ether_hdr *);
                etype = eth_hdr->ether_type;
                mbuf0->packet_type = l3_ptype(etype, 0);
        }

        return nb_pkts;
}

#define MAX_PTYPES 16
static int
ethdev_ptype_setup(uint16_t port, uint16_t queue)
{
        uint8_t l3_ipv4 = 0, l3_ipv6 = 0;
        uint32_t ptypes[MAX_PTYPES];
        int i, rc;

        /* Check IPv4 & IPv6 ptype support */
        rc = rte_eth_dev_get_supported_ptypes(port, RTE_PTYPE_L3_MASK, ptypes,
                                              MAX_PTYPES);
        for (i = 0; i < rc; i++) {
                if (ptypes[i] & RTE_PTYPE_L3_IPV4)
                        l3_ipv4 = 1;
                if (ptypes[i] & RTE_PTYPE_L3_IPV6)
                        l3_ipv6 = 1;
        }

        if (!l3_ipv4 || !l3_ipv6) {
                node_info("ethdev_rx",
                          "Enabling ptype callback for required ptypes on port %u\n",
                          port);

                if (!rte_eth_add_rx_callback(port, queue, eth_pkt_parse_cb,
                                             NULL)) {
                        node_err("ethdev_rx",
                                 "Failed to add rx ptype cb: port=%d, queue=%d\n",
                                 port, queue);
                        return -EINVAL;
                }
        }

        return 0;
}

static int
ethdev_rx_node_init(const struct rte_graph *graph, struct rte_node *node)
{
        ethdev_rx_node_ctx_t *ctx = (ethdev_rx_node_ctx_t *)node->ctx;
        ethdev_rx_node_elem_t *elem = ethdev_rx_main.head;

        RTE_SET_USED(graph);

        while (elem) {
                if (elem->nid == node->id) {
                        /* Update node specific context */
                        memcpy(ctx, &elem->ctx, sizeof(ethdev_rx_node_ctx_t));
                        break;
                }
                elem = elem->next;
        }

        RTE_VERIFY(elem != NULL);

        /* Check and setup ptype */
        return ethdev_ptype_setup(ctx->port_id, ctx->queue_id);
}

struct ethdev_rx_node_main *
ethdev_rx_get_node_data_get(void)
{
        return &ethdev_rx_main;
}
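
The list returned above is what the control path is expected to fill in before any graph is created, so that ethdev_rx_node_init() can find a (port, queue) pair for each cloned node. The sketch below shows one way that wiring could look; it is illustrative only, the helper name is hypothetical, and it assumes ethdev_rx_node_elem_t carries just the fields used in this file (ctx.port_id, ctx.queue_id, nid, next). In the library itself this plumbing is done by the ethdev control path (rte_node_eth_config()).

/*
 * Sketch only: clone the "ethdev_rx" base node for one (port, queue) pair
 * and record its context so ethdev_rx_node_init() can pick it up later.
 * Needs <stdlib.h>, <stdio.h> and <errno.h> in addition to the headers
 * above; the element layout beyond the fields used in this file is an
 * assumption.
 */
static int
ethdev_rx_attach_port_queue(uint16_t port_id, uint16_t queue_id)
{
        struct ethdev_rx_node_main *rx_main = ethdev_rx_get_node_data_get();
        struct rte_node_register *base = ethdev_rx_node_get();
        ethdev_rx_node_elem_t *elem;
        char suffix[32];

        elem = calloc(1, sizeof(*elem));
        if (elem == NULL)
                return -ENOMEM;

        elem->ctx.port_id = port_id;
        elem->ctx.queue_id = queue_id;

        /* Clone "ethdev_rx" as "ethdev_rx-<port>-<queue>" and keep its id */
        snprintf(suffix, sizeof(suffix), "%u-%u", port_id, queue_id);
        elem->nid = rte_node_clone(base->id, suffix);
        if (elem->nid == RTE_NODE_ID_INVALID) {
                free(elem);
                return -EINVAL;
        }

        /* Link into the list walked by ethdev_rx_node_init() */
        elem->next = rx_main->head;
        rx_main->head = elem;

        return 0;
}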

static struct rte_node_register ethdev_rx_node_base = {
        .process = ethdev_rx_node_process,
        .flags = RTE_NODE_SOURCE_F,
        .name = "ethdev_rx",

        .init = ethdev_rx_node_init,

        .nb_edges = ETHDEV_RX_NEXT_MAX,
        .next_nodes = {[ETHDEV_RX_NEXT_IP4_LOOKUP] = "ip4_lookup"},
};

struct rte_node_register *
ethdev_rx_node_get(void)
{
        return &ethdev_rx_node_base;
}

RTE_NODE_REGISTER(ethdev_rx_node_base);
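
Because ethdev_rx is registered with RTE_NODE_SOURCE_F, it only pulls packets when a worker walks a graph that contains it. The sketch below shows roughly how an application could build and run such a graph; the graph name, node patterns, quit flag and worker function are illustrative, and it assumes the per-port/queue Rx clones and an "ip4_lookup" node have already been set up (for example through the library's ethdev control API).

/*
 * Sketch: create a graph from node-name patterns and poll it in a loop on a
 * worker lcore. Names and patterns below are examples, not part of this file.
 */
#include <stdbool.h>

#include <rte_common.h>
#include <rte_lcore.h>
#include <rte_graph.h>
#include <rte_graph_worker.h>

static volatile bool rx_quit;

static int
rx_graph_worker(void *arg)
{
        const char *patterns[] = { "ethdev_rx-*", "ip4_lookup", "pkt_drop" };
        struct rte_graph_param prm = {
                .socket_id = (int)rte_socket_id(),
                .nb_node_patterns = RTE_DIM(patterns),
                .node_patterns = patterns,
        };
        struct rte_graph *graph;
        rte_graph_t gid;

        RTE_SET_USED(arg);

        gid = rte_graph_create("worker_0", &prm);
        if (gid == RTE_GRAPH_ID_INVALID)
                return -1;

        graph = rte_graph_lookup("worker_0");

        /* Every walk runs the source nodes (the ethdev_rx clones) once */
        while (!rx_quit)
                rte_graph_walk(graph);

        return 0;
}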