/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <arpa/inet.h>

#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_cycles.h>
#include <rte_pause.h>
#include <rte_prefetch.h>
#include <rte_mbuf.h>
#include <rte_ip.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>

#include "l3fwd.h"
#include "l3fwd_event.h"
#include "l3fwd_route.h"
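
/*
 * Table sizing: each LPM table holds up to 1024 routes. tbl8 groups are
 * consumed only by routes deeper than the first-level stride (/24 for
 * IPv4), so these counts bound how many such routes can be installed.
 */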
#define IPV4_L3FWD_LPM_MAX_RULES 1024
#define IPV4_L3FWD_LPM_NUMBER_TBL8S (1 << 8)
#define IPV6_L3FWD_LPM_MAX_RULES 1024
#define IPV6_L3FWD_LPM_NUMBER_TBL8S (1 << 16)
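
/* One lookup table per NUMA socket so lookups stay in socket-local memory. */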
static struct rte_lpm *ipv4_l3fwd_lpm_lookup_struct[NB_SOCKETS];
static struct rte_lpm6 *ipv6_l3fwd_lpm_lookup_struct[NB_SOCKETS];
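
/*
 * Resolve the output port for an IPv4 destination; on a lookup miss the
 * packet is sent back out of the port it arrived on.
 */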
static inline uint16_t
lpm_get_ipv4_dst_port(const struct rte_ipv4_hdr *ipv4_hdr, uint16_t portid,
		      struct rte_lpm *ipv4_l3fwd_lookup_struct)
{
	uint32_t dst_ip = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
	uint32_t next_hop;

	if (rte_lpm_lookup(ipv4_l3fwd_lookup_struct, dst_ip, &next_hop) == 0)
		return next_hop;
	else
		return portid;
}
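
/* IPv6 variant of the lookup above; rte_lpm6 matches on the 16-byte address. */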
static inline uint16_t
lpm_get_ipv6_dst_port(const struct rte_ipv6_hdr *ipv6_hdr, uint16_t portid,
		      struct rte_lpm6 *ipv6_l3fwd_lookup_struct)
{
	const uint8_t *dst_ip = ipv6_hdr->dst_addr;
	uint32_t next_hop;

	if (rte_lpm6_lookup(ipv6_l3fwd_lookup_struct, dst_ip, &next_hop) == 0)
		return next_hop;
	else
		return portid;
}

static __rte_always_inline uint16_t
lpm_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
		uint16_t portid)
{
	struct rte_ipv6_hdr *ipv6_hdr;
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_ether_hdr *eth_hdr;

	if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
		/* The L3 header starts right after the Ethernet header. */
		eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
		ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);

		return lpm_get_ipv4_dst_port(ipv4_hdr, portid,
					     qconf->ipv4_lookup_struct);
	} else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {
		eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
		ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);

		return lpm_get_ipv6_dst_port(ipv6_hdr, portid,
					     qconf->ipv6_lookup_struct);
	}

	/* Non-IP traffic is reflected back to its input port. */
	return portid;
}

/*
 * Variant of lpm_get_dst_port() optimized for packets whose IPv4 destination
 * address (dst_ipv4) has already been extracted. For IPv6 packets the
 * destination address is read directly from the header and dst_ipv4 is unused.
 */
static __rte_always_inline uint16_t
lpm_get_dst_port_with_ipv4(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
	uint32_t dst_ipv4, uint16_t portid)
{
	uint32_t next_hop;
	struct rte_ipv6_hdr *ipv6_hdr;
	struct rte_ether_hdr *eth_hdr;

	if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
		return (uint16_t) ((rte_lpm_lookup(qconf->ipv4_lookup_struct,
				dst_ipv4, &next_hop) == 0)
				? next_hop : portid);
	} else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {
		eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
		ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);

		return (uint16_t) ((rte_lpm6_lookup(qconf->ipv6_lookup_struct,
				ipv6_hdr->dst_addr, &next_hop) == 0)
				? next_hop : portid);
	}

	return portid;
}
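
/*
 * Select the burst-processing implementation: SSE, NEON or Altivec when the
 * architecture provides it, otherwise the scalar fallback in l3fwd_lpm.h.
 */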
#if defined(RTE_ARCH_X86)
#include "l3fwd_lpm_sse.h"
#elif defined __ARM_NEON
#include "l3fwd_lpm_neon.h"
#elif defined(RTE_ARCH_PPC_64)
#include "l3fwd_lpm_altivec.h"
#else
#include "l3fwd_lpm.h"
#endif

/* main processing loop */
int
lpm_main_loop(__rte_unused void *dummy)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	unsigned lcore_id;
	uint64_t prev_tsc, diff_tsc, cur_tsc;
	int i, nb_rx;
	uint16_t portid;
	uint8_t queueid;
	struct lcore_conf *qconf;
	/* TX drain interval: BURST_TX_DRAIN_US converted to TSC cycles. */
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
		US_PER_S * BURST_TX_DRAIN_US;

	lcore_id = rte_lcore_id();
	qconf = &lcore_conf[lcore_id];

	const uint16_t n_rx_q = qconf->n_rx_queue;
	const uint16_t n_tx_p = qconf->n_tx_port;
	if (n_rx_q == 0) {
		RTE_LOG(INFO, L3FWD, "lcore %u has nothing to do\n", lcore_id);
		return 0;
	}

	RTE_LOG(INFO, L3FWD, "entering main loop on lcore %u\n", lcore_id);

	for (i = 0; i < n_rx_q; i++) {
		portid = qconf->rx_queue_list[i].port_id;
		queueid = qconf->rx_queue_list[i].queue_id;
		RTE_LOG(INFO, L3FWD,
			" -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
			lcore_id, portid, queueid);
	}

	cur_tsc = rte_rdtsc();
	prev_tsc = cur_tsc;

	while (!force_quit) {
		/* TX burst queue drain */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > drain_tsc)) {
			for (i = 0; i < n_tx_p; ++i) {
				portid = qconf->tx_port_id[i];
				if (qconf->tx_mbufs[portid].len == 0)
					continue;
				send_burst(qconf,
					qconf->tx_mbufs[portid].len,
					portid);
				qconf->tx_mbufs[portid].len = 0;
			}
			prev_tsc = cur_tsc;
		}

		/* Read packets from the RX queues */
		for (i = 0; i < n_rx_q; ++i) {
			portid = qconf->rx_queue_list[i].port_id;
			queueid = qconf->rx_queue_list[i].queue_id;
			nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst,
				MAX_PKT_BURST);
			if (nb_rx == 0)
				continue;
#if defined RTE_ARCH_X86 || defined __ARM_NEON \
	|| defined RTE_ARCH_PPC_64
			l3fwd_lpm_send_packets(nb_rx, pkts_burst,
						portid, qconf);
#else
			l3fwd_lpm_no_opt_send_packets(nb_rx, pkts_burst,
							portid, qconf);
#endif
		}

		cur_tsc = rte_rdtsc();
	}

	return 0;
}
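
/*
 * Process one event-mode packet: resolve the destination port and rewrite
 * the Ethernet header; returns BAD_PORT for packets that fail validation.
 */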
static __rte_always_inline uint16_t
lpm_process_event_pkt(const struct lcore_conf *lconf, struct rte_mbuf *mbuf)
{
	mbuf->port = lpm_get_dst_port(lconf, mbuf, mbuf->port);

#if defined RTE_ARCH_X86 || defined __ARM_NEON \
	|| defined RTE_ARCH_PPC_64
	process_packet(mbuf, &mbuf->port);
#else
	struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(mbuf,
			struct rte_ether_hdr *);

#ifdef DO_RFC_1812_CHECKS
	struct rte_ipv4_hdr *ipv4_hdr;
	if (RTE_ETH_IS_IPV4_HDR(mbuf->packet_type)) {
		/* Handle IPv4 headers. */
		ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf,
				struct rte_ipv4_hdr *,
				sizeof(struct rte_ether_hdr));

		if (is_valid_ipv4_pkt(ipv4_hdr, mbuf->pkt_len) < 0) {
			mbuf->port = BAD_PORT;
			return mbuf->port;
		}
		/*
		 * Update time to live and header checksum; the increment
		 * compensates for the TTL decrement (RFC 1141-style
		 * incremental update, ignoring one's-complement carry).
		 */
		--(ipv4_hdr->time_to_live);
		++(ipv4_hdr->hdr_checksum);
	}
#endif
	/* dst addr */
	*(uint64_t *)&eth_hdr->d_addr = dest_eth_addr[mbuf->port];

	/* src addr */
	rte_ether_addr_copy(&ports_eth_addr[mbuf->port],
			&eth_hdr->s_addr);
#endif
	return mbuf->port;
}
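
/*
 * Event-mode worker, one event at a time: dequeue, forward, then either
 * mark the event for the TX queue or hand the mbuf to the TX adapter.
 */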
static __rte_always_inline void
lpm_event_loop_single(struct l3fwd_event_resources *evt_rsrc,
		const uint8_t flags)
{
	const int event_p_id = l3fwd_get_free_event_port(evt_rsrc);
	const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[
		evt_rsrc->evq.nb_queues - 1];
	const uint8_t event_d_id = evt_rsrc->event_d_id;
	struct lcore_conf *lconf;
	unsigned int lcore_id;
	struct rte_event ev;

	if (event_p_id < 0)
		return;

	lcore_id = rte_lcore_id();
	lconf = &lcore_conf[lcore_id];

	RTE_LOG(INFO, L3FWD, "entering %s on lcore %u\n", __func__, lcore_id);
	while (!force_quit) {
		if (!rte_event_dequeue_burst(event_d_id, event_p_id, &ev, 1, 0))
			continue;

		/* Drop packets that failed validation. */
		if (lpm_process_event_pkt(lconf, ev.mbuf) == BAD_PORT) {
			rte_pktmbuf_free(ev.mbuf);
			continue;
		}

		if (flags & L3FWD_EVENT_TX_ENQ) {
			ev.queue_id = tx_q_id;
			ev.op = RTE_EVENT_OP_FORWARD;
			/* Retry until the event port accepts the event. */
			while (!rte_event_enqueue_burst(event_d_id, event_p_id,
						&ev, 1) && !force_quit)
				;
		}

		if (flags & L3FWD_EVENT_TX_DIRECT) {
			rte_event_eth_tx_adapter_txq_set(ev.mbuf, 0);
			while (!rte_event_eth_tx_adapter_enqueue(event_d_id,
						event_p_id, &ev, 1, 0) &&
					!force_quit)
				;
		}
	}
}
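
/* Burst variant of the event loop: dequeue up to deq_depth events at a time. */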
static __rte_always_inline void
lpm_event_loop_burst(struct l3fwd_event_resources *evt_rsrc,
		const uint8_t flags)
{
	const int event_p_id = l3fwd_get_free_event_port(evt_rsrc);
	const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[
		evt_rsrc->evq.nb_queues - 1];
	const uint8_t event_d_id = evt_rsrc->event_d_id;
	const uint16_t deq_len = evt_rsrc->deq_depth;
	struct rte_event events[MAX_PKT_BURST];
	struct lcore_conf *lconf;
	unsigned int lcore_id;
	int i, nb_enq, nb_deq;

	if (event_p_id < 0)
		return;

	lcore_id = rte_lcore_id();
	lconf = &lcore_conf[lcore_id];

	RTE_LOG(INFO, L3FWD, "entering %s on lcore %u\n", __func__, lcore_id);

	while (!force_quit) {
		/* Read events from RX queues */
		nb_deq = rte_event_dequeue_burst(event_d_id, event_p_id,
				events, deq_len, 0);
		if (nb_deq == 0) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_deq; i++) {
			if (flags & L3FWD_EVENT_TX_ENQ) {
				events[i].queue_id = tx_q_id;
				events[i].op = RTE_EVENT_OP_FORWARD;
			}

			if (flags & L3FWD_EVENT_TX_DIRECT)
				rte_event_eth_tx_adapter_txq_set(
						events[i].mbuf, 0);

			lpm_process_event_pkt(lconf, events[i].mbuf);
		}

		if (flags & L3FWD_EVENT_TX_ENQ) {
			nb_enq = rte_event_enqueue_burst(event_d_id, event_p_id,
					events, nb_deq);
			/* Re-offer any events the port did not accept. */
			while (nb_enq < nb_deq && !force_quit)
				nb_enq += rte_event_enqueue_burst(event_d_id,
						event_p_id, events + nb_enq,
						nb_deq - nb_enq);
		}

		if (flags & L3FWD_EVENT_TX_DIRECT) {
			nb_enq = rte_event_eth_tx_adapter_enqueue(event_d_id,
					event_p_id, events, nb_deq, 0);
			while (nb_enq < nb_deq && !force_quit)
				nb_enq += rte_event_eth_tx_adapter_enqueue(
						event_d_id, event_p_id,
						events + nb_enq,
						nb_deq - nb_enq, 0);
		}
	}
}
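
/* Dispatch to the single-event or burst loop according to the mode flags. */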
static __rte_always_inline void
lpm_event_loop(struct l3fwd_event_resources *evt_rsrc,
		const uint8_t flags)
{
	if (flags & L3FWD_EVENT_SINGLE)
		lpm_event_loop_single(evt_rsrc, flags);
	if (flags & L3FWD_EVENT_BURST)
		lpm_event_loop_burst(evt_rsrc, flags);
}
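
/*
 * Event-mode entry points; the suffix encodes the TX mode (tx_d = direct via
 * the TX adapter, tx_q = forward to the TX event queue) and burst mode.
 */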
int __rte_noinline
lpm_event_main_loop_tx_d(__rte_unused void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();

	lpm_event_loop(evt_rsrc, L3FWD_EVENT_TX_DIRECT | L3FWD_EVENT_SINGLE);
	return 0;
}

int __rte_noinline
lpm_event_main_loop_tx_d_burst(__rte_unused void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();

	lpm_event_loop(evt_rsrc, L3FWD_EVENT_TX_DIRECT | L3FWD_EVENT_BURST);
	return 0;
}

int __rte_noinline
lpm_event_main_loop_tx_q(__rte_unused void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();

	lpm_event_loop(evt_rsrc, L3FWD_EVENT_TX_ENQ | L3FWD_EVENT_SINGLE);
	return 0;
}

int __rte_noinline
lpm_event_main_loop_tx_q_burst(__rte_unused void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();

	lpm_event_loop(evt_rsrc, L3FWD_EVENT_TX_ENQ | L3FWD_EVENT_BURST);
	return 0;
}
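
/* Build the per-socket IPv4/IPv6 LPM tables and install the static routes. */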
void
setup_lpm(const int socketid)
{
	struct rte_lpm6_config config;
	struct rte_lpm_config config_ipv4;
	unsigned i;
	int ret;
	char s[64];
	char abuf[INET6_ADDRSTRLEN];

	/* create the LPM table */
	config_ipv4.max_rules = IPV4_L3FWD_LPM_MAX_RULES;
	config_ipv4.number_tbl8s = IPV4_L3FWD_LPM_NUMBER_TBL8S;
	config_ipv4.flags = 0;
	snprintf(s, sizeof(s), "IPV4_L3FWD_LPM_%d", socketid);
	ipv4_l3fwd_lpm_lookup_struct[socketid] =
			rte_lpm_create(s, socketid, &config_ipv4);
	if (ipv4_l3fwd_lpm_lookup_struct[socketid] == NULL)
		rte_exit(EXIT_FAILURE,
			"Unable to create the l3fwd LPM table on socket %d\n",
			socketid);

	/* populate the LPM table */
	for (i = 0; i < RTE_DIM(ipv4_l3fwd_route_array); i++) {
		struct in_addr in;

		/* skip unused ports */
		if ((1 << ipv4_l3fwd_route_array[i].if_out &
				enabled_port_mask) == 0)
			continue;

		ret = rte_lpm_add(ipv4_l3fwd_lpm_lookup_struct[socketid],
			ipv4_l3fwd_route_array[i].ip,
			ipv4_l3fwd_route_array[i].depth,
			ipv4_l3fwd_route_array[i].if_out);

		if (ret < 0) {
			rte_exit(EXIT_FAILURE,
				"Unable to add entry %u to the l3fwd LPM table on socket %d\n",
				i, socketid);
		}

		in.s_addr = htonl(ipv4_l3fwd_route_array[i].ip);
		printf("LPM: Adding route %s / %d (%d)\n",
		       inet_ntop(AF_INET, &in, abuf, sizeof(abuf)),
		       ipv4_l3fwd_route_array[i].depth,
		       ipv4_l3fwd_route_array[i].if_out);
	}

	/* create the LPM6 table */
	snprintf(s, sizeof(s), "IPV6_L3FWD_LPM_%d", socketid);

	config.max_rules = IPV6_L3FWD_LPM_MAX_RULES;
	config.number_tbl8s = IPV6_L3FWD_LPM_NUMBER_TBL8S;
	config.flags = 0;
	ipv6_l3fwd_lpm_lookup_struct[socketid] = rte_lpm6_create(s, socketid,
				&config);
	if (ipv6_l3fwd_lpm_lookup_struct[socketid] == NULL)
		rte_exit(EXIT_FAILURE,
			"Unable to create the l3fwd LPM6 table on socket %d\n",
			socketid);

	/* populate the LPM6 table */
	for (i = 0; i < RTE_DIM(ipv6_l3fwd_route_array); i++) {

		/* skip unused ports */
		if ((1 << ipv6_l3fwd_route_array[i].if_out &
				enabled_port_mask) == 0)
			continue;

		ret = rte_lpm6_add(ipv6_l3fwd_lpm_lookup_struct[socketid],
			ipv6_l3fwd_route_array[i].ip,
			ipv6_l3fwd_route_array[i].depth,
			ipv6_l3fwd_route_array[i].if_out);

		if (ret < 0) {
			rte_exit(EXIT_FAILURE,
				"Unable to add entry %u to the l3fwd LPM6 table on socket %d\n",
				i, socketid);
		}

		printf("LPM: Adding route %s / %d (%d)\n",
		       inet_ntop(AF_INET6, ipv6_l3fwd_route_array[i].ip,
				 abuf, sizeof(abuf)),
		       ipv6_l3fwd_route_array[i].depth,
		       ipv6_l3fwd_route_array[i].if_out);
	}
}
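
/*
 * Check whether the port's driver can classify IPv4/IPv6 L3 headers in
 * hardware; returns 1 when both ptypes are reported, so the caller can
 * otherwise fall back to the software parse callback below.
 */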
int
lpm_check_ptype(int portid)
{
	int i, ret;
	int ptype_l3_ipv4 = 0, ptype_l3_ipv6 = 0;
	uint32_t ptype_mask = RTE_PTYPE_L3_MASK;

	ret = rte_eth_dev_get_supported_ptypes(portid, ptype_mask, NULL, 0);
	if (ret <= 0)
		return 0;

	uint32_t ptypes[ret];

	ret = rte_eth_dev_get_supported_ptypes(portid, ptype_mask, ptypes, ret);
	for (i = 0; i < ret; ++i) {
		if (ptypes[i] & RTE_PTYPE_L3_IPV4)
			ptype_l3_ipv4 = 1;
		if (ptypes[i] & RTE_PTYPE_L3_IPV6)
			ptype_l3_ipv6 = 1;
	}

	if (ptype_l3_ipv4 == 0)
		printf("port %d cannot parse RTE_PTYPE_L3_IPV4\n", portid);
	if (ptype_l3_ipv6 == 0)
		printf("port %d cannot parse RTE_PTYPE_L3_IPV6\n", portid);

	if (ptype_l3_ipv4 && ptype_l3_ipv6)
		return 1;

	return 0;
}
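
/* Software fallback: derive the L3 packet type from the Ethernet header. */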
static inline void
lpm_parse_ptype(struct rte_mbuf *m)
{
	struct rte_ether_hdr *eth_hdr;
	uint32_t packet_type = RTE_PTYPE_UNKNOWN;
	uint16_t ether_type;

	eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
	ether_type = eth_hdr->ether_type;
	if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4))
		packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
	else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6))
		packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;

	m->packet_type = packet_type;
}

uint16_t
lpm_cb_parse_ptype(uint16_t port __rte_unused, uint16_t queue __rte_unused,
		   struct rte_mbuf *pkts[], uint16_t nb_pkts,
		   uint16_t max_pkts __rte_unused,
		   void *user_param __rte_unused)
{
	unsigned int i;

	if (unlikely(nb_pkts == 0))
		return nb_pkts;
	rte_prefetch0(rte_pktmbuf_mtod(pkts[0], struct rte_ether_hdr *));
	for (i = 0; i < (unsigned int) (nb_pkts - 1); ++i) {
		/* Prefetch the next header while parsing the current one. */
		rte_prefetch0(rte_pktmbuf_mtod(pkts[i+1],
			struct rte_ether_hdr *));
		lpm_parse_ptype(pkts[i]);
	}
	/* Parse the last packet of the burst. */
	lpm_parse_ptype(pkts[i]);

	return nb_pkts;
}

/* Return ipv4/ipv6 lpm fwd lookup struct. */
void *
lpm_get_ipv4_l3fwd_lookup_struct(const int socketid)
{
	return ipv4_l3fwd_lpm_lookup_struct[socketid];
}

void *
lpm_get_ipv6_l3fwd_lookup_struct(const int socketid)
{
	return ipv6_l3fwd_lpm_lookup_struct[socketid];
}