/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <arpa/inet.h>

#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_cycles.h>
#include <rte_mbuf.h>
#include <rte_ip.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>

#include "l3fwd.h"
#include "l3fwd_event.h"
#include "l3fwd_route.h"

#define IPV4_L3FWD_LPM_MAX_RULES 1024
#define IPV4_L3FWD_LPM_NUMBER_TBL8S (1 << 8)
#define IPV6_L3FWD_LPM_MAX_RULES 1024
#define IPV6_L3FWD_LPM_NUMBER_TBL8S (1 << 16)

static struct rte_lpm *ipv4_l3fwd_lpm_lookup_struct[NB_SOCKETS];
static struct rte_lpm6 *ipv6_l3fwd_lpm_lookup_struct[NB_SOCKETS];

/* Performing LPM-based lookups. 8< */
static inline uint16_t
lpm_get_ipv4_dst_port(const struct rte_ipv4_hdr *ipv4_hdr,
		      uint16_t portid,
		      struct rte_lpm *ipv4_l3fwd_lookup_struct)
{
	uint32_t dst_ip = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
	uint32_t next_hop;

	if (rte_lpm_lookup(ipv4_l3fwd_lookup_struct, dst_ip, &next_hop) == 0)
		return next_hop;
	else
		return portid;
}
/* >8 End of performing LPM-based lookups. */
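
/*
 * Return the LPM next hop for an IPv6 destination address, or the input
 * port when no route matches.
 */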
static inline uint16_t
lpm_get_ipv6_dst_port(const struct rte_ipv6_hdr *ipv6_hdr,
		      uint16_t portid,
		      struct rte_lpm6 *ipv6_l3fwd_lookup_struct)
{
	const uint8_t *dst_ip = ipv6_hdr->dst_addr;
	uint32_t next_hop;

	if (rte_lpm6_lookup(ipv6_l3fwd_lookup_struct, dst_ip, &next_hop) == 0)
		return next_hop;
	else
		return portid;
}
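
/*
 * Resolve the output port for a single mbuf: dispatch on the packet type
 * and run the matching IPv4/IPv6 LPM lookup.
 */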
static __rte_always_inline uint16_t
lpm_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
		uint16_t portid)
{
	struct rte_ipv6_hdr *ipv6_hdr;
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_ether_hdr *eth_hdr;

	if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {

		eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
		ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);

		return lpm_get_ipv4_dst_port(ipv4_hdr, portid,
					     qconf->ipv4_lookup_struct);
	} else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {

		eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
		ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);

		return lpm_get_ipv6_dst_port(ipv6_hdr, portid,
					     qconf->ipv6_lookup_struct);
	}

	return portid;
}

/*
 * Optimized variant of lpm_get_dst_port() for packets whose dst_ipv4 has
 * already been precalculated. If the packet is IPv6, dst_addr is taken
 * directly from the packet header and the dst_ipv4 value is not used.
 */
static __rte_always_inline uint16_t
lpm_get_dst_port_with_ipv4(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
	uint32_t dst_ipv4, uint16_t portid)
{
	uint32_t next_hop;
	struct rte_ipv6_hdr *ipv6_hdr;
	struct rte_ether_hdr *eth_hdr;

	if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
		return (uint16_t) ((rte_lpm_lookup(qconf->ipv4_lookup_struct,
						   dst_ipv4, &next_hop) == 0)
				   ? next_hop : portid);

	} else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {

		eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
		ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);

		return (uint16_t) ((rte_lpm6_lookup(qconf->ipv6_lookup_struct,
				ipv6_hdr->dst_addr, &next_hop) == 0)
				? next_hop : portid);

	}

	return portid;
}

#if defined(RTE_ARCH_X86)
#include "l3fwd_lpm_sse.h"
#elif defined __ARM_NEON
#include "l3fwd_lpm_neon.h"
#elif defined(RTE_ARCH_PPC_64)
#include "l3fwd_lpm_altivec.h"
#else
#include "l3fwd_lpm.h"
#endif

/* main processing loop */
int
lpm_main_loop(__rte_unused void *dummy)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	unsigned int lcore_id;
	uint64_t prev_tsc, diff_tsc, cur_tsc;
	int i, nb_rx;
	uint16_t portid;
	uint8_t queueid;
	struct lcore_conf *qconf;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
		US_PER_S * BURST_TX_DRAIN_US;

	lcore_id = rte_lcore_id();
	qconf = &lcore_conf[lcore_id];

	const uint16_t n_rx_q = qconf->n_rx_queue;
	const uint16_t n_tx_p = qconf->n_tx_port;
	if (n_rx_q == 0) {
		RTE_LOG(INFO, L3FWD, "lcore %u has nothing to do\n", lcore_id);
		return 0;
	}

	RTE_LOG(INFO, L3FWD, "entering main loop on lcore %u\n", lcore_id);

	for (i = 0; i < n_rx_q; i++) {

		portid = qconf->rx_queue_list[i].port_id;
		queueid = qconf->rx_queue_list[i].queue_id;
		RTE_LOG(INFO, L3FWD,
			" -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
			lcore_id, portid, queueid);
	}

	cur_tsc = rte_rdtsc();
	prev_tsc = cur_tsc;

	while (!force_quit) {

		/* TX burst queue drain */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > drain_tsc)) {

			for (i = 0; i < n_tx_p; ++i) {
				portid = qconf->tx_port_id[i];
				if (qconf->tx_mbufs[portid].len == 0)
					continue;
				send_burst(qconf,
					qconf->tx_mbufs[portid].len,
					portid);
				qconf->tx_mbufs[portid].len = 0;
			}

			prev_tsc = cur_tsc;
		}

		/* Read packets from RX queues */
		for (i = 0; i < n_rx_q; ++i) {
			portid = qconf->rx_queue_list[i].port_id;
			queueid = qconf->rx_queue_list[i].queue_id;
			nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst,
				MAX_PKT_BURST);
			if (nb_rx == 0)
				continue;

#if defined RTE_ARCH_X86 || defined __ARM_NEON \
			 || defined RTE_ARCH_PPC_64
			l3fwd_lpm_send_packets(nb_rx, pkts_burst,
						portid, qconf);
#else
			l3fwd_lpm_no_opt_send_packets(nb_rx, pkts_burst,
							portid, qconf);
#endif /* X86 */
		}

		cur_tsc = rte_rdtsc();
	}

	return 0;
}
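
/*
 * Per-packet work for the event-driven paths: resolve the output port via
 * the LPM lookup and rewrite the Ethernet header (on x86/NEON/Altivec
 * builds the rewrite is done inside process_packet()).
 */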
static __rte_always_inline uint16_t
lpm_process_event_pkt(const struct lcore_conf *lconf, struct rte_mbuf *mbuf)
{
	mbuf->port = lpm_get_dst_port(lconf, mbuf, mbuf->port);

#if defined RTE_ARCH_X86 || defined __ARM_NEON \
	|| defined RTE_ARCH_PPC_64
	process_packet(mbuf, &mbuf->port);
#else

	struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(mbuf,
			struct rte_ether_hdr *);
#ifdef DO_RFC_1812_CHECKS
	struct rte_ipv4_hdr *ipv4_hdr;
	if (RTE_ETH_IS_IPV4_HDR(mbuf->packet_type)) {
		/* Handle IPv4 headers.*/
		ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf,
				struct rte_ipv4_hdr *,
				sizeof(struct rte_ether_hdr));

		if (is_valid_ipv4_pkt(ipv4_hdr, mbuf->pkt_len)
				< 0) {
			mbuf->port = BAD_PORT;
			goto send;
		}
		/* Update time to live and header checksum */
		--(ipv4_hdr->time_to_live);
		++(ipv4_hdr->hdr_checksum);
	}
#endif
	/* dst addr */
	*(uint64_t *)&eth_hdr->dst_addr = dest_eth_addr[mbuf->port];

	/* src addr */
	rte_ether_addr_copy(&ports_eth_addr[mbuf->port],
			&eth_hdr->src_addr);
send:
#endif
	return mbuf->port;
}
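
/*
 * Event worker that dequeues and forwards one event at a time; packets
 * resolved to BAD_PORT are freed here.
 */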
static __rte_always_inline void
lpm_event_loop_single(struct l3fwd_event_resources *evt_rsrc,
		const uint8_t flags)
{
	const int event_p_id = l3fwd_get_free_event_port(evt_rsrc);
	const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[
		evt_rsrc->evq.nb_queues - 1];
	const uint8_t event_d_id = evt_rsrc->event_d_id;
	struct lcore_conf *lconf;
	unsigned int lcore_id;
	struct rte_event ev;

	if (event_p_id < 0)
		return;

	lcore_id = rte_lcore_id();
	lconf = &lcore_conf[lcore_id];

	RTE_LOG(INFO, L3FWD, "entering %s on lcore %u\n", __func__, lcore_id);
	while (!force_quit) {
		if (!rte_event_dequeue_burst(event_d_id, event_p_id, &ev, 1, 0))
			continue;

		if (lpm_process_event_pkt(lconf, ev.mbuf) == BAD_PORT) {
			rte_pktmbuf_free(ev.mbuf);
			continue;
		}

		if (flags & L3FWD_EVENT_TX_ENQ) {
			ev.queue_id = tx_q_id;
			ev.op = RTE_EVENT_OP_FORWARD;
			while (rte_event_enqueue_burst(event_d_id, event_p_id,
						&ev, 1) && !force_quit)
				;
		}

		if (flags & L3FWD_EVENT_TX_DIRECT) {
			rte_event_eth_tx_adapter_txq_set(ev.mbuf, 0);
			while (!rte_event_eth_tx_adapter_enqueue(event_d_id,
						event_p_id, &ev, 1, 0) &&
					!force_quit)
				;
		}
	}
}
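
/*
 * Event worker that dequeues up to deq_depth events per iteration and
 * re-enqueues (or hands to the TX adapter) the whole burst.
 */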
static __rte_always_inline void
lpm_event_loop_burst(struct l3fwd_event_resources *evt_rsrc,
		const uint8_t flags)
{
	const int event_p_id = l3fwd_get_free_event_port(evt_rsrc);
	const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[
		evt_rsrc->evq.nb_queues - 1];
	const uint8_t event_d_id = evt_rsrc->event_d_id;
	const uint16_t deq_len = evt_rsrc->deq_depth;
	struct rte_event events[MAX_PKT_BURST];
	struct lcore_conf *lconf;
	unsigned int lcore_id;
	int i, nb_enq, nb_deq;

	if (event_p_id < 0)
		return;

	lcore_id = rte_lcore_id();

	lconf = &lcore_conf[lcore_id];

	RTE_LOG(INFO, L3FWD, "entering %s on lcore %u\n", __func__, lcore_id);

	while (!force_quit) {
		/* Read events from RX queues */
		nb_deq = rte_event_dequeue_burst(event_d_id, event_p_id,
				events, deq_len, 0);
		if (nb_deq == 0) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_deq; i++) {
			if (flags & L3FWD_EVENT_TX_ENQ) {
				events[i].queue_id = tx_q_id;
				events[i].op = RTE_EVENT_OP_FORWARD;
			}

			if (flags & L3FWD_EVENT_TX_DIRECT)
				rte_event_eth_tx_adapter_txq_set(events[i].mbuf,
								 0);

			lpm_process_event_pkt(lconf, events[i].mbuf);
		}

		if (flags & L3FWD_EVENT_TX_ENQ) {
			nb_enq = rte_event_enqueue_burst(event_d_id, event_p_id,
					events, nb_deq);
			while (nb_enq < nb_deq && !force_quit)
				nb_enq += rte_event_enqueue_burst(event_d_id,
						event_p_id, events + nb_enq,
						nb_deq - nb_enq);
		}

		if (flags & L3FWD_EVENT_TX_DIRECT) {
			nb_enq = rte_event_eth_tx_adapter_enqueue(event_d_id,
					event_p_id, events, nb_deq, 0);
			while (nb_enq < nb_deq && !force_quit)
				nb_enq += rte_event_eth_tx_adapter_enqueue(
						event_d_id, event_p_id,
						events + nb_enq,
						nb_deq - nb_enq, 0);
		}
	}
}
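
/* Dispatch to the single-event or burst worker according to the mode flags. */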
static __rte_always_inline void
lpm_event_loop(struct l3fwd_event_resources *evt_rsrc,
		 const uint8_t flags)
{
	if (flags & L3FWD_EVENT_SINGLE)
		lpm_event_loop_single(evt_rsrc, flags);
	if (flags & L3FWD_EVENT_BURST)
		lpm_event_loop_burst(evt_rsrc, flags);
}
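
/*
 * Worker entry points for eventdev mode, one per combination of TX mode
 * (direct TX adapter enqueue vs. forward to a TX queue) and dequeue mode
 * (single event vs. burst).
 */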
int __rte_noinline
lpm_event_main_loop_tx_d(__rte_unused void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc =
					l3fwd_get_eventdev_rsrc();

	lpm_event_loop(evt_rsrc, L3FWD_EVENT_TX_DIRECT | L3FWD_EVENT_SINGLE);
	return 0;
}

int __rte_noinline
lpm_event_main_loop_tx_d_burst(__rte_unused void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc =
					l3fwd_get_eventdev_rsrc();

	lpm_event_loop(evt_rsrc, L3FWD_EVENT_TX_DIRECT | L3FWD_EVENT_BURST);
	return 0;
}

int __rte_noinline
lpm_event_main_loop_tx_q(__rte_unused void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc =
					l3fwd_get_eventdev_rsrc();

	lpm_event_loop(evt_rsrc, L3FWD_EVENT_TX_ENQ | L3FWD_EVENT_SINGLE);
	return 0;
}

int __rte_noinline
lpm_event_main_loop_tx_q_burst(__rte_unused void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc =
					l3fwd_get_eventdev_rsrc();

	lpm_event_loop(evt_rsrc, L3FWD_EVENT_TX_ENQ | L3FWD_EVENT_BURST);
	return 0;
}
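
/*
 * Run the per-packet LPM processing over every mbuf of an event vector and
 * keep the vector's port attribute consistent with the resolved ports.
 */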
static __rte_always_inline void
lpm_process_event_vector(struct rte_event_vector *vec, struct lcore_conf *lconf)
{
	struct rte_mbuf **mbufs = vec->mbufs;
	int i;

	/* Process first packet to init vector attributes */
	lpm_process_event_pkt(lconf, mbufs[0]);
	if (vec->attr_valid) {
		if (mbufs[0]->port != BAD_PORT)
			vec->port = mbufs[0]->port;
		else
			vec->attr_valid = 0;
	}

	for (i = 1; i < vec->nb_elem; i++) {
		lpm_process_event_pkt(lconf, mbufs[i]);
		event_vector_attr_validate(vec, mbufs[i]);
	}
}

/* Same eventdev loop for single and burst of vector */
static __rte_always_inline void
lpm_event_loop_vector(struct l3fwd_event_resources *evt_rsrc,
		      const uint8_t flags)
{
	const int event_p_id = l3fwd_get_free_event_port(evt_rsrc);
	const uint8_t tx_q_id =
		evt_rsrc->evq.event_q_id[evt_rsrc->evq.nb_queues - 1];
	const uint8_t event_d_id = evt_rsrc->event_d_id;
	const uint16_t deq_len = evt_rsrc->deq_depth;
	struct rte_event events[MAX_PKT_BURST];
	struct lcore_conf *lconf;
	unsigned int lcore_id;
	int i, nb_enq, nb_deq;

	if (event_p_id < 0)
		return;

	lcore_id = rte_lcore_id();
	lconf = &lcore_conf[lcore_id];

	RTE_LOG(INFO, L3FWD, "entering %s on lcore %u\n", __func__, lcore_id);

	while (!force_quit) {
		/* Read events from RX queues */
		nb_deq = rte_event_dequeue_burst(event_d_id, event_p_id, events,
						 deq_len, 0);
		if (nb_deq == 0) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_deq; i++) {
			if (flags & L3FWD_EVENT_TX_ENQ) {
				events[i].queue_id = tx_q_id;
				events[i].op = RTE_EVENT_OP_FORWARD;
			}

			lpm_process_event_vector(events[i].vec, lconf);

			if (flags & L3FWD_EVENT_TX_DIRECT)
				event_vector_txq_set(events[i].vec, 0);
		}

		if (flags & L3FWD_EVENT_TX_ENQ) {
			nb_enq = rte_event_enqueue_burst(event_d_id, event_p_id,
							 events, nb_deq);
			while (nb_enq < nb_deq && !force_quit)
				nb_enq += rte_event_enqueue_burst(
					event_d_id, event_p_id, events + nb_enq,
					nb_deq - nb_enq);
		}

		if (flags & L3FWD_EVENT_TX_DIRECT) {
			nb_enq = rte_event_eth_tx_adapter_enqueue(
				event_d_id, event_p_id, events, nb_deq, 0);
			while (nb_enq < nb_deq && !force_quit)
				nb_enq += rte_event_eth_tx_adapter_enqueue(
					event_d_id, event_p_id, events + nb_enq,
					nb_deq - nb_enq, 0);
		}
	}
}
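
/* Worker entry points for event vector mode, one per TX mode. */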
int __rte_noinline
lpm_event_main_loop_tx_d_vector(__rte_unused void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();

	lpm_event_loop_vector(evt_rsrc, L3FWD_EVENT_TX_DIRECT);
	return 0;
}

int __rte_noinline
lpm_event_main_loop_tx_d_burst_vector(__rte_unused void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();

	lpm_event_loop_vector(evt_rsrc, L3FWD_EVENT_TX_DIRECT);
	return 0;
}

int __rte_noinline
lpm_event_main_loop_tx_q_vector(__rte_unused void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();

	lpm_event_loop_vector(evt_rsrc, L3FWD_EVENT_TX_ENQ);
	return 0;
}

int __rte_noinline
lpm_event_main_loop_tx_q_burst_vector(__rte_unused void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();

	lpm_event_loop_vector(evt_rsrc, L3FWD_EVENT_TX_ENQ);
	return 0;
}
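
/*
 * Create and populate the IPv4 and IPv6 LPM tables for one NUMA socket from
 * the static route arrays; typically called once per enabled socket during
 * initialization.
 */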
void
setup_lpm(const int socketid)
{
	struct rte_lpm6_config config;
	struct rte_lpm_config config_ipv4;
	unsigned int i;
	int ret;
	char s[64];
	char abuf[INET6_ADDRSTRLEN];

	/* create the LPM table */
	config_ipv4.max_rules = IPV4_L3FWD_LPM_MAX_RULES;
	config_ipv4.number_tbl8s = IPV4_L3FWD_LPM_NUMBER_TBL8S;
	config_ipv4.flags = 0;
	snprintf(s, sizeof(s), "IPV4_L3FWD_LPM_%d", socketid);
	ipv4_l3fwd_lpm_lookup_struct[socketid] =
			rte_lpm_create(s, socketid, &config_ipv4);
	if (ipv4_l3fwd_lpm_lookup_struct[socketid] == NULL)
		rte_exit(EXIT_FAILURE,
			"Unable to create the l3fwd LPM table on socket %d\n",
			socketid);

	/* populate the LPM table */
	for (i = 0; i < RTE_DIM(ipv4_l3fwd_route_array); i++) {
		struct in_addr in;

		/* skip unused ports */
		if ((1 << ipv4_l3fwd_route_array[i].if_out &
				enabled_port_mask) == 0)
			continue;

		ret = rte_lpm_add(ipv4_l3fwd_lpm_lookup_struct[socketid],
			ipv4_l3fwd_route_array[i].ip,
			ipv4_l3fwd_route_array[i].depth,
			ipv4_l3fwd_route_array[i].if_out);

		if (ret < 0) {
			rte_exit(EXIT_FAILURE,
				"Unable to add entry %u to the l3fwd LPM table on socket %d\n",
				i, socketid);
		}

		in.s_addr = htonl(ipv4_l3fwd_route_array[i].ip);
		printf("LPM: Adding route %s / %d (%d)\n",
		       inet_ntop(AF_INET, &in, abuf, sizeof(abuf)),
		       ipv4_l3fwd_route_array[i].depth,
		       ipv4_l3fwd_route_array[i].if_out);
	}

	/* create the LPM6 table */
	snprintf(s, sizeof(s), "IPV6_L3FWD_LPM_%d", socketid);

	config.max_rules = IPV6_L3FWD_LPM_MAX_RULES;
	config.number_tbl8s = IPV6_L3FWD_LPM_NUMBER_TBL8S;
	config.flags = 0;
	ipv6_l3fwd_lpm_lookup_struct[socketid] = rte_lpm6_create(s, socketid,
				&config);
	if (ipv6_l3fwd_lpm_lookup_struct[socketid] == NULL)
		rte_exit(EXIT_FAILURE,
			"Unable to create the l3fwd LPM table on socket %d\n",
			socketid);

	/* populate the LPM table */
	for (i = 0; i < RTE_DIM(ipv6_l3fwd_route_array); i++) {

		/* skip unused ports */
		if ((1 << ipv6_l3fwd_route_array[i].if_out &
				enabled_port_mask) == 0)
			continue;

		ret = rte_lpm6_add(ipv6_l3fwd_lpm_lookup_struct[socketid],
			ipv6_l3fwd_route_array[i].ip,
			ipv6_l3fwd_route_array[i].depth,
			ipv6_l3fwd_route_array[i].if_out);

		if (ret < 0) {
			rte_exit(EXIT_FAILURE,
				"Unable to add entry %u to the l3fwd LPM table on socket %d\n",
				i, socketid);
		}

		printf("LPM: Adding route %s / %d (%d)\n",
		       inet_ntop(AF_INET6, ipv6_l3fwd_route_array[i].ip,
				 abuf, sizeof(abuf)),
		       ipv6_l3fwd_route_array[i].depth,
		       ipv6_l3fwd_route_array[i].if_out);
	}
}
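
/*
 * Check whether the port can report IPv4 and IPv6 L3 packet types; returns 1
 * only if both are supported, so the caller can decide whether the software
 * ptype parser below is needed.
 */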
int
lpm_check_ptype(int portid)
{
	int i, ret;
	int ptype_l3_ipv4 = 0, ptype_l3_ipv6 = 0;
	uint32_t ptype_mask = RTE_PTYPE_L3_MASK;

	ret = rte_eth_dev_get_supported_ptypes(portid, ptype_mask, NULL, 0);
	if (ret <= 0)
		return 0;

	uint32_t ptypes[ret];

	ret = rte_eth_dev_get_supported_ptypes(portid, ptype_mask, ptypes, ret);
	for (i = 0; i < ret; ++i) {
		if (ptypes[i] & RTE_PTYPE_L3_IPV4)
			ptype_l3_ipv4 = 1;
		if (ptypes[i] & RTE_PTYPE_L3_IPV6)
			ptype_l3_ipv6 = 1;
	}

	if (ptype_l3_ipv4 == 0)
		printf("port %d cannot parse RTE_PTYPE_L3_IPV4\n", portid);

	if (ptype_l3_ipv6 == 0)
		printf("port %d cannot parse RTE_PTYPE_L3_IPV6\n", portid);

	if (ptype_l3_ipv4 && ptype_l3_ipv6)
		return 1;

	return 0;
}
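
/* Software fallback: derive a minimal L3 packet type from the Ethernet type. */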
static inline void
lpm_parse_ptype(struct rte_mbuf *m)
{
	struct rte_ether_hdr *eth_hdr;
	uint32_t packet_type = RTE_PTYPE_UNKNOWN;
	uint16_t ether_type;

	eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
	ether_type = eth_hdr->ether_type;
	if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4))
		packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
	else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6))
		packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;

	m->packet_type = packet_type;
}
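
/*
 * RX callback wrapping lpm_parse_ptype() for a burst of packets; installed
 * when the PMD does not provide the needed packet types (--parse-ptype).
 */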
uint16_t
lpm_cb_parse_ptype(uint16_t port __rte_unused, uint16_t queue __rte_unused,
		   struct rte_mbuf *pkts[], uint16_t nb_pkts,
		   uint16_t max_pkts __rte_unused,
		   void *user_param __rte_unused)
{
	unsigned int i;

	if (unlikely(nb_pkts == 0))
		return nb_pkts;
	rte_prefetch0(rte_pktmbuf_mtod(pkts[0], struct rte_ether_hdr *));
	for (i = 0; i < (unsigned int) (nb_pkts - 1); ++i) {
		rte_prefetch0(rte_pktmbuf_mtod(pkts[i+1],
			struct rte_ether_hdr *));
		lpm_parse_ptype(pkts[i]);
	}
	lpm_parse_ptype(pkts[i]);

	return nb_pkts;
}

/* Return ipv4/ipv6 lpm fwd lookup struct. */
void *
lpm_get_ipv4_l3fwd_lookup_struct(const int socketid)
{
	return ipv4_l3fwd_lpm_lookup_struct[socketid];
}

void *
lpm_get_ipv6_l3fwd_lookup_struct(const int socketid)
{
	return ipv6_l3fwd_lpm_lookup_struct[socketid];
}