/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <arpa/inet.h>

#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_cycles.h>
#include <rte_mbuf.h>
#include <rte_ip.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>

#include "l3fwd.h"
#include "l3fwd_event.h"
struct ipv4_l3fwd_lpm_route {
	uint32_t ip;
	uint8_t  depth;
	uint8_t  if_out;
};
struct ipv6_l3fwd_lpm_route {
	uint8_t ip[16];
	uint8_t depth;
	uint8_t if_out;
};
/* 198.18.0.0/15 is set aside for RFC 2544 benchmarking (RFC 5735). */
static struct ipv4_l3fwd_lpm_route ipv4_l3fwd_lpm_route_array[] = {
	{RTE_IPV4(198, 18, 0, 0), 24, 0},
	{RTE_IPV4(198, 18, 1, 0), 24, 1},
	{RTE_IPV4(198, 18, 2, 0), 24, 2},
	{RTE_IPV4(198, 18, 3, 0), 24, 3},
	{RTE_IPV4(198, 18, 4, 0), 24, 4},
	{RTE_IPV4(198, 18, 5, 0), 24, 5},
	{RTE_IPV4(198, 18, 6, 0), 24, 6},
	{RTE_IPV4(198, 18, 7, 0), 24, 7},
};
/* 2001:0200::/48 is an IANA-reserved range for IPv6 benchmarking (RFC 5180). */
static struct ipv6_l3fwd_lpm_route ipv6_l3fwd_lpm_route_array[] = {
	/*
	 * Depth 80 so that the varying tenth byte is part of the prefix;
	 * at /48 all eight entries would collapse into a single route and
	 * each rte_lpm6_add() call would overwrite the previous next hop.
	 */
	{{32, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 80, 0},
	{{32, 1, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0}, 80, 1},
	{{32, 1, 2, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0}, 80, 2},
	{{32, 1, 2, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0}, 80, 3},
	{{32, 1, 2, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0}, 80, 4},
	{{32, 1, 2, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0}, 80, 5},
	{{32, 1, 2, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0}, 80, 6},
	{{32, 1, 2, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0}, 80, 7},
};
#define IPV4_L3FWD_LPM_NUM_ROUTES \
	(sizeof(ipv4_l3fwd_lpm_route_array) / sizeof(ipv4_l3fwd_lpm_route_array[0]))
#define IPV6_L3FWD_LPM_NUM_ROUTES \
	(sizeof(ipv6_l3fwd_lpm_route_array) / sizeof(ipv6_l3fwd_lpm_route_array[0]))
#define IPV4_L3FWD_LPM_MAX_RULES 1024
#define IPV4_L3FWD_LPM_NUMBER_TBL8S (1 << 8)
#define IPV6_L3FWD_LPM_MAX_RULES 1024
#define IPV6_L3FWD_LPM_NUMBER_TBL8S (1 << 16)
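
/*
 * Sizing note: rte_lpm resolves the first 24 bits of an IPv4 address in a
 * single table and allocates one tbl8 group per /24 that carries rules
 * deeper than 24 bits, so (1 << 8) groups bound how many such prefixes can
 * coexist. The IPv6 trie walks 8 bits at a time past the initial 24, which
 * is why it needs far more groups (1 << 16). These bounds only have to
 * cover the small static route arrays above plus headroom for extra rules.
 */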
struct rte_lpm *ipv4_l3fwd_lpm_lookup_struct[NB_SOCKETS];
struct rte_lpm6 *ipv6_l3fwd_lpm_lookup_struct[NB_SOCKETS];
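
/*
 * One lookup table per NUMA socket: each lcore resolves routes from the
 * table allocated on its own socket, so LPM lookups never touch remote
 * memory on multi-socket machines.
 */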
static inline uint16_t
lpm_get_ipv4_dst_port(void *ipv4_hdr, uint16_t portid, void *lookup_struct)
{
	uint32_t next_hop;
	struct rte_lpm *ipv4_l3fwd_lookup_struct =
		(struct rte_lpm *)lookup_struct;

	return (uint16_t) ((rte_lpm_lookup(ipv4_l3fwd_lookup_struct,
		rte_be_to_cpu_32(((struct rte_ipv4_hdr *)ipv4_hdr)->dst_addr),
		&next_hop) == 0) ? next_hop : portid);
}
static inline uint16_t
lpm_get_ipv6_dst_port(void *ipv6_hdr, uint16_t portid, void *lookup_struct)
{
	uint32_t next_hop;
	struct rte_lpm6 *ipv6_l3fwd_lookup_struct =
		(struct rte_lpm6 *)lookup_struct;

	return (uint16_t) ((rte_lpm6_lookup(ipv6_l3fwd_lookup_struct,
		((struct rte_ipv6_hdr *)ipv6_hdr)->dst_addr,
		&next_hop) == 0) ? next_hop : portid);
}
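
/*
 * Resolve the output port from the packet's L3 header. Non-IP packets, and
 * lookup misses, fall back to the input port, i.e. the packet is sent back
 * out the port it arrived on.
 */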
static __rte_always_inline uint16_t
lpm_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
		uint16_t portid)
{
	struct rte_ipv6_hdr *ipv6_hdr;
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_ether_hdr *eth_hdr;

	if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
		eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
		ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);

		return lpm_get_ipv4_dst_port(ipv4_hdr, portid,
					     qconf->ipv4_lookup_struct);
	} else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {
		eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
		ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);

		return lpm_get_ipv6_dst_port(ipv6_hdr, portid,
					     qconf->ipv6_lookup_struct);
	}

	return portid;
}
/*
 * Optimized variant of lpm_get_dst_port for packets whose dst_ipv4 has
 * already been extracted. For IPv6 packets, dst_addr is read directly from
 * the packet header and the dst_ipv4 value is ignored.
 */
static __rte_always_inline uint16_t
lpm_get_dst_port_with_ipv4(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
	uint32_t dst_ipv4, uint16_t portid)
{
	uint32_t next_hop;
	struct rte_ipv6_hdr *ipv6_hdr;
	struct rte_ether_hdr *eth_hdr;

	if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
		return (uint16_t) ((rte_lpm_lookup(qconf->ipv4_lookup_struct,
						   dst_ipv4, &next_hop) == 0)
				   ? next_hop : portid);
	} else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {
		eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
		ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);

		return (uint16_t) ((rte_lpm6_lookup(qconf->ipv6_lookup_struct,
				ipv6_hdr->dst_addr, &next_hop) == 0)
				? next_hop : portid);
	}

	return portid;
}
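
/*
 * Pick a burst send path: the SSE, NEON and AltiVec headers process packets
 * in small groups using vector lookup helpers, while the plain l3fwd_lpm.h
 * fallback handles one packet per iteration.
 */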
#if defined(RTE_ARCH_X86)
#include "l3fwd_lpm_sse.h"
#elif defined(RTE_MACHINE_CPUFLAG_NEON)
#include "l3fwd_lpm_neon.h"
#elif defined(RTE_ARCH_PPC_64)
#include "l3fwd_lpm_altivec.h"
#else
#include "l3fwd_lpm.h"
#endif
/* main processing loop */
int
lpm_main_loop(__attribute__((unused)) void *dummy)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	unsigned int lcore_id;
	uint64_t prev_tsc, diff_tsc, cur_tsc;
	int i, nb_rx;
	uint16_t portid;
	uint8_t queueid;
	struct lcore_conf *qconf;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
		US_PER_S * BURST_TX_DRAIN_US;

	prev_tsc = 0;
	lcore_id = rte_lcore_id();
	qconf = &lcore_conf[lcore_id];

	if (qconf->n_rx_queue == 0) {
		RTE_LOG(INFO, L3FWD, "lcore %u has nothing to do\n", lcore_id);
		return 0;
	}

	RTE_LOG(INFO, L3FWD, "entering main loop on lcore %u\n", lcore_id);

	for (i = 0; i < qconf->n_rx_queue; i++) {
		portid = qconf->rx_queue_list[i].port_id;
		queueid = qconf->rx_queue_list[i].queue_id;
		RTE_LOG(INFO, L3FWD,
			" -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
			lcore_id, portid, queueid);
	}

	while (!force_quit) {
		cur_tsc = rte_rdtsc();

		/* TX burst queue drain */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > drain_tsc)) {
			for (i = 0; i < qconf->n_tx_port; ++i) {
				portid = qconf->tx_port_id[i];
				if (qconf->tx_mbufs[portid].len == 0)
					continue;
				send_burst(qconf,
					qconf->tx_mbufs[portid].len,
					portid);
				qconf->tx_mbufs[portid].len = 0;
			}

			prev_tsc = cur_tsc;
		}

		/* Read packets from RX queues */
		for (i = 0; i < qconf->n_rx_queue; ++i) {
			portid = qconf->rx_queue_list[i].port_id;
			queueid = qconf->rx_queue_list[i].queue_id;
			nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst,
				MAX_PKT_BURST);
			if (nb_rx == 0)
				continue;

#if defined RTE_ARCH_X86 || defined RTE_MACHINE_CPUFLAG_NEON \
			 || defined RTE_ARCH_PPC_64
			l3fwd_lpm_send_packets(nb_rx, pkts_burst,
						portid, qconf);
#else
			l3fwd_lpm_no_opt_send_packets(nb_rx, pkts_burst,
							portid, qconf);
#endif /* X86 */
		}
	}

	return 0;
}
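
/*
 * Per-event packet processing shared by the eventdev loops below: resolve
 * the destination port, rewrite the Ethernet header, and report BAD_PORT
 * for packets that fail validation so the caller can drop them.
 */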
static __rte_always_inline uint16_t
lpm_process_event_pkt(const struct lcore_conf *lconf, struct rte_mbuf *mbuf)
{
	mbuf->port = lpm_get_dst_port(lconf, mbuf, mbuf->port);

#if defined RTE_ARCH_X86 || defined RTE_MACHINE_CPUFLAG_NEON \
	|| defined RTE_ARCH_PPC_64
	process_packet(mbuf, &mbuf->port);
#else
	struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(mbuf,
			struct rte_ether_hdr *);

#ifdef DO_RFC_1812_CHECKS
	struct rte_ipv4_hdr *ipv4_hdr;
	if (RTE_ETH_IS_IPV4_HDR(mbuf->packet_type)) {
		/* Handle IPv4 headers. */
		ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf,
				struct rte_ipv4_hdr *,
				sizeof(struct rte_ether_hdr));

		if (is_valid_ipv4_pkt(ipv4_hdr, mbuf->pkt_len)
				< 0) {
			mbuf->port = BAD_PORT;
		} else {
			/* Update time to live and header checksum */
			--(ipv4_hdr->time_to_live);
			++(ipv4_hdr->hdr_checksum);
		}
	}
#endif
	/* dst addr */
	*(uint64_t *)&eth_hdr->d_addr = dest_eth_addr[mbuf->port];

	/* src addr */
	rte_ether_addr_copy(&ports_eth_addr[mbuf->port],
			&eth_hdr->s_addr);
#endif
	return mbuf->port;
}
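
/*
 * Single-event dequeue loop: events are pulled one at a time. Depending on
 * the flags, a processed event is either forwarded to the TX queue served
 * by the TX adapter (L3FWD_EVENT_TX_ENQ) or handed to the adapter directly
 * (L3FWD_EVENT_TX_DIRECT).
 */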
static __rte_always_inline void
lpm_event_loop_single(struct l3fwd_event_resources *evt_rsrc,
		const uint8_t flags)
{
	const int event_p_id = l3fwd_get_free_event_port(evt_rsrc);
	const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[
		evt_rsrc->evq.nb_queues - 1];
	const uint8_t event_d_id = evt_rsrc->event_d_id;
	struct lcore_conf *lconf;
	unsigned int lcore_id;
	struct rte_event ev;

	if (event_p_id < 0)
		return;

	lcore_id = rte_lcore_id();
	lconf = &lcore_conf[lcore_id];

	RTE_LOG(INFO, L3FWD, "entering %s on lcore %u\n", __func__, lcore_id);
	while (!force_quit) {
		if (!rte_event_dequeue_burst(event_d_id, event_p_id, &ev, 1, 0))
			continue;

		if (lpm_process_event_pkt(lconf, ev.mbuf) == BAD_PORT) {
			rte_pktmbuf_free(ev.mbuf);
			continue;
		}

		if (flags & L3FWD_EVENT_TX_ENQ) {
			ev.queue_id = tx_q_id;
			ev.op = RTE_EVENT_OP_FORWARD;
			/* Retry until the event is accepted or quit. */
			while (!rte_event_enqueue_burst(event_d_id, event_p_id,
					&ev, 1) && !force_quit)
				;
		}

		if (flags & L3FWD_EVENT_TX_DIRECT) {
			rte_event_eth_tx_adapter_txq_set(ev.mbuf, 0);
			while (!rte_event_eth_tx_adapter_enqueue(event_d_id,
					event_p_id, &ev, 1, 0) &&
					!force_quit)
				;
		}
	}
}
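
/*
 * Burst variant of the loop above: dequeues up to deq_depth events at a
 * time and retries partial enqueues until the whole burst has been pushed
 * out or a shutdown is requested.
 */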
static __rte_always_inline void
lpm_event_loop_burst(struct l3fwd_event_resources *evt_rsrc,
		const uint8_t flags)
{
	const int event_p_id = l3fwd_get_free_event_port(evt_rsrc);
	const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[
		evt_rsrc->evq.nb_queues - 1];
	const uint8_t event_d_id = evt_rsrc->event_d_id;
	const uint16_t deq_len = evt_rsrc->deq_depth;
	struct rte_event events[MAX_PKT_BURST];
	struct lcore_conf *lconf;
	unsigned int lcore_id;
	int i, nb_enq, nb_deq;

	if (event_p_id < 0)
		return;

	lcore_id = rte_lcore_id();
	lconf = &lcore_conf[lcore_id];

	RTE_LOG(INFO, L3FWD, "entering %s on lcore %u\n", __func__, lcore_id);

	while (!force_quit) {
		/* Read events from RX queues */
		nb_deq = rte_event_dequeue_burst(event_d_id, event_p_id,
				events, deq_len, 0);
		if (nb_deq == 0) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_deq; i++) {
			if (flags & L3FWD_EVENT_TX_ENQ) {
				events[i].queue_id = tx_q_id;
				events[i].op = RTE_EVENT_OP_FORWARD;
			}

			if (flags & L3FWD_EVENT_TX_DIRECT)
				rte_event_eth_tx_adapter_txq_set(events[i].mbuf,
								 0);

			lpm_process_event_pkt(lconf, events[i].mbuf);
		}

		if (flags & L3FWD_EVENT_TX_ENQ) {
			nb_enq = rte_event_enqueue_burst(event_d_id, event_p_id,
					events, nb_deq);
			while (nb_enq < nb_deq && !force_quit)
				nb_enq += rte_event_enqueue_burst(event_d_id,
						event_p_id, events + nb_enq,
						nb_deq - nb_enq);
		}

		if (flags & L3FWD_EVENT_TX_DIRECT) {
			nb_enq = rte_event_eth_tx_adapter_enqueue(event_d_id,
					event_p_id, events, nb_deq, 0);
			while (nb_enq < nb_deq && !force_quit)
				nb_enq += rte_event_eth_tx_adapter_enqueue(
						event_d_id, event_p_id,
						events + nb_enq,
						nb_deq - nb_enq, 0);
		}
	}
}
static __rte_always_inline void
lpm_event_loop(struct l3fwd_event_resources *evt_rsrc,
		const uint8_t flags)
{
	if (flags & L3FWD_EVENT_SINGLE)
		lpm_event_loop_single(evt_rsrc, flags);
	if (flags & L3FWD_EVENT_BURST)
		lpm_event_loop_burst(evt_rsrc, flags);
}

int
lpm_event_main_loop_tx_d(__attribute__((unused)) void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc =
			l3fwd_get_eventdev_rsrc();
	lpm_event_loop(evt_rsrc, L3FWD_EVENT_TX_DIRECT | L3FWD_EVENT_SINGLE);
	return 0;
}

int
lpm_event_main_loop_tx_d_burst(__attribute__((unused)) void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc =
			l3fwd_get_eventdev_rsrc();
	lpm_event_loop(evt_rsrc, L3FWD_EVENT_TX_DIRECT | L3FWD_EVENT_BURST);
	return 0;
}

int
lpm_event_main_loop_tx_q(__attribute__((unused)) void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc =
			l3fwd_get_eventdev_rsrc();
	lpm_event_loop(evt_rsrc, L3FWD_EVENT_TX_ENQ | L3FWD_EVENT_SINGLE);
	return 0;
}

int
lpm_event_main_loop_tx_q_burst(__attribute__((unused)) void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc =
			l3fwd_get_eventdev_rsrc();
	lpm_event_loop(evt_rsrc, L3FWD_EVENT_TX_ENQ | L3FWD_EVENT_BURST);
	return 0;
}
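
/*
 * Create and populate the IPv4/IPv6 LPM tables for one socket. A minimal
 * sketch of the round trip these tables serve (assuming socket 0 and the
 * relevant ports set in enabled_port_mask):
 *
 *	uint32_t next_hop;
 *	setup_lpm(0);
 *	if (rte_lpm_lookup(ipv4_l3fwd_lpm_lookup_struct[0],
 *			RTE_IPV4(198, 18, 0, 10), &next_hop) == 0)
 *		printf("forward out port %u\n", next_hop);
 *
 * Note that rte_lpm_lookup() takes the address in host byte order, which is
 * why lpm_get_ipv4_dst_port() converts with rte_be_to_cpu_32() first.
 */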
void
setup_lpm(const int socketid)
{
	struct rte_lpm6_config config;
	struct rte_lpm_config config_ipv4;
	unsigned int i;
	int ret;
	char s[64];
	char abuf[INET6_ADDRSTRLEN];

	/* create the LPM table */
	config_ipv4.max_rules = IPV4_L3FWD_LPM_MAX_RULES;
	config_ipv4.number_tbl8s = IPV4_L3FWD_LPM_NUMBER_TBL8S;
	config_ipv4.flags = 0;
	snprintf(s, sizeof(s), "IPV4_L3FWD_LPM_%d", socketid);
	ipv4_l3fwd_lpm_lookup_struct[socketid] =
			rte_lpm_create(s, socketid, &config_ipv4);
	if (ipv4_l3fwd_lpm_lookup_struct[socketid] == NULL)
		rte_exit(EXIT_FAILURE,
			"Unable to create the l3fwd LPM table on socket %d\n",
			socketid);

	/* populate the LPM table */
	for (i = 0; i < IPV4_L3FWD_LPM_NUM_ROUTES; i++) {
		struct in_addr in;

		/* skip unused ports */
		if (((1 << ipv4_l3fwd_lpm_route_array[i].if_out) &
				enabled_port_mask) == 0)
			continue;

		ret = rte_lpm_add(ipv4_l3fwd_lpm_lookup_struct[socketid],
			ipv4_l3fwd_lpm_route_array[i].ip,
			ipv4_l3fwd_lpm_route_array[i].depth,
			ipv4_l3fwd_lpm_route_array[i].if_out);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"Unable to add entry %u to the l3fwd LPM table on socket %d\n",
				i, socketid);

		in.s_addr = htonl(ipv4_l3fwd_lpm_route_array[i].ip);
		printf("LPM: Adding route %s / %d (%d)\n",
		       inet_ntop(AF_INET, &in, abuf, sizeof(abuf)),
		       ipv4_l3fwd_lpm_route_array[i].depth,
		       ipv4_l3fwd_lpm_route_array[i].if_out);
	}

	/* create the LPM6 table */
	snprintf(s, sizeof(s), "IPV6_L3FWD_LPM_%d", socketid);
	config.max_rules = IPV6_L3FWD_LPM_MAX_RULES;
	config.number_tbl8s = IPV6_L3FWD_LPM_NUMBER_TBL8S;
	config.flags = 0;
	ipv6_l3fwd_lpm_lookup_struct[socketid] = rte_lpm6_create(s, socketid,
				&config);
	if (ipv6_l3fwd_lpm_lookup_struct[socketid] == NULL)
		rte_exit(EXIT_FAILURE,
			"Unable to create the l3fwd LPM6 table on socket %d\n",
			socketid);

	/* populate the LPM6 table */
	for (i = 0; i < IPV6_L3FWD_LPM_NUM_ROUTES; i++) {

		/* skip unused ports */
		if (((1 << ipv6_l3fwd_lpm_route_array[i].if_out) &
				enabled_port_mask) == 0)
			continue;

		ret = rte_lpm6_add(ipv6_l3fwd_lpm_lookup_struct[socketid],
			ipv6_l3fwd_lpm_route_array[i].ip,
			ipv6_l3fwd_lpm_route_array[i].depth,
			ipv6_l3fwd_lpm_route_array[i].if_out);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"Unable to add entry %u to the l3fwd LPM6 table on socket %d\n",
				i, socketid);

		printf("LPM: Adding route %s / %d (%d)\n",
		       inet_ntop(AF_INET6, ipv6_l3fwd_lpm_route_array[i].ip,
				 abuf, sizeof(abuf)),
		       ipv6_l3fwd_lpm_route_array[i].depth,
		       ipv6_l3fwd_lpm_route_array[i].if_out);
	}
}
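
/*
 * Check whether the port's driver reports the L3 packet types this example
 * relies on (see RTE_ETH_IS_IPV4_HDR/RTE_ETH_IS_IPV6_HDR above). Returns 1
 * when both IPv4 and IPv6 are classified in hardware; otherwise the
 * software classifier below must be installed as an RX callback.
 */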
int
lpm_check_ptype(int portid)
{
	int i, ret;
	int ptype_l3_ipv4 = 0, ptype_l3_ipv6 = 0;
	uint32_t ptype_mask = RTE_PTYPE_L3_MASK;

	ret = rte_eth_dev_get_supported_ptypes(portid, ptype_mask, NULL, 0);
	if (ret <= 0)
		return 0;

	uint32_t ptypes[ret];

	ret = rte_eth_dev_get_supported_ptypes(portid, ptype_mask, ptypes, ret);
	for (i = 0; i < ret; ++i) {
		if (ptypes[i] & RTE_PTYPE_L3_IPV4)
			ptype_l3_ipv4 = 1;
		if (ptypes[i] & RTE_PTYPE_L3_IPV6)
			ptype_l3_ipv6 = 1;
	}

	if (ptype_l3_ipv4 == 0)
		printf("port %d cannot parse RTE_PTYPE_L3_IPV4\n", portid);

	if (ptype_l3_ipv6 == 0)
		printf("port %d cannot parse RTE_PTYPE_L3_IPV6\n", portid);

	if (ptype_l3_ipv4 && ptype_l3_ipv6)
		return 1;

	return 0;
}
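
/*
 * Software fallback classifier: inspect the Ethernet type and set just
 * enough of mbuf->packet_type for the L3 dispatch above. VLAN-tagged and
 * tunnelled frames are left as RTE_PTYPE_UNKNOWN by this minimal parser.
 */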
static inline void
lpm_parse_ptype(struct rte_mbuf *m)
{
	struct rte_ether_hdr *eth_hdr;
	uint32_t packet_type = RTE_PTYPE_UNKNOWN;
	uint16_t ether_type;

	eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
	ether_type = eth_hdr->ether_type;
	if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4))
		packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
	else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6))
		packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;

	m->packet_type = packet_type;
}
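
/*
 * RX callback wrapper around lpm_parse_ptype(). A minimal sketch of how a
 * caller could wire it in when lpm_check_ptype() fails (portid and queueid
 * are placeholders):
 *
 *	if (!lpm_check_ptype(portid) &&
 *	    rte_eth_add_rx_callback(portid, queueid,
 *				    lpm_cb_parse_ptype, NULL) == NULL)
 *		rte_exit(EXIT_FAILURE,
 *			"port %d: cannot add ptype RX callback\n", portid);
 */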
uint16_t
lpm_cb_parse_ptype(uint16_t port __rte_unused, uint16_t queue __rte_unused,
		   struct rte_mbuf *pkts[], uint16_t nb_pkts,
		   uint16_t max_pkts __rte_unused,
		   void *user_param __rte_unused)
{
	unsigned int i;

	if (unlikely(nb_pkts == 0))
		return nb_pkts;
	rte_prefetch0(rte_pktmbuf_mtod(pkts[0], struct rte_ether_hdr *));
	for (i = 0; i < (unsigned int) (nb_pkts - 1); ++i) {
		rte_prefetch0(rte_pktmbuf_mtod(pkts[i+1],
			struct rte_ether_hdr *));
		lpm_parse_ptype(pkts[i]);
	}
	lpm_parse_ptype(pkts[i]);

	return nb_pkts;
}
/* Return ipv4/ipv6 lpm fwd lookup struct. */
void *
lpm_get_ipv4_l3fwd_lookup_struct(const int socketid)
{
	return ipv4_l3fwd_lpm_lookup_struct[socketid];
}

void *
lpm_get_ipv6_l3fwd_lookup_struct(const int socketid)
{
	return ipv6_l3fwd_lpm_lookup_struct[socketid];
}