/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <getopt.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_fbk_hash.h>
#include <rte_ip.h>
#define RTE_LOGTYPE_IPv4_MULTICAST RTE_LOGTYPE_USER1

#define MAX_PORTS 16
#define MCAST_CLONE_PORTS 2
#define MCAST_CLONE_SEGS 2

#define PKT_MBUF_DATA_SIZE RTE_MBUF_DEFAULT_BUF_SIZE
#define NB_PKT_MBUF 8192

#define HDR_MBUF_DATA_SIZE (2 * RTE_PKTMBUF_HEADROOM)
#define NB_HDR_MBUF (NB_PKT_MBUF * MAX_PORTS)
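/*
 * Worst-case clone-pool sizing: every packet mbuf may be cloned for up to
 * MCAST_CLONE_PORTS ports with up to MCAST_CLONE_SEGS segments each; the
 * trailing factor of 2 presumably adds slack so the pool is not exhausted
 * by in-flight bursts.
 */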
#define NB_CLONE_MBUF (NB_PKT_MBUF * MCAST_CLONE_PORTS * MCAST_CLONE_SEGS * 2)
/* allow max jumbo frame 9.5 KB (0x2600 = 9728 bytes) */
#define JUMBO_FRAME_MAX_SIZE 0x2600
#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
/* Configure how many packets ahead to prefetch, when reading packets */
#define PREFETCH_OFFSET 3
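/*
 * main_loop() below uses this as a simple software pipeline: while packet j
 * is being forwarded, packet j + PREFETCH_OFFSET is prefetched into cache.
 */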
/*
 * Construct Ethernet multicast address from IPv4 multicast address.
 * Citing RFC 1112, section 6.4:
 * "An IP host group address is mapped to an Ethernet multicast address
 * by placing the low-order 23-bits of the IP address into the low-order
 * 23 bits of the Ethernet multicast address 01-00-5E-00-00-00 (hex)."
 */
#define ETHER_ADDR_FOR_IPV4_MCAST(x) \
	(rte_cpu_to_be_64(0x01005e000000ULL | ((x) & 0x7fffff)) >> 16)
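/*
 * Worked example: 224.0.0.101 is 0xE0000065; its low-order 23 bits are
 * 0x000065, so the mapped address is 01:00:5e:00:00:65. The byte swap and
 * the 16-bit shift leave those six bytes at the start of the 64-bit value,
 * ready to be copied into a struct ether_addr (see dst_eth_addr below).
 */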
/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 1024
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
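/* These defaults may be revised per device by
 * rte_eth_dev_adjust_nb_rx_tx_desc() in main(). */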
/* Ethernet addresses of ports */
static struct ether_addr ports_eth_addr[MAX_PORTS];

/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;

static uint16_t nb_ports;

static int rx_queue_per_lcore = 1;
struct mbuf_table {
	uint16_t len;
	struct rte_mbuf *m_table[MAX_PKT_BURST];
};
#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16
struct lcore_queue_conf {
	uint64_t tx_tsc;
	uint16_t n_rx_queue;
	uint8_t rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
	uint16_t tx_queue_id[MAX_PORTS];
	struct mbuf_table tx_mbufs[MAX_PORTS];
} __rte_cache_aligned;
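/* One entry per lcore; each lcore only ever touches its own entry, so the
 * TX buffers above need no locking. */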
static struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
static struct rte_eth_conf port_conf = {
	.rxmode = {
		.max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
		.split_hdr_size = 0,
		.ignore_offload_bitfield = 1,
		.offloads = (DEV_RX_OFFLOAD_JUMBO_FRAME |
			     DEV_RX_OFFLOAD_CRC_STRIP),
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
		.offloads = DEV_TX_OFFLOAD_MULTI_SEGS,
	},
};
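/*
 * Three mbuf pools: packet_pool backs the RX rings, header_pool supplies
 * the small mbufs that carry the newly built Ethernet header, and
 * clone_pool supplies the indirect mbufs used when cloning input packets.
 */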
static struct rte_mempool *packet_pool, *header_pool, *clone_pool;
static struct rte_fbk_hash_params mcast_hash_params = {
	.name = "MCAST_HASH",
	.entries = 1024,
	.entries_per_bucket = 4,
	.socket_id = 0,
	.hash_func = NULL,
	.init_val = 0,
};

struct rte_fbk_hash_table *mcast_hash = NULL;
struct mcast_group_params {
	uint32_t ip;
	uint16_t port_mask;
};
static struct mcast_group_params mcast_group_table[] = {
	{IPv4(224,0,0,101), 0x1},
	{IPv4(224,0,0,102), 0x2},
	{IPv4(224,0,0,103), 0x3},
	{IPv4(224,0,0,104), 0x4},
	{IPv4(224,0,0,105), 0x5},
	{IPv4(224,0,0,106), 0x6},
	{IPv4(224,0,0,107), 0x7},
	{IPv4(224,0,0,108), 0x8},
	{IPv4(224,0,0,109), 0x9},
	{IPv4(224,0,0,110), 0xA},
	{IPv4(224,0,0,111), 0xB},
	{IPv4(224,0,0,112), 0xC},
	{IPv4(224,0,0,113), 0xD},
	{IPv4(224,0,0,114), 0xE},
	{IPv4(224,0,0,115), 0xF},
};
#define N_MCAST_GROUPS \
	(sizeof(mcast_group_table) / sizeof(mcast_group_table[0]))
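/*
 * Example: looking up IPv4(224,0,0,102) in the hash built from this table
 * yields the port mask 0x2, i.e. the packet is forwarded to port 1 only
 * (subject to enabled_port_mask).
 */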
/* Send burst of packets on an output interface */
static void
send_burst(struct lcore_queue_conf *qconf, uint16_t port)
{
	struct rte_mbuf **m_table;
	uint16_t n, queueid;
	uint16_t ret;

	queueid = qconf->tx_queue_id[port];
	m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
	n = qconf->tx_mbufs[port].len;

	ret = rte_eth_tx_burst(port, queueid, m_table, n);
	/* Free any packets the driver could not accept; they are dropped. */
	while (unlikely(ret < n)) {
		rte_pktmbuf_free(m_table[ret]);
		ret++;
	}

	qconf->tx_mbufs[port].len = 0;
}
/* Get number of bits set (Kernighan's method: v &= v - 1 clears the
 * lowest set bit, so the loop runs once per set bit). */
static inline uint32_t
bitcnt(uint32_t v)
{
	uint32_t n;

	for (n = 0; v != 0; v &= v - 1, n++)
		;

	return n;
}
/*
 * Create the output multicast packet based on the given input packet.
 * There are two approaches for creating the outgoing packet; both are
 * based on the zero-copy idea for packet data, but they differ in a few
 * details:
 * The first one creates a clone of the input packet, i.e. it walks through
 * all segments of the input packet and for each of them creates a new mbuf
 * and attaches that new mbuf to the segment (refer to rte_pktmbuf_clone()
 * for more details). Then a new mbuf is allocated for the packet header
 * and is prepended to the 'clone' mbuf.
 * The second approach doesn't make a clone, it just increments the refcnt
 * of all input packet segments, allocates a new mbuf for the packet header
 * and prepends it to the input packet.
 * Basically the first approach reuses only the input packet's data, but
 * creates its own copy of the packet's metadata. The second approach reuses
 * both the input packet's data and its metadata.
 * The advantage of the first approach is that each outgoing packet has its
 * own copy of the metadata, so we can safely modify the data pointer of the
 * input packet. That allows us to skip creation of an output packet for the
 * last destination port and modify the input packet's header in place
 * instead, i.e. for N destination ports we need to invoke mcast_out_pkt()
 * only (N-1) times.
 * The advantage of the second approach is less work per outgoing packet,
 * i.e. we skip the "clone" operation completely. It comes with a price
 * though: the input packet's metadata has to stay intact, so for N
 * destination ports we need to invoke mcast_out_pkt() N times.
 * Therefore, for a small number of outgoing ports (and segments in the
 * input packet) the first approach is faster. As the number of outgoing
 * ports (and/or input segments) grows, the second one becomes preferable.
 *
 * use_clone controls which of the two approaches described above is used:
 * - 0 - use the second approach:
 *   Don't "clone" the input packet.
 *   Prepend the new header directly to the input packet.
 * - 1 - use the first approach:
 *   Make a "clone" of the input packet first.
 *   Prepend the new header to the clone of the input packet.
 * Return:
 * - The pointer to the new outgoing packet.
 * - NULL if the operation failed.
 */
static inline struct rte_mbuf *
mcast_out_pkt(struct rte_mbuf *pkt, int use_clone)
{
	struct rte_mbuf *hdr;

	/* Create new mbuf for the header. */
	if (unlikely((hdr = rte_pktmbuf_alloc(header_pool)) == NULL))
		return NULL;

	/* If requested, then make a new clone packet. */
	if (use_clone != 0 &&
	    unlikely((pkt = rte_pktmbuf_clone(pkt, clone_pool)) == NULL)) {
		rte_pktmbuf_free(hdr);
		return NULL;
	}
	/* prepend new header: chain the header mbuf in front of the data */
	hdr->next = pkt;

	/* update header's fields */
	hdr->pkt_len = (uint16_t)(hdr->data_len + pkt->pkt_len);
	hdr->nb_segs = pkt->nb_segs + 1;

	/* copy metadata from source packet */
	hdr->port = pkt->port;
	hdr->vlan_tci = pkt->vlan_tci;
	hdr->vlan_tci_outer = pkt->vlan_tci_outer;
	hdr->tx_offload = pkt->tx_offload;
	hdr->hash = pkt->hash;

	hdr->ol_flags = pkt->ol_flags;

	__rte_mbuf_sanity_check(hdr, 1);
	return hdr;
}
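/*
 * Note: the header mbuf returned above is still empty; mcast_send_pkt()
 * writes the new Ethernet header into its headroom, which is why
 * HDR_MBUF_DATA_SIZE only needs to cover mbuf headroom.
 */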
/*
 * Write new Ethernet header to the outgoing packet,
 * and put it into the outgoing queue for the given port.
 */
static inline void
mcast_send_pkt(struct rte_mbuf *pkt, struct ether_addr *dest_addr,
		struct lcore_queue_conf *qconf, uint16_t port)
{
	struct ether_hdr *ethdr;
	uint16_t len;

	/* Construct Ethernet header. */
	ethdr = (struct ether_hdr *)
		rte_pktmbuf_prepend(pkt, (uint16_t)sizeof(*ethdr));
	RTE_ASSERT(ethdr != NULL);

	ether_addr_copy(dest_addr, &ethdr->d_addr);
	ether_addr_copy(&ports_eth_addr[port], &ethdr->s_addr);
	ethdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
	/* Put new packet into the output queue */
	len = qconf->tx_mbufs[port].len;
	qconf->tx_mbufs[port].m_table[len] = pkt;
	qconf->tx_mbufs[port].len = ++len;

	/* Transmit once the burst buffer is full; partially filled buffers
	 * are drained by send_timeout_burst() from the main loop. */
	if (unlikely(MAX_PKT_BURST == len))
		send_burst(qconf, port);
}
/* Multicast forward of the input packet */
static inline void
mcast_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf)
{
	struct rte_mbuf *mc;
	struct ipv4_hdr *iphdr;
	uint32_t dest_addr, port_mask, port_num, use_clone;
	int32_t hash;
	uint16_t port;
	union {
		uint64_t as_int;
		struct ether_addr as_addr;
	} dst_eth_addr;
	/* Remove the Ethernet header from the input packet */
	iphdr = (struct ipv4_hdr *)
		rte_pktmbuf_adj(m, (uint16_t)sizeof(struct ether_hdr));
	RTE_ASSERT(iphdr != NULL);

	dest_addr = rte_be_to_cpu_32(iphdr->dst_addr);
	/*
	 * Check that it is a valid multicast address and
	 * we have some active ports assigned to it.
	 */
	if (!IS_IPV4_MCAST(dest_addr) ||
	    (hash = rte_fbk_hash_lookup(mcast_hash, dest_addr)) <= 0 ||
	    (port_mask = hash & enabled_port_mask) == 0) {
		rte_pktmbuf_free(m);
		return;
	}
	/* Calculate number of destination ports. */
	port_num = bitcnt(port_mask);

	/* Should we use rte_pktmbuf_clone() or not. */
	use_clone = (port_num <= MCAST_CLONE_PORTS &&
	    m->nb_segs <= MCAST_CLONE_SEGS);
	/*
	 * Mark all packet's segments as referenced port_num times: in the
	 * no-clone path every output packet shares these segments, and the
	 * final rte_pktmbuf_free() below drops the original reference.
	 */
	if (use_clone == 0)
		rte_pktmbuf_refcnt_update(m, (uint16_t)port_num);
	/* construct destination Ethernet address */
	dst_eth_addr.as_int = ETHER_ADDR_FOR_IPV4_MCAST(dest_addr);
	for (port = 0; use_clone != port_mask; port_mask >>= 1, port++) {

		/* Prepare output packet and send it out. */
		if ((port_mask & 1) != 0) {
			if (likely((mc = mcast_out_pkt(m, use_clone)) != NULL))
				mcast_send_pkt(mc, &dst_eth_addr.as_addr,
						qconf, port);
			else if (use_clone == 0)
				rte_pktmbuf_free(m);
		}
	}
	/*
	 * If we are making clone packets, then, for the last destination
	 * port, we can overwrite the input packet's metadata and send it
	 * directly; otherwise drop our own reference to the input packet.
	 */
	if (use_clone != 0)
		mcast_send_pkt(m, &dst_eth_addr.as_addr, qconf, port);
	else
		rte_pktmbuf_free(m);
}
/* Send burst of outgoing packets, if the timeout expires. */
static inline void
send_timeout_burst(struct lcore_queue_conf *qconf)
{
	uint64_t cur_tsc;
	uint16_t portid;
	/* Convert the drain interval from microseconds to TSC cycles. */
	const uint64_t drain_tsc =
		(rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;

	cur_tsc = rte_rdtsc();
	if (likely(cur_tsc < qconf->tx_tsc + drain_tsc))
		return;

	for (portid = 0; portid < MAX_PORTS; portid++) {
		if (qconf->tx_mbufs[portid].len != 0)
			send_burst(qconf, portid);
	}
	qconf->tx_tsc = cur_tsc;
}
/* main processing loop */
static int
main_loop(__rte_unused void *dummy)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	unsigned lcore_id;
	int i, j, nb_rx;
	uint16_t portid;
	struct lcore_queue_conf *qconf;

	lcore_id = rte_lcore_id();
	qconf = &lcore_queue_conf[lcore_id];

	if (qconf->n_rx_queue == 0) {
		RTE_LOG(INFO, IPv4_MULTICAST, "lcore %u has nothing to do\n",
			lcore_id);
		return 0;
	}

	RTE_LOG(INFO, IPv4_MULTICAST, "entering main loop on lcore %u\n",
		lcore_id);

	for (i = 0; i < qconf->n_rx_queue; i++) {

		portid = qconf->rx_queue_list[i];
		RTE_LOG(INFO, IPv4_MULTICAST, " -- lcoreid=%u portid=%d\n",
			lcore_id, portid);
	}
	while (1) {

		/*
		 * Read packets from the RX queues
		 */
		for (i = 0; i < qconf->n_rx_queue; i++) {

			portid = qconf->rx_queue_list[i];
			nb_rx = rte_eth_rx_burst(portid, 0, pkts_burst,
						 MAX_PKT_BURST);
			/* Prefetch first packets */
			for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) {
				rte_prefetch0(rte_pktmbuf_mtod(
						pkts_burst[j], void *));
			}

			/* Prefetch and forward already prefetched packets */
			for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
				rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[
						j + PREFETCH_OFFSET], void *));
				mcast_forward(pkts_burst[j], qconf);
			}

			/* Forward remaining prefetched packets */
			for (; j < nb_rx; j++) {
				mcast_forward(pkts_burst[j], qconf);
			}
		}
		/* Send out packets from TX queues */
		send_timeout_burst(qconf);
	}
}
static void
print_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n"
	    "  -p PORTMASK: hexadecimal bitmask of ports to configure\n"
	    "  -q NQ: number of RX queues (= ports) per lcore (default is 1)\n",
	    prgname);
}
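/*
 * Illustrative invocation (values are examples only): forward on ports 0-1
 * with one RX queue per lcore:
 *   ./ipv4_multicast -l 0-3 -n 4 -- -p 0x3 -q 1
 */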
static uint32_t
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;

	return (uint32_t)pm;
}
static int
parse_nqueue(const char *q_arg)
{
	char *end = NULL;
	unsigned long n;

	/* parse numerical string */
	errno = 0;
	n = strtoul(q_arg, &end, 0);
	if (errno != 0 || end == NULL || *end != '\0' ||
	    n == 0 || n >= MAX_RX_QUEUE_PER_LCORE)
		return -1;

	return n;
}
/* Parse the arguments given in the command line of the application */
static int
parse_args(int argc, char **argv)
{
	int opt, ret;
	char **argvopt;
	int option_index;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{NULL, 0, 0, 0}
	};

	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, "p:q:",
			lgopts, &option_index)) != EOF) {

		switch (opt) {
		/* portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				printf("invalid portmask\n");
				print_usage(prgname);
				return -1;
			}
			break;

		/* nqueue */
		case 'q':
			rx_queue_per_lcore = parse_nqueue(optarg);
			if (rx_queue_per_lcore < 0) {
				printf("invalid queue number\n");
				print_usage(prgname);
				return -1;
			}
			break;

		default:
			print_usage(prgname);
			return -1;
		}
	}

	if (optind >= 0)
		argv[optind-1] = prgname;

	ret = optind-1;
	optind = 1; /* reset getopt lib */
	return ret;
}
static void
print_ethaddr(const char *name, struct ether_addr *eth_addr)
{
	char buf[ETHER_ADDR_FMT_SIZE];
	ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}
static int
init_mcast_hash(void)
{
	uint32_t i;

	mcast_hash_params.socket_id = rte_socket_id();
	mcast_hash = rte_fbk_hash_create(&mcast_hash_params);
	if (mcast_hash == NULL) {
		return -1;
	}

	for (i = 0; i < N_MCAST_GROUPS; i++) {
		if (rte_fbk_hash_add_key(mcast_hash,
			mcast_group_table[i].ip,
			mcast_group_table[i].port_mask) < 0) {
			return -1;
		}
	}

	return 0;
}
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint16_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("\nChecking link status");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		RTE_ETH_FOREACH_DEV(portid) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf(
					"Port%d Link Up. Speed %u Mbps - %s\n",
					portid, link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n", portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			printf(".");
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
			printf("done\n");
		}
	}
}
int
main(int argc, char **argv)
{
	struct lcore_queue_conf *qconf;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf *txconf;
	int ret;
	uint16_t queueid;
	unsigned lcore_id = 0, rx_lcore_id = 0;
	uint32_t n_tx_queue, nb_lcores;
	uint16_t portid;

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
	argc -= ret;
	argv += ret;

	/* parse application arguments (after the EAL ones) */
	ret = parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid IPV4_MULTICAST parameters\n");
	/* create the mbuf pools */
	packet_pool = rte_pktmbuf_pool_create("packet_pool", NB_PKT_MBUF, 32,
		0, PKT_MBUF_DATA_SIZE, rte_socket_id());

	if (packet_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot init packet mbuf pool\n");

	header_pool = rte_pktmbuf_pool_create("header_pool", NB_HDR_MBUF, 32,
		0, HDR_MBUF_DATA_SIZE, rte_socket_id());

	if (header_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot init header mbuf pool\n");

	clone_pool = rte_pktmbuf_pool_create("clone_pool", NB_CLONE_MBUF, 32,
		0, 0, rte_socket_id());

	if (clone_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot init clone mbuf pool\n");
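	/*
	 * Note that clone_pool is created with a data room size of 0:
	 * indirect mbufs taken from it never hold packet data themselves,
	 * they only reference segments of the original packet.
	 */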
	nb_ports = rte_eth_dev_count_avail();
	if (nb_ports == 0)
		rte_exit(EXIT_FAILURE, "No physical ports!\n");
	if (nb_ports > MAX_PORTS)
		nb_ports = MAX_PORTS;

	nb_lcores = rte_lcore_count();
	/* initialize all ports */
	RTE_ETH_FOREACH_DEV(portid) {
		struct rte_eth_rxconf rxq_conf;
		struct rte_eth_conf local_port_conf = port_conf;

		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("Skipping disabled port %d\n", portid);
			continue;
		}
		qconf = &lcore_queue_conf[rx_lcore_id];

		/* limit the frame size to the maximum supported by NIC */
		rte_eth_dev_info_get(portid, &dev_info);
		local_port_conf.rxmode.max_rx_pkt_len = RTE_MIN(
		    dev_info.max_rx_pktlen,
		    local_port_conf.rxmode.max_rx_pkt_len);
		/* get the lcore_id for this port */
		while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
		       qconf->n_rx_queue == (unsigned)rx_queue_per_lcore) {

			rx_lcore_id++;
			if (rx_lcore_id >= RTE_MAX_LCORE)
				rte_exit(EXIT_FAILURE, "Not enough cores\n");
			qconf = &lcore_queue_conf[rx_lcore_id];
		}
		qconf->rx_queue_list[qconf->n_rx_queue] = portid;
		qconf->n_rx_queue++;
		/* init port */
		printf("Initializing port %d on lcore %u... ", portid,
		       rx_lcore_id);
		fflush(stdout);

		n_tx_queue = nb_lcores;
		if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
			n_tx_queue = MAX_TX_QUEUE_PER_PORT;
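		/*
		 * Queue layout: one RX queue per port, one TX queue per
		 * (lcore, port) pair, so every lcore can transmit on every
		 * port without locks.
		 */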
		ret = rte_eth_dev_configure(portid, 1, (uint16_t)n_tx_queue,
					    &local_port_conf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%d\n",
				 ret, portid);

		ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
						       &nb_txd);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				 "Cannot adjust number of descriptors: err=%d, port=%d\n",
				 ret, portid);

		rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
		print_ethaddr(" Address:", &ports_eth_addr[portid]);
		printf(", ");

		/* init one RX queue */
		queueid = 0;
		printf("rxq=%hu ", queueid);
		fflush(stdout);
		rxq_conf = dev_info.default_rxconf;
		rxq_conf.offloads = local_port_conf.rxmode.offloads;
		ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,
					     rte_eth_dev_socket_id(portid),
					     &rxq_conf,
					     packet_pool);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: err=%d, port=%d\n",
				 ret, portid);
		/* init one TX queue per couple (lcore,port) */
		queueid = 0;

		RTE_LCORE_FOREACH(lcore_id) {
			if (rte_lcore_is_enabled(lcore_id) == 0)
				continue;
			printf("txq=%u,%hu ", lcore_id, queueid);
			fflush(stdout);

			txconf = &dev_info.default_txconf;
			txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE;
			txconf->offloads = local_port_conf.txmode.offloads;
			ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
						     rte_lcore_to_socket_id(lcore_id),
						     txconf);
			if (ret < 0)
				rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, "
					 "port=%d\n", ret, portid);

			qconf = &lcore_queue_conf[lcore_id];
			qconf->tx_queue_id[portid] = queueid;
			queueid++;
		}
		ret = rte_eth_dev_start(portid);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d, port=%d\n",
				 ret, portid);

		printf("done:\n");
	}

	check_all_ports_link_status(enabled_port_mask);
	/* initialize the multicast hash */
	int retval = init_mcast_hash();
	if (retval != 0)
		rte_exit(EXIT_FAILURE, "Cannot build the multicast hash\n");

	/* launch main_loop() on every lcore; CALL_MASTER runs it on the
	 * master lcore as well */
	rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	return 0;
}