/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <stdint.h>
#include <getopt.h>
#include <signal.h>
#include <stdbool.h>
#include <unistd.h>

#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_rawdev.h>
#include <rte_ioat_rawdev.h>

/* log type and mempool/burst sizing constants used by the application */
#define RTE_LOGTYPE_IOAT RTE_LOGTYPE_USER1
#define MAX_PKT_BURST 32
#define MEMPOOL_CACHE_SIZE 512
#define MIN_POOL_SIZE 65536U

/* long command-line option names */
#define CMD_LINE_OPT_MAC_UPDATING "mac-updating"
#define CMD_LINE_OPT_NO_MAC_UPDATING "no-mac-updating"
#define CMD_LINE_OPT_PORTMASK "portmask"
#define CMD_LINE_OPT_NB_QUEUE "nb-queue"
#define CMD_LINE_OPT_COPY_TYPE "copy-type"
#define CMD_LINE_OPT_RING_SIZE "ring-size"

/* configurable number of RX/TX ring descriptors */
#define RX_DEFAULT_RINGSIZE 1024
#define TX_DEFAULT_RINGSIZE 1024

/* max number of RX queues per port */
#define MAX_RX_QUEUES_COUNT 8

struct rxtx_port_config {
	/* Ethernet port used for both RX and TX */
	uint16_t rxtx_port;
	uint16_t nb_queues;
	/* for software copy mode */
	struct rte_ring *rx_to_tx_ring;
	/* for IOAT rawdev copy mode */
	uint16_t ioat_ids[MAX_RX_QUEUES_COUNT];
};

struct rxtx_transmission_config {
	struct rxtx_port_config ports[RTE_MAX_ETHPORTS];
	uint16_t nb_ports;
	uint16_t nb_lcores;
};

/* the "sw"/"hw" string literals are kept next to their enum values so the
 * command-line parser can map one onto the other
 */
typedef enum copy_mode_t {
#define COPY_MODE_SW "sw"
	COPY_MODE_SW_NUM,
#define COPY_MODE_IOAT "hw"
	COPY_MODE_IOAT_NUM,
	COPY_MODE_INVALID_NUM,
	COPY_MODE_SIZE_NUM = COPY_MODE_INVALID_NUM
} copy_mode_t;

/* mask of enabled ports */
static uint32_t ioat_enabled_port_mask;

/* number of RX queues per port */
static uint16_t nb_queues = 1;

/* MAC updating enabled by default. */
static int mac_updating = 1;

/* hardware copy mode enabled by default. */
static copy_mode_t copy_mode = COPY_MODE_IOAT_NUM;

/* size of IOAT rawdev ring for hardware copy mode or
 * rte_ring for software copy mode
 */
static unsigned short ring_size = 2048;

/* global transmission config */
struct rxtx_transmission_config cfg;

/* configurable number of RX/TX ring descriptors */
static uint16_t nb_rxd = RX_DEFAULT_RINGSIZE;
static uint16_t nb_txd = TX_DEFAULT_RINGSIZE;

static volatile bool force_quit;

/* ethernet addresses of ports */
static struct rte_ether_addr ioat_ports_eth_addr[RTE_MAX_ETHPORTS];

static struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
struct rte_mempool *ioat_pktmbuf_pool;
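
/*
 * Rewrite both MAC addresses of a forwarded packet: the destination becomes
 * the locally-administered address 02:00:00:00:00:<dest port id> and the
 * source becomes the MAC address of the TX port itself.
 */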
static void
update_mac_addrs(struct rte_mbuf *m, uint32_t dest_portid)
{
	struct rte_ether_hdr *eth;
	void *tmp;

	eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);

	/* 02:00:00:00:00:xx - overwriting 2 bytes of source address but
	 * it's acceptable because it gets overwritten by rte_ether_addr_copy
	 */
	tmp = &eth->d_addr.addr_bytes[0];
	*((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dest_portid << 40);

	/* src addr */
	rte_ether_addr_copy(&ioat_ports_eth_addr[dest_portid], &eth->s_addr);
}
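
/*
 * Software copy helper: clone one mbuf into another. Only the region from
 * rearm_data up to the start of the mbuf's second cacheline is copied, which
 * covers the fields needed to rearm and transmit the buffer without
 * disturbing the destination mbuf's own pool and buffer pointers.
 */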
static inline void
pktmbuf_sw_copy(struct rte_mbuf *src, struct rte_mbuf *dst)
{
	/* Copy packet metadata */
	rte_memcpy(&dst->rearm_data,
		&src->rearm_data,
		offsetof(struct rte_mbuf, cacheline1)
		- offsetof(struct rte_mbuf, rearm_data));

	/* Copy packet data */
	rte_memcpy(rte_pktmbuf_mtod(dst, char *),
		rte_pktmbuf_mtod(src, char *), src->data_len);
}
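
/*
 * Enqueue a burst of copies on an IOAT rawdev. The offset from an mbuf's
 * rearm_data field to the start of its data buffer is the same for every
 * mbuf from the pool, so each enqueued DMA transfer starts addr_offset bytes
 * before the buffer: one hardware copy then moves both the packet metadata
 * and the packet data, mirroring what pktmbuf_sw_copy() does in software.
 * Returns the number of operations actually enqueued.
 */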
static uint32_t
ioat_enqueue_packets(struct rte_mbuf **pkts,
	uint32_t nb_rx, uint16_t dev_id)
{
	int ret;
	uint32_t i;
	struct rte_mbuf *pkts_copy[MAX_PKT_BURST];

	const uint64_t addr_offset = RTE_PTR_DIFF(pkts[0]->buf_addr,
		&pkts[0]->rearm_data);

	ret = rte_mempool_get_bulk(ioat_pktmbuf_pool,
		(void *)pkts_copy, nb_rx);

	if (unlikely(ret < 0))
		rte_exit(EXIT_FAILURE, "Unable to allocate memory.\n");

	for (i = 0; i < nb_rx; i++) {
		/* Perform data copy */
		ret = rte_ioat_enqueue_copy(dev_id,
			pkts[i]->buf_iova
			- addr_offset,
			pkts_copy[i]->buf_iova
			- addr_offset,
			rte_pktmbuf_data_len(pkts[i])
			+ addr_offset,
			(uintptr_t)pkts[i],
			(uintptr_t)pkts_copy[i],
			0 /* nofence */);

		if (ret != 1)
			break;
	}

	/* Free any not enqueued packets. */
	rte_mempool_put_bulk(ioat_pktmbuf_pool, (void *)&pkts[i], nb_rx - i);
	rte_mempool_put_bulk(ioat_pktmbuf_pool, (void *)&pkts_copy[i],
		nb_rx - i);

	return i;
}

/* Receive packets on one port and enqueue to IOAT rawdev or rte_ring. */
static void
ioat_rx_port(struct rxtx_port_config *rx_config)
{
	uint32_t nb_rx, nb_enq, i, j;
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];

	for (i = 0; i < rx_config->nb_queues; i++) {

		nb_rx = rte_eth_rx_burst(rx_config->rxtx_port, i,
			pkts_burst, MAX_PKT_BURST);

		if (nb_rx == 0)
			continue;

		if (copy_mode == COPY_MODE_IOAT_NUM) {
			/* Perform packet hardware copy */
			nb_enq = ioat_enqueue_packets(pkts_burst,
				nb_rx, rx_config->ioat_ids[i]);
			if (nb_enq > 0)
				/* kick off the enqueued copies on the device */
				rte_ioat_do_copies(rx_config->ioat_ids[i]);
		} else {
			/* Perform packet software copy, free source packets */
			int ret;
			struct rte_mbuf *pkts_burst_copy[MAX_PKT_BURST];

			ret = rte_mempool_get_bulk(ioat_pktmbuf_pool,
				(void *)pkts_burst_copy, nb_rx);

			if (unlikely(ret < 0))
				rte_exit(EXIT_FAILURE,
					"Unable to allocate memory.\n");

			for (j = 0; j < nb_rx; j++)
				pktmbuf_sw_copy(pkts_burst[j],
					pkts_burst_copy[j]);

			rte_mempool_put_bulk(ioat_pktmbuf_pool,
				(void *)pkts_burst, nb_rx);

			nb_enq = rte_ring_enqueue_burst(
				rx_config->rx_to_tx_ring,
				(void *)pkts_burst_copy, nb_rx, NULL);

			/* Free any not enqueued packets. */
			rte_mempool_put_bulk(ioat_pktmbuf_pool,
				(void *)&pkts_burst_copy[nb_enq],
				nb_rx - nb_enq);
		}
	}
}

/* Transmit packets from IOAT rawdev/rte_ring for one port. */
static void
ioat_tx_port(struct rxtx_port_config *tx_config)
{
	uint32_t i, j, nb_dq = 0;
	struct rte_mbuf *mbufs_src[MAX_PKT_BURST];
	struct rte_mbuf *mbufs_dst[MAX_PKT_BURST];

	for (i = 0; i < tx_config->nb_queues; i++) {
		if (copy_mode == COPY_MODE_IOAT_NUM) {
			/* Dequeue the mbufs from IOAT device. */
			nb_dq = rte_ioat_completed_copies(
				tx_config->ioat_ids[i], MAX_PKT_BURST,
				(void *)mbufs_src, (void *)mbufs_dst);
		} else {
			/* Dequeue the mbufs from rx_to_tx_ring. */
			nb_dq = rte_ring_dequeue_burst(
				tx_config->rx_to_tx_ring, (void *)mbufs_dst,
				MAX_PKT_BURST, NULL);
		}

		if (nb_dq == 0)
			return;

		/* in hardware copy mode the source mbufs can be freed now */
		if (copy_mode == COPY_MODE_IOAT_NUM)
			rte_mempool_put_bulk(ioat_pktmbuf_pool,
				(void *)mbufs_src, nb_dq);

		/* Update macs if enabled */
		if (mac_updating)
			for (j = 0; j < nb_dq; j++)
				update_mac_addrs(mbufs_dst[j],
					tx_config->rxtx_port);

		const uint16_t nb_tx = rte_eth_tx_burst(
			tx_config->rxtx_port, 0,
			(void *)mbufs_dst, nb_dq);

		/* Free any unsent packets. */
		if (unlikely(nb_tx < nb_dq))
			rte_mempool_put_bulk(ioat_pktmbuf_pool,
				(void *)&mbufs_dst[nb_tx],
				nb_dq - nb_tx);
	}
}

/* Main rx processing loop for IOAT rawdev. */
static void
rx_main_loop(void)
{
	uint16_t i;
	uint16_t nb_ports = cfg.nb_ports;

	RTE_LOG(INFO, IOAT, "Entering main rx loop for copy on lcore %u\n",
		rte_lcore_id());

	while (!force_quit)
		for (i = 0; i < nb_ports; i++)
			ioat_rx_port(&cfg.ports[i]);
}

/* Main tx processing loop for hardware copy. */
static void
tx_main_loop(void)
{
	uint16_t i;
	uint16_t nb_ports = cfg.nb_ports;

	RTE_LOG(INFO, IOAT, "Entering main tx loop for copy on lcore %u\n",
		rte_lcore_id());

	while (!force_quit)
		for (i = 0; i < nb_ports; i++)
			ioat_tx_port(&cfg.ports[i]);
}

/* Main rx and tx loop if only one slave lcore available */
static void
rxtx_main_loop(void)
{
	uint16_t i;
	uint16_t nb_ports = cfg.nb_ports;

	RTE_LOG(INFO, IOAT, "Entering main rx and tx loop for copy on"
		" lcore %u\n", rte_lcore_id());

	while (!force_quit)
		for (i = 0; i < nb_ports; i++) {
			ioat_rx_port(&cfg.ports[i]);
			ioat_tx_port(&cfg.ports[i]);
		}
}
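
/*
 * Launch the forwarding loops on worker lcores: with a single worker the
 * combined rx/tx loop runs on it, while with two or more workers the rx
 * and tx loops are pinned to separate lcores.
 */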
static void start_forwarding_cores(void)
{
	uint32_t lcore_id = rte_lcore_id();

	RTE_LOG(INFO, IOAT, "Entering %s on lcore %u\n",
		__func__, rte_lcore_id());

	if (cfg.nb_lcores == 1) {
		lcore_id = rte_get_next_lcore(lcore_id, true, true);
		rte_eal_remote_launch((lcore_function_t *)rxtx_main_loop,
			NULL, lcore_id);
	} else if (cfg.nb_lcores > 1) {
		lcore_id = rte_get_next_lcore(lcore_id, true, true);
		rte_eal_remote_launch((lcore_function_t *)rx_main_loop,
			NULL, lcore_id);

		lcore_id = rte_get_next_lcore(lcore_id, true, true);
		rte_eal_remote_launch((lcore_function_t *)tx_main_loop, NULL,
			lcore_id);
	}
}
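
/* Display usage; an illustrative invocation (binary name and core/port
 * numbers are examples only) might look like:
 *   ./build/ioatfwd -l 0-2 -n 2 -- -p 0x3 --copy-type hw --ring-size 2048
 */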
static void
ioat_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n"
		"  -p --portmask: hexadecimal bitmask of ports to configure\n"
		"  -q NQ: number of RX queues per port (default is 1)\n"
		"  --[no-]mac-updating: Enable or disable MAC addresses updating (enabled by default)\n"
		"      When enabled:\n"
		"       - The source MAC address is replaced by the TX port MAC address\n"
		"       - The destination MAC address is replaced by 02:00:00:00:00:TX_PORT_ID\n"
		"  -c --copy-type CT: type of copy: sw|hw\n"
		"  -s --ring-size RS: size of IOAT rawdev ring for hardware copy mode or rte_ring for software copy mode\n",
			prgname);
}

static int
ioat_parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* Parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	return pm;
}

static copy_mode_t
ioat_parse_copy_mode(const char *copy_mode)
{
	if (strcmp(copy_mode, COPY_MODE_SW) == 0)
		return COPY_MODE_SW_NUM;
	else if (strcmp(copy_mode, COPY_MODE_IOAT) == 0)
		return COPY_MODE_IOAT_NUM;

	return COPY_MODE_INVALID_NUM;
}

/* Parse the argument given in the command line of the application */
static int
ioat_parse_args(int argc, char **argv, unsigned int nb_ports)
{
	static const char short_options[] =
		"p:"  /* portmask */
		"q:"  /* number of RX queues per port */
		"c:"  /* copy type (sw|hw) */
		"s:"  /* ring size */
		;

	static const struct option lgopts[] = {
		{CMD_LINE_OPT_MAC_UPDATING, no_argument, &mac_updating, 1},
		{CMD_LINE_OPT_NO_MAC_UPDATING, no_argument, &mac_updating, 0},
		{CMD_LINE_OPT_PORTMASK, required_argument, NULL, 'p'},
		{CMD_LINE_OPT_NB_QUEUE, required_argument, NULL, 'q'},
		{CMD_LINE_OPT_COPY_TYPE, required_argument, NULL, 'c'},
		{CMD_LINE_OPT_RING_SIZE, required_argument, NULL, 's'},
		{NULL, 0, 0, 0}
	};

	const unsigned int default_port_mask = (1 << nb_ports) - 1;
	int opt, ret;
	char **argvopt;
	int option_index;
	char *prgname = argv[0];

	ioat_enabled_port_mask = default_port_mask;
	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, short_options,
			lgopts, &option_index)) != EOF) {

		switch (opt) {
		/* portmask */
		case 'p':
			ioat_enabled_port_mask = ioat_parse_portmask(optarg);
			if (ioat_enabled_port_mask & ~default_port_mask ||
					ioat_enabled_port_mask <= 0) {
				printf("Invalid portmask, %s, suggest 0x%x\n",
						optarg, default_port_mask);
				ioat_usage(prgname);
				return -1;
			}
			break;

		case 'q':
			nb_queues = atoi(optarg);
			if (nb_queues == 0 || nb_queues > MAX_RX_QUEUES_COUNT) {
				printf("Invalid RX queues number %s. Max %u\n",
					optarg, MAX_RX_QUEUES_COUNT);
				ioat_usage(prgname);
				return -1;
			}
			break;

		case 'c':
			copy_mode = ioat_parse_copy_mode(optarg);
			if (copy_mode == COPY_MODE_INVALID_NUM) {
				printf("Invalid copy type. Use: sw, hw\n");
				ioat_usage(prgname);
				return -1;
			}
			break;

		case 's':
			ring_size = atoi(optarg);
			if (ring_size == 0) {
				printf("Invalid ring size, %s.\n", optarg);
				ioat_usage(prgname);
				return -1;
			}
			break;

		/* long options */
		case 0:
			break;

		default:
			ioat_usage(prgname);
			return -1;
		}
	}

	printf("MAC updating %s\n", mac_updating ? "enabled" : "disabled");
	if (optind >= 0)
		argv[optind - 1] = prgname;

	ret = optind - 1;
	optind = 1; /* reset getopt lib */
	return ret;
}

/* check link status, return true if at least one port is up */
static int
check_link_status(uint32_t port_mask)
{
	uint16_t portid;
	struct rte_eth_link link;
	int retval = 0;

	printf("\nChecking link status\n");
	RTE_ETH_FOREACH_DEV(portid) {
		if ((port_mask & (1 << portid)) == 0)
			continue;

		memset(&link, 0, sizeof(link));
		rte_eth_link_get(portid, &link);

		/* Print link status */
		if (link.link_status) {
			printf(
				"Port %d Link Up. Speed %u Mbps - %s\n",
				portid, link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
				("full-duplex") : ("half-duplex"));
			retval = 1;
		} else
			printf("Port %d Link Down\n", portid);
	}
	return retval;
}
static void
configure_rawdev_queue(uint32_t dev_id)
{
	struct rte_ioat_rawdev_config dev_config = { .ring_size = ring_size };
	struct rte_rawdev_info info = { .dev_private = &dev_config };

	if (rte_rawdev_configure(dev_id, &info) != 0) {
		rte_exit(EXIT_FAILURE,
			"Error with rte_rawdev_configure()\n");
	}
	if (rte_rawdev_start(dev_id) != 0) {
		rte_exit(EXIT_FAILURE,
			"Error with rte_rawdev_start()\n");
	}
}
static void
assign_rawdevs(void)
{
	uint16_t nb_rawdev = 0, rdev_id = 0;
	uint32_t i, j;

	for (i = 0; i < cfg.nb_ports; i++) {
		for (j = 0; j < cfg.ports[i].nb_queues; j++) {
			struct rte_rawdev_info rdev_info = { 0 };

			do {
				if (rdev_id == rte_rawdev_count())
					goto end;
				rte_rawdev_info_get(rdev_id++, &rdev_info);
			} while (strcmp(rdev_info.driver_name,
				IOAT_PMD_RAWDEV_NAME_STR) != 0);

			cfg.ports[i].ioat_ids[j] = rdev_id - 1;
			configure_rawdev_queue(cfg.ports[i].ioat_ids[j]);
			++nb_rawdev;
		}
	}
end:
	if (nb_rawdev < cfg.nb_ports * cfg.ports[0].nb_queues)
		rte_exit(EXIT_FAILURE,
			"Not enough IOAT rawdevs (%u) for all queues (%u).\n",
			nb_rawdev, cfg.nb_ports * cfg.ports[0].nb_queues);
	RTE_LOG(INFO, IOAT, "Number of used rawdevs: %u.\n", nb_rawdev);
}
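
/*
 * Create one single-producer/single-consumer rte_ring per port for handing
 * software-copied packets from the rx lcore to the tx lcore.
 */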
static void
assign_rings(void)
{
	uint32_t i;

	for (i = 0; i < cfg.nb_ports; i++) {
		char ring_name[RTE_RING_NAMESIZE];

		snprintf(ring_name, sizeof(ring_name), "rx_to_tx_ring_%u", i);
		/* Create ring for inter core communication */
		cfg.ports[i].rx_to_tx_ring = rte_ring_create(
			ring_name, ring_size,
			rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ);

		if (cfg.ports[i].rx_to_tx_ring == NULL)
			rte_exit(EXIT_FAILURE, "Ring create failed: %s\n",
				rte_strerror(rte_errno));
	}
}

/*
 * Initializes a given port using global settings and with the RX buffers
 * coming from the mbuf_pool passed as a parameter.
 */
static inline void
port_init(uint16_t portid, struct rte_mempool *mbuf_pool, uint16_t nb_queues)
{
	/* configuring port to use RSS for multiple RX queues */
	static const struct rte_eth_conf port_conf = {
		.rxmode = {
			.mq_mode = ETH_MQ_RX_RSS,
			.max_rx_pkt_len = RTE_ETHER_MAX_LEN
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = NULL,
				.rss_hf = ETH_RSS_PROTO_MASK,
			}
		}
	};

	struct rte_eth_rxconf rxq_conf;
	struct rte_eth_txconf txq_conf;
	struct rte_eth_conf local_port_conf = port_conf;
	struct rte_eth_dev_info dev_info;
	int ret, i;

	/* Skip ports that are not enabled */
	if ((ioat_enabled_port_mask & (1 << portid)) == 0) {
		printf("Skipping disabled port %u\n", portid);
		return;
	}

	/* Init port */
	printf("Initializing port %u... ", portid);
	fflush(stdout);
	rte_eth_dev_info_get(portid, &dev_info);
	local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
		dev_info.flow_type_rss_offloads;
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		local_port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
	ret = rte_eth_dev_configure(portid, nb_queues, 1, &local_port_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Cannot configure device:"
			" err=%d, port=%u\n", ret, portid);

	ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
			&nb_txd);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			"Cannot adjust number of descriptors: err=%d, port=%u\n",
			ret, portid);

	rte_eth_macaddr_get(portid, &ioat_ports_eth_addr[portid]);

	/* Init RX queues */
	rxq_conf = dev_info.default_rxconf;
	rxq_conf.offloads = local_port_conf.rxmode.offloads;
	for (i = 0; i < nb_queues; i++) {
		ret = rte_eth_rx_queue_setup(portid, i, nb_rxd,
			rte_eth_dev_socket_id(portid), &rxq_conf,
			mbuf_pool);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"rte_eth_rx_queue_setup:err=%d,port=%u, queue_id=%u\n",
				ret, portid, i);
	}

	/* Init one TX queue on each port */
	txq_conf = dev_info.default_txconf;
	txq_conf.offloads = local_port_conf.txmode.offloads;
	ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
			rte_eth_dev_socket_id(portid),
			&txq_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			"rte_eth_tx_queue_setup:err=%d,port=%u\n",
			ret, portid);

	/* Initialize TX buffers */
	tx_buffer[portid] = rte_zmalloc_socket("tx_buffer",
			RTE_ETH_TX_BUFFER_SIZE(MAX_PKT_BURST), 0,
			rte_eth_dev_socket_id(portid));
	if (tx_buffer[portid] == NULL)
		rte_exit(EXIT_FAILURE,
			"Cannot allocate buffer for tx on port %u\n",
			portid);

	rte_eth_tx_buffer_init(tx_buffer[portid], MAX_PKT_BURST);

	/* Start device */
	ret = rte_eth_dev_start(portid);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			"rte_eth_dev_start:err=%d, port=%u\n",
			ret, portid);

	rte_eth_promiscuous_enable(portid);

	printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
			portid,
			ioat_ports_eth_addr[portid].addr_bytes[0],
			ioat_ports_eth_addr[portid].addr_bytes[1],
			ioat_ports_eth_addr[portid].addr_bytes[2],
			ioat_ports_eth_addr[portid].addr_bytes[3],
			ioat_ports_eth_addr[portid].addr_bytes[4],
			ioat_ports_eth_addr[portid].addr_bytes[5]);

	cfg.ports[cfg.nb_ports].rxtx_port = portid;
	cfg.ports[cfg.nb_ports++].nb_queues = nb_queues;
}

static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\n\nSignal %d received, preparing to exit...\n",
			signum);
		force_quit = true;
	}
}

int
main(int argc, char **argv)
{
	int ret;
	uint16_t nb_ports, portid;
	uint32_t i, j;
	unsigned int nb_mbufs;

	/* Init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
	argc -= ret;
	argv += ret;

	force_quit = false;
	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	nb_ports = rte_eth_dev_count_avail();
	if (nb_ports == 0)
		rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");

	/* Parse application arguments (after the EAL ones) */
	ret = ioat_parse_args(argc, argv, nb_ports);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid IOAT arguments\n");
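
	/*
	 * Size the mbuf pool to cover, per port, the RX and TX descriptor
	 * rings of every queue plus a few in-flight bursts, plus one cache's
	 * worth of mbufs per lcore, with MIN_POOL_SIZE as a floor.
	 */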
	nb_mbufs = RTE_MAX(nb_ports * (nb_queues * (nb_rxd + nb_txd +
		4 * MAX_PKT_BURST) + rte_lcore_count() * MEMPOOL_CACHE_SIZE),
		MIN_POOL_SIZE);

	/* Create the mbuf pool */
	ioat_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", nb_mbufs,
		MEMPOOL_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
		rte_socket_id());
	if (ioat_pktmbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");

	/* Initialise each port */
	RTE_ETH_FOREACH_DEV(portid)
		port_init(portid, ioat_pktmbuf_pool, nb_queues);

	/* wait until at least one enabled port is up */
	while (!check_link_status(ioat_enabled_port_mask) && !force_quit)
		sleep(1);

	/* Check if there is enough lcores for all ports. */
	cfg.nb_lcores = rte_lcore_count() - 1;
	if (cfg.nb_lcores < 1)
		rte_exit(EXIT_FAILURE,
			"There should be at least one slave lcore.\n");
	if (copy_mode == COPY_MODE_IOAT_NUM)
		assign_rawdevs();
	else /* copy_mode == COPY_MODE_SW_NUM */
		assign_rings();

	start_forwarding_cores();

	/* force_quit is true when we get here */
	rte_eal_mp_wait_lcore();

	for (i = 0; i < cfg.nb_ports; i++) {
		printf("Closing port %d\n", cfg.ports[i].rxtx_port);
		rte_eth_dev_stop(cfg.ports[i].rxtx_port);
		rte_eth_dev_close(cfg.ports[i].rxtx_port);
		if (copy_mode == COPY_MODE_IOAT_NUM) {
			for (j = 0; j < cfg.ports[i].nb_queues; j++) {
				printf("Stopping rawdev %d\n",
					cfg.ports[i].ioat_ids[j]);
				rte_rawdev_stop(cfg.ports[i].ioat_ids[j]);
			}
		} else /* copy_mode == COPY_MODE_SW_NUM */
			rte_ring_free(cfg.ports[i].rx_to_tx_ring);
	}

	printf("Bye...\n");
	return 0;
}