/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <stdint.h>
#include <getopt.h>
#include <signal.h>
#include <stdbool.h>
#include <unistd.h>

#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_rawdev.h>
#include <rte_ioat_rawdev.h>

#define RTE_LOGTYPE_IOAT RTE_LOGTYPE_USER1
#define MAX_PKT_BURST 32
#define MEMPOOL_CACHE_SIZE 512
#define MIN_POOL_SIZE 65536U
#define CMD_LINE_OPT_MAC_UPDATING "mac-updating"
#define CMD_LINE_OPT_NO_MAC_UPDATING "no-mac-updating"
#define CMD_LINE_OPT_PORTMASK "portmask"
#define CMD_LINE_OPT_NB_QUEUE "nb-queue"
#define CMD_LINE_OPT_COPY_TYPE "copy-type"
#define CMD_LINE_OPT_RING_SIZE "ring-size"

/* configurable number of RX/TX ring descriptors */
#define RX_DEFAULT_RINGSIZE 1024
#define TX_DEFAULT_RINGSIZE 1024

/* max number of RX queues per port */
#define MAX_RX_QUEUES_COUNT 8

struct rxtx_port_config {
	/* Ethernet port and its number of RX queues */
	uint16_t rxtx_port;
	uint16_t nb_queues;
	/* for software copy mode */
	struct rte_ring *rx_to_tx_ring;
	/* for IOAT rawdev copy mode */
	uint16_t ioat_ids[MAX_RX_QUEUES_COUNT];
};

/* Configuring ports and number of assigned lcores in struct. 8< */
struct rxtx_transmission_config {
	struct rxtx_port_config ports[RTE_MAX_ETHPORTS];
	uint16_t nb_ports;
	uint16_t nb_lcores;
};
/* >8 End of configuration of ports and number of assigned lcores. */

/* per-port statistics struct */
struct ioat_port_statistics {
	uint64_t rx[RTE_MAX_ETHPORTS];
	uint64_t tx[RTE_MAX_ETHPORTS];
	uint64_t tx_dropped[RTE_MAX_ETHPORTS];
	uint64_t copy_dropped[RTE_MAX_ETHPORTS];
};
struct ioat_port_statistics port_statistics;

struct total_statistics {
	uint64_t total_packets_dropped;
	uint64_t total_packets_tx;
	uint64_t total_packets_rx;
	uint64_t total_successful_enqueues;
	uint64_t total_failed_enqueues;
};

typedef enum copy_mode_t {
#define COPY_MODE_SW "sw"
	COPY_MODE_SW_NUM,
#define COPY_MODE_IOAT "hw"
	COPY_MODE_IOAT_NUM,
	COPY_MODE_INVALID_NUM,
	COPY_MODE_SIZE_NUM = COPY_MODE_INVALID_NUM
} copy_mode_t;

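/*
 * Note: the string macros are interleaved with the enum on purpose, so
 * each mode's command-line name ("sw"/"hw") sits beside the numeric id
 * that ioat_parse_copy_mode() maps it to.
 */
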
/* mask of enabled ports */
static uint32_t ioat_enabled_port_mask;

/* number of RX queues per port */
static uint16_t nb_queues = 1;

/* MAC updating enabled by default. */
static int mac_updating = 1;

/* hardware copy mode enabled by default. */
static copy_mode_t copy_mode = COPY_MODE_IOAT_NUM;

/* size of IOAT rawdev ring for hardware copy mode or
 * rte_ring for software copy mode
 */
static unsigned short ring_size = 2048;

/* global transmission config */
struct rxtx_transmission_config cfg;

/* configurable number of RX/TX ring descriptors */
static uint16_t nb_rxd = RX_DEFAULT_RINGSIZE;
static uint16_t nb_txd = TX_DEFAULT_RINGSIZE;

static volatile bool force_quit;

/* ethernet addresses of ports */
static struct rte_ether_addr ioat_ports_eth_addr[RTE_MAX_ETHPORTS];

static struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
struct rte_mempool *ioat_pktmbuf_pool;

/* Print out statistics for one port. */
static void
print_port_stats(uint16_t port_id)
{
	printf("\nStatistics for port %u ------------------------------"
		"\nPackets sent: %34"PRIu64
		"\nPackets received: %30"PRIu64
		"\nPackets dropped on tx: %25"PRIu64
		"\nPackets dropped on copy: %23"PRIu64,
		port_statistics.tx[port_id],
		port_statistics.rx[port_id],
		port_statistics.tx_dropped[port_id],
		port_statistics.copy_dropped[port_id]);
}

/* Print out statistics for one IOAT rawdev device. */
static void
print_rawdev_stats(uint32_t dev_id, uint64_t *xstats,
		unsigned int *ids_xstats, uint16_t nb_xstats,
		struct rte_rawdev_xstats_name *names_xstats)
{
	uint16_t i;

	printf("\nIOAT channel %u", dev_id);
	for (i = 0; i < nb_xstats; i++)
		printf("\n\t %s: %*"PRIu64,
			names_xstats[ids_xstats[i]].name,
			(int)(37 - strlen(names_xstats[ids_xstats[i]].name)),
			xstats[i]);
}

static void
print_total_stats(struct total_statistics *ts)
{
	printf("\nAggregate statistics ==============================="
		"\nTotal packets Tx: %24"PRIu64" [pps]"
		"\nTotal packets Rx: %24"PRIu64" [pps]"
		"\nTotal packets dropped: %19"PRIu64" [pps]",
		ts->total_packets_tx,
		ts->total_packets_rx,
		ts->total_packets_dropped);

	if (copy_mode == COPY_MODE_IOAT_NUM) {
		printf("\nTotal IOAT successful enqueues: %8"PRIu64" [enq/s]"
			"\nTotal IOAT failed enqueues: %12"PRIu64" [enq/s]",
			ts->total_successful_enqueues,
			ts->total_failed_enqueues);
	}

	printf("\n====================================================\n");
}

/* Print out statistics on packets dropped. */
static void
print_stats(char *prgname)
{
	struct total_statistics ts, delta_ts;
	uint32_t i, port_id, dev_id;
	struct rte_rawdev_xstats_name *names_xstats;
	uint64_t *xstats;
	unsigned int *ids_xstats, nb_xstats;
	char status_string[255]; /* to print at the top of the output */
	int status_strlen;
	int ret;

	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char topLeft[] = { 27, '[', '1', ';', '1', 'H', '\0' };

	status_strlen = snprintf(status_string, sizeof(status_string),
			"%s, ", prgname);
	status_strlen += snprintf(status_string + status_strlen,
			sizeof(status_string) - status_strlen,
			"Worker Threads = %d, ",
			rte_lcore_count() > 2 ? 2 : 1);
	status_strlen += snprintf(status_string + status_strlen,
			sizeof(status_string) - status_strlen,
			"Copy Mode = %s,\n", copy_mode == COPY_MODE_SW_NUM ?
			COPY_MODE_SW : COPY_MODE_IOAT);
	status_strlen += snprintf(status_string + status_strlen,
			sizeof(status_string) - status_strlen,
			"Updating MAC = %s, ", mac_updating ?
			"enabled" : "disabled");
	status_strlen += snprintf(status_string + status_strlen,
			sizeof(status_string) - status_strlen,
			"Rx Queues = %d, ", nb_queues);
	status_strlen += snprintf(status_string + status_strlen,
			sizeof(status_string) - status_strlen,
			"Ring Size = %d", ring_size);

	/* Allocate memory for xstats names and values */
	ret = rte_rawdev_xstats_names_get(
		cfg.ports[0].ioat_ids[0], NULL, 0);
	if (ret < 0)
		return;
	nb_xstats = (unsigned int)ret;

	names_xstats = malloc(sizeof(*names_xstats) * nb_xstats);
	if (names_xstats == NULL) {
		rte_exit(EXIT_FAILURE,
			"Error allocating xstat names memory\n");
	}
	rte_rawdev_xstats_names_get(cfg.ports[0].ioat_ids[0],
		names_xstats, nb_xstats);

	ids_xstats = malloc(sizeof(*ids_xstats) * 2);
	if (ids_xstats == NULL) {
		rte_exit(EXIT_FAILURE,
			"Error allocating xstat ids_xstats memory\n");
	}

	xstats = malloc(sizeof(*xstats) * 2);
	if (xstats == NULL) {
		rte_exit(EXIT_FAILURE,
			"Error allocating xstat memory\n");
	}

	/* Get failed/successful enqueues stats index */
	ids_xstats[0] = ids_xstats[1] = nb_xstats;
	for (i = 0; i < nb_xstats; i++) {
		if (!strcmp(names_xstats[i].name, "failed_enqueues"))
			ids_xstats[0] = i;
		else if (!strcmp(names_xstats[i].name, "successful_enqueues"))
			ids_xstats[1] = i;
		if (ids_xstats[0] < nb_xstats && ids_xstats[1] < nb_xstats)
			break;
	}
	if (ids_xstats[0] == nb_xstats || ids_xstats[1] == nb_xstats) {
		rte_exit(EXIT_FAILURE,
			"Error getting failed/successful enqueues stats index\n");
	}

	memset(&ts, 0, sizeof(struct total_statistics));

	while (!force_quit) {
		/* Sleep for 1 second each round - init sleep allows reading
		 * messages from app startup.
		 */
		sleep(1);

		/* Clear screen and move to top left */
		printf("%s%s", clr, topLeft);

		memset(&delta_ts, 0, sizeof(struct total_statistics));

		printf("%s\n", status_string);

		for (i = 0; i < cfg.nb_ports; i++) {
			port_id = cfg.ports[i].rxtx_port;
			print_port_stats(port_id);

			delta_ts.total_packets_dropped +=
				port_statistics.tx_dropped[port_id]
				+ port_statistics.copy_dropped[port_id];
			delta_ts.total_packets_tx +=
				port_statistics.tx[port_id];
			delta_ts.total_packets_rx +=
				port_statistics.rx[port_id];

			if (copy_mode == COPY_MODE_IOAT_NUM) {
				uint32_t j;

				for (j = 0; j < cfg.ports[i].nb_queues; j++) {
					dev_id = cfg.ports[i].ioat_ids[j];
					rte_rawdev_xstats_get(dev_id,
						ids_xstats, xstats, 2);

					print_rawdev_stats(dev_id, xstats,
						ids_xstats, 2, names_xstats);

					/* xstats[k] holds the value for id
					 * ids_xstats[k], so index by position
					 */
					delta_ts.total_failed_enqueues +=
						xstats[0];
					delta_ts.total_successful_enqueues +=
						xstats[1];
				}
			}
		}

		delta_ts.total_packets_tx -= ts.total_packets_tx;
		delta_ts.total_packets_rx -= ts.total_packets_rx;
		delta_ts.total_packets_dropped -= ts.total_packets_dropped;
		delta_ts.total_failed_enqueues -= ts.total_failed_enqueues;
		delta_ts.total_successful_enqueues -=
			ts.total_successful_enqueues;

		printf("\n");
		print_total_stats(&delta_ts);

		fflush(stdout);

		ts.total_packets_tx += delta_ts.total_packets_tx;
		ts.total_packets_rx += delta_ts.total_packets_rx;
		ts.total_packets_dropped += delta_ts.total_packets_dropped;
		ts.total_failed_enqueues += delta_ts.total_failed_enqueues;
		ts.total_successful_enqueues +=
			delta_ts.total_successful_enqueues;
	}

	free(names_xstats);
	free(xstats);
	free(ids_xstats);
}

static void
update_mac_addrs(struct rte_mbuf *m, uint32_t dest_portid)
{
	struct rte_ether_hdr *eth;
	void *tmp;

	eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);

	/* 02:00:00:00:00:xx - the 64-bit store below also overwrites the
	 * first 2 bytes of the source address, which is acceptable because
	 * they are rewritten by rte_ether_addr_copy() right after.
	 */
	tmp = &eth->dst_addr.addr_bytes[0];
	*((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dest_portid << 40);

	/* src addr */
	rte_ether_addr_copy(&ioat_ports_eth_addr[dest_portid], &eth->src_addr);
}

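/*
 * Example (editorial, illustrative): after update_mac_addrs(m, 3) the
 * destination MAC reads 02:00:00:00:00:03 - a locally administered
 * unicast address carrying the TX port id in its last byte - while the
 * source MAC becomes the MAC of port 3. The constant works because the
 * address bytes are stored in transmission order, so the low byte 0x02
 * lands in the first octet on little-endian CPUs.
 */
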
/* Packet copy is performed by a user-defined function. 8< */
static inline void
pktmbuf_sw_copy(struct rte_mbuf *src, struct rte_mbuf *dst)
{
	/* Copy packet metadata */
	rte_memcpy(&dst->rearm_data,
		&src->rearm_data,
		offsetof(struct rte_mbuf, cacheline1)
		- offsetof(struct rte_mbuf, rearm_data));

	/* Copy packet data */
	rte_memcpy(rte_pktmbuf_mtod(dst, char *),
		rte_pktmbuf_mtod(src, char *), src->data_len);
}
/* >8 End of packet copy performed by a user-defined function. */

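/*
 * Minimal usage sketch for pktmbuf_sw_copy() (editorial; not part of the
 * original application). The helper name and calling convention are
 * illustrative assumptions; it clones one single-segment mbuf from "pool".
 */
static inline struct rte_mbuf *
sw_copy_one(struct rte_mempool *pool, struct rte_mbuf *src)
{
	struct rte_mbuf *dst = rte_pktmbuf_alloc(pool);

	if (dst != NULL)
		pktmbuf_sw_copy(src, dst); /* copies metadata and payload */
	return dst;
}
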
static uint32_t
ioat_enqueue_packets(struct rte_mbuf **pkts,
	uint32_t nb_rx, uint16_t dev_id)
{
	int ret;
	uint32_t i;
	struct rte_mbuf *pkts_copy[MAX_PKT_BURST];

	const uint64_t addr_offset = RTE_PTR_DIFF(pkts[0]->buf_addr,
		&pkts[0]->rearm_data);

	ret = rte_mempool_get_bulk(ioat_pktmbuf_pool,
		(void *)pkts_copy, nb_rx);

	if (unlikely(ret < 0))
		rte_exit(EXIT_FAILURE, "Unable to allocate memory.\n");

	for (i = 0; i < nb_rx; i++) {
		/* Perform data copy */
		ret = rte_ioat_enqueue_copy(dev_id,
			pkts[i]->buf_iova - addr_offset,
			pkts_copy[i]->buf_iova - addr_offset,
			rte_pktmbuf_data_len(pkts[i]) + addr_offset,
			(uintptr_t)pkts[i],
			(uintptr_t)pkts_copy[i]);

		if (ret != 1)
			break;
	}

	/* Free any not enqueued packets. */
	rte_mempool_put_bulk(ioat_pktmbuf_pool, (void *)&pkts[i], nb_rx - i);
	rte_mempool_put_bulk(ioat_pktmbuf_pool, (void *)&pkts_copy[i],
		nb_rx - i);

	return i;
}

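/*
 * Note: rte_ioat_enqueue_copy() only queues a descriptor; nothing is
 * submitted to hardware until the doorbell is rung with
 * rte_ioat_perform_ops(), which ioat_rx_port() does once per burst. The
 * source and destination mbuf pointers travel with each descriptor as
 * opaque handles and are handed back by rte_ioat_completed_ops() on the
 * TX side.
 */
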
/* Receive packets on one port and enqueue to IOAT rawdev or rte_ring. 8< */
static void
ioat_rx_port(struct rxtx_port_config *rx_config)
{
	uint32_t nb_rx, nb_enq, i, j;
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];

	for (i = 0; i < rx_config->nb_queues; i++) {

		nb_rx = rte_eth_rx_burst(rx_config->rxtx_port, i,
			pkts_burst, MAX_PKT_BURST);

		if (nb_rx == 0)
			continue;

		port_statistics.rx[rx_config->rxtx_port] += nb_rx;

		if (copy_mode == COPY_MODE_IOAT_NUM) {
			/* Perform packet hardware copy */
			nb_enq = ioat_enqueue_packets(pkts_burst,
				nb_rx, rx_config->ioat_ids[i]);
			if (nb_enq > 0)
				rte_ioat_perform_ops(rx_config->ioat_ids[i]);
		} else {
			/* Perform packet software copy, free source packets */
			int ret;
			struct rte_mbuf *pkts_burst_copy[MAX_PKT_BURST];

			ret = rte_mempool_get_bulk(ioat_pktmbuf_pool,
				(void *)pkts_burst_copy, nb_rx);

			if (unlikely(ret < 0))
				rte_exit(EXIT_FAILURE,
					"Unable to allocate memory.\n");

			for (j = 0; j < nb_rx; j++)
				pktmbuf_sw_copy(pkts_burst[j],
					pkts_burst_copy[j]);

			rte_mempool_put_bulk(ioat_pktmbuf_pool,
				(void *)pkts_burst, nb_rx);

			nb_enq = rte_ring_enqueue_burst(
				rx_config->rx_to_tx_ring,
				(void *)pkts_burst_copy, nb_rx, NULL);

			/* Free any not enqueued packets. */
			rte_mempool_put_bulk(ioat_pktmbuf_pool,
				(void *)&pkts_burst_copy[nb_enq],
				nb_rx - nb_enq);
		}

		port_statistics.copy_dropped[rx_config->rxtx_port] +=
			(nb_rx - nb_enq);
	}
}
/* >8 End of receive packets on one port and enqueue to IOAT rawdev or rte_ring. */

/* Transmit packets from IOAT rawdev/rte_ring for one port. 8< */
static void
ioat_tx_port(struct rxtx_port_config *tx_config)
{
	uint32_t i, j, nb_dq = 0;
	struct rte_mbuf *mbufs_src[MAX_PKT_BURST];
	struct rte_mbuf *mbufs_dst[MAX_PKT_BURST];

	for (i = 0; i < tx_config->nb_queues; i++) {
		if (copy_mode == COPY_MODE_IOAT_NUM) {
			/* Dequeue the mbufs from IOAT device. Since all memory
			 * is DPDK pinned memory, all addresses should be
			 * valid; we therefore don't check for copy errors.
			 */
			nb_dq = rte_ioat_completed_ops(
				tx_config->ioat_ids[i], MAX_PKT_BURST, NULL, NULL,
				(void *)mbufs_src, (void *)mbufs_dst);
		} else {
			/* Dequeue the mbufs from rx_to_tx_ring. */
			nb_dq = rte_ring_dequeue_burst(
				tx_config->rx_to_tx_ring, (void *)mbufs_dst,
				MAX_PKT_BURST, NULL);
		}

		if ((int32_t)nb_dq <= 0)
			return;

		if (copy_mode == COPY_MODE_IOAT_NUM)
			rte_mempool_put_bulk(ioat_pktmbuf_pool,
				(void *)mbufs_src, nb_dq);

		/* Update MAC addresses if enabled */
		if (mac_updating)
			for (j = 0; j < nb_dq; j++)
				update_mac_addrs(mbufs_dst[j],
					tx_config->rxtx_port);

		const uint16_t nb_tx = rte_eth_tx_burst(
			tx_config->rxtx_port, 0,
			(void *)mbufs_dst, nb_dq);

		port_statistics.tx[tx_config->rxtx_port] += nb_tx;

		/* Free any unsent packets. */
		if (unlikely(nb_tx < nb_dq))
			rte_mempool_put_bulk(ioat_pktmbuf_pool,
				(void *)&mbufs_dst[nb_tx],
				nb_dq - nb_tx);
	}
}
/* >8 End of transmitting packets from IOAT. */

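/*
 * Note: completed copies are always sent on TX queue 0 - only the RX side
 * of a port is multi-queue, with one IOAT channel per RX queue. In
 * hardware mode the source mbufs handed back by rte_ioat_completed_ops()
 * are bulk-freed here and only the copies are transmitted.
 */
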
/* Main rx processing loop for IOAT rawdev. */
static void
rx_main_loop(void)
{
	uint16_t i;
	uint16_t nb_ports = cfg.nb_ports;

	RTE_LOG(INFO, IOAT, "Entering main rx loop for copy on lcore %u\n",
		rte_lcore_id());
	while (!force_quit)
		for (i = 0; i < nb_ports; i++)
			ioat_rx_port(&cfg.ports[i]);
}

/* Main tx processing loop for hardware copy. */
static void
tx_main_loop(void)
{
	uint16_t i;
	uint16_t nb_ports = cfg.nb_ports;

	RTE_LOG(INFO, IOAT, "Entering main tx loop for copy on lcore %u\n",
		rte_lcore_id());
	while (!force_quit)
		for (i = 0; i < nb_ports; i++)
			ioat_tx_port(&cfg.ports[i]);
}

/* Main rx and tx loop if only one worker lcore available */
static void
rxtx_main_loop(void)
{
	uint16_t i;
	uint16_t nb_ports = cfg.nb_ports;

	RTE_LOG(INFO, IOAT, "Entering main rx and tx loop for copy on"
		" lcore %u\n", rte_lcore_id());
	while (!force_quit)
		for (i = 0; i < nb_ports; i++) {
			ioat_rx_port(&cfg.ports[i]);
			ioat_tx_port(&cfg.ports[i]);
		}
}

/* Start processing for each lcore. 8< */
static void start_forwarding_cores(void)
{
	uint32_t lcore_id = rte_lcore_id();

	RTE_LOG(INFO, IOAT, "Entering %s on lcore %u\n",
		__func__, rte_lcore_id());

	if (cfg.nb_lcores == 1) {
		lcore_id = rte_get_next_lcore(lcore_id, true, true);
		rte_eal_remote_launch((lcore_function_t *)rxtx_main_loop,
			NULL, lcore_id);
	} else if (cfg.nb_lcores > 1) {
		lcore_id = rte_get_next_lcore(lcore_id, true, true);
		rte_eal_remote_launch((lcore_function_t *)rx_main_loop,
			NULL, lcore_id);

		lcore_id = rte_get_next_lcore(lcore_id, true, true);
		rte_eal_remote_launch((lcore_function_t *)tx_main_loop, NULL,
			lcore_id);
	}
}
/* >8 End of starting processing on each lcore. */

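/*
 * Note on the resulting lcore layout: with one worker lcore the combined
 * rxtx_main_loop does everything; with two or more, the first worker
 * polls RX and enqueues copies while the second collects completions and
 * transmits. The main lcore stays free to print statistics.
 */
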
/* Display usage */
static void
ioat_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n"
		"  -p --portmask: hexadecimal bitmask of ports to configure\n"
		"  -q NQ: number of RX queues per port (default is 1)\n"
		"  --[no-]mac-updating: Enable or disable MAC addresses updating (enabled by default)\n"
		"      When enabled:\n"
		"       - The source MAC address is replaced by the TX port MAC address\n"
		"       - The destination MAC address is replaced by 02:00:00:00:00:TX_PORT_ID\n"
		"  -c --copy-type CT: type of copy: sw|hw\n"
		"  -s --ring-size RS: size of IOAT rawdev ring for hardware copy mode or rte_ring for software copy mode\n",
			prgname);
}

static int
ioat_parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* Parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;

	return pm;
}

static copy_mode_t
ioat_parse_copy_mode(const char *copy_mode)
{
	if (strcmp(copy_mode, COPY_MODE_SW) == 0)
		return COPY_MODE_SW_NUM;
	else if (strcmp(copy_mode, COPY_MODE_IOAT) == 0)
		return COPY_MODE_IOAT_NUM;

	return COPY_MODE_INVALID_NUM;
}

/* Parse the arguments given in the command line of the application */
static int
ioat_parse_args(int argc, char **argv, unsigned int nb_ports)
{
	static const char short_options[] =
		"p:"  /* portmask */
		"q:"  /* number of RX queues per port */
		"c:"  /* copy type (sw|hw) */
		"s:"  /* ring size */
		;

	static const struct option lgopts[] = {
		{CMD_LINE_OPT_MAC_UPDATING, no_argument, &mac_updating, 1},
		{CMD_LINE_OPT_NO_MAC_UPDATING, no_argument, &mac_updating, 0},
		{CMD_LINE_OPT_PORTMASK, required_argument, NULL, 'p'},
		{CMD_LINE_OPT_NB_QUEUE, required_argument, NULL, 'q'},
		{CMD_LINE_OPT_COPY_TYPE, required_argument, NULL, 'c'},
		{CMD_LINE_OPT_RING_SIZE, required_argument, NULL, 's'},
		{NULL, 0, 0, 0}
	};

	const unsigned int default_port_mask = (1 << nb_ports) - 1;
	int opt, ret;
	char **argvopt;
	int option_index;
	char *prgname = argv[0];

	ioat_enabled_port_mask = default_port_mask;
	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, short_options,
			lgopts, &option_index)) != EOF) {

		switch (opt) {
		case 'p':
			ioat_enabled_port_mask = ioat_parse_portmask(optarg);
			if (ioat_enabled_port_mask & ~default_port_mask ||
					ioat_enabled_port_mask <= 0) {
				printf("Invalid portmask, %s, suggest 0x%x\n",
						optarg, default_port_mask);
				ioat_usage(prgname);
				return -1;
			}
			break;

		case 'q':
			nb_queues = atoi(optarg);
			if (nb_queues == 0 || nb_queues > MAX_RX_QUEUES_COUNT) {
				printf("Invalid RX queues number %s. Max %u\n",
					optarg, MAX_RX_QUEUES_COUNT);
				ioat_usage(prgname);
				return -1;
			}
			break;

		case 'c':
			copy_mode = ioat_parse_copy_mode(optarg);
			if (copy_mode == COPY_MODE_INVALID_NUM) {
				printf("Invalid copy type. Use: sw, hw\n");
				ioat_usage(prgname);
				return -1;
			}
			break;

		case 's':
			ring_size = atoi(optarg);
			if (ring_size == 0) {
				printf("Invalid ring size, %s.\n", optarg);
				ioat_usage(prgname);
				return -1;
			}
			break;

		/* long options */
		case 0:
			break;

		default:
			ioat_usage(prgname);
			return -1;
		}
	}

	printf("MAC updating %s\n", mac_updating ? "enabled" : "disabled");
	if (optind >= 0)
		argv[optind - 1] = prgname;

	ret = optind - 1;
	optind = 1; /* reset getopt lib */
	return ret;
}

/* check link status, return true if at least one port is up */
static int
check_link_status(uint32_t port_mask)
{
	uint16_t portid;
	struct rte_eth_link link;
	int ret, link_status = 0;
	char link_status_text[RTE_ETH_LINK_MAX_STR_LEN];

	printf("\nChecking link status\n");
	RTE_ETH_FOREACH_DEV(portid) {
		if ((port_mask & (1 << portid)) == 0)
			continue;

		memset(&link, 0, sizeof(link));
		ret = rte_eth_link_get(portid, &link);
		if (ret < 0) {
			printf("Port %u link get failed: err=%d\n",
					portid, ret);
			continue;
		}

		/* Print link status */
		rte_eth_link_to_str(link_status_text,
			sizeof(link_status_text), &link);
		printf("Port %d %s\n", portid, link_status_text);

		if (link.link_status)
			link_status = 1;
	}
	return link_status;
}

/* Configuration of device. 8< */
static void
configure_rawdev_queue(uint32_t dev_id)
{
	struct rte_ioat_rawdev_config dev_config = {
		.ring_size = ring_size,
		.no_prefetch_completions = (cfg.nb_lcores > 1),
	};
	struct rte_rawdev_info info = { .dev_private = &dev_config };

	if (rte_rawdev_configure(dev_id, &info, sizeof(dev_config)) != 0) {
		rte_exit(EXIT_FAILURE,
			"Error with rte_rawdev_configure()\n");
	}
	if (rte_rawdev_start(dev_id) != 0) {
		rte_exit(EXIT_FAILURE,
			"Error with rte_rawdev_start()\n");
	}
}
/* >8 End of configuration of device. */

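/*
 * Note: no_prefetch_completions is set whenever more than one worker
 * lcore is used, because then one lcore enqueues copies while another
 * polls completions; completion prefetching is only safe when a single
 * lcore does both.
 */
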
/* Using IOAT rawdev API functions. 8< */
static void
assign_rawdevs(void)
{
	uint16_t nb_rawdev = 0, rdev_id = 0;
	uint32_t i, j;

	for (i = 0; i < cfg.nb_ports; i++) {
		for (j = 0; j < cfg.ports[i].nb_queues; j++) {
			struct rte_rawdev_info rdev_info = { 0 };

			do {
				if (rdev_id == rte_rawdev_count())
					goto end;
				rte_rawdev_info_get(rdev_id++, &rdev_info, 0);
			} while (rdev_info.driver_name == NULL ||
				strcmp(rdev_info.driver_name,
					IOAT_PMD_RAWDEV_NAME_STR) != 0);

			cfg.ports[i].ioat_ids[j] = rdev_id - 1;
			configure_rawdev_queue(cfg.ports[i].ioat_ids[j]);
			++nb_rawdev;
		}
	}
end:
	if (nb_rawdev < cfg.nb_ports * cfg.ports[0].nb_queues)
		rte_exit(EXIT_FAILURE,
			"Not enough IOAT rawdevs (%u) for all queues (%u).\n",
			nb_rawdev, cfg.nb_ports * cfg.ports[0].nb_queues);
	RTE_LOG(INFO, IOAT, "Number of used rawdevs: %u.\n", nb_rawdev);
}
/* >8 End of using IOAT rawdev API functions. */

/* Assign ring structures for packet exchange. 8< */
static void
assign_rings(void)
{
	uint32_t i;

	for (i = 0; i < cfg.nb_ports; i++) {
		char ring_name[RTE_RING_NAMESIZE];

		snprintf(ring_name, sizeof(ring_name), "rx_to_tx_ring_%u", i);
		/* Create ring for inter core communication */
		cfg.ports[i].rx_to_tx_ring = rte_ring_create(
			ring_name, ring_size,
			rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ);

		if (cfg.ports[i].rx_to_tx_ring == NULL)
			rte_exit(EXIT_FAILURE, "Ring create failed: %s\n",
				rte_strerror(rte_errno));
	}
}
/* >8 End of assigning ring structures for packet exchange. */

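/*
 * Note: RING_F_SP_ENQ | RING_F_SC_DEQ is safe because exactly one lcore
 * enqueues into each ring (the RX worker) and one dequeues (the TX
 * worker). Without RING_F_EXACT_SZ, rte_ring_create() requires a
 * power-of-two size; the default ring_size of 2048 satisfies that.
 */
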
/*
 * Initializes a given port using global settings and with the RX buffers
 * coming from the mbuf_pool passed as a parameter.
 */
static inline void
port_init(uint16_t portid, struct rte_mempool *mbuf_pool, uint16_t nb_queues)
{
	/* Configuring port to use RSS for multiple RX queues. 8< */
	static const struct rte_eth_conf port_conf = {
		.rxmode = {
			.mq_mode = ETH_MQ_RX_RSS,
			.max_rx_pkt_len = RTE_ETHER_MAX_LEN
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = NULL,
				.rss_hf = ETH_RSS_PROTO_MASK,
			}
		}
	};
	/* >8 End of configuring port to use RSS for multiple RX queues. */

	struct rte_eth_rxconf rxq_conf;
	struct rte_eth_txconf txq_conf;
	struct rte_eth_conf local_port_conf = port_conf;
	struct rte_eth_dev_info dev_info;
	int ret;
	uint16_t i;

	/* Skip ports that are not enabled */
	if ((ioat_enabled_port_mask & (1 << portid)) == 0) {
		printf("Skipping disabled port %u\n", portid);
		return;
	}

	/* Init port */
	printf("Initializing port %u... ", portid);
	fflush(stdout);
	ret = rte_eth_dev_info_get(portid, &dev_info);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Cannot get device info: %s, port=%u\n",
			rte_strerror(-ret), portid);

	local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
		dev_info.flow_type_rss_offloads;
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		local_port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
	ret = rte_eth_dev_configure(portid, nb_queues, 1, &local_port_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Cannot configure device:"
			" err=%d, port=%u\n", ret, portid);

	ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
			&nb_txd);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			"Cannot adjust number of descriptors: err=%d, port=%u\n",
			ret, portid);

	rte_eth_macaddr_get(portid, &ioat_ports_eth_addr[portid]);

	/* Init RX queues */
	rxq_conf = dev_info.default_rxconf;
	rxq_conf.offloads = local_port_conf.rxmode.offloads;
	for (i = 0; i < nb_queues; i++) {
		ret = rte_eth_rx_queue_setup(portid, i, nb_rxd,
			rte_eth_dev_socket_id(portid), &rxq_conf,
			mbuf_pool);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"rte_eth_rx_queue_setup:err=%d,port=%u, queue_id=%u\n",
				ret, portid, i);
	}

	/* Init one TX queue on each port */
	txq_conf = dev_info.default_txconf;
	txq_conf.offloads = local_port_conf.txmode.offloads;
	ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
		rte_eth_dev_socket_id(portid),
		&txq_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			"rte_eth_tx_queue_setup:err=%d,port=%u\n",
			ret, portid);

	/* Initialize TX buffers */
	tx_buffer[portid] = rte_zmalloc_socket("tx_buffer",
		RTE_ETH_TX_BUFFER_SIZE(MAX_PKT_BURST), 0,
		rte_eth_dev_socket_id(portid));
	if (tx_buffer[portid] == NULL)
		rte_exit(EXIT_FAILURE,
			"Cannot allocate buffer for tx on port %u\n",
			portid);

	rte_eth_tx_buffer_init(tx_buffer[portid], MAX_PKT_BURST);

	ret = rte_eth_tx_buffer_set_err_callback(tx_buffer[portid],
		rte_eth_tx_buffer_count_callback,
		&port_statistics.tx_dropped[portid]);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			"Cannot set error callback for tx buffer on port %u\n",
			portid);

	/* Start device. 8< */
	ret = rte_eth_dev_start(portid);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			"rte_eth_dev_start:err=%d, port=%u\n",
			ret, portid);
	/* >8 End of starting device. */

	/* RX port is set in promiscuous mode. 8< */
	rte_eth_promiscuous_enable(portid);
	/* >8 End of RX port is set in promiscuous mode. */

	printf("Port %u, MAC address: " RTE_ETHER_ADDR_PRT_FMT "\n\n",
		portid,
		RTE_ETHER_ADDR_BYTES(&ioat_ports_eth_addr[portid]));

	cfg.ports[cfg.nb_ports].rxtx_port = portid;
	cfg.ports[cfg.nb_ports++].nb_queues = nb_queues;
}

static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\n\nSignal %d received, preparing to exit...\n",
			signum);
		force_quit = true;
	}
}

int
main(int argc, char **argv)
{
	int ret;
	uint16_t nb_ports, portid;
	uint32_t i, j;
	unsigned int nb_mbufs;

	/* Init EAL. 8< */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
	/* >8 End of init EAL. */
	argc -= ret;
	argv += ret;

	force_quit = false;
	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	nb_ports = rte_eth_dev_count_avail();
	if (nb_ports == 0)
		rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");

	/* Parse application arguments (after the EAL ones) */
	ret = ioat_parse_args(argc, argv, nb_ports);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid IOAT arguments\n");

	/* Allocates mempool to hold the mbufs. 8< */
	nb_mbufs = RTE_MAX(nb_ports * (nb_queues * (nb_rxd + nb_txd +
		4 * MAX_PKT_BURST) + rte_lcore_count() * MEMPOOL_CACHE_SIZE),
		MIN_POOL_SIZE);

	/* Create the mbuf pool */
	ioat_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", nb_mbufs,
		MEMPOOL_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
		rte_socket_id());
	if (ioat_pktmbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");
	/* >8 End of allocates mempool to hold the mbufs. */

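	/*
	 * Worked sizing example (editorial): with 2 ports, 1 RX queue per
	 * port, the default 1024 RX/TX descriptors and 3 lcores, the formula
	 * above gives 2 * (1 * (1024 + 1024 + 4 * 32) + 3 * 512) = 7424
	 * mbufs, so RTE_MAX() raises the request to MIN_POOL_SIZE (65536).
	 */
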
	/* Initialize each port. 8< */
	RTE_ETH_FOREACH_DEV(portid)
		port_init(portid, ioat_pktmbuf_pool, nb_queues);
	/* >8 End of initializing each port. */

	/* Initialize port xstats */
	memset(&port_statistics, 0, sizeof(port_statistics));

	/* Assigning each port resources. 8< */
	while (!check_link_status(ioat_enabled_port_mask) && !force_quit)
		sleep(1);

	/* Check if there are enough lcores for all ports. */
	cfg.nb_lcores = rte_lcore_count() - 1;
	if (cfg.nb_lcores < 1)
		rte_exit(EXIT_FAILURE,
			"There should be at least one worker lcore.\n");

	if (copy_mode == COPY_MODE_IOAT_NUM)
		assign_rawdevs();
	else /* copy_mode == COPY_MODE_SW_NUM */
		assign_rings();
	/* >8 End of assigning each port resources. */

	start_forwarding_cores();
	/* main core prints stats while other cores forward */
	print_stats(argv[0]);

	/* force_quit is true when we get here */
	rte_eal_mp_wait_lcore();

	for (i = 0; i < cfg.nb_ports; i++) {
		printf("Closing port %d\n", cfg.ports[i].rxtx_port);
		ret = rte_eth_dev_stop(cfg.ports[i].rxtx_port);
		if (ret != 0)
			RTE_LOG(ERR, IOAT, "rte_eth_dev_stop: err=%s, port=%u\n",
				rte_strerror(-ret), cfg.ports[i].rxtx_port);

		rte_eth_dev_close(cfg.ports[i].rxtx_port);
		if (copy_mode == COPY_MODE_IOAT_NUM) {
			for (j = 0; j < cfg.ports[i].nb_queues; j++) {
				printf("Stopping rawdev %d\n",
					cfg.ports[i].ioat_ids[j]);
				rte_rawdev_stop(cfg.ports[i].ioat_ids[j]);
			}
		} else /* copy_mode == COPY_MODE_SW_NUM */
			rte_ring_free(cfg.ports[i].rx_to_tx_ring);
	}

	/* clean up the EAL */
	rte_eal_cleanup();

	return 0;
}