/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <getopt.h>
#include <signal.h>
#include <stdbool.h>
#include <unistd.h>

#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_rawdev.h>
#include <rte_ioat_rawdev.h>

/* log type used for this application's messages. */
#define RTE_LOGTYPE_IOAT RTE_LOGTYPE_USER1
#define MAX_PKT_BURST 32
#define MEMPOOL_CACHE_SIZE 512
#define MIN_POOL_SIZE 65536U
#define CMD_LINE_OPT_MAC_UPDATING "mac-updating"
#define CMD_LINE_OPT_NO_MAC_UPDATING "no-mac-updating"
#define CMD_LINE_OPT_PORTMASK "portmask"
#define CMD_LINE_OPT_NB_QUEUE "nb-queue"
#define CMD_LINE_OPT_COPY_TYPE "copy-type"
#define CMD_LINE_OPT_RING_SIZE "ring-size"
#define CMD_LINE_OPT_BATCH_SIZE "dma-batch-size"
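
/*
 * Example invocation (illustrative only; the EAL core/memory arguments
 * depend on the target system):
 *   ./ioatfwd -l 0-2 -- -p 0x3 -q 1 --copy-type hw --ring-size 2048
 */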

/* configurable number of RX/TX ring descriptors */
#define RX_DEFAULT_RINGSIZE 1024
#define TX_DEFAULT_RINGSIZE 1024

/* max number of RX queues per port */
#define MAX_RX_QUEUES_COUNT 8

struct rxtx_port_config {
	/* Ethernet port used for input and output */
	uint16_t rxtx_port;
	/* number of RX queues configured on the port */
	uint16_t nb_queues;
	/* for software copy mode */
	struct rte_ring *rx_to_tx_ring;
	/* for IOAT rawdev copy mode */
	uint16_t ioat_ids[MAX_RX_QUEUES_COUNT];
};

/* Configuring ports and number of assigned lcores in struct. 8< */
struct rxtx_transmission_config {
	struct rxtx_port_config ports[RTE_MAX_ETHPORTS];
	uint16_t nb_ports;
	uint16_t nb_lcores;
};
/* >8 End of configuration of ports and number of assigned lcores. */

/* per-port statistics struct */
struct ioat_port_statistics {
	uint64_t rx[RTE_MAX_ETHPORTS];
	uint64_t tx[RTE_MAX_ETHPORTS];
	uint64_t tx_dropped[RTE_MAX_ETHPORTS];
	uint64_t copy_dropped[RTE_MAX_ETHPORTS];
};
struct ioat_port_statistics port_statistics;

struct total_statistics {
	uint64_t total_packets_dropped;
	uint64_t total_packets_tx;
	uint64_t total_packets_rx;
	uint64_t total_successful_enqueues;
	uint64_t total_failed_enqueues;
};

typedef enum copy_mode_t {
#define COPY_MODE_SW "sw"
	COPY_MODE_SW_NUM,
#define COPY_MODE_IOAT "hw"
	COPY_MODE_IOAT_NUM,
	COPY_MODE_INVALID_NUM,
	COPY_MODE_SIZE_NUM = COPY_MODE_INVALID_NUM
} copy_mode_t;
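
/*
 * The COPY_MODE_* string macros are deliberately interleaved with the
 * enumerators so that each numeric mode sits next to the command-line
 * string that selects it (see ioat_parse_copy_mode()).
 */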

/* mask of enabled ports */
static uint32_t ioat_enabled_port_mask;

/* number of RX queues per port */
static uint16_t nb_queues = 1;

/* MAC updating enabled by default. */
static int mac_updating = 1;

/* hardware copy mode enabled by default. */
static copy_mode_t copy_mode = COPY_MODE_IOAT_NUM;

/* size of IOAT rawdev ring for hardware copy mode or
 * rte_ring for software copy mode
 */
static unsigned short ring_size = 2048;

/* global transmission config */
struct rxtx_transmission_config cfg;

/* configurable number of RX/TX ring descriptors */
static uint16_t nb_rxd = RX_DEFAULT_RINGSIZE;
static uint16_t nb_txd = TX_DEFAULT_RINGSIZE;

static volatile bool force_quit;

static uint32_t ioat_batch_sz = MAX_PKT_BURST;

/* ethernet addresses of ports */
static struct rte_ether_addr ioat_ports_eth_addr[RTE_MAX_ETHPORTS];

static struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
struct rte_mempool *ioat_pktmbuf_pool;
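
/*
 * Data-path overview: the RX stage copies each received packet into a
 * freshly allocated mbuf, either through an IOAT rawdev channel (hw
 * copy) or with rte_memcpy (sw copy), then hands the copies to the TX
 * stage over rx_to_tx_ring, which transmits them on TX queue 0.
 */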

/* Print out statistics for one port. */
static void
print_port_stats(uint16_t port_id)
{
	printf("\nStatistics for port %u ------------------------------"
		"\nPackets sent: %34"PRIu64
		"\nPackets received: %30"PRIu64
		"\nPackets dropped on tx: %25"PRIu64
		"\nPackets dropped on copy: %23"PRIu64,
		port_id,
		port_statistics.tx[port_id],
		port_statistics.rx[port_id],
		port_statistics.tx_dropped[port_id],
		port_statistics.copy_dropped[port_id]);
}

/* Print out statistics for one IOAT rawdev device. */
static void
print_rawdev_stats(uint32_t dev_id, uint64_t *xstats,
	unsigned int *ids_xstats, uint16_t nb_xstats,
	struct rte_rawdev_xstats_name *names_xstats)
{
	uint16_t i;

	printf("\nIOAT channel %u", dev_id);
	for (i = 0; i < nb_xstats; i++)
		printf("\n\t %s: %*"PRIu64,
			names_xstats[ids_xstats[i]].name,
			(int)(37 - strlen(names_xstats[ids_xstats[i]].name)),
			xstats[i]);
}

static void
print_total_stats(struct total_statistics *ts)
{
	printf("\nAggregate statistics ==============================="
		"\nTotal packets Tx: %24"PRIu64" [pps]"
		"\nTotal packets Rx: %24"PRIu64" [pps]"
		"\nTotal packets dropped: %19"PRIu64" [pps]",
		ts->total_packets_tx,
		ts->total_packets_rx,
		ts->total_packets_dropped);

	if (copy_mode == COPY_MODE_IOAT_NUM) {
		printf("\nTotal IOAT successful enqueues: %8"PRIu64" [enq/s]"
			"\nTotal IOAT failed enqueues: %12"PRIu64" [enq/s]",
			ts->total_successful_enqueues,
			ts->total_failed_enqueues);
	}

	printf("\n====================================================\n");
}

/* Print out statistics on packets dropped. */
static void
print_stats(char *prgname)
{
	struct total_statistics ts, delta_ts;
	uint32_t i, port_id, dev_id;
	struct rte_rawdev_xstats_name *names_xstats;
	uint64_t *xstats;
	unsigned int *ids_xstats, nb_xstats;
	char status_string[255]; /* to print at the top of the output */
	int status_strlen;
	int ret;

	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char topLeft[] = { 27, '[', '1', ';', '1', 'H', '\0' };

	status_strlen = snprintf(status_string, sizeof(status_string),
			"%s, ", prgname);
	status_strlen += snprintf(status_string + status_strlen,
			sizeof(status_string) - status_strlen,
			"Worker Threads = %d, ",
			rte_lcore_count() > 2 ? 2 : 1);
	status_strlen += snprintf(status_string + status_strlen,
			sizeof(status_string) - status_strlen,
			"Copy Mode = %s,\n", copy_mode == COPY_MODE_SW_NUM ?
			COPY_MODE_SW : COPY_MODE_IOAT);
	status_strlen += snprintf(status_string + status_strlen,
			sizeof(status_string) - status_strlen,
			"Updating MAC = %s, ", mac_updating ?
			"enabled" : "disabled");
	status_strlen += snprintf(status_string + status_strlen,
			sizeof(status_string) - status_strlen,
			"Rx Queues = %d, ", nb_queues);
	status_strlen += snprintf(status_string + status_strlen,
			sizeof(status_string) - status_strlen,
			"Ring Size = %d", ring_size);

	/* Allocate memory for xstats names and values */
	ret = rte_rawdev_xstats_names_get(
		cfg.ports[0].ioat_ids[0], NULL, 0);
	if (ret < 0)
		return;
	nb_xstats = (unsigned int)ret;

	names_xstats = malloc(sizeof(*names_xstats) * nb_xstats);
	if (names_xstats == NULL) {
		rte_exit(EXIT_FAILURE,
			"Error allocating xstat names memory\n");
	}
	rte_rawdev_xstats_names_get(cfg.ports[0].ioat_ids[0],
		names_xstats, nb_xstats);

	ids_xstats = malloc(sizeof(*ids_xstats) * 2);
	if (ids_xstats == NULL) {
		rte_exit(EXIT_FAILURE,
			"Error allocating xstat ids_xstats memory\n");
	}

	xstats = malloc(sizeof(*xstats) * 2);
	if (xstats == NULL) {
		rte_exit(EXIT_FAILURE,
			"Error allocating xstat memory\n");
	}

	/* Get failed/successful enqueues stats index */
	ids_xstats[0] = ids_xstats[1] = nb_xstats;
	for (i = 0; i < nb_xstats; i++) {
		if (!strcmp(names_xstats[i].name, "failed_enqueues"))
			ids_xstats[0] = i;
		else if (!strcmp(names_xstats[i].name, "successful_enqueues"))
			ids_xstats[1] = i;
		if (ids_xstats[0] < nb_xstats && ids_xstats[1] < nb_xstats)
			break;
	}
	if (ids_xstats[0] == nb_xstats || ids_xstats[1] == nb_xstats) {
		rte_exit(EXIT_FAILURE,
			"Error getting failed/successful enqueues stats index\n");
	}

	memset(&ts, 0, sizeof(struct total_statistics));

	while (!force_quit) {
		/* Sleep for 1 second each round - init sleep allows reading
		 * messages from app startup.
		 */
		sleep(1);

		/* Clear screen and move to top left */
		printf("%s%s", clr, topLeft);

		memset(&delta_ts, 0, sizeof(struct total_statistics));

		printf("%s\n", status_string);

		for (i = 0; i < cfg.nb_ports; i++) {
			port_id = cfg.ports[i].rxtx_port;
			print_port_stats(port_id);

			delta_ts.total_packets_dropped +=
				port_statistics.tx_dropped[port_id]
				+ port_statistics.copy_dropped[port_id];
			delta_ts.total_packets_tx +=
				port_statistics.tx[port_id];
			delta_ts.total_packets_rx +=
				port_statistics.rx[port_id];

			if (copy_mode == COPY_MODE_IOAT_NUM) {
				uint32_t j;

				for (j = 0; j < cfg.ports[i].nb_queues; j++) {
					dev_id = cfg.ports[i].ioat_ids[j];
					rte_rawdev_xstats_get(dev_id,
						ids_xstats, xstats, 2);

					print_rawdev_stats(dev_id, xstats,
						ids_xstats, 2, names_xstats);

					/* values arrive in request order */
					delta_ts.total_failed_enqueues +=
						xstats[0];
					delta_ts.total_successful_enqueues +=
						xstats[1];
				}
			}
		}

		delta_ts.total_packets_tx -= ts.total_packets_tx;
		delta_ts.total_packets_rx -= ts.total_packets_rx;
		delta_ts.total_packets_dropped -= ts.total_packets_dropped;
		delta_ts.total_failed_enqueues -= ts.total_failed_enqueues;
		delta_ts.total_successful_enqueues -=
			ts.total_successful_enqueues;

		printf("\n");
		print_total_stats(&delta_ts);

		fflush(stdout);

		ts.total_packets_tx += delta_ts.total_packets_tx;
		ts.total_packets_rx += delta_ts.total_packets_rx;
		ts.total_packets_dropped += delta_ts.total_packets_dropped;
		ts.total_failed_enqueues += delta_ts.total_failed_enqueues;
		ts.total_successful_enqueues +=
			delta_ts.total_successful_enqueues;
	}

	free(names_xstats);
	free(xstats);
	free(ids_xstats);
}

static void
update_mac_addrs(struct rte_mbuf *m, uint32_t dest_portid)
{
	struct rte_ether_hdr *eth;
	void *tmp;

	eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);

	/* 02:00:00:00:00:xx - overwriting 2 bytes of source address but
	 * it's acceptable because it gets overwritten by rte_ether_addr_copy
	 */
	tmp = &eth->dst_addr.addr_bytes[0];
	*((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dest_portid << 40);

	/* src addr */
	rte_ether_addr_copy(&ioat_ports_eth_addr[dest_portid], &eth->src_addr);
}
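
/*
 * Worked example of the 64-bit store above (little-endian CPU assumed):
 * for dest_portid == 1 the destination MAC becomes 02:00:00:00:00:01,
 * i.e. byte 0 gets 0x02 and byte 5 the port id; the two surplus bytes
 * spill into the source MAC, which rte_ether_addr_copy() then rewrites.
 */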

/* Perform packet copy with a user-defined function. 8< */
static inline void
pktmbuf_metadata_copy(const struct rte_mbuf *src, struct rte_mbuf *dst)
{
	dst->data_off = src->data_off;
	memcpy(&dst->rx_descriptor_fields1, &src->rx_descriptor_fields1,
		offsetof(struct rte_mbuf, buf_len) -
		offsetof(struct rte_mbuf, rx_descriptor_fields1));
}

/* Copy packet data */
static inline void
pktmbuf_sw_copy(struct rte_mbuf *src, struct rte_mbuf *dst)
{
	rte_memcpy(rte_pktmbuf_mtod(dst, char *),
		rte_pktmbuf_mtod(src, char *), src->data_len);
}
/* >8 End of packet copy with a user-defined function. */
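
/*
 * pktmbuf_metadata_copy() leans on the rte_mbuf layout: all per-packet
 * metadata between the rx_descriptor_fields1 marker and buf_len
 * (packet type, pkt_len, data_len, RSS hash, etc.) is duplicated with
 * a single memcpy instead of per-field assignments.
 */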

static uint32_t
ioat_enqueue_packets(struct rte_mbuf *pkts[], struct rte_mbuf *pkts_copy[],
	uint32_t nb_rx, uint16_t dev_id)
{
	int ret;
	uint32_t i;

	for (i = 0; i < nb_rx; i++) {
		/* Perform data copy */
		ret = rte_ioat_enqueue_copy(dev_id,
			rte_pktmbuf_iova(pkts[i]),
			rte_pktmbuf_iova(pkts_copy[i]),
			rte_pktmbuf_data_len(pkts[i]),
			(uintptr_t)pkts[i],
			(uintptr_t)pkts_copy[i]);
		if (ret != 1)
			break;
	}

	return i;
}

static inline uint32_t
ioat_enqueue(struct rte_mbuf *pkts[], struct rte_mbuf *pkts_copy[],
	uint32_t num, uint32_t step, uint16_t dev_id)
{
	uint32_t i, k, m, n;

	k = 0;
	for (i = 0; i < num; i += m) {

		m = RTE_MIN(step, num - i);
		n = ioat_enqueue_packets(pkts + i, pkts_copy + i, m, dev_id);
		k += n;
		if (n > 0)
			rte_ioat_perform_ops(dev_id);

		/* don't try to enqueue more if HW queue is full */
		if (n != m)
			break;
	}

	return k;
}
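
/*
 * Batching note: descriptors are pushed in chunks of at most `step`
 * (--dma-batch-size) and each chunk is submitted to hardware with a
 * single rte_ioat_perform_ops() call, so the doorbell cost is
 * amortised over the whole batch.
 */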

static inline uint32_t
ioat_dequeue(struct rte_mbuf *src[], struct rte_mbuf *dst[], uint32_t num,
	uint16_t dev_id)
{
	int32_t rc;
	/* Dequeue the mbufs from IOAT device. Since all memory
	 * is DPDK pinned memory and therefore all addresses should
	 * be valid, we don't check for copy errors
	 */
	rc = rte_ioat_completed_ops(dev_id, num, NULL, NULL,
		(void *)src, (void *)dst);
	if (rc < 0) {
		RTE_LOG(ERR, IOAT,
			"rte_ioat_completed_ops(%hu) failed, error: %d\n",
			dev_id, rte_errno);
		rc = 0;
	}
	return rc;
}

/* Receive packets on one port and enqueue to IOAT rawdev or rte_ring. 8< */
static void
ioat_rx_port(struct rxtx_port_config *rx_config)
{
	int32_t ret;
	uint32_t nb_rx, nb_enq, i, j;
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	struct rte_mbuf *pkts_burst_copy[MAX_PKT_BURST];

	for (i = 0; i < rx_config->nb_queues; i++) {

		nb_rx = rte_eth_rx_burst(rx_config->rxtx_port, i,
			pkts_burst, MAX_PKT_BURST);

		if (nb_rx == 0)
			continue;

		port_statistics.rx[rx_config->rxtx_port] += nb_rx;

		ret = rte_mempool_get_bulk(ioat_pktmbuf_pool,
			(void *)pkts_burst_copy, nb_rx);

		if (unlikely(ret < 0))
			rte_exit(EXIT_FAILURE,
				"Unable to allocate memory.\n");

		for (j = 0; j < nb_rx; j++)
			pktmbuf_metadata_copy(pkts_burst[j],
				pkts_burst_copy[j]);

		if (copy_mode == COPY_MODE_IOAT_NUM) {

			/* enqueue packets for hardware copy */
			nb_enq = ioat_enqueue(pkts_burst, pkts_burst_copy,
				nb_rx, ioat_batch_sz, rx_config->ioat_ids[i]);

			/* free any not enqueued packets. */
			rte_mempool_put_bulk(ioat_pktmbuf_pool,
				(void *)&pkts_burst[nb_enq],
				nb_rx - nb_enq);
			rte_mempool_put_bulk(ioat_pktmbuf_pool,
				(void *)&pkts_burst_copy[nb_enq],
				nb_rx - nb_enq);

			port_statistics.copy_dropped[rx_config->rxtx_port] +=
				(nb_rx - nb_enq);

			/* get completed copies */
			nb_rx = ioat_dequeue(pkts_burst, pkts_burst_copy,
				MAX_PKT_BURST, rx_config->ioat_ids[i]);
		} else {
			/* Perform packet software copy, free source packets */
			for (j = 0; j < nb_rx; j++)
				pktmbuf_sw_copy(pkts_burst[j],
					pkts_burst_copy[j]);
		}

		rte_mempool_put_bulk(ioat_pktmbuf_pool,
			(void *)pkts_burst, nb_rx);

		nb_enq = rte_ring_enqueue_burst(rx_config->rx_to_tx_ring,
			(void *)pkts_burst_copy, nb_rx, NULL);

		/* Free any not enqueued packets. */
		rte_mempool_put_bulk(ioat_pktmbuf_pool,
			(void *)&pkts_burst_copy[nb_enq],
			nb_rx - nb_enq);

		port_statistics.copy_dropped[rx_config->rxtx_port] +=
			(nb_rx - nb_enq);
	}
}
/* >8 End of receive packets on one port and enqueue to IOAT rawdev or rte_ring. */
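
/*
 * In both modes the copies end up on rx_to_tx_ring: hw mode frees the
 * originals of the completed copies returned by ioat_dequeue(), sw mode
 * frees them right after rte_memcpy(); any copies the ring cannot
 * absorb are returned to the mempool and counted as copy_dropped.
 */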

/* Transmit packets from IOAT rawdev/rte_ring for one port. 8< */
static void
ioat_tx_port(struct rxtx_port_config *tx_config)
{
	uint32_t i, j, nb_dq, nb_tx;
	struct rte_mbuf *mbufs[MAX_PKT_BURST];

	for (i = 0; i < tx_config->nb_queues; i++) {

		/* Dequeue the mbufs from rx_to_tx_ring. */
		nb_dq = rte_ring_dequeue_burst(tx_config->rx_to_tx_ring,
				(void *)mbufs, MAX_PKT_BURST, NULL);
		if (nb_dq == 0)
			continue;

		/* Update macs if enabled */
		if (mac_updating) {
			for (j = 0; j < nb_dq; j++)
				update_mac_addrs(mbufs[j],
					tx_config->rxtx_port);
		}

		nb_tx = rte_eth_tx_burst(tx_config->rxtx_port, 0,
				(void *)mbufs, nb_dq);

		port_statistics.tx[tx_config->rxtx_port] += nb_tx;

		/* Free any unsent packets. */
		if (unlikely(nb_tx < nb_dq))
			rte_mempool_put_bulk(ioat_pktmbuf_pool,
				(void *)&mbufs[nb_tx], nb_dq - nb_tx);
	}
}
/* >8 End of transmitting packets from IOAT. */

/* Main rx processing loop for IOAT rawdev. */
static void
rx_main_loop(void)
{
	uint16_t i;
	uint16_t nb_ports = cfg.nb_ports;

	RTE_LOG(INFO, IOAT, "Entering main rx loop for copy on lcore %u\n",
		rte_lcore_id());

	while (!force_quit)
		for (i = 0; i < nb_ports; i++)
			ioat_rx_port(&cfg.ports[i]);
}

/* Main tx processing loop for hardware copy. */
static void
tx_main_loop(void)
{
	uint16_t i;
	uint16_t nb_ports = cfg.nb_ports;

	RTE_LOG(INFO, IOAT, "Entering main tx loop for copy on lcore %u\n",
		rte_lcore_id());

	while (!force_quit)
		for (i = 0; i < nb_ports; i++)
			ioat_tx_port(&cfg.ports[i]);
}

/* Main rx and tx loop if only one worker lcore available */
static void
rxtx_main_loop(void)
{
	uint16_t i;
	uint16_t nb_ports = cfg.nb_ports;

	RTE_LOG(INFO, IOAT, "Entering main rx and tx loop for copy on"
		" lcore %u\n", rte_lcore_id());

	while (!force_quit)
		for (i = 0; i < nb_ports; i++) {
			ioat_rx_port(&cfg.ports[i]);
			ioat_tx_port(&cfg.ports[i]);
		}
}

/* Start processing for each lcore. 8< */
static void start_forwarding_cores(void)
{
	uint32_t lcore_id = rte_lcore_id();

	RTE_LOG(INFO, IOAT, "Entering %s on lcore %u\n",
		__func__, rte_lcore_id());

	if (cfg.nb_lcores == 1) {
		lcore_id = rte_get_next_lcore(lcore_id, true, true);
		rte_eal_remote_launch((lcore_function_t *)rxtx_main_loop,
			NULL, lcore_id);
	} else if (cfg.nb_lcores > 1) {
		lcore_id = rte_get_next_lcore(lcore_id, true, true);
		rte_eal_remote_launch((lcore_function_t *)rx_main_loop,
			NULL, lcore_id);

		lcore_id = rte_get_next_lcore(lcore_id, true, true);
		rte_eal_remote_launch((lcore_function_t *)tx_main_loop, NULL,
			lcore_id);
	}
}
/* >8 End of starting to process for each lcore. */
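
/*
 * Lcore layout (illustrative): with three lcores the main lcore prints
 * statistics while the first worker runs rx_main_loop() and the second
 * runs tx_main_loop(); with two lcores the single worker runs the
 * combined rxtx_main_loop().
 */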

/* Display usage */
static void
ioat_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n"
		"  -b --dma-batch-size: number of requests per DMA batch\n"
		"  -p --portmask: hexadecimal bitmask of ports to configure\n"
		"  -q NQ: number of RX queues per port (default is 1)\n"
		"  --[no-]mac-updating: Enable or disable MAC addresses updating (enabled by default)\n"
		"      When enabled:\n"
		"       - The source MAC address is replaced by the TX port MAC address\n"
		"       - The destination MAC address is replaced by 02:00:00:00:00:TX_PORT_ID\n"
		"  -c --copy-type CT: type of copy: sw|hw\n"
		"  -s --ring-size RS: size of IOAT rawdev ring for hardware copy mode or rte_ring for software copy mode\n",
			prgname);
}

static int
ioat_parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* Parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;

	return pm;
}

static copy_mode_t
ioat_parse_copy_mode(const char *copy_mode)
{
	if (strcmp(copy_mode, COPY_MODE_SW) == 0)
		return COPY_MODE_SW_NUM;
	else if (strcmp(copy_mode, COPY_MODE_IOAT) == 0)
		return COPY_MODE_IOAT_NUM;

	return COPY_MODE_INVALID_NUM;
}

/* Parse the argument given in the command line of the application */
static int
ioat_parse_args(int argc, char **argv, unsigned int nb_ports)
{
	static const char short_options[] =
		"b:"  /* dma batch size */
		"c:"  /* copy type (sw|hw) */
		"p:"  /* portmask */
		"q:"  /* number of RX queues per port */
		"s:"  /* ring size */
		;

	static const struct option lgopts[] = {
		{CMD_LINE_OPT_MAC_UPDATING, no_argument, &mac_updating, 1},
		{CMD_LINE_OPT_NO_MAC_UPDATING, no_argument, &mac_updating, 0},
		{CMD_LINE_OPT_PORTMASK, required_argument, NULL, 'p'},
		{CMD_LINE_OPT_NB_QUEUE, required_argument, NULL, 'q'},
		{CMD_LINE_OPT_COPY_TYPE, required_argument, NULL, 'c'},
		{CMD_LINE_OPT_RING_SIZE, required_argument, NULL, 's'},
		{CMD_LINE_OPT_BATCH_SIZE, required_argument, NULL, 'b'},
		{NULL, 0, 0, 0}
	};

	const unsigned int default_port_mask = (1 << nb_ports) - 1;
	int opt, ret;
	char **argvopt;
	int option_index;
	char *prgname = argv[0];

	ioat_enabled_port_mask = default_port_mask;
	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, short_options,
			lgopts, &option_index)) != EOF) {

		switch (opt) {
		case 'b':
			ioat_batch_sz = atoi(optarg);
			if (ioat_batch_sz > MAX_PKT_BURST) {
				printf("Invalid dma batch size, %s.\n", optarg);
				ioat_usage(prgname);
				return -1;
			}
			break;
		case 'p':
			ioat_enabled_port_mask = ioat_parse_portmask(optarg);
			/* Invalid portmask given */
			if (ioat_enabled_port_mask & ~default_port_mask ||
					ioat_enabled_port_mask <= 0) {
				printf("Invalid portmask, %s, suggest 0x%x\n",
						optarg, default_port_mask);
				ioat_usage(prgname);
				return -1;
			}
			break;
		case 'q':
			nb_queues = atoi(optarg);
			if (nb_queues == 0 || nb_queues > MAX_RX_QUEUES_COUNT) {
				printf("Invalid RX queues number %s. Max %u\n",
					optarg, MAX_RX_QUEUES_COUNT);
				ioat_usage(prgname);
				return -1;
			}
			break;
		case 'c':
			copy_mode = ioat_parse_copy_mode(optarg);
			if (copy_mode == COPY_MODE_INVALID_NUM) {
				printf("Invalid copy type. Use: sw, hw\n");
				ioat_usage(prgname);
				return -1;
			}
			break;
		case 's':
			ring_size = atoi(optarg);
			if (ring_size == 0) {
				printf("Invalid ring size, %s.\n", optarg);
				ioat_usage(prgname);
				return -1;
			}
			break;
		/* long options */
		case 0:
			break;
		default:
			ioat_usage(prgname);
			return -1;
		}
	}

	printf("MAC updating %s\n", mac_updating ? "enabled" : "disabled");
	if (optind >= 0)
		argv[optind - 1] = prgname;

	ret = optind - 1;
	optind = 1; /* reset getopt lib */
	return ret;
}

/* check link status, return true if at least one port is up */
static int
check_link_status(uint32_t port_mask)
{
	uint16_t portid;
	struct rte_eth_link link;
	int ret, link_status = 0;
	char link_status_text[RTE_ETH_LINK_MAX_STR_LEN];

	printf("\nChecking link status\n");
	RTE_ETH_FOREACH_DEV(portid) {
		if ((port_mask & (1 << portid)) == 0)
			continue;

		memset(&link, 0, sizeof(link));
		ret = rte_eth_link_get(portid, &link);
		if (ret < 0) {
			printf("Port %u link get failed: err=%d\n",
					portid, ret);
			continue;
		}

		/* Print link status */
		rte_eth_link_to_str(link_status_text,
			sizeof(link_status_text), &link);
		printf("Port %d %s\n", portid, link_status_text);

		if (link.link_status)
			link_status = 1;
	}
	return link_status;
}

/* Configuration of device. 8< */
static void
configure_rawdev_queue(uint32_t dev_id)
{
	struct rte_ioat_rawdev_config dev_config = {
		.ring_size = ring_size,
		.no_prefetch_completions = (cfg.nb_lcores > 1),
	};
	struct rte_rawdev_info info = { .dev_private = &dev_config };

	if (rte_rawdev_configure(dev_id, &info, sizeof(dev_config)) != 0) {
		rte_exit(EXIT_FAILURE,
			"Error with rte_rawdev_configure()\n");
	}
	if (rte_rawdev_start(dev_id) != 0) {
		rte_exit(EXIT_FAILURE,
			"Error with rte_rawdev_start()\n");
	}
}
/* >8 End of configuration of device. */

/* Using IOAT rawdev API functions. 8< */
static void
assign_rawdevs(void)
{
	uint16_t nb_rawdev = 0, rdev_id = 0;
	uint32_t i, j;

	for (i = 0; i < cfg.nb_ports; i++) {
		for (j = 0; j < cfg.ports[i].nb_queues; j++) {
			struct rte_rawdev_info rdev_info = { 0 };

			do {
				if (rdev_id == rte_rawdev_count())
					goto end;
				rte_rawdev_info_get(rdev_id++, &rdev_info, 0);
			} while (rdev_info.driver_name == NULL ||
					strcmp(rdev_info.driver_name,
						IOAT_PMD_RAWDEV_NAME_STR) != 0);

			cfg.ports[i].ioat_ids[j] = rdev_id - 1;
			configure_rawdev_queue(cfg.ports[i].ioat_ids[j]);
			++nb_rawdev;
		}
	}
end:
	if (nb_rawdev < cfg.nb_ports * cfg.ports[0].nb_queues)
		rte_exit(EXIT_FAILURE,
			"Not enough IOAT rawdevs (%u) for all queues (%u).\n",
			nb_rawdev, cfg.nb_ports * cfg.ports[0].nb_queues);
	RTE_LOG(INFO, IOAT, "Number of used rawdevs: %u.\n", nb_rawdev);
}
/* >8 End of using IOAT rawdev API functions. */

/* Assign ring structures for packet exchanging. 8< */
static void
assign_rings(void)
{
	uint32_t i;

	for (i = 0; i < cfg.nb_ports; i++) {
		char ring_name[RTE_RING_NAMESIZE];

		snprintf(ring_name, sizeof(ring_name), "rx_to_tx_ring_%u", i);
		/* Create ring for inter core communication */
		cfg.ports[i].rx_to_tx_ring = rte_ring_create(
			ring_name, ring_size,
			rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ);

		if (cfg.ports[i].rx_to_tx_ring == NULL)
			rte_exit(EXIT_FAILURE, "Ring create failed: %s\n",
				rte_strerror(rte_errno));
	}
}
/* >8 End of assigning ring structures for packet exchanging. */
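
/*
 * Note: without the RING_F_EXACT_SZ flag, rte_ring_create() requires a
 * power-of-two count, so a --ring-size value that is not a power of two
 * makes ring creation fail here in software copy mode.
 */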

/*
 * Initializes a given port using global settings and with the RX buffers
 * coming from the mbuf_pool passed as a parameter.
 */
static inline void
port_init(uint16_t portid, struct rte_mempool *mbuf_pool, uint16_t nb_queues)
{
	/* Configuring port to use RSS for multiple RX queues. 8< */
	static const struct rte_eth_conf port_conf = {
		.rxmode = {
			.mq_mode = RTE_ETH_MQ_RX_RSS,
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = NULL,
				.rss_hf = RTE_ETH_RSS_PROTO_MASK,
			}
		}
	};
	/* >8 End of configuring port to use RSS for multiple RX queues. */

	struct rte_eth_rxconf rxq_conf;
	struct rte_eth_txconf txq_conf;
	struct rte_eth_conf local_port_conf = port_conf;
	struct rte_eth_dev_info dev_info;
	int ret, i;

	/* Skip ports that are not enabled */
	if ((ioat_enabled_port_mask & (1 << portid)) == 0) {
		printf("Skipping disabled port %u\n", portid);
		return;
	}

	/* Init port */
	printf("Initializing port %u... ", portid);
	fflush(stdout);
	ret = rte_eth_dev_info_get(portid, &dev_info);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Cannot get device info: %s, port=%u\n",
			rte_strerror(-ret), portid);

	local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
		dev_info.flow_type_rss_offloads;
	ret = rte_eth_dev_configure(portid, nb_queues, 1, &local_port_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Cannot configure device:"
			" err=%d, port=%u\n", ret, portid);

	ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
			&nb_txd);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			"Cannot adjust number of descriptors: err=%d, port=%u\n",
			ret, portid);

	rte_eth_macaddr_get(portid, &ioat_ports_eth_addr[portid]);

	/* Init RX queues */
	rxq_conf = dev_info.default_rxconf;
	rxq_conf.offloads = local_port_conf.rxmode.offloads;
	for (i = 0; i < nb_queues; i++) {
		ret = rte_eth_rx_queue_setup(portid, i, nb_rxd,
			rte_eth_dev_socket_id(portid), &rxq_conf,
			mbuf_pool);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"rte_eth_rx_queue_setup:err=%d,port=%u, queue_id=%u\n",
				ret, portid, i);
	}

	/* Init one TX queue on each port */
	txq_conf = dev_info.default_txconf;
	txq_conf.offloads = local_port_conf.txmode.offloads;
	ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
			rte_eth_dev_socket_id(portid),
			&txq_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			"rte_eth_tx_queue_setup:err=%d,port=%u\n",
			ret, portid);

	/* Initialize TX buffers */
	tx_buffer[portid] = rte_zmalloc_socket("tx_buffer",
			RTE_ETH_TX_BUFFER_SIZE(MAX_PKT_BURST), 0,
			rte_eth_dev_socket_id(portid));
	if (tx_buffer[portid] == NULL)
		rte_exit(EXIT_FAILURE,
			"Cannot allocate buffer for tx on port %u\n",
			portid);

	rte_eth_tx_buffer_init(tx_buffer[portid], MAX_PKT_BURST);

	ret = rte_eth_tx_buffer_set_err_callback(tx_buffer[portid],
		rte_eth_tx_buffer_count_callback,
		&port_statistics.tx_dropped[portid]);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			"Cannot set error callback for tx buffer on port %u\n",
			portid);

	/* Start device. 8< */
	ret = rte_eth_dev_start(portid);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			"rte_eth_dev_start:err=%d, port=%u\n",
			ret, portid);
	/* >8 End of starting device. */

	/* RX port is set in promiscuous mode. 8< */
	rte_eth_promiscuous_enable(portid);
	/* >8 End of RX port is set in promiscuous mode. */

	printf("Port %u, MAC address: " RTE_ETHER_ADDR_PRT_FMT "\n\n",
			portid,
			RTE_ETHER_ADDR_BYTES(&ioat_ports_eth_addr[portid]));

	cfg.ports[cfg.nb_ports].rxtx_port = portid;
	cfg.ports[cfg.nb_ports++].nb_queues = nb_queues;
}

static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\n\nSignal %d received, preparing to exit...\n",
			signum);
		force_quit = true;
	}
}

int
main(int argc, char **argv)
{
	int ret;
	uint16_t nb_ports, portid;
	uint32_t i, j;
	unsigned int nb_mbufs;

	/* Init EAL. 8< */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
	/* >8 End of init EAL. */
	argc -= ret;
	argv += ret;

	force_quit = false;
	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	nb_ports = rte_eth_dev_count_avail();
	if (nb_ports == 0)
		rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");

	/* Parse application arguments (after the EAL ones) */
	ret = ioat_parse_args(argc, argv, nb_ports);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid IOAT arguments\n");

	/* Allocates mempool to hold the mbufs. 8< */
	nb_mbufs = RTE_MAX(nb_ports * (nb_queues * (nb_rxd + nb_txd +
		4 * MAX_PKT_BURST + ring_size) + ring_size +
		rte_lcore_count() * MEMPOOL_CACHE_SIZE),
		MIN_POOL_SIZE);
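
	/*
	 * Rough sizing: per port, each RX queue may hold nb_rxd + nb_txd
	 * descriptors plus up to 4 bursts in flight plus a full copy ring,
	 * and each port adds one rx_to_tx_ring; per-lcore mempool caches
	 * come on top, and MIN_POOL_SIZE bounds the result from below.
	 */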

	/* Create the mbuf pool */
	ioat_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", nb_mbufs,
		MEMPOOL_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
		rte_socket_id());
	if (ioat_pktmbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");
	/* >8 End of allocates mempool to hold the mbufs. */

	/* Initialize each port. 8< */
	cfg.nb_ports = 0;
	RTE_ETH_FOREACH_DEV(portid)
		port_init(portid, ioat_pktmbuf_pool, nb_queues);
	/* >8 End of initializing each port. */

	/* Initialize port xstats */
	memset(&port_statistics, 0, sizeof(port_statistics));

	/* Assigning each port resources. 8< */
	while (!check_link_status(ioat_enabled_port_mask) && !force_quit)
		sleep(1);

	/* Check if there is enough lcores for all ports. */
	cfg.nb_lcores = rte_lcore_count() - 1;
	if (cfg.nb_lcores < 1)
		rte_exit(EXIT_FAILURE,
			"There should be at least one worker lcore.\n");

	if (copy_mode == COPY_MODE_IOAT_NUM)
		assign_rawdevs();

	assign_rings();
	/* >8 End of assigning each port resources. */

	start_forwarding_cores();
	/* main core prints stats while other cores forward */
	print_stats(argv[0]);

	/* force_quit is true when we get here */
	rte_eal_mp_wait_lcore();

	for (i = 0; i < cfg.nb_ports; i++) {
		printf("Closing port %d\n", cfg.ports[i].rxtx_port);
		ret = rte_eth_dev_stop(cfg.ports[i].rxtx_port);
		if (ret != 0)
			RTE_LOG(ERR, IOAT, "rte_eth_dev_stop: err=%s, port=%u\n",
				rte_strerror(-ret), cfg.ports[i].rxtx_port);
		rte_eth_dev_close(cfg.ports[i].rxtx_port);
		if (copy_mode == COPY_MODE_IOAT_NUM) {
			for (j = 0; j < cfg.ports[i].nb_queues; j++) {
				printf("Stopping rawdev %d\n",
					cfg.ports[i].ioat_ids[j]);
				rte_rawdev_stop(cfg.ports[i].ioat_ids[j]);
			}
		} else /* copy_mode == COPY_MODE_SW_NUM */
			rte_ring_free(cfg.ports[i].rx_to_tx_ring);
	}

	/* clean up the EAL */