1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2021 Intel Corporation
11 #include <rte_malloc.h>
12 #include <rte_ethdev.h>
13 #include <rte_dmadev.h>
15 /* log type used for this application's console messages. */
16 #define RTE_LOGTYPE_DMA RTE_LOGTYPE_USER1
17 #define MAX_PKT_BURST 32
18 #define MEMPOOL_CACHE_SIZE 512
19 #define MIN_POOL_SIZE 65536U
20 #define CMD_LINE_OPT_MAC_UPDATING "mac-updating"
21 #define CMD_LINE_OPT_NO_MAC_UPDATING "no-mac-updating"
22 #define CMD_LINE_OPT_PORTMASK "portmask"
23 #define CMD_LINE_OPT_NB_QUEUE "nb-queue"
24 #define CMD_LINE_OPT_COPY_TYPE "copy-type"
25 #define CMD_LINE_OPT_RING_SIZE "ring-size"
26 #define CMD_LINE_OPT_BATCH_SIZE "dma-batch-size"
27 #define CMD_LINE_OPT_FRAME_SIZE "max-frame-size"
28 #define CMD_LINE_OPT_FORCE_COPY_SIZE "force-min-copy-size"
29 #define CMD_LINE_OPT_STATS_INTERVAL "stats-interval"
31 /* configurable number of RX/TX ring descriptors */
32 #define RX_DEFAULT_RINGSIZE 1024
33 #define TX_DEFAULT_RINGSIZE 1024
35 /* max number of RX queues per port */
36 #define MAX_RX_QUEUES_COUNT 8
38 struct rxtx_port_config {
39 uint16_t rxtx_port; /* Ethernet port used for RX and TX */
40 uint16_t nb_queues; /* number of RX queues on the port */
42 /* for software copy mode */
43 struct rte_ring *rx_to_tx_ring;
44 /* for dmadev HW copy mode */
45 uint16_t dmadev_ids[MAX_RX_QUEUES_COUNT];
46 };
48 /* Configuration of ports and the number of assigned lcores, kept in one struct. 8< */
49 struct rxtx_transmission_config {
50 struct rxtx_port_config ports[RTE_MAX_ETHPORTS];
51 uint16_t nb_ports;
52 uint16_t nb_lcores;
53 };
54 /* >8 End of configuration of ports and number of assigned lcores. */
56 /* per-port statistics struct */
57 struct dma_port_statistics {
58 uint64_t rx[RTE_MAX_ETHPORTS];
59 uint64_t tx[RTE_MAX_ETHPORTS];
60 uint64_t tx_dropped[RTE_MAX_ETHPORTS];
61 uint64_t copy_dropped[RTE_MAX_ETHPORTS];
62 };
63 struct dma_port_statistics port_statistics;
64 struct total_statistics {
65 uint64_t total_packets_dropped;
66 uint64_t total_packets_tx;
67 uint64_t total_packets_rx;
68 uint64_t total_submitted;
69 uint64_t total_completed;
70 uint64_t total_failed;
71 };
73 typedef enum copy_mode_t {
74 #define COPY_MODE_SW "sw"
75 COPY_MODE_SW_NUM,
76 #define COPY_MODE_DMA "hw"
77 COPY_MODE_DMA_NUM,
78 COPY_MODE_INVALID_NUM,
79 COPY_MODE_SIZE_NUM = COPY_MODE_INVALID_NUM
80 } copy_mode_t;
82 /* mask of enabled ports */
83 static uint32_t dma_enabled_port_mask;
85 /* number of RX queues per port */
86 static uint16_t nb_queues = 1;
88 /* MAC updating enabled by default. */
89 static int mac_updating = 1;
91 /* hardware copy mode enabled by default. */
92 static copy_mode_t copy_mode = COPY_MODE_DMA_NUM;
94 /* size of descriptor ring for hardware copy mode or
95  * rte_ring for software copy mode
96  */
97 static unsigned short ring_size = 2048;
99 /* interval, in seconds, between stats prints */
100 static unsigned short stats_interval = 1;
101 /* global mbuf arrays for tracking DMA bufs */
102 #define MBUF_RING_SIZE 2048
103 #define MBUF_RING_MASK (MBUF_RING_SIZE - 1)
104 struct dma_bufs {
105 struct rte_mbuf *bufs[MBUF_RING_SIZE];
106 struct rte_mbuf *copies[MBUF_RING_SIZE];
107 uint16_t sent;
108 };
109 static struct dma_bufs dma_bufs[RTE_DMADEV_DEFAULT_MAX];
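/* Note: MBUF_RING_SIZE is a power of two, so an index wraps into the arrays
 * above with a cheap "slot & MBUF_RING_MASK" (e.g. slot 2049 maps to entry 1).
 * The slot used is the one rte_dma_copy() reports for each enqueued copy, so
 * bufs[] and copies[] shadow the device's descriptor ring.
 */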
111 /* global transmission config */
112 struct rxtx_transmission_config cfg;
114 /* configurable number of RX/TX ring descriptors */
115 static uint16_t nb_rxd = RX_DEFAULT_RINGSIZE;
116 static uint16_t nb_txd = TX_DEFAULT_RINGSIZE;
118 static volatile bool force_quit;
120 static uint32_t dma_batch_sz = MAX_PKT_BURST;
121 static uint32_t max_frame_size;
122 static uint32_t force_min_copy_size;
124 /* ethernet addresses of ports */
125 static struct rte_ether_addr dma_ports_eth_addr[RTE_MAX_ETHPORTS];
127 struct rte_mempool *dma_pktmbuf_pool;
129 /* Print out statistics for one port. */
131 print_port_stats(uint16_t port_id)
133 printf("\nStatistics for port %u ------------------------------"
134 "\nPackets sent: %34"PRIu64
135 "\nPackets received: %30"PRIu64
136 "\nPackets dropped on tx: %25"PRIu64
137 "\nPackets dropped on copy: %23"PRIu64,
139 port_statistics.tx[port_id],
140 port_statistics.rx[port_id],
141 port_statistics.tx_dropped[port_id],
142 port_statistics.copy_dropped[port_id]);
145 /* Print out statistics for one dmadev device. */
147 print_dmadev_stats(uint32_t dev_id, struct rte_dma_stats stats)
149 printf("\nDMA channel %u", dev_id);
150 printf("\n\t Total submitted ops: %"PRIu64"", stats.submitted);
151 printf("\n\t Total completed ops: %"PRIu64"", stats.completed);
152 printf("\n\t Total failed ops: %"PRIu64"", stats.errors);
156 print_total_stats(struct total_statistics *ts)
158 printf("\nAggregate statistics ==============================="
159 "\nTotal packets Tx: %22"PRIu64" [pkt/s]"
160 "\nTotal packets Rx: %22"PRIu64" [pkt/s]"
161 "\nTotal packets dropped: %17"PRIu64" [pkt/s]",
162 ts->total_packets_tx / stats_interval,
163 ts->total_packets_rx / stats_interval,
164 ts->total_packets_dropped / stats_interval);
166 if (copy_mode == COPY_MODE_DMA_NUM) {
167 printf("\nTotal submitted ops: %19"PRIu64" [ops/s]"
168 "\nTotal completed ops: %19"PRIu64" [ops/s]"
169 "\nTotal failed ops: %22"PRIu64" [ops/s]",
170 ts->total_submitted / stats_interval,
171 ts->total_completed / stats_interval,
172 ts->total_failed / stats_interval);
175 printf("\n====================================================\n");
178 /* Print out statistics on packets dropped. */
180 print_stats(char *prgname)
182 struct total_statistics ts, delta_ts;
183 struct rte_dma_stats stats = {0};
184 uint32_t i, port_id, dev_id;
185 char status_string[255]; /* to print at the top of the output */
188 const char clr[] = { 27, '[', '2', 'J', '\0' };
189 const char topLeft[] = { 27, '[', '1', ';', '1', 'H', '\0' };
191 status_strlen = snprintf(status_string, sizeof(status_string),
193 status_strlen += snprintf(status_string + status_strlen,
194 sizeof(status_string) - status_strlen,
195 "Worker Threads = %d, ",
196 rte_lcore_count() > 2 ? 2 : 1);
197 status_strlen += snprintf(status_string + status_strlen,
198 sizeof(status_string) - status_strlen,
199 "Copy Mode = %s,\n", copy_mode == COPY_MODE_SW_NUM ?
200 COPY_MODE_SW : COPY_MODE_DMA);
201 status_strlen += snprintf(status_string + status_strlen,
202 sizeof(status_string) - status_strlen,
203 "Updating MAC = %s, ", mac_updating ?
204 "enabled" : "disabled");
205 status_strlen += snprintf(status_string + status_strlen,
206 sizeof(status_string) - status_strlen,
207 "Rx Queues = %d, ", nb_queues);
208 status_strlen += snprintf(status_string + status_strlen,
209 sizeof(status_string) - status_strlen,
210 "Ring Size = %d\n", ring_size);
211 status_strlen += snprintf(status_string + status_strlen,
212 sizeof(status_string) - status_strlen,
213 "Force Min Copy Size = %u Packet Data Room Size = %u",
215 rte_pktmbuf_data_room_size(dma_pktmbuf_pool) -
216 RTE_PKTMBUF_HEADROOM);
218 memset(&ts, 0, sizeof(struct total_statistics));
220 while (!force_quit) {
221 /* Sleep for "stats_interval" seconds each round - the initial sleep
222  * allows messages from app startup to be read.
223  */
224 sleep(stats_interval);
226 /* Clear screen and move to top left */
227 printf("%s%s", clr, topLeft);
229 memset(&delta_ts, 0, sizeof(struct total_statistics));
231 printf("%s\n", status_string);
233 for (i = 0; i < cfg.nb_ports; i++) {
234 port_id = cfg.ports[i].rxtx_port;
235 print_port_stats(port_id);
237 delta_ts.total_packets_dropped +=
238 port_statistics.tx_dropped[port_id]
239 + port_statistics.copy_dropped[port_id];
240 delta_ts.total_packets_tx +=
241 port_statistics.tx[port_id];
242 delta_ts.total_packets_rx +=
243 port_statistics.rx[port_id];
245 if (copy_mode == COPY_MODE_DMA_NUM) {
248 for (j = 0; j < cfg.ports[i].nb_queues; j++) {
249 dev_id = cfg.ports[i].dmadev_ids[j];
250 rte_dma_stats_get(dev_id, 0, &stats);
251 print_dmadev_stats(dev_id, stats);
253 delta_ts.total_submitted += stats.submitted;
254 delta_ts.total_completed += stats.completed;
255 delta_ts.total_failed += stats.errors;
260 delta_ts.total_packets_tx -= ts.total_packets_tx;
261 delta_ts.total_packets_rx -= ts.total_packets_rx;
262 delta_ts.total_packets_dropped -= ts.total_packets_dropped;
263 delta_ts.total_submitted -= ts.total_submitted;
264 delta_ts.total_completed -= ts.total_completed;
265 delta_ts.total_failed -= ts.total_failed;
268 print_total_stats(&delta_ts);
272 ts.total_packets_tx += delta_ts.total_packets_tx;
273 ts.total_packets_rx += delta_ts.total_packets_rx;
274 ts.total_packets_dropped += delta_ts.total_packets_dropped;
275 ts.total_submitted += delta_ts.total_submitted;
276 ts.total_completed += delta_ts.total_completed;
277 ts.total_failed += delta_ts.total_failed;
282 update_mac_addrs(struct rte_mbuf *m, uint32_t dest_portid)
284 struct rte_ether_hdr *eth;
285 void *tmp;
287 eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
289 /* 02:00:00:00:00:xx - overwriting 2 bytes of source address, but
290  * that is acceptable because it is immediately overwritten by rte_ether_addr_copy
291  */
292 tmp = &eth->dst_addr.addr_bytes[0];
293 *((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dest_portid << 40);
296 rte_ether_addr_copy(&dma_ports_eth_addr[dest_portid], &eth->src_addr);
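/* On a little-endian CPU the 8-byte store above lays down 02:00:00:00:00:<id>
 * in the first six bytes, so the destination MAC becomes 02:00:00:00:00:xx
 * with xx = dest_portid; the two bytes that spill into the source MAC are then
 * replaced by the rte_ether_addr_copy() call.
 */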
299 /* Packet copy is performed here by a user-defined function. 8< */
301 pktmbuf_metadata_copy(const struct rte_mbuf *src, struct rte_mbuf *dst)
303 dst->data_off = src->data_off;
304 memcpy(&dst->rx_descriptor_fields1, &src->rx_descriptor_fields1,
305 offsetof(struct rte_mbuf, buf_len) -
306 offsetof(struct rte_mbuf, rx_descriptor_fields1));
309 /* Copy packet data */
311 pktmbuf_sw_copy(struct rte_mbuf *src, struct rte_mbuf *dst)
313 rte_memcpy(rte_pktmbuf_mtod(dst, char *),
314 rte_pktmbuf_mtod(src, char *),
315 RTE_MAX(src->data_len, force_min_copy_size));
317 /* >8 End of packet copy performed by a user-defined function. */
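/* Both copy paths copy at least force_min_copy_size bytes per packet (the -m
 * option), which lets a benchmark force a fixed minimum copy length even for
 * packets shorter than that value.
 */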
320 dma_enqueue_packets(struct rte_mbuf *pkts[], struct rte_mbuf *pkts_copy[],
321 uint32_t nb_rx, uint16_t dev_id)
323 struct dma_bufs *dma = &dma_bufs[dev_id];
327 for (i = 0; i < nb_rx; i++) {
328 /* Perform data copy */
329 ret = rte_dma_copy(dev_id, 0,
330 rte_pktmbuf_iova(pkts[i]),
331 rte_pktmbuf_iova(pkts_copy[i]),
332 RTE_MAX(rte_pktmbuf_data_len(pkts[i]),
333 force_min_copy_size),
334 0);
335 if (ret < 0)
336 break;
339 dma->bufs[ret & MBUF_RING_MASK] = pkts[i];
340 dma->copies[ret & MBUF_RING_MASK] = pkts_copy[i];
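/* On success rte_dma_copy() returns the slot of the enqueued copy, so the
 * source and copy mbufs are parked in dma_bufs at that slot until dma_dequeue()
 * observes the completion and hands them on.
 */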
347 static inline uint32_t
348 dma_enqueue(struct rte_mbuf *pkts[], struct rte_mbuf *pkts_copy[],
349 uint32_t num, uint32_t step, uint16_t dev_id)
354 for (i = 0; i < num; i += m) {
356 m = RTE_MIN(step, num - i);
357 n = dma_enqueue_packets(pkts + i, pkts_copy + i, m, dev_id);
360 rte_dma_submit(dev_id, 0);
362 /* don't try to enqueue more if HW queue is full */
370 static inline uint32_t
371 dma_dequeue(struct rte_mbuf *src[], struct rte_mbuf *dst[], uint32_t num,
372 uint16_t dev_id)
374 struct dma_bufs *dma = &dma_bufs[dev_id];
375 uint16_t nb_dq, filled;
376 /* Dequeue the mbufs from the DMA device. Since all memory
377  * is DPDK pinned memory, all addresses should be valid, so
378  * we don't check for copy errors.
379  */
380 nb_dq = rte_dma_completed(dev_id, 0, num, NULL, NULL);
382 /* Return early if no work to do */
383 if (unlikely(nb_dq == 0))
384 return 0;
386 /* Populate src/dst with the tracked source and copy mbufs for tx */
387 for (filled = 0; filled < nb_dq; filled++) {
388 src[filled] = dma->bufs[(dma->sent + filled) & MBUF_RING_MASK];
389 dst[filled] = dma->copies[(dma->sent + filled) & MBUF_RING_MASK];
390 }
392 dma->sent += nb_dq;
394 return filled;
397 /* Receive packets on one port and enqueue to dmadev or rte_ring. 8< */
399 dma_rx_port(struct rxtx_port_config *rx_config)
402 uint32_t nb_rx, nb_enq, i, j;
403 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
404 struct rte_mbuf *pkts_burst_copy[MAX_PKT_BURST];
406 for (i = 0; i < rx_config->nb_queues; i++) {
408 nb_rx = rte_eth_rx_burst(rx_config->rxtx_port, i,
409 pkts_burst, MAX_PKT_BURST);
414 port_statistics.rx[rx_config->rxtx_port] += nb_rx;
416 ret = rte_mempool_get_bulk(dma_pktmbuf_pool,
417 (void *)pkts_burst_copy, nb_rx);
419 if (unlikely(ret < 0))
420 rte_exit(EXIT_FAILURE,
421 "Unable to allocate memory.\n");
423 for (j = 0; j < nb_rx; j++)
424 pktmbuf_metadata_copy(pkts_burst[j],
425 pkts_burst_copy[j]);
427 if (copy_mode == COPY_MODE_DMA_NUM) {
428 /* enqueue packets for hardware copy */
429 nb_enq = dma_enqueue(pkts_burst, pkts_burst_copy,
430 nb_rx, dma_batch_sz, rx_config->dmadev_ids[i]);
432 /* Free any packets that could not be enqueued. */
433 rte_mempool_put_bulk(dma_pktmbuf_pool,
434 (void *)&pkts_burst[nb_enq],
435 nb_rx - nb_enq);
436 rte_mempool_put_bulk(dma_pktmbuf_pool,
437 (void *)&pkts_burst_copy[nb_enq],
438 nb_rx - nb_enq);
440 port_statistics.copy_dropped[rx_config->rxtx_port] +=
441 nb_rx - nb_enq;
443 /* get completed copies */
444 nb_rx = dma_dequeue(pkts_burst, pkts_burst_copy,
445 MAX_PKT_BURST, rx_config->dmadev_ids[i]);
447 /* Perform packet software copy, free source packets */
448 for (j = 0; j < nb_rx; j++)
449 pktmbuf_sw_copy(pkts_burst[j],
450 pkts_burst_copy[j]);
453 rte_mempool_put_bulk(dma_pktmbuf_pool,
454 (void *)pkts_burst, nb_rx);
456 nb_enq = rte_ring_enqueue_burst(rx_config->rx_to_tx_ring,
457 (void *)pkts_burst_copy, nb_rx, NULL);
459 /* Free any packets that could not be enqueued. */
460 rte_mempool_put_bulk(dma_pktmbuf_pool,
461 (void *)&pkts_burst_copy[nb_enq],
462 nb_rx - nb_enq);
464 port_statistics.copy_dropped[rx_config->rxtx_port] +=
465 nb_rx - nb_enq;
468 /* >8 End of receive packets on one port and enqueue to dmadev or rte_ring. */
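/* In short: each RX burst gets copy mbufs from the mempool, duplicates the
 * mbuf metadata, and copies the payload either via the dmadev (asynchronously;
 * completions are drained on a later call) or via rte_memcpy(); finished
 * copies are passed to the TX side through rx_to_tx_ring.
 */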
470 /* Transmit packets from dmadev/rte_ring for one port. 8< */
472 dma_tx_port(struct rxtx_port_config *tx_config)
474 uint32_t i, j, nb_dq, nb_tx;
475 struct rte_mbuf *mbufs[MAX_PKT_BURST];
477 for (i = 0; i < tx_config->nb_queues; i++) {
479 /* Dequeue the mbufs from rx_to_tx_ring. */
480 nb_dq = rte_ring_dequeue_burst(tx_config->rx_to_tx_ring,
481 (void *)mbufs, MAX_PKT_BURST, NULL);
485 /* Update MACs if enabled */
486 if (mac_updating)
487 for (j = 0; j < nb_dq; j++)
488 update_mac_addrs(mbufs[j],
489 tx_config->rxtx_port);
492 nb_tx = rte_eth_tx_burst(tx_config->rxtx_port, 0,
493 (void *)mbufs, nb_dq);
495 port_statistics.tx[tx_config->rxtx_port] += nb_tx;
497 if (unlikely(nb_tx < nb_dq)) {
498 port_statistics.tx_dropped[tx_config->rxtx_port] +=
499 (nb_dq - nb_tx);
500 /* Free any unsent packets. */
501 rte_mempool_put_bulk(dma_pktmbuf_pool,
502 (void *)&mbufs[nb_tx], nb_dq - nb_tx);
506 /* >8 End of transmitting packets from dmadev/rte_ring for one port. */
508 /* Main rx processing loop for dmadev. */
513 uint16_t nb_ports = cfg.nb_ports;
515 RTE_LOG(INFO, DMA, "Entering main rx loop for copy on lcore %u\n",
516 rte_lcore_id());
518 while (!force_quit)
519 for (i = 0; i < nb_ports; i++)
520 dma_rx_port(&cfg.ports[i]);
523 /* Main tx processing loop for hardware copy. */
528 uint16_t nb_ports = cfg.nb_ports;
530 RTE_LOG(INFO, DMA, "Entering main tx loop for copy on lcore %u\n",
531 rte_lcore_id());
533 while (!force_quit)
534 for (i = 0; i < nb_ports; i++)
535 dma_tx_port(&cfg.ports[i]);
538 /* Main rx and tx loop if only one worker lcore available */
543 uint16_t nb_ports = cfg.nb_ports;
545 RTE_LOG(INFO, DMA, "Entering main rx and tx loop for copy on"
546 " lcore %u\n", rte_lcore_id());
549 for (i = 0; i < nb_ports; i++) {
550 dma_rx_port(&cfg.ports[i]);
551 dma_tx_port(&cfg.ports[i]);
555 /* Start processing for each lcore. 8< */
556 static void start_forwarding_cores(void)
558 uint32_t lcore_id = rte_lcore_id();
560 RTE_LOG(INFO, DMA, "Entering %s on lcore %u\n",
561 __func__, rte_lcore_id());
563 if (cfg.nb_lcores == 1) {
564 lcore_id = rte_get_next_lcore(lcore_id, true, true);
565 rte_eal_remote_launch((lcore_function_t *)rxtx_main_loop,
566 NULL, lcore_id);
567 } else if (cfg.nb_lcores > 1) {
568 lcore_id = rte_get_next_lcore(lcore_id, true, true);
569 rte_eal_remote_launch((lcore_function_t *)rx_main_loop,
570 NULL, lcore_id);
572 lcore_id = rte_get_next_lcore(lcore_id, true, true);
573 rte_eal_remote_launch((lcore_function_t *)tx_main_loop, NULL,
574 lcore_id);
577 /* >8 End of starting to process for each lcore. */
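/* With a single worker lcore, the combined rx/tx loop runs there; with two or
 * more workers, RX and TX each get a dedicated lcore while the main lcore only
 * prints statistics.
 */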
581 dma_usage(const char *prgname)
583 printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n"
584 " -b --dma-batch-size: number of requests per DMA batch\n"
585 " -f --max-frame-size: max frame size\n"
586 " -m --force-min-copy-size: force a minimum copy length, even for smaller packets\n"
587 " -p --portmask: hexadecimal bitmask of ports to configure\n"
588 " -q NQ: number of RX queues per port (default is 1)\n"
589 " --[no-]mac-updating: Enable or disable MAC addresses updating (enabled by default)\n"
591 " - The source MAC address is replaced by the TX port MAC address\n"
592 " - The destination MAC address is replaced by 02:00:00:00:00:TX_PORT_ID\n"
593 " -c --copy-type CT: type of copy: sw|hw\n"
594 " -s --ring-size RS: size of dmadev descriptor ring for hardware copy mode or rte_ring for software copy mode\n"
595 " -i --stats-interval SI: interval, in seconds, between stats prints (default is 1)\n",
600 dma_parse_portmask(const char *portmask)
605 /* Parse hexadecimal string */
606 pm = strtoul(portmask, &end, 16);
607 if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
608 return 0;
614 dma_parse_copy_mode(const char *copy_mode)
616 if (strcmp(copy_mode, COPY_MODE_SW) == 0)
617 return COPY_MODE_SW_NUM;
618 else if (strcmp(copy_mode, COPY_MODE_DMA) == 0)
619 return COPY_MODE_DMA_NUM;
621 return COPY_MODE_INVALID_NUM;
624 /* Parse the argument given in the command line of the application */
626 dma_parse_args(int argc, char **argv, unsigned int nb_ports)
628 static const char short_options[] =
629 "b:" /* dma batch size */
630 "c:" /* copy type (sw|hw) */
631 "f:" /* max frame size */
632 "m:" /* force min copy size */
633 "p:" /* portmask */
634 "q:" /* number of RX queues per port */
635 "s:" /* ring size */
636 "i:" /* interval, in seconds, between stats prints */
637 ;
639 static const struct option lgopts[] = {
640 {CMD_LINE_OPT_MAC_UPDATING, no_argument, &mac_updating, 1},
641 {CMD_LINE_OPT_NO_MAC_UPDATING, no_argument, &mac_updating, 0},
642 {CMD_LINE_OPT_PORTMASK, required_argument, NULL, 'p'},
643 {CMD_LINE_OPT_NB_QUEUE, required_argument, NULL, 'q'},
644 {CMD_LINE_OPT_COPY_TYPE, required_argument, NULL, 'c'},
645 {CMD_LINE_OPT_RING_SIZE, required_argument, NULL, 's'},
646 {CMD_LINE_OPT_BATCH_SIZE, required_argument, NULL, 'b'},
647 {CMD_LINE_OPT_FRAME_SIZE, required_argument, NULL, 'f'},
648 {CMD_LINE_OPT_FORCE_COPY_SIZE, required_argument, NULL, 'm'},
649 {CMD_LINE_OPT_STATS_INTERVAL, required_argument, NULL, 'i'},
650 {NULL, 0, 0, 0}
651 };
653 const unsigned int default_port_mask = (1 << nb_ports) - 1;
657 char *prgname = argv[0];
659 dma_enabled_port_mask = default_port_mask;
662 while ((opt = getopt_long(argc, argvopt, short_options,
663 lgopts, &option_index)) != EOF) {
667 dma_batch_sz = atoi(optarg);
668 if (dma_batch_sz > MAX_PKT_BURST) {
669 printf("Invalid dma batch size, %s.\n", optarg);
675 max_frame_size = atoi(optarg);
676 if (max_frame_size > RTE_ETHER_MAX_JUMBO_FRAME_LEN) {
677 printf("Invalid max frame size, %s.\n", optarg);
684 force_min_copy_size = atoi(optarg);
689 dma_enabled_port_mask = dma_parse_portmask(optarg);
690 if (dma_enabled_port_mask & ~default_port_mask ||
691 dma_enabled_port_mask <= 0) {
692 printf("Invalid portmask, %s, suggest 0x%x\n",
693 optarg, default_port_mask);
700 nb_queues = atoi(optarg);
701 if (nb_queues == 0 || nb_queues > MAX_RX_QUEUES_COUNT) {
702 printf("Invalid RX queues number %s. Max %u\n",
703 optarg, MAX_RX_QUEUES_COUNT);
710 copy_mode = dma_parse_copy_mode(optarg);
711 if (copy_mode == COPY_MODE_INVALID_NUM) {
712 printf("Invalid copy type. Use: sw, hw\n");
719 ring_size = atoi(optarg);
720 if (ring_size == 0) {
721 printf("Invalid ring size, %s.\n", optarg);
725 /* ring_size must be less than or equal to MBUF_RING_SIZE
726  * to avoid overwriting bufs
727  */
728 if (ring_size > MBUF_RING_SIZE) {
729 printf("Max ring_size is %d, setting ring_size to max.\n",
730 MBUF_RING_SIZE);
731 ring_size = MBUF_RING_SIZE;
736 stats_interval = atoi(optarg);
737 if (stats_interval == 0) {
738 printf("Invalid stats interval, setting to 1\n");
739 stats_interval = 1; /* set to default */
753 printf("MAC updating %s\n", mac_updating ? "enabled" : "disabled");
755 argv[optind - 1] = prgname;
758 optind = 1; /* reset getopt lib */
762 /* check link status, return true if at least one port is up */
764 check_link_status(uint32_t port_mask)
767 struct rte_eth_link link;
768 int ret, link_status = 0;
769 char link_status_text[RTE_ETH_LINK_MAX_STR_LEN];
771 printf("\nChecking link status\n");
772 RTE_ETH_FOREACH_DEV(portid) {
773 if ((port_mask & (1 << portid)) == 0)
774 continue;
776 memset(&link, 0, sizeof(link));
777 ret = rte_eth_link_get(portid, &link);
778 if (ret < 0) {
779 printf("Port %u link get failed: err=%d\n",
780 portid, ret);
781 continue;
782 }
784 /* Print link status */
785 rte_eth_link_to_str(link_status_text,
786 sizeof(link_status_text), &link);
787 printf("Port %d %s\n", portid, link_status_text);
789 if (link.link_status)
790 link_status = 1;
791 }
792 return link_status;
795 /* Configuration of device. 8< */
797 configure_dmadev_queue(uint32_t dev_id)
799 struct rte_dma_info info;
800 struct rte_dma_conf dev_config = { .nb_vchans = 1 };
801 struct rte_dma_vchan_conf qconf = {
802 .direction = RTE_DMA_DIR_MEM_TO_MEM,
803 .nb_desc = ring_size
804 };
805 uint16_t vchan = 0;
807 if (rte_dma_configure(dev_id, &dev_config) != 0)
808 rte_exit(EXIT_FAILURE, "Error with rte_dma_configure()\n");
810 if (rte_dma_vchan_setup(dev_id, vchan, &qconf) != 0) {
811 printf("Error with queue configuration\n");
814 rte_dma_info_get(dev_id, &info);
815 if (info.nb_vchans != 1) {
816 printf("Error, no configured queues reported on device id %u\n", dev_id);
819 if (rte_dma_start(dev_id) != 0)
820 rte_exit(EXIT_FAILURE, "Error with rte_dma_start()\n");
822 /* >8 End of configuration of device. */
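/* The bring-up above follows the usual dmadev sequence: rte_dma_configure()
 * with one vchan, rte_dma_vchan_setup() for a MEM_TO_MEM vchan sized to
 * ring_size, an rte_dma_info_get() sanity check, then rte_dma_start() before
 * any copies are enqueued.
 */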
824 /* Using dmadev API functions. 8< */
828 uint16_t nb_dmadev = 0;
829 int16_t dev_id = rte_dma_next_dev(0);
832 for (i = 0; i < cfg.nb_ports; i++) {
833 for (j = 0; j < cfg.ports[i].nb_queues; j++) {
834 if (dev_id == -1)
835 goto end;
837 cfg.ports[i].dmadev_ids[j] = dev_id;
838 configure_dmadev_queue(cfg.ports[i].dmadev_ids[j]);
839 dev_id = rte_dma_next_dev(dev_id + 1);
840 ++nb_dmadev;
843 end:
844 if (nb_dmadev < cfg.nb_ports * cfg.ports[0].nb_queues)
845 rte_exit(EXIT_FAILURE,
846 "Not enough dmadevs (%u) for all queues (%u).\n",
847 nb_dmadev, cfg.nb_ports * cfg.ports[0].nb_queues);
848 RTE_LOG(INFO, DMA, "Number of used dmadevs: %u.\n", nb_dmadev);
850 /* >8 End of using dmadev API functions. */
852 /* Assign ring structures for packet exchanging. 8< */
858 for (i = 0; i < cfg.nb_ports; i++) {
859 char ring_name[RTE_RING_NAMESIZE];
861 snprintf(ring_name, sizeof(ring_name), "rx_to_tx_ring_%u", i);
862 /* Create ring for inter core communication */
863 cfg.ports[i].rx_to_tx_ring = rte_ring_create(
864 ring_name, ring_size,
865 rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ);
867 if (cfg.ports[i].rx_to_tx_ring == NULL)
868 rte_exit(EXIT_FAILURE, "Ring create failed: %s\n",
869 rte_strerror(rte_errno));
872 /* >8 End of assigning ring structures for packet exchanging. */
875 eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
877 uint32_t overhead_len;
879 if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
880 overhead_len = max_rx_pktlen - max_mtu;
881 else
882 overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
884 return overhead_len;
888 config_port_max_pkt_len(struct rte_eth_conf *conf,
889 struct rte_eth_dev_info *dev_info)
891 uint32_t overhead_len;
893 if (max_frame_size == 0)
894 return 0;
896 if (max_frame_size < RTE_ETHER_MIN_LEN)
897 return -1;
899 overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
900 dev_info->max_mtu);
901 conf->rxmode.mtu = max_frame_size - overhead_len;
903 return 0;
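/* Example: with --max-frame-size=1518 on a NIC reporting 18 bytes of L2
 * overhead (14-byte Ethernet header + 4-byte CRC), the configured MTU becomes
 * 1518 - 18 = 1500.
 */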
907 * Initializes a given port using global settings and with the RX buffers
908 * coming from the mbuf_pool passed as a parameter.
911 port_init(uint16_t portid, struct rte_mempool *mbuf_pool, uint16_t nb_queues)
913 /* Configuring port to use RSS for multiple RX queues. 8< */
914 static const struct rte_eth_conf port_conf = {
915 .rxmode = {
916 .mq_mode = RTE_ETH_MQ_RX_RSS,
917 },
918 .rx_adv_conf = {
919 .rss_conf = {
920 .rss_key = NULL,
921 .rss_hf = RTE_ETH_RSS_PROTO_MASK,
922 }
923 }
924 };
925 /* >8 End of configuring port to use RSS for multiple RX queues. */
927 struct rte_eth_rxconf rxq_conf;
928 struct rte_eth_txconf txq_conf;
929 struct rte_eth_conf local_port_conf = port_conf;
930 struct rte_eth_dev_info dev_info;
933 /* Skip ports that are not enabled */
934 if ((dma_enabled_port_mask & (1 << portid)) == 0) {
935 printf("Skipping disabled port %u\n", portid);
936 return;
937 }
940 printf("Initializing port %u... ", portid);
942 ret = rte_eth_dev_info_get(portid, &dev_info);
943 if (ret != 0)
944 rte_exit(EXIT_FAILURE, "Cannot get device info: %s, port=%u\n",
945 rte_strerror(-ret), portid);
947 ret = config_port_max_pkt_len(&local_port_conf, &dev_info);
948 if (ret != 0)
949 rte_exit(EXIT_FAILURE,
950 "Invalid max frame size: %u (port %u)\n",
951 max_frame_size, portid);
953 local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
954 dev_info.flow_type_rss_offloads;
955 ret = rte_eth_dev_configure(portid, nb_queues, 1, &local_port_conf);
957 rte_exit(EXIT_FAILURE, "Cannot configure device:"
958 " err=%d, port=%u\n", ret, portid);
960 ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
961 &nb_txd);
962 if (ret < 0)
963 rte_exit(EXIT_FAILURE,
964 "Cannot adjust number of descriptors: err=%d, port=%u\n",
965 ret, portid);
967 rte_eth_macaddr_get(portid, &dma_ports_eth_addr[portid]);
970 rxq_conf = dev_info.default_rxconf;
971 rxq_conf.offloads = local_port_conf.rxmode.offloads;
972 for (i = 0; i < nb_queues; i++) {
973 ret = rte_eth_rx_queue_setup(portid, i, nb_rxd,
974 rte_eth_dev_socket_id(portid), &rxq_conf,
975 mbuf_pool);
976 if (ret < 0)
977 rte_exit(EXIT_FAILURE,
978 "rte_eth_rx_queue_setup:err=%d,port=%u, queue_id=%u\n",
979 ret, portid, i);
980 }
982 /* Init one TX queue on each port */
983 txq_conf = dev_info.default_txconf;
984 txq_conf.offloads = local_port_conf.txmode.offloads;
985 ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
986 rte_eth_dev_socket_id(portid),
987 &txq_conf);
988 if (ret < 0)
989 rte_exit(EXIT_FAILURE,
990 "rte_eth_tx_queue_setup:err=%d,port=%u\n",
991 ret, portid);
993 /* Start device. 8< */
994 ret = rte_eth_dev_start(portid);
995 if (ret < 0)
996 rte_exit(EXIT_FAILURE,
997 "rte_eth_dev_start:err=%d, port=%u\n",
998 ret, portid);
999 /* >8 End of starting device. */
1001 /* RX port is set in promiscuous mode. 8< */
1002 rte_eth_promiscuous_enable(portid);
1003 /* >8 End of RX port is set in promiscuous mode. */
1005 printf("Port %u, MAC address: " RTE_ETHER_ADDR_PRT_FMT "\n\n",
1006 portid,
1007 RTE_ETHER_ADDR_BYTES(&dma_ports_eth_addr[portid]));
1009 cfg.ports[cfg.nb_ports].rxtx_port = portid;
1010 cfg.ports[cfg.nb_ports++].nb_queues = nb_queues;
1013 /* Get a device dump for each device being used by the application */
1019 if (copy_mode != COPY_MODE_DMA_NUM)
1020 return;
1022 for (i = 0; i < cfg.nb_ports; i++)
1023 for (j = 0; j < cfg.ports[i].nb_queues; j++)
1024 rte_dma_dump(cfg.ports[i].dmadev_ids[j], stdout);
1028 signal_handler(int signum)
1030 if (signum == SIGINT || signum == SIGTERM) {
1031 printf("\n\nSignal %d received, preparing to exit...\n",
1032 signum);
1033 force_quit = true;
1034 } else if (signum == SIGUSR1) {
1040 main(int argc, char **argv)
1043 uint16_t nb_ports, portid;
1045 unsigned int nb_mbufs;
1049 ret = rte_eal_init(argc, argv);
1050 if (ret < 0)
1051 rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
1052 /* >8 End of init EAL. */
1053 argc -= ret;
1054 argv += ret;
1057 signal(SIGINT, signal_handler);
1058 signal(SIGTERM, signal_handler);
1059 signal(SIGUSR1, signal_handler);
1061 nb_ports = rte_eth_dev_count_avail();
1062 if (nb_ports == 0)
1063 rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
1065 /* Parse application arguments (after the EAL ones) */
1066 ret = dma_parse_args(argc, argv, nb_ports);
1067 if (ret < 0)
1068 rte_exit(EXIT_FAILURE, "Invalid DMA arguments\n");
1070 /* Allocates mempool to hold the mbufs. 8< */
1071 nb_mbufs = RTE_MAX(nb_ports * (nb_queues * (nb_rxd + nb_txd +
1072 4 * MAX_PKT_BURST + ring_size) + ring_size +
1073 rte_lcore_count() * MEMPOOL_CACHE_SIZE),
1074 MIN_POOL_SIZE);
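/* The pool is sized so each port can fill its RX/TX descriptor rings, keep
 * several bursts in flight, and fill the rx-to-tx and copy-tracking rings,
 * with one mempool cache per lcore on top; MIN_POOL_SIZE is the floor for
 * small configurations.
 */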
1076 /* Create the mbuf pool */
1077 sz = max_frame_size + RTE_PKTMBUF_HEADROOM;
1078 sz = RTE_MAX(sz, (size_t)RTE_MBUF_DEFAULT_BUF_SIZE);
1079 dma_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", nb_mbufs,
1080 MEMPOOL_CACHE_SIZE, 0, sz, rte_socket_id());
1081 if (dma_pktmbuf_pool == NULL)
1082 rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");
1083 /* >8 End of allocates mempool to hold the mbufs. */
1085 if (force_min_copy_size >
1086 (uint32_t)(rte_pktmbuf_data_room_size(dma_pktmbuf_pool) -
1087 RTE_PKTMBUF_HEADROOM))
1088 rte_exit(EXIT_FAILURE,
1089 "Force min copy size > packet mbuf size\n");
1091 /* Initialize each port. 8< */
1093 RTE_ETH_FOREACH_DEV(portid)
1094 port_init(portid, dma_pktmbuf_pool, nb_queues);
1095 /* >8 End of initializing each port. */
1097 /* Initialize per-port statistics */
1098 memset(&port_statistics, 0, sizeof(port_statistics));
1100 /* Assigning each port resources. 8< */
1101 while (!check_link_status(dma_enabled_port_mask) && !force_quit)
1102 sleep(1);
1104 /* Check if there is enough lcores for all ports. */
1105 cfg.nb_lcores = rte_lcore_count() - 1;
1106 if (cfg.nb_lcores < 1)
1107 rte_exit(EXIT_FAILURE,
1108 "There should be at least one worker lcore.\n");
1110 if (copy_mode == COPY_MODE_DMA_NUM)
1114 /* >8 End of assigning each port resources. */
1116 start_forwarding_cores();
1117 /* main core prints stats while other cores forward */
1118 print_stats(argv[0]);
1120 /* force_quit is true when we get here */
1121 rte_eal_mp_wait_lcore();
1124 for (i = 0; i < cfg.nb_ports; i++) {
1125 printf("Closing port %d\n", cfg.ports[i].rxtx_port);
1126 ret = rte_eth_dev_stop(cfg.ports[i].rxtx_port);
1127 if (ret != 0)
1128 RTE_LOG(ERR, DMA, "rte_eth_dev_stop: err=%s, port=%u\n",
1129 rte_strerror(-ret), cfg.ports[i].rxtx_port);
1131 rte_eth_dev_close(cfg.ports[i].rxtx_port);
1132 if (copy_mode == COPY_MODE_DMA_NUM) {
1133 for (j = 0; j < cfg.ports[i].nb_queues; j++) {
1134 printf("Stopping dmadev %d\n",
1135 cfg.ports[i].dmadev_ids[j]);
1136 rte_dma_stop(cfg.ports[i].dmadev_ids[j]);
1138 } else /* copy_mode == COPY_MODE_SW_NUM */
1139 rte_ring_free(cfg.ports[i].rx_to_tx_ring);
1142 /* clean up the EAL */
1143 rte_eal_cleanup();