/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#include <sys/types.h>
#include <errno.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_PMD_XENVIRT
#include <rte_eth_xenvirt.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_flow.h>
#include <rte_metrics.h>
#ifdef RTE_LIBRTE_BITRATE
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
#endif

#include "testpmd.h"
uint16_t verbose_level = 0; /**< Silent by default. */

/* use master core for command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 0; /**< No numa support by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not specified.
 */
uint8_t socket_num = UMA_NO_CONFIG;
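
/*
 * Illustrative sketch (not part of testpmd): how the two knobs above
 * combine when a single (UMA-mode) mbuf pool is created. With
 * --socket-num unset, everything falls back to socket 0; this mirrors
 * the logic used below in init_config().
 */
static inline unsigned int
uma_pool_socket_example(void)
{
	if (socket_num == UMA_NO_CONFIG)
		return 0;		/* default: CPU socket 0 */
	return socket_num;		/* socket chosen on the command line */
}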
/*
 * Use ANONYMOUS mapped memory (may not be physically contiguous) for mbufs.
 */
uint8_t mp_anon = 0;

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;
/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;  /**< Is equal to (nb_ports * RTE_MAX(nb_rxq, nb_txq)). */
/*
 * Forwarding engines.
 */
struct fwd_engine *fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
				      * specified on command-line. */

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets. */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */
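
/*
 * Illustrative sketch (not part of testpmd): describing a 3-segment
 * TXONLY packet. The segment lengths must sum to the total packet
 * length; interactively, the "set txpkts" command keeps these fields
 * consistent (an assumption stated here for illustration only).
 */
static inline void
txonly_segment_example(void)
{
	tx_pkt_seg_lengths[0] = 64;
	tx_pkt_seg_lengths[1] = 64;
	tx_pkt_seg_lengths[2] = 128;
	tx_pkt_nb_segs = 3;
	tx_pkt_length = 64 + 64 + 128; /* 256 bytes in total */
}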
uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/* Whether DCB test mode is enabled. */
uint8_t dcb_test = 0;
/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX queue flags.
 */
int32_t txq_flags = RTE_PMD_PARAM_UNSET;
/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
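
/*
 * Example: rss_hf is a bit-mask of ETH_RSS_* flags, so several protocol
 * families can be hashed at once, e.g.:
 *
 *	rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP;
 *
 * The value is later copied into rx_adv_conf.rss_conf.rss_hf by
 * init_port_config() below.
 */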
/*
 * Port topology configuration.
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Avoid checking link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */
/*
 * NIC bypass mode configuration options.
 */
#ifdef RTE_NIC_BYPASS

/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;

#endif

#ifdef RTE_LIBRTE_LATENCY_STATS

/*
 * Set when latency stats are enabled on the command line.
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to serve latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif
/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.split_hdr_size = 0,
	.header_split   = 0, /**< Header Split disabled. */
	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
	.hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
};
struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0x0,
		.ipv4_mask = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};
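
/*
 * Example (hedged): flow director stays disabled (RTE_FDIR_MODE_NONE)
 * unless a --pkt-filter-mode option selects e.g. perfect filtering:
 *
 *	fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
 *
 * The masks above then tell the PMD which header bits a filter match
 * should take into account.
 */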
volatile int test_done = 1; /* nonzero while packet forwarding is stopped */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

unsigned int max_socket = 0;

/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi,
						   struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
static void eth_event_callback(uint8_t port_id,
			       enum rte_eth_event_type type,
			       void *param);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);
/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		sock_num = rte_lcore_to_socket_id(i) + 1;
		if (sock_num > max_socket) {
			if (sock_num > RTE_MAX_NUMA_NODES)
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			max_socket = sock_num;
		}
		if (!rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}
static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;

	for (pt_id = 0; pt_id < nb_ports; pt_id++)
		fwd_ports_ids[pt_id] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
/*
 * Configuration initialisation done once at init time.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	RTE_LOG(INFO, USER1,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

#ifdef RTE_LIBRTE_PMD_XENVIRT
	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
		(unsigned) mb_mempool_cache,
		sizeof(struct rte_pktmbuf_pool_private),
		rte_pktmbuf_pool_init, NULL,
		rte_pktmbuf_init, NULL,
		socket_id, 0);
#endif

	/* if the former XEN allocation failed fall back to normal allocation */
	if (rte_mp == NULL) {
		if (mp_anon != 0) {
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, 0);
			if (rte_mp == NULL)
				goto err;

			if (rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
				goto err;
			}
			rte_pktmbuf_pool_init(rte_mp, NULL);
			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
		} else {
			/* wrapper to rte_mempool_create() */
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
		}
	}

err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}
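
/*
 * Illustrative usage sketch (not part of testpmd): creating one mbuf
 * pool per detected CPU socket, as init_config() does below when NUMA
 * support is enabled.
 */
static inline void
mbuf_pool_create_example(unsigned int nb_mbuf)
{
	unsigned int i;

	for (i = 0; i < max_socket; i++)
		mbuf_pool_create(mbuf_data_size, nb_mbuf, i);
}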
/*
 * Check given socket id is valid or not with NUMA mode,
 * if valid, return 0, else return -1
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (socket_id >= max_socket) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];

	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				 sizeof(struct fwd_lcore *) * nb_lcores,
				 RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
			 "failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
						sizeof(struct fwd_lcore),
						RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
				 "failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool =
			(nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
	}
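
	/*
	 * Worked example (values assumed from the usual testpmd.h
	 * defaults: RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX = 2048,
	 * MAX_PKT_BURST = 512, mb_mempool_cache = 250): with 4 forwarding
	 * lcores the per-port estimate is 2048 + 4 * 250 + 2048 + 512 =
	 * 5608 mbufs, which is then scaled by RTE_MAX_ETHPORTS so the
	 * pool can serve every possible port.
	 */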
	if (!numa_support) {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_num);
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	if (numa_support) {
		uint8_t i;
		unsigned int nb_mbuf;

		if (param_total_num_mbufs)
			nb_mbuf_per_pool = nb_mbuf_per_pool / nb_ports;

		for (i = 0; i < max_socket; i++) {
			nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
			if (nb_mbuf)
				mbuf_pool_create(mbuf_data_size,
						 nb_mbuf, i);
		}
	}

	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();
}
void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	struct rte_port *port;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];
	rte_eth_dev_info_get(new_port_id, &port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;

	init_port_config();
}
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
			       "max_rx_queues(%d)\n", nb_rxq,
			       port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
			       "max_tx_queues(%d)\n", nb_txq,
			       port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
			 "failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
				 " failed\n");
	}

	return 0;
}
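
/*
 * Worked example: with 2 forwarding ports, nb_rxq = 4 and nb_txq = 2,
 * q = RTE_MAX(4, 2) = 4, so nb_fwd_streams = 2 * 4 = 8 stream
 * structures are allocated by init_fwd_streams().
 */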
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
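
/*
 * Worked example for pkt_burst_stats_display(): if 10 bursts of 16
 * packets are counted first and 90 bursts of 32 packets afterwards,
 * the top-two tracking ends with burst_stats = {90, 10}, total_burst
 * = 100, and the function prints:
 *
 *	RX-bursts : 100 [90% of 32 pkts + 10% of 16 pkts]
 */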
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) &&
	    (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	} else {
		printf("  RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs: %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       " RX-errors:%14"PRIu64
			       " RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i],
			       stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       " TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14u  RX- bad L4 checksum: "
		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}
static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t  port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/**
				 * testpmd can get stuck in the below do-while
				 * loop if rte_eth_rx_burst() always returns
				 * nonzero packets. So a timer is added to exit
				 * this loop after a 1 second timeout.
				 */
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					 (timer_tsc < timer_period));
				timer_tsc = 0;
			}
		}
		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
	}
}
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint8_t idx_port, cnt_ports;

	cnt_ports = rte_eth_dev_count();
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		tics_current = rte_rdtsc();
		if (tics_current - tics_datum >= tics_per_1sec) {
			/* Periodic bitrate calculation */
			for (idx_port = 0; idx_port < cnt_ports; idx_port++)
				rte_stats_bitrate_calc(bitrate_data, idx_port);
			tics_datum = tics_current;
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		if (latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (! fc->stopped);
}

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}
/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}
/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}
/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t pt_id;
	streamid_t sm_id;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
	     strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
	    (!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			 "Either rxq or txq are 0, cannot use %s fwd mode\n",
			 cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return;
	}

	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
			       "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	fwd_config_setup();
	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif
	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					     fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
			rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					     fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
			rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed  -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}
	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}
void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
		printf("\nSet link up fail.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
		printf("\nSet link down fail.\n");
}
static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pi) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
		    (port->slave_flag == 0))
			return 0;
	}

	/* No port is not started */
	return 1;
}

int
all_ports_stopped(void)
{
	portid_t pi;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pi) {
		port = &ports[pi];
		if ((port->port_status != RTE_PORT_STOPPED) &&
		    (port->slave_flag == 0))
			return 0;
	}

	return 1;
}

int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;

	return 1;
}

static int
port_is_closed(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_CLOSED)
		return 0;

	return 1;
}
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct ether_addr mac_addr;
	enum rte_eth_event_type event_type;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	if (dcb_config)
		dcb_test = 1;
	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
					RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			printf("Configuring Port %d (socket %u)\n", pi,
			       port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						     &(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
					       "to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				if ((numa_support) &&
				    (txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, txring_numa[pi],
						&(port->tx_conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, port->socket_id,
						&(port->tx_conf));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
					       "to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup rx queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				if ((numa_support) &&
				    (rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool *mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
						       "No mempool allocation"
						       " on the socket %d\n",
						       rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd, rxring_numa[pi],
						&(port->rx_conf), mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
						       "No mempool allocation"
						       " on the socket %d\n",
						       port->socket_id);
						return -1;
					}
					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd, port->socket_id,
						&(port->rx_conf), mp);
				}

				if (diag == 0)
					continue;

				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
					       "to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}

		for (event_type = RTE_ETH_EVENT_UNKNOWN;
		     event_type < RTE_ETH_EVENT_MAX;
		     event_type++) {
			diag = rte_eth_dev_callback_register(pi,
							     event_type,
							     eth_event_callback,
							     NULL);
			if (diag) {
				printf("Failed to setup event callback for event %d\n",
				       event_type);
				return -1;
			}
		}

		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Fail to setup rx queue, return */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
				       "stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d can not be set into started\n", pi);

		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
		       mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
		       mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
		       mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}
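
/*
 * Illustrative sketch (not part of testpmd): the port state machine
 * used by start_port()/stop_port()/close_port(). Each transition first
 * claims the port by atomically swapping its status to
 * RTE_PORT_HANDLING, so concurrent callers cannot reconfigure the same
 * port at the same time.
 */
static inline int
claim_port_example(struct rte_port *port)
{
	if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
				RTE_PORT_HANDLING) == 0)
		return -1; /* another caller owns the port */
	/* ... reconfigure and start the port here ... */
	rte_atomic16_cmpset(&(port->port_status), RTE_PORT_HANDLING,
			    RTE_PORT_STARTED);
	return 0;
}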
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
					RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			printf("Port %d is already closed\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->flow_list)
			port_flow_flush(pi);
		rte_eth_dev_close(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
	}

	printf("Done\n");
}
void
attach_port(char *identifier)
{
	portid_t pi = 0;
	unsigned int socket_id;

	printf("Attaching a new port...\n");

	if (identifier == NULL) {
		printf("Invalid parameters are specified\n");
		return;
	}

	if (rte_eth_dev_attach(identifier, &pi))
		return;

	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
	/* if socket_id is invalid, set to 0 */
	if (check_socket_id(socket_id) < 0)
		socket_id = 0;
	reconfig(pi, socket_id);
	rte_eth_promiscuous_enable(pi);

	nb_ports = rte_eth_dev_count();

	ports[pi].port_status = RTE_PORT_STOPPED;

	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
	printf("Done\n");
}
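
/*
 * Example (hedged): from the interactive prompt, hot-plugging maps to
 * these helpers, e.g.:
 *
 *	testpmd> port attach 0000:01:00.0
 *	testpmd> port detach 0
 *
 * where the attach identifier is a PCI address or a virtual device
 * name understood by rte_eth_dev_attach().
 */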
void
detach_port(uint8_t port_id)
{
	char name[RTE_ETH_NAME_MAX_LEN];

	printf("Detaching a port...\n");

	if (!port_is_closed(port_id)) {
		printf("Please close port first\n");
		return;
	}

	if (ports[port_id].flow_list)
		port_flow_flush(port_id);

	if (rte_eth_dev_detach(port_id, name))
		return;

	nb_ports = rte_eth_dev_count();

	printf("Port '%s' is detached. Now total ports is %d\n",
	       name, nb_ports);
	printf("Done\n");
	return;
}

void
pmd_test_exit(void)
{
	portid_t pt_id;

	if (test_done == 0)
		stop_packet_forwarding();

	if (ports != NULL) {
		no_link_check = 1;
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nShutting down port %d...\n", pt_id);
			fflush(stdout);
			stop_port(pt_id);
			close_port(pt_id);
		}
	}
	printf("\nBye...\n");
}
typedef void (*cmd_func_t)(void);
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint8_t portid, count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("Checking link statuses...\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		RTE_ETH_FOREACH_DEV(portid) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up - speed %u "
					       "Mbps - %s\n", (uint8_t)portid,
					       (unsigned)link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					       ("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n",
					       (uint8_t)portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
		}
	}
}
/* This function is used by the interrupt thread */
static void
eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param)
{
	static const char * const event_desc[] = {
		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
		[RTE_ETH_EVENT_MACSEC] = "MACsec",
		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
		[RTE_ETH_EVENT_MAX] = NULL,
	};

	RTE_SET_USED(param);

	if (type >= RTE_ETH_EVENT_MAX) {
		fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
			port_id, __func__, type);
		fflush(stderr);
	} else {
		printf("\nPort %" PRIu8 ": %s event\n", port_id,
		       event_desc[type]);
		fflush(stdout);
	}
}
static int
set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
		    (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
					tx_queue_stats_mappings[i].queue_id,
					tx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->tx_queue_stats_mapping_enabled = 1;
	return 0;
}

static int
set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
		    (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
					rx_queue_stats_mappings[i].queue_id,
					rx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->rx_queue_stats_mapping_enabled = 1;
	return 0;
}

static void
map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
{
	int diag = 0;

	diag = set_tx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->tx_queue_stats_mapping_enabled = 0;
			printf("TX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
				 "set_tx_queue_stats_mapping_registers "
				 "failed for port id=%d diag=%d\n",
				 pi, diag);
	}

	diag = set_rx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->rx_queue_stats_mapping_enabled = 0;
			printf("RX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
				 "set_rx_queue_stats_mapping_registers "
				 "failed for port id=%d diag=%d\n",
				 pi, diag);
	}
}
static void
rxtx_port_config(struct rte_port *port)
{
	port->rx_conf = port->dev_info.default_rxconf;
	port->tx_conf = port->dev_info.default_txconf;

	/* Check if any RX/TX parameters have been passed */
	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.pthresh = rx_pthresh;

	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.hthresh = rx_hthresh;

	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.wthresh = rx_wthresh;

	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_free_thresh = rx_free_thresh;

	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_drop_en = rx_drop_en;

	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.pthresh = tx_pthresh;

	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.hthresh = tx_hthresh;

	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.wthresh = tx_wthresh;

	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_rs_thresh = tx_rs_thresh;

	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_free_thresh = tx_free_thresh;

	if (txq_flags != RTE_PMD_PARAM_UNSET)
		port->tx_conf.txq_flags = txq_flags;
}
void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		port->dev_conf.rxmode = rx_mode;
		port->dev_conf.fdir_conf = fdir_conf;
		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		if (port->dcb_flag == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
			else
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
		}

		rxtx_port_config(port);

		rte_eth_macaddr_get(pid, &port->eth_addr);

		map_port_queue_stats_mapping_registers(pid, port);
#ifdef RTE_NIC_BYPASS
		rte_eth_dev_bypass_init(pid);
#endif
	}
}
void set_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 1;
}

void clear_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 0;
}

uint8_t port_is_bonding_slave(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	return port->slave_flag;
}

const uint16_t vlan_tags[] = {
	 0,  1,  2,  3,  4,  5,  6,  7,
	 8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};
static int
get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan
	 * tags array given above, and the number of traffic classes
	 * available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i;
			vmdq_tx_conf->dcb_tc[i] = i;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
			ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
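
/*
 * Worked example: in plain DCB mode with num_tcs = 4, the loop above
 * maps the 8 user priorities to traffic classes round-robin:
 * priorities {0,1,2,3,4,5,6,7} -> TCs {0,1,2,3,0,1,2,3}.
 */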
int
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t i;

	rte_port = &ports[pid];

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	/* set configuration of DCB in vt mode and DCB in non-vt mode */
	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;
	port_conf.rxmode.hw_vlan_filter = 1;

	/*
	 * Write the configuration into the device.
	 * Set the numbers of RX & TX queues to 0, so
	 * the RX & TX queues will not be setup.
	 */
	(void)rte_eth_dev_configure(pid, 0, 0, &port_conf);

	rte_eth_dev_info_get(pid, &rte_port->dev_info);

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue id of vmdq pools is started after pf queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED &&
	    rte_port->dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical"
		       " for port %d.", pid);
		return -1;
	}

	/* Assume all ports in testpmd have the same dcb capability
	 * and the same number of rxq and txq in dcb mode
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		if (rte_port->dev_info.max_vfs > 0) {
			nb_rxq = rte_port->dev_info.nb_rx_queues;
			nb_txq = rte_port->dev_info.nb_tx_queues;
		} else {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		}
	} else {
		/* if vt is disabled, use all pf queues */
		if (rte_port->dev_info.vmdq_pool_base == 0) {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;
		}
	}
	rx_free_thresh = 64;

	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}
static void
init_port(void)
{
	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
			    RTE_CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE,
			 "rte_zmalloc(%d struct rte_port) failed\n",
			 RTE_MAX_ETHPORTS);
	}
}
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
		       signum);
#ifdef RTE_LIBRTE_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		rte_latencystats_uninit();
#endif
		/* exit with the expected status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
	}
}
int
main(int argc, char** argv)
{
	int diag;
	uint8_t port_id;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

#ifdef RTE_LIBRTE_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init(NULL);
#endif

	nb_ports = (portid_t) rte_eth_dev_count();
	if (nb_ports == 0)
		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (!nb_rxq && !nb_txq)
		printf("Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent to fully test it.\n",
		       nb_rxq, nb_txq);

	init_config();
	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	RTE_ETH_FOREACH_DEV(port_id)
		rte_eth_promiscuous_enable(port_id);

	/* Init metrics library */
	rte_metrics_init(rte_socket_id());

#ifdef RTE_LIBRTE_LATENCY_STATS
	if (latencystats_enabled != 0) {
		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			printf("Warning: latencystats init()"
			       " returned error %d\n", ret);
		printf("Latencystats running on lcore %d\n",
		       latencystats_lcore_id);
	}
#endif

	/* Setup bitrate stats */
#ifdef RTE_LIBRTE_BITRATE
	bitrate_data = rte_stats_bitrate_create();
	if (bitrate_data == NULL)
		rte_exit(EXIT_FAILURE, "Could not allocate bitrate data.\n");
	rte_stats_bitrate_reg(bitrate_data);
#endif

#ifdef RTE_LIBRTE_CMDLINE
	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
	} else
#endif
	{
		char c;
		int rc;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(0);
		printf("Press enter to exit\n");
		rc = read(0, &c, 1);