4 * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <sys/types.h>
44 #include <sys/queue.h>
51 #include <rte_common.h>
52 #include <rte_errno.h>
53 #include <rte_byteorder.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
66 #include <rte_mempool.h>
67 #include <rte_malloc.h>
69 #include <rte_interrupts.h>
71 #include <rte_ether.h>
72 #include <rte_ethdev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
78 #ifdef RTE_LIBRTE_PDUMP
79 #include <rte_pdump.h>
84 uint16_t verbose_level = 0; /**< Silent by default. */
86 /* use master core for command line? */
87 uint8_t interactive = 0;
88 uint8_t auto_start = 0;
91 * NUMA support configuration.
92 * When set, the NUMA support attempts to dispatch the allocation of the
93 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
94 * probed ports among the CPU sockets 0 and 1.
95 * Otherwise, all memory is allocated from CPU socket 0.
97 uint8_t numa_support = 0; /**< No numa support by default */
100 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
103 uint8_t socket_num = UMA_NO_CONFIG;
106 * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
111 * Record the Ethernet address of peer target ports to which packets are
113 * Must be instantiated with the Ethernet addresses of peer traffic generator
116 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
117 portid_t nb_peer_eth_addrs = 0;
120 * Probed Target Environment.
122 struct rte_port *ports; /**< For all probed ethernet ports. */
123 portid_t nb_ports; /**< Number of probed ethernet ports. */
124 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
125 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
128 * Test Forwarding Configuration.
129 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
130 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
132 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
133 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
134 portid_t nb_cfg_ports; /**< Number of configured ports. */
135 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
137 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
138 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
140 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
141 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
144 * Forwarding engines.
146 struct fwd_engine * fwd_engines[] = {
155 #ifdef RTE_LIBRTE_IEEE1588
156 &ieee1588_fwd_engine,
161 struct fwd_config cur_fwd_config;
162 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
163 uint32_t retry_enabled;
164 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
165 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
167 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
168 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
169 * specified on command-line. */
172 * Configuration of packet segments used by the "txonly" processing engine.
174 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
175 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
176 TXONLY_DEF_PACKET_LEN,
178 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
180 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
181 /**< Split policy for packets to TX. */
183 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
184 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
186 /* whether the current configuration is in DCB mode; 0 means it is not */
187 uint8_t dcb_config = 0;
189 /* Whether DCB is in testing state */
190 uint8_t dcb_test = 0;
193 * Configurable number of RX/TX queues.
195 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
196 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
199 * Configurable number of RX/TX ring descriptors.
201 #define RTE_TEST_RX_DESC_DEFAULT 128
202 #define RTE_TEST_TX_DESC_DEFAULT 512
203 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
204 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
206 #define RTE_PMD_PARAM_UNSET -1
208 * Configurable values of RX and TX ring threshold registers.
211 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
212 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
213 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
215 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
216 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
217 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
220 * Configurable value of RX free threshold.
222 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
225 * Configurable value of RX drop enable.
227 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
230 * Configurable value of TX free threshold.
232 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
235 * Configurable value of TX RS bit threshold.
237 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
240 * Configurable value of TX queue flags.
242 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
245 * Receive Side Scaling (RSS) configuration.
247 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
250 * Port topology configuration
252 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
255 * Avoid flushing all the RX streams before starting forwarding.
257 uint8_t no_flush_rx = 0; /* flush by default */
260 * Avoid checking the link status when starting/stopping a port.
262 uint8_t no_link_check = 0; /* check by default */
265 * NIC bypass mode configuration options.
267 #ifdef RTE_NIC_BYPASS
269 /* The NIC bypass watchdog timeout. */
270 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
274 /* default period is 1 second */
275 static uint64_t timer_period = 1;
278 * Ethernet device configuration.
280 struct rte_eth_rxmode rx_mode = {
281 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
283 .header_split = 0, /**< Header Split disabled. */
284 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
285 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
286 .hw_vlan_strip = 1, /**< VLAN strip enabled. */
287 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
288 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
289 .hw_strip_crc = 0, /**< CRC stripping by hardware disabled. */
292 struct rte_fdir_conf fdir_conf = {
293 .mode = RTE_FDIR_MODE_NONE,
294 .pballoc = RTE_FDIR_PBALLOC_64K,
295 .status = RTE_FDIR_REPORT_STATUS,
297 .vlan_tci_mask = 0x0,
299 .src_ip = 0xFFFFFFFF,
300 .dst_ip = 0xFFFFFFFF,
303 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
304 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
306 .src_port_mask = 0xFFFF,
307 .dst_port_mask = 0xFFFF,
308 .mac_addr_byte_mask = 0xFF,
309 .tunnel_type_mask = 1,
310 .tunnel_id_mask = 0xFFFFFFFF,
315 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
317 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
318 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
320 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
321 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
323 uint16_t nb_tx_queue_stats_mappings = 0;
324 uint16_t nb_rx_queue_stats_mappings = 0;
326 unsigned max_socket = 0;
328 /* Forward function declarations */
329 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
330 static void check_all_ports_link_status(uint32_t port_mask);
333 * Check if all the ports are started.
334 * If yes, return a positive value; otherwise return zero.
336 static int all_ports_started(void);
339 * Find next enabled port
342 find_next_port(portid_t p, struct rte_port *ports, int size)
345 rte_exit(-EINVAL, "failed to find a next port id\n");
347 while ((p < size) && (ports[p].enabled == 0))
353 * Setup default configuration.
356 set_default_fwd_lcores_config(void)
360 unsigned int sock_num;
363 for (i = 0; i < RTE_MAX_LCORE; i++) {
364 sock_num = rte_lcore_to_socket_id(i) + 1;
365 if (sock_num > max_socket) {
366 if (sock_num > RTE_MAX_NUMA_NODES)
367 rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
368 max_socket = sock_num;
370 if (!rte_lcore_is_enabled(i))
372 if (i == rte_get_master_lcore())
374 fwd_lcores_cpuids[nb_lc++] = i;
376 nb_lcores = (lcoreid_t) nb_lc;
377 nb_cfg_lcores = nb_lcores;
382 set_def_peer_eth_addrs(void)
386 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
387 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
388 peer_eth_addrs[i].addr_bytes[5] = i;
393 set_default_fwd_ports_config(void)
397 for (pt_id = 0; pt_id < nb_ports; pt_id++)
398 fwd_ports_ids[pt_id] = pt_id;
400 nb_cfg_ports = nb_ports;
401 nb_fwd_ports = nb_ports;
405 set_def_fwd_config(void)
407 set_default_fwd_lcores_config();
408 set_def_peer_eth_addrs();
409 set_default_fwd_ports_config();
413 * Configuration initialisation done once at init time.
416 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
417 unsigned int socket_id)
419 char pool_name[RTE_MEMPOOL_NAMESIZE];
420 struct rte_mempool *rte_mp = NULL;
423 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
424 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
427 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
428 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
430 #ifdef RTE_LIBRTE_PMD_XENVIRT
431 rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
432 (unsigned) mb_mempool_cache,
433 sizeof(struct rte_pktmbuf_pool_private),
434 rte_pktmbuf_pool_init, NULL,
435 rte_pktmbuf_init, NULL,
439 /* if the former XEN allocation failed, fall back to normal allocation */
440 if (rte_mp == NULL) {
442 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
443 mb_size, (unsigned) mb_mempool_cache,
444 sizeof(struct rte_pktmbuf_pool_private),
449 if (rte_mempool_populate_anon(rte_mp) == 0) {
450 rte_mempool_free(rte_mp);
454 rte_pktmbuf_pool_init(rte_mp, NULL);
455 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
457 /* wrapper to rte_mempool_create() */
458 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
459 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
464 if (rte_mp == NULL) {
465 rte_exit(EXIT_FAILURE,
466 "Creation of mbuf pool for socket %u failed: %s\n",
467 socket_id, rte_strerror(rte_errno));
468 } else if (verbose_level > 0) {
469 rte_mempool_dump(stdout, rte_mp);
474 * Check whether the given socket id is valid in NUMA mode.
475 * Return 0 if valid, -1 otherwise.
478 check_socket_id(const unsigned int socket_id)
480 static int warning_once = 0;
482 if (socket_id >= max_socket) {
483 if (!warning_once && numa_support)
484 printf("Warning: NUMA should be configured manually by"
485 " using --port-numa-config and"
486 " --ring-numa-config parameters along with"
498 struct rte_port *port;
499 struct rte_mempool *mbp;
500 unsigned int nb_mbuf_per_pool;
502 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
504 memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
505 /* Configuration of logical cores. */
506 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
507 sizeof(struct fwd_lcore *) * nb_lcores,
508 RTE_CACHE_LINE_SIZE);
509 if (fwd_lcores == NULL) {
510 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
511 "failed\n", nb_lcores);
513 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
514 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
515 sizeof(struct fwd_lcore),
516 RTE_CACHE_LINE_SIZE);
517 if (fwd_lcores[lc_id] == NULL) {
518 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
521 fwd_lcores[lc_id]->cpuid_idx = lc_id;
525 * Create pools of mbuf.
526 * If NUMA support is disabled, create a single pool of mbuf in
527 * socket 0 memory by default.
528 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
530 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
531 * nb_txd can be configured at run time.
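 * The default per-pool estimate covers the maximum number of RX
 * descriptors, one mempool cache per lcore, the maximum number of TX
 * descriptors and one extra packet burst, unless the total number of
 * mbufs is specified on the command line.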
533 if (param_total_num_mbufs)
534 nb_mbuf_per_pool = param_total_num_mbufs;
536 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
537 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
541 (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
545 if (socket_num == UMA_NO_CONFIG)
546 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
548 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
552 FOREACH_PORT(pid, ports) {
554 rte_eth_dev_info_get(pid, &port->dev_info);
557 if (port_numa[pid] != NUMA_NO_CONFIG)
558 port_per_socket[port_numa[pid]]++;
560 uint32_t socket_id = rte_eth_dev_socket_id(pid);
562 /* if socket_id is invalid, set to 0 */
563 if (check_socket_id(socket_id) < 0)
565 port_per_socket[socket_id]++;
569 /* set flag to initialize port/queue */
570 port->need_reconfig = 1;
571 port->need_reconfig_queues = 1;
576 unsigned int nb_mbuf;
578 if (param_total_num_mbufs)
579 nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
581 for (i = 0; i < max_socket; i++) {
582 nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
584 mbuf_pool_create(mbuf_data_size,
591 * Record which mbuf pool each logical core should use, if needed.
593 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
594 mbp = mbuf_pool_find(
595 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
598 mbp = mbuf_pool_find(0);
599 fwd_lcores[lc_id]->mbp = mbp;
602 /* Configuration of packet forwarding streams. */
603 if (init_fwd_streams() < 0)
604 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
611 reconfig(portid_t new_port_id, unsigned socket_id)
613 struct rte_port *port;
615 /* Reconfiguration of Ethernet ports. */
616 port = &ports[new_port_id];
617 rte_eth_dev_info_get(new_port_id, &port->dev_info);
619 /* set flag to initialize port/queue */
620 port->need_reconfig = 1;
621 port->need_reconfig_queues = 1;
622 port->socket_id = socket_id;
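/*
 * Allocate (or re-allocate) the array of forwarding streams: one stream
 * per queue, using the larger of nb_rxq and nb_txq, for each port, after
 * checking the requested queue counts against each port's limits.
 */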
629 init_fwd_streams(void)
632 struct rte_port *port;
633 streamid_t sm_id, nb_fwd_streams_new;
636 /* set socket id according to numa or not */
637 FOREACH_PORT(pid, ports) {
639 if (nb_rxq > port->dev_info.max_rx_queues) {
640 printf("Fail: nb_rxq(%d) is greater than "
641 "max_rx_queues(%d)\n", nb_rxq,
642 port->dev_info.max_rx_queues);
645 if (nb_txq > port->dev_info.max_tx_queues) {
646 printf("Fail: nb_txq(%d) is greater than "
647 "max_tx_queues(%d)\n", nb_txq,
648 port->dev_info.max_tx_queues);
652 if (port_numa[pid] != NUMA_NO_CONFIG)
653 port->socket_id = port_numa[pid];
655 port->socket_id = rte_eth_dev_socket_id(pid);
657 /* if socket_id is invalid, set to 0 */
658 if (check_socket_id(port->socket_id) < 0)
663 if (socket_num == UMA_NO_CONFIG)
666 port->socket_id = socket_num;
670 q = RTE_MAX(nb_rxq, nb_txq);
672 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
675 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
676 if (nb_fwd_streams_new == nb_fwd_streams)
679 if (fwd_streams != NULL) {
680 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
681 if (fwd_streams[sm_id] == NULL)
683 rte_free(fwd_streams[sm_id]);
684 fwd_streams[sm_id] = NULL;
686 rte_free(fwd_streams);
691 nb_fwd_streams = nb_fwd_streams_new;
692 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
693 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
694 if (fwd_streams == NULL)
695 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
696 "failed\n", nb_fwd_streams);
698 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
699 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
700 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
701 if (fwd_streams[sm_id] == NULL)
702 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
709 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
711 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
713 unsigned int total_burst;
714 unsigned int nb_burst;
715 unsigned int burst_stats[3];
716 uint16_t pktnb_stats[3];
718 int burst_percent[3];
721 * First compute the total number of packet bursts and the
722 * two highest numbers of bursts of the same number of packets.
725 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
726 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
727 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
728 nb_burst = pbs->pkt_burst_spread[nb_pkt];
731 total_burst += nb_burst;
732 if (nb_burst > burst_stats[0]) {
733 burst_stats[1] = burst_stats[0];
734 pktnb_stats[1] = pktnb_stats[0];
735 burst_stats[0] = nb_burst;
736 pktnb_stats[0] = nb_pkt;
739 if (total_burst == 0)
741 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
742 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
743 burst_percent[0], (int) pktnb_stats[0]);
744 if (burst_stats[0] == total_burst) {
748 if (burst_stats[0] + burst_stats[1] == total_burst) {
749 printf(" + %d%% of %d pkts]\n",
750 100 - burst_percent[0], pktnb_stats[1]);
753 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
754 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
755 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
756 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
759 printf(" + %d%% of %d pkts + %d%% of others]\n",
760 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
762 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
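/*
 * Display the forwarding statistics accumulated on one port, either as
 * global RX/TX counters or per statistics register when queue stats
 * mapping is enabled for that port.
 */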
765 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
767 struct rte_port *port;
770 static const char *fwd_stats_border = "----------------------";
772 port = &ports[port_id];
773 printf("\n %s Forward statistics for port %-2d %s\n",
774 fwd_stats_border, port_id, fwd_stats_border);
776 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
777 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
779 stats->ipackets, stats->imissed,
780 (uint64_t) (stats->ipackets + stats->imissed));
782 if (cur_fwd_eng == &csum_fwd_engine)
783 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
784 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
785 if ((stats->ierrors + stats->rx_nombuf) > 0) {
786 printf(" RX-error: %-"PRIu64"\n", stats->ierrors);
787 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
790 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
792 stats->opackets, port->tx_dropped,
793 (uint64_t) (stats->opackets + port->tx_dropped));
796 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
798 stats->ipackets, stats->imissed,
799 (uint64_t) (stats->ipackets + stats->imissed));
801 if (cur_fwd_eng == &csum_fwd_engine)
802 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
803 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
804 if ((stats->ierrors + stats->rx_nombuf) > 0) {
805 printf(" RX-error:%"PRIu64"\n", stats->ierrors);
806 printf(" RX-nombufs: %14"PRIu64"\n",
810 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
812 stats->opackets, port->tx_dropped,
813 (uint64_t) (stats->opackets + port->tx_dropped));
816 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
818 pkt_burst_stats_display("RX",
819 &port->rx_stream->rx_burst_stats);
821 pkt_burst_stats_display("TX",
822 &port->tx_stream->tx_burst_stats);
825 if (port->rx_queue_stats_mapping_enabled) {
827 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
828 printf(" Stats reg %2d RX-packets:%14"PRIu64
829 " RX-errors:%14"PRIu64
830 " RX-bytes:%14"PRIu64"\n",
831 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
835 if (port->tx_queue_stats_mapping_enabled) {
836 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
837 printf(" Stats reg %2d TX-packets:%14"PRIu64
838 " TX-bytes:%14"PRIu64"\n",
839 i, stats->q_opackets[i], stats->q_obytes[i]);
843 printf(" %s--------------------------------%s\n",
844 fwd_stats_border, fwd_stats_border);
848 fwd_stream_stats_display(streamid_t stream_id)
850 struct fwd_stream *fs;
851 static const char *fwd_top_stats_border = "-------";
853 fs = fwd_streams[stream_id];
854 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
855 (fs->fwd_dropped == 0))
857 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
858 "TX Port=%2d/Queue=%2d %s\n",
859 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
860 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
861 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
862 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
864 /* if checksum mode */
865 if (cur_fwd_eng == &csum_fwd_engine) {
866 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
867 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
870 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
871 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
872 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
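/*
 * Drain the packets still pending in the RX queues of the forwarding
 * ports and free them. Each queue is polled until it is empty or a
 * one-second timer expires, and the whole pass is done twice.
 */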
877 flush_fwd_rx_queues(void)
879 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
886 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
888 /* convert to number of cycles */
889 timer_period *= rte_get_timer_hz();
891 for (j = 0; j < 2; j++) {
892 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
893 for (rxq = 0; rxq < nb_rxq; rxq++) {
894 port_id = fwd_ports_ids[rxp];
896 * testpmd can get stuck in the do/while loop below
897 * if rte_eth_rx_burst() always returns a nonzero
898 * number of packets. A timer is therefore added to exit
899 * this loop after a 1-second timeout.
901 prev_tsc = rte_rdtsc();
903 nb_rx = rte_eth_rx_burst(port_id, rxq,
904 pkts_burst, MAX_PKT_BURST);
905 for (i = 0; i < nb_rx; i++)
906 rte_pktmbuf_free(pkts_burst[i]);
908 cur_tsc = rte_rdtsc();
909 diff_tsc = cur_tsc - prev_tsc;
910 timer_tsc += diff_tsc;
911 } while ((nb_rx > 0) &&
912 (timer_tsc < timer_period));
916 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
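/*
 * Forwarding loop of a logical core: repeatedly apply the packet
 * forwarding function to every stream assigned to this lcore until the
 * lcore is asked to stop.
 */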
921 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
923 struct fwd_stream **fsm;
927 fsm = &fwd_streams[fc->stream_idx];
928 nb_fs = fc->stream_nb;
930 for (sm_id = 0; sm_id < nb_fs; sm_id++)
931 (*pkt_fwd)(fsm[sm_id]);
932 } while (! fc->stopped);
936 start_pkt_forward_on_core(void *fwd_arg)
938 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
939 cur_fwd_config.fwd_eng->packet_fwd);
944 * Run the TXONLY packet forwarding engine to send a single burst of packets.
945 * Used to start communication flows in network loopback test configurations.
948 run_one_txonly_burst_on_core(void *fwd_arg)
950 struct fwd_lcore *fwd_lc;
951 struct fwd_lcore tmp_lcore;
953 fwd_lc = (struct fwd_lcore *) fwd_arg;
955 tmp_lcore.stopped = 1;
956 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
961 * Launch packet forwarding:
962 * - Setup per-port forwarding context.
963 * - launch logical cores with their forwarding configuration.
966 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
968 port_fwd_begin_t port_fwd_begin;
973 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
974 if (port_fwd_begin != NULL) {
975 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
976 (*port_fwd_begin)(fwd_ports_ids[i]);
978 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
979 lc_id = fwd_lcores_cpuids[i];
980 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
981 fwd_lcores[i]->stopped = 0;
982 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
983 fwd_lcores[i], lc_id);
985 printf("launch lcore %u failed - diag=%d\n",
992 * Launch packet forwarding configuration.
995 start_packet_forwarding(int with_tx_first)
997 port_fwd_begin_t port_fwd_begin;
998 port_fwd_end_t port_fwd_end;
999 struct rte_port *port;
1004 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1005 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1007 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1008 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1010 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1011 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1012 (!nb_rxq || !nb_txq))
1013 rte_exit(EXIT_FAILURE,
1014 "Either rxq or txq are 0, cannot use %s fwd mode\n",
1015 cur_fwd_eng->fwd_mode_name);
1017 if (all_ports_started() == 0) {
1018 printf("Not all ports were started\n");
1021 if (test_done == 0) {
1022 printf("Packet forwarding already started\n");
1026 if (init_fwd_streams() < 0) {
1027 printf("Fail from init_fwd_streams()\n");
1032 for (i = 0; i < nb_fwd_ports; i++) {
1033 pt_id = fwd_ports_ids[i];
1034 port = &ports[pt_id];
1035 if (!port->dcb_flag) {
1036 printf("In DCB mode, all forwarding ports must "
1037 "be configured in this mode.\n");
1041 if (nb_fwd_lcores == 1) {
1042 printf("In DCB mode,the nb forwarding cores "
1043 "should be larger than 1.\n");
1050 flush_fwd_rx_queues();
1053 pkt_fwd_config_display(&cur_fwd_config);
1054 rxtx_config_display();
1056 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1057 pt_id = fwd_ports_ids[i];
1058 port = &ports[pt_id];
1059 rte_eth_stats_get(pt_id, &port->stats);
1060 port->tx_dropped = 0;
1062 map_port_queue_stats_mapping_registers(pt_id, port);
1064 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1065 fwd_streams[sm_id]->rx_packets = 0;
1066 fwd_streams[sm_id]->tx_packets = 0;
1067 fwd_streams[sm_id]->fwd_dropped = 0;
1068 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1069 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1071 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1072 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1073 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1074 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1075 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1077 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1078 fwd_streams[sm_id]->core_cycles = 0;
1081 if (with_tx_first) {
1082 port_fwd_begin = tx_only_engine.port_fwd_begin;
1083 if (port_fwd_begin != NULL) {
1084 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1085 (*port_fwd_begin)(fwd_ports_ids[i]);
1087 while (with_tx_first--) {
1088 launch_packet_forwarding(
1089 run_one_txonly_burst_on_core);
1090 rte_eal_mp_wait_lcore();
1092 port_fwd_end = tx_only_engine.port_fwd_end;
1093 if (port_fwd_end != NULL) {
1094 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1095 (*port_fwd_end)(fwd_ports_ids[i]);
1098 launch_packet_forwarding(start_pkt_forward_on_core);
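/*
 * Stop packet forwarding: tell the forwarding lcores to stop, wait for
 * them to finish, then collect and display the per-stream and per-port
 * statistics accumulated during the run.
 */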
1102 stop_packet_forwarding(void)
1104 struct rte_eth_stats stats;
1105 struct rte_port *port;
1106 port_fwd_end_t port_fwd_end;
1111 uint64_t total_recv;
1112 uint64_t total_xmit;
1113 uint64_t total_rx_dropped;
1114 uint64_t total_tx_dropped;
1115 uint64_t total_rx_nombuf;
1116 uint64_t tx_dropped;
1117 uint64_t rx_bad_ip_csum;
1118 uint64_t rx_bad_l4_csum;
1119 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1120 uint64_t fwd_cycles;
1122 static const char *acc_stats_border = "+++++++++++++++";
1125 printf("Packet forwarding not started\n");
1128 printf("Telling cores to stop...");
1129 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1130 fwd_lcores[lc_id]->stopped = 1;
1131 printf("\nWaiting for lcores to finish...\n");
1132 rte_eal_mp_wait_lcore();
1133 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1134 if (port_fwd_end != NULL) {
1135 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1136 pt_id = fwd_ports_ids[i];
1137 (*port_fwd_end)(pt_id);
1140 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1143 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1144 if (cur_fwd_config.nb_fwd_streams >
1145 cur_fwd_config.nb_fwd_ports) {
1146 fwd_stream_stats_display(sm_id);
1147 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1148 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1150 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1152 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1155 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1156 tx_dropped = (uint64_t) (tx_dropped +
1157 fwd_streams[sm_id]->fwd_dropped);
1158 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1161 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1162 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1163 fwd_streams[sm_id]->rx_bad_ip_csum);
1164 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1168 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1169 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1170 fwd_streams[sm_id]->rx_bad_l4_csum);
1171 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1174 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1175 fwd_cycles = (uint64_t) (fwd_cycles +
1176 fwd_streams[sm_id]->core_cycles);
1181 total_rx_dropped = 0;
1182 total_tx_dropped = 0;
1183 total_rx_nombuf = 0;
1184 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1185 pt_id = fwd_ports_ids[i];
1187 port = &ports[pt_id];
1188 rte_eth_stats_get(pt_id, &stats);
1189 stats.ipackets -= port->stats.ipackets;
1190 port->stats.ipackets = 0;
1191 stats.opackets -= port->stats.opackets;
1192 port->stats.opackets = 0;
1193 stats.ibytes -= port->stats.ibytes;
1194 port->stats.ibytes = 0;
1195 stats.obytes -= port->stats.obytes;
1196 port->stats.obytes = 0;
1197 stats.imissed -= port->stats.imissed;
1198 port->stats.imissed = 0;
1199 stats.oerrors -= port->stats.oerrors;
1200 port->stats.oerrors = 0;
1201 stats.rx_nombuf -= port->stats.rx_nombuf;
1202 port->stats.rx_nombuf = 0;
1204 total_recv += stats.ipackets;
1205 total_xmit += stats.opackets;
1206 total_rx_dropped += stats.imissed;
1207 total_tx_dropped += port->tx_dropped;
1208 total_rx_nombuf += stats.rx_nombuf;
1210 fwd_port_stats_display(pt_id, &stats);
1212 printf("\n %s Accumulated forward statistics for all ports"
1214 acc_stats_border, acc_stats_border);
1215 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1217 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1219 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1220 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1221 if (total_rx_nombuf > 0)
1222 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1223 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1225 acc_stats_border, acc_stats_border);
1226 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1228 printf("\n CPU cycles/packet=%u (total cycles="
1229 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1230 (unsigned int)(fwd_cycles / total_recv),
1231 fwd_cycles, total_recv);
1233 printf("\nDone.\n");
1238 dev_set_link_up(portid_t pid)
1240 if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1241 printf("\nSet link up fail.\n");
1245 dev_set_link_down(portid_t pid)
1247 if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1248 printf("\nSet link down fail.\n");
1252 all_ports_started(void)
1255 struct rte_port *port;
1257 FOREACH_PORT(pi, ports) {
1259 /* Check if there is a port which is not started */
1260 if ((port->port_status != RTE_PORT_STARTED) &&
1261 (port->slave_flag == 0))
1265 /* All ports are started */
1270 all_ports_stopped(void)
1273 struct rte_port *port;
1275 FOREACH_PORT(pi, ports) {
1277 if ((port->port_status != RTE_PORT_STOPPED) &&
1278 (port->slave_flag == 0))
1286 port_is_started(portid_t port_id)
1288 if (port_id_is_invalid(port_id, ENABLED_WARN))
1291 if (ports[port_id].port_status != RTE_PORT_STARTED)
1298 port_is_closed(portid_t port_id)
1300 if (port_id_is_invalid(port_id, ENABLED_WARN))
1303 if (ports[port_id].port_status != RTE_PORT_CLOSED)
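/*
 * Start one port, or all ports when RTE_PORT_ALL is given: (re)configure
 * the device and its RX/TX queues if needed, start it, print its MAC
 * address and optionally check the link status.
 */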
1310 start_port(portid_t pid)
1312 int diag, need_check_link_status = -1;
1315 struct rte_port *port;
1316 struct ether_addr mac_addr;
1318 if (port_id_is_invalid(pid, ENABLED_WARN))
1323 FOREACH_PORT(pi, ports) {
1324 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1327 need_check_link_status = 0;
1329 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1330 RTE_PORT_HANDLING) == 0) {
1331 printf("Port %d is now not stopped\n", pi);
1335 if (port->need_reconfig > 0) {
1336 port->need_reconfig = 0;
1338 printf("Configuring Port %d (socket %u)\n", pi,
1340 /* configure port */
1341 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1344 if (rte_atomic16_cmpset(&(port->port_status),
1345 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1346 printf("Port %d can not be set back "
1347 "to stopped\n", pi);
1348 printf("Fail to configure port %d\n", pi);
1349 /* try to reconfigure port next time */
1350 port->need_reconfig = 1;
1354 if (port->need_reconfig_queues > 0) {
1355 port->need_reconfig_queues = 0;
1356 /* setup tx queues */
1357 for (qi = 0; qi < nb_txq; qi++) {
1358 if ((numa_support) &&
1359 (txring_numa[pi] != NUMA_NO_CONFIG))
1360 diag = rte_eth_tx_queue_setup(pi, qi,
1361 nb_txd, txring_numa[pi],
1364 diag = rte_eth_tx_queue_setup(pi, qi,
1365 nb_txd, port->socket_id,
1371 /* Fail to setup tx queue, return */
1372 if (rte_atomic16_cmpset(&(port->port_status),
1374 RTE_PORT_STOPPED) == 0)
1375 printf("Port %d can not be set back "
1376 "to stopped\n", pi);
1377 printf("Fail to configure port %d tx queues\n", pi);
1378 /* try to reconfigure queues next time */
1379 port->need_reconfig_queues = 1;
1382 /* setup rx queues */
1383 for (qi = 0; qi < nb_rxq; qi++) {
1384 if ((numa_support) &&
1385 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1386 struct rte_mempool * mp =
1387 mbuf_pool_find(rxring_numa[pi]);
1389 printf("Failed to setup RX queue:"
1390 "No mempool allocation"
1391 " on the socket %d\n",
1396 diag = rte_eth_rx_queue_setup(pi, qi,
1397 nb_rxd, rxring_numa[pi],
1398 &(port->rx_conf), mp);
1400 struct rte_mempool *mp =
1401 mbuf_pool_find(port->socket_id);
1403 printf("Failed to setup RX queue:"
1404 "No mempool allocation"
1405 " on the socket %d\n",
1409 diag = rte_eth_rx_queue_setup(pi, qi,
1410 nb_rxd, port->socket_id,
1411 &(port->rx_conf), mp);
1416 /* Fail to setup rx queue, return */
1417 if (rte_atomic16_cmpset(&(port->port_status),
1419 RTE_PORT_STOPPED) == 0)
1420 printf("Port %d can not be set back "
1421 "to stopped\n", pi);
1422 printf("Fail to configure port %d rx queues\n", pi);
1423 /* try to reconfigure queues next time */
1424 port->need_reconfig_queues = 1;
1429 if (rte_eth_dev_start(pi) < 0) {
1430 printf("Fail to start port %d\n", pi);
1432 /* Fail to setup rx queue, return */
1433 if (rte_atomic16_cmpset(&(port->port_status),
1434 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1435 printf("Port %d can not be set back to "
1440 if (rte_atomic16_cmpset(&(port->port_status),
1441 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1442 printf("Port %d can not be set into started\n", pi);
1444 rte_eth_macaddr_get(pi, &mac_addr);
1445 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1446 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1447 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1448 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1450 /* at least one port started, need to check link status */
1451 need_check_link_status = 1;
1454 if (need_check_link_status == 1 && !no_link_check)
1455 check_all_ports_link_status(RTE_PORT_ALL);
1456 else if (need_check_link_status == 0)
1457 printf("Please stop the ports first\n");
1464 stop_port(portid_t pid)
1467 struct rte_port *port;
1468 int need_check_link_status = 0;
1475 if (port_id_is_invalid(pid, ENABLED_WARN))
1478 printf("Stopping ports...\n");
1480 FOREACH_PORT(pi, ports) {
1481 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1484 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1485 printf("Please remove port %d from forwarding configuration.\n", pi);
1489 if (port_is_bonding_slave(pi)) {
1490 printf("Please remove port %d from bonded device.\n", pi);
1495 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1496 RTE_PORT_HANDLING) == 0)
1499 rte_eth_dev_stop(pi);
1501 if (rte_atomic16_cmpset(&(port->port_status),
1502 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1503 printf("Port %d can not be set into stopped\n", pi);
1504 need_check_link_status = 1;
1506 if (need_check_link_status && !no_link_check)
1507 check_all_ports_link_status(RTE_PORT_ALL);
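/*
 * Close one port, or all ports when RTE_PORT_ALL is given; a port must
 * be stopped and removed from the forwarding configuration before it can
 * be closed.
 */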
1513 close_port(portid_t pid)
1516 struct rte_port *port;
1518 if (port_id_is_invalid(pid, ENABLED_WARN))
1521 printf("Closing ports...\n");
1523 FOREACH_PORT(pi, ports) {
1524 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1527 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1528 printf("Please remove port %d from forwarding configuration.\n", pi);
1532 if (port_is_bonding_slave(pi)) {
1533 printf("Please remove port %d from bonded device.\n", pi);
1538 if (rte_atomic16_cmpset(&(port->port_status),
1539 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1540 printf("Port %d is already closed\n", pi);
1544 if (rte_atomic16_cmpset(&(port->port_status),
1545 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1546 printf("Port %d is now not stopped\n", pi);
1550 rte_eth_dev_close(pi);
1552 if (rte_atomic16_cmpset(&(port->port_status),
1553 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1554 printf("Port %d cannot be set to closed\n", pi);
1561 attach_port(char *identifier)
1564 unsigned int socket_id;
1566 printf("Attaching a new port...\n");
1568 if (identifier == NULL) {
1569 printf("Invalid parameters are specified\n");
1573 if (rte_eth_dev_attach(identifier, &pi))
1576 ports[pi].enabled = 1;
1577 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1578 /* if socket_id is invalid, set to 0 */
1579 if (check_socket_id(socket_id) < 0)
1581 reconfig(pi, socket_id);
1582 rte_eth_promiscuous_enable(pi);
1584 nb_ports = rte_eth_dev_count();
1586 ports[pi].port_status = RTE_PORT_STOPPED;
1588 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
1593 detach_port(uint8_t port_id)
1595 char name[RTE_ETH_NAME_MAX_LEN];
1597 printf("Detaching a port...\n");
1599 if (!port_is_closed(port_id)) {
1600 printf("Please close port first\n");
1604 if (rte_eth_dev_detach(port_id, name))
1607 ports[port_id].enabled = 0;
1608 nb_ports = rte_eth_dev_count();
1610 printf("Port '%s' is detached. Now total ports is %d\n",
1622 stop_packet_forwarding();
1624 if (ports != NULL) {
1626 FOREACH_PORT(pt_id, ports) {
1627 printf("\nShutting down port %d...\n", pt_id);
1633 printf("\nBye...\n");
1636 typedef void (*cmd_func_t)(void);
1637 struct pmd_test_command {
1638 const char *cmd_name;
1639 cmd_func_t cmd_func;
1642 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1644 /* Check the link status of all ports in up to 9s and print the final status */
1646 check_all_ports_link_status(uint32_t port_mask)
1648 #define CHECK_INTERVAL 100 /* 100ms */
1649 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1650 uint8_t portid, count, all_ports_up, print_flag = 0;
1651 struct rte_eth_link link;
1653 printf("Checking link statuses...\n");
1655 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1657 FOREACH_PORT(portid, ports) {
1658 if ((port_mask & (1 << portid)) == 0)
1660 memset(&link, 0, sizeof(link));
1661 rte_eth_link_get_nowait(portid, &link);
1662 /* print link status if flag set */
1663 if (print_flag == 1) {
1664 if (link.link_status)
1665 printf("Port %d Link Up - speed %u "
1666 "Mbps - %s\n", (uint8_t)portid,
1667 (unsigned)link.link_speed,
1668 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1669 ("full-duplex") : ("half-duplex\n"));
1671 printf("Port %d Link Down\n",
1675 /* clear all_ports_up flag if any link down */
1676 if (link.link_status == ETH_LINK_DOWN) {
1681 /* after finally printing all link status, get out */
1682 if (print_flag == 1)
1685 if (all_ports_up == 0) {
1687 rte_delay_ms(CHECK_INTERVAL);
1690 /* set the print_flag if all ports up or timeout */
1691 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
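/*
 * Program the user-supplied TX queue to statistics-counter mappings of
 * the given port into its device registers.
 */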
1698 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1702 uint8_t mapping_found = 0;
1704 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1705 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1706 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1707 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1708 tx_queue_stats_mappings[i].queue_id,
1709 tx_queue_stats_mappings[i].stats_counter_id);
1716 port->tx_queue_stats_mapping_enabled = 1;
1721 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1725 uint8_t mapping_found = 0;
1727 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1728 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1729 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1730 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1731 rx_queue_stats_mappings[i].queue_id,
1732 rx_queue_stats_mappings[i].stats_counter_id);
1739 port->rx_queue_stats_mapping_enabled = 1;
1744 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1748 diag = set_tx_queue_stats_mapping_registers(pi, port);
1750 if (diag == -ENOTSUP) {
1751 port->tx_queue_stats_mapping_enabled = 0;
1752 printf("TX queue stats mapping not supported port id=%d\n", pi);
1755 rte_exit(EXIT_FAILURE,
1756 "set_tx_queue_stats_mapping_registers "
1757 "failed for port id=%d diag=%d\n",
1761 diag = set_rx_queue_stats_mapping_registers(pi, port);
1763 if (diag == -ENOTSUP) {
1764 port->rx_queue_stats_mapping_enabled = 0;
1765 printf("RX queue stats mapping not supported port id=%d\n", pi);
1768 rte_exit(EXIT_FAILURE,
1769 "set_rx_queue_stats_mapping_registers "
1770 "failed for port id=%d diag=%d\n",
1776 rxtx_port_config(struct rte_port *port)
1778 port->rx_conf = port->dev_info.default_rxconf;
1779 port->tx_conf = port->dev_info.default_txconf;
1781 /* Check if any RX/TX parameters have been passed */
1782 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1783 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1785 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1786 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1788 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1789 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1791 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1792 port->rx_conf.rx_free_thresh = rx_free_thresh;
1794 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1795 port->rx_conf.rx_drop_en = rx_drop_en;
1797 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1798 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1800 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1801 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1803 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1804 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1806 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1807 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1809 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1810 port->tx_conf.tx_free_thresh = tx_free_thresh;
1812 if (txq_flags != RTE_PMD_PARAM_UNSET)
1813 port->tx_conf.txq_flags = txq_flags;
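/*
 * Apply the global RX mode, flow director and RSS settings to every
 * port, and select a multi-queue RX mode consistent with the RSS and VF
 * configuration.
 */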
1817 init_port_config(void)
1820 struct rte_port *port;
1822 FOREACH_PORT(pid, ports) {
1824 port->dev_conf.rxmode = rx_mode;
1825 port->dev_conf.fdir_conf = fdir_conf;
1827 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1828 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1830 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1831 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1834 if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1835 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1836 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1838 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1841 if (port->dev_info.max_vfs != 0) {
1842 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1843 port->dev_conf.rxmode.mq_mode =
1846 port->dev_conf.rxmode.mq_mode =
1849 port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
1852 rxtx_port_config(port);
1854 rte_eth_macaddr_get(pid, &port->eth_addr);
1856 map_port_queue_stats_mapping_registers(pid, port);
1857 #ifdef RTE_NIC_BYPASS
1858 rte_eth_dev_bypass_init(pid);
1863 void set_port_slave_flag(portid_t slave_pid)
1865 struct rte_port *port;
1867 port = &ports[slave_pid];
1868 port->slave_flag = 1;
1871 void clear_port_slave_flag(portid_t slave_pid)
1873 struct rte_port *port;
1875 port = &ports[slave_pid];
1876 port->slave_flag = 0;
1879 uint8_t port_is_bonding_slave(portid_t slave_pid)
1881 struct rte_port *port;
1883 port = &ports[slave_pid];
1884 return port->slave_flag;
1887 const uint16_t vlan_tags[] = {
1888 0, 1, 2, 3, 4, 5, 6, 7,
1889 8, 9, 10, 11, 12, 13, 14, 15,
1890 16, 17, 18, 19, 20, 21, 22, 23,
1891 24, 25, 26, 27, 28, 29, 30, 31
1895 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
1896 enum dcb_mode_enable dcb_mode,
1897 enum rte_eth_nb_tcs num_tcs,
1903 * Builds up the correct configuration for dcb+vt based on the vlan tags array
1904 * given above, and the number of traffic classes available for use.
1906 if (dcb_mode == DCB_VT_ENABLED) {
1907 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
1908 ð_conf->rx_adv_conf.vmdq_dcb_conf;
1909 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
1910 ð_conf->tx_adv_conf.vmdq_dcb_tx_conf;
1912 /* VMDQ+DCB RX and TX configurations */
1913 vmdq_rx_conf->enable_default_pool = 0;
1914 vmdq_rx_conf->default_pool = 0;
1915 vmdq_rx_conf->nb_queue_pools =
1916 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1917 vmdq_tx_conf->nb_queue_pools =
1918 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1920 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
1921 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
1922 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
1923 vmdq_rx_conf->pool_map[i].pools =
1924 1 << (i % vmdq_rx_conf->nb_queue_pools);
1926 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1927 vmdq_rx_conf->dcb_tc[i] = i;
1928 vmdq_tx_conf->dcb_tc[i] = i;
1931 /* set DCB mode of RX and TX of multiple queues */
1932 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1933 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1935 struct rte_eth_dcb_rx_conf *rx_conf =
1936 ð_conf->rx_adv_conf.dcb_rx_conf;
1937 struct rte_eth_dcb_tx_conf *tx_conf =
1938 ð_conf->tx_adv_conf.dcb_tx_conf;
1940 rx_conf->nb_tcs = num_tcs;
1941 tx_conf->nb_tcs = num_tcs;
1943 for (i = 0; i < num_tcs; i++) {
1944 rx_conf->dcb_tc[i] = i;
1945 tx_conf->dcb_tc[i] = i;
1947 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
1948 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
1949 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1953 eth_conf->dcb_capability_en =
1954 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
1956 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
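/*
 * Put a port into DCB mode: build the DCB (or VMDQ+DCB) configuration,
 * adjust the number of RX/TX queues accordingly and enable VLAN
 * filtering for the test VLAN tags.
 */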
1962 init_port_dcb_config(portid_t pid,
1963 enum dcb_mode_enable dcb_mode,
1964 enum rte_eth_nb_tcs num_tcs,
1967 struct rte_eth_conf port_conf;
1968 struct rte_eth_dev_info dev_info;
1969 struct rte_port *rte_port;
1973 rte_eth_dev_info_get(pid, &dev_info);
1975 /* If dev_info.vmdq_pool_base is greater than 0,
1976 * the queue ids of the VMDq pools start after the PF queues.
1978 if (dcb_mode == DCB_VT_ENABLED && dev_info.vmdq_pool_base > 0) {
1979 printf("VMDQ_DCB multi-queue mode is nonsensical"
1980 " for port %d.", pid);
1984 /* Assume the ports in testpmd have the same DCB capability
1985 * and the same number of rxq and txq in DCB mode
1987 if (dcb_mode == DCB_VT_ENABLED) {
1988 nb_rxq = dev_info.max_rx_queues;
1989 nb_txq = dev_info.max_tx_queues;
1991 /* if VT is disabled, use all PF queues */
1992 if (dev_info.vmdq_pool_base == 0) {
1993 nb_rxq = dev_info.max_rx_queues;
1994 nb_txq = dev_info.max_tx_queues;
1996 nb_rxq = (queueid_t)num_tcs;
1997 nb_txq = (queueid_t)num_tcs;
2001 rx_free_thresh = 64;
2003 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2004 /* Enter DCB configuration status */
2007 /* set configuration of DCB in VT mode and DCB in non-VT mode */
2008 retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2012 rte_port = &ports[pid];
2013 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2015 rxtx_port_config(rte_port);
2017 rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
2018 for (i = 0; i < RTE_DIM(vlan_tags); i++)
2019 rx_vft_set(pid, vlan_tags[i], 1);
2021 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2022 map_port_queue_stats_mapping_registers(pid, rte_port);
2024 rte_port->dcb_flag = 1;
2034 /* Configuration of Ethernet ports. */
2035 ports = rte_zmalloc("testpmd: ports",
2036 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2037 RTE_CACHE_LINE_SIZE);
2038 if (ports == NULL) {
2039 rte_exit(EXIT_FAILURE,
2040 "rte_zmalloc(%d struct rte_port) failed\n",
2044 /* enable allocated ports */
2045 for (pid = 0; pid < nb_ports; pid++)
2046 ports[pid].enabled = 1;
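/*
 * On SIGINT/SIGTERM, shut down the packet capture framework when it is
 * in use, then re-raise the signal with the default handler so that the
 * process exits with the expected status.
 */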
2057 signal_handler(int signum)
2059 if (signum == SIGINT || signum == SIGTERM) {
2060 printf("\nSignal %d received, preparing to exit...\n",
2062 #ifdef RTE_LIBRTE_PDUMP
2063 /* uninitialize packet capture framework */
2067 /* exit with the expected status */
2068 signal(signum, SIG_DFL);
2069 kill(getpid(), signum);
2074 main(int argc, char** argv)
2079 signal(SIGINT, signal_handler);
2080 signal(SIGTERM, signal_handler);
2082 diag = rte_eal_init(argc, argv);
2084 rte_panic("Cannot init EAL\n");
2086 #ifdef RTE_LIBRTE_PDUMP
2087 /* initialize packet capture framework */
2088 rte_pdump_init(NULL);
2091 nb_ports = (portid_t) rte_eth_dev_count();
2093 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2095 /* allocate port structures, and init them */
2098 set_def_fwd_config();
2100 rte_panic("Empty set of forwarding logical cores - check the "
2101 "core mask supplied in the command parameters\n");
2106 launch_args_parse(argc, argv);
2108 if (!nb_rxq && !nb_txq)
2109 printf("Warning: Either rx or tx queues should be non-zero\n");
2111 if (nb_rxq > 1 && nb_rxq > nb_txq)
2112 printf("Warning: nb_rxq=%d enables RSS configuration, "
2113 "but nb_txq=%d will prevent to fully test it.\n",
2117 if (start_port(RTE_PORT_ALL) != 0)
2118 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2120 /* set all ports to promiscuous mode by default */
2121 FOREACH_PORT(port_id, ports)
2122 rte_eth_promiscuous_enable(port_id);
2124 #ifdef RTE_LIBRTE_CMDLINE
2125 if (interactive == 1) {
2127 printf("Start automatic packet forwarding\n");
2128 start_packet_forwarding(0);
2137 printf("No commandline core given, start packet forwarding\n");
2138 start_packet_forwarding(0);
2139 printf("Press enter to exit\n");
2140 rc = read(0, &c, 1);