/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <time.h>
#include <fcntl.h>
#include <sys/types.h>
#include <errno.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_PMD_XENVIRT
#include <rte_eth_xenvirt.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif

#include "testpmd.h"
uint16_t verbose_level = 0; /**< Silent by default. */

/* use master core for command line ? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 0; /**< No numa support by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not set.
 */
uint8_t socket_num = UMA_NO_CONFIG;
/*
 * Use ANONYMOUS mapped memory (might be not physically contiguous) for mbufs.
 */
uint8_t mp_anon = 0;
/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic
 * generator ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;
/*
 * Probed Target Environment.
 */
struct rte_port *ports;        /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
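/*
 * For example, with nb_ports = 2 and nb_rxq = nb_txq = 2,
 * init_fwd_streams() allocates nb_ports * RTE_MAX(nb_rxq, nb_txq) = 4
 * forwarding streams; the forwarding configuration then distributes
 * those streams among the forwarding lcores.
 */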
/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};
struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
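/*
 * For instance, a 3-segment TXONLY packet could be described (e.g. via
 * the "set txpkts" runtime command) as:
 *
 *	tx_pkt_seg_lengths[0] = 64;
 *	tx_pkt_seg_lengths[1] = 64;
 *	tx_pkt_seg_lengths[2] = 32;
 *	tx_pkt_nb_segs = 3;
 *
 * in which case tx_pkt_length is expected to be the sum of the segment
 * lengths (160 here).
 */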
/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/* Whether DCB is currently under test. */
uint8_t dcb_test = 0;
/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */
int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX queue flags.
 */
int32_t txq_flags = RTE_PMD_PARAM_UNSET;
/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration.
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
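/*
 * With PORT_TOPOLOGY_PAIRED, traffic is forwarded between consecutive
 * port pairs: port 0 <-> port 1, port 2 <-> port 3, and so on.
 */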
/*
 * Avoid flushing all RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Avoid checking link status when starting/stopping ports.
 */
uint8_t no_link_check = 0; /* check by default */
/*
 * NIC bypass mode configuration options.
 */
#ifdef RTE_NIC_BYPASS

/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;

#endif
/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.split_hdr_size = 0,
	.header_split   = 0, /**< Header Split disabled. */
	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
};
struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0x0,
		.ipv4_mask = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};
volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

unsigned int max_socket = 0;

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
/*
 * Check whether all the ports are started.
 * If yes, return a positive value; if not, return zero.
 */
static int all_ports_started(void);

/*
 * Find the next enabled port.
 */
portid_t
find_next_port(portid_t p, struct rte_port *ports, int size)
{
	if (ports == NULL)
		rte_exit(-EINVAL, "failed to find a next port id\n");

	while ((p < size) && (ports[p].enabled == 0))
		p++;
	return p;
}
/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		sock_num = rte_lcore_to_socket_id(i) + 1;
		if (sock_num > max_socket) {
			if (sock_num > RTE_MAX_NUMA_NODES)
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			max_socket = sock_num;
		}
		if (!rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}
static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}
static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;

	for (pt_id = 0; pt_id < nb_ports; pt_id++)
		fwd_ports_ids[pt_id] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
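/*
 * Sizing note for mbuf_pool_create() below: each pool element holds one
 * rte_mbuf header plus mbuf_seg_size bytes of buffer, so a pool of
 * nb_mbuf elements consumes roughly
 * nb_mbuf * (sizeof(struct rte_mbuf) + mbuf_seg_size) bytes of mbuf
 * memory, plus mempool bookkeeping overhead.
 */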
/*
 * Configuration initialisation done once at init time.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	RTE_LOG(INFO, USER1,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

#ifdef RTE_LIBRTE_PMD_XENVIRT
	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
		(unsigned) mb_mempool_cache,
		sizeof(struct rte_pktmbuf_pool_private),
		rte_pktmbuf_pool_init, NULL,
		rte_pktmbuf_init, NULL,
		socket_id, 0);
#endif

	/* if the former XEN allocation failed fall back to normal allocation */
	if (rte_mp == NULL) {
		if (mp_anon != 0) {
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, 0);
			if (rte_mp == NULL)
				goto err;

			if (rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
				goto err;
			}
			rte_pktmbuf_pool_init(rte_mp, NULL);
			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
		} else {
			/* wrapper to rte_mempool_create() */
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
		}
	}

err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}
/*
 * Check whether a given socket id is valid in NUMA mode;
 * return 0 if valid, -1 otherwise.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (socket_id >= max_socket) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];

	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				 sizeof(struct fwd_lcore *) * nb_lcores,
				 RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		/* Worst-case sizing: enough mbufs for full RX and TX rings,
		 * the per-lcore mempool caches and one packet burst. */
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;

		if (!numa_support)
			nb_mbuf_per_pool =
				(nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
	}

	if (!numa_support) {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_num);
	}

	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	if (numa_support) {
		uint8_t i;
		unsigned int nb_mbuf;

		if (param_total_num_mbufs)
			nb_mbuf_per_pool = nb_mbuf_per_pool / nb_ports;

		for (i = 0; i < max_socket; i++) {
			nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
			if (nb_mbuf)
				mbuf_pool_create(mbuf_data_size,
						 nb_mbuf, i);
		}
	}
	init_port_config();

	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
}
void
reconfig(portid_t new_port_id, unsigned int socket_id)
{
	struct rte_port *port;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];
	rte_eth_dev_info_get(new_port_id, &port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;

	init_port_config();
}
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
						"failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
								" failed\n");
	}

	return 0;
}
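/*
 * When RTE_TEST_PMD_RECORD_BURST_STATS is enabled, each forwarding
 * stream records in pkt_burst_spread[n] how many RX/TX bursts returned
 * exactly n packets; pkt_burst_stats_display() below then reports the
 * two most frequent burst sizes as percentages of the total burst count.
 */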
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) &&
	    (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64
		       "RX-total: %-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64
		       "TX-total: %-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	} else {
		printf("  RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64
		       " RX-total:%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs:%14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64
		       " TX-total:%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       " RX-errors:%14"PRIu64
			       " RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i],
			       stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       " TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}
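/*
 * The flush loop below is bounded by one wall-clock second measured in
 * TSC cycles: rte_get_timer_hz() returns the number of cycles per
 * second, so on a hypothetical 2 GHz timer timer_period is 2e9 cycles
 * and the inner do/while exits once the accumulated RX time exceeds it.
 */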
static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t  port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/*
				 * testpmd can get stuck in the do/while loop
				 * below if rte_eth_rx_burst() keeps returning
				 * packets, so a timer is used to exit the
				 * loop after the 1-second timeout expires.
				 */
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					(timer_tsc < timer_period));
				timer_tsc = 0;
			}
		}
		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
	}
}
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;

	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
	} while (! fc->stopped);
}

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}
/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	/* A copy with stopped = 1 makes run_pkt_fwd_on_lcore() execute its
	 * do/while loop exactly once, i.e. send a single burst per stream. */
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}
/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - Launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}
/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t pt_id;
	streamid_t sm_id;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return;
	}

	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
					"be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
				"should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	fwd_config_setup();
	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
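/*
 * Usage note: calling start_packet_forwarding() with with_tx_first = N
 * (e.g. via the "start tx_first" command) first sends N single TXONLY
 * bursts on every forwarding core, then launches the configured
 * forwarding engine; this seeds traffic in loopback topologies.
 */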
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif
	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					 fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					 fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		/* Subtract the snapshot taken at start time so that only
		 * the traffic of this forwarding run is reported. */
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}
	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}
void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
		printf("\nSet link up fail.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
		printf("\nSet link down fail.\n");
}
static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	FOREACH_PORT(pi, ports) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
			(port->slave_flag == 0))
			return 0;
	}

	/* All ports are started */
	return 1;
}

int
all_ports_stopped(void)
{
	portid_t pi;
	struct rte_port *port;

	FOREACH_PORT(pi, ports) {
		port = &ports[pi];
		if ((port->port_status != RTE_PORT_STOPPED) &&
			(port->slave_flag == 0))
			return 0;
	}

	return 1;
}

int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;

	return 1;
}

static int
port_is_closed(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_CLOSED)
		return 0;

	return 1;
}
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct ether_addr mac_addr;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	if (dcb_config)
		dcb_test = 1;
	FOREACH_PORT(pi, ports) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
						 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, txring_numa[pi],
						&(port->tx_conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, port->socket_id,
						&(port->tx_conf));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup rx queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool *mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							" No mempool allocation"
							" on the socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd, rxring_numa[pi],
						&(port->rx_conf), mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							" No mempool allocation"
							" on the socket %d\n",
							port->socket_id);
						return -1;
					}
					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd, port->socket_id,
						&(port->rx_conf), mp);
				}
				if (diag == 0)
					continue;

				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}
		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Fail to start port, set it back to stopped */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d can not be set into started\n", pi);

		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	FOREACH_PORT(pi, ports) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
						RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	FOREACH_PORT(pi, ports) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			printf("Port %d is already closed\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->flow_list)
			port_flow_flush(pi);
		rte_eth_dev_close(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
	}

	printf("Done\n");
}
void
attach_port(char *identifier)
{
	portid_t pi = 0;
	unsigned int socket_id;

	printf("Attaching a new port...\n");

	if (identifier == NULL) {
		printf("Invalid parameters are specified\n");
		return;
	}

	if (rte_eth_dev_attach(identifier, &pi))
		return;

	ports[pi].enabled = 1;
	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
	/* if socket_id is invalid, set to 0 */
	if (check_socket_id(socket_id) < 0)
		socket_id = 0;
	reconfig(pi, socket_id);
	rte_eth_promiscuous_enable(pi);

	nb_ports = rte_eth_dev_count();

	ports[pi].port_status = RTE_PORT_STOPPED;

	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
	printf("Done\n");
}
void
detach_port(uint8_t port_id)
{
	char name[RTE_ETH_NAME_MAX_LEN];

	printf("Detaching a port...\n");

	if (!port_is_closed(port_id)) {
		printf("Please close port first\n");
		return;
	}

	if (ports[port_id].flow_list)
		port_flow_flush(port_id);

	if (rte_eth_dev_detach(port_id, name))
		return;

	ports[port_id].enabled = 0;
	nb_ports = rte_eth_dev_count();

	printf("Port '%s' is detached. Now total ports is %d\n",
			name, nb_ports);
	printf("Done\n");
}
void
pmd_test_exit(void)
{
	portid_t pt_id;

	if (test_done == 0)
		stop_packet_forwarding();

	if (ports != NULL) {
		no_link_check = 1;
		FOREACH_PORT(pt_id, ports) {
			printf("\nShutting down port %d...\n", pt_id);
			fflush(stdout);
			stop_port(pt_id);
			close_port(pt_id);
		}
	}
	printf("\nBye...\n");
}
typedef void (*cmd_func_t)(void);
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint8_t portid, count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("Checking link statuses...\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		FOREACH_PORT(portid, ports) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up - speed %u "
						"Mbps - %s\n", (uint8_t)portid,
						(unsigned)link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n",
						(uint8_t)portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1))
			print_flag = 1;
	}
}
static int
set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
					tx_queue_stats_mappings[i].queue_id,
					tx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->tx_queue_stats_mapping_enabled = 1;
	return 0;
}

static int
set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
					rx_queue_stats_mappings[i].queue_id,
					rx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->rx_queue_stats_mapping_enabled = 1;
	return 0;
}
static void
map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
{
	int diag = 0;

	diag = set_tx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->tx_queue_stats_mapping_enabled = 0;
			printf("TX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
					"set_tx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}

	diag = set_rx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->rx_queue_stats_mapping_enabled = 0;
			printf("RX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
					"set_rx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}
}
static void
rxtx_port_config(struct rte_port *port)
{
	port->rx_conf = port->dev_info.default_rxconf;
	port->tx_conf = port->dev_info.default_txconf;

	/* Check if any RX/TX parameters have been passed */
	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.pthresh = rx_pthresh;

	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.hthresh = rx_hthresh;

	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.wthresh = rx_wthresh;

	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_free_thresh = rx_free_thresh;

	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_drop_en = rx_drop_en;

	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.pthresh = tx_pthresh;

	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.hthresh = tx_hthresh;

	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.wthresh = tx_wthresh;

	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_rs_thresh = tx_rs_thresh;

	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_free_thresh = tx_free_thresh;

	if (txq_flags != RTE_PMD_PARAM_UNSET)
		port->tx_conf.txq_flags = txq_flags;
}
void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;

	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		port->dev_conf.rxmode = rx_mode;
		port->dev_conf.fdir_conf = fdir_conf;
		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		if (port->dcb_flag == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
			else
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
		}

		rxtx_port_config(port);

		rte_eth_macaddr_get(pid, &port->eth_addr);

		map_port_queue_stats_mapping_registers(pid, port);
#ifdef RTE_NIC_BYPASS
		rte_eth_dev_bypass_init(pid);
#endif
	}
}
void set_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 1;
}

void clear_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 0;
}

uint8_t port_is_bonding_slave(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	return port->slave_flag;
}
const uint16_t vlan_tags[] = {
	0,  1,  2,  3,  4,  5,  6,  7,
	8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};
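/*
 * Illustration of the VMDQ+DCB mapping built below: assuming
 * num_tcs == ETH_4_TCS (hence 32 pools), pool_map[i] sends VLAN id
 * vlan_tags[i] to pool (i % 32), i.e. VLAN 0 to pool 0, VLAN 1 to
 * pool 1, and so on for all 32 tags above.
 */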
static int
get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan
	 * tags array given above, and the number of traffic classes
	 * available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i;
			vmdq_tx_conf->dcb_tc[i] = i;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
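/*
 * init_port_dcb_config() is typically reached from the interactive
 * "port config <port> dcb vt (on|off) <num_tcs> pfc (on|off)" command;
 * the port is expected to be stopped and is fully reconfigured for DCB.
 */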
int
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t i;

	rte_port = &ports[pid];

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	/* set configuration of DCB in vt mode and DCB in non-vt mode */
	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;
	port_conf.rxmode.hw_vlan_filter = 1;

	/*
	 * Write the configuration into the device.
	 * Set the numbers of RX & TX queues to 0, so
	 * the RX & TX queues will not be setup.
	 */
	(void)rte_eth_dev_configure(pid, 0, 0, &port_conf);

	rte_eth_dev_info_get(pid, &rte_port->dev_info);

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue id of vmdq pools is started after pf queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED &&
	    rte_port->dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical"
			" for port %d.", pid);
		return -1;
	}

	/* Assume the ports in testpmd have the same dcb capability
	 * and the same number of rxq and txq in dcb mode.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		if (rte_port->dev_info.max_vfs > 0) {
			nb_rxq = rte_port->dev_info.nb_rx_queues;
			nb_txq = rte_port->dev_info.nb_tx_queues;
		} else {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		}
	} else {
		/* if vt is disabled, use all pf queues */
		if (rte_port->dev_info.vmdq_pool_base == 0) {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;
		}
	}
	rx_free_thresh = 64;

	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}
static void
init_port(void)
{
	portid_t pid;

	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
			    RTE_CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE,
				"rte_zmalloc(%d struct rte_port) failed\n",
				RTE_MAX_ETHPORTS);
	}

	/* enabled allocated ports */
	for (pid = 0; pid < nb_ports; pid++)
		ports[pid].enabled = 1;
}
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
				signum);
#ifdef RTE_LIBRTE_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
		/* exit with the expected status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
	}
}
int
main(int argc, char** argv)
{
	int diag;
	uint8_t port_id;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

#ifdef RTE_LIBRTE_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init(NULL);
#endif

	nb_ports = (portid_t) rte_eth_dev_count();
	if (nb_ports == 0)
		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (!nb_rxq && !nb_txq)
		printf("Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent to fully test it.\n",
		       nb_rxq, nb_txq);

	init_config();
	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	FOREACH_PORT(port_id, ports)
		rte_eth_promiscuous_enable(port_id);

#ifdef RTE_LIBRTE_CMDLINE
	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
	} else
#endif
	{
		char c;
		int rc;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(0);
		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	return 0;
}