4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <sys/types.h>
44 #include <sys/queue.h>
51 #include <rte_common.h>
52 #include <rte_byteorder.h>
54 #include <rte_debug.h>
55 #include <rte_cycles.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
59 #include <rte_launch.h>
60 #include <rte_tailq.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
70 #include <rte_interrupts.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
80 #include "mempool_osdep.h"
/*
 * Global run-time configuration of testpmd.
 * Most of these are set from the command line (see parameter parsing
 * elsewhere in this application) and read by the forwarding engines.
 * NOTE(review): this extract is sampled — some interleaved lines
 * (comment delimiters, array terminators, #endif lines) are not shown.
 */
82 uint16_t verbose_level = 0; /**< Silent by default. */
84 /* use master core for command line ? */
85 uint8_t interactive = 0;
86 uint8_t auto_start = 0;
89 * NUMA support configuration.
90 * When set, the NUMA support attempts to dispatch the allocation of the
91 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
92 * probed ports among the CPU sockets 0 and 1.
93 * Otherwise, all memory is allocated from CPU socket 0.
95 uint8_t numa_support = 0; /**< No numa support by default */
98 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
101 uint8_t socket_num = UMA_NO_CONFIG;
104 * Use ANONYMOUS mapped memory (might be not physically contiguous) for mbufs.
109 * Record the Ethernet address of peer target ports to which packets are
111 * Must be instantiated with the ethernet addresses of peer traffic generator
114 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
115 portid_t nb_peer_eth_addrs = 0;
118 * Probed Target Environment.
120 struct rte_port *ports; /**< For all probed ethernet ports. */
121 portid_t nb_ports; /**< Number of probed ethernet ports. */
122 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
123 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
126 * Test Forwarding Configuration.
127 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
128 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
130 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
131 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
132 portid_t nb_cfg_ports; /**< Number of configured ports. */
133 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
135 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
136 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
138 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
139 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
/*
 * Table of all available forwarding engines; terminated elsewhere.
 * NOTE(review): other engine entries (io, mac, rxonly, txonly, csum, ...)
 * are present in the full file but elided from this extract.
 */
142 * Forwarding engines.
144 struct fwd_engine * fwd_engines[] = {
147 &mac_retry_fwd_engine,
151 #ifdef RTE_LIBRTE_IEEE1588
152 &ieee1588_fwd_engine,
157 struct fwd_config cur_fwd_config;
158 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
160 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
161 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
162 * specified on command-line. */
165 * Configuration of packet segments used by the "txonly" processing engine.
167 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
168 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
169 TXONLY_DEF_PACKET_LEN,
171 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
173 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
174 uint16_t mb_mempool_cache = DEF_PKT_BURST; /**< Size of mbuf mempool cache. */
176 /* current configuration is in DCB or not, 0 means it is not in DCB mode */
177 uint8_t dcb_config = 0;
179 /* Whether the dcb is in testing status */
180 uint8_t dcb_test = 0;
182 /* DCB on and VT on mapping is default */
183 enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
186 * Configurable number of RX/TX queues.
188 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
189 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
192 * Configurable number of RX/TX ring descriptors.
194 #define RTE_TEST_RX_DESC_DEFAULT 128
195 #define RTE_TEST_TX_DESC_DEFAULT 512
196 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
197 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
/*
 * Default NIC ring threshold values below are hardware tuning knobs;
 * they are passed to the PMDs via rx_thresh/tx_thresh.
 */
200 * Configurable values of RX and TX ring threshold registers.
202 #define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
203 #define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
204 #define RX_WTHRESH 4 /**< Default value of RX write-back threshold register. */
206 #define TX_PTHRESH 36 /**< Default value of TX prefetch threshold register. */
207 #define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
208 #define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */
210 struct rte_eth_thresh rx_thresh = {
211 .pthresh = RX_PTHRESH,
212 .hthresh = RX_HTHRESH,
213 .wthresh = RX_WTHRESH,
216 struct rte_eth_thresh tx_thresh = {
217 .pthresh = TX_PTHRESH,
218 .hthresh = TX_HTHRESH,
219 .wthresh = TX_WTHRESH,
223 * Configurable value of RX free threshold.
225 uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */
228 * Configurable value of RX drop enable.
230 uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */
233 * Configurable value of TX free threshold.
235 uint16_t tx_free_thresh = 0; /* Use default values. */
238 * Configurable value of TX RS bit threshold.
240 uint16_t tx_rs_thresh = 0; /* Use default values. */
243 * Configurable value of TX queue flags.
245 uint32_t txq_flags = 0; /* No flags set. */
248 * Receive Side Scaling (RSS) configuration.
250 uint16_t rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6; /* RSS IP by default. */
253 * Port topology configuration
255 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
258 * Avoids to flush all the RX streams before starts forwarding.
260 uint8_t no_flush_rx = 0; /* flush by default */
263 * NIC bypass mode configuration options.
265 #ifdef RTE_NIC_BYPASS
267 /* The NIC bypass watchdog timeout. */
268 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
273 * Ethernet device configuration.
275 struct rte_eth_rxmode rx_mode = {
276 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
278 .header_split = 0, /**< Header Split disabled. */
279 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
280 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
281 .hw_vlan_strip = 1, /**< VLAN strip enabled. */
282 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
283 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
284 .hw_strip_crc = 0, /**< CRC stripping by hardware disabled. */
/* Flow Director disabled by default; flexbytes offset 0x6 is in 16-bit words. */
287 struct rte_fdir_conf fdir_conf = {
288 .mode = RTE_FDIR_MODE_NONE,
289 .pballoc = RTE_FDIR_PBALLOC_64K,
290 .status = RTE_FDIR_REPORT_STATUS,
291 .flexbytes_offset = 0x6,
/* Forwarding state flag: 1 = idle, 0 = forwarding in progress. */
295 static volatile int test_done = 1; /* stop packet forwarding when set to 1. */
297 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
298 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
300 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
301 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
303 uint16_t nb_tx_queue_stats_mappings = 0;
304 uint16_t nb_rx_queue_stats_mappings = 0;
306 /* Forward function declarations */
307 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
308 static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);
311 * Check if all the ports are started.
312 * If yes, return positive value. If not, return zero.
314 static int all_ports_started(void);
/*
 * Build the default list of forwarding lcores: every EAL-enabled lcore
 * except the master lcore (reserved for the command line), then record
 * the counts in nb_lcores/nb_cfg_lcores.
 * NOTE(review): this extract omits interior lines (loop variable
 * declarations, `continue` statements and closing braces).
 */
317 * Setup default configuration.
320 set_default_fwd_lcores_config(void)
326 for (i = 0; i < RTE_MAX_LCORE; i++) {
327 if (! rte_lcore_is_enabled(i))
329 if (i == rte_get_master_lcore())
331 fwd_lcores_cpuids[nb_lc++] = i;
333 nb_lcores = (lcoreid_t) nb_lc;
334 nb_cfg_lcores = nb_lcores;
/*
 * Give each possible peer port a default locally-administered MAC
 * address whose last byte is the port index (bytes 1..4 presumably
 * stay zero from static initialization — TODO confirm in full file).
 */
339 set_def_peer_eth_addrs(void)
343 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
344 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
345 peer_eth_addrs[i].addr_bytes[5] = i;
/*
 * Default port configuration: forward on every probed port,
 * identity-mapping fwd_ports_ids[i] = i.
 */
350 set_default_fwd_ports_config(void)
354 for (pt_id = 0; pt_id < nb_ports; pt_id++)
355 fwd_ports_ids[pt_id] = pt_id;
357 nb_cfg_ports = nb_ports;
358 nb_fwd_ports = nb_ports;
/* Reset lcore, peer-address and port configuration to the defaults. */
362 set_def_fwd_config(void)
364 set_default_fwd_lcores_config();
365 set_def_peer_eth_addrs();
366 set_default_fwd_ports_config();
/*
 * Opaque argument structures passed to the mempool object/pool
 * constructors used by mbuf_pool_create() below.
 */
370 * Configuration initialisation done once at init time.
372 struct mbuf_ctor_arg {
373 uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
374 uint16_t seg_buf_size; /**< size of data segment in mbuf. */
377 struct mbuf_pool_ctor_arg {
378 uint16_t seg_buf_size; /**< size of data segment in mbuf. */
/*
 * Per-object constructor run once for every mbuf when the pool is
 * created: sets the buffer pointer/physaddr just past the (cache-line
 * rounded) rte_mbuf header and initializes packet metadata.
 * NOTE(review): mb->type is assigned RTE_MBUF_PKT twice (original lines
 * 393 and 399) — the second assignment is redundant.
 */
382 testpmd_mbuf_ctor(struct rte_mempool *mp,
385 __attribute__((unused)) unsigned i)
387 struct mbuf_ctor_arg *mb_ctor_arg;
390 mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
391 mb = (struct rte_mbuf *) raw_mbuf;
393 mb->type = RTE_MBUF_PKT;
/* Data buffer lives at seg_buf_offset bytes past the mbuf header. */
395 mb->buf_addr = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
396 mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
397 mb_ctor_arg->seg_buf_offset);
398 mb->buf_len = mb_ctor_arg->seg_buf_size;
399 mb->type = RTE_MBUF_PKT;
/* Leave the standard headroom before the packet data. */
401 mb->pkt.data = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
403 mb->pkt.vlan_macip.data = 0;
404 mb->pkt.hash.rss = 0;
/*
 * Pool constructor: validates that the mempool reserved enough private
 * space for rte_pktmbuf_pool_private, then records the per-mbuf data
 * room size there so the PMDs can read it.
 */
408 testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
411 struct mbuf_pool_ctor_arg *mbp_ctor_arg;
412 struct rte_pktmbuf_pool_private *mbp_priv;
414 if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
415 printf("%s(%s) private_data_size %d < %d\n",
416 __func__, mp->name, (int) mp->private_data_size,
417 (int) sizeof(struct rte_pktmbuf_pool_private));
420 mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
421 mbp_priv = rte_mempool_get_priv(mp);
422 mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
/*
 * Create one mbuf pool on the given CPU socket, sized for nb_mbuf
 * objects of mbuf_seg_size data bytes (plus headroom and header).
 * Three mutually exclusive creation back-ends are selected at build /
 * run time: Xen grant-alloc, anonymous-mmap, or the regular
 * rte_mempool_create().  Exits the application on failure.
 * NOTE(review): the #else / mp_anon-selection lines between the three
 * calls are elided from this extract.
 */
426 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
427 unsigned int socket_id)
429 char pool_name[RTE_MEMPOOL_NAMESIZE];
430 struct rte_mempool *rte_mp;
431 struct mbuf_pool_ctor_arg mbp_ctor_arg;
432 struct mbuf_ctor_arg mb_ctor_arg;
/* Data room = headroom + segment size; buffer starts after the header. */
435 mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
437 mb_ctor_arg.seg_buf_offset =
438 (uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
439 mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
440 mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
441 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
443 #ifdef RTE_LIBRTE_PMD_XENVIRT
444 rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
445 (unsigned) mb_mempool_cache,
446 sizeof(struct rte_pktmbuf_pool_private),
447 testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
448 testpmd_mbuf_ctor, &mb_ctor_arg,
/* Anonymous-mmap variant (memory may not be physically contiguous). */
455 rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
456 (unsigned) mb_mempool_cache,
457 sizeof(struct rte_pktmbuf_pool_private),
458 testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
459 testpmd_mbuf_ctor, &mb_ctor_arg,
/* Default back-end: hugepage-backed rte_mempool. */
462 rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size,
463 (unsigned) mb_mempool_cache,
464 sizeof(struct rte_pktmbuf_pool_private),
465 testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
466 testpmd_mbuf_ctor, &mb_ctor_arg,
471 if (rte_mp == NULL) {
472 rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
473 "failed\n", socket_id);
474 } else if (verbose_level > 0) {
475 rte_mempool_dump(rte_mp);
/*
 * Validate a CPU socket id against MAX_SOCKET when NUMA is enabled;
 * prints a one-time configuration hint for out-of-range ids.
 * Returns 0 when valid, -1 otherwise (return statements are on lines
 * elided from this extract).
 */
480 * Check given socket id is valid or not with NUMA mode,
481 * if valid, return 0, else return -1
484 check_socket_id(const unsigned int socket_id)
486 static int warning_once = 0;
488 if (socket_id >= MAX_SOCKET) {
489 if (!warning_once && numa_support)
490 printf("Warning: NUMA should be configured manually by"
491 " using --port-numa-config and"
492 " --ring-numa-config parameters along with"
/*
 * Body of init_config() — one-time initialization run at startup
 * (the function signature is on lines elided from this extract):
 *   1. allocate per-lcore forwarding contexts,
 *   2. size and create the mbuf pool(s) — one pool in UMA mode, one
 *      pool per CPU socket when NUMA support is enabled,
 *   3. allocate the ports[] array and flag every port for (re)config,
 *   4. bind each lcore to the mbuf pool of its socket,
 *   5. build the forwarding streams.
 * Exits the application on any allocation failure.
 */
504 struct rte_port *port;
505 struct rte_mempool *mbp;
506 unsigned int nb_mbuf_per_pool;
508 uint8_t port_per_socket[MAX_SOCKET];
510 memset(port_per_socket,0,MAX_SOCKET);
511 /* Configuration of logical cores. */
512 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
513 sizeof(struct fwd_lcore *) * nb_lcores,
515 if (fwd_lcores == NULL) {
516 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
517 "failed\n", nb_lcores);
519 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
520 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
521 sizeof(struct fwd_lcore),
523 if (fwd_lcores[lc_id] == NULL) {
524 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
527 fwd_lcores[lc_id]->cpuid_idx = lc_id;
531 * Create pools of mbuf.
532 * If NUMA support is disabled, create a single pool of mbuf in
533 * socket 0 memory by default.
534 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
536 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
537 * nb_txd can be configured at run time.
539 if (param_total_num_mbufs)
540 nb_mbuf_per_pool = param_total_num_mbufs;
/* Default sizing: max descriptors plus per-lcore cache plus one burst. */
542 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
543 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
546 nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
550 if (socket_num == UMA_NO_CONFIG)
551 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
553 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
557 /* Configuration of Ethernet ports. */
558 ports = rte_zmalloc("testpmd: ports",
559 sizeof(struct rte_port) * nb_ports,
562 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
563 "failed\n", nb_ports);
566 for (pid = 0; pid < nb_ports; pid++) {
568 rte_eth_dev_info_get(pid, &port->dev_info);
/* Count ports per socket to size the per-socket mbuf pools below. */
571 if (port_numa[pid] != NUMA_NO_CONFIG)
572 port_per_socket[port_numa[pid]]++;
574 uint32_t socket_id = rte_eth_dev_socket_id(pid);
576 /* if socket_id is invalid, set to 0 */
577 if (check_socket_id(socket_id) < 0)
579 port_per_socket[socket_id]++;
583 /* set flag to initialize port/queue */
584 port->need_reconfig = 1;
585 port->need_reconfig_queues = 1;
/* NUMA path: split the mbuf budget across sockets that have ports. */
590 unsigned int nb_mbuf;
592 if (param_total_num_mbufs)
593 nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
595 for (i = 0; i < MAX_SOCKET; i++) {
596 nb_mbuf = (nb_mbuf_per_pool *
599 mbuf_pool_create(mbuf_data_size,
606 * Records which Mbuf pool to use by each logical core, if needed.
608 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
609 mbp = mbuf_pool_find(rte_lcore_to_socket_id(lc_id));
/* Fall back to the socket-0 pool when the lcore's socket has none. */
611 mbp = mbuf_pool_find(0);
612 fwd_lcores[lc_id]->mbp = mbp;
615 /* Configuration of packet forwarding streams. */
616 if (init_fwd_streams() < 0)
617 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
/*
 * (Re)build the forwarding stream array: validate nb_rxq/nb_txq
 * against each port's capabilities, assign each port a socket id
 * (per --port-numa-config, device locality, or --socket-num), then
 * free any previous stream array and allocate nb_ports * nb_rxq
 * zeroed streams.  Returns 0 on success (return statements are on
 * lines elided from this extract); exits on allocation failure.
 */
621 init_fwd_streams(void)
624 struct rte_port *port;
625 streamid_t sm_id, nb_fwd_streams_new;
627 /* set socket id according to numa or not */
628 for (pid = 0; pid < nb_ports; pid++) {
630 if (nb_rxq > port->dev_info.max_rx_queues) {
631 printf("Fail: nb_rxq(%d) is greater than "
632 "max_rx_queues(%d)\n", nb_rxq,
633 port->dev_info.max_rx_queues);
636 if (nb_txq > port->dev_info.max_tx_queues) {
637 printf("Fail: nb_txq(%d) is greater than "
638 "max_tx_queues(%d)\n", nb_txq,
639 port->dev_info.max_tx_queues);
643 if (port_numa[pid] != NUMA_NO_CONFIG)
644 port->socket_id = port_numa[pid];
646 port->socket_id = rte_eth_dev_socket_id(pid);
648 /* if socket_id is invalid, set to 0 */
649 if (check_socket_id(port->socket_id) < 0)
654 if (socket_num == UMA_NO_CONFIG)
657 port->socket_id = socket_num;
661 nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
/* Nothing to do when the stream count is unchanged. */
662 if (nb_fwd_streams_new == nb_fwd_streams)
665 if (fwd_streams != NULL) {
666 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
667 if (fwd_streams[sm_id] == NULL)
669 rte_free(fwd_streams[sm_id]);
670 fwd_streams[sm_id] = NULL;
672 rte_free(fwd_streams);
677 nb_fwd_streams = nb_fwd_streams_new;
678 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
679 sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
680 if (fwd_streams == NULL)
681 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
682 "failed\n", nb_fwd_streams);
684 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
685 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
686 sizeof(struct fwd_stream), CACHE_LINE_SIZE);
687 if (fwd_streams[sm_id] == NULL)
688 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
/*
 * Print a one-line histogram summary of burst sizes for RX or TX:
 * the total number of bursts and the share of the two most frequent
 * burst sizes (remaining bursts lumped as "others").
 * Compiled only with RTE_TEST_PMD_RECORD_BURST_STATS.
 * NOTE(review): the `total_burst = 0;` initialization and the
 * second-place bookkeeping branch are on lines elided from this extract.
 */
695 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
697 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
699 unsigned int total_burst;
700 unsigned int nb_burst;
701 unsigned int burst_stats[3];
702 uint16_t pktnb_stats[3];
704 int burst_percent[3];
707 * First compute the total number of packet bursts and the
708 * two highest numbers of bursts of the same number of packets.
711 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
712 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
713 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
714 nb_burst = pbs->pkt_burst_spread[nb_pkt];
717 total_burst += nb_burst;
/* New leader: demote the previous best to second place. */
718 if (nb_burst > burst_stats[0]) {
719 burst_stats[1] = burst_stats[0];
720 pktnb_stats[1] = pktnb_stats[0];
721 burst_stats[0] = nb_burst;
722 pktnb_stats[0] = nb_pkt;
725 if (total_burst == 0)
727 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
728 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
729 burst_percent[0], (int) pktnb_stats[0]);
730 if (burst_stats[0] == total_burst) {
734 if (burst_stats[0] + burst_stats[1] == total_burst) {
735 printf(" + %d%% of %d pkts]\n",
736 100 - burst_percent[0], pktnb_stats[1]);
739 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
740 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
/* Percentages that round to zero are folded into "others". */
741 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
742 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
745 printf(" + %d%% of %d pkts + %d%% of others]\n",
746 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
748 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
/*
 * Print the forwarding statistics of one port: packet/drop totals
 * (two formats, depending on whether per-queue stats mapping is
 * enabled), checksum errors in csum mode, mbuf shortages, pause-frame
 * counters, optional burst histograms, flow-director counters, and
 * per-stats-register queue counters.
 */
751 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
753 struct rte_port *port;
756 static const char *fwd_stats_border = "----------------------";
758 port = &ports[port_id];
759 printf("\n %s Forward statistics for port %-2d %s\n",
760 fwd_stats_border, port_id, fwd_stats_border);
/* Wide format when no per-queue stats mapping is configured. */
762 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
763 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
765 stats->ipackets, stats->ierrors,
766 (uint64_t) (stats->ipackets + stats->ierrors));
768 if (cur_fwd_eng == &csum_fwd_engine)
769 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
770 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
772 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
774 stats->opackets, port->tx_dropped,
775 (uint64_t) (stats->opackets + port->tx_dropped));
777 if (stats->rx_nombuf > 0)
778 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
/* Compact format when queue-stats mapping is enabled. */
782 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
784 stats->ipackets, stats->ierrors,
785 (uint64_t) (stats->ipackets + stats->ierrors));
787 if (cur_fwd_eng == &csum_fwd_engine)
788 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
789 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
791 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
793 stats->opackets, port->tx_dropped,
794 (uint64_t) (stats->opackets + port->tx_dropped));
796 if (stats->rx_nombuf > 0)
797 printf(" RX-nombufs:%14"PRIu64"\n", stats->rx_nombuf);
800 /* Display statistics of XON/XOFF pause frames, if any. */
801 if ((stats->tx_pause_xon | stats->rx_pause_xon |
802 stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) {
803 printf(" RX-XOFF: %-14"PRIu64" RX-XON: %-14"PRIu64"\n",
804 stats->rx_pause_xoff, stats->rx_pause_xon);
805 printf(" TX-XOFF: %-14"PRIu64" TX-XON: %-14"PRIu64"\n",
806 stats->tx_pause_xoff, stats->tx_pause_xon);
809 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
811 pkt_burst_stats_display("RX",
812 &port->rx_stream->rx_burst_stats);
814 pkt_burst_stats_display("TX",
815 &port->tx_stream->tx_burst_stats);
818 if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
819 printf(" Fdirmiss:%14"PRIu64" Fdirmatch:%14"PRIu64"\n",
823 if (port->rx_queue_stats_mapping_enabled) {
825 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
826 printf(" Stats reg %2d RX-packets:%14"PRIu64
827 " RX-errors:%14"PRIu64
828 " RX-bytes:%14"PRIu64"\n",
829 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
833 if (port->tx_queue_stats_mapping_enabled) {
834 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
835 printf(" Stats reg %2d TX-packets:%14"PRIu64
836 " TX-bytes:%14"PRIu64"\n",
837 i, stats->q_opackets[i], stats->q_obytes[i]);
841 printf(" %s--------------------------------%s\n",
842 fwd_stats_border, fwd_stats_border);
/*
 * Print the statistics of one forwarding stream (RX port/queue ->
 * TX port/queue).  Streams with no activity are skipped (the early
 * return is on a line elided from this extract).
 */
846 fwd_stream_stats_display(streamid_t stream_id)
848 struct fwd_stream *fs;
849 static const char *fwd_top_stats_border = "-------";
851 fs = fwd_streams[stream_id];
852 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
853 (fs->fwd_dropped == 0))
855 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
856 "TX Port=%2d/Queue=%2d %s\n",
857 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
858 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
859 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
860 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
862 /* if checksum mode */
863 if (cur_fwd_eng == &csum_fwd_engine) {
864 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
865 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
868 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
869 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
870 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
/*
 * Drain any stale packets sitting in the RX queues of all forwarding
 * ports before a run starts.  Two passes with a 10 ms pause in between
 * catch packets still in flight after the first sweep.
 */
875 flush_fwd_rx_queues(void)
877 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
885 for (j = 0; j < 2; j++) {
886 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
887 for (rxq = 0; rxq < nb_rxq; rxq++) {
888 port_id = fwd_ports_ids[rxp];
/* Keep receiving until the queue is empty (loop lines elided). */
890 nb_rx = rte_eth_rx_burst(port_id, rxq,
891 pkts_burst, MAX_PKT_BURST);
892 for (i = 0; i < nb_rx; i++)
893 rte_pktmbuf_free(pkts_burst[i]);
897 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
/*
 * Main forwarding loop of one lcore: repeatedly run the engine's
 * packet_fwd callback over this lcore's slice of the stream array
 * until fc->stopped is set by stop_packet_forwarding().
 */
902 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
904 struct fwd_stream **fsm;
908 fsm = &fwd_streams[fc->stream_idx];
909 nb_fs = fc->stream_nb;
911 for (sm_id = 0; sm_id < nb_fs; sm_id++)
912 (*pkt_fwd)(fsm[sm_id]);
913 } while (! fc->stopped);
/* EAL launch entry point: run the current engine's forwarding loop. */
917 start_pkt_forward_on_core(void *fwd_arg)
919 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
920 cur_fwd_config.fwd_eng->packet_fwd);
/*
 * EAL launch entry point for the "tx first" option: a copy of the
 * lcore context with stopped=1 makes run_pkt_fwd_on_lcore() perform
 * exactly one pass (single TXONLY burst) and return.
 */
925 * Run the TXONLY packet forwarding engine to send a single burst of packets.
926 * Used to start communication flows in network loopback test configurations.
929 run_one_txonly_burst_on_core(void *fwd_arg)
931 struct fwd_lcore *fwd_lc;
932 struct fwd_lcore tmp_lcore;
934 fwd_lc = (struct fwd_lcore *) fwd_arg;
/* Pre-set stopped so the do/while body executes only once. */
936 tmp_lcore.stopped = 1;
937 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
/*
 * Run the engine's per-port begin hook, then launch the given
 * forwarding function on every configured forwarding lcore.
 * In interactive mode the current (master) lcore is skipped so the
 * command line stays responsive.
 */
942 * Launch packet forwarding:
943 * - Setup per-port forwarding context.
944 * - launch logical cores with their forwarding configuration.
947 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
949 port_fwd_begin_t port_fwd_begin;
954 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
955 if (port_fwd_begin != NULL) {
956 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
957 (*port_fwd_begin)(fwd_ports_ids[i]);
959 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
960 lc_id = fwd_lcores_cpuids[i];
961 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
962 fwd_lcores[i]->stopped = 0;
963 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
964 fwd_lcores[i], lc_id);
965 printf("launch lcore %u failed - diag=%d\n",
/*
 * Start a forwarding run: validate preconditions (ports started,
 * not already forwarding, DCB constraints — the enclosing dcb_test
 * check is on lines elided from this extract), flush stale RX
 * packets, snapshot per-port stats as the baseline, zero all
 * per-stream counters, optionally send an initial TXONLY burst
 * (with_tx_first), then launch the forwarding lcores.
 */
973 * Launch packet forwarding configuration.
976 start_packet_forwarding(int with_tx_first)
978 port_fwd_begin_t port_fwd_begin;
979 port_fwd_end_t port_fwd_end;
980 struct rte_port *port;
985 if (all_ports_started() == 0) {
986 printf("Not all ports were started\n");
989 if (test_done == 0) {
990 printf("Packet forwarding already started\n");
/* DCB sanity checks (presumably guarded by dcb_test — elided lines). */
994 for (i = 0; i < nb_fwd_ports; i++) {
995 pt_id = fwd_ports_ids[i];
996 port = &ports[pt_id];
997 if (!port->dcb_flag) {
998 printf("In DCB mode, all forwarding ports must "
999 "be configured in this mode.\n");
1003 if (nb_fwd_lcores == 1) {
1004 printf("In DCB mode,the nb forwarding cores "
1005 "should be larger than 1.\n");
1012 flush_fwd_rx_queues();
1015 rxtx_config_display();
/* Snapshot current HW stats so the run's delta can be computed later. */
1017 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1018 pt_id = fwd_ports_ids[i];
1019 port = &ports[pt_id];
1020 rte_eth_stats_get(pt_id, &port->stats);
1021 port->tx_dropped = 0;
1023 map_port_queue_stats_mapping_registers(pt_id, port);
1025 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1026 fwd_streams[sm_id]->rx_packets = 0;
1027 fwd_streams[sm_id]->tx_packets = 0;
1028 fwd_streams[sm_id]->fwd_dropped = 0;
1029 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1030 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1032 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1033 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1034 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1035 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1036 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1038 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1039 fwd_streams[sm_id]->core_cycles = 0;
/* Optionally prime the loopback with one TXONLY burst on all cores. */
1042 if (with_tx_first) {
1043 port_fwd_begin = tx_only_engine.port_fwd_begin;
1044 if (port_fwd_begin != NULL) {
1045 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1046 (*port_fwd_begin)(fwd_ports_ids[i]);
1048 launch_packet_forwarding(run_one_txonly_burst_on_core);
1049 rte_eal_mp_wait_lcore();
1050 port_fwd_end = tx_only_engine.port_fwd_end;
1051 if (port_fwd_end != NULL) {
1052 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1053 (*port_fwd_end)(fwd_ports_ids[i]);
1056 launch_packet_forwarding(start_pkt_forward_on_core);
/*
 * Stop a forwarding run and report statistics:
 *   1. signal all forwarding lcores to stop and wait for them,
 *   2. run the engine's per-port end hook,
 *   3. fold per-stream counters into per-port totals (displaying
 *      per-stream stats when there is more than one stream per port),
 *   4. compute each port's delta against the baseline snapshot taken
 *      by start_packet_forwarding(), display it, and accumulate
 *      grand totals across all forwarding ports.
 * FIX(review): the baseline reset after the fdirmatch/fdirmiss
 * subtractions wrote port->stats.rx_nombuf = 0 twice (copy-paste),
 * leaving stale fdirmatch/fdirmiss baselines for the next run; it now
 * resets port->stats.fdirmatch and port->stats.fdirmiss respectively.
 * NOTE(review): this extract is sampled — early-return branches,
 * counter initializations and closing braces sit on elided lines.
 */
1060 stop_packet_forwarding(void)
1062 struct rte_eth_stats stats;
1063 struct rte_port *port;
1064 port_fwd_end_t port_fwd_end;
1069 uint64_t total_recv;
1070 uint64_t total_xmit;
1071 uint64_t total_rx_dropped;
1072 uint64_t total_tx_dropped;
1073 uint64_t total_rx_nombuf;
1074 uint64_t tx_dropped;
1075 uint64_t rx_bad_ip_csum;
1076 uint64_t rx_bad_l4_csum;
1077 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1078 uint64_t fwd_cycles;
1080 static const char *acc_stats_border = "+++++++++++++++";
1082 if (all_ports_started() == 0) {
1083 printf("Not all ports were started\n");
1087 printf("Packet forwarding not started\n");
1090 printf("Telling cores to stop...");
1091 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1092 fwd_lcores[lc_id]->stopped = 1;
1093 printf("\nWaiting for lcores to finish...\n");
1094 rte_eal_mp_wait_lcore();
1095 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1096 if (port_fwd_end != NULL) {
1097 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1098 pt_id = fwd_ports_ids[i];
1099 (*port_fwd_end)(pt_id);
1102 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
/* Aggregate per-stream counters into their RX/TX ports. */
1105 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1106 if (cur_fwd_config.nb_fwd_streams >
1107 cur_fwd_config.nb_fwd_ports) {
1108 fwd_stream_stats_display(sm_id);
1109 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1110 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1112 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1114 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1117 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1118 tx_dropped = (uint64_t) (tx_dropped +
1119 fwd_streams[sm_id]->fwd_dropped);
1120 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1123 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1124 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1125 fwd_streams[sm_id]->rx_bad_ip_csum);
1126 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1130 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1131 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1132 fwd_streams[sm_id]->rx_bad_l4_csum);
1133 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1136 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1137 fwd_cycles = (uint64_t) (fwd_cycles +
1138 fwd_streams[sm_id]->core_cycles);
1143 total_rx_dropped = 0;
1144 total_tx_dropped = 0;
1145 total_rx_nombuf = 0;
1146 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1147 pt_id = fwd_ports_ids[i];
1149 port = &ports[pt_id];
/* Subtract the start-of-run baseline to get this run's deltas. */
1150 rte_eth_stats_get(pt_id, &stats);
1151 stats.ipackets -= port->stats.ipackets;
1152 port->stats.ipackets = 0;
1153 stats.opackets -= port->stats.opackets;
1154 port->stats.opackets = 0;
1155 stats.ibytes -= port->stats.ibytes;
1156 port->stats.ibytes = 0;
1157 stats.obytes -= port->stats.obytes;
1158 port->stats.obytes = 0;
1159 stats.ierrors -= port->stats.ierrors;
1160 port->stats.ierrors = 0;
1161 stats.oerrors -= port->stats.oerrors;
1162 port->stats.oerrors = 0;
1163 stats.rx_nombuf -= port->stats.rx_nombuf;
1164 port->stats.rx_nombuf = 0;
1165 stats.fdirmatch -= port->stats.fdirmatch;
1166 port->stats.fdirmatch = 0;
1167 stats.fdirmiss -= port->stats.fdirmiss;
1168 port->stats.fdirmiss = 0;
1170 total_recv += stats.ipackets;
1171 total_xmit += stats.opackets;
1172 total_rx_dropped += stats.ierrors;
1173 total_tx_dropped += port->tx_dropped;
1174 total_rx_nombuf += stats.rx_nombuf;
1176 fwd_port_stats_display(pt_id, &stats);
1178 printf("\n %s Accumulated forward statistics for all ports"
1180 acc_stats_border, acc_stats_border);
1181 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1183 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1185 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1186 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1187 if (total_rx_nombuf > 0)
1188 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1189 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1191 acc_stats_border, acc_stats_border);
1192 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1194 printf("\n CPU cycles/packet=%u (total cycles="
1195 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1196 (unsigned int)(fwd_cycles / total_recv),
1197 fwd_cycles, total_recv);
1199 printf("\nDone.\n");
/*
 * Return a positive value when every probed port is in the
 * RTE_PORT_STARTED state, zero otherwise (return statements are on
 * lines elided from this extract).
 */
1204 all_ports_started(void)
1207 struct rte_port *port;
1209 for (pi = 0; pi < nb_ports; pi++) {
1211 /* Check if there is a port which is not started */
1212 if (port->port_status != RTE_PORT_STARTED)
1216 /* No port is not started */
/*
 * start_port() — start one port, or every port when pid is outside
 * [0, nb_ports) (i.e. RTE_PORT_ALL). For each selected port:
 * transition STOPPED -> HANDLING via atomic cmpset, (re)configure the
 * device and its RX/TX queues if flagged, call rte_eth_dev_start(),
 * then transition HANDLING -> STARTED. On any failure the port is
 * rolled back to STOPPED and the reconfig flag is re-armed so the
 * next invocation retries. (Interior lines are elided in this view.)
 */
1221 start_port(portid_t pid)
1223 int diag, need_check_link_status = 0;
1226 struct rte_port *port;
/* Refuse to reconfigure ports while forwarding is active. */
1228 if (test_done == 0) {
1229 printf("Please stop forwarding first\n");
1233 if (init_fwd_streams() < 0) {
1234 printf("Fail from init_fwd_streams()\n");
1240 for (pi = 0; pi < nb_ports; pi++) {
/* pid < nb_ports selects a single port; otherwise all ports match. */
1241 if (pid < nb_ports && pid != pi)
/* Claim the port: only a STOPPED port may be started. */
1245 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1246 RTE_PORT_HANDLING) == 0) {
1247 printf("Port %d is now not stopped\n", pi);
1251 if (port->need_reconfig > 0) {
1252 port->need_reconfig = 0;
1254 printf("Configuring Port %d (socket %u)\n", pi,
1256 /* configure port */
1257 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
/* Configure failed: roll status back and retry on next start. */
1260 if (rte_atomic16_cmpset(&(port->port_status),
1261 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1262 printf("Port %d can not be set back "
1263 "to stopped\n", pi);
1264 printf("Fail to configure port %d\n", pi);
1265 /* try to reconfigure port next time */
1266 port->need_reconfig = 1;
1270 if (port->need_reconfig_queues > 0) {
1271 port->need_reconfig_queues = 0;
1272 /* setup tx queues */
1273 for (qi = 0; qi < nb_txq; qi++) {
/* With NUMA support, honor the per-port TX ring socket override. */
1274 if ((numa_support) &&
1275 (txring_numa[pi] != NUMA_NO_CONFIG))
1276 diag = rte_eth_tx_queue_setup(pi, qi,
1277 nb_txd,txring_numa[pi],
1280 diag = rte_eth_tx_queue_setup(pi, qi,
1281 nb_txd,port->socket_id,
1287 /* Fail to setup tx queue, return */
1288 if (rte_atomic16_cmpset(&(port->port_status),
1290 RTE_PORT_STOPPED) == 0)
1291 printf("Port %d can not be set back "
1292 "to stopped\n", pi);
1293 printf("Fail to configure port %d tx queues\n", pi);
1294 /* try to reconfigure queues next time */
1295 port->need_reconfig_queues = 1;
1298 /* setup rx queues */
1299 for (qi = 0; qi < nb_rxq; qi++) {
1300 if ((numa_support) &&
1301 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
/* RX ring pinned to a socket: its mbuf pool must exist there. */
1302 struct rte_mempool * mp =
1303 mbuf_pool_find(rxring_numa[pi]);
/*
 * NOTE(review): the adjacent string literals below concatenate
 * without separating spaces, producing "queue:No mempool
 * allocationon the socket" — message needs spaces added.
 */
1305 printf("Failed to setup RX queue:"
1306 "No mempool allocation"
1307 "on the socket %d\n",
1312 diag = rte_eth_rx_queue_setup(pi, qi,
1313 nb_rxd,rxring_numa[pi],
1314 &(port->rx_conf),mp);
1317 diag = rte_eth_rx_queue_setup(pi, qi,
1318 nb_rxd,port->socket_id,
1320 mbuf_pool_find(port->socket_id));
1326 /* Fail to setup rx queue, return */
1327 if (rte_atomic16_cmpset(&(port->port_status),
1329 RTE_PORT_STOPPED) == 0)
1330 printf("Port %d can not be set back "
1331 "to stopped\n", pi);
1332 printf("Fail to configure port %d rx queues\n", pi);
1333 /* try to reconfigure queues next time */
1334 port->need_reconfig_queues = 1;
/* Start the device itself. */
1339 if (rte_eth_dev_start(pi) < 0) {
1340 printf("Fail to start port %d\n", pi);
1342 /* device start failed: roll port status back to STOPPED */
1343 if (rte_atomic16_cmpset(&(port->port_status),
1344 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1345 printf("Port %d can not be set back to "
/* Publish the new state: HANDLING -> STARTED. */
1350 if (rte_atomic16_cmpset(&(port->port_status),
1351 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1352 printf("Port %d can not be set into started\n", pi);
1354 /* at least one port started, need checking link status */
1355 need_check_link_status = 1;
1358 if (need_check_link_status)
1359 check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1361 printf("Please stop the ports first\n");
/*
 * stop_port() — stop one port, or all ports when pid is outside
 * [0, nb_ports). Each selected port moves STARTED -> HANDLING ->
 * STOPPED around rte_eth_dev_stop(); ports not in STARTED state are
 * skipped. (Interior lines are elided in this view.)
 */
1368 stop_port(portid_t pid)
1371 struct rte_port *port;
1372 int need_check_link_status = 0;
/* Refuse to stop ports while forwarding is active. */
1374 if (test_done == 0) {
1375 printf("Please stop forwarding first\n");
1382 printf("Stopping ports...\n");
1384 for (pi = 0; pi < nb_ports; pi++) {
/* pid < nb_ports selects a single port; otherwise all ports match. */
1385 if (pid < nb_ports && pid != pi)
/* Only a STARTED port may be stopped. */
1389 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1390 RTE_PORT_HANDLING) == 0)
1393 rte_eth_dev_stop(pi);
1395 if (rte_atomic16_cmpset(&(port->port_status),
1396 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1397 printf("Port %d can not be set into stopped\n", pi);
1398 need_check_link_status = 1;
1400 if (need_check_link_status)
1401 check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
/*
 * close_port() — close one port, or all ports when pid is outside
 * [0, nb_ports). Each selected port moves STOPPED -> HANDLING ->
 * CLOSED around rte_eth_dev_close(); a port must already be stopped.
 * (Interior lines are elided in this view.)
 */
1407 close_port(portid_t pid)
1410 struct rte_port *port;
/* Refuse to close ports while forwarding is active. */
1412 if (test_done == 0) {
1413 printf("Please stop forwarding first\n");
1417 printf("Closing ports...\n");
1419 for (pi = 0; pi < nb_ports; pi++) {
/* pid < nb_ports selects a single port; otherwise all ports match. */
1420 if (pid < nb_ports && pid != pi)
1424 if (rte_atomic16_cmpset(&(port->port_status),
1425 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1426 printf("Port %d is now not stopped\n", pi);
1430 rte_eth_dev_close(pi);
1432 if (rte_atomic16_cmpset(&(port->port_status),
/*
 * NOTE(review): copied message — this sets the port to CLOSED, so the
 * text should say "closed", not "stopped".
 */
1433 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1434 printf("Port %d can not be set into stopped\n", pi);
/*
 * all_ports_stopped() — scan every known port and report whether all
 * of them are in the RTE_PORT_STOPPED state. (Return statements are
 * elided from this view.)
 */
1441 all_ports_stopped(void)
1444 struct rte_port *port;
1446 for (pi = 0; pi < nb_ports; pi++) {
1448 if (port->port_status != RTE_PORT_STOPPED)
/*
 * NOTE(review): fragment — belongs to the application exit routine
 * whose signature is elided from this view; closes every probed port
 * on shutdown.
 */
1460 for (pt_id = 0; pt_id < nb_ports; pt_id++) {
1461 printf("Stopping port %d...", pt_id);
1463 rte_eth_dev_close(pt_id);
/* Handler signature for a parameterless interactive test command. */
1469 typedef void (*cmd_func_t)(void);
/* Menu entry binding a command name to its handler function. */
1470 struct pmd_test_command {
1471 const char *cmd_name;
1472 cmd_func_t cmd_func;
/* Number of entries in the pmd_test_menu[] table (defined elsewhere). */
1475 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1477 /* Check the link status of all ports in up to 9s, and print them finally */
1479 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
1481 #define CHECK_INTERVAL 100 /* 100ms */
1482 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1483 uint8_t portid, count, all_ports_up, print_flag = 0;
1484 struct rte_eth_link link;
1486 printf("Checking link statuses...\n")
/*
 * Poll every masked port at CHECK_INTERVAL until all links are up or
 * MAX_CHECK_TIME expires; print the final state once (print_flag).
 */
1488 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1490 for (portid = 0; portid < port_num; portid++) {
1491 if ((port_mask & (1 << portid)) == 0)
1493 memset(&link, 0, sizeof(link));
1494 rte_eth_link_get_nowait(portid, &link);
1495 /* print link status if flag set */
1496 if (print_flag == 1) {
1497 if (link.link_status)
/*
 * NOTE(review): the "half-duplex\n" literal carries a stray \n (the
 * format string already ends with one), printing an extra blank line
 * for half-duplex links — copy/paste artifact.
 */
1498 printf("Port %d Link Up - speed %u "
1499 "Mbps - %s\n", (uint8_t)portid,
1500 (unsigned)link.link_speed,
1501 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1502 ("full-duplex") : ("half-duplex\n"));
1504 printf("Port %d Link Down\n",
1508 /* clear all_ports_up flag if any link down */
1509 if (link.link_status == 0) {
1514 /* after finally printing all link status, get out */
1515 if (print_flag == 1)
1518 if (all_ports_up == 0) {
1520 rte_delay_ms(CHECK_INTERVAL);
1523 /* set the print_flag if all ports up or timeout */
1524 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
/*
 * set_tx_queue_stats_mapping_registers() — apply the user-supplied
 * TX-queue -> stats-counter mappings that target this port; marks the
 * port's mapping as enabled when at least one entry applied. Error
 * propagation (diag) is elided from this view.
 */
1531 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1535 uint8_t mapping_found = 0;
1537 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
/* Only mappings for this port and a queue index within nb_txq apply. */
1538 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1539 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1540 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1541 tx_queue_stats_mappings[i].queue_id,
1542 tx_queue_stats_mappings[i].stats_counter_id);
1549 port->tx_queue_stats_mapping_enabled = 1;
/*
 * set_rx_queue_stats_mapping_registers() — RX-side counterpart of the
 * TX mapping routine above: apply user-supplied RX-queue ->
 * stats-counter mappings for this port and flag the port when at
 * least one entry applied. Error propagation (diag) is elided.
 */
1554 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1558 uint8_t mapping_found = 0;
1560 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
/* Only mappings for this port and a queue index within nb_rxq apply. */
1561 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1562 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1563 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1564 rx_queue_stats_mappings[i].queue_id,
1565 rx_queue_stats_mappings[i].stats_counter_id);
1572 port->rx_queue_stats_mapping_enabled = 1;
/*
 * map_port_queue_stats_mapping_registers() — program both TX and RX
 * queue-stats mappings for one port. -ENOTSUP (PMD lacks the feature)
 * is tolerated by disabling the mapping flag; any other error is
 * fatal via rte_exit().
 */
1577 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1581 diag = set_tx_queue_stats_mapping_registers(pi, port);
1583 if (diag == -ENOTSUP) {
1584 port->tx_queue_stats_mapping_enabled = 0;
1585 printf("TX queue stats mapping not supported port id=%d\n", pi);
1588 rte_exit(EXIT_FAILURE,
1589 "set_tx_queue_stats_mapping_registers "
1590 "failed for port id=%d diag=%d\n",
1594 diag = set_rx_queue_stats_mapping_registers(pi, port);
1596 if (diag == -ENOTSUP) {
1597 port->rx_queue_stats_mapping_enabled = 0;
1598 printf("RX queue stats mapping not supported port id=%d\n", pi);
1601 rte_exit(EXIT_FAILURE,
1602 "set_rx_queue_stats_mapping_registers "
1603 "failed for port id=%d diag=%d\n",
/*
 * init_port_config() — build the default rte_eth_conf and queue
 * configuration for every probed port from the global testpmd
 * settings (rx_mode, fdir_conf, RSS hash, thresholds), fetch each
 * port's MAC address, and program queue-stats mappings. (Conditions
 * selecting between the RSS/non-RSS branches are elided in this view.)
 */
1609 init_port_config(void)
1612 struct rte_port *port;
1614 for (pid = 0; pid < nb_ports; pid++) {
1616 port->dev_conf.rxmode = rx_mode;
1617 port->dev_conf.fdir_conf = fdir_conf;
/* Branch 1 (condition elided): enable RSS with the global hash mask. */
1619 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1620 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
/* Branch 2 (condition elided): RSS disabled. */
1622 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1623 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1626 /* In SR-IOV mode, RSS mode is not available */
1627 if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1628 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1629 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1631 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
/* Per-queue thresholds come from the global command-line settings. */
1634 port->rx_conf.rx_thresh = rx_thresh;
1635 port->rx_conf.rx_free_thresh = rx_free_thresh;
1636 port->rx_conf.rx_drop_en = rx_drop_en;
1637 port->tx_conf.tx_thresh = tx_thresh;
1638 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1639 port->tx_conf.tx_free_thresh = tx_free_thresh;
1640 port->tx_conf.txq_flags = txq_flags;
1642 rte_eth_macaddr_get(pid, &port->eth_addr);
1644 map_port_queue_stats_mapping_registers(pid, port);
1645 #ifdef RTE_NIC_BYPASS
1646 rte_eth_dev_bypass_init(pid);
/*
 * VLAN IDs used to populate the VMDQ pool map in the DCB+VT
 * configuration below (one tag per pool, 32 entries).
 */
1651 const uint16_t vlan_tags[] = {
1652 0, 1, 2, 3, 4, 5, 6, 7,
1653 8, 9, 10, 11, 12, 13, 14, 15,
1654 16, 17, 18, 19, 20, 21, 22, 23,
1655 24, 25, 26, 27, 28, 29, 30, 31
/*
 * get_eth_dcb_conf() — fill *eth_conf for DCB operation, either
 * VMDQ+DCB (virtualization-enabled, dcb_mode == DCB_VT_ENABLED) or
 * plain DCB, from the requested traffic-class count and PFC flag.
 *
 * NOTE(review): the "ð_conf" tokens below are an encoding artifact —
 * "&eth_conf" was mangled via the HTML entity "&eth;" into "ð";
 * they must be restored to "&eth_conf" in the real source.
 */
1659 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
1664 * Builds up the correct configuration for dcb+vt based on the vlan tags array
1665 * given above, and the number of traffic classes available for use.
1667 if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
1668 struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
1669 struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
1671 /* VMDQ+DCB RX and TX configrations */
1672 vmdq_rx_conf.enable_default_pool = 0;
1673 vmdq_rx_conf.default_pool = 0;
/* 4 TCs -> 32 pools, otherwise (8 TCs) -> 16 pools. */
1674 vmdq_rx_conf.nb_queue_pools =
1675 (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1676 vmdq_tx_conf.nb_queue_pools =
1677 (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
/* Map each vlan_tags[] entry round-robin onto the queue pools. */
1679 vmdq_rx_conf.nb_pool_maps = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1680 for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
1681 vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[ i ];
1682 vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
/* Identity map: user priority i -> DCB queue i. */
1684 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1685 vmdq_rx_conf.dcb_queue[i] = i;
1686 vmdq_tx_conf.dcb_queue[i] = i;
1689 /*set DCB mode of RX and TX of multiple queues*/
1690 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1691 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1692 if (dcb_conf->pfc_en)
1693 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1695 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1697 (void)(rte_memcpy(ð_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
1698 sizeof(struct rte_eth_vmdq_dcb_conf)));
1699 (void)(rte_memcpy(ð_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
1700 sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
/* Non-VT branch: plain DCB RX/TX configuration. */
1703 struct rte_eth_dcb_rx_conf rx_conf;
1704 struct rte_eth_dcb_tx_conf tx_conf;
1706 /* queue mapping configuration of DCB RX and TX */
1707 if (dcb_conf->num_tcs == ETH_4_TCS)
1708 dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
1710 dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
1712 rx_conf.nb_tcs = dcb_conf->num_tcs;
1713 tx_conf.nb_tcs = dcb_conf->num_tcs;
/* Identity map: user priority i -> DCB queue i. */
1715 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
1716 rx_conf.dcb_queue[i] = i;
1717 tx_conf.dcb_queue[i] = i;
1719 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
1720 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1721 if (dcb_conf->pfc_en)
1722 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1724 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1726 (void)(rte_memcpy(ð_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
1727 sizeof(struct rte_eth_dcb_rx_conf)));
1728 (void)(rte_memcpy(ð_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
1729 sizeof(struct rte_eth_dcb_tx_conf)));
/*
 * init_port_dcb_config() — reconfigure one port for DCB operation:
 * build the DCB rte_eth_conf via get_eth_dcb_conf(), copy it into the
 * port, re-apply queue thresholds, enable HW VLAN filtering, add every
 * vlan_tags[] entry to the port's VLAN filter, and mark dcb_flag.
 * (Error-return paths are elided in this view.)
 */
1736 init_port_dcb_config(portid_t pid,struct dcb_config *dcb_conf)
1738 struct rte_eth_conf port_conf;
1739 struct rte_port *rte_port;
1744 /* rxq and txq configuration in dcb mode */
1747 rx_free_thresh = 64;
1749 memset(&port_conf,0,sizeof(struct rte_eth_conf));
1750 /* Enter DCB configuration status */
1753 nb_vlan = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1754 /*set configuration of DCB in vt mode and DCB in non-vt mode*/
1755 retval = get_eth_dcb_conf(&port_conf, dcb_conf);
1759 rte_port = &ports[pid];
1760 memcpy(&rte_port->dev_conf, &port_conf,sizeof(struct rte_eth_conf));
1762 rte_port->rx_conf.rx_thresh = rx_thresh;
1763 rte_port->rx_conf.rx_free_thresh = rx_free_thresh;
1764 rte_port->tx_conf.tx_thresh = tx_thresh;
1765 rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1766 rte_port->tx_conf.tx_free_thresh = tx_free_thresh;
/* VLAN filtering must be on so the per-tag filters below take effect. */
1768 rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1769 for (i = 0; i < nb_vlan; i++){
1770 rx_vft_set(pid, vlan_tags[i], 1);
1773 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1774 map_port_queue_stats_mapping_registers(pid, rte_port);
1776 rte_port->dcb_flag = 1;
1781 #ifdef RTE_EXEC_ENV_BAREMETAL
/*
 * NOTE(review): fragment — main() continues past the end of this view;
 * code lines are kept byte-identical.
 *
 * Startup sequence: EAL init, PMD driver init, PCI probe, then
 * command-line parsing, port start, promiscuous mode, and either the
 * interactive command line or automatic packet forwarding.
 */
1786 main(int argc, char** argv)
1791 diag = rte_eal_init(argc, argv);
1793 rte_panic("Cannot init EAL\n");
1795 if (rte_pmd_init_all())
1796 rte_panic("Cannot init PMD\n");
1798 if (rte_eal_pci_probe())
1799 rte_panic("Cannot probe PCI\n");
1801 nb_ports = (portid_t) rte_eth_dev_count();
1803 rte_exit(EXIT_FAILURE, "No probed ethernet devices - "
1805 "CONFIG_RTE_LIBRTE_IGB_PMD=y and that "
1806 "CONFIG_RTE_LIBRTE_EM_PMD=y and that "
1807 "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in your "
1808 "configuration file\n");
1810 set_def_fwd_config();
1812 rte_panic("Empty set of forwarding logical cores - check the "
1813 "core mask supplied in the command parameters\n");
1818 launch_args_parse(argc, argv);
/* More RX than TX queues: RSS distributes but TX coverage is partial. */
1820 if (nb_rxq > nb_txq)
1821 printf("Warning: nb_rxq=%d enables RSS configuration, "
1822 "but nb_txq=%d will prevent to fully test it.\n",
1826 if (start_port(RTE_PORT_ALL) != 0)
1827 rte_exit(EXIT_FAILURE, "Start ports failed\n");
1829 /* set all ports to promiscuous mode by default */
1830 for (port_id = 0; port_id < nb_ports; port_id++)
1831 rte_eth_promiscuous_enable(port_id);
1833 #ifdef RTE_LIBRTE_CMDLINE
1834 if (interactive == 1) {
1836 printf("Start automatic packet forwarding\n")
1837 start_packet_forwarding(0);
1846 printf("No commandline core given, start packet forwarding\n");
1847 start_packet_forwarding(0);
1848 printf("Press enter to exit\n");
1849 rc = read(0, &c, 1);