4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <sys/types.h>
44 #include <sys/queue.h>
51 #include <rte_common.h>
52 #include <rte_byteorder.h>
54 #include <rte_debug.h>
55 #include <rte_cycles.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
59 #include <rte_launch.h>
60 #include <rte_tailq.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
70 #include <rte_interrupts.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
80 #include "mempool_osdep.h"
uint16_t verbose_level = 0; /**< Silent by default. */

/* use master core for command line ? */
uint8_t interactive = 0;
uint8_t auto_start = 0;

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 0; /**< No numa support by default */

/*
 * In UMA mode,all memory is allocated from socket 0 if --socket-num is
 * not given.  NOTE(review): original comment appears truncated here.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs.
 * NOTE(review): the variable this comment documents is not visible in this
 * extraction.
 */

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instanciated with the ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports; /**< For all probed ethernet ports. */
portid_t nb_ports; /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores; /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t nb_cfg_ports; /**< Number of configured ports. */
portid_t nb_fwd_ports; /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 * NOTE(review): this initializer is truncated in this extraction — only a
 * few entries and no closing brace are visible.
 */
struct fwd_engine * fwd_engines[] = {
	&mac_retry_fwd_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
				     * specified on command-line. */

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_PKT_BURST; /**< Size of mbuf mempool cache. */

/* current configuration is in DCB or not,0 means it is not in DCB mode */
uint8_t dcb_config = 0;

/* Whether the dcb is in testing status */
uint8_t dcb_test = 0;

/* DCB on and VT on mapping is default */
enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
/*
 * Configurable values of RX and TX ring threshold registers.
 */
#define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
#define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
#define RX_WTHRESH 4 /**< Default value of RX write-back threshold register. */

#define TX_PTHRESH 36 /**< Default value of TX prefetch threshold register. */
#define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
#define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */

/* Default RX ring thresholds, passed to rte_eth_rx_queue_setup(). */
struct rte_eth_thresh rx_thresh = {
	.pthresh = RX_PTHRESH,
	.hthresh = RX_HTHRESH,
	.wthresh = RX_WTHRESH,

/* Default TX ring thresholds, passed to rte_eth_tx_queue_setup(). */
struct rte_eth_thresh tx_thresh = {
	.pthresh = TX_PTHRESH,
	.hthresh = TX_HTHRESH,
	.wthresh = TX_WTHRESH,

/*
 * Configurable value of RX free threshold.
 */
uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */

/*
 * Configurable value of RX drop enable.
 */
uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */

/*
 * Configurable value of TX free threshold.
 */
uint16_t tx_free_thresh = 0; /* Use default values. */

/*
 * Configurable value of TX RS bit threshold.
 */
uint16_t tx_rs_thresh = 0; /* Use default values. */

/*
 * Configurable value of TX queue flags.
 */
uint32_t txq_flags = 0; /* No flags set. */

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint16_t rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoids to flush all the RX streams before starts forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * NIC bypass mode configuration options.
 */
#ifdef RTE_NIC_BYPASS

/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.header_split = 0, /**< Header Split disabled. */
	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
	.hw_vlan_strip = 1, /**< VLAN strip enabled. */
	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
	.jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
	.hw_strip_crc = 0, /**< CRC stripping by hardware disabled. */

/* Flow Director disabled by default. */
struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.flexbytes_offset = 0x6,

/* Set while forwarding is stopped; cleared while lcores are forwarding. */
static volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);
319 * Setup default configuration.
322 set_default_fwd_lcores_config(void)
328 for (i = 0; i < RTE_MAX_LCORE; i++) {
329 if (! rte_lcore_is_enabled(i))
331 if (i == rte_get_master_lcore())
333 fwd_lcores_cpuids[nb_lc++] = i;
335 nb_lcores = (lcoreid_t) nb_lc;
336 nb_cfg_lcores = nb_lcores;
341 set_def_peer_eth_addrs(void)
345 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
346 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
347 peer_eth_addrs[i].addr_bytes[5] = i;
352 set_default_fwd_ports_config(void)
356 for (pt_id = 0; pt_id < nb_ports; pt_id++)
357 fwd_ports_ids[pt_id] = pt_id;
359 nb_cfg_ports = nb_ports;
360 nb_fwd_ports = nb_ports;
/*
 * Reset lcores, peer addresses and ports to their default forwarding
 * configuration.
 * Restored: truncated definition (return type and braces were missing).
 */
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
/*
 * Configuration initialisation done once at init time.
 */

/* Argument passed to testpmd_mbuf_ctor() for each mbuf of a new pool. */
struct mbuf_ctor_arg {
	uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
	uint16_t seg_buf_size;   /**< size of data segment in mbuf. */
};

/* Argument passed to testpmd_mbuf_pool_ctor() when the pool is created. */
struct mbuf_pool_ctor_arg {
	uint16_t seg_buf_size; /**< size of data segment in mbuf. */
};
/*
 * Per-mbuf constructor invoked by the mempool library for every mbuf of a
 * newly created pool: initializes the mbuf's buffer address/length and
 * default packet metadata.
 * NOTE(review): the signature and several interior lines (braces, some
 * parameters and declarations) are missing from this extraction.
 */
testpmd_mbuf_ctor(struct rte_mempool *mp,
		  __attribute__((unused)) unsigned i)
	struct mbuf_ctor_arg *mb_ctor_arg;

	mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
	mb = (struct rte_mbuf *) raw_mbuf;
	mb->type = RTE_MBUF_PKT;
	/* data buffer lives immediately after the rte_mbuf header */
	mb->buf_addr = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
	/* physical address of the data buffer, for DMA by the NIC */
	mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
			mb_ctor_arg->seg_buf_offset);
	mb->buf_len = mb_ctor_arg->seg_buf_size;
	mb->type = RTE_MBUF_PKT;
	/* leave headroom in front of the packet data */
	mb->pkt.data = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
	mb->pkt.vlan_macip.data = 0;
	mb->pkt.hash.rss = 0;
/*
 * Pool constructor invoked once at mempool creation: stores the per-mbuf
 * data room size in the pool's private area so the PMDs can retrieve it.
 * NOTE(review): return type, some parameters and braces are missing from
 * this extraction.
 */
testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
	struct mbuf_pool_ctor_arg *mbp_ctor_arg;
	struct rte_pktmbuf_pool_private *mbp_priv;

	/* sanity check: the pool must reserve enough private space */
	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
		printf("%s(%s) private_data_size %d < %d\n",
		       __func__, mp->name, (int) mp->private_data_size,
		       (int) sizeof(struct rte_pktmbuf_pool_private));
	mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
	mbp_priv = rte_mempool_get_priv(mp);
	mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
/*
 * Create a mempool of nb_mbuf mbufs with mbuf_seg_size data room, allocated
 * on the given CPU socket.  Exits the application if the pool cannot be
 * created; dumps the pool when verbose.
 * NOTE(review): the preprocessor branches below are truncated in this
 * extraction (#else/#endif lines and trailing call arguments are missing).
 */
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp;
	struct mbuf_pool_ctor_arg mbp_ctor_arg;
	struct mbuf_ctor_arg mb_ctor_arg;

	/* each data segment holds headroom + payload */
	mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
	/* data starts right after the cache-line-aligned rte_mbuf header */
	mb_ctor_arg.seg_buf_offset =
		(uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
	mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
	mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
	/* pool name encodes the socket id */
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

#ifdef RTE_LIBRTE_PMD_XENVIRT
	/* Xen grant-table backed pool */
	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
		(unsigned) mb_mempool_cache,
		sizeof(struct rte_pktmbuf_pool_private),
		testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
		testpmd_mbuf_ctor, &mb_ctor_arg,
	/* anonymous-memory pool (branch selector elided in this extraction) */
	rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
		(unsigned) mb_mempool_cache,
		sizeof(struct rte_pktmbuf_pool_private),
		testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
		testpmd_mbuf_ctor, &mb_ctor_arg,
	/* default: regular hugepage-backed mempool */
	rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size,
		(unsigned) mb_mempool_cache,
		sizeof(struct rte_pktmbuf_pool_private),
		testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
		testpmd_mbuf_ctor, &mb_ctor_arg,

	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
			 "failed\n", socket_id);
	} else if (verbose_level > 0) {
		rte_mempool_dump(rte_mp);
/*
 * Check given socket id is valid or not with NUMA mode,
 * if valid, return 0, else return -1
 * NOTE(review): the return statements and closing braces are missing from
 * this extraction.
 */
check_socket_id(const unsigned int socket_id)
	/* emit the NUMA configuration warning at most once per run */
	static int warning_once = 0;

	if (socket_id >= MAX_SOCKET) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
/*
 * NOTE(review): this span is the interior of init_config(); its signature
 * and a number of interior lines (braces, some declarations, conditionals)
 * are missing from this extraction.  Code kept as-is; comments only.
 *
 * Overall flow: allocate per-lcore contexts, size and create the mbuf
 * pool(s), allocate the per-port array, record per-socket port counts for
 * NUMA dispatch, then build the forwarding streams.
 */
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	/* number of probed ports per CPU socket, used for NUMA pool sizing */
	uint8_t port_per_socket[MAX_SOCKET];

	memset(port_per_socket,0,MAX_SOCKET);
	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				 sizeof(struct fwd_lcore *) * nb_lcores,
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
			 "failed\n", nb_lcores);
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
		fwd_lcores[lc_id]->cpuid_idx = lc_id;

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	/* worst-case sizing: RX+TX rings, per-lcore caches and one burst */
	nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
		+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
	nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);

	if (socket_num == UMA_NO_CONFIG)
		mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
	mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,

	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * nb_ports,
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
			 "failed\n", nb_ports);

	for (pid = 0; pid < nb_ports; pid++) {
		rte_eth_dev_info_get(pid, &port->dev_info);

		/* user-forced socket wins over the device's own socket */
		if (port_numa[pid] != NUMA_NO_CONFIG)
			port_per_socket[port_numa[pid]]++;
		uint32_t socket_id = rte_eth_dev_socket_id(pid);

		/* if socket_id is invalid, set to 0 */
		if (check_socket_id(socket_id) < 0)
		port_per_socket[socket_id]++;

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;

	unsigned int nb_mbuf;

	/* split the total mbuf budget across the per-socket pools */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;

	for (i = 0; i < MAX_SOCKET; i++) {
		nb_mbuf = (nb_mbuf_per_pool *
		mbuf_pool_create(mbuf_data_size,

	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(rte_lcore_to_socket_id(lc_id));
		/* fall back to the socket-0 pool (selector line elided) */
		mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
/*
 * Validate the per-port queue counts, assign each port's socket id, and
 * (re)allocate the array of forwarding streams (one per RX queue of each
 * port).  Exits on allocation failure.
 * NOTE(review): the return type, several braces, `return -1` paths and
 * `continue` lines are missing from this extraction.
 */
init_fwd_streams(void)
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;

	/* set socket id according to numa or not */
	for (pid = 0; pid < nb_ports; pid++) {
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
			       "max_rx_queues(%d)\n", nb_rxq,
			       port->dev_info.max_rx_queues);
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
			       "max_tx_queues(%d)\n", nb_txq,
			       port->dev_info.max_tx_queues);
		/* user-forced NUMA config wins over the device socket */
		if (port_numa[pid] != NUMA_NO_CONFIG)
			port->socket_id = port_numa[pid];
		port->socket_id = rte_eth_dev_socket_id(pid);

		/* if socket_id is invalid, set to 0 */
		if (check_socket_id(port->socket_id) < 0)
		if (socket_num == UMA_NO_CONFIG)
		port->socket_id = socket_num;

	/* one stream per (port, rxq) pair */
	nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
	if (nb_fwd_streams_new == nb_fwd_streams)

	/* clear the previous streams before reallocating */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		rte_free(fwd_streams);

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
			 "failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
/*
 * Display the two most frequent burst sizes recorded in a burst-size
 * histogram, as percentages of all bursts.
 * rx_tx: direction label ("RX"/"TX"); pbs: recorded histogram.
 * NOTE(review): some interior lines (total_burst initialization, a couple
 * of `return`s and braces) are missing from this extraction.
 */
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		total_burst += nb_burst;
		/* keep a running top-2 of (burst count, burst size) */
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
	if (total_burst == 0)
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	/* all bursts had the same size: nothing else to report */
	if (burst_stats[0] == total_burst) {
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
/*
 * Display the forwarding statistics of one port: RX/TX packet counts and
 * drops, optional checksum-error counters, pause-frame counters, optional
 * burst-size histograms, flow-director counters and per-queue registers.
 * NOTE(review): several braces and `else` lines are missing from this
 * extraction; the two large printf blocks are the non-mapped vs. mapped
 * queue-stats layouts.
 */
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
	struct rte_port *port;
	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	/* layout without per-queue stats-register mappings */
	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       stats->ipackets, stats->ierrors,
		       (uint64_t) (stats->ipackets + stats->ierrors));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);

		printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));

		if (stats->rx_nombuf > 0)
			printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);

		/* layout with per-queue stats-register mappings */
		printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
		       stats->ipackets, stats->ierrors,
		       (uint64_t) (stats->ipackets + stats->ierrors));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);

		printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));

		if (stats->rx_nombuf > 0)
			printf(" RX-nombufs:%14"PRIu64"\n", stats->rx_nombuf);

	/* Display statistics of XON/XOFF pause frames, if any. */
	if ((stats->tx_pause_xon | stats->rx_pause_xon |
	     stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) {
		printf(" RX-XOFF: %-14"PRIu64" RX-XON: %-14"PRIu64"\n",
		       stats->rx_pause_xoff, stats->rx_pause_xon);
		printf(" TX-XOFF: %-14"PRIu64" TX-XON: %-14"PRIu64"\n",
		       stats->tx_pause_xoff, stats->tx_pause_xon);

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX",
		&port->rx_stream->rx_burst_stats);
	pkt_burst_stats_display("TX",
		&port->tx_stream->tx_burst_stats);

	if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
		printf(" Fdirmiss:%14"PRIu64" Fdirmatch:%14"PRIu64"\n",

	if (port->rx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf(" Stats reg %2d RX-packets:%14"PRIu64
			       " RX-errors:%14"PRIu64
			       " RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);

	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf(" Stats reg %2d TX-packets:%14"PRIu64
			       " TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);

	printf(" %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
/*
 * Display the statistics of one forwarding stream (one RX queue -> TX queue
 * pairing).  Streams with no traffic at all are skipped.
 * NOTE(review): the return type, braces and an early-return line are
 * missing from this extraction.
 */
fwd_stream_stats_display(streamid_t stream_id)
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	/* nothing to report for an idle stream */
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
	printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
/*
 * Drain any stale packets sitting in the RX queues of all forwarding ports
 * before forwarding starts: receive and free bursts until the queues are
 * empty.  Two passes are made, with a short delay between them.
 * NOTE(review): return type, local declarations and several braces are
 * missing from this extraction.
 */
flush_fwd_rx_queues(void)
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				nb_rx = rte_eth_rx_burst(port_id, rxq,
					pkts_burst, MAX_PKT_BURST);
				/* discard everything that was received */
				for (i = 0; i < nb_rx; i++)
					rte_pktmbuf_free(pkts_burst[i]);
	rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
/*
 * Main per-lcore forwarding loop: repeatedly run the packet-forwarding
 * callback over every stream assigned to this lcore until fc->stopped is
 * set by stop_packet_forwarding().
 * NOTE(review): return type, some declarations and the `do {` opener are
 * missing from this extraction.
 */
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
	struct fwd_stream **fsm;

	/* first stream and stream count assigned to this lcore */
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
	} while (! fc->stopped);
919 start_pkt_forward_on_core(void *fwd_arg)
921 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
922 cur_fwd_config.fwd_eng->packet_fwd);
927 * Run the TXONLY packet forwarding engine to send a single burst of packets.
928 * Used to start communication flows in network loopback test configurations.
931 run_one_txonly_burst_on_core(void *fwd_arg)
933 struct fwd_lcore *fwd_lc;
934 struct fwd_lcore tmp_lcore;
936 fwd_lc = (struct fwd_lcore *) fwd_arg;
938 tmp_lcore.stopped = 1;
939 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
/*
 * Launch packet forwarding:
 * - Setup per-port forwarding context.
 * - launch logical cores with their forwarding configuration.
 * NOTE(review): return type, some declarations, braces and the error-path
 * continuation after rte_eal_remote_launch() are missing from this
 * extraction.
 */
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
	port_fwd_begin_t port_fwd_begin;

	/* run the engine's optional per-port begin hook first */
	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		/* in interactive mode the current lcore keeps the shell */
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
				printf("launch lcore %u failed - diag=%d\n",
/*
 * Launch packet forwarding configuration.
 * Validates that all ports are started and forwarding is not already
 * running, checks DCB constraints, flushes stale RX packets, snapshots the
 * per-port stats baselines, clears per-stream counters, then launches the
 * forwarding lcores (optionally preceded by one TXONLY burst).
 * NOTE(review): return type, several early returns, braces and #endif
 * markers are missing from this extraction.
 */
start_packet_forwarding(int with_tx_first)
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t port_fwd_end;
	struct rte_port *port;

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
	/* DCB-mode sanity checks on the forwarding ports */
	for (i = 0; i < nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		if (!port->dcb_flag) {
			printf("In DCB mode, all forwarding ports must "
			       "be configured in this mode.\n");
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode,the nb forwarding cores "
			       "should be larger than 1.\n");
	flush_fwd_rx_queues();

	rxtx_config_display();

	/* snapshot per-port HW stats so later displays can show deltas */
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	/* reset all per-stream software counters */
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
	if (with_tx_first) {
		/* send one TXONLY burst first to prime loopback setups */
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		launch_packet_forwarding(run_one_txonly_burst_on_core);
		rte_eal_mp_wait_lcore();
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
	launch_packet_forwarding(start_pkt_forward_on_core);
1062 stop_packet_forwarding(void)
1064 struct rte_eth_stats stats;
1065 struct rte_port *port;
1066 port_fwd_end_t port_fwd_end;
1071 uint64_t total_recv;
1072 uint64_t total_xmit;
1073 uint64_t total_rx_dropped;
1074 uint64_t total_tx_dropped;
1075 uint64_t total_rx_nombuf;
1076 uint64_t tx_dropped;
1077 uint64_t rx_bad_ip_csum;
1078 uint64_t rx_bad_l4_csum;
1079 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1080 uint64_t fwd_cycles;
1082 static const char *acc_stats_border = "+++++++++++++++";
1084 if (all_ports_started() == 0) {
1085 printf("Not all ports were started\n");
1089 printf("Packet forwarding not started\n");
1092 printf("Telling cores to stop...");
1093 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1094 fwd_lcores[lc_id]->stopped = 1;
1095 printf("\nWaiting for lcores to finish...\n");
1096 rte_eal_mp_wait_lcore();
1097 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1098 if (port_fwd_end != NULL) {
1099 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1100 pt_id = fwd_ports_ids[i];
1101 (*port_fwd_end)(pt_id);
1104 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1107 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1108 if (cur_fwd_config.nb_fwd_streams >
1109 cur_fwd_config.nb_fwd_ports) {
1110 fwd_stream_stats_display(sm_id);
1111 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1112 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1114 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1116 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1119 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1120 tx_dropped = (uint64_t) (tx_dropped +
1121 fwd_streams[sm_id]->fwd_dropped);
1122 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1125 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1126 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1127 fwd_streams[sm_id]->rx_bad_ip_csum);
1128 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1132 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1133 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1134 fwd_streams[sm_id]->rx_bad_l4_csum);
1135 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1138 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1139 fwd_cycles = (uint64_t) (fwd_cycles +
1140 fwd_streams[sm_id]->core_cycles);
1145 total_rx_dropped = 0;
1146 total_tx_dropped = 0;
1147 total_rx_nombuf = 0;
1148 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1149 pt_id = fwd_ports_ids[i];
1151 port = &ports[pt_id];
1152 rte_eth_stats_get(pt_id, &stats);
1153 stats.ipackets -= port->stats.ipackets;
1154 port->stats.ipackets = 0;
1155 stats.opackets -= port->stats.opackets;
1156 port->stats.opackets = 0;
1157 stats.ibytes -= port->stats.ibytes;
1158 port->stats.ibytes = 0;
1159 stats.obytes -= port->stats.obytes;
1160 port->stats.obytes = 0;
1161 stats.ierrors -= port->stats.ierrors;
1162 port->stats.ierrors = 0;
1163 stats.oerrors -= port->stats.oerrors;
1164 port->stats.oerrors = 0;
1165 stats.rx_nombuf -= port->stats.rx_nombuf;
1166 port->stats.rx_nombuf = 0;
1167 stats.fdirmatch -= port->stats.fdirmatch;
1168 port->stats.rx_nombuf = 0;
1169 stats.fdirmiss -= port->stats.fdirmiss;
1170 port->stats.rx_nombuf = 0;
1172 total_recv += stats.ipackets;
1173 total_xmit += stats.opackets;
1174 total_rx_dropped += stats.ierrors;
1175 total_tx_dropped += port->tx_dropped;
1176 total_rx_nombuf += stats.rx_nombuf;
1178 fwd_port_stats_display(pt_id, &stats);
1180 printf("\n %s Accumulated forward statistics for all ports"
1182 acc_stats_border, acc_stats_border);
1183 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1185 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1187 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1188 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1189 if (total_rx_nombuf > 0)
1190 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1191 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1193 acc_stats_border, acc_stats_border);
1194 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1196 printf("\n CPU cycles/packet=%u (total cycles="
1197 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1198 (unsigned int)(fwd_cycles / total_recv),
1199 fwd_cycles, total_recv);
1201 printf("\nDone.\n");
1206 all_ports_started(void)
1209 struct rte_port *port;
1211 for (pi = 0; pi < nb_ports; pi++) {
1213 /* Check if there is a port which is not started */
1214 if (port->port_status != RTE_PORT_STARTED)
1218 /* No port is not started */
/*
 * Start port `pid`, or every port when pid == RTE_PORT_ALL (pid >= nb_ports).
 * For each selected port: atomically claim it (STOPPED -> HANDLING),
 * (re)configure the device and its RX/TX queues if flagged, start it, and
 * finally mark it STARTED.  On any failure the port is put back to STOPPED
 * and the reconfig flag is re-armed so the next attempt retries.
 * Triggers a link-status poll if at least one port was started.
 * NOTE(review): several original lines (returns, braces, error branches)
 * are elided in this extract.
 */
1223 start_port(portid_t pid)
1225 int diag, need_check_link_status = 0;
1228 struct rte_port *port;
/* Refuse to reconfigure ports while forwarding is running. */
1230 if (test_done == 0) {
1231 printf("Please stop forwarding first\n");
1235 if (init_fwd_streams() < 0) {
1236 printf("Fail from init_fwd_streams()\n");
1242 for (pi = 0; pi < nb_ports; pi++) {
/* pid < nb_ports selects a single port; otherwise process all ports. */
1243 if (pid < nb_ports && pid != pi)
/* Atomically move STOPPED -> HANDLING; skip ports in any other state. */
1247 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1248 RTE_PORT_HANDLING) == 0) {
1249 printf("Port %d is now not stopped\n", pi);
/* Device-level reconfiguration (queue counts, dev_conf). */
1253 if (port->need_reconfig > 0) {
1254 port->need_reconfig = 0;
1256 printf("Configuring Port %d (socket %u)\n", pi,
1258 /* configure port */
1259 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
/* On failure: release the port back to STOPPED and retry next time. */
1262 if (rte_atomic16_cmpset(&(port->port_status),
1263 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1264 printf("Port %d can not be set back "
1265 "to stopped\n", pi);
1266 printf("Fail to configure port %d\n", pi);
1267 /* try to reconfigure port next time */
1268 port->need_reconfig = 1;
/* Queue-level reconfiguration (per-queue descriptor rings). */
1272 if (port->need_reconfig_queues > 0) {
1273 port->need_reconfig_queues = 0;
1274 /* setup tx queues */
1275 for (qi = 0; qi < nb_txq; qi++) {
/* Honor an explicit per-port NUMA override for the TX ring,
 * otherwise fall back to the port's own socket. */
1276 if ((numa_support) &&
1277 (txring_numa[pi] != NUMA_NO_CONFIG))
1278 diag = rte_eth_tx_queue_setup(pi, qi,
1279 nb_txd,txring_numa[pi],
1282 diag = rte_eth_tx_queue_setup(pi, qi,
1283 nb_txd,port->socket_id,
1289 /* Fail to setup tx queue, return */
1290 if (rte_atomic16_cmpset(&(port->port_status),
1292 RTE_PORT_STOPPED) == 0)
1293 printf("Port %d can not be set back "
1294 "to stopped\n", pi);
1295 printf("Fail to configure port %d tx queues\n", pi);
1296 /* try to reconfigure queues next time */
1297 port->need_reconfig_queues = 1;
1300 /* setup rx queues */
1301 for (qi = 0; qi < nb_rxq; qi++) {
/* RX rings additionally need a mempool on the chosen socket. */
1302 if ((numa_support) &&
1303 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1304 struct rte_mempool * mp =
1305 mbuf_pool_find(rxring_numa[pi]);
1307 printf("Failed to setup RX queue:"
1308 "No mempool allocation"
1309 "on the socket %d\n",
1314 diag = rte_eth_rx_queue_setup(pi, qi,
1315 nb_rxd,rxring_numa[pi],
1316 &(port->rx_conf),mp);
1319 diag = rte_eth_rx_queue_setup(pi, qi,
1320 nb_rxd,port->socket_id,
1322 mbuf_pool_find(port->socket_id));
1328 /* Fail to setup rx queue, return */
1329 if (rte_atomic16_cmpset(&(port->port_status),
1331 RTE_PORT_STOPPED) == 0)
1332 printf("Port %d can not be set back "
1333 "to stopped\n", pi);
1334 printf("Fail to configure port %d rx queues\n", pi);
1335 /* try to reconfigure queues next time */
1336 port->need_reconfig_queues = 1;
/* Start the device itself; roll back to STOPPED on failure. */
1341 if (rte_eth_dev_start(pi) < 0) {
1342 printf("Fail to start port %d\n", pi);
1344 /* Fail to setup rx queue, return */
1345 if (rte_atomic16_cmpset(&(port->port_status),
1346 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1347 printf("Port %d can not be set back to "
/* Success: publish the STARTED state. */
1352 if (rte_atomic16_cmpset(&(port->port_status),
1353 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1354 printf("Port %d can not be set into started\n", pi);
1356 /* at least one port started, need checking link status */
1357 need_check_link_status = 1;
1360 if (need_check_link_status)
1361 check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1363 printf("Please stop the ports first\n");
/*
 * Stop port `pid`, or every port when pid selects all (pid >= nb_ports).
 * Each port is atomically claimed (STARTED -> HANDLING), the device is
 * stopped, then the port is published as STOPPED.  Finishes with a
 * link-status poll if anything was stopped.
 * NOTE(review): some original lines are elided in this extract.
 */
1370 stop_port(portid_t pid)
1373 struct rte_port *port;
1374 int need_check_link_status = 0;
/* Forwarding must be halted before ports may be stopped. */
1376 if (test_done == 0) {
1377 printf("Please stop forwarding first\n");
1384 printf("Stopping ports...\n");
1386 for (pi = 0; pi < nb_ports; pi++) {
1387 if (pid < nb_ports && pid != pi)
/* Only ports currently STARTED are eligible; others are skipped. */
1391 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1392 RTE_PORT_HANDLING) == 0)
1395 rte_eth_dev_stop(pi);
1397 if (rte_atomic16_cmpset(&(port->port_status),
1398 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1399 printf("Port %d can not be set into stopped\n", pi);
1400 need_check_link_status = 1;
1402 if (need_check_link_status)
1403 check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
/*
 * Close port `pid`, or every port when pid selects all.  Each port is
 * atomically claimed (STOPPED -> HANDLING), the device is closed, and the
 * port is published as CLOSED.
 * NOTE(review): some original lines are elided in this extract.
 */
1409 close_port(portid_t pid)
1412 struct rte_port *port;
1414 if (test_done == 0) {
1415 printf("Please stop forwarding first\n");
1419 printf("Closing ports...\n");
1421 for (pi = 0; pi < nb_ports; pi++) {
1422 if (pid < nb_ports && pid != pi)
/* Only ports already STOPPED may be closed. */
1426 if (rte_atomic16_cmpset(&(port->port_status),
1427 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1428 printf("Port %d is now not stopped\n", pi);
1432 rte_eth_dev_close(pi);
1434 if (rte_atomic16_cmpset(&(port->port_status),
1435 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
/* FIXME: message copy/paste - state being set here is CLOSED,
 * so the text should read "closed", not "stopped". */
1436 printf("Port %d can not be set into stopped\n", pi);
/*
 * Return non-zero iff every probed port is in the RTE_PORT_STOPPED state.
 * NOTE(review): return statements are elided in this extract.
 */
1443 all_ports_stopped(void)
1446 struct rte_port *port;
1448 for (pi = 0; pi < nb_ports; pi++) {
1450 if (port->port_status != RTE_PORT_STOPPED)
/* Shutdown path fragment: close every port on exit.
 * NOTE(review): enclosing function header and trailer are elided. */
1462 for (pt_id = 0; pt_id < nb_ports; pt_id++) {
1463 printf("Stopping port %d...", pt_id);
1465 rte_eth_dev_close(pt_id);
/* Simple command table entry: a name mapped to a no-argument handler,
 * used by the non-interactive test menu. */
1471 typedef void (*cmd_func_t)(void);
1472 struct pmd_test_command {
1473 const char *cmd_name;
1474 cmd_func_t cmd_func;
/* Number of entries in the (elided) pmd_test_menu[] table. */
1477 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1479 /* Check the link status of all ports in up to 9s, and print them finally */
/*
 * Poll link status of ports selected by `port_mask` every CHECK_INTERVAL ms,
 * for at most MAX_CHECK_TIME iterations or until all links are up; on the
 * final pass print each port's status.
 * NOTE(review): some original lines are elided in this extract.
 */
1481 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
1483 #define CHECK_INTERVAL 100 /* 100ms */
1484 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1485 uint8_t portid, count, all_ports_up, print_flag = 0;
1486 struct rte_eth_link link;
1488 printf("Checking link statuses...\n")
1490 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1492 for (portid = 0; portid < port_num; portid++) {
/* Skip ports excluded from the caller's bitmask. */
1493 if ((port_mask & (1 << portid)) == 0)
1495 memset(&link, 0, sizeof(link));
1496 rte_eth_link_get_nowait(portid, &link);
1497 /* print link status if flag set */
1498 if (print_flag == 1) {
1499 if (link.link_status)
1500 printf("Port %d Link Up - speed %u "
1501 "Mbps - %s\n", (uint8_t)portid,
1502 (unsigned)link.link_speed,
1503 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
/* FIXME: stray "\n" inside "half-duplex\n" - the format string
 * already ends with \n, so half-duplex prints a blank line. */
1504 ("full-duplex") : ("half-duplex\n"));
1506 printf("Port %d Link Down\n",
1510 /* clear all_ports_up flag if any link down */
1511 if (link.link_status == 0) {
1516 /* after finally printing all link status, get out */
1517 if (print_flag == 1)
/* Not all links up yet: sleep and poll again. */
1520 if (all_ports_up == 0) {
1522 rte_delay_ms(CHECK_INTERVAL);
1525 /* set the print_flag if all ports up or timeout */
/* NOTE(review): loop runs to count <= MAX_CHECK_TIME but the timeout
 * test uses MAX_CHECK_TIME - 1; confirm the intended boundary. */
1526 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
/*
 * Program the NIC's TX queue -> stats-counter mapping registers for
 * `port_id` from the global tx_queue_stats_mappings[] table; mark the
 * port as mapping-enabled when at least one entry applied.
 * NOTE(review): error-propagation lines are elided in this extract.
 */
1533 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1537 uint8_t mapping_found = 0;
1539 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
/* Apply only entries for this port with an in-range queue id. */
1540 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1541 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1542 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1543 tx_queue_stats_mappings[i].queue_id,
1544 tx_queue_stats_mappings[i].stats_counter_id);
1551 port->tx_queue_stats_mapping_enabled = 1;
/*
 * RX counterpart of set_tx_queue_stats_mapping_registers(): program the
 * RX queue -> stats-counter mapping registers for `port_id` from the
 * global rx_queue_stats_mappings[] table.
 * NOTE(review): error-propagation lines are elided in this extract.
 */
1556 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1560 uint8_t mapping_found = 0;
1562 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
/* Apply only entries for this port with an in-range queue id. */
1563 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1564 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1565 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1566 rx_queue_stats_mappings[i].queue_id,
1567 rx_queue_stats_mappings[i].stats_counter_id);
1574 port->rx_queue_stats_mapping_enabled = 1;
/*
 * Program both TX and RX queue-stats mappings for port `pi`.
 * -ENOTSUP from the PMD is tolerated (feature simply disabled and noted);
 * any other error is fatal and aborts via rte_exit().
 */
1579 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1583 diag = set_tx_queue_stats_mapping_registers(pi, port);
1585 if (diag == -ENOTSUP) {
1586 port->tx_queue_stats_mapping_enabled = 0;
1587 printf("TX queue stats mapping not supported port id=%d\n", pi);
/* Any other non-zero diag is unexpected: abort. */
1590 rte_exit(EXIT_FAILURE,
1591 "set_tx_queue_stats_mapping_registers "
1592 "failed for port id=%d diag=%d\n",
1596 diag = set_rx_queue_stats_mapping_registers(pi, port);
1598 if (diag == -ENOTSUP) {
1599 port->rx_queue_stats_mapping_enabled = 0;
1600 printf("RX queue stats mapping not supported port id=%d\n", pi);
1603 rte_exit(EXIT_FAILURE,
1604 "set_rx_queue_stats_mapping_registers "
1605 "failed for port id=%d diag=%d\n",
/*
 * Apply the global default configuration to every probed port: RX mode,
 * flow-director config, RSS settings, RX/TX ring thresholds, MAC address
 * retrieval, queue-stats mappings, and (optionally) bypass init.
 * NOTE(review): the branch condition selecting between the two RSS
 * settings (rss_hf vs 0) is elided in this extract.
 */
1611 init_port_config(void)
1614 struct rte_port *port;
1616 for (pid = 0; pid < nb_ports; pid++) {
1618 port->dev_conf.rxmode = rx_mode;
1619 port->dev_conf.fdir_conf = fdir_conf;
/* Default hash key (NULL) with the configured hash-function set... */
1621 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1622 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
/* ...or RSS fully disabled (rss_hf = 0) in the other branch. */
1624 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1625 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1628 /* In SR-IOV mode, RSS mode is not available */
1629 if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1630 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1631 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1633 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
/* Per-port ring thresholds come from the global tunables. */
1636 port->rx_conf.rx_thresh = rx_thresh;
1637 port->rx_conf.rx_free_thresh = rx_free_thresh;
1638 port->rx_conf.rx_drop_en = rx_drop_en;
1639 port->tx_conf.tx_thresh = tx_thresh;
1640 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1641 port->tx_conf.tx_free_thresh = tx_free_thresh;
1642 port->tx_conf.txq_flags = txq_flags;
1644 rte_eth_macaddr_get(pid, &port->eth_addr);
1646 map_port_queue_stats_mapping_registers(pid, port);
1647 #ifdef RTE_NIC_BYPASS
1648 rte_eth_dev_bypass_init(pid);
/* VLAN tags (0..31) used to populate the VMDQ+DCB pool map in
 * get_eth_dcb_conf() and to program the VLAN filter table in
 * init_port_dcb_config(). */
1653 const uint16_t vlan_tags[] = {
1654 0, 1, 2, 3, 4, 5, 6, 7,
1655 8, 9, 10, 11, 12, 13, 14, 15,
1656 16, 17, 18, 19, 20, 21, 22, 23,
1657 24, 25, 26, 27, 28, 29, 30, 31
/*
 * Fill `eth_conf` for DCB operation according to `dcb_conf`:
 * - DCB_VT_ENABLED: VMDQ+DCB with pools mapped from vlan_tags[]
 *   (4 TCs -> 32 pools, otherwise 16 pools);
 * - otherwise: plain DCB RX/TX with a 1:1 user-priority -> queue map.
 * PFC capability is advertised only when dcb_conf->pfc_en is set.
 * NOTE(review): some original lines are elided in this extract.
 */
1661 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
1666 * Builds up the correct configuration for dcb+vt based on the vlan tags array
1667 * given above, and the number of traffic classes available for use.
1669 if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
1670 struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
1671 struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
1673 /* VMDQ+DCB RX and TX configrations */
1674 vmdq_rx_conf.enable_default_pool = 0;
1675 vmdq_rx_conf.default_pool = 0;
/* Pool count is derived from the TC count: 4 TCs -> 32 pools,
 * otherwise 16 pools (NIC splits queues between pools and TCs). */
1676 vmdq_rx_conf.nb_queue_pools =
1677 (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1678 vmdq_tx_conf.nb_queue_pools =
1679 (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
/* Map every known VLAN tag round-robin onto the pools. */
1681 vmdq_rx_conf.nb_pool_maps = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1682 for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
1683 vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[ i ];
1684 vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
/* Identity map user priority i -> DCB queue i. */
1686 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1687 vmdq_rx_conf.dcb_queue[i] = i;
1688 vmdq_tx_conf.dcb_queue[i] = i;
1691 /*set DCB mode of RX and TX of multiple queues*/
1692 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1693 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1694 if (dcb_conf->pfc_en)
1695 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1697 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1699 (void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
1700 sizeof(struct rte_eth_vmdq_dcb_conf)));
1701 (void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
1702 sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
/* Non-VT branch: plain DCB RX/TX configuration. */
1705 struct rte_eth_dcb_rx_conf rx_conf;
1706 struct rte_eth_dcb_tx_conf tx_conf;
1708 /* queue mapping configuration of DCB RX and TX */
1709 if (dcb_conf->num_tcs == ETH_4_TCS)
1710 dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
1712 dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
1714 rx_conf.nb_tcs = dcb_conf->num_tcs;
1715 tx_conf.nb_tcs = dcb_conf->num_tcs;
1717 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
1718 rx_conf.dcb_queue[i] = i;
1719 tx_conf.dcb_queue[i] = i;
1721 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
1722 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1723 if (dcb_conf->pfc_en)
1724 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1726 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1728 (void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
1729 sizeof(struct rte_eth_dcb_rx_conf)));
1730 (void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
1731 sizeof(struct rte_eth_dcb_tx_conf)));
/*
 * Switch port `pid` into DCB mode: build a DCB rte_eth_conf via
 * get_eth_dcb_conf(), install it on the port, set DCB-friendly ring
 * thresholds, enable HW VLAN filtering for every tag in vlan_tags[],
 * and flag the port as DCB-enabled.
 * NOTE(review): some original lines are elided in this extract.
 */
1738 init_port_dcb_config(portid_t pid,struct dcb_config *dcb_conf)
1740 struct rte_eth_conf port_conf;
1741 struct rte_port *rte_port;
1746 /* rxq and txq configuration in dcb mode */
1749 rx_free_thresh = 64;
1751 memset(&port_conf,0,sizeof(struct rte_eth_conf));
1752 /* Enter DCB configuration status */
1755 nb_vlan = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1756 /*set configuration of DCB in vt mode and DCB in non-vt mode*/
1757 retval = get_eth_dcb_conf(&port_conf, dcb_conf);
/* Copy the freshly built DCB config into the port's dev_conf. */
1761 rte_port = &ports[pid];
1762 memcpy(&rte_port->dev_conf, &port_conf,sizeof(struct rte_eth_conf));
1764 rte_port->rx_conf.rx_thresh = rx_thresh;
1765 rte_port->rx_conf.rx_free_thresh = rx_free_thresh;
1766 rte_port->tx_conf.tx_thresh = tx_thresh;
1767 rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1768 rte_port->tx_conf.tx_free_thresh = tx_free_thresh;
/* VLAN filtering must accept every tag used by the DCB pool map. */
1770 rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1771 for (i = 0; i < nb_vlan; i++){
1772 rx_vft_set(pid, vlan_tags[i], 1);
1775 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1776 map_port_queue_stats_mapping_registers(pid, rte_port);
1778 rte_port->dcb_flag = 1;
/*
 * Application entry point: initialize EAL, PMDs and the PCI bus, count
 * the probed ports, parse command-line arguments, start all ports in
 * promiscuous mode, then either drop into the interactive command line
 * or start packet forwarding immediately.
 * NOTE(review): this fragment runs past the end of this extract and
 * several original lines are elided.
 */
1783 #ifdef RTE_EXEC_ENV_BAREMETAL
1788 main(int argc, char** argv)
/* EAL must come up first; everything below depends on it. */
1793 diag = rte_eal_init(argc, argv);
1795 rte_panic("Cannot init EAL\n");
1797 if (rte_pmd_init_all())
1798 rte_panic("Cannot init PMD\n");
1800 if (rte_eal_pci_probe())
1801 rte_panic("Cannot probe PCI\n");
1803 nb_ports = (portid_t) rte_eth_dev_count();
/* No usable NICs: point the user at the PMD build options. */
1805 rte_exit(EXIT_FAILURE, "No probed ethernet devices - "
1807 "CONFIG_RTE_LIBRTE_IGB_PMD=y and that "
1808 "CONFIG_RTE_LIBRTE_EM_PMD=y and that "
1809 "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in your "
1810 "configuration file\n");
1812 set_def_fwd_config();
1814 rte_panic("Empty set of forwarding logical cores - check the "
1815 "core mask supplied in the command parameters\n");
1820 launch_args_parse(argc, argv);
/* RSS spreads RX over nb_rxq queues; too few TX queues limits testing. */
1822 if (nb_rxq > nb_txq)
1823 printf("Warning: nb_rxq=%d enables RSS configuration, "
1824 "but nb_txq=%d will prevent to fully test it.\n",
1828 if (start_port(RTE_PORT_ALL) != 0)
1829 rte_exit(EXIT_FAILURE, "Start ports failed\n");
1831 /* set all ports to promiscuous mode by default */
1832 for (port_id = 0; port_id < nb_ports; port_id++)
1833 rte_eth_promiscuous_enable(port_id);
1835 #ifdef RTE_LIBRTE_CMDLINE
1836 if (interactive == 1) {
1838 printf("Start automatic packet forwarding\n")
1839 start_packet_forwarding(0);
/* Non-interactive fallback: forward until the user presses enter. */
1848 printf("No commandline core given, start packet forwarding\n");
1849 start_packet_forwarding(0);
1850 printf("Press enter to exit\n");
1851 rc = read(0, &c, 1);