4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <sys/types.h>
44 #include <sys/queue.h>
51 #include <rte_common.h>
52 #include <rte_byteorder.h>
54 #include <rte_debug.h>
55 #include <rte_cycles.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
59 #include <rte_launch.h>
60 #include <rte_tailq.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
70 #include <rte_interrupts.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
80 #include "mempool_osdep.h"
82 uint16_t verbose_level = 0; /**< Silent by default. */
84 /* use master core for command line ? */
85 uint8_t interactive = 0;
88 * NUMA support configuration.
89 * When set, the NUMA support attempts to dispatch the allocation of the
90 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
91 * probed ports among the CPU sockets 0 and 1.
92 * Otherwise, all memory is allocated from CPU socket 0.
94 uint8_t numa_support = 0; /**< No numa support by default */
97 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
100 uint8_t socket_num = UMA_NO_CONFIG;
103 * Use ANONYMOUS mapped memory (might be not physically contiguous) for mbufs.
108 * Record the Ethernet address of peer target ports to which packets are
110 * Must be instantiated with the Ethernet addresses of peer traffic generator
113 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
114 portid_t nb_peer_eth_addrs = 0;
117 * Probed Target Environment.
119 struct rte_port *ports; /**< For all probed ethernet ports. */
120 portid_t nb_ports; /**< Number of probed ethernet ports. */
121 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
122 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
125 * Test Forwarding Configuration.
126 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
127 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
129 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
130 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
131 portid_t nb_cfg_ports; /**< Number of configured ports. */
132 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
134 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
135 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
137 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
138 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
141 * Forwarding engines.
143 struct fwd_engine * fwd_engines[] = {
149 #ifdef RTE_LIBRTE_IEEE1588
150 &ieee1588_fwd_engine,
155 struct fwd_config cur_fwd_config;
156 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
158 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
159 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
160 * specified on command-line. */
163 * Configuration of packet segments used by the "txonly" processing engine.
165 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
166 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
167 TXONLY_DEF_PACKET_LEN,
169 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
171 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
172 uint16_t mb_mempool_cache = DEF_PKT_BURST; /**< Size of mbuf mempool cache. */
174 /* current configuration is in DCB or not, 0 means it is not in DCB mode */
175 uint8_t dcb_config = 0;
177 /* Whether the dcb is in testing status */
178 uint8_t dcb_test = 0;
180 /* DCB on and VT on mapping is default */
181 enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
184 * Configurable number of RX/TX queues.
186 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
187 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
190 * Configurable number of RX/TX ring descriptors.
192 #define RTE_TEST_RX_DESC_DEFAULT 128
193 #define RTE_TEST_TX_DESC_DEFAULT 512
194 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
195 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
198 * Configurable values of RX and TX ring threshold registers.
200 #define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
201 #define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
202 #define RX_WTHRESH 4 /**< Default value of RX write-back threshold register. */
204 #define TX_PTHRESH 36 /**< Default value of TX prefetch threshold register. */
205 #define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
206 #define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */
208 struct rte_eth_thresh rx_thresh = {
209 .pthresh = RX_PTHRESH,
210 .hthresh = RX_HTHRESH,
211 .wthresh = RX_WTHRESH,
214 struct rte_eth_thresh tx_thresh = {
215 .pthresh = TX_PTHRESH,
216 .hthresh = TX_HTHRESH,
217 .wthresh = TX_WTHRESH,
221 * Configurable value of RX free threshold.
223 uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */
226 * Configurable value of RX drop enable.
228 uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */
231 * Configurable value of TX free threshold.
233 uint16_t tx_free_thresh = 0; /* Use default values. */
236 * Configurable value of TX RS bit threshold.
238 uint16_t tx_rs_thresh = 0; /* Use default values. */
241 * Configurable value of TX queue flags.
243 uint32_t txq_flags = 0; /* No flags set. */
246 * Receive Side Scaling (RSS) configuration.
248 uint16_t rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6; /* RSS IP by default. */
251 * Port topology configuration
253 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
256 * Avoid flushing all the RX streams before starting forwarding.
258 uint8_t no_flush_rx = 0; /* flush by default */
261 * NIC bypass mode configuration options.
263 #ifdef RTE_NIC_BYPASS
265 /* The NIC bypass watchdog timeout. */
266 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
271 * Ethernet device configuration.
273 struct rte_eth_rxmode rx_mode = {
274 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
276 .header_split = 0, /**< Header Split disabled. */
277 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
278 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
279 .hw_vlan_strip = 1, /**< VLAN strip enabled. */
280 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
281 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
282 .hw_strip_crc = 0, /**< CRC stripping by hardware disabled. */
285 struct rte_fdir_conf fdir_conf = {
286 .mode = RTE_FDIR_MODE_NONE,
287 .pballoc = RTE_FDIR_PBALLOC_64K,
288 .status = RTE_FDIR_REPORT_STATUS,
289 .flexbytes_offset = 0x6,
293 static volatile int test_done = 1; /* stop packet forwarding when set to 1. */
295 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
296 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
298 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
299 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
301 uint16_t nb_tx_queue_stats_mappings = 0;
302 uint16_t nb_rx_queue_stats_mappings = 0;
304 /* Forward function declarations */
305 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
306 static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);
309 * Check if all the ports are started.
310 * If yes, return positive value. If not, return zero.
312 static int all_ports_started(void);
315 * Setup default configuration.
/*
 * Build the default forwarding-lcore configuration: collect every enabled
 * lcore except the master (command-line) core into fwd_lcores_cpuids[],
 * then record the count in nb_lcores and nb_cfg_lcores.
 * NOTE(review): source lines are missing from this view (opening brace,
 * declarations of i/nb_lc, the `continue` statements, closing braces) —
 * confirm against the full file before editing.
 */
318 set_default_fwd_lcores_config(void)
324 for (i = 0; i < RTE_MAX_LCORE; i++) {
325 if (! rte_lcore_is_enabled(i)) /* skip lcores not enabled by EAL */
327 if (i == rte_get_master_lcore()) /* master core is kept for the CLI */
329 fwd_lcores_cpuids[nb_lc++] = i;
331 nb_lcores = (lcoreid_t) nb_lc;
332 nb_cfg_lcores = nb_lcores;
/*
 * Initialise the default peer MAC address table: byte 0 is set to
 * ETHER_LOCAL_ADMIN_ADDR (locally administered) and byte 5 to the port
 * index; bytes 1-4 are presumably zeroed elsewhere — TODO confirm.
 * NOTE(review): lines missing from this view (braces, declaration of i).
 */
337 set_def_peer_eth_addrs(void)
341 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
342 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
343 peer_eth_addrs[i].addr_bytes[5] = i;
/*
 * Default forwarding-port configuration: forward on all probed ports,
 * identity-mapped (fwd_ports_ids[i] == i).
 * NOTE(review): braces and the declaration of pt_id are missing from
 * this view.
 */
348 set_default_fwd_ports_config(void)
352 for (pt_id = 0; pt_id < nb_ports; pt_id++)
353 fwd_ports_ids[pt_id] = pt_id;
355 nb_cfg_ports = nb_ports;
356 nb_fwd_ports = nb_ports;
/* Reset lcore, peer-MAC and port forwarding configuration to defaults. */
360 set_def_fwd_config(void)
362 set_default_fwd_lcores_config();
363 set_def_peer_eth_addrs();
364 set_default_fwd_ports_config();
368 * Configuration initialisation done once at init time.
/* Argument handed to testpmd_mbuf_ctor() for each mbuf at pool creation. */
370 struct mbuf_ctor_arg {
371 uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
372 uint16_t seg_buf_size; /**< size of data segment in mbuf. */
/* Argument handed to testpmd_mbuf_pool_ctor() once per pool. */
375 struct mbuf_pool_ctor_arg {
376 uint16_t seg_buf_size; /**< size of data segment in mbuf. */
/*
 * Per-object constructor invoked by the mempool for every mbuf at pool
 * creation time: places the data buffer immediately after the rte_mbuf
 * header inside the same object, records its physical address, and resets
 * the packet metadata (headroom, VLAN/MAC-IP field, RSS hash).
 * Fix: mb->type was assigned RTE_MBUF_PKT twice (orig lines 391 and 397);
 * the redundant first assignment is removed.
 * NOTE(review): lines are missing from this view (parameter list is
 * truncated, braces and the declaration of mb are absent) — confirm
 * against the full file.
 */
380 testpmd_mbuf_ctor(struct rte_mempool *mp,
383 __attribute__((unused)) unsigned i)
385 struct mbuf_ctor_arg *mb_ctor_arg;
388 mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
389 mb = (struct rte_mbuf *) raw_mbuf;
/* data buffer lives inside the same object, right after the mbuf header */
393 mb->buf_addr = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
394 mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
395 mb_ctor_arg->seg_buf_offset);
396 mb->buf_len = mb_ctor_arg->seg_buf_size;
397 mb->type = RTE_MBUF_PKT;
/* packet data starts after the reserved headroom */
399 mb->pkt.data = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
401 mb->pkt.vlan_macip.data = 0;
402 mb->pkt.hash.rss = 0;
/*
 * Pool-level constructor: stores the per-mbuf data-room size in the
 * mempool private area (read back by PMDs via rte_mempool_get_priv()).
 * Prints a diagnostic and bails out (return is among the missing lines)
 * when the private area is too small to hold rte_pktmbuf_pool_private.
 * NOTE(review): parameter list and braces are missing from this view.
 */
406 testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
409 struct mbuf_pool_ctor_arg *mbp_ctor_arg;
410 struct rte_pktmbuf_pool_private *mbp_priv;
412 if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
413 printf("%s(%s) private_data_size %d < %d\n",
414 __func__, mp->name, (int) mp->private_data_size,
415 (int) sizeof(struct rte_pktmbuf_pool_private));
418 mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
419 mbp_priv = rte_mempool_get_priv(mp);
420 mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
/*
 * Create the mbuf pool for `socket_id`, holding `nb_mbuf` mbufs whose data
 * segment is `mbuf_seg_size` bytes (plus headroom). The allocation
 * back-end is chosen at build/run time: Xen grant-alloc pool
 * (RTE_LIBRTE_PMD_XENVIRT), anonymous-mmap pool, or a regular
 * rte_mempool_create(). Exits the program when pool creation fails;
 * dumps the pool when verbose.
 * NOTE(review): several lines (braces, trailing create() arguments such
 * as socket id and flags, #else/#endif lines) are missing from this view.
 */
424 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
425 unsigned int socket_id)
427 char pool_name[RTE_MEMPOOL_NAMESIZE];
428 struct rte_mempool *rte_mp;
429 struct mbuf_pool_ctor_arg mbp_ctor_arg;
430 struct mbuf_ctor_arg mb_ctor_arg;
/* data room = headroom + requested segment size */
433 mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
/* data segment starts on the first cache line after the mbuf header */
435 mb_ctor_arg.seg_buf_offset =
436 (uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
437 mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
438 mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
439 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
441 #ifdef RTE_LIBRTE_PMD_XENVIRT
442 rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
443 (unsigned) mb_mempool_cache,
444 sizeof(struct rte_pktmbuf_pool_private),
445 testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
446 testpmd_mbuf_ctor, &mb_ctor_arg,
453 rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
454 (unsigned) mb_mempool_cache,
455 sizeof(struct rte_pktmbuf_pool_private),
456 testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
457 testpmd_mbuf_ctor, &mb_ctor_arg,
460 rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size,
461 (unsigned) mb_mempool_cache,
462 sizeof(struct rte_pktmbuf_pool_private),
463 testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
464 testpmd_mbuf_ctor, &mb_ctor_arg,
469 if (rte_mp == NULL) {
470 rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
471 "failed\n", socket_id);
472 } else if (verbose_level > 0) {
473 rte_mempool_dump(rte_mp);
/*
 * Body of init_config() — the function header (orig lines 476-480) is
 * missing from this view. One-time initialisation: allocates the
 * fwd_lcores array and one fwd_lcore per enabled lcore, creates the mbuf
 * pool(s) (single socket-0 pool in UMA mode; per-socket pools under NUMA),
 * allocates the ports array, marks every port for (re)configuration, and
 * builds the forwarding streams. Exits the program on any allocation
 * failure.
 */
481 struct rte_port *port;
482 struct rte_mempool *mbp;
483 unsigned int nb_mbuf_per_pool;
485 uint8_t port_per_socket[MAX_SOCKET];
487 memset(port_per_socket,0,MAX_SOCKET);
488 /* Configuration of logical cores. */
489 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
490 sizeof(struct fwd_lcore *) * nb_lcores,
492 if (fwd_lcores == NULL) {
493 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
494 "failed\n", nb_lcores);
496 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
497 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
498 sizeof(struct fwd_lcore),
500 if (fwd_lcores[lc_id] == NULL) {
501 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
504 fwd_lcores[lc_id]->cpuid_idx = lc_id;
508 * Create pools of mbuf.
509 * If NUMA support is disabled, create a single pool of mbuf in
510 * socket 0 memory by default.
511 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
513 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
514 * nb_txd can be configured at run time.
516 if (param_total_num_mbufs)
517 nb_mbuf_per_pool = param_total_num_mbufs;
/* default sizing: worst-case descriptors + per-lcore caches + one burst */
519 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
520 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
523 nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
527 if (socket_num == UMA_NO_CONFIG)
528 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
530 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
534 * Records which Mbuf pool to use by each logical core, if needed.
536 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
537 mbp = mbuf_pool_find(rte_lcore_to_socket_id(lc_id));
/* fall back to the socket-0 pool when the lcore's socket has none */
539 mbp = mbuf_pool_find(0);
540 fwd_lcores[lc_id]->mbp = mbp;
543 /* Configuration of Ethernet ports. */
544 ports = rte_zmalloc("testpmd: ports",
545 sizeof(struct rte_port) * nb_ports,
548 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
549 "failed\n", nb_ports);
552 for (pid = 0; pid < nb_ports; pid++) {
554 rte_eth_dev_info_get(pid, &port->dev_info);
/* count ports per socket to size the per-socket pools (NUMA path) */
557 if (port_numa[pid] != NUMA_NO_CONFIG)
558 port_per_socket[port_numa[pid]]++;
560 uint32_t socket_id = rte_eth_dev_socket_id(pid);
561 port_per_socket[socket_id]++;
565 /* set flag to initialize port/queue */
566 port->need_reconfig = 1;
567 port->need_reconfig_queues = 1;
572 unsigned int nb_mbuf;
574 if (param_total_num_mbufs)
575 nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
577 for (i = 0; i < MAX_SOCKET; i++) {
578 nb_mbuf = (nb_mbuf_per_pool *
581 mbuf_pool_create(mbuf_data_size,
586 /* Configuration of packet forwarding streams. */
587 if (init_fwd_streams() < 0)
588 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
/*
 * (Re)build the forwarding streams, one per (port, RX queue) pair.
 * Validates nb_rxq/nb_txq against each port's device limits (returns an
 * error code — among the missing lines — on violation), assigns each
 * port's socket id, then frees any previous stream array and allocates a
 * fresh zeroed one when the stream count changed.
 * NOTE(review): braces, `return` statements and the declarations of pid
 * are missing from this view — confirm against the full file.
 */
592 init_fwd_streams(void)
595 struct rte_port *port;
596 streamid_t sm_id, nb_fwd_streams_new;
598 /* set socket id according to numa or not */
599 for (pid = 0; pid < nb_ports; pid++) {
601 if (nb_rxq > port->dev_info.max_rx_queues) {
602 printf("Fail: nb_rxq(%d) is greater than "
603 "max_rx_queues(%d)\n", nb_rxq,
604 port->dev_info.max_rx_queues);
607 if (nb_txq > port->dev_info.max_tx_queues) {
608 printf("Fail: nb_txq(%d) is greater than "
609 "max_tx_queues(%d)\n", nb_txq,
610 port->dev_info.max_tx_queues);
614 port->socket_id = rte_eth_dev_socket_id(pid);
616 if (socket_num == UMA_NO_CONFIG)
619 port->socket_id = socket_num;
/* one stream per RX queue of every port; nothing to do if unchanged */
623 nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
624 if (nb_fwd_streams_new == nb_fwd_streams)
627 if (fwd_streams != NULL) {
628 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
629 if (fwd_streams[sm_id] == NULL)
631 rte_free(fwd_streams[sm_id]);
632 fwd_streams[sm_id] = NULL;
634 rte_free(fwd_streams);
639 nb_fwd_streams = nb_fwd_streams_new;
640 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
641 sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
642 if (fwd_streams == NULL)
643 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
644 "failed\n", nb_fwd_streams);
646 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
647 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
648 sizeof(struct fwd_stream), CACHE_LINE_SIZE);
649 if (fwd_streams[sm_id] == NULL)
650 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
657 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
/*
 * Print a one-line summary of the burst-size histogram `pbs` for the
 * given direction label ("RX"/"TX"): total bursts plus the percentage of
 * the one or two dominant burst sizes, lumping the remainder as "others".
 * NOTE(review): braces, `continue`/`return` statements and the
 * declaration of nb_pkt are missing from this view.
 */
659 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
661 unsigned int total_burst;
662 unsigned int nb_burst;
663 unsigned int burst_stats[3];
664 uint16_t pktnb_stats[3];
666 int burst_percent[3];
669 * First compute the total number of packet bursts and the
670 * two highest numbers of bursts of the same number of packets.
673 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
674 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
675 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
676 nb_burst = pbs->pkt_burst_spread[nb_pkt];
679 total_burst += nb_burst;
/* new maximum: demote the old leader to second place */
680 if (nb_burst > burst_stats[0]) {
681 burst_stats[1] = burst_stats[0];
682 pktnb_stats[1] = pktnb_stats[0];
683 burst_stats[0] = nb_burst;
684 pktnb_stats[0] = nb_pkt;
687 if (total_burst == 0)
689 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
690 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
691 burst_percent[0], (int) pktnb_stats[0]);
692 if (burst_stats[0] == total_burst) {
696 if (burst_stats[0] + burst_stats[1] == total_burst) {
697 printf(" + %d%% of %d pkts]\n",
698 100 - burst_percent[0], pktnb_stats[1]);
701 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
702 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
/* avoid printing "0% of N pkts" entries */
703 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
704 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
707 printf(" + %d%% of %d pkts + %d%% of others]\n",
708 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
710 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
/*
 * Print per-port forwarding statistics. Two layouts: a left-aligned one
 * when no queue-stats mapping is enabled on the port, otherwise a
 * right-aligned one followed by per-stats-register RX/TX counters.
 * Checksum-engine error counters and fdir match/miss are shown when
 * applicable; burst spreads when RTE_TEST_PMD_RECORD_BURST_STATS is set.
 * NOTE(review): braces, several printf format/argument lines and the
 * declaration of i are missing from this view.
 */
713 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
715 struct rte_port *port;
718 static const char *fwd_stats_border = "----------------------";
720 port = &ports[port_id];
721 printf("\n %s Forward statistics for port %-2d %s\n",
722 fwd_stats_border, port_id, fwd_stats_border);
724 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
725 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
727 stats->ipackets, stats->ierrors,
728 (uint64_t) (stats->ipackets + stats->ierrors));
730 if (cur_fwd_eng == &csum_fwd_engine)
731 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
732 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
734 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
736 stats->opackets, port->tx_dropped,
737 (uint64_t) (stats->opackets + port->tx_dropped));
739 if (stats->rx_nombuf > 0)
740 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
/* queue-stats-mapping layout (right-aligned columns) */
744 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
746 stats->ipackets, stats->ierrors,
747 (uint64_t) (stats->ipackets + stats->ierrors));
749 if (cur_fwd_eng == &csum_fwd_engine)
750 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
751 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
753 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
755 stats->opackets, port->tx_dropped,
756 (uint64_t) (stats->opackets + port->tx_dropped));
758 if (stats->rx_nombuf > 0)
759 printf(" RX-nombufs:%14"PRIu64"\n", stats->rx_nombuf);
761 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
763 pkt_burst_stats_display("RX",
764 &port->rx_stream->rx_burst_stats);
766 pkt_burst_stats_display("TX",
767 &port->tx_stream->tx_burst_stats);
770 if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
771 printf(" Fdirmiss:%14"PRIu64" Fdirmatch:%14"PRIu64"\n",
/* per-stats-register counters, one line per register */
775 if (port->rx_queue_stats_mapping_enabled) {
777 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
778 printf(" Stats reg %2d RX-packets:%14"PRIu64
779 " RX-errors:%14"PRIu64
780 " RX-bytes:%14"PRIu64"\n",
781 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
785 if (port->tx_queue_stats_mapping_enabled) {
786 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
787 printf(" Stats reg %2d TX-packets:%14"PRIu64
788 " TX-bytes:%14"PRIu64"\n",
789 i, stats->q_opackets[i], stats->q_obytes[i]);
793 printf(" %s--------------------------------%s\n",
794 fwd_stats_border, fwd_stats_border);
/*
 * Print statistics for one forwarding stream (RX port/queue -> TX
 * port/queue): packet counts, drops, checksum errors when the csum
 * engine is active, and burst spreads when compiled in. Streams with no
 * activity are skipped (the early return is among the missing lines).
 */
798 fwd_stream_stats_display(streamid_t stream_id)
800 struct fwd_stream *fs;
801 static const char *fwd_top_stats_border = "-------";
803 fs = fwd_streams[stream_id];
804 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
805 (fs->fwd_dropped == 0))
807 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
808 "TX Port=%2d/Queue=%2d %s\n",
809 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
810 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
811 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
812 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
814 /* if checksum mode */
815 if (cur_fwd_eng == &csum_fwd_engine) {
816 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
817 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
820 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
821 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
822 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
/*
 * Drain every RX queue of every forwarding port before a run: receive
 * and free bursts until the queues are empty, doing two full passes with
 * a 10 ms pause in between to catch late arrivals.
 * NOTE(review): braces, the inner drain loop condition and the
 * declarations of the loop variables are missing from this view.
 */
827 flush_fwd_rx_queues(void)
829 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
837 for (j = 0; j < 2; j++) {
838 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
839 for (rxq = 0; rxq < nb_rxq; rxq++) {
840 port_id = fwd_ports_ids[rxp];
842 nb_rx = rte_eth_rx_burst(port_id, rxq,
843 pkts_burst, MAX_PKT_BURST);
844 for (i = 0; i < nb_rx; i++)
845 rte_pktmbuf_free(pkts_burst[i]);
849 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
/*
 * Main per-lcore forwarding loop: repeatedly invoke `pkt_fwd` on every
 * stream assigned to lcore context `fc` until fc->stopped is raised by
 * stop_packet_forwarding().
 * NOTE(review): braces, the `do {` line and declarations of fsm index
 * variables are missing from this view.
 */
854 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
856 struct fwd_stream **fsm;
860 fsm = &fwd_streams[fc->stream_idx];
861 nb_fs = fc->stream_nb;
863 for (sm_id = 0; sm_id < nb_fs; sm_id++)
864 (*pkt_fwd)(fsm[sm_id]);
865 } while (! fc->stopped);
/*
 * lcore entry point (rte_eal_remote_launch callback): run the currently
 * configured forwarding engine on this lcore's streams.
 */
869 start_pkt_forward_on_core(void *fwd_arg)
871 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
872 cur_fwd_config.fwd_eng->packet_fwd);
877 * Run the TXONLY packet forwarding engine to send a single burst of packets.
878 * Used to start communication flows in network loopback test configurations.
/*
 * lcore entry point that sends exactly one TXONLY burst: a temporary
 * lcore context with `stopped` pre-set to 1 makes the forwarding loop
 * execute a single iteration. tmp_lcore is presumably copied from
 * *fwd_lc in a line missing from this view — TODO confirm.
 */
881 run_one_txonly_burst_on_core(void *fwd_arg)
883 struct fwd_lcore *fwd_lc;
884 struct fwd_lcore tmp_lcore;
886 fwd_lc = (struct fwd_lcore *) fwd_arg;
888 tmp_lcore.stopped = 1;
889 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
894 * Launch packet forwarding:
895 * - Setup per-port forwarding context.
896 * - launch logical cores with their forwarding configuration.
/*
 * Launch `pkt_fwd_on_lcore` on every configured forwarding lcore after
 * running the engine's optional per-port begin callback. In interactive
 * mode the current (CLI) lcore is skipped so the command line stays
 * responsive; a failed remote launch is reported but not fatal.
 * NOTE(review): braces and the declarations of port_fwd_begin loop
 * variables (i, lc_id, diag) are missing from this view.
 */
899 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
901 port_fwd_begin_t port_fwd_begin;
906 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
907 if (port_fwd_begin != NULL) {
908 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
909 (*port_fwd_begin)(fwd_ports_ids[i]);
911 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
912 lc_id = fwd_lcores_cpuids[i];
913 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
914 fwd_lcores[i]->stopped = 0;
915 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
916 fwd_lcores[i], lc_id);
918 printf("launch lcore %u failed - diag=%d\n",
925 * Launch packet forwarding configuration.
/*
 * Start packet forwarding with the current configuration. Guards: all
 * ports started, forwarding not already running, and (in DCB mode) all
 * ports DCB-configured with more than one forwarding core. Snapshots
 * per-port stats and zeroes per-stream counters so the subsequent stop
 * can report deltas, then launches the forwarding lcores — optionally
 * preceded by a single TXONLY burst (`with_tx_first`) to seed loopback
 * topologies.
 * NOTE(review): many lines (braces, returns, declarations of i/pt_id/
 * sm_id, the flush/config conditionals) are missing from this view.
 */
928 start_packet_forwarding(int with_tx_first)
930 port_fwd_begin_t port_fwd_begin;
931 port_fwd_end_t port_fwd_end;
932 struct rte_port *port;
937 if (all_ports_started() == 0) {
938 printf("Not all ports were started\n");
941 if (test_done == 0) {
942 printf("Packet forwarding already started\n");
/* DCB-mode sanity checks over the forwarding port set */
946 for (i = 0; i < nb_fwd_ports; i++) {
947 pt_id = fwd_ports_ids[i];
948 port = &ports[pt_id];
949 if (!port->dcb_flag) {
950 printf("In DCB mode, all forwarding ports must "
951 "be configured in this mode.\n");
955 if (nb_fwd_lcores == 1) {
956 printf("In DCB mode,the nb forwarding cores "
957 "should be larger than 1.\n");
964 flush_fwd_rx_queues();
967 rxtx_config_display();
/* snapshot port stats so stop_packet_forwarding() can show deltas */
969 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
970 pt_id = fwd_ports_ids[i];
971 port = &ports[pt_id];
972 rte_eth_stats_get(pt_id, &port->stats);
973 port->tx_dropped = 0;
975 map_port_queue_stats_mapping_registers(pt_id, port);
977 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
978 fwd_streams[sm_id]->rx_packets = 0;
979 fwd_streams[sm_id]->tx_packets = 0;
980 fwd_streams[sm_id]->fwd_dropped = 0;
981 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
982 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
984 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
985 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
986 sizeof(fwd_streams[sm_id]->rx_burst_stats));
987 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
988 sizeof(fwd_streams[sm_id]->tx_burst_stats));
990 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
991 fwd_streams[sm_id]->core_cycles = 0;
/* with_tx_first: one TXONLY burst first to prime loopback flows */
995 port_fwd_begin = tx_only_engine.port_fwd_begin;
996 if (port_fwd_begin != NULL) {
997 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
998 (*port_fwd_begin)(fwd_ports_ids[i]);
1000 launch_packet_forwarding(run_one_txonly_burst_on_core);
1001 rte_eal_mp_wait_lcore();
1002 port_fwd_end = tx_only_engine.port_fwd_end;
1003 if (port_fwd_end != NULL) {
1004 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1005 (*port_fwd_end)(fwd_ports_ids[i]);
1008 launch_packet_forwarding(start_pkt_forward_on_core);
/*
 * Stop packet forwarding: signal every forwarding lcore to stop, wait for
 * them, run the engine's per-port end callback, aggregate per-stream
 * counters into the ports, then print per-port deltas (versus the
 * snapshot taken at start) and the accumulated totals.
 * Fix: the fdir counter resets were copy-paste bugs — after subtracting
 * stats.fdirmatch / stats.fdirmiss, the code reset port->stats.rx_nombuf
 * (orig lines 1118 and 1120) instead of port->stats.fdirmatch /
 * port->stats.fdirmiss, so those two snapshot counters were never
 * cleared between runs.
 * NOTE(review): many lines (braces, returns, declarations of i/pt_id/
 * lc_id/sm_id, printf format continuations) are missing from this view.
 */
1012 stop_packet_forwarding(void)
1014 struct rte_eth_stats stats;
1015 struct rte_port *port;
1016 port_fwd_end_t port_fwd_end;
1021 uint64_t total_recv;
1022 uint64_t total_xmit;
1023 uint64_t total_rx_dropped;
1024 uint64_t total_tx_dropped;
1025 uint64_t total_rx_nombuf;
1026 uint64_t tx_dropped;
1027 uint64_t rx_bad_ip_csum;
1028 uint64_t rx_bad_l4_csum;
1029 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1030 uint64_t fwd_cycles;
1032 static const char *acc_stats_border = "+++++++++++++++";
1034 if (all_ports_started() == 0) {
1035 printf("Not all ports were started\n");
1039 printf("Packet forwarding not started\n");
1042 printf("Telling cores to stop...");
1043 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1044 fwd_lcores[lc_id]->stopped = 1;
1045 printf("\nWaiting for lcores to finish...\n");
1046 rte_eal_mp_wait_lcore();
1047 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1048 if (port_fwd_end != NULL) {
1049 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1050 pt_id = fwd_ports_ids[i];
1051 (*port_fwd_end)(pt_id);
1054 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
/* fold every stream's counters back into its RX/TX ports */
1057 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1058 if (cur_fwd_config.nb_fwd_streams >
1059 cur_fwd_config.nb_fwd_ports) {
1060 fwd_stream_stats_display(sm_id);
1061 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1062 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1064 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1066 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1069 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1070 tx_dropped = (uint64_t) (tx_dropped +
1071 fwd_streams[sm_id]->fwd_dropped);
1072 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1075 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1076 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1077 fwd_streams[sm_id]->rx_bad_ip_csum);
1078 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1082 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1083 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1084 fwd_streams[sm_id]->rx_bad_l4_csum);
1085 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1088 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1089 fwd_cycles = (uint64_t) (fwd_cycles +
1090 fwd_streams[sm_id]->core_cycles);
1095 total_rx_dropped = 0;
1096 total_tx_dropped = 0;
1097 total_rx_nombuf = 0;
/* per-port: delta against the snapshot, then clear the snapshot */
1098 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1099 pt_id = fwd_ports_ids[i];
1101 port = &ports[pt_id];
1102 rte_eth_stats_get(pt_id, &stats);
1103 stats.ipackets -= port->stats.ipackets;
1104 port->stats.ipackets = 0;
1105 stats.opackets -= port->stats.opackets;
1106 port->stats.opackets = 0;
1107 stats.ibytes -= port->stats.ibytes;
1108 port->stats.ibytes = 0;
1109 stats.obytes -= port->stats.obytes;
1110 port->stats.obytes = 0;
1111 stats.ierrors -= port->stats.ierrors;
1112 port->stats.ierrors = 0;
1113 stats.oerrors -= port->stats.oerrors;
1114 port->stats.oerrors = 0;
1115 stats.rx_nombuf -= port->stats.rx_nombuf;
1116 port->stats.rx_nombuf = 0;
1117 stats.fdirmatch -= port->stats.fdirmatch;
1118 port->stats.fdirmatch = 0;
1119 stats.fdirmiss -= port->stats.fdirmiss;
1120 port->stats.fdirmiss = 0;
1122 total_recv += stats.ipackets;
1123 total_xmit += stats.opackets;
1124 total_rx_dropped += stats.ierrors;
1125 total_tx_dropped += port->tx_dropped;
1126 total_rx_nombuf += stats.rx_nombuf;
1128 fwd_port_stats_display(pt_id, &stats);
1130 printf("\n %s Accumulated forward statistics for all ports"
1132 acc_stats_border, acc_stats_border);
1133 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1135 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1137 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1138 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1139 if (total_rx_nombuf > 0)
1140 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1141 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1143 acc_stats_border, acc_stats_border);
1144 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1146 printf("\n CPU cycles/packet=%u (total cycles="
1147 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1148 (unsigned int)(fwd_cycles / total_recv),
1149 fwd_cycles, total_recv);
1151 printf("\nDone.\n");
/*
 * Return non-zero when every probed port is in RTE_PORT_STARTED state,
 * zero otherwise (the return statements are among the missing lines).
 * NOTE(review): braces, the declaration of pi and the port assignment
 * line are missing from this view.
 */
1156 all_ports_started(void)
1159 struct rte_port *port;
1161 for (pi = 0; pi < nb_ports; pi++) {
1163 /* Check if there is a port which is not started */
1164 if (port->port_status != RTE_PORT_STARTED)
1168 /* No port is not started */
/*
 * Start one port (pid) or all ports (pid == RTE_PORT_ALL).
 *
 * For each selected port the function:
 *   1. atomically moves the port from RTE_PORT_STOPPED to
 *      RTE_PORT_HANDLING (skipping ports not currently stopped);
 *   2. (re)configures the device if need_reconfig is set;
 *   3. (re)creates TX then RX queues if need_reconfig_queues is set,
 *      honouring per-ring NUMA socket overrides when numa_support is on;
 *   4. calls rte_eth_dev_start() and finally marks the port
 *      RTE_PORT_STARTED.
 * On any failure the port is rolled back to RTE_PORT_STOPPED and the
 * relevant need_reconfig* flag is re-armed so the next invocation
 * retries the step.  After the loop, link status of all ports is
 * polled if at least one port was started.
 *
 * NOTE(review): several lines (returns, `continue`s, closing braces,
 * the per-iteration `port = &ports[pi];` lookup) are elided from this
 * excerpt -- the comments above describe only what the visible code
 * demonstrates.
 */
1173 start_port(portid_t pid)
1175 	int diag, need_check_link_status = 0;
1178 	struct rte_port *port;
1180 	if (test_done == 0) {
1181 		printf("Please stop forwarding first\n");
1185 	if (init_fwd_streams() < 0) {
1186 		printf("Fail from init_fwd_streams()\n");
1192 	for (pi = 0; pi < nb_ports; pi++) {
		/* pid < nb_ports selects a single port; otherwise all ports */
1193 		if (pid < nb_ports && pid != pi)
		/* claim the port: only a STOPPED port may be started */
1197 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1198 						RTE_PORT_HANDLING) == 0) {
1199 			printf("Port %d is now not stopped\n", pi);
1203 		if (port->need_reconfig > 0) {
1204 			port->need_reconfig = 0;
1206 			printf("Configuring Port %d (socket %d)\n", pi,
1207 					rte_eth_dev_socket_id(pi));
1208 			/* configure port */
1209 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
			/* configure failed: roll back state and retry later */
1212 				if (rte_atomic16_cmpset(&(port->port_status),
1213 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1214 					printf("Port %d can not be set back "
1215 							"to stopped\n", pi);
1216 				printf("Fail to configure port %d\n", pi);
1217 				/* try to reconfigure port next time */
1218 				port->need_reconfig = 1;
1222 		if (port->need_reconfig_queues > 0) {
1223 			port->need_reconfig_queues = 0;
1224 			/* setup tx queues */
1225 			for (qi = 0; qi < nb_txq; qi++) {
				/* per-ring NUMA socket override, when configured */
1226 				if ((numa_support) &&
1227 					(txring_numa[pi] != NUMA_NO_CONFIG))
1228 					diag = rte_eth_tx_queue_setup(pi, qi,
1229 						nb_txd,txring_numa[pi],
1232 					diag = rte_eth_tx_queue_setup(pi, qi,
1233 						nb_txd,port->socket_id,
1239 				/* Fail to setup tx queue, return */
1240 				if (rte_atomic16_cmpset(&(port->port_status),
1242 							RTE_PORT_STOPPED) == 0)
1243 					printf("Port %d can not be set back "
1244 							"to stopped\n", pi);
1245 				printf("Fail to configure port %d tx queues\n", pi);
1246 				/* try to reconfigure queues next time */
1247 				port->need_reconfig_queues = 1;
1250 			/* setup rx queues */
1251 			for (qi = 0; qi < nb_rxq; qi++) {
1252 				if ((numa_support) &&
1253 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					/* RX ring on an explicit socket needs a
					 * mempool on that same socket */
1254 					struct rte_mempool * mp =
1255 						mbuf_pool_find(rxring_numa[pi]);
1257 						printf("Failed to setup RX queue:"
1258 							"No mempool allocation"
1259 							"on the socket %d\n",
1264 					diag = rte_eth_rx_queue_setup(pi, qi,
1265 						nb_rxd,rxring_numa[pi],
1266 						&(port->rx_conf),mp);
1269 					diag = rte_eth_rx_queue_setup(pi, qi,
1270 						nb_rxd,port->socket_id,
1272 						mbuf_pool_find(port->socket_id));
1278 				/* Fail to setup rx queue, return */
1279 				if (rte_atomic16_cmpset(&(port->port_status),
1281 							RTE_PORT_STOPPED) == 0)
1282 					printf("Port %d can not be set back "
1283 							"to stopped\n", pi);
1284 				printf("Fail to configure port %d rx queues\n", pi);
1285 				/* try to reconfigure queues next time */
1286 				port->need_reconfig_queues = 1;
		/* start the device itself */
1291 		if (rte_eth_dev_start(pi) < 0) {
1292 			printf("Fail to start port %d\n", pi);
1294 			/* Fail to setup rx queue, return */
1295 			if (rte_atomic16_cmpset(&(port->port_status),
1296 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1297 				printf("Port %d can not be set back to "
1302 		if (rte_atomic16_cmpset(&(port->port_status),
1303 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1304 			printf("Port %d can not be set into started\n", pi);
1306 		/* at least one port started, need checking link status */
1307 		need_check_link_status = 1;
1310 	if (need_check_link_status)
1311 		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1313 		printf("Please stop the ports first\n");
/*
 * Stop one port (pid) or all ports (pid == RTE_PORT_ALL).
 * Refuses to run while forwarding is active (test_done == 0).
 * Each selected port is atomically moved STARTED -> HANDLING, stopped
 * via rte_eth_dev_stop(), then moved HANDLING -> STOPPED.  If at least
 * one port was stopped, link status of all ports is re-polled.
 * NOTE(review): returns/continues and the `port = &ports[pi];` lookup
 * are elided from this excerpt.
 */
1320 stop_port(portid_t pid)
1323 	struct rte_port *port;
1324 	int need_check_link_status = 0;
1326 	if (test_done == 0) {
1327 		printf("Please stop forwarding first\n");
1334 	printf("Stopping ports...\n");
1336 	for (pi = 0; pi < nb_ports; pi++) {
1337 		if (pid < nb_ports && pid != pi)
		/* only a STARTED port can be stopped */
1341 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1342 						RTE_PORT_HANDLING) == 0)
1345 		rte_eth_dev_stop(pi);
1347 		if (rte_atomic16_cmpset(&(port->port_status),
1348 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1349 			printf("Port %d can not be set into stopped\n", pi);
1350 		need_check_link_status = 1;
1352 	if (need_check_link_status)
1353 		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
/*
 * Close one port (pid) or all ports (pid == RTE_PORT_ALL).
 * Refuses to run while forwarding is active.  Each selected port is
 * atomically moved STOPPED -> HANDLING, closed via rte_eth_dev_close(),
 * then moved HANDLING -> RTE_PORT_CLOSED.
 * NOTE(review): the failure message on the final cmpset says "stopped"
 * although the target state is CLOSED -- likely a copy/paste slip in
 * the message text; confirm against the full source before changing it.
 */
1359 close_port(portid_t pid)
1362 	struct rte_port *port;
1364 	if (test_done == 0) {
1365 		printf("Please stop forwarding first\n");
1369 	printf("Closing ports...\n");
1371 	for (pi = 0; pi < nb_ports; pi++) {
1372 		if (pid < nb_ports && pid != pi)
		/* only a STOPPED port can be closed */
1376 		if (rte_atomic16_cmpset(&(port->port_status),
1377 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1378 			printf("Port %d is now not stopped\n", pi);
1382 		rte_eth_dev_close(pi);
1384 		if (rte_atomic16_cmpset(&(port->port_status),
1385 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1386 			printf("Port %d can not be set into stopped\n", pi);
/*
 * Report whether every probed port is in the RTE_PORT_STOPPED state.
 * Mirrors all_ports_started() with the opposite target state.
 * NOTE(review): return statements and the port lookup are outside this
 * excerpt -- confirm against the full source.
 */
1393 all_ports_stopped(void)
1396 	struct rte_port *port;
1398 	for (pi = 0; pi < nb_ports; pi++) {
1400 		if (port->port_status != RTE_PORT_STOPPED)
1412 for (pt_id = 0; pt_id < nb_ports; pt_id++) {
1413 printf("Stopping port %d...", pt_id);
1415 rte_eth_dev_close(pt_id);
/* Nullary command handler invoked for a named test-menu entry. */
1421 typedef void (*cmd_func_t)(void);
/* One entry of the PMD test menu: command name and its handler. */
1422 struct pmd_test_command {
1423 	const char *cmd_name;
1424 	cmd_func_t cmd_func;
/* Number of entries in the (elsewhere-defined) pmd_test_menu[] table. */
1427 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1429 /* Check the link status of all ports in up to 9s, and print them finally */
/*
 * Poll link status of all ports selected by port_mask, retrying every
 * CHECK_INTERVAL ms for at most MAX_CHECK_TIME iterations (~9s total).
 * Status lines are printed only once, on the final pass (print_flag),
 * which is triggered either when all ports report link-up or when the
 * timeout is about to expire.
 * NOTE(review): the "half-duplex\n" literal carries an embedded newline
 * in addition to the format string's own "\n", so half-duplex ports
 * print a blank line after their status -- flagged for a later fix
 * (string literals are intentionally left untouched here).
 * NOTE(review): loop-control lines (all_ports_up init, continue/break,
 * print_flag assignment) are elided from this excerpt.
 */
1431 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
1433 #define CHECK_INTERVAL 100 /* 100ms */
1434 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1435 	uint8_t portid, count, all_ports_up, print_flag = 0;
1436 	struct rte_eth_link link;
1438 	printf("Checking link statuses...\n");
1440 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1442 		for (portid = 0; portid < port_num; portid++) {
1443 			if ((port_mask & (1 << portid)) == 0)
1445 			memset(&link, 0, sizeof(link));
1446 			rte_eth_link_get_nowait(portid, &link);
1447 			/* print link status if flag set */
1448 			if (print_flag == 1) {
1449 				if (link.link_status)
1450 					printf("Port %d Link Up - speed %u "
1451 						"Mbps - %s\n", (uint8_t)portid,
1452 						(unsigned)link.link_speed,
1453 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1454 					("full-duplex") : ("half-duplex\n"));
1456 					printf("Port %d Link Down\n",
1460 			/* clear all_ports_up flag if any link down */
1461 			if (link.link_status == 0) {
1466 		/* after finally printing all link status, get out */
1467 		if (print_flag == 1)
1470 		if (all_ports_up == 0) {
1472 			rte_delay_ms(CHECK_INTERVAL);
1475 		/* set the print_flag if all ports up or timeout */
1476 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
/*
 * Program the NIC's TX queue -> stats-counter mapping registers for
 * port_id from the global tx_queue_stats_mappings[] table, skipping
 * entries for other ports or out-of-range queue ids.  Sets
 * port->tx_queue_stats_mapping_enabled when at least one mapping was
 * applied.
 * NOTE(review): the diag error check between the setup call and the
 * mapping_found update is elided from this excerpt, as is the return.
 */
1483 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1487 	uint8_t mapping_found = 0;
1489 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1490 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1491 			(tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1492 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1493 					tx_queue_stats_mappings[i].queue_id,
1494 					tx_queue_stats_mappings[i].stats_counter_id);
1501 		port->tx_queue_stats_mapping_enabled = 1;
/*
 * RX counterpart of set_tx_queue_stats_mapping_registers(): program the
 * NIC's RX queue -> stats-counter mapping registers for port_id from
 * the global rx_queue_stats_mappings[] table and set
 * port->rx_queue_stats_mapping_enabled when a mapping was applied.
 * NOTE(review): the diag error check and the return are elided from
 * this excerpt.
 */
1506 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1510 	uint8_t mapping_found = 0;
1512 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1513 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1514 			(rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1515 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1516 					rx_queue_stats_mappings[i].queue_id,
1517 					rx_queue_stats_mappings[i].stats_counter_id);
1524 		port->rx_queue_stats_mapping_enabled = 1;
/*
 * Apply both TX and RX queue-stats mappings for port pi.
 * -ENOTSUP from either setter is tolerated (the feature is simply
 * disabled for that direction and a notice is printed); any other
 * error is fatal and aborts the application via rte_exit().
 */
1529 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1533 	diag = set_tx_queue_stats_mapping_registers(pi, port);
1535 		if (diag == -ENOTSUP) {
1536 			port->tx_queue_stats_mapping_enabled = 0;
1537 			printf("TX queue stats mapping not supported port id=%d\n", pi);
1540 			rte_exit(EXIT_FAILURE,
1541 					"set_tx_queue_stats_mapping_registers "
1542 					"failed for port id=%d diag=%d\n",
1546 	diag = set_rx_queue_stats_mapping_registers(pi, port);
1548 		if (diag == -ENOTSUP) {
1549 			port->rx_queue_stats_mapping_enabled = 0;
1550 			printf("RX queue stats mapping not supported port id=%d\n", pi);
1553 			rte_exit(EXIT_FAILURE,
1554 					"set_rx_queue_stats_mapping_registers "
1555 					"failed for port id=%d diag=%d\n",
/*
 * Populate the default device/queue configuration of every probed
 * port from the testpmd globals (rx_mode, fdir_conf, thresholds, ...),
 * read each port's MAC address, program its queue-stats mappings and,
 * when built with RTE_NIC_BYPASS, initialize the bypass feature.
 * NOTE(review): the branch selecting between `rss_hf` and 0 for
 * rss_conf.rss_hf (presumably keyed on nb_rxq or an RSS flag) has its
 * condition elided from this excerpt -- confirm against the full source.
 */
1561 init_port_config(void)
1564 	struct rte_port *port;
1566 	for (pid = 0; pid < nb_ports; pid++) {
1568 		port->dev_conf.rxmode = rx_mode;
1569 		port->dev_conf.fdir_conf = fdir_conf;
		/* RSS enabled: use default key, hash functions from rss_hf */
1571 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1572 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
		/* RSS disabled: no hash functions */
1574 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1575 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1577 		port->rx_conf.rx_thresh = rx_thresh;
1578 		port->rx_conf.rx_free_thresh = rx_free_thresh;
1579 		port->rx_conf.rx_drop_en = rx_drop_en;
1580 		port->tx_conf.tx_thresh = tx_thresh;
1581 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1582 		port->tx_conf.tx_free_thresh = tx_free_thresh;
1583 		port->tx_conf.txq_flags = txq_flags;
1585 		rte_eth_macaddr_get(pid, &port->eth_addr);
1587 		map_port_queue_stats_mapping_registers(pid, port);
1588 #ifdef RTE_NIC_BYPASS
1589 		rte_eth_dev_bypass_init(pid);
/* VLAN ids mapped onto VMDQ pools by get_eth_dcb_conf() and added to
 * the RX VLAN filter by init_port_dcb_config(); 32 consecutive tags,
 * one per potential pool. */
1594 const uint16_t vlan_tags[] = {
1595 	0, 1, 2, 3, 4, 5, 6, 7,
1596 	8, 9, 10, 11, 12, 13, 14, 15,
1597 	16, 17, 18, 19, 20, 21, 22, 23,
1598 	24, 25, 26, 27, 28, 29, 30, 31
/*
 * Fill eth_conf with a DCB configuration derived from dcb_conf.
 * Two modes are handled:
 *   - DCB_VT_ENABLED: VMDQ+DCB -- each vlan_tags[] entry is mapped to a
 *     pool (16 pools for 8 TCs, 32 pools for 4 TCs) and rx/tx mq_mode
 *     is set to ETH_MQ_RX/TX_VMDQ_DCB;
 *   - plain DCB: per-TC queue mapping with ETH_MQ_RX/TX_DCB.
 * PFC support is advertised in dcb_capability_en when dcb_conf->pfc_en
 * is set; otherwise only priority-group support is advertised.
 * NOTE(review): the return statements and some closing braces are
 * elided from this excerpt.
 */
1602 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
1607  * Builds up the correct configuration for dcb+vt based on the vlan tags array
1608  * given above, and the number of traffic classes available for use.
1610 	if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
1611 		struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
1612 		struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
1614 		/* VMDQ+DCB RX and TX configurations */
1615 		vmdq_rx_conf.enable_default_pool = 0;
1616 		vmdq_rx_conf.default_pool = 0;
1617 		vmdq_rx_conf.nb_queue_pools =
1618 			(dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1619 		vmdq_tx_conf.nb_queue_pools =
1620 			(dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		/* spread the vlan tags round-robin across the pools */
1622 		vmdq_rx_conf.nb_pool_maps = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1623 		for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
1624 			vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[ i ];
1625 			vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
		/* identity mapping: user priority i -> DCB queue i */
1627 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1628 			vmdq_rx_conf.dcb_queue[i] = i;
1629 			vmdq_tx_conf.dcb_queue[i] = i;
1632 		/*set DCB mode of RX and TX of multiple queues*/
1633 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1634 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1635 		if (dcb_conf->pfc_en)
1636 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1638 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1640 		(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
1641 				sizeof(struct rte_eth_vmdq_dcb_conf)));
1642 		(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
1643 				sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
1646 		struct rte_eth_dcb_rx_conf rx_conf;
1647 		struct rte_eth_dcb_tx_conf tx_conf;
1649 		/* queue mapping configuration of DCB RX and TX */
1650 		if (dcb_conf->num_tcs == ETH_4_TCS)
1651 			dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
1653 			dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
1655 		rx_conf.nb_tcs = dcb_conf->num_tcs;
1656 		tx_conf.nb_tcs = dcb_conf->num_tcs;
		/* identity mapping: user priority i -> DCB queue i */
1658 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
1659 			rx_conf.dcb_queue[i] = i;
1660 			tx_conf.dcb_queue[i] = i;
1662 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
1663 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1664 		if (dcb_conf->pfc_en)
1665 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1667 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1669 		(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
1670 				sizeof(struct rte_eth_dcb_rx_conf)));
1671 		(void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
1672 				sizeof(struct rte_eth_dcb_tx_conf)));
/*
 * Reconfigure port pid for DCB operation: build a DCB rte_eth_conf via
 * get_eth_dcb_conf(), copy it into the port's dev_conf, reapply the
 * global threshold settings, enable HW VLAN filtering, add every entry
 * of vlan_tags[] to the port's RX VLAN filter, refresh the MAC address
 * and stats mappings, and mark the port with dcb_flag.
 * NOTE(review): the error check on get_eth_dcb_conf()'s return and the
 * final return statement are elided from this excerpt.
 */
1679 init_port_dcb_config(portid_t pid,struct dcb_config *dcb_conf)
1681 	struct rte_eth_conf port_conf;
1682 	struct rte_port *rte_port;
1687 	/* rxq and txq configuration in dcb mode */
1690 	rx_free_thresh = 64;
1692 	memset(&port_conf,0,sizeof(struct rte_eth_conf));
1693 	/* Enter DCB configuration status */
1696 	nb_vlan = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1697 	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
1698 	retval = get_eth_dcb_conf(&port_conf, dcb_conf);
1702 	rte_port = &ports[pid];
1703 	memcpy(&rte_port->dev_conf, &port_conf,sizeof(struct rte_eth_conf));
1705 	rte_port->rx_conf.rx_thresh = rx_thresh;
1706 	rte_port->rx_conf.rx_free_thresh = rx_free_thresh;
1707 	rte_port->tx_conf.tx_thresh = tx_thresh;
1708 	rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1709 	rte_port->tx_conf.tx_free_thresh = tx_free_thresh;
	/* accept only the vlan tags that are mapped to pools */
1711 	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1712 	for (i = 0; i < nb_vlan; i++){
1713 		rx_vft_set(pid, vlan_tags[i], 1);
1716 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1717 	map_port_queue_stats_mapping_registers(pid, rte_port);
1719 	rte_port->dcb_flag = 1;
1724 #ifdef RTE_EXEC_ENV_BAREMETAL
1729 main(int argc, char** argv)
1734 diag = rte_eal_init(argc, argv);
1736 rte_panic("Cannot init EAL\n");
1738 if (rte_pmd_init_all())
1739 rte_panic("Cannot init PMD\n");
1741 if (rte_eal_pci_probe())
1742 rte_panic("Cannot probe PCI\n");
1744 nb_ports = (portid_t) rte_eth_dev_count();
1746 rte_exit(EXIT_FAILURE, "No probed ethernet devices - "
1748 "CONFIG_RTE_LIBRTE_IGB_PMD=y and that "
1749 "CONFIG_RTE_LIBRTE_EM_PMD=y and that "
1750 "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in your "
1751 "configuration file\n");
1753 set_def_fwd_config();
1755 rte_panic("Empty set of forwarding logical cores - check the "
1756 "core mask supplied in the command parameters\n");
1761 launch_args_parse(argc, argv);
1763 if (nb_rxq > nb_txq)
1764 printf("Warning: nb_rxq=%d enables RSS configuration, "
1765 "but nb_txq=%d will prevent to fully test it.\n",
1769 if (start_port(RTE_PORT_ALL) != 0)
1770 rte_exit(EXIT_FAILURE, "Start ports failed\n");
1772 /* set all ports to promiscuous mode by default */
1773 for (port_id = 0; port_id < nb_ports; port_id++)
1774 rte_eth_promiscuous_enable(port_id);
1776 if (interactive == 1)
1782 printf("No commandline core given, start packet forwarding\n");
1783 start_packet_forwarding(0);
1784 printf("Press enter to exit\n");
1785 rc = read(0, &c, 1);