4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <sys/types.h>
44 #include <sys/queue.h>
51 #include <rte_common.h>
52 #include <rte_byteorder.h>
54 #include <rte_debug.h>
55 #include <rte_cycles.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
59 #include <rte_launch.h>
60 #include <rte_tailq.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
70 #include <rte_interrupts.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
80 #include "mempool_osdep.h"
/*
 * NOTE(review): global run-time configuration of testpmd. This region
 * survives only partially — each line carries a stray original-line-number
 * prefix and several comment openers/closers, array entries and closing
 * "};" lines were lost in extraction. Code tokens below are kept
 * byte-for-byte; recover the full text from DPDK app/test-pmd/testpmd.c
 * before attempting to build.
 */
82 uint16_t verbose_level = 0; /**< Silent by default. */
84 /* use master core for command line ? */
85 uint8_t interactive = 0;
88 * NUMA support configuration.
89 * When set, the NUMA support attempts to dispatch the allocation of the
90 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
91 * probed ports among the CPU sockets 0 and 1.
92 * Otherwise, all memory is allocated from CPU socket 0.
94 uint8_t numa_support = 0; /**< No numa support by default */
97 * In UMA mode,all memory is allocated from socket 0 if --socket-num is
100 uint8_t socket_num = UMA_NO_CONFIG;
103 * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs.
108 * Record the Ethernet address of peer target ports to which packets are
110 * Must be instanciated with the ethernet addresses of peer traffic generator
113 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
114 portid_t nb_peer_eth_addrs = 0;
117 * Probed Target Environment.
119 struct rte_port *ports; /**< For all probed ethernet ports. */
120 portid_t nb_ports; /**< Number of probed ethernet ports. */
121 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
122 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
125 * Test Forwarding Configuration.
126 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
127 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
129 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
130 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
131 portid_t nb_cfg_ports; /**< Number of configured ports. */
132 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
134 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
135 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
137 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
138 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
/* NOTE(review): most entries of fwd_engines[] (io, mac, rxonly, txonly,
 * csum, ...) and the terminating NULL plus closing "};" were lost here —
 * only two engine entries survive. */
141 * Forwarding engines.
143 struct fwd_engine * fwd_engines[] = {
146 &mac_retry_fwd_engine,
150 #ifdef RTE_LIBRTE_IEEE1588
151 &ieee1588_fwd_engine,
156 struct fwd_config cur_fwd_config;
157 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
159 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
160 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
161 * specified on command-line. */
164 * Configuration of packet segments used by the "txonly" processing engine.
166 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
167 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
168 TXONLY_DEF_PACKET_LEN,
170 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
172 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
173 uint16_t mb_mempool_cache = DEF_PKT_BURST; /**< Size of mbuf mempool cache. */
175 /* current configuration is in DCB or not,0 means it is not in DCB mode */
176 uint8_t dcb_config = 0;
178 /* Whether the dcb is in testing status */
179 uint8_t dcb_test = 0;
181 /* DCB on and VT on mapping is default */
182 enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
185 * Configurable number of RX/TX queues.
187 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
188 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
191 * Configurable number of RX/TX ring descriptors.
193 #define RTE_TEST_RX_DESC_DEFAULT 128
194 #define RTE_TEST_TX_DESC_DEFAULT 512
195 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
196 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
199 * Configurable values of RX and TX ring threshold registers.
201 #define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
202 #define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
203 #define RX_WTHRESH 4 /**< Default value of RX write-back threshold register. */
205 #define TX_PTHRESH 36 /**< Default value of TX prefetch threshold register. */
206 #define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
207 #define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */
/* NOTE(review): closing "};" of both threshold initializers lost below. */
209 struct rte_eth_thresh rx_thresh = {
210 .pthresh = RX_PTHRESH,
211 .hthresh = RX_HTHRESH,
212 .wthresh = RX_WTHRESH,
215 struct rte_eth_thresh tx_thresh = {
216 .pthresh = TX_PTHRESH,
217 .hthresh = TX_HTHRESH,
218 .wthresh = TX_WTHRESH,
222 * Configurable value of RX free threshold.
224 uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */
227 * Configurable value of RX drop enable.
229 uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */
232 * Configurable value of TX free threshold.
234 uint16_t tx_free_thresh = 0; /* Use default values. */
237 * Configurable value of TX RS bit threshold.
239 uint16_t tx_rs_thresh = 0; /* Use default values. */
242 * Configurable value of TX queue flags.
244 uint32_t txq_flags = 0; /* No flags set. */
247 * Receive Side Scaling (RSS) configuration.
249 uint16_t rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6; /* RSS IP by default. */
252 * Port topology configuration
254 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
257 * Avoids to flush all the RX streams before starts forwarding.
259 uint8_t no_flush_rx = 0; /* flush by default */
262 * NIC bypass mode configuration options.
264 #ifdef RTE_NIC_BYPASS
266 /* The NIC bypass watchdog timeout. */
267 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
/* NOTE(review): the matching #endif for RTE_NIC_BYPASS was lost here. */
272 * Ethernet device configuration.
274 struct rte_eth_rxmode rx_mode = {
275 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
277 .header_split = 0, /**< Header Split disabled. */
278 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
279 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
280 .hw_vlan_strip = 1, /**< VLAN strip enabled. */
281 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
282 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
283 .hw_strip_crc = 0, /**< CRC stripping by hardware disabled. */
286 struct rte_fdir_conf fdir_conf = {
287 .mode = RTE_FDIR_MODE_NONE,
288 .pballoc = RTE_FDIR_PBALLOC_64K,
289 .status = RTE_FDIR_REPORT_STATUS,
290 .flexbytes_offset = 0x6,
294 static volatile int test_done = 1; /* stop packet forwarding when set to 1. */
296 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
297 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
299 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
300 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
302 uint16_t nb_tx_queue_stats_mappings = 0;
303 uint16_t nb_rx_queue_stats_mappings = 0;
305 /* Forward function declarations */
306 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
307 static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);
310 * Check if all the ports are started.
311 * If yes, return positive value. If not, return zero.
313 static int all_ports_started(void);
316 * Setup default configuration.
319 set_default_fwd_lcores_config(void)
325 for (i = 0; i < RTE_MAX_LCORE; i++) {
326 if (! rte_lcore_is_enabled(i))
328 if (i == rte_get_master_lcore())
330 fwd_lcores_cpuids[nb_lc++] = i;
332 nb_lcores = (lcoreid_t) nb_lc;
333 nb_cfg_lcores = nb_lcores;
338 set_def_peer_eth_addrs(void)
342 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
343 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
344 peer_eth_addrs[i].addr_bytes[5] = i;
349 set_default_fwd_ports_config(void)
353 for (pt_id = 0; pt_id < nb_ports; pt_id++)
354 fwd_ports_ids[pt_id] = pt_id;
356 nb_cfg_ports = nb_ports;
357 nb_fwd_ports = nb_ports;
/*
 * Reset lcore, peer-address and port configuration to their defaults.
 * Public entry point (declared in testpmd.h); called at init and by the
 * "set default" command.
 * NOTE(review): header/braces reconstructed — lost in extraction.
 */
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
369 * Configuration initialisation done once at init time.
/*
 * Argument passed to the per-mbuf constructor (testpmd_mbuf_ctor):
 * where the data buffer starts inside each mbuf and how large it is.
 * NOTE(review): closing braces reconstructed — lost in extraction.
 */
struct mbuf_ctor_arg {
	uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
	uint16_t seg_buf_size;   /**< size of data segment in mbuf. */
};

/*
 * Argument passed to the mempool constructor (testpmd_mbuf_pool_ctor):
 * data room size recorded in the pool's private area.
 */
struct mbuf_pool_ctor_arg {
	uint16_t seg_buf_size; /**< size of data segment in mbuf. */
};
/*
 * Per-mbuf constructor callback passed to rte_mempool_create(): lays out
 * the data buffer immediately after the rte_mbuf header (at
 * seg_buf_offset), records its physical address and size, and resets the
 * packet metadata fields.
 * NOTE(review): several interior lines (return-type line, braces,
 * mb->pool / ol_flags / nb_segs initialisation — presumably, per the
 * canonical testpmd source; confirm) were lost in extraction.
 */
381 testpmd_mbuf_ctor(struct rte_mempool *mp,
384 __attribute__((unused)) unsigned i)
386 struct mbuf_ctor_arg *mb_ctor_arg;
389 mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
390 mb = (struct rte_mbuf *) raw_mbuf;
392 mb->type = RTE_MBUF_PKT;
394 mb->buf_addr = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
395 mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
396 mb_ctor_arg->seg_buf_offset);
397 mb->buf_len = mb_ctor_arg->seg_buf_size;
/* NOTE(review): mb->type is assigned RTE_MBUF_PKT a second time below —
 * redundant duplicate of the assignment above; harmless but should be
 * removed. */
398 mb->type = RTE_MBUF_PKT;
400 mb->pkt.data = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
402 mb->pkt.vlan_macip.data = 0;
403 mb->pkt.hash.rss = 0;
407 testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
410 struct mbuf_pool_ctor_arg *mbp_ctor_arg;
411 struct rte_pktmbuf_pool_private *mbp_priv;
413 if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
414 printf("%s(%s) private_data_size %d < %d\n",
415 __func__, mp->name, (int) mp->private_data_size,
416 (int) sizeof(struct rte_pktmbuf_pool_private));
419 mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
420 mbp_priv = rte_mempool_get_priv(mp);
421 mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
/*
 * Create the mbuf pool for one CPU socket. The backing allocator is
 * selected at compile/run time: Xen grant-alloc pool when
 * RTE_LIBRTE_PMD_XENVIRT is defined, anonymous-mmap pool or the regular
 * rte_mempool otherwise (the #else/#endif lines of that ladder were lost
 * in extraction, as were the final argument lines of each create call).
 * Exits the program if pool creation fails; dumps the pool when verbose.
 * NOTE(review): code kept byte-for-byte, stray line-number prefixes
 * included — recover missing lines from DPDK app/test-pmd/testpmd.c.
 */
425 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
426 unsigned int socket_id)
428 char pool_name[RTE_MEMPOOL_NAMESIZE];
429 struct rte_mempool *rte_mp;
430 struct mbuf_pool_ctor_arg mbp_ctor_arg;
431 struct mbuf_ctor_arg mb_ctor_arg;
434 mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
436 mb_ctor_arg.seg_buf_offset =
437 (uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
438 mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
439 mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
440 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
442 #ifdef RTE_LIBRTE_PMD_XENVIRT
443 rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
444 (unsigned) mb_mempool_cache,
445 sizeof(struct rte_pktmbuf_pool_private),
446 testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
447 testpmd_mbuf_ctor, &mb_ctor_arg,
454 rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
455 (unsigned) mb_mempool_cache,
456 sizeof(struct rte_pktmbuf_pool_private),
457 testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
458 testpmd_mbuf_ctor, &mb_ctor_arg,
461 rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size,
462 (unsigned) mb_mempool_cache,
463 sizeof(struct rte_pktmbuf_pool_private),
464 testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
465 testpmd_mbuf_ctor, &mb_ctor_arg,
470 if (rte_mp == NULL) {
471 rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
472 "failed\n", socket_id);
473 } else if (verbose_level > 0) {
474 rte_mempool_dump(rte_mp);
/*
 * NOTE(review): this is the body of init_config() — its
 * `static void init_config(void)` header line and several interior lines
 * (variable declarations, NUMA branches, closing braces) were lost in
 * extraction. It allocates the per-lcore and per-port arrays, creates the
 * mbuf pool(s) — one pool in UMA mode, one per socket when NUMA support
 * is on — records which pool each forwarding lcore uses, and finally
 * builds the forwarding streams. Code kept byte-for-byte.
 */
482 struct rte_port *port;
483 struct rte_mempool *mbp;
484 unsigned int nb_mbuf_per_pool;
486 uint8_t port_per_socket[MAX_SOCKET];
488 memset(port_per_socket,0,MAX_SOCKET);
489 /* Configuration of logical cores. */
490 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
491 sizeof(struct fwd_lcore *) * nb_lcores,
493 if (fwd_lcores == NULL) {
494 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
495 "failed\n", nb_lcores);
497 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
498 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
499 sizeof(struct fwd_lcore),
501 if (fwd_lcores[lc_id] == NULL) {
502 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
505 fwd_lcores[lc_id]->cpuid_idx = lc_id;
509 * Create pools of mbuf.
510 * If NUMA support is disabled, create a single pool of mbuf in
511 * socket 0 memory by default.
512 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
514 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
515 * nb_txd can be configured at run time.
517 if (param_total_num_mbufs)
518 nb_mbuf_per_pool = param_total_num_mbufs;
520 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
521 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
524 nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
528 if (socket_num == UMA_NO_CONFIG)
529 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
531 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
535 * Records which Mbuf pool to use by each logical core, if needed.
537 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
538 mbp = mbuf_pool_find(rte_lcore_to_socket_id(lc_id));
/* NOTE(review): the `if (mbp == NULL)` guard preceding this fallback to
 * the socket-0 pool was lost in extraction. */
540 mbp = mbuf_pool_find(0);
541 fwd_lcores[lc_id]->mbp = mbp;
544 /* Configuration of Ethernet ports. */
545 ports = rte_zmalloc("testpmd: ports",
546 sizeof(struct rte_port) * nb_ports,
549 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
550 "failed\n", nb_ports);
553 for (pid = 0; pid < nb_ports; pid++) {
555 rte_eth_dev_info_get(pid, &port->dev_info);
558 if (port_numa[pid] != NUMA_NO_CONFIG)
559 port_per_socket[port_numa[pid]]++;
561 uint32_t socket_id = rte_eth_dev_socket_id(pid);
562 port_per_socket[socket_id]++;
566 /* set flag to initialize port/queue */
567 port->need_reconfig = 1;
568 port->need_reconfig_queues = 1;
573 unsigned int nb_mbuf;
575 if (param_total_num_mbufs)
576 nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
578 for (i = 0; i < MAX_SOCKET; i++) {
579 nb_mbuf = (nb_mbuf_per_pool *
582 mbuf_pool_create(mbuf_data_size,
587 /* Configuration of packet forwarding streams. */
588 if (init_fwd_streams() < 0)
589 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
/*
 * (Re)allocate the forwarding-stream array: validates nb_rxq/nb_txq
 * against each port's device limits (returns -1 on violation — the
 * `return -1;` lines themselves were lost in extraction), assigns each
 * port a socket id (NUMA-aware or forced by --socket-num), then frees
 * any previous stream array and zero-allocates nb_ports * nb_rxq new
 * fwd_stream structures. Code kept byte-for-byte; the `static int`
 * return-type line and several braces are missing.
 */
593 init_fwd_streams(void)
596 struct rte_port *port;
597 streamid_t sm_id, nb_fwd_streams_new;
599 /* set socket id according to numa or not */
600 for (pid = 0; pid < nb_ports; pid++) {
602 if (nb_rxq > port->dev_info.max_rx_queues) {
603 printf("Fail: nb_rxq(%d) is greater than "
604 "max_rx_queues(%d)\n", nb_rxq,
605 port->dev_info.max_rx_queues);
608 if (nb_txq > port->dev_info.max_tx_queues) {
609 printf("Fail: nb_txq(%d) is greater than "
610 "max_tx_queues(%d)\n", nb_txq,
611 port->dev_info.max_tx_queues);
615 port->socket_id = rte_eth_dev_socket_id(pid);
617 if (socket_num == UMA_NO_CONFIG)
620 port->socket_id = socket_num;
624 nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
625 if (nb_fwd_streams_new == nb_fwd_streams)
628 if (fwd_streams != NULL) {
629 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
630 if (fwd_streams[sm_id] == NULL)
632 rte_free(fwd_streams[sm_id]);
633 fwd_streams[sm_id] = NULL;
635 rte_free(fwd_streams);
640 nb_fwd_streams = nb_fwd_streams_new;
641 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
642 sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
643 if (fwd_streams == NULL)
644 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
645 "failed\n", nb_fwd_streams);
647 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
648 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
649 sizeof(struct fwd_stream), CACHE_LINE_SIZE);
650 if (fwd_streams[sm_id] == NULL)
651 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
/*
 * Pretty-print the burst-size histogram collected per stream
 * (RTE_TEST_PMD_RECORD_BURST_STATS builds only): finds the total number
 * of bursts and the two most frequent burst sizes, then prints their
 * percentages, folding everything else into "others".
 * NOTE(review): several interior lines (the `static void` line,
 * `total_burst = 0;`, the second-place bookkeeping branch, early
 * `return;`s and braces) were lost in extraction — recover before
 * building. Code kept byte-for-byte.
 */
658 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
660 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
662 unsigned int total_burst;
663 unsigned int nb_burst;
664 unsigned int burst_stats[3];
665 uint16_t pktnb_stats[3];
667 int burst_percent[3];
670 * First compute the total number of packet bursts and the
671 * two highest numbers of bursts of the same number of packets.
674 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
675 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
676 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
677 nb_burst = pbs->pkt_burst_spread[nb_pkt];
680 total_burst += nb_burst;
681 if (nb_burst > burst_stats[0]) {
682 burst_stats[1] = burst_stats[0];
683 pktnb_stats[1] = pktnb_stats[0];
684 burst_stats[0] = nb_burst;
685 pktnb_stats[0] = nb_pkt;
688 if (total_burst == 0)
690 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
691 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
692 burst_percent[0], (int) pktnb_stats[0]);
693 if (burst_stats[0] == total_burst) {
697 if (burst_stats[0] + burst_stats[1] == total_burst) {
698 printf(" + %d%% of %d pkts]\n",
699 100 - burst_percent[0], pktnb_stats[1]);
702 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
703 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
704 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
705 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
708 printf(" + %d%% of %d pkts + %d%% of others]\n",
709 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
711 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
/*
 * Print the per-port forwarding statistics block: RX/TX packet, drop and
 * total counters (two layouts, depending on whether queue-stats mapping
 * is enabled for the port), checksum-error counters in csum mode,
 * XON/XOFF pause-frame counters, optional per-stream burst histograms,
 * flow-director match/miss counters, and per-queue stat registers.
 * NOTE(review): the `static void` line, several braces and a few printf
 * continuation/argument lines were lost in extraction. Code kept
 * byte-for-byte.
 */
714 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
716 struct rte_port *port;
719 static const char *fwd_stats_border = "----------------------";
721 port = &ports[port_id];
722 printf("\n %s Forward statistics for port %-2d %s\n",
723 fwd_stats_border, port_id, fwd_stats_border);
725 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
726 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
728 stats->ipackets, stats->ierrors,
729 (uint64_t) (stats->ipackets + stats->ierrors));
731 if (cur_fwd_eng == &csum_fwd_engine)
732 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
733 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
735 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
737 stats->opackets, port->tx_dropped,
738 (uint64_t) (stats->opackets + port->tx_dropped));
740 if (stats->rx_nombuf > 0)
741 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
745 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
747 stats->ipackets, stats->ierrors,
748 (uint64_t) (stats->ipackets + stats->ierrors));
750 if (cur_fwd_eng == &csum_fwd_engine)
751 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
752 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
754 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
756 stats->opackets, port->tx_dropped,
757 (uint64_t) (stats->opackets + port->tx_dropped));
759 if (stats->rx_nombuf > 0)
760 printf(" RX-nombufs:%14"PRIu64"\n", stats->rx_nombuf);
763 /* Display statistics of XON/XOFF pause frames, if any. */
764 if ((stats->tx_pause_xon | stats->rx_pause_xon |
765 stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) {
766 printf(" RX-XOFF: %-14"PRIu64" RX-XON: %-14"PRIu64"\n",
767 stats->rx_pause_xoff, stats->rx_pause_xon);
768 printf(" TX-XOFF: %-14"PRIu64" TX-XON: %-14"PRIu64"\n",
769 stats->tx_pause_xoff, stats->tx_pause_xon);
772 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
774 pkt_burst_stats_display("RX",
775 &port->rx_stream->rx_burst_stats);
777 pkt_burst_stats_display("TX",
778 &port->tx_stream->tx_burst_stats);
781 if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
782 printf(" Fdirmiss:%14"PRIu64" Fdirmatch:%14"PRIu64"\n",
786 if (port->rx_queue_stats_mapping_enabled) {
788 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
789 printf(" Stats reg %2d RX-packets:%14"PRIu64
790 " RX-errors:%14"PRIu64
791 " RX-bytes:%14"PRIu64"\n",
792 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
796 if (port->tx_queue_stats_mapping_enabled) {
797 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
798 printf(" Stats reg %2d TX-packets:%14"PRIu64
799 " TX-bytes:%14"PRIu64"\n",
800 i, stats->q_opackets[i], stats->q_obytes[i]);
804 printf(" %s--------------------------------%s\n",
805 fwd_stats_border, fwd_stats_border);
/*
 * Print the statistics of one forwarding stream (RX port/queue ->
 * TX port/queue): packet counters, drops, checksum errors in csum mode,
 * and burst histograms when compiled in. Streams with no activity are
 * skipped (the early `return;` after the zero-check was lost in
 * extraction, as were the `static void` line and braces). Code kept
 * byte-for-byte.
 */
809 fwd_stream_stats_display(streamid_t stream_id)
811 struct fwd_stream *fs;
812 static const char *fwd_top_stats_border = "-------";
814 fs = fwd_streams[stream_id];
815 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
816 (fs->fwd_dropped == 0))
818 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
819 "TX Port=%2d/Queue=%2d %s\n",
820 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
821 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
822 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
823 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
825 /* if checksum mode */
826 if (cur_fwd_eng == &csum_fwd_engine) {
827 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
828 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
831 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
832 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
833 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
/*
 * Drain every RX queue of every forwarding port before a run starts:
 * two passes over all (port, rxq) pairs, freeing whatever bursts are
 * still sitting in the hardware queues, with a 10 ms pause between
 * passes. NOTE(review): the `static void` line, loop-variable
 * declarations, the inner drain loop's `do`/`while (nb_rx > 0)` wrapper
 * (presumably — confirm against the canonical source) and braces were
 * lost in extraction. Code kept byte-for-byte.
 */
838 flush_fwd_rx_queues(void)
840 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
848 for (j = 0; j < 2; j++) {
849 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
850 for (rxq = 0; rxq < nb_rxq; rxq++) {
851 port_id = fwd_ports_ids[rxp];
853 nb_rx = rte_eth_rx_burst(port_id, rxq,
854 pkts_burst, MAX_PKT_BURST);
855 for (i = 0; i < nb_rx; i++)
856 rte_pktmbuf_free(pkts_burst[i]);
860 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
865 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
867 struct fwd_stream **fsm;
871 fsm = &fwd_streams[fc->stream_idx];
872 nb_fs = fc->stream_nb;
874 for (sm_id = 0; sm_id < nb_fs; sm_id++)
875 (*pkt_fwd)(fsm[sm_id]);
876 } while (! fc->stopped);
880 start_pkt_forward_on_core(void *fwd_arg)
882 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
883 cur_fwd_config.fwd_eng->packet_fwd);
888 * Run the TXONLY packet forwarding engine to send a single burst of packets.
889 * Used to start communication flows in network loopback test configurations.
892 run_one_txonly_burst_on_core(void *fwd_arg)
894 struct fwd_lcore *fwd_lc;
895 struct fwd_lcore tmp_lcore;
897 fwd_lc = (struct fwd_lcore *) fwd_arg;
899 tmp_lcore.stopped = 1;
900 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
905 * Launch packet forwarding:
906 * - Setup per-port forwarding context.
907 * - launch logical cores with their forwarding configuration.
/*
 * Launch packet forwarding: calls the engine's optional port_fwd_begin
 * hook on every forwarding port, then remote-launches pkt_fwd_on_lcore
 * on each forwarding lcore (the current lcore is skipped in interactive
 * mode so the command line stays responsive). NOTE(review): the `static
 * void` line, local declarations, braces and the in-line execution path
 * for the current lcore were lost in extraction. Code kept
 * byte-for-byte.
 */
910 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
912 port_fwd_begin_t port_fwd_begin;
917 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
918 if (port_fwd_begin != NULL) {
919 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
920 (*port_fwd_begin)(fwd_ports_ids[i]);
922 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
923 lc_id = fwd_lcores_cpuids[i];
924 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
925 fwd_lcores[i]->stopped = 0;
926 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
927 fwd_lcores[i], lc_id);
929 printf("launch lcore %u failed - diag=%d\n",
936 * Launch packet forwarding configuration.
/*
 * Public entry point that starts a forwarding run: validates that all
 * ports are started and no run is in progress, enforces the DCB-mode
 * constraints (all ports DCB-configured, more than one forwarding core),
 * flushes stale RX packets, snapshots per-port stats and zeroes
 * per-stream counters, then launches forwarding — optionally preceded by
 * one TXONLY burst (with_tx_first) to seed loopback topologies.
 * NOTE(review): the function header line, early `return;`s, the
 * `test_done = 0;` transition and several braces were lost in
 * extraction. Code kept byte-for-byte.
 */
939 start_packet_forwarding(int with_tx_first)
941 port_fwd_begin_t port_fwd_begin;
942 port_fwd_end_t port_fwd_end;
943 struct rte_port *port;
948 if (all_ports_started() == 0) {
949 printf("Not all ports were started\n");
952 if (test_done == 0) {
953 printf("Packet forwarding already started\n");
957 for (i = 0; i < nb_fwd_ports; i++) {
958 pt_id = fwd_ports_ids[i];
959 port = &ports[pt_id];
960 if (!port->dcb_flag) {
961 printf("In DCB mode, all forwarding ports must "
962 "be configured in this mode.\n");
966 if (nb_fwd_lcores == 1) {
967 printf("In DCB mode,the nb forwarding cores "
968 "should be larger than 1.\n");
975 flush_fwd_rx_queues();
978 rxtx_config_display();
980 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
981 pt_id = fwd_ports_ids[i];
982 port = &ports[pt_id];
983 rte_eth_stats_get(pt_id, &port->stats);
984 port->tx_dropped = 0;
986 map_port_queue_stats_mapping_registers(pt_id, port);
988 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
989 fwd_streams[sm_id]->rx_packets = 0;
990 fwd_streams[sm_id]->tx_packets = 0;
991 fwd_streams[sm_id]->fwd_dropped = 0;
992 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
993 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
995 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
996 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
997 sizeof(fwd_streams[sm_id]->rx_burst_stats));
998 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
999 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1001 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1002 fwd_streams[sm_id]->core_cycles = 0;
1005 if (with_tx_first) {
1006 port_fwd_begin = tx_only_engine.port_fwd_begin;
1007 if (port_fwd_begin != NULL) {
1008 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1009 (*port_fwd_begin)(fwd_ports_ids[i]);
1011 launch_packet_forwarding(run_one_txonly_burst_on_core);
1012 rte_eal_mp_wait_lcore();
1013 port_fwd_end = tx_only_engine.port_fwd_end;
1014 if (port_fwd_end != NULL) {
1015 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1016 (*port_fwd_end)(fwd_ports_ids[i]);
1019 launch_packet_forwarding(start_pkt_forward_on_core);
/*
 * Public entry point that stops a forwarding run: signals every
 * forwarding lcore to stop, waits for them, calls the engine's
 * port_fwd_end hook, aggregates per-stream counters back into the ports,
 * then prints per-port deltas (current HW stats minus the snapshot taken
 * at start) and the accumulated totals.
 * NOTE(review): the function header line, `test_done` handling, several
 * accumulator initialisations (total_recv/total_xmit — presumably zeroed
 * in a lost line; confirm) and braces were lost in extraction. Code kept
 * byte-for-byte.
 */
1023 stop_packet_forwarding(void)
1025 struct rte_eth_stats stats;
1026 struct rte_port *port;
1027 port_fwd_end_t port_fwd_end;
1032 uint64_t total_recv;
1033 uint64_t total_xmit;
1034 uint64_t total_rx_dropped;
1035 uint64_t total_tx_dropped;
1036 uint64_t total_rx_nombuf;
1037 uint64_t tx_dropped;
1038 uint64_t rx_bad_ip_csum;
1039 uint64_t rx_bad_l4_csum;
1040 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1041 uint64_t fwd_cycles;
1043 static const char *acc_stats_border = "+++++++++++++++";
1045 if (all_ports_started() == 0) {
1046 printf("Not all ports were started\n");
1050 printf("Packet forwarding not started\n");
1053 printf("Telling cores to stop...");
1054 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1055 fwd_lcores[lc_id]->stopped = 1;
1056 printf("\nWaiting for lcores to finish...\n");
1057 rte_eal_mp_wait_lcore();
1058 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1059 if (port_fwd_end != NULL) {
1060 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1061 pt_id = fwd_ports_ids[i];
1062 (*port_fwd_end)(pt_id);
1065 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1068 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1069 if (cur_fwd_config.nb_fwd_streams >
1070 cur_fwd_config.nb_fwd_ports) {
1071 fwd_stream_stats_display(sm_id);
1072 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1073 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1075 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1077 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1080 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1081 tx_dropped = (uint64_t) (tx_dropped +
1082 fwd_streams[sm_id]->fwd_dropped);
1083 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1086 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1087 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1088 fwd_streams[sm_id]->rx_bad_ip_csum);
1089 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1093 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1094 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1095 fwd_streams[sm_id]->rx_bad_l4_csum);
1096 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1099 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1100 fwd_cycles = (uint64_t) (fwd_cycles +
1101 fwd_streams[sm_id]->core_cycles);
1106 total_rx_dropped = 0;
1107 total_tx_dropped = 0;
1108 total_rx_nombuf = 0;
1109 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1110 pt_id = fwd_ports_ids[i];
1112 port = &ports[pt_id];
1113 rte_eth_stats_get(pt_id, &stats);
1114 stats.ipackets -= port->stats.ipackets;
1115 port->stats.ipackets = 0;
1116 stats.opackets -= port->stats.opackets;
1117 port->stats.opackets = 0;
1118 stats.ibytes -= port->stats.ibytes;
1119 port->stats.ibytes = 0;
1120 stats.obytes -= port->stats.obytes;
1121 port->stats.obytes = 0;
1122 stats.ierrors -= port->stats.ierrors;
1123 port->stats.ierrors = 0;
1124 stats.oerrors -= port->stats.oerrors;
1125 port->stats.oerrors = 0;
1126 stats.rx_nombuf -= port->stats.rx_nombuf;
1127 port->stats.rx_nombuf = 0;
/* BUG(review): copy-paste error — after adjusting stats.fdirmatch and
 * stats.fdirmiss, the code below resets port->stats.rx_nombuf (again,
 * twice) instead of port->stats.fdirmatch / port->stats.fdirmiss, so the
 * fdir snapshot counters are never cleared between runs. */
1128 stats.fdirmatch -= port->stats.fdirmatch;
1129 port->stats.rx_nombuf = 0;
1130 stats.fdirmiss -= port->stats.fdirmiss;
1131 port->stats.rx_nombuf = 0;
1133 total_recv += stats.ipackets;
1134 total_xmit += stats.opackets;
1135 total_rx_dropped += stats.ierrors;
1136 total_tx_dropped += port->tx_dropped;
1137 total_rx_nombuf += stats.rx_nombuf;
1139 fwd_port_stats_display(pt_id, &stats);
1141 printf("\n %s Accumulated forward statistics for all ports"
1143 acc_stats_border, acc_stats_border);
1144 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1146 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1148 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1149 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1150 if (total_rx_nombuf > 0)
1151 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1152 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1154 acc_stats_border, acc_stats_border);
1155 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1157 printf("\n CPU cycles/packet=%u (total cycles="
1158 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1159 (unsigned int)(fwd_cycles / total_recv),
1160 fwd_cycles, total_recv);
1162 printf("\nDone.\n");
/*
 * Return non-zero when every probed port is in the RTE_PORT_STARTED
 * state, zero as soon as one port is not. NOTE(review): the `static int`
 * line, the `port = &ports[pi];` assignment, the `return 0;` /
 * `return 1;` statements and braces were lost in extraction. Code kept
 * byte-for-byte.
 */
1167 all_ports_started(void)
1170 struct rte_port *port;
1172 for (pi = 0; pi < nb_ports; pi++) {
1174 /* Check if there is a port which is not started */
1175 if (port->port_status != RTE_PORT_STARTED)
1179 /* No port is not started */
/*
 * start_port() - configure (when flagged) and start one port, or all ports.
 * @pid: target port id; per the filter below, a pid >= nb_ports (e.g.
 *       RTE_PORT_ALL) makes the loop act on every port.
 *
 * Visible behavior: refuses to run while forwarding is active
 * (test_done == 0); re-initializes forwarding streams; for each selected
 * port, atomically claims it (STOPPED -> HANDLING via rte_atomic16_cmpset),
 * reconfigures the device and/or its RX/TX queues when the need_reconfig /
 * need_reconfig_queues flags are set, starts the device, and moves the
 * state to STARTED.  On any failure the need_reconfig* flag is restored so
 * the next invocation retries, and the port state is rolled back to STOPPED.
 * A link-status poll runs at the end if at least one port was started.
 *
 * NOTE(review): many interior lines (returns, braces, else arms, some error
 * paths) are dropped from this chunk -- do not restructure from this view.
 */
1184 start_port(portid_t pid)
1186 int diag, need_check_link_status = 0;
1189 struct rte_port *port;
1191 if (test_done == 0) {
1192 printf("Please stop forwarding first\n");
1196 if (init_fwd_streams() < 0) {
1197 printf("Fail from init_fwd_streams()\n");
1203 for (pi = 0; pi < nb_ports; pi++) {
1204 if (pid < nb_ports && pid != pi)
/* Claim the port: only a STOPPED port may be (re)started. */
1208 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1209 RTE_PORT_HANDLING) == 0) {
1210 printf("Port %d is now not stopped\n", pi);
1214 if (port->need_reconfig > 0) {
1215 port->need_reconfig = 0;
1217 printf("Configuring Port %d (socket %d)\n", pi,
1218 rte_eth_dev_socket_id(pi));
1219 /* configure port */
1220 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
/* Configure failed: roll state back to STOPPED and retry next call. */
1223 if (rte_atomic16_cmpset(&(port->port_status),
1224 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1225 printf("Port %d can not be set back "
1226 "to stopped\n", pi);
1227 printf("Fail to configure port %d\n", pi);
1228 /* try to reconfigure port next time */
1229 port->need_reconfig = 1;
1233 if (port->need_reconfig_queues > 0) {
1234 port->need_reconfig_queues = 0;
1235 /* setup tx queues */
1236 for (qi = 0; qi < nb_txq; qi++) {
/* Prefer the per-ring NUMA socket when one was configured. */
1237 if ((numa_support) &&
1238 (txring_numa[pi] != NUMA_NO_CONFIG))
1239 diag = rte_eth_tx_queue_setup(pi, qi,
1240 nb_txd,txring_numa[pi],
1243 diag = rte_eth_tx_queue_setup(pi, qi,
1244 nb_txd,port->socket_id,
1250 /* Fail to setup tx queue, return */
1251 if (rte_atomic16_cmpset(&(port->port_status),
1253 RTE_PORT_STOPPED) == 0)
1254 printf("Port %d can not be set back "
1255 "to stopped\n", pi);
1256 printf("Fail to configure port %d tx queues\n", pi);
1257 /* try to reconfigure queues next time */
1258 port->need_reconfig_queues = 1;
1261 /* setup rx queues */
1262 for (qi = 0; qi < nb_rxq; qi++) {
1263 if ((numa_support) &&
1264 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
/* Per-ring NUMA: the mbuf pool must exist on that socket. */
1265 struct rte_mempool * mp =
1266 mbuf_pool_find(rxring_numa[pi]);
1268 printf("Failed to setup RX queue:"
1269 "No mempool allocation"
1270 "on the socket %d\n",
1275 diag = rte_eth_rx_queue_setup(pi, qi,
1276 nb_rxd,rxring_numa[pi],
1277 &(port->rx_conf),mp);
1280 diag = rte_eth_rx_queue_setup(pi, qi,
1281 nb_rxd,port->socket_id,
1283 mbuf_pool_find(port->socket_id));
1289 /* Fail to setup rx queue, return */
1290 if (rte_atomic16_cmpset(&(port->port_status),
1292 RTE_PORT_STOPPED) == 0)
1293 printf("Port %d can not be set back "
1294 "to stopped\n", pi);
1295 printf("Fail to configure port %d rx queues\n", pi);
1296 /* try to reconfigure queues next time */
1297 port->need_reconfig_queues = 1;
/* Start the device; on failure roll the state back to STOPPED. */
1302 if (rte_eth_dev_start(pi) < 0) {
1303 printf("Fail to start port %d\n", pi);
1305 /* Fail to setup rx queue, return */
1306 if (rte_atomic16_cmpset(&(port->port_status),
1307 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1308 printf("Port %d can not be set back to "
/* Success: publish the STARTED state. */
1313 if (rte_atomic16_cmpset(&(port->port_status),
1314 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1315 printf("Port %d can not be set into started\n", pi);
1317 /* at least one port started, need checking link status */
1318 need_check_link_status = 1;
1321 if (need_check_link_status)
1322 check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1324 printf("Please stop the ports first\n");
/*
 * stop_port() - stop one port (or all, when pid >= nb_ports, per the filter
 * below).  Visible behavior: refuses to run while forwarding is active,
 * atomically moves each selected port STARTED -> HANDLING, calls
 * rte_eth_dev_stop(), then publishes STOPPED, and finally re-polls link
 * status if anything was stopped.
 * NOTE(review): interior lines (returns, braces) are missing from this
 * chunk -- verify against the full file before restructuring.
 */
1331 stop_port(portid_t pid)
1334 struct rte_port *port;
1335 int need_check_link_status = 0;
1337 if (test_done == 0) {
1338 printf("Please stop forwarding first\n");
1345 printf("Stopping ports...\n");
1347 for (pi = 0; pi < nb_ports; pi++) {
1348 if (pid < nb_ports && pid != pi)
/* Only a STARTED port can be stopped; skip others. */
1352 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1353 RTE_PORT_HANDLING) == 0)
1356 rte_eth_dev_stop(pi);
1358 if (rte_atomic16_cmpset(&(port->port_status),
1359 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1360 printf("Port %d can not be set into stopped\n", pi);
1361 need_check_link_status = 1;
1363 if (need_check_link_status)
1364 check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
/*
 * close_port() - close one port (or all, when pid >= nb_ports).  Visible
 * behavior: refuses to run while forwarding is active, atomically moves
 * each selected port STOPPED -> HANDLING, calls rte_eth_dev_close(), then
 * publishes RTE_PORT_CLOSED.
 * NOTE(review): the failure message below says "stopped" although the
 * target state is RTE_PORT_CLOSED -- looks like a copy-paste message bug;
 * confirm against the full file before changing (surrounding lines are
 * missing from this chunk).
 */
1370 close_port(portid_t pid)
1373 struct rte_port *port;
1375 if (test_done == 0) {
1376 printf("Please stop forwarding first\n");
1380 printf("Closing ports...\n");
1382 for (pi = 0; pi < nb_ports; pi++) {
1383 if (pid < nb_ports && pid != pi)
/* Only a STOPPED port can be closed. */
1387 if (rte_atomic16_cmpset(&(port->port_status),
1388 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1389 printf("Port %d is now not stopped\n", pi);
1393 rte_eth_dev_close(pi);
1395 if (rte_atomic16_cmpset(&(port->port_status),
1396 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1397 printf("Port %d can not be set into stopped\n", pi);
/*
 * all_ports_stopped() - report whether every probed port is in the
 * RTE_PORT_STOPPED state.
 * NOTE(review): interior lines (return type, per-port lookup, return
 * statements) are missing from this chunk.
 */
1404 all_ports_stopped(void)
1407 struct rte_port *port;
1409 for (pi = 0; pi < nb_ports; pi++) {
1411 if (port->port_status != RTE_PORT_STOPPED)
/*
 * NOTE(review): fragment of a shutdown routine whose header falls in a
 * dropped interior gap of this chunk -- presumably the test-exit path that
 * stops and closes every port on teardown; verify against the full file.
 */
1423 for (pt_id = 0; pt_id < nb_ports; pt_id++) {
1424 printf("Stopping port %d...", pt_id);
1426 rte_eth_dev_close(pt_id);
/* Handler signature for a parameter-less test-menu command. */
1432 typedef void (*cmd_func_t)(void);
/*
 * One entry of the PMD test command menu: a command name mapped to its
 * handler.  NOTE(review): the closing "};" of the struct falls in a dropped
 * gap of this chunk.
 */
1433 struct pmd_test_command {
1434 const char *cmd_name;
1435 cmd_func_t cmd_func;
/* Entry count of pmd_test_menu[] (table itself is not visible here). */
1438 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1440 /* Check the link status of all ports in up to 9s, and print them finally */
/*
 * @port_num:  number of ports to poll (port ids 0 .. port_num-1).
 * @port_mask: bitmask selecting which of those ports to poll.
 *
 * Polls rte_eth_link_get_nowait() every CHECK_INTERVAL ms for up to
 * MAX_CHECK_TIME rounds; once all selected links are up (or on timeout)
 * it prints the per-port status one final time and returns.
 * NOTE(review): interior lines (all_ports_up init/reset, braces,
 * continue/break statements) are missing from this chunk.
 */
1442 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
1444 #define CHECK_INTERVAL 100 /* 100ms */
1445 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1446 uint8_t portid, count, all_ports_up, print_flag = 0;
1447 struct rte_eth_link link;
1449 printf("Checking link statuses...\n");
1451 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1453 for (portid = 0; portid < port_num; portid++) {
1454 if ((port_mask & (1 << portid)) == 0)
1456 memset(&link, 0, sizeof(link));
1457 rte_eth_link_get_nowait(portid, &link);
1458 /* print link status if flag set */
1459 if (print_flag == 1) {
1460 if (link.link_status)
1461 printf("Port %d Link Up - speed %u "
1462 "Mbps - %s\n", (uint8_t)portid,
1463 (unsigned)link.link_speed,
1464 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
/* NOTE(review): stray "\n" inside "half-duplex\n" double-terminates the
 * line (format already ends with \n) -- candidate one-char fix. */
1465 ("full-duplex") : ("half-duplex\n"));
1467 printf("Port %d Link Down\n",
1471 /* clear all_ports_up flag if any link down */
1472 if (link.link_status == 0) {
1477 /* after finally printing all link status, get out */
1478 if (print_flag == 1)
1481 if (all_ports_up == 0) {
1483 rte_delay_ms(CHECK_INTERVAL);
1486 /* set the print_flag if all ports up or timeout */
1487 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
/*
 * set_tx_queue_stats_mapping_registers() - program the NIC's TX
 * queue-to-stats-counter mapping for @port_id from the global
 * tx_queue_stats_mappings[] table (only entries matching this port with a
 * queue id below nb_txq are applied).  Marks the port's
 * tx_queue_stats_mapping_enabled flag when done.
 * NOTE(review): the error-check on `diag` and the return statements fall in
 * dropped gaps of this chunk -- verify against the full file.
 */
1494 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1498 uint8_t mapping_found = 0;
1500 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1501 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1502 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1503 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1504 tx_queue_stats_mappings[i].queue_id,
1505 tx_queue_stats_mappings[i].stats_counter_id);
1512 port->tx_queue_stats_mapping_enabled = 1;
/*
 * set_rx_queue_stats_mapping_registers() - RX counterpart of the TX mapping
 * setter: programs the NIC's RX queue-to-stats-counter mapping for @port_id
 * from the global rx_queue_stats_mappings[] table (entries matching this
 * port with a queue id below nb_rxq), then marks the port's
 * rx_queue_stats_mapping_enabled flag.
 * NOTE(review): the error-check on `diag` and the return statements fall in
 * dropped gaps of this chunk -- verify against the full file.
 */
1517 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1521 uint8_t mapping_found = 0;
1523 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1524 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1525 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1526 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1527 rx_queue_stats_mappings[i].queue_id,
1528 rx_queue_stats_mappings[i].stats_counter_id);
1535 port->rx_queue_stats_mapping_enabled = 1;
/*
 * map_port_queue_stats_mapping_registers() - apply both TX and RX
 * queue-stats mappings to port @pi.  A driver returning -ENOTSUP is
 * tolerated: the corresponding *_queue_stats_mapping_enabled flag is
 * cleared and a notice printed.  Other failures are fatal (rte_exit);
 * the exact failure condition lines are in dropped gaps of this chunk.
 */
1540 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1544 diag = set_tx_queue_stats_mapping_registers(pi, port);
1546 if (diag == -ENOTSUP) {
1547 port->tx_queue_stats_mapping_enabled = 0;
1548 printf("TX queue stats mapping not supported port id=%d\n", pi);
1551 rte_exit(EXIT_FAILURE,
1552 "set_tx_queue_stats_mapping_registers "
1553 "failed for port id=%d diag=%d\n",
1557 diag = set_rx_queue_stats_mapping_registers(pi, port);
1559 if (diag == -ENOTSUP) {
1560 port->rx_queue_stats_mapping_enabled = 0;
1561 printf("RX queue stats mapping not supported port id=%d\n", pi);
1564 rte_exit(EXIT_FAILURE,
1565 "set_rx_queue_stats_mapping_registers "
1566 "failed for port id=%d diag=%d\n",
/*
 * init_port_config() - seed every probed port's rte_port structure with the
 * global defaults: RX mode, flow-director config, RSS settings, RX/TX queue
 * thresholds, and the port MAC address; then programs the queue-stats
 * mapping registers (and bypass init when RTE_NIC_BYPASS is compiled in).
 * Visible logic: RSS hash (mq_mode = ETH_MQ_RX_RSS) is selected only when
 * rss_hf is non-zero and the port is neither in DCB mode nor SR-IOV
 * (max_vfs != 0); the branch conditions choosing between the two
 * rss_conf assignments fall in dropped gaps of this chunk -- presumably
 * keyed on nb_rxq/rss enablement; verify against the full file.
 */
1572 init_port_config(void)
1575 struct rte_port *port;
1577 for (pid = 0; pid < nb_ports; pid++) {
1579 port->dev_conf.rxmode = rx_mode;
1580 port->dev_conf.fdir_conf = fdir_conf;
1582 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1583 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1585 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1586 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1589 /* In SR-IOV mode, RSS mode is not available */
1590 if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1591 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1592 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1594 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
/* Per-queue threshold defaults from the global command-line settings. */
1597 port->rx_conf.rx_thresh = rx_thresh;
1598 port->rx_conf.rx_free_thresh = rx_free_thresh;
1599 port->rx_conf.rx_drop_en = rx_drop_en;
1600 port->tx_conf.tx_thresh = tx_thresh;
1601 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1602 port->tx_conf.tx_free_thresh = tx_free_thresh;
1603 port->tx_conf.txq_flags = txq_flags;
1605 rte_eth_macaddr_get(pid, &port->eth_addr);
1607 map_port_queue_stats_mapping_registers(pid, port);
1608 #ifdef RTE_NIC_BYPASS
1609 rte_eth_dev_bypass_init(pid);
/*
 * VLAN IDs 0..31 used by get_eth_dcb_conf() to populate the VMDQ+DCB
 * pool map (one pool per VLAN, modulo the pool count).
 * NOTE(review): the closing "};" of this initializer falls in a dropped
 * gap of this chunk.
 */
1614 const uint16_t vlan_tags[] = {
1615 0, 1, 2, 3, 4, 5, 6, 7,
1616 8, 9, 10, 11, 12, 13, 14, 15,
1617 16, 17, 18, 19, 20, 21, 22, 23,
1618 24, 25, 26, 27, 28, 29, 30, 31
/*
 * get_eth_dcb_conf() - fill @eth_conf with a DCB configuration derived from
 * @dcb_conf.  Two modes are built:
 *  - DCB_VT_ENABLED: VMDQ+DCB.  Pool count follows the TC count (4 TCs ->
 *    32 pools, else 16), each vlan_tags[] entry is mapped to a pool, and
 *    rx/tx mq_mode are set to the VMDQ_DCB variants.
 *  - otherwise: plain DCB rx/tx config with nb_tcs taken from @dcb_conf.
 * In both modes PFC support is OR-ed into dcb_capability_en when
 * dcb_conf->pfc_en is set.
 * NOTE(review): interior lines (declarations of i/dcb_q_mapping, braces,
 * return statement) are missing from this chunk.
 */
1622 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
1627 * Builds up the correct configuration for dcb+vt based on the vlan tags array
1628 * given above, and the number of traffic classes available for use.
1630 if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
1631 struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
1632 struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
1634 /* VMDQ+DCB RX and TX configrations */
1635 vmdq_rx_conf.enable_default_pool = 0;
1636 vmdq_rx_conf.default_pool = 0;
1637 vmdq_rx_conf.nb_queue_pools =
1638 (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1639 vmdq_tx_conf.nb_queue_pools =
1640 (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
/* One pool-map entry per VLAN tag, pools assigned round-robin. */
1642 vmdq_rx_conf.nb_pool_maps = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1643 for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
1644 vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[ i ];
1645 vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
1647 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1648 vmdq_rx_conf.dcb_queue[i] = i;
1649 vmdq_tx_conf.dcb_queue[i] = i;
1652 /*set DCB mode of RX and TX of multiple queues*/
1653 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1654 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1655 if (dcb_conf->pfc_en)
1656 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1658 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1660 (void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
1661 sizeof(struct rte_eth_vmdq_dcb_conf)));
1662 (void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
1663 sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
1666 struct rte_eth_dcb_rx_conf rx_conf;
1667 struct rte_eth_dcb_tx_conf tx_conf;
1669 /* queue mapping configuration of DCB RX and TX */
1670 if (dcb_conf->num_tcs == ETH_4_TCS)
1671 dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
1673 dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
1675 rx_conf.nb_tcs = dcb_conf->num_tcs;
1676 tx_conf.nb_tcs = dcb_conf->num_tcs;
1678 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
1679 rx_conf.dcb_queue[i] = i;
1680 tx_conf.dcb_queue[i] = i;
1682 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
1683 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1684 if (dcb_conf->pfc_en)
1685 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1687 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1689 (void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
1690 sizeof(struct rte_eth_dcb_rx_conf)));
1691 (void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
1692 sizeof(struct rte_eth_dcb_tx_conf)));
/*
 * init_port_dcb_config() - put port @pid into DCB mode using @dcb_conf.
 * Visible behavior: builds an rte_eth_conf via get_eth_dcb_conf(), copies
 * it into the port's dev_conf, applies DCB threshold defaults, enables HW
 * VLAN filtering, registers each vlan_tags[] entry in the VLAN filter
 * table (rx_vft_set), refreshes the MAC address and queue-stats mappings,
 * and finally marks the port with dcb_flag = 1.
 * NOTE(review): interior lines (local declarations, retval error check,
 * return statement) are missing from this chunk.
 */
1699 init_port_dcb_config(portid_t pid,struct dcb_config *dcb_conf)
1701 struct rte_eth_conf port_conf;
1702 struct rte_port *rte_port;
1707 /* rxq and txq configuration in dcb mode */
1710 rx_free_thresh = 64;
1712 memset(&port_conf,0,sizeof(struct rte_eth_conf));
1713 /* Enter DCB configuration status */
1716 nb_vlan = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1717 /*set configuration of DCB in vt mode and DCB in non-vt mode*/
1718 retval = get_eth_dcb_conf(&port_conf, dcb_conf);
1722 rte_port = &ports[pid];
1723 memcpy(&rte_port->dev_conf, &port_conf,sizeof(struct rte_eth_conf));
1725 rte_port->rx_conf.rx_thresh = rx_thresh;
1726 rte_port->rx_conf.rx_free_thresh = rx_free_thresh;
1727 rte_port->tx_conf.tx_thresh = tx_thresh;
1728 rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1729 rte_port->tx_conf.tx_free_thresh = tx_free_thresh;
/* Accept the DCB test VLANs in hardware. */
1731 rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1732 for (i = 0; i < nb_vlan; i++){
1733 rx_vft_set(pid, vlan_tags[i], 1);
1736 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1737 map_port_queue_stats_mapping_registers(pid, rte_port);
1739 rte_port->dcb_flag = 1;
1744 #ifdef RTE_EXEC_ENV_BAREMETAL
1749 main(int argc, char** argv)
1754 diag = rte_eal_init(argc, argv);
1756 rte_panic("Cannot init EAL\n");
1758 if (rte_pmd_init_all())
1759 rte_panic("Cannot init PMD\n");
1761 if (rte_eal_pci_probe())
1762 rte_panic("Cannot probe PCI\n");
1764 nb_ports = (portid_t) rte_eth_dev_count();
1766 rte_exit(EXIT_FAILURE, "No probed ethernet devices - "
1768 "CONFIG_RTE_LIBRTE_IGB_PMD=y and that "
1769 "CONFIG_RTE_LIBRTE_EM_PMD=y and that "
1770 "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in your "
1771 "configuration file\n");
1773 set_def_fwd_config();
1775 rte_panic("Empty set of forwarding logical cores - check the "
1776 "core mask supplied in the command parameters\n");
1781 launch_args_parse(argc, argv);
1783 if (nb_rxq > nb_txq)
1784 printf("Warning: nb_rxq=%d enables RSS configuration, "
1785 "but nb_txq=%d will prevent to fully test it.\n",
1789 if (start_port(RTE_PORT_ALL) != 0)
1790 rte_exit(EXIT_FAILURE, "Start ports failed\n");
1792 /* set all ports to promiscuous mode by default */
1793 for (port_id = 0; port_id < nb_ports; port_id++)
1794 rte_eth_promiscuous_enable(port_id);
1796 #ifdef RTE_LIBRTE_CMDLINE
1797 if (interactive == 1)
1805 printf("No commandline core given, start packet forwarding\n");
1806 start_packet_forwarding(0);
1807 printf("Press enter to exit\n");
1808 rc = read(0, &c, 1);