4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <sys/types.h>
44 #include <sys/queue.h>
51 #include <rte_common.h>
52 #include <rte_byteorder.h>
54 #include <rte_debug.h>
55 #include <rte_cycles.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
59 #include <rte_launch.h>
60 #include <rte_tailq.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
70 #include <rte_interrupts.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
80 #include "mempool_osdep.h"
82 uint16_t verbose_level = 0; /**< Silent by default. */
84 /* Use the master core for the command line? */
85 uint8_t interactive = 0;
88 * NUMA support configuration.
89 * When set, the NUMA support attempts to dispatch the allocation of the
90 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
91 * probed ports among the CPU sockets 0 and 1.
92 * Otherwise, all memory is allocated from CPU socket 0.
94 uint8_t numa_support = 0; /**< No NUMA support by default. */
97 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
100 uint8_t socket_num = UMA_NO_CONFIG;
103 * Use ANONYMOUS mapped memory (which might not be physically contiguous) for mbufs.
108 * Record the Ethernet address of peer target ports to which packets are
110 * Must be instantiated with the Ethernet addresses of the peer traffic generator
113 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
114 portid_t nb_peer_eth_addrs = 0;
117 * Probed Target Environment.
119 struct rte_port *ports; /**< For all probed ethernet ports. */
120 portid_t nb_ports; /**< Number of probed ethernet ports. */
121 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
122 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
125 * Test Forwarding Configuration.
126 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
127 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
129 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
130 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
131 portid_t nb_cfg_ports; /**< Number of configured ports. */
132 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
134 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
135 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
137 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
138 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
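/*
 * Illustrative example (not part of the original code): with two forwarding
 * ports and nb_rxq = 4, init_fwd_streams() below allocates
 * nb_fwd_streams = 2 * 4 = 8 streams, one per (port, RX queue) pair.
 */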
141 * Forwarding engines.
143 struct fwd_engine * fwd_engines[] = {
146 &mac_retry_fwd_engine,
150 #ifdef RTE_LIBRTE_IEEE1588
151 &ieee1588_fwd_engine,
156 struct fwd_config cur_fwd_config;
157 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
159 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
160 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
161 * specified on command-line. */
164 * Configuration of packet segments used by the "txonly" processing engine.
166 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
167 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
168 TXONLY_DEF_PACKET_LEN,
170 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
172 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
173 uint16_t mb_mempool_cache = DEF_PKT_BURST; /**< Size of mbuf mempool cache. */
175 /* Whether the current configuration is in DCB mode; 0 means it is not. */
176 uint8_t dcb_config = 0;
178 /* Whether DCB is currently under test. */
179 uint8_t dcb_test = 0;
181 /* The default queue mapping is DCB enabled with VT enabled. */
182 enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
185 * Configurable number of RX/TX queues.
187 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
188 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
191 * Configurable number of RX/TX ring descriptors.
193 #define RTE_TEST_RX_DESC_DEFAULT 128
194 #define RTE_TEST_TX_DESC_DEFAULT 512
195 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
196 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
199 * Configurable values of RX and TX ring threshold registers.
201 #define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
202 #define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
203 #define RX_WTHRESH 4 /**< Default value of RX write-back threshold register. */
205 #define TX_PTHRESH 36 /**< Default value of TX prefetch threshold register. */
206 #define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
207 #define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */
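/*
 * How these defaults reach the hardware (a sketch of the flow implemented
 * further below in init_port_config() and start_port()):
 *
 *   port->rx_conf.rx_thresh = rx_thresh;
 *   port->tx_conf.tx_thresh = tx_thresh;
 *   rte_eth_rx_queue_setup(pid, qi, nb_rxd, socket_id, &port->rx_conf, mbp);
 *   rte_eth_tx_queue_setup(pid, qi, nb_txd, socket_id, &port->tx_conf);
 */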
209 struct rte_eth_thresh rx_thresh = {
210 .pthresh = RX_PTHRESH,
211 .hthresh = RX_HTHRESH,
212 .wthresh = RX_WTHRESH,
215 struct rte_eth_thresh tx_thresh = {
216 .pthresh = TX_PTHRESH,
217 .hthresh = TX_HTHRESH,
218 .wthresh = TX_WTHRESH,
222 * Configurable value of RX free threshold.
224 uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */
227 * Configurable value of RX drop enable.
229 uint8_t rx_drop_en = 0; /* Do not drop packets when no RX descriptors are available (disabled by default). */
232 * Configurable value of TX free threshold.
234 uint16_t tx_free_thresh = 0; /* Use default values. */
237 * Configurable value of TX RS bit threshold.
239 uint16_t tx_rs_thresh = 0; /* Use default values. */
242 * Configurable value of TX queue flags.
244 uint32_t txq_flags = 0; /* No flags set. */
247 * Receive Side Scaling (RSS) configuration.
249 uint16_t rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6; /* RSS IP by default. */
252 * Port topology configuration
254 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
257 * Avoid flushing all the RX streams before starting forwarding.
259 uint8_t no_flush_rx = 0; /* Flush RX queues by default. */
262 * NIC bypass mode configuration options.
264 #ifdef RTE_NIC_BYPASS
266 /* The NIC bypass watchdog timeout. */
267 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
272 * Ethernet device configuration.
274 struct rte_eth_rxmode rx_mode = {
275 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
277 .header_split = 0, /**< Header Split disabled. */
278 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
279 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
280 .hw_vlan_strip = 1, /**< VLAN strip enabled. */
281 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
282 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
283 .hw_strip_crc = 0, /**< CRC stripping by hardware disabled. */
286 struct rte_fdir_conf fdir_conf = {
287 .mode = RTE_FDIR_MODE_NONE,
288 .pballoc = RTE_FDIR_PBALLOC_64K,
289 .status = RTE_FDIR_REPORT_STATUS,
290 .flexbytes_offset = 0x6,
294 static volatile int test_done = 1; /* stop packet forwarding when set to 1. */
296 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
297 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
299 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
300 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
302 uint16_t nb_tx_queue_stats_mappings = 0;
303 uint16_t nb_rx_queue_stats_mappings = 0;
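/*
 * Illustrative mapping entry (entries are normally filled in from the
 * command-line arguments, not hard-coded): an element such as
 *   { .port_id = 0, .queue_id = 2, .stats_counter_id = 5 }
 * in tx_queue_stats_mappings_array asks set_tx_queue_stats_mapping_registers()
 * below to count TX queue 2 of port 0 in per-queue stats register 5.
 */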
305 /* Forward function declarations */
306 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
307 static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);
310 * Check if all the ports are started.
311 * If yes, return positive value. If not, return zero.
313 static int all_ports_started(void);
316 * Setup default configuration.
319 set_default_fwd_lcores_config(void)
325 for (i = 0; i < RTE_MAX_LCORE; i++) {
326 if (! rte_lcore_is_enabled(i))
328 if (i == rte_get_master_lcore())
330 fwd_lcores_cpuids[nb_lc++] = i;
332 nb_lcores = (lcoreid_t) nb_lc;
333 nb_cfg_lcores = nb_lcores;
338 set_def_peer_eth_addrs(void)
342 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
343 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
344 peer_eth_addrs[i].addr_bytes[5] = i;
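/*
 * Example (assuming ETHER_LOCAL_ADMIN_ADDR is the locally-administered
 * bit 0x02, as defined in rte_ether.h): the default peer address of
 * port 3 becomes 02:00:00:00:00:03.
 */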
349 set_default_fwd_ports_config(void)
353 for (pt_id = 0; pt_id < nb_ports; pt_id++)
354 fwd_ports_ids[pt_id] = pt_id;
356 nb_cfg_ports = nb_ports;
357 nb_fwd_ports = nb_ports;
361 set_def_fwd_config(void)
363 set_default_fwd_lcores_config();
364 set_def_peer_eth_addrs();
365 set_default_fwd_ports_config();
369 * Configuration initialisation done once at init time.
371 struct mbuf_ctor_arg {
372 uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
373 uint16_t seg_buf_size; /**< size of data segment in mbuf. */
376 struct mbuf_pool_ctor_arg {
377 uint16_t seg_buf_size; /**< size of data segment in mbuf. */
381 testpmd_mbuf_ctor(struct rte_mempool *mp,
384 __attribute__((unused)) unsigned i)
386 struct mbuf_ctor_arg *mb_ctor_arg;
389 mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
390 mb = (struct rte_mbuf *) raw_mbuf;
392 mb->type = RTE_MBUF_PKT;
394 mb->buf_addr = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
395 mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
396 mb_ctor_arg->seg_buf_offset);
397 mb->buf_len = mb_ctor_arg->seg_buf_size;
398 mb->type = RTE_MBUF_PKT;
400 mb->pkt.data = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
402 mb->pkt.vlan_macip.data = 0;
403 mb->pkt.hash.rss = 0;
407 testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
410 struct mbuf_pool_ctor_arg *mbp_ctor_arg;
411 struct rte_pktmbuf_pool_private *mbp_priv;
413 if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
414 printf("%s(%s) private_data_size %d < %d\n",
415 __func__, mp->name, (int) mp->private_data_size,
416 (int) sizeof(struct rte_pktmbuf_pool_private));
419 mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
420 mbp_priv = rte_mempool_get_priv(mp);
421 mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
425 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
426 unsigned int socket_id)
428 char pool_name[RTE_MEMPOOL_NAMESIZE];
429 struct rte_mempool *rte_mp;
430 struct mbuf_pool_ctor_arg mbp_ctor_arg;
431 struct mbuf_ctor_arg mb_ctor_arg;
434 mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
436 mb_ctor_arg.seg_buf_offset =
437 (uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
438 mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
439 mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
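/*
 * Resulting layout of each mempool element (illustrative; the headroom
 * value assumes the usual RTE_PKTMBUF_HEADROOM build default of 128 bytes):
 *
 *   [ struct rte_mbuf, rounded up to a cache line ]
 *   [ headroom (128 bytes) | packet data (mbuf_seg_size bytes) ]
 *
 * testpmd_mbuf_ctor() above points mb->buf_addr just past the rounded-up
 * rte_mbuf header and mb->pkt.data RTE_PKTMBUF_HEADROOM bytes further in.
 */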
440 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
442 #ifdef RTE_LIBRTE_PMD_XENVIRT
443 rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
444 (unsigned) mb_mempool_cache,
445 sizeof(struct rte_pktmbuf_pool_private),
446 testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
447 testpmd_mbuf_ctor, &mb_ctor_arg,
454 rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
455 (unsigned) mb_mempool_cache,
456 sizeof(struct rte_pktmbuf_pool_private),
457 testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
458 testpmd_mbuf_ctor, &mb_ctor_arg,
461 rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size,
462 (unsigned) mb_mempool_cache,
463 sizeof(struct rte_pktmbuf_pool_private),
464 testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
465 testpmd_mbuf_ctor, &mb_ctor_arg,
470 if (rte_mp == NULL) {
471 rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
472 "failed\n", socket_id);
473 } else if (verbose_level > 0) {
474 rte_mempool_dump(rte_mp);
482 struct rte_port *port;
483 struct rte_mempool *mbp;
484 unsigned int nb_mbuf_per_pool;
486 uint8_t port_per_socket[MAX_SOCKET];
488 memset(port_per_socket,0,MAX_SOCKET);
489 /* Configuration of logical cores. */
490 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
491 sizeof(struct fwd_lcore *) * nb_lcores,
493 if (fwd_lcores == NULL) {
494 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
495 "failed\n", nb_lcores);
497 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
498 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
499 sizeof(struct fwd_lcore),
501 if (fwd_lcores[lc_id] == NULL) {
502 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
505 fwd_lcores[lc_id]->cpuid_idx = lc_id;
509 * Create mbuf pools.
510 * If NUMA support is disabled, create a single mbuf pool in
511 * socket 0 memory by default.
512 * Otherwise, create an mbuf pool in the memory of sockets 0 and 1.
514 * Use the maximum values of nb_rxd and nb_txd here, so that nb_rxd and
515 * nb_txd can be re-configured at run time.
517 if (param_total_num_mbufs)
518 nb_mbuf_per_pool = param_total_num_mbufs;
520 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
521 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
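/*
 * Worked example (illustrative only; the real values of the constants come
 * from testpmd.h): if RTE_TEST_RX_DESC_MAX and RTE_TEST_TX_DESC_MAX were
 * both 2048, with 4 lcores, mb_mempool_cache = 32 and MAX_PKT_BURST = 512,
 * this would give
 *   nb_mbuf_per_pool = 2048 + (4 * 32) + 2048 + 512 = 4736 mbufs,
 * scaled by nb_ports below when a single pool is shared by all ports.
 */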
524 nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
528 if (socket_num == UMA_NO_CONFIG)
529 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
531 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
535 * Record which mbuf pool each logical core should use, if needed.
537 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
538 mbp = mbuf_pool_find(rte_lcore_to_socket_id(lc_id));
540 mbp = mbuf_pool_find(0);
541 fwd_lcores[lc_id]->mbp = mbp;
544 /* Configuration of Ethernet ports. */
545 ports = rte_zmalloc("testpmd: ports",
546 sizeof(struct rte_port) * nb_ports,
549 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
550 "failed\n", nb_ports);
553 for (pid = 0; pid < nb_ports; pid++) {
555 rte_eth_dev_info_get(pid, &port->dev_info);
558 if (port_numa[pid] != NUMA_NO_CONFIG)
559 port_per_socket[port_numa[pid]]++;
561 uint32_t socket_id = rte_eth_dev_socket_id(pid);
562 port_per_socket[socket_id]++;
566 /* set flag to initialize port/queue */
567 port->need_reconfig = 1;
568 port->need_reconfig_queues = 1;
573 unsigned int nb_mbuf;
575 if (param_total_num_mbufs)
576 nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
578 for (i = 0; i < MAX_SOCKET; i++) {
579 nb_mbuf = (nb_mbuf_per_pool *
582 mbuf_pool_create(mbuf_data_size,
587 /* Configuration of packet forwarding streams. */
588 if (init_fwd_streams() < 0)
589 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
593 init_fwd_streams(void)
596 struct rte_port *port;
597 streamid_t sm_id, nb_fwd_streams_new;
599 /* Set the socket id according to whether NUMA is enabled or not. */
600 for (pid = 0; pid < nb_ports; pid++) {
602 if (nb_rxq > port->dev_info.max_rx_queues) {
603 printf("Fail: nb_rxq(%d) is greater than "
604 "max_rx_queues(%d)\n", nb_rxq,
605 port->dev_info.max_rx_queues);
608 if (nb_txq > port->dev_info.max_tx_queues) {
609 printf("Fail: nb_txq(%d) is greater than "
610 "max_tx_queues(%d)\n", nb_txq,
611 port->dev_info.max_tx_queues);
615 port->socket_id = rte_eth_dev_socket_id(pid);
617 if (socket_num == UMA_NO_CONFIG)
620 port->socket_id = socket_num;
624 nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
625 if (nb_fwd_streams_new == nb_fwd_streams)
628 if (fwd_streams != NULL) {
629 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
630 if (fwd_streams[sm_id] == NULL)
632 rte_free(fwd_streams[sm_id]);
633 fwd_streams[sm_id] = NULL;
635 rte_free(fwd_streams);
640 nb_fwd_streams = nb_fwd_streams_new;
641 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
642 sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
643 if (fwd_streams == NULL)
644 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
645 "failed\n", nb_fwd_streams);
647 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
648 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
649 sizeof(struct fwd_stream), CACHE_LINE_SIZE);
650 if (fwd_streams[sm_id] == NULL)
651 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
658 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
660 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
662 unsigned int total_burst;
663 unsigned int nb_burst;
664 unsigned int burst_stats[3];
665 uint16_t pktnb_stats[3];
667 int burst_percent[3];
670 * First compute the total number of packet bursts and the
671 * two highest numbers of bursts of the same number of packets.
674 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
675 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
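/*
 * Example of the resulting report (illustrative): if 70% of the bursts
 * contained 32 packets and 25% contained 1 packet, the lines printed below
 * read something like
 *   "  RX-bursts : 1000 [70% of 32 pkts + 25% of 1 pkts + 5% of others]"
 */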
676 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
677 nb_burst = pbs->pkt_burst_spread[nb_pkt];
680 total_burst += nb_burst;
681 if (nb_burst > burst_stats[0]) {
682 burst_stats[1] = burst_stats[0];
683 pktnb_stats[1] = pktnb_stats[0];
684 burst_stats[0] = nb_burst;
685 pktnb_stats[0] = nb_pkt;
688 if (total_burst == 0)
690 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
691 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
692 burst_percent[0], (int) pktnb_stats[0]);
693 if (burst_stats[0] == total_burst) {
697 if (burst_stats[0] + burst_stats[1] == total_burst) {
698 printf(" + %d%% of %d pkts]\n",
699 100 - burst_percent[0], pktnb_stats[1]);
702 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
703 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
704 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
705 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
708 printf(" + %d%% of %d pkts + %d%% of others]\n",
709 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
711 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
714 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
716 struct rte_port *port;
719 static const char *fwd_stats_border = "----------------------";
721 port = &ports[port_id];
722 printf("\n %s Forward statistics for port %-2d %s\n",
723 fwd_stats_border, port_id, fwd_stats_border);
725 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
726 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
728 stats->ipackets, stats->ierrors,
729 (uint64_t) (stats->ipackets + stats->ierrors));
731 if (cur_fwd_eng == &csum_fwd_engine)
732 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
733 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
735 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
737 stats->opackets, port->tx_dropped,
738 (uint64_t) (stats->opackets + port->tx_dropped));
740 if (stats->rx_nombuf > 0)
741 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
745 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
747 stats->ipackets, stats->ierrors,
748 (uint64_t) (stats->ipackets + stats->ierrors));
750 if (cur_fwd_eng == &csum_fwd_engine)
751 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
752 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
754 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
756 stats->opackets, port->tx_dropped,
757 (uint64_t) (stats->opackets + port->tx_dropped));
759 if (stats->rx_nombuf > 0)
760 printf(" RX-nombufs:%14"PRIu64"\n", stats->rx_nombuf);
762 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
764 pkt_burst_stats_display("RX",
765 &port->rx_stream->rx_burst_stats);
767 pkt_burst_stats_display("TX",
768 &port->tx_stream->tx_burst_stats);
771 if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
772 printf(" Fdirmiss:%14"PRIu64" Fdirmatch:%14"PRIu64"\n",
776 if (port->rx_queue_stats_mapping_enabled) {
778 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
779 printf(" Stats reg %2d RX-packets:%14"PRIu64
780 " RX-errors:%14"PRIu64
781 " RX-bytes:%14"PRIu64"\n",
782 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
786 if (port->tx_queue_stats_mapping_enabled) {
787 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
788 printf(" Stats reg %2d TX-packets:%14"PRIu64
789 " TX-bytes:%14"PRIu64"\n",
790 i, stats->q_opackets[i], stats->q_obytes[i]);
794 printf(" %s--------------------------------%s\n",
795 fwd_stats_border, fwd_stats_border);
799 fwd_stream_stats_display(streamid_t stream_id)
801 struct fwd_stream *fs;
802 static const char *fwd_top_stats_border = "-------";
804 fs = fwd_streams[stream_id];
805 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
806 (fs->fwd_dropped == 0))
808 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
809 "TX Port=%2d/Queue=%2d %s\n",
810 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
811 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
812 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
813 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
815 /* If in checksum forwarding mode, also display checksum errors. */
816 if (cur_fwd_eng == &csum_fwd_engine) {
817 printf(" RX - bad IP checksum: %-14u RX - bad L4 checksum: "
818 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
821 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
822 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
823 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
828 flush_fwd_rx_queues(void)
830 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
838 for (j = 0; j < 2; j++) {
839 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
840 for (rxq = 0; rxq < nb_rxq; rxq++) {
841 port_id = fwd_ports_ids[rxp];
843 nb_rx = rte_eth_rx_burst(port_id, rxq,
844 pkts_burst, MAX_PKT_BURST);
845 for (i = 0; i < nb_rx; i++)
846 rte_pktmbuf_free(pkts_burst[i]);
850 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
855 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
857 struct fwd_stream **fsm;
861 fsm = &fwd_streams[fc->stream_idx];
862 nb_fs = fc->stream_nb;
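/*
 * Each forwarding lcore owns the slice
 * fwd_streams[fc->stream_idx .. fc->stream_idx + fc->stream_nb - 1] and
 * keeps invoking the engine's per-stream handler on it until
 * stop_packet_forwarding() sets fc->stopped.
 */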
864 for (sm_id = 0; sm_id < nb_fs; sm_id++)
865 (*pkt_fwd)(fsm[sm_id]);
866 } while (! fc->stopped);
870 start_pkt_forward_on_core(void *fwd_arg)
872 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
873 cur_fwd_config.fwd_eng->packet_fwd);
878 * Run the TXONLY packet forwarding engine to send a single burst of packets.
879 * Used to start communication flows in network loopback test configurations.
882 run_one_txonly_burst_on_core(void *fwd_arg)
884 struct fwd_lcore *fwd_lc;
885 struct fwd_lcore tmp_lcore;
887 fwd_lc = (struct fwd_lcore *) fwd_arg;
889 tmp_lcore.stopped = 1;
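/*
 * With stopped already set, the do/while loop in run_pkt_fwd_on_lcore()
 * makes exactly one pass over the streams, i.e. it sends a single TXONLY
 * burst per stream, which is all this helper is meant to do.
 */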
890 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
895 * Launch packet forwarding:
896 * - Set up the per-port forwarding context.
897 * - Launch logical cores with their forwarding configuration.
900 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
902 port_fwd_begin_t port_fwd_begin;
907 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
908 if (port_fwd_begin != NULL) {
909 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
910 (*port_fwd_begin)(fwd_ports_ids[i]);
912 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
913 lc_id = fwd_lcores_cpuids[i];
914 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
915 fwd_lcores[i]->stopped = 0;
916 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
917 fwd_lcores[i], lc_id);
919 printf("launch lcore %u failed - diag=%d\n",
926 * Launch packet forwarding configuration.
929 start_packet_forwarding(int with_tx_first)
931 port_fwd_begin_t port_fwd_begin;
932 port_fwd_end_t port_fwd_end;
933 struct rte_port *port;
938 if (all_ports_started() == 0) {
939 printf("Not all ports were started\n");
942 if (test_done == 0) {
943 printf("Packet forwarding already started\n");
947 for (i = 0; i < nb_fwd_ports; i++) {
948 pt_id = fwd_ports_ids[i];
949 port = &ports[pt_id];
950 if (!port->dcb_flag) {
951 printf("In DCB mode, all forwarding ports must "
952 "be configured in this mode.\n");
956 if (nb_fwd_lcores == 1) {
957 printf("In DCB mode, the number of forwarding cores "
958 "should be larger than 1.\n");
965 flush_fwd_rx_queues();
968 rxtx_config_display();
970 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
971 pt_id = fwd_ports_ids[i];
972 port = &ports[pt_id];
973 rte_eth_stats_get(pt_id, &port->stats);
974 port->tx_dropped = 0;
976 map_port_queue_stats_mapping_registers(pt_id, port);
978 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
979 fwd_streams[sm_id]->rx_packets = 0;
980 fwd_streams[sm_id]->tx_packets = 0;
981 fwd_streams[sm_id]->fwd_dropped = 0;
982 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
983 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
985 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
986 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
987 sizeof(fwd_streams[sm_id]->rx_burst_stats));
988 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
989 sizeof(fwd_streams[sm_id]->tx_burst_stats));
991 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
992 fwd_streams[sm_id]->core_cycles = 0;
996 port_fwd_begin = tx_only_engine.port_fwd_begin;
997 if (port_fwd_begin != NULL) {
998 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
999 (*port_fwd_begin)(fwd_ports_ids[i]);
1001 launch_packet_forwarding(run_one_txonly_burst_on_core);
1002 rte_eal_mp_wait_lcore();
1003 port_fwd_end = tx_only_engine.port_fwd_end;
1004 if (port_fwd_end != NULL) {
1005 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1006 (*port_fwd_end)(fwd_ports_ids[i]);
1009 launch_packet_forwarding(start_pkt_forward_on_core);
1013 stop_packet_forwarding(void)
1015 struct rte_eth_stats stats;
1016 struct rte_port *port;
1017 port_fwd_end_t port_fwd_end;
1022 uint64_t total_recv;
1023 uint64_t total_xmit;
1024 uint64_t total_rx_dropped;
1025 uint64_t total_tx_dropped;
1026 uint64_t total_rx_nombuf;
1027 uint64_t tx_dropped;
1028 uint64_t rx_bad_ip_csum;
1029 uint64_t rx_bad_l4_csum;
1030 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1031 uint64_t fwd_cycles;
1033 static const char *acc_stats_border = "+++++++++++++++";
1035 if (all_ports_started() == 0) {
1036 printf("Not all ports were started\n");
1040 printf("Packet forwarding not started\n");
1043 printf("Telling cores to stop...");
1044 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1045 fwd_lcores[lc_id]->stopped = 1;
1046 printf("\nWaiting for lcores to finish...\n");
1047 rte_eal_mp_wait_lcore();
1048 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1049 if (port_fwd_end != NULL) {
1050 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1051 pt_id = fwd_ports_ids[i];
1052 (*port_fwd_end)(pt_id);
1055 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1058 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1059 if (cur_fwd_config.nb_fwd_streams >
1060 cur_fwd_config.nb_fwd_ports) {
1061 fwd_stream_stats_display(sm_id);
1062 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1063 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1065 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1067 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1070 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1071 tx_dropped = (uint64_t) (tx_dropped +
1072 fwd_streams[sm_id]->fwd_dropped);
1073 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1076 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1077 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1078 fwd_streams[sm_id]->rx_bad_ip_csum);
1079 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1083 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1084 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1085 fwd_streams[sm_id]->rx_bad_l4_csum);
1086 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1089 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1090 fwd_cycles = (uint64_t) (fwd_cycles +
1091 fwd_streams[sm_id]->core_cycles);
1096 total_rx_dropped = 0;
1097 total_tx_dropped = 0;
1098 total_rx_nombuf = 0;
1099 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1100 pt_id = fwd_ports_ids[i];
1102 port = &ports[pt_id];
1103 rte_eth_stats_get(pt_id, &stats);
1104 stats.ipackets -= port->stats.ipackets;
1105 port->stats.ipackets = 0;
1106 stats.opackets -= port->stats.opackets;
1107 port->stats.opackets = 0;
1108 stats.ibytes -= port->stats.ibytes;
1109 port->stats.ibytes = 0;
1110 stats.obytes -= port->stats.obytes;
1111 port->stats.obytes = 0;
1112 stats.ierrors -= port->stats.ierrors;
1113 port->stats.ierrors = 0;
1114 stats.oerrors -= port->stats.oerrors;
1115 port->stats.oerrors = 0;
1116 stats.rx_nombuf -= port->stats.rx_nombuf;
1117 port->stats.rx_nombuf = 0;
1118 stats.fdirmatch -= port->stats.fdirmatch;
1119 port->stats.fdirmatch = 0;
1120 stats.fdirmiss -= port->stats.fdirmiss;
1121 port->stats.fdirmiss = 0;
1123 total_recv += stats.ipackets;
1124 total_xmit += stats.opackets;
1125 total_rx_dropped += stats.ierrors;
1126 total_tx_dropped += port->tx_dropped;
1127 total_rx_nombuf += stats.rx_nombuf;
1129 fwd_port_stats_display(pt_id, &stats);
1131 printf("\n %s Accumulated forward statistics for all ports"
1133 acc_stats_border, acc_stats_border);
1134 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1136 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1138 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1139 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1140 if (total_rx_nombuf > 0)
1141 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1142 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1144 acc_stats_border, acc_stats_border);
1145 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1147 printf("\n CPU cycles/packet=%u (total cycles="
1148 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1149 (unsigned int)(fwd_cycles / total_recv),
1150 fwd_cycles, total_recv);
1152 printf("\nDone.\n");
1157 all_ports_started(void)
1160 struct rte_port *port;
1162 for (pi = 0; pi < nb_ports; pi++) {
1164 /* Check if there is a port which is not started */
1165 if (port->port_status != RTE_PORT_STARTED)
1169 /* All ports are started. */
1174 start_port(portid_t pid)
1176 int diag, need_check_link_status = 0;
1179 struct rte_port *port;
1181 if (test_done == 0) {
1182 printf("Please stop forwarding first\n");
1186 if (init_fwd_streams() < 0) {
1187 printf("Fail from init_fwd_streams()\n");
1193 for (pi = 0; pi < nb_ports; pi++) {
1194 if (pid < nb_ports && pid != pi)
1198 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1199 RTE_PORT_HANDLING) == 0) {
1200 printf("Port %d is now not stopped\n", pi);
1204 if (port->need_reconfig > 0) {
1205 port->need_reconfig = 0;
1207 printf("Configuring Port %d (socket %d)\n", pi,
1208 rte_eth_dev_socket_id(pi));
1209 /* configure port */
1210 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1213 if (rte_atomic16_cmpset(&(port->port_status),
1214 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1215 printf("Port %d can not be set back "
1216 "to stopped\n", pi);
1217 printf("Failed to configure port %d\n", pi);
1218 /* try to reconfigure port next time */
1219 port->need_reconfig = 1;
1223 if (port->need_reconfig_queues > 0) {
1224 port->need_reconfig_queues = 0;
1225 /* setup tx queues */
1226 for (qi = 0; qi < nb_txq; qi++) {
1227 if ((numa_support) &&
1228 (txring_numa[pi] != NUMA_NO_CONFIG))
1229 diag = rte_eth_tx_queue_setup(pi, qi,
1230 nb_txd,txring_numa[pi],
1233 diag = rte_eth_tx_queue_setup(pi, qi,
1234 nb_txd,port->socket_id,
1240 /* Fail to setup tx queue, return */
1241 if (rte_atomic16_cmpset(&(port->port_status),
1243 RTE_PORT_STOPPED) == 0)
1244 printf("Port %d can not be set back "
1245 "to stopped\n", pi);
1246 printf("Failed to configure port %d TX queues\n", pi);
1247 /* try to reconfigure queues next time */
1248 port->need_reconfig_queues = 1;
1251 /* setup rx queues */
1252 for (qi = 0; qi < nb_rxq; qi++) {
1253 if ((numa_support) &&
1254 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1255 struct rte_mempool * mp =
1256 mbuf_pool_find(rxring_numa[pi]);
1258 printf("Failed to setup RX queue: "
1259 "no mempool allocation "
1260 "on socket %d\n",
1265 diag = rte_eth_rx_queue_setup(pi, qi,
1266 nb_rxd,rxring_numa[pi],
1267 &(port->rx_conf),mp);
1270 diag = rte_eth_rx_queue_setup(pi, qi,
1271 nb_rxd,port->socket_id,
1273 mbuf_pool_find(port->socket_id));
1279 /* Fail to setup rx queue, return */
1280 if (rte_atomic16_cmpset(&(port->port_status),
1282 RTE_PORT_STOPPED) == 0)
1283 printf("Port %d can not be set back "
1284 "to stopped\n", pi);
1285 printf("Failed to configure port %d RX queues\n", pi);
1286 /* try to reconfigure queues next time */
1287 port->need_reconfig_queues = 1;
1292 if (rte_eth_dev_start(pi) < 0) {
1293 printf("Failed to start port %d\n", pi);
1295 /* Failed to start the port; set it back to stopped. */
1296 if (rte_atomic16_cmpset(&(port->port_status),
1297 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1298 printf("Port %d can not be set back to "
1303 if (rte_atomic16_cmpset(&(port->port_status),
1304 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1305 printf("Port %d can not be set into started\n", pi);
1307 /* at least one port started, need checking link status */
1308 need_check_link_status = 1;
1311 if (need_check_link_status)
1312 check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1314 printf("Please stop the ports first\n");
1321 stop_port(portid_t pid)
1324 struct rte_port *port;
1325 int need_check_link_status = 0;
1327 if (test_done == 0) {
1328 printf("Please stop forwarding first\n");
1335 printf("Stopping ports...\n");
1337 for (pi = 0; pi < nb_ports; pi++) {
1338 if (pid < nb_ports && pid != pi)
1342 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1343 RTE_PORT_HANDLING) == 0)
1346 rte_eth_dev_stop(pi);
1348 if (rte_atomic16_cmpset(&(port->port_status),
1349 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1350 printf("Port %d can not be set into stopped\n", pi);
1351 need_check_link_status = 1;
1353 if (need_check_link_status)
1354 check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1360 close_port(portid_t pid)
1363 struct rte_port *port;
1365 if (test_done == 0) {
1366 printf("Please stop forwarding first\n");
1370 printf("Closing ports...\n");
1372 for (pi = 0; pi < nb_ports; pi++) {
1373 if (pid < nb_ports && pid != pi)
1377 if (rte_atomic16_cmpset(&(port->port_status),
1378 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1379 printf("Port %d is now not stopped\n", pi);
1383 rte_eth_dev_close(pi);
1385 if (rte_atomic16_cmpset(&(port->port_status),
1386 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1387 printf("Port %d can not be set into closed\n", pi);
1394 all_ports_stopped(void)
1397 struct rte_port *port;
1399 for (pi = 0; pi < nb_ports; pi++) {
1401 if (port->port_status != RTE_PORT_STOPPED)
1413 for (pt_id = 0; pt_id < nb_ports; pt_id++) {
1414 printf("Stopping port %d...", pt_id);
1416 rte_eth_dev_close(pt_id);
1422 typedef void (*cmd_func_t)(void);
1423 struct pmd_test_command {
1424 const char *cmd_name;
1425 cmd_func_t cmd_func;
1428 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1430 /* Check the link status of all ports in up to 9s, and print the final status */
1432 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
1434 #define CHECK_INTERVAL 100 /* 100ms */
1435 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1436 uint8_t portid, count, all_ports_up, print_flag = 0;
1437 struct rte_eth_link link;
1439 printf("Checking link statuses...\n");
1441 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1443 for (portid = 0; portid < port_num; portid++) {
1444 if ((port_mask & (1 << portid)) == 0)
1446 memset(&link, 0, sizeof(link));
1447 rte_eth_link_get_nowait(portid, &link);
1448 /* print link status if flag set */
1449 if (print_flag == 1) {
1450 if (link.link_status)
1451 printf("Port %d Link Up - speed %u "
1452 "Mbps - %s\n", (uint8_t)portid,
1453 (unsigned)link.link_speed,
1454 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1455 ("full-duplex") : ("half-duplex"));
1457 printf("Port %d Link Down\n",
1461 /* clear all_ports_up flag if any link down */
1462 if (link.link_status == 0) {
1467 /* after finally printing all link status, get out */
1468 if (print_flag == 1)
1471 if (all_ports_up == 0) {
1473 rte_delay_ms(CHECK_INTERVAL);
1476 /* set the print_flag if all ports up or timeout */
1477 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1484 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1488 uint8_t mapping_found = 0;
1490 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1491 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1492 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1493 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1494 tx_queue_stats_mappings[i].queue_id,
1495 tx_queue_stats_mappings[i].stats_counter_id);
1502 port->tx_queue_stats_mapping_enabled = 1;
1507 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1511 uint8_t mapping_found = 0;
1513 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1514 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1515 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1516 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1517 rx_queue_stats_mappings[i].queue_id,
1518 rx_queue_stats_mappings[i].stats_counter_id);
1525 port->rx_queue_stats_mapping_enabled = 1;
1530 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1534 diag = set_tx_queue_stats_mapping_registers(pi, port);
1536 if (diag == -ENOTSUP) {
1537 port->tx_queue_stats_mapping_enabled = 0;
1538 printf("TX queue stats mapping not supported port id=%d\n", pi);
1541 rte_exit(EXIT_FAILURE,
1542 "set_tx_queue_stats_mapping_registers "
1543 "failed for port id=%d diag=%d\n",
1547 diag = set_rx_queue_stats_mapping_registers(pi, port);
1549 if (diag == -ENOTSUP) {
1550 port->rx_queue_stats_mapping_enabled = 0;
1551 printf("RX queue stats mapping not supported port id=%d\n", pi);
1554 rte_exit(EXIT_FAILURE,
1555 "set_rx_queue_stats_mapping_registers "
1556 "failed for port id=%d diag=%d\n",
1562 init_port_config(void)
1565 struct rte_port *port;
1567 for (pid = 0; pid < nb_ports; pid++) {
1569 port->dev_conf.rxmode = rx_mode;
1570 port->dev_conf.fdir_conf = fdir_conf;
1572 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1573 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1575 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1576 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1578 port->rx_conf.rx_thresh = rx_thresh;
1579 port->rx_conf.rx_free_thresh = rx_free_thresh;
1580 port->rx_conf.rx_drop_en = rx_drop_en;
1581 port->tx_conf.tx_thresh = tx_thresh;
1582 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1583 port->tx_conf.tx_free_thresh = tx_free_thresh;
1584 port->tx_conf.txq_flags = txq_flags;
1586 rte_eth_macaddr_get(pid, &port->eth_addr);
1588 map_port_queue_stats_mapping_registers(pid, port);
1589 #ifdef RTE_NIC_BYPASS
1590 rte_eth_dev_bypass_init(pid);
1595 const uint16_t vlan_tags[] = {
1596 0, 1, 2, 3, 4, 5, 6, 7,
1597 8, 9, 10, 11, 12, 13, 14, 15,
1598 16, 17, 18, 19, 20, 21, 22, 23,
1599 24, 25, 26, 27, 28, 29, 30, 31
1603 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
1608 * Builds up the correct configuration for dcb+vt based on the vlan tags array
1609 * given above, and the number of traffic classes available for use.
1611 if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
1612 struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
1613 struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
1615 /* VMDQ+DCB RX and TX configurations */
1616 vmdq_rx_conf.enable_default_pool = 0;
1617 vmdq_rx_conf.default_pool = 0;
1618 vmdq_rx_conf.nb_queue_pools =
1619 (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1620 vmdq_tx_conf.nb_queue_pools =
1621 (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1623 vmdq_rx_conf.nb_pool_maps = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
1624 for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
1625 vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[ i ];
1626 vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
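/*
 * Example (illustrative): with ETH_16_POOLS, i.e. 16 queue pools, VLAN
 * tags 0 and 16 both map to pool 0, tags 1 and 17 to pool 1, and so on,
 * since the 32 vlan_tags entries wrap around modulo the pool count.
 */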
1628 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1629 vmdq_rx_conf.dcb_queue[i] = i;
1630 vmdq_tx_conf.dcb_queue[i] = i;
1633 /* Set the DCB mode for RX and TX multi-queue operation. */
1634 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1635 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1636 if (dcb_conf->pfc_en)
1637 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1639 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1641 (void)(rte_memcpy(ð_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
1642 sizeof(struct rte_eth_vmdq_dcb_conf)));
1643 (void)(rte_memcpy(ð_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
1644 sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
1647 struct rte_eth_dcb_rx_conf rx_conf;
1648 struct rte_eth_dcb_tx_conf tx_conf;
1650 /* queue mapping configuration of DCB RX and TX */
1651 if (dcb_conf->num_tcs == ETH_4_TCS)
1652 dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
1654 dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
1656 rx_conf.nb_tcs = dcb_conf->num_tcs;
1657 tx_conf.nb_tcs = dcb_conf->num_tcs;
1659 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
1660 rx_conf.dcb_queue[i] = i;
1661 tx_conf.dcb_queue[i] = i;
1663 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
1664 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1665 if (dcb_conf->pfc_en)
1666 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1668 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1670 (void)(rte_memcpy(ð_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
1671 sizeof(struct rte_eth_dcb_rx_conf)));
1672 (void)(rte_memcpy(ð_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
1673 sizeof(struct rte_eth_dcb_tx_conf)));
1680 init_port_dcb_config(portid_t pid,struct dcb_config *dcb_conf)
1682 struct rte_eth_conf port_conf;
1683 struct rte_port *rte_port;
1688 /* RX and TX queue configuration in DCB mode */
1691 rx_free_thresh = 64;
1693 memset(&port_conf,0,sizeof(struct rte_eth_conf));
1694 /* Enter DCB configuration status */
1697 nb_vlan = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
1698 /* Set the DCB configuration for VT mode and for non-VT mode. */
1699 retval = get_eth_dcb_conf(&port_conf, dcb_conf);
1703 rte_port = &ports[pid];
1704 memcpy(&rte_port->dev_conf, &port_conf,sizeof(struct rte_eth_conf));
1706 rte_port->rx_conf.rx_thresh = rx_thresh;
1707 rte_port->rx_conf.rx_free_thresh = rx_free_thresh;
1708 rte_port->tx_conf.tx_thresh = tx_thresh;
1709 rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1710 rte_port->tx_conf.tx_free_thresh = tx_free_thresh;
1712 rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1713 for (i = 0; i < nb_vlan; i++){
1714 rx_vft_set(pid, vlan_tags[i], 1);
1717 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1718 map_port_queue_stats_mapping_registers(pid, rte_port);
1720 rte_port->dcb_flag = 1;
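/*
 * Illustrative caller (a sketch, not part of this file; in testpmd the DCB
 * configuration is normally driven from the interactive command line):
 *
 *   struct dcb_config cfg;
 *   cfg.dcb_mode = DCB_VT_ENABLED;
 *   cfg.num_tcs  = ETH_4_TCS;
 *   cfg.pfc_en   = 0;
 *   if (init_port_dcb_config(0, &cfg) == 0)
 *           start_port(0);
 */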
1725 #ifdef RTE_EXEC_ENV_BAREMETAL
1730 main(int argc, char** argv)
1735 diag = rte_eal_init(argc, argv);
1737 rte_panic("Cannot init EAL\n");
1739 if (rte_pmd_init_all())
1740 rte_panic("Cannot init PMD\n");
1742 if (rte_eal_pci_probe())
1743 rte_panic("Cannot probe PCI\n");
1745 nb_ports = (portid_t) rte_eth_dev_count();
1747 rte_exit(EXIT_FAILURE, "No probed ethernet devices - "
1749 "CONFIG_RTE_LIBRTE_IGB_PMD=y and that "
1750 "CONFIG_RTE_LIBRTE_EM_PMD=y and that "
1751 "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in your "
1752 "configuration file\n");
1754 set_def_fwd_config();
1756 rte_panic("Empty set of forwarding logical cores - check the "
1757 "core mask supplied in the command parameters\n");
1762 launch_args_parse(argc, argv);
1764 if (nb_rxq > nb_txq)
1765 printf("Warning: nb_rxq=%d enables RSS configuration, "
1766 "but nb_txq=%d will prevent it from being fully tested.\n",
1770 if (start_port(RTE_PORT_ALL) != 0)
1771 rte_exit(EXIT_FAILURE, "Start ports failed\n");
1773 /* set all ports to promiscuous mode by default */
1774 for (port_id = 0; port_id < nb_ports; port_id++)
1775 rte_eth_promiscuous_enable(port_id);
1777 if (interactive == 1)
1783 printf("No command-line core given, start packet forwarding\n");
1784 start_packet_forwarding(0);
1785 printf("Press enter to exit\n");
1786 rc = read(0, &c, 1);