4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <sys/types.h>
44 #include <sys/queue.h>
51 #include <rte_common.h>
52 #include <rte_byteorder.h>
54 #include <rte_debug.h>
55 #include <rte_cycles.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
59 #include <rte_launch.h>
60 #include <rte_tailq.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
70 #include <rte_interrupts.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
80 #include "mempool_osdep.h"
/*
 * Global run-time configuration state of the testpmd application.
 * NOTE(review): every line in this chunk carries a stray numeric prefix and
 * the embedded numbering is discontinuous — original lines (mostly comment
 * delimiters and blank lines) are missing from this view. Code tokens are
 * left byte-identical; only comments are added.
 */
82 uint16_t verbose_level = 0; /**< Silent by default. */
84 /* use master core for command line ? */
85 uint8_t interactive = 0;
86 uint8_t auto_start = 0;
/* NOTE(review): the lines below are the interior of a NUMA-policy comment
 * whose opening delimiter is among the missing lines. */
89 * NUMA support configuration.
90 * When set, the NUMA support attempts to dispatch the allocation of the
91 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
92 * probed ports among the CPU sockets 0 and 1.
93 * Otherwise, all memory is allocated from CPU socket 0.
95 uint8_t numa_support = 0; /**< No numa support by default */
98 * In UMA mode,all memory is allocated from socket 0 if --socket-num is
101 uint8_t socket_num = UMA_NO_CONFIG;
104 * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs.
/* Peer MAC addresses used as destination by the forwarding engines. */
109 * Record the Ethernet address of peer target ports to which packets are
111 * Must be instanciated with the ethernet addresses of peer traffic generator
114 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
115 portid_t nb_peer_eth_addrs = 0;
118 * Probed Target Environment.
120 struct rte_port *ports; /**< For all probed ethernet ports. */
121 portid_t nb_ports; /**< Number of probed ethernet ports. */
122 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
123 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
/* Forwarding configuration: the invariants below relate the configured
 * subset to the probed totals. */
126 * Test Forwarding Configuration.
127 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
128 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
130 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
131 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
132 portid_t nb_cfg_ports; /**< Number of configured ports. */
133 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
135 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
136 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
138 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
139 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
/* Table of available forwarding engines; NOTE(review): most entries of the
 * array (io, mac, rxonly, txonly, csum, ...) are among the lines missing
 * from this view — only two entries and the IEEE1588 guard remain. */
142 * Forwarding engines.
144 struct fwd_engine * fwd_engines[] = {
147 &mac_retry_fwd_engine,
154 #ifdef RTE_LIBRTE_IEEE1588
155 &ieee1588_fwd_engine,
160 struct fwd_config cur_fwd_config;
161 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
163 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
164 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
165 * specified on command-line. */
/* Packet-segment layout used by the "txonly" engine. */
168 * Configuration of packet segments used by the "txonly" processing engine.
170 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
171 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
172 TXONLY_DEF_PACKET_LEN,
174 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
176 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
177 uint16_t mb_mempool_cache = DEF_PKT_BURST; /**< Size of mbuf mempool cache. */
179 /* current configuration is in DCB or not,0 means it is not in DCB mode */
180 uint8_t dcb_config = 0;
182 /* Whether the dcb is in testing status */
183 uint8_t dcb_test = 0;
185 /* DCB on and VT on mapping is default */
186 enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
/* Per-port queue counts and ring sizing, plus the RX/TX threshold-register
 * defaults passed to queue setup. */
189 * Configurable number of RX/TX queues.
191 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
192 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
195 * Configurable number of RX/TX ring descriptors.
197 #define RTE_TEST_RX_DESC_DEFAULT 128
198 #define RTE_TEST_TX_DESC_DEFAULT 512
199 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
200 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
203 * Configurable values of RX and TX ring threshold registers.
205 #define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
206 #define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
207 #define RX_WTHRESH 4 /**< Default value of RX write-back threshold register. */
209 #define TX_PTHRESH 36 /**< Default value of TX prefetch threshold register. */
210 #define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
211 #define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */
213 struct rte_eth_thresh rx_thresh = {
214 .pthresh = RX_PTHRESH,
215 .hthresh = RX_HTHRESH,
216 .wthresh = RX_WTHRESH,
219 struct rte_eth_thresh tx_thresh = {
220 .pthresh = TX_PTHRESH,
221 .hthresh = TX_HTHRESH,
222 .wthresh = TX_WTHRESH,
226 * Configurable value of RX free threshold.
228 uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */
231 * Configurable value of RX drop enable.
233 uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */
236 * Configurable value of TX free threshold.
238 uint16_t tx_free_thresh = 0; /* Use default values. */
241 * Configurable value of TX RS bit threshold.
243 uint16_t tx_rs_thresh = 0; /* Use default values. */
246 * Configurable value of TX queue flags.
248 uint32_t txq_flags = 0; /* No flags set. */
/* RSS, topology, bypass and device-level configuration defaults, followed by
 * the per-queue stats-register mapping tables and forward declarations. */
251 * Receive Side Scaling (RSS) configuration.
253 uint16_t rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6; /* RSS IP by default. */
256 * Port topology configuration
258 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
261 * Avoids to flush all the RX streams before starts forwarding.
263 uint8_t no_flush_rx = 0; /* flush by default */
266 * NIC bypass mode configuration options.
268 #ifdef RTE_NIC_BYPASS
270 /* The NIC bypass watchdog timeout. */
271 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
/* Default RX mode handed to rte_eth_dev_configure(). */
276 * Ethernet device configuration.
278 struct rte_eth_rxmode rx_mode = {
279 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
281 .header_split = 0, /**< Header Split disabled. */
282 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
283 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
284 .hw_vlan_strip = 1, /**< VLAN strip enabled. */
285 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
286 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
287 .hw_strip_crc = 0, /**< CRC stripping by hardware disabled. */
290 struct rte_fdir_conf fdir_conf = {
291 .mode = RTE_FDIR_MODE_NONE,
292 .pballoc = RTE_FDIR_PBALLOC_64K,
293 .status = RTE_FDIR_REPORT_STATUS,
294 .flexbytes_offset = 0x6,
/* Forwarding state flag polled by the lcore loops: 1 = stopped. */
298 static volatile int test_done = 1; /* stop packet forwarding when set to 1. */
300 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
301 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
303 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
304 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
306 uint16_t nb_tx_queue_stats_mappings = 0;
307 uint16_t nb_rx_queue_stats_mappings = 0;
309 /* Forward function declarations */
310 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
311 static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);
314 * Check if all the ports are started.
315 * If yes, return positive value. If not, return zero.
317 static int all_ports_started(void);
320 * Setup default configuration.
/*
 * Collect the CPU ids of all enabled lcores (skipping the master lcore) into
 * fwd_lcores_cpuids[] and record their count in nb_lcores / nb_cfg_lcores.
 * NOTE(review): the embedded numbering shows lines 331/333 are missing —
 * presumably the `continue;` statements for the two skip conditions, plus
 * the function's return type, braces and local declarations (i, nb_lc).
 */
323 set_default_fwd_lcores_config(void)
329 for (i = 0; i < RTE_MAX_LCORE; i++) {
330 if (! rte_lcore_is_enabled(i))
332 if (i == rte_get_master_lcore())
334 fwd_lcores_cpuids[nb_lc++] = i;
336 nb_lcores = (lcoreid_t) nb_lc;
337 nb_cfg_lcores = nb_lcores;
/*
 * Give every possible peer port a default locally-administered MAC address
 * whose last byte is the port index (bytes 1..4 are left untouched here).
 */
342 set_def_peer_eth_addrs(void)
346 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
347 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
348 peer_eth_addrs[i].addr_bytes[5] = i;
/*
 * By default forward on every probed port, in probe order: the forwarding
 * port id table is the identity mapping and all probed ports are configured.
 */
353 set_default_fwd_ports_config(void)
357 for (pt_id = 0; pt_id < nb_ports; pt_id++)
358 fwd_ports_ids[pt_id] = pt_id;
360 nb_cfg_ports = nb_ports;
361 nb_fwd_ports = nb_ports;
/* Apply all three default-configuration steps: lcores, peer MACs, ports. */
365 set_def_fwd_config(void)
367 set_default_fwd_lcores_config();
368 set_def_peer_eth_addrs();
369 set_default_fwd_ports_config();
373 * Configuration initialisation done once at init time.
/* Argument passed to testpmd_mbuf_ctor for each mbuf of a pool. */
375 struct mbuf_ctor_arg {
376 uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
377 uint16_t seg_buf_size; /**< size of data segment in mbuf. */
/* Argument passed to testpmd_mbuf_pool_ctor once per pool. */
380 struct mbuf_pool_ctor_arg {
381 uint16_t seg_buf_size; /**< size of data segment in mbuf. */
/*
 * Per-mbuf constructor: initialize the buffer address/physaddr/length of one
 * raw mbuf so that its data segment starts seg_buf_offset bytes after the
 * mbuf header, and reset the packet metadata fields.
 * NOTE(review): the embedded numbering shows several lines are missing from
 * this view (parameter lines 386-387, `{`, the `mb` declaration, and fields
 * set on lines 395/397/403/405 such as pool/ol_flags/pkt.data_len).
 */
385 testpmd_mbuf_ctor(struct rte_mempool *mp,
388 __attribute__((unused)) unsigned i)
390 struct mbuf_ctor_arg *mb_ctor_arg;
393 mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
394 mb = (struct rte_mbuf *) raw_mbuf;
396 mb->type = RTE_MBUF_PKT;
/* Data buffer lives inside the same memory object, right after the header. */
398 mb->buf_addr = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
399 mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
400 mb_ctor_arg->seg_buf_offset);
401 mb->buf_len = mb_ctor_arg->seg_buf_size;
/* FIX(review): removed the duplicate `mb->type = RTE_MBUF_PKT;` assignment
 * (embedded line 402) — the type is already set above (line 396). */
404 mb->pkt.data = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
406 mb->pkt.vlan_macip.data = 0;
407 mb->pkt.hash.rss = 0;
/*
 * Pool constructor: store the data-room size in the pool's private area so
 * the PMDs can compute the usable buffer length per mbuf. Bails out (the
 * `return;` on missing line 421/422) if the private area is too small.
 */
411 testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
414 struct mbuf_pool_ctor_arg *mbp_ctor_arg;
415 struct rte_pktmbuf_pool_private *mbp_priv;
417 if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
418 printf("%s(%s) private_data_size %d < %d\n",
419 __func__, mp->name, (int) mp->private_data_size,
420 (int) sizeof(struct rte_pktmbuf_pool_private));
423 mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
424 mbp_priv = rte_mempool_get_priv(mp);
425 mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
/*
 * Create one mbuf pool of nb_mbuf elements on the given socket. The element
 * size is the cache-line-rounded mbuf header plus headroom plus data room.
 * Three creation back-ends are selected by build/runtime configuration:
 * Xen gntalloc, anonymous mmap'd memory, or the regular rte_mempool.
 * NOTE(review): the #ifdef/#else ladder is incomplete in this view — the
 * embedded numbering shows the trailing argument lines (flags, socket_id)
 * and the intermediate #else/#endif lines are missing, so the exact
 * branch structure cannot be verified from here.
 */
429 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
430 unsigned int socket_id)
432 char pool_name[RTE_MEMPOOL_NAMESIZE];
433 struct rte_mempool *rte_mp;
434 struct mbuf_pool_ctor_arg mbp_ctor_arg;
435 struct mbuf_ctor_arg mb_ctor_arg;
438 mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
440 mb_ctor_arg.seg_buf_offset =
441 (uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
442 mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
443 mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
444 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
446 #ifdef RTE_LIBRTE_PMD_XENVIRT
447 rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
448 (unsigned) mb_mempool_cache,
449 sizeof(struct rte_pktmbuf_pool_private),
450 testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
451 testpmd_mbuf_ctor, &mb_ctor_arg,
458 rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
459 (unsigned) mb_mempool_cache,
460 sizeof(struct rte_pktmbuf_pool_private),
461 testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
462 testpmd_mbuf_ctor, &mb_ctor_arg,
465 rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size,
466 (unsigned) mb_mempool_cache,
467 sizeof(struct rte_pktmbuf_pool_private),
468 testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
469 testpmd_mbuf_ctor, &mb_ctor_arg,
/* Pool creation failure is fatal: testpmd cannot run without mbufs. */
474 if (rte_mp == NULL) {
475 rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
476 "failed\n", socket_id);
477 } else if (verbose_level > 0) {
478 rte_mempool_dump(rte_mp);
483 * Check given socket id is valid or not with NUMA mode,
484 * if valid, return 0, else return -1
/*
 * NOTE(review): only the warning path is visible here; the lines that set
 * warning_once, print the rest of the message, and return -1/0 are among
 * the missing lines (496-503 in the embedded numbering).
 */
487 check_socket_id(const unsigned int socket_id)
489 static int warning_once = 0;
491 if (socket_id >= MAX_SOCKET) {
492 if (!warning_once && numa_support)
493 printf("Warning: NUMA should be configured manually by"
494 " using --port-numa-config and"
495 " --ring-numa-config parameters along with"
/*
 * Body of init_config(): one-time initialization of lcores, mbuf pools and
 * port structures, then the forwarding streams.
 * NOTE(review): the function signature itself (embedded lines ~499-506) and
 * the local declarations of pid/lc_id/i are among the missing lines, as are
 * several closing braces and the numa_support branching around the
 * per-socket pool creation.
 */
507 struct rte_port *port;
508 struct rte_mempool *mbp;
509 unsigned int nb_mbuf_per_pool;
511 uint8_t port_per_socket[MAX_SOCKET];
513 memset(port_per_socket,0,MAX_SOCKET);
514 /* Configuration of logical cores. */
515 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
516 sizeof(struct fwd_lcore *) * nb_lcores,
518 if (fwd_lcores == NULL) {
519 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
520 "failed\n", nb_lcores);
522 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
523 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
524 sizeof(struct fwd_lcore),
526 if (fwd_lcores[lc_id] == NULL) {
527 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
530 fwd_lcores[lc_id]->cpuid_idx = lc_id;
534 * Create pools of mbuf.
535 * If NUMA support is disabled, create a single pool of mbuf in
536 * socket 0 memory by default.
537 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
539 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
540 * nb_txd can be configured at run time.
542 if (param_total_num_mbufs)
543 nb_mbuf_per_pool = param_total_num_mbufs;
545 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
546 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
549 nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
/* UMA path: one pool, either on socket 0 or on the user-chosen socket. */
553 if (socket_num == UMA_NO_CONFIG)
554 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
556 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
560 /* Configuration of Ethernet ports. */
561 ports = rte_zmalloc("testpmd: ports",
562 sizeof(struct rte_port) * nb_ports,
565 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
566 "failed\n", nb_ports);
/* Per-port: query device info and count ports per NUMA socket. */
569 for (pid = 0; pid < nb_ports; pid++) {
571 rte_eth_dev_info_get(pid, &port->dev_info);
574 if (port_numa[pid] != NUMA_NO_CONFIG)
575 port_per_socket[port_numa[pid]]++;
577 uint32_t socket_id = rte_eth_dev_socket_id(pid);
579 /* if socket_id is invalid, set to 0 */
580 if (check_socket_id(socket_id) < 0)
582 port_per_socket[socket_id]++;
586 /* set flag to initialize port/queue */
587 port->need_reconfig = 1;
588 port->need_reconfig_queues = 1;
/* NUMA path (guard among missing lines): one pool per populated socket. */
593 unsigned int nb_mbuf;
595 if (param_total_num_mbufs)
596 nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
598 for (i = 0; i < MAX_SOCKET; i++) {
599 nb_mbuf = (nb_mbuf_per_pool *
602 mbuf_pool_create(mbuf_data_size,
/* Bind each forwarding lcore to the mbuf pool of its own socket,
 * falling back to the socket-0 pool when none exists there. */
609 * Records which Mbuf pool to use by each logical core, if needed.
611 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
612 mbp = mbuf_pool_find(rte_lcore_to_socket_id(lc_id));
614 mbp = mbuf_pool_find(0);
615 fwd_lcores[lc_id]->mbp = mbp;
618 /* Configuration of packet forwarding streams. */
619 if (init_fwd_streams() < 0)
620 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
/*
 * (Re)allocate the forwarding stream array: validate nb_rxq/nb_txq against
 * each port's device limits, assign per-port socket ids, then free any
 * previous stream array and allocate nb_ports * nb_rxq fresh streams.
 * Returns 0 on success, negative on validation failure (returns are among
 * the missing lines per the embedded numbering).
 */
624 init_fwd_streams(void)
627 struct rte_port *port;
628 streamid_t sm_id, nb_fwd_streams_new;
630 /* set socket id according to numa or not */
631 for (pid = 0; pid < nb_ports; pid++) {
633 if (nb_rxq > port->dev_info.max_rx_queues) {
634 printf("Fail: nb_rxq(%d) is greater than "
635 "max_rx_queues(%d)\n", nb_rxq,
636 port->dev_info.max_rx_queues);
639 if (nb_txq > port->dev_info.max_tx_queues) {
640 printf("Fail: nb_txq(%d) is greater than "
641 "max_tx_queues(%d)\n", nb_txq,
642 port->dev_info.max_tx_queues);
/* NUMA mode: socket from --port-numa-config or from the device itself. */
646 if (port_numa[pid] != NUMA_NO_CONFIG)
647 port->socket_id = port_numa[pid];
649 port->socket_id = rte_eth_dev_socket_id(pid);
651 /* if socket_id is invalid, set to 0 */
652 if (check_socket_id(port->socket_id) < 0)
/* UMA mode: socket 0 unless --socket-num was given. */
657 if (socket_num == UMA_NO_CONFIG)
660 port->socket_id = socket_num;
/* Nothing to do when the stream count is unchanged. */
664 nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
665 if (nb_fwd_streams_new == nb_fwd_streams)
668 if (fwd_streams != NULL) {
669 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
670 if (fwd_streams[sm_id] == NULL)
672 rte_free(fwd_streams[sm_id]);
673 fwd_streams[sm_id] = NULL;
675 rte_free(fwd_streams);
/* Allocate the new pointer array and one zeroed stream per RX queue. */
680 nb_fwd_streams = nb_fwd_streams_new;
681 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
682 sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
683 if (fwd_streams == NULL)
684 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
685 "failed\n", nb_fwd_streams);
687 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
688 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
689 sizeof(struct fwd_stream), CACHE_LINE_SIZE);
690 if (fwd_streams[sm_id] == NULL)
691 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
698 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
/*
 * Print the burst-size distribution of a stream: the total number of bursts
 * and the share taken by the two most frequent burst sizes.
 * NOTE(review): `total_burst` is summed with += but its initialization to 0
 * (embedded line ~713) and the nb_pkt declaration are among the missing
 * lines — confirm against the full file.
 */
700 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
702 unsigned int total_burst;
703 unsigned int nb_burst;
704 unsigned int burst_stats[3];
705 uint16_t pktnb_stats[3];
707 int burst_percent[3];
710 * First compute the total number of packet bursts and the
711 * two highest numbers of bursts of the same number of packets.
714 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
715 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
716 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
717 nb_burst = pbs->pkt_burst_spread[nb_pkt];
720 total_burst += nb_burst;
/* Track the most frequent burst size; demotion to slot 1 keeps runner-up. */
721 if (nb_burst > burst_stats[0]) {
722 burst_stats[1] = burst_stats[0];
723 pktnb_stats[1] = pktnb_stats[0];
724 burst_stats[0] = nb_burst;
725 pktnb_stats[0] = nb_pkt;
728 if (total_burst == 0)
730 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
731 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
732 burst_percent[0], (int) pktnb_stats[0]);
/* Early-out formats when one or two burst sizes account for everything. */
733 if (burst_stats[0] == total_burst) {
737 if (burst_stats[0] + burst_stats[1] == total_burst) {
738 printf(" + %d%% of %d pkts]\n",
739 100 - burst_percent[0], pktnb_stats[1]);
742 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
743 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
744 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
745 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
748 printf(" + %d%% of %d pkts + %d%% of others]\n",
749 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
751 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
/*
 * Print the forwarding statistics of one port: RX/TX packet and drop totals,
 * checksum errors (csum engine only), XON/XOFF pause frames, optional burst
 * spreads, flow-director counters, and per-stats-register queue counters
 * when queue-stats mapping is enabled. Layout differs slightly between the
 * mapped and unmapped cases.
 */
754 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
756 struct rte_port *port;
759 static const char *fwd_stats_border = "----------------------";
761 port = &ports[port_id];
762 printf("\n %s Forward statistics for port %-2d %s\n",
763 fwd_stats_border, port_id, fwd_stats_border);
/* Variant 1: no queue-stats mapping — left-aligned %-14 format. */
765 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
766 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
768 stats->ipackets, stats->ierrors,
769 (uint64_t) (stats->ipackets + stats->ierrors),
771 if (cur_fwd_eng == &csum_fwd_engine)
772 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
773 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
775 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
777 stats->opackets, port->tx_dropped,
778 (uint64_t) (stats->opackets + port->tx_dropped));
780 if (stats->rx_nombuf > 0)
781 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
/* Variant 2: queue-stats mapping enabled — right-aligned %14 format. */
785 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
787 stats->ipackets, stats->ierrors,
788 (uint64_t) (stats->ipackets + stats->ierrors));
790 if (cur_fwd_eng == &csum_fwd_engine)
791 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
792 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
794 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
796 stats->opackets, port->tx_dropped,
797 (uint64_t) (stats->opackets + port->tx_dropped));
799 if (stats->rx_nombuf > 0)
800 printf(" RX-nombufs:%14"PRIu64"\n", stats->rx_nombuf);
803 /* Display statistics of XON/XOFF pause frames, if any. */
804 if ((stats->tx_pause_xon | stats->rx_pause_xon |
805 stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) {
806 printf(" RX-XOFF: %-14"PRIu64" RX-XON: %-14"PRIu64"\n",
807 stats->rx_pause_xoff, stats->rx_pause_xon);
808 printf(" TX-XOFF: %-14"PRIu64" TX-XON: %-14"PRIu64"\n",
809 stats->tx_pause_xoff, stats->tx_pause_xon);
/* Per-stream burst spreads (guards on rx_stream/tx_stream being non-NULL
 * appear to be among the missing lines 813/816). */
812 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
814 pkt_burst_stats_display("RX",
815 &port->rx_stream->rx_burst_stats);
817 pkt_burst_stats_display("TX",
818 &port->tx_stream->tx_burst_stats);
821 if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
822 printf(" Fdirmiss:%14"PRIu64" Fdirmatch:%14"PRIu64"\n",
/* Per-queue counters, one line per stats register. */
826 if (port->rx_queue_stats_mapping_enabled) {
828 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
829 printf(" Stats reg %2d RX-packets:%14"PRIu64
830 " RX-errors:%14"PRIu64
831 " RX-bytes:%14"PRIu64"\n",
832 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
836 if (port->tx_queue_stats_mapping_enabled) {
837 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
838 printf(" Stats reg %2d TX-packets:%14"PRIu64
839 " TX-bytes:%14"PRIu64"\n",
840 i, stats->q_opackets[i], stats->q_obytes[i]);
844 printf(" %s--------------------------------%s\n",
845 fwd_stats_border, fwd_stats_border);
/*
 * Print the statistics of one forwarding stream (RX port/queue -> TX
 * port/queue): packet counts, drops, checksum errors for the csum engine,
 * and optional burst spreads. Streams with all-zero counters are skipped
 * (the early `return;` is among the missing lines, embedded line 857).
 */
849 fwd_stream_stats_display(streamid_t stream_id)
851 struct fwd_stream *fs;
852 static const char *fwd_top_stats_border = "-------";
854 fs = fwd_streams[stream_id];
855 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
856 (fs->fwd_dropped == 0))
858 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
859 "TX Port=%2d/Queue=%2d %s\n",
860 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
861 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
862 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
863 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
865 /* if checksum mode */
866 if (cur_fwd_eng == &csum_fwd_engine) {
867 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
868 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
871 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
872 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
873 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
/*
 * Drain stale packets from every RX queue of every forwarding port before
 * starting a run: two passes over all (port, rxq) pairs, freeing everything
 * received, with a 10 ms pause between passes. The inner receive loop (its
 * `do { } while (nb_rx > 0)` shape) and the loop-variable declarations are
 * among the missing lines.
 */
878 flush_fwd_rx_queues(void)
880 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
888 for (j = 0; j < 2; j++) {
889 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
890 for (rxq = 0; rxq < nb_rxq; rxq++) {
891 port_id = fwd_ports_ids[rxp];
893 nb_rx = rte_eth_rx_burst(port_id, rxq,
894 pkts_burst, MAX_PKT_BURST);
895 for (i = 0; i < nb_rx; i++)
896 rte_pktmbuf_free(pkts_burst[i]);
900 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
/*
 * Main forwarding loop of one lcore: repeatedly run pkt_fwd over every
 * stream assigned to this lcore until fc->stopped is set. The opening
 * `do {` of the loop is among the missing lines.
 */
905 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
907 struct fwd_stream **fsm;
911 fsm = &fwd_streams[fc->stream_idx];
912 nb_fs = fc->stream_nb;
914 for (sm_id = 0; sm_id < nb_fs; sm_id++)
915 (*pkt_fwd)(fsm[sm_id]);
916 } while (! fc->stopped);
/*
 * lcore entry point: run the current engine's packet_fwd over this lcore's
 * streams. The `return 0;` and braces are among the missing lines.
 */
920 start_pkt_forward_on_core(void *fwd_arg)
922 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
923 cur_fwd_config.fwd_eng->packet_fwd);
928 * Run the TXONLY packet forwarding engine to send a single burst of packets.
929 * Used to start communication flows in network loopback test configurations.
/*
 * A copy of the real lcore context with stopped=1 makes the do/while loop
 * in run_pkt_fwd_on_lcore execute exactly once. The line copying *fwd_lc
 * into tmp_lcore (embedded line 938) is among the missing lines.
 */
932 run_one_txonly_burst_on_core(void *fwd_arg)
934 struct fwd_lcore *fwd_lc;
935 struct fwd_lcore tmp_lcore;
937 fwd_lc = (struct fwd_lcore *) fwd_arg;
939 tmp_lcore.stopped = 1;
940 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
945 * Launch packet forwarding:
946 * - Setup per-port forwarding context.
947 * - launch logical cores with their forwarding configuration.
950 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
952 port_fwd_begin_t port_fwd_begin;
/* Run the engine's optional per-port setup hook on every forwarding port. */
957 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
958 if (port_fwd_begin != NULL) {
959 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
960 (*port_fwd_begin)(fwd_ports_ids[i]);
/* Remote-launch the loop on each forwarding lcore; in interactive mode the
 * lcore running the command line is skipped. */
962 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
963 lc_id = fwd_lcores_cpuids[i];
964 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
965 fwd_lcores[i]->stopped = 0;
966 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
967 fwd_lcores[i], lc_id);
969 printf("launch lcore %u failed - diag=%d\n",
976 * Launch packet forwarding configuration.
/*
 * Start a forwarding run: validate preconditions (ports started, not already
 * running, DCB constraints), flush stale RX packets, snapshot per-port stats
 * and zero per-stream counters, optionally send one TXONLY burst first, then
 * launch the forwarding loops. Several guard lines (returns, the dcb_test
 * condition around the DCB checks, test_done = 0) are among the missing
 * lines per the embedded numbering.
 */
979 start_packet_forwarding(int with_tx_first)
981 port_fwd_begin_t port_fwd_begin;
982 port_fwd_end_t port_fwd_end;
983 struct rte_port *port;
988 if (all_ports_started() == 0) {
989 printf("Not all ports were started\n");
992 if (test_done == 0) {
993 printf("Packet forwarding already started\n");
/* DCB-mode sanity checks on every forwarding port. */
997 for (i = 0; i < nb_fwd_ports; i++) {
998 pt_id = fwd_ports_ids[i];
999 port = &ports[pt_id];
1000 if (!port->dcb_flag) {
1001 printf("In DCB mode, all forwarding ports must "
1002 "be configured in this mode.\n");
1006 if (nb_fwd_lcores == 1) {
1007 printf("In DCB mode,the nb forwarding cores "
1008 "should be larger than 1.\n");
1015 flush_fwd_rx_queues();
1018 rxtx_config_display();
/* Snapshot current HW stats per port so the run's delta can be computed. */
1020 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1021 pt_id = fwd_ports_ids[i];
1022 port = &ports[pt_id];
1023 rte_eth_stats_get(pt_id, &port->stats);
1024 port->tx_dropped = 0;
1026 map_port_queue_stats_mapping_registers(pt_id, port);
/* Reset all per-stream counters for the new run. */
1028 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1029 fwd_streams[sm_id]->rx_packets = 0;
1030 fwd_streams[sm_id]->tx_packets = 0;
1031 fwd_streams[sm_id]->fwd_dropped = 0;
1032 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1033 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1035 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1036 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1037 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1038 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1039 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1041 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1042 fwd_streams[sm_id]->core_cycles = 0;
/* Optionally kick off flows with a single TXONLY burst, waiting for all
 * lcores to finish it before launching the real engine. */
1045 if (with_tx_first) {
1046 port_fwd_begin = tx_only_engine.port_fwd_begin;
1047 if (port_fwd_begin != NULL) {
1048 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1049 (*port_fwd_begin)(fwd_ports_ids[i]);
1051 launch_packet_forwarding(run_one_txonly_burst_on_core);
1052 rte_eal_mp_wait_lcore();
1053 port_fwd_end = tx_only_engine.port_fwd_end;
1054 if (port_fwd_end != NULL) {
1055 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1056 (*port_fwd_end)(fwd_ports_ids[i]);
1059 launch_packet_forwarding(start_pkt_forward_on_core);
/*
 * Stop a forwarding run: signal every forwarding lcore to stop, wait for
 * them, run the engine's per-port teardown hook, aggregate per-stream
 * counters into their ports, then compute and display the per-port and
 * accumulated statistics deltas since start_packet_forwarding().
 * NOTE(review): per the embedded numbering, several lines are missing from
 * this view (loop-variable declarations, the test_done guard around line
 * 1089, closing braces, fwd_cycles initialization).
 */
1063 stop_packet_forwarding(void)
1065 struct rte_eth_stats stats;
1066 struct rte_port *port;
1067 port_fwd_end_t port_fwd_end;
1072 uint64_t total_recv;
1073 uint64_t total_xmit;
1074 uint64_t total_rx_dropped;
1075 uint64_t total_tx_dropped;
1076 uint64_t total_rx_nombuf;
1077 uint64_t tx_dropped;
1078 uint64_t rx_bad_ip_csum;
1079 uint64_t rx_bad_l4_csum;
1080 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1081 uint64_t fwd_cycles;
1083 static const char *acc_stats_border = "+++++++++++++++";
1085 if (all_ports_started() == 0) {
1086 printf("Not all ports were started\n");
1090 printf("Packet forwarding not started\n");
1093 printf("Telling cores to stop...");
1094 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1095 fwd_lcores[lc_id]->stopped = 1;
1096 printf("\nWaiting for lcores to finish...\n");
1097 rte_eal_mp_wait_lcore();
1098 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1099 if (port_fwd_end != NULL) {
1100 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1101 pt_id = fwd_ports_ids[i];
1102 (*port_fwd_end)(pt_id);
1105 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
/* Fold each stream's counters into its RX/TX ports; per-stream display is
 * only shown when there is more than one stream per port. */
1108 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1109 if (cur_fwd_config.nb_fwd_streams >
1110 cur_fwd_config.nb_fwd_ports) {
1111 fwd_stream_stats_display(sm_id);
1112 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1113 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1115 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1117 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1120 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1121 tx_dropped = (uint64_t) (tx_dropped +
1122 fwd_streams[sm_id]->fwd_dropped);
1123 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1126 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1127 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1128 fwd_streams[sm_id]->rx_bad_ip_csum);
1129 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1133 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1134 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1135 fwd_streams[sm_id]->rx_bad_l4_csum);
1136 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1139 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1140 fwd_cycles = (uint64_t) (fwd_cycles +
1141 fwd_streams[sm_id]->core_cycles);
/* FIX(review): total_recv/total_xmit are accumulated with += below but were
 * never initialized in the visible code — zero them with the other totals. */
total_recv = 0;
total_xmit = 0;
1146 total_rx_dropped = 0;
1147 total_tx_dropped = 0;
1148 total_rx_nombuf = 0;
/* Per port: read HW stats, subtract the snapshot taken at start, and reset
 * the snapshot so the next run starts clean. */
1149 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1150 pt_id = fwd_ports_ids[i];
1152 port = &ports[pt_id];
1153 rte_eth_stats_get(pt_id, &stats);
1154 stats.ipackets -= port->stats.ipackets;
1155 port->stats.ipackets = 0;
1156 stats.opackets -= port->stats.opackets;
1157 port->stats.opackets = 0;
1158 stats.ibytes -= port->stats.ibytes;
1159 port->stats.ibytes = 0;
1160 stats.obytes -= port->stats.obytes;
1161 port->stats.obytes = 0;
1162 stats.ierrors -= port->stats.ierrors;
1163 port->stats.ierrors = 0;
1164 stats.oerrors -= port->stats.oerrors;
1165 port->stats.oerrors = 0;
1166 stats.rx_nombuf -= port->stats.rx_nombuf;
1167 port->stats.rx_nombuf = 0;
1168 stats.fdirmatch -= port->stats.fdirmatch;
/* FIX(review): copy-paste bug — the original reset rx_nombuf here (and
 * below) instead of the flow-director counters just subtracted. */
1169 port->stats.fdirmatch = 0;
1170 stats.fdirmiss -= port->stats.fdirmiss;
1171 port->stats.fdirmiss = 0;
1173 total_recv += stats.ipackets;
1174 total_xmit += stats.opackets;
1175 total_rx_dropped += stats.ierrors;
1176 total_tx_dropped += port->tx_dropped;
1177 total_rx_nombuf += stats.rx_nombuf;
1179 fwd_port_stats_display(pt_id, &stats);
1181 printf("\n %s Accumulated forward statistics for all ports"
1183 acc_stats_border, acc_stats_border);
1184 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1186 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1188 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1189 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1190 if (total_rx_nombuf > 0)
1191 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1192 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1194 acc_stats_border, acc_stats_border);
1195 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1197 printf("\n CPU cycles/packet=%u (total cycles="
1198 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1199 (unsigned int)(fwd_cycles / total_recv),
1200 fwd_cycles, total_recv);
1202 printf("\nDone.\n");
/*
 * Return non-zero when every probed port is in RTE_PORT_STARTED state,
 * zero otherwise. The `port = &ports[pi];` assignment and the return
 * statements are among the missing lines per the embedded numbering.
 */
1207 all_ports_started(void)
1210 struct rte_port *port;
1212 for (pi = 0; pi < nb_ports; pi++) {
1214 /* Check if there is a port which is not started */
1215 if (port->port_status != RTE_PORT_STARTED)
1219 /* No port is not started */
/* Configure (if needed) and start one port, or all ports when pid is
 * RTE_PORT_ALL.  Port state moves STOPPED -> HANDLING -> STARTED via
 * atomic compare-and-set so concurrent state changes are detected.
 * NOTE(review): this listing omits several lines (returns, else branches,
 * closing braces), so the control flow below is partial. */
1224 start_port(portid_t pid)
1226 int diag, need_check_link_status = 0;
1229 struct rte_port *port;
/* Refuse to (re)start ports while forwarding is running. */
1232 if (test_done == 0) {
1233 printf("Please stop forwarding first\n");
1237 if (init_fwd_streams() < 0) {
1238 printf("Fail from init_fwd_streams()\n");
1244 for (pi = 0; pi < nb_ports; pi++) {
/* When a single port was requested, skip all the others. */
1245 if (pid < nb_ports && pid != pi)
/* Claim the port: only a STOPPED port may be started. */
1249 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1250 RTE_PORT_HANDLING) == 0) {
1251 printf("Port %d is now not stopped\n", pi);
/* Re-run rte_eth_dev_configure() only when something changed. */
1255 if (port->need_reconfig > 0) {
1256 port->need_reconfig = 0;
1258 printf("Configuring Port %d (socket %u)\n", pi,
1260 /* configure port */
1261 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
/* On configure failure: roll the state back to STOPPED and retry later. */
1264 if (rte_atomic16_cmpset(&(port->port_status),
1265 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1266 printf("Port %d can not be set back "
1267 "to stopped\n", pi);
1268 printf("Fail to configure port %d\n", pi);
1269 /* try to reconfigure port next time */
1270 port->need_reconfig = 1;
1274 if (port->need_reconfig_queues > 0) {
1275 port->need_reconfig_queues = 0;
1276 /* setup tx queues */
1277 for (qi = 0; qi < nb_txq; qi++) {
/* Prefer the per-port NUMA node given on the command line, else the
 * port's own socket. */
1278 if ((numa_support) &&
1279 (txring_numa[pi] != NUMA_NO_CONFIG))
1280 diag = rte_eth_tx_queue_setup(pi, qi,
1281 nb_txd,txring_numa[pi],
1284 diag = rte_eth_tx_queue_setup(pi, qi,
1285 nb_txd,port->socket_id,
1291 /* Fail to setup tx queue, return */
1292 if (rte_atomic16_cmpset(&(port->port_status),
1294 RTE_PORT_STOPPED) == 0)
1295 printf("Port %d can not be set back "
1296 "to stopped\n", pi);
1297 printf("Fail to configure port %d tx queues\n", pi);
1298 /* try to reconfigure queues next time */
1299 port->need_reconfig_queues = 1;
1302 /* setup rx queues */
1303 for (qi = 0; qi < nb_rxq; qi++) {
1304 if ((numa_support) &&
1305 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
/* RX additionally needs an mbuf pool on the requested socket. */
1306 struct rte_mempool * mp =
1307 mbuf_pool_find(rxring_numa[pi]);
1309 printf("Failed to setup RX queue:"
1310 "No mempool allocation"
1311 "on the socket %d\n",
1316 diag = rte_eth_rx_queue_setup(pi, qi,
1317 nb_rxd,rxring_numa[pi],
1318 &(port->rx_conf),mp);
1321 diag = rte_eth_rx_queue_setup(pi, qi,
1322 nb_rxd,port->socket_id,
1324 mbuf_pool_find(port->socket_id));
1330 /* Fail to setup rx queue, return */
1331 if (rte_atomic16_cmpset(&(port->port_status),
1333 RTE_PORT_STOPPED) == 0)
1334 printf("Port %d can not be set back "
1335 "to stopped\n", pi);
1336 printf("Fail to configure port %d rx queues\n", pi);
1337 /* try to reconfigure queues next time */
1338 port->need_reconfig_queues = 1;
/* Queues are ready: start the device itself. */
1343 if (rte_eth_dev_start(pi) < 0) {
1344 printf("Fail to start port %d\n", pi);
1346 /* Fail to setup rx queue, return */
1347 if (rte_atomic16_cmpset(&(port->port_status),
1348 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1349 printf("Port %d can not be set back to "
/* Publish the final STARTED state. */
1354 if (rte_atomic16_cmpset(&(port->port_status),
1355 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1356 printf("Port %d can not be set into started\n", pi);
1358 mac_addr = port->eth_addr.addr_bytes;
1359 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1360 mac_addr[0], mac_addr[1], mac_addr[2],
1361 mac_addr[3], mac_addr[4], mac_addr[5]);
1363 /* at least one port started, need checking link status */
1364 need_check_link_status = 1;
1367 if (need_check_link_status)
1368 check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1370 printf("Please stop the ports first\n");
/* Stop one port, or all ports when pid is RTE_PORT_ALL.  Port state moves
 * STARTED -> HANDLING -> STOPPED; ports not currently STARTED are skipped.
 * NOTE(review): some lines (continue/return statements, braces) are not
 * visible in this listing. */
1377 stop_port(portid_t pid)
1380 struct rte_port *port;
1381 int need_check_link_status = 0;
/* Forwarding must be stopped before the ports can be. */
1383 if (test_done == 0) {
1384 printf("Please stop forwarding first\n");
1391 printf("Stopping ports...\n");
1393 for (pi = 0; pi < nb_ports; pi++) {
1394 if (pid < nb_ports && pid != pi)
/* Claim the port; skip it if it was not STARTED. */
1398 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1399 RTE_PORT_HANDLING) == 0)
1402 rte_eth_dev_stop(pi);
1404 if (rte_atomic16_cmpset(&(port->port_status),
1405 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1406 printf("Port %d can not be set into stopped\n", pi);
1407 need_check_link_status = 1;
/* Report (now presumably down) link states once something was stopped. */
1409 if (need_check_link_status)
1410 check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
/* Close one port, or all ports when pid is RTE_PORT_ALL.  Only ports in
 * the STOPPED state are closed; state moves STOPPED -> HANDLING -> CLOSED.
 * NOTE(review): some lines (continue statements, braces) are not visible
 * in this listing. */
1416 close_port(portid_t pid)
1419 struct rte_port *port;
1421 if (test_done == 0) {
1422 printf("Please stop forwarding first\n");
1426 printf("Closing ports...\n");
1428 for (pi = 0; pi < nb_ports; pi++) {
1429 if (pid < nb_ports && pid != pi)
1433 if (rte_atomic16_cmpset(&(port->port_status),
1434 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1435 printf("Port %d is now not stopped\n", pi);
1439 rte_eth_dev_close(pi);
1441 if (rte_atomic16_cmpset(&(port->port_status),
1442 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
/* BUG FIX: message said "set into stopped" but the failed transition here
 * is HANDLING -> CLOSED. */
1443 printf("Port %d can not be set into closed\n", pi);
/* Return whether every known port is in the RTE_PORT_STOPPED state.
 * NOTE(review): the "port = &ports[pi];" assignment and the return
 * statements are not visible in this listing. */
1450 all_ports_stopped(void)
1453 struct rte_port *port;
1455 for (pi = 0; pi < nb_ports; pi++) {
1457 if (port->port_status != RTE_PORT_STOPPED)
/* NOTE(review): head-truncated fragment — looks like the shutdown path
 * (presumably pmd_test_exit) closing every port; confirm against the full
 * file before relying on this. */
1469 for (pt_id = 0; pt_id < nb_ports; pt_id++) {
1470 printf("Stopping port %d...", pt_id);
1472 rte_eth_dev_close(pt_id);
/* Handler signature for an interactive test command (no args, no result). */
1478 typedef void (*cmd_func_t)(void);
/* One entry of the test-command menu: the command string typed by the user
 * and the handler invoked for it.
 * NOTE(review): the closing "};" of this struct is not visible here. */
1479 struct pmd_test_command {
1480 const char *cmd_name;
1481 cmd_func_t cmd_func;
/* Number of entries in pmd_test_menu (the menu table itself is defined
 * elsewhere in the file). */
1484 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1486 /* Check the link status of all ports in up to 9s, and print them finally */
/* Poll every port selected by port_mask with rte_eth_link_get_nowait(),
 * sleeping CHECK_INTERVAL ms between rounds, until all links are up or
 * MAX_CHECK_TIME rounds have elapsed; the final round prints each port's
 * link state.  NOTE(review): some lines (continue/break, flag updates,
 * braces) are not visible in this listing. */
1488 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
1490 #define CHECK_INTERVAL 100 /* 100ms */
1491 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1492 uint8_t portid, count, all_ports_up, print_flag = 0;
1493 struct rte_eth_link link;
1495 printf("Checking link statuses...\n");
1497 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1499 for (portid = 0; portid < port_num; portid++) {
1500 if ((port_mask & (1 << portid)) == 0)
1502 memset(&link, 0, sizeof(link));
1503 rte_eth_link_get_nowait(portid, &link);
1504 /* print link status if flag set */
1505 if (print_flag == 1) {
1506 if (link.link_status)
1507 printf("Port %d Link Up - speed %u "
/* BUG FIX: the half-duplex arm contained a stray "\n"; the format string
 * already ends with "%s\n", so half-duplex ports printed a blank line. */
1508 "Mbps - %s\n", (uint8_t)portid,
1509 (unsigned)link.link_speed,
1510 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1511 ("full-duplex") : ("half-duplex"));
1513 printf("Port %d Link Down\n",
1517 /* clear all_ports_up flag if any link down */
1518 if (link.link_status == 0) {
1523 /* after finally printing all link status, get out */
1524 if (print_flag == 1)
1527 if (all_ports_up == 0) {
1529 rte_delay_ms(CHECK_INTERVAL);
1532 /* set the print_flag if all ports up or timeout */
1533 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
/* Program the per-TX-queue stats-counter mapping for port_id from the
 * global tx_queue_stats_mappings table and mark the port accordingly.
 * NOTE(review): the error handling around "diag" and the return statements
 * are not visible in this listing. */
1540 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1544 uint8_t mapping_found = 0;
1546 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
/* Apply only entries that target this port and a valid TX queue. */
1547 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1548 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1549 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1550 tx_queue_stats_mappings[i].queue_id,
1551 tx_queue_stats_mappings[i].stats_counter_id);
1558 port->tx_queue_stats_mapping_enabled = 1;
/* RX counterpart of set_tx_queue_stats_mapping_registers(): program the
 * per-RX-queue stats-counter mapping for port_id from the global
 * rx_queue_stats_mappings table.
 * NOTE(review): error handling and returns are not visible in this listing. */
1563 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1567 uint8_t mapping_found = 0;
1569 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
/* Apply only entries that target this port and a valid RX queue. */
1570 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1571 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1572 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1573 rx_queue_stats_mappings[i].queue_id,
1574 rx_queue_stats_mappings[i].stats_counter_id);
1581 port->rx_queue_stats_mapping_enabled = 1;
/* Apply both TX and RX queue-stats mappings for port pi.  -ENOTSUP from the
 * PMD is tolerated (feature simply disabled for that port); any other error
 * aborts the whole application via rte_exit(). */
1586 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1590 diag = set_tx_queue_stats_mapping_registers(pi, port);
1592 if (diag == -ENOTSUP) {
1593 port->tx_queue_stats_mapping_enabled = 0;
1594 printf("TX queue stats mapping not supported port id=%d\n", pi);
/* Any other non-zero diag is fatal. */
1597 rte_exit(EXIT_FAILURE,
1598 "set_tx_queue_stats_mapping_registers "
1599 "failed for port id=%d diag=%d\n",
1603 diag = set_rx_queue_stats_mapping_registers(pi, port);
1605 if (diag == -ENOTSUP) {
1606 port->rx_queue_stats_mapping_enabled = 0;
1607 printf("RX queue stats mapping not supported port id=%d\n", pi);
1610 rte_exit(EXIT_FAILURE,
1611 "set_rx_queue_stats_mapping_registers "
1612 "failed for port id=%d diag=%d\n",
/* Fill in the default device/queue configuration for every port from the
 * global testpmd settings (rx_mode, fdir_conf, thresholds, RSS), fetch its
 * MAC address, and program the queue-stats mappings.
 * NOTE(review): the conditional (presumably on nb_rxq/RSS enablement)
 * selecting between rss_hf and 0 at 1629/1632 is not visible here. */
1618 init_port_config(void)
1621 struct rte_port *port;
1623 for (pid = 0; pid < nb_ports; pid++) {
1625 port->dev_conf.rxmode = rx_mode;
1626 port->dev_conf.fdir_conf = fdir_conf;
/* RSS enabled: use the global hash-function mask, default key. */
1628 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1629 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
/* RSS disabled branch: no hash functions. */
1631 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1632 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1635 /* In SR-IOV mode, RSS mode is not available */
1636 if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1637 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1638 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1640 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
/* Per-queue thresholds from the command-line/global defaults. */
1643 port->rx_conf.rx_thresh = rx_thresh;
1644 port->rx_conf.rx_free_thresh = rx_free_thresh;
1645 port->rx_conf.rx_drop_en = rx_drop_en;
1646 port->tx_conf.tx_thresh = tx_thresh;
1647 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1648 port->tx_conf.tx_free_thresh = tx_free_thresh;
1649 port->tx_conf.txq_flags = txq_flags;
1651 rte_eth_macaddr_get(pid, &port->eth_addr);
1653 map_port_queue_stats_mapping_registers(pid, port);
1654 #ifdef RTE_NIC_BYPASS
1655 rte_eth_dev_bypass_init(pid);
/* VLAN tags used to populate the VMDQ+DCB pool map in get_eth_dcb_conf()
 * (one tag per pool, 32 entries).
 * NOTE(review): the closing "};" is not visible in this listing. */
1660 const uint16_t vlan_tags[] = {
1661 0, 1, 2, 3, 4, 5, 6, 7,
1662 8, 9, 10, 11, 12, 13, 14, 15,
1663 16, 17, 18, 19, 20, 21, 22, 23,
1664 24, 25, 26, 27, 28, 29, 30, 31
/* Build eth_conf for DCB operation from dcb_conf: VMDQ+DCB mode when
 * dcb_conf->dcb_mode == DCB_VT_ENABLED, plain DCB otherwise.  The local
 * rx/tx config structs are filled first and then copied wholesale into
 * eth_conf's advanced-configuration unions. */
1668 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
1673 * Builds up the correct configuration for dcb+vt based on the vlan tags array
1674 * given above, and the number of traffic classes available for use.
1676 if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
1677 struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
1678 struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
1680 /* VMDQ+DCB RX and TX configrations */
1681 vmdq_rx_conf.enable_default_pool = 0;
1682 vmdq_rx_conf.default_pool = 0;
/* 4 TCs -> 32 pools, otherwise (8 TCs) -> 16 pools. */
1683 vmdq_rx_conf.nb_queue_pools =
1684 (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1685 vmdq_tx_conf.nb_queue_pools =
1686 (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
/* Map each VLAN tag from the global table onto a pool (round-robin). */
1688 vmdq_rx_conf.nb_pool_maps = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1689 for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
1690 vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[ i ];
1691 vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
/* Identity mapping: user priority i -> DCB queue i. */
1693 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1694 vmdq_rx_conf.dcb_queue[i] = i;
1695 vmdq_tx_conf.dcb_queue[i] = i;
1698 /*set DCB mode of RX and TX of multiple queues*/
1699 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1700 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1701 if (dcb_conf->pfc_en)
1702 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1704 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1706 (void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
1707 sizeof(struct rte_eth_vmdq_dcb_conf)));
1708 (void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
1709 sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
/* Non-VT branch: plain DCB RX/TX configuration. */
1712 struct rte_eth_dcb_rx_conf rx_conf;
1713 struct rte_eth_dcb_tx_conf tx_conf;
1715 /* queue mapping configuration of DCB RX and TX */
1716 if (dcb_conf->num_tcs == ETH_4_TCS)
1717 dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
1719 dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
1721 rx_conf.nb_tcs = dcb_conf->num_tcs;
1722 tx_conf.nb_tcs = dcb_conf->num_tcs;
1724 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
1725 rx_conf.dcb_queue[i] = i;
1726 tx_conf.dcb_queue[i] = i;
1728 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
1729 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1730 if (dcb_conf->pfc_en)
1731 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1733 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1735 (void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
1736 sizeof(struct rte_eth_dcb_rx_conf)));
1737 (void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
1738 sizeof(struct rte_eth_dcb_tx_conf)));
/* Reconfigure port pid for DCB mode: build the DCB rte_eth_conf via
 * get_eth_dcb_conf(), copy it into the port, apply thresholds, enable VLAN
 * filtering for every tag in vlan_tags, and flag the port as DCB-enabled.
 * NOTE(review): the error check on get_eth_dcb_conf()'s retval and the
 * final return are not visible in this listing. */
1745 init_port_dcb_config(portid_t pid,struct dcb_config *dcb_conf)
1747 struct rte_eth_conf port_conf;
1748 struct rte_port *rte_port;
1753 /* rxq and txq configuration in dcb mode */
1756 rx_free_thresh = 64;
1758 memset(&port_conf,0,sizeof(struct rte_eth_conf));
1759 /* Enter DCB configuration status */
1762 nb_vlan = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1763 /*set configuration of DCB in vt mode and DCB in non-vt mode*/
1764 retval = get_eth_dcb_conf(&port_conf, dcb_conf);
1768 rte_port = &ports[pid];
1769 memcpy(&rte_port->dev_conf, &port_conf,sizeof(struct rte_eth_conf));
1771 rte_port->rx_conf.rx_thresh = rx_thresh;
1772 rte_port->rx_conf.rx_free_thresh = rx_free_thresh;
1773 rte_port->tx_conf.tx_thresh = tx_thresh;
1774 rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1775 rte_port->tx_conf.tx_free_thresh = tx_free_thresh;
/* Accept only the VLANs DCB pools are mapped to. */
1777 rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1778 for (i = 0; i < nb_vlan; i++){
1779 rx_vft_set(pid, vlan_tags[i], 1);
1782 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1783 map_port_queue_stats_mapping_registers(pid, rte_port);
1785 rte_port->dcb_flag = 1;
1790 #ifdef RTE_EXEC_ENV_BAREMETAL
/* testpmd entry point: initialize EAL, PMDs, and PCI probing; discover
 * ports; parse command-line arguments; start all ports in promiscuous
 * mode; then either run the interactive command line or start packet
 * forwarding directly.
 * NOTE(review): this listing is truncated — main() continues past the last
 * visible line, and several lines (error checks, the interactive branch
 * body) are elided. */
1795 main(int argc, char** argv)
1800 diag = rte_eal_init(argc, argv);
1802 rte_panic("Cannot init EAL\n");
1804 if (rte_pmd_init_all())
1805 rte_panic("Cannot init PMD\n");
1807 if (rte_eal_pci_probe())
1808 rte_panic("Cannot probe PCI\n");
1810 nb_ports = (portid_t) rte_eth_dev_count();
/* No usable NICs: bail out with a hint about PMD build options. */
1812 rte_exit(EXIT_FAILURE, "No probed ethernet devices - "
1814 "CONFIG_RTE_LIBRTE_IGB_PMD=y and that "
1815 "CONFIG_RTE_LIBRTE_EM_PMD=y and that "
1816 "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in your "
1817 "configuration file\n");
1819 set_def_fwd_config();
1821 rte_panic("Empty set of forwarding logical cores - check the "
1822 "core mask supplied in the command parameters\n");
1827 launch_args_parse(argc, argv);
/* RSS spreads RX over nb_rxq queues; too few TX queues limits the test. */
1829 if (nb_rxq > nb_txq)
1830 printf("Warning: nb_rxq=%d enables RSS configuration, "
1831 "but nb_txq=%d will prevent to fully test it.\n",
1835 if (start_port(RTE_PORT_ALL) != 0)
1836 rte_exit(EXIT_FAILURE, "Start ports failed\n");
1838 /* set all ports to promiscuous mode by default */
1839 for (port_id = 0; port_id < nb_ports; port_id++)
1840 rte_eth_promiscuous_enable(port_id);
1842 #ifdef RTE_LIBRTE_CMDLINE
1843 if (interactive == 1) {
1845 printf("Start automatic packet forwarding\n");
1846 start_packet_forwarding(0);
/* Non-interactive path: forward until the user presses enter. */
1855 printf("No commandline core given, start packet forwarding\n");
1856 start_packet_forwarding(0);
1857 printf("Press enter to exit\n");
1858 rc = read(0, &c, 1);