4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <sys/types.h>
44 #include <sys/queue.h>
51 #include <rte_common.h>
52 #include <rte_byteorder.h>
54 #include <rte_debug.h>
55 #include <rte_cycles.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
59 #include <rte_launch.h>
60 #include <rte_tailq.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
70 #include <rte_interrupts.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
80 #include "mempool_osdep.h"
/*
 * Global testpmd run-time configuration state (set from the command line
 * and interactive commands; read by the forwarding engines).
 * NOTE(review): this file is a lossy extraction — several surrounding
 * comment/brace lines are missing; leading numeric tokens are artifacts.
 */
82 uint16_t verbose_level = 0; /**< Silent by default. */
84 /* use master core for command line ? */
85 uint8_t interactive = 0;
86 uint8_t auto_start = 0;
89 * NUMA support configuration.
90 * When set, the NUMA support attempts to dispatch the allocation of the
91 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
92 * probed ports among the CPU sockets 0 and 1.
93 * Otherwise, all memory is allocated from CPU socket 0.
95 uint8_t numa_support = 0; /**< No numa support by default */
98 * In UMA mode,all memory is allocated from socket 0 if --socket-num is
101 uint8_t socket_num = UMA_NO_CONFIG;
104 * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs.
109 * Record the Ethernet address of peer target ports to which packets are
111 * Must be instanciated with the ethernet addresses of peer traffic generator
114 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
115 portid_t nb_peer_eth_addrs = 0;
118 * Probed Target Environment.
120 struct rte_port *ports; /**< For all probed ethernet ports. */
121 portid_t nb_ports; /**< Number of probed ethernet ports. */
122 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
123 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
126 * Test Forwarding Configuration.
127 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
128 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
130 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
131 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
132 portid_t nb_cfg_ports; /**< Number of configured ports. */
133 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
135 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
136 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
138 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
139 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
/*
 * Forwarding-engine table and per-port/per-queue tunables.
 * RTE_PMD_PARAM_UNSET (-1) means "let the PMD choose its default".
 */
142 * Forwarding engines.
144 struct fwd_engine * fwd_engines[] = {
147 &mac_retry_fwd_engine,
154 #ifdef RTE_LIBRTE_IEEE1588
155 &ieee1588_fwd_engine,
160 struct fwd_config cur_fwd_config;
161 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
163 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
164 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
165 * specified on command-line. */
168 * Configuration of packet segments used by the "txonly" processing engine.
170 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
171 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
172 TXONLY_DEF_PACKET_LEN,
174 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
176 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
177 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
179 /* current configuration is in DCB or not,0 means it is not in DCB mode */
180 uint8_t dcb_config = 0;
182 /* Whether the dcb is in testing status */
183 uint8_t dcb_test = 0;
185 /* DCB on and VT on mapping is default */
186 enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
189 * Configurable number of RX/TX queues.
191 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
192 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
195 * Configurable number of RX/TX ring descriptors.
197 #define RTE_TEST_RX_DESC_DEFAULT 128
198 #define RTE_TEST_TX_DESC_DEFAULT 512
199 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
200 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
202 #define RTE_PMD_PARAM_UNSET -1
204 * Configurable values of RX and TX ring threshold registers.
207 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
208 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
209 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
211 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
212 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
213 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
216 * Configurable value of RX free threshold.
218 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
221 * Configurable value of RX drop enable.
223 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
226 * Configurable value of TX free threshold.
228 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
231 * Configurable value of TX RS bit threshold.
233 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
236 * Configurable value of TX queue flags.
238 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
241 * Receive Side Scaling (RSS) configuration.
243 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
246 * Port topology configuration
248 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
251 * Avoids to flush all the RX streams before starts forwarding.
253 uint8_t no_flush_rx = 0; /* flush by default */
256 * Avoids to check link status when starting/stopping a port.
258 uint8_t no_link_check = 0; /* check by default */
/*
 * Device-level defaults (RX mode, flow-director) and the queue-statistics
 * register mapping tables, plus forward declarations used below.
 */
261 * NIC bypass mode configuration options.
263 #ifdef RTE_NIC_BYPASS
265 /* The NIC bypass watchdog timeout. */
266 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
271 * Ethernet device configuration.
273 struct rte_eth_rxmode rx_mode = {
274 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
276 .header_split = 0, /**< Header Split disabled. */
277 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
278 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
279 .hw_vlan_strip = 1, /**< VLAN strip enabled. */
280 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
281 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
282 .hw_strip_crc = 0, /**< CRC stripping by hardware disabled. */
285 struct rte_fdir_conf fdir_conf = {
286 .mode = RTE_FDIR_MODE_NONE,
287 .pballoc = RTE_FDIR_PBALLOC_64K,
288 .status = RTE_FDIR_REPORT_STATUS,
/* 0x6 puts the flexbytes window just past the MAC addresses — presumably the
 * EtherType offset; TODO confirm against the fdir flexbytes documentation. */
289 .flexbytes_offset = 0x6,
293 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
295 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
296 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
298 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
299 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
301 uint16_t nb_tx_queue_stats_mappings = 0;
302 uint16_t nb_rx_queue_stats_mappings = 0;
304 /* Forward function declarations */
305 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
306 static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);
309 * Check if all the ports are started.
310 * If yes, return positive value. If not, return zero.
312 static int all_ports_started(void);
315 * Setup default configuration.
/*
 * Populate fwd_lcores_cpuids[] with every enabled lcore except the master
 * lcore, and record the count in nb_lcores / nb_cfg_lcores.
 * NOTE(review): the `continue;` bodies of the two filters are missing from
 * this extraction — confirm against the full source.
 */
318 set_default_fwd_lcores_config(void)
324 for (i = 0; i < RTE_MAX_LCORE; i++) {
325 if (! rte_lcore_is_enabled(i))
327 if (i == rte_get_master_lcore())
329 fwd_lcores_cpuids[nb_lc++] = i;
331 nb_lcores = (lcoreid_t) nb_lc;
332 nb_cfg_lcores = nb_lcores;
/*
 * Give every possible peer port a default locally-administered MAC address
 * whose last byte is the port index (02:00:00:00:00:<i> pattern).
 */
337 set_def_peer_eth_addrs(void)
341 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
342 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
343 peer_eth_addrs[i].addr_bytes[5] = i;
/*
 * Default port configuration: forward on all probed ports, in probe order
 * (fwd_ports_ids[i] == i).
 */
348 set_default_fwd_ports_config(void)
352 for (pt_id = 0; pt_id < nb_ports; pt_id++)
353 fwd_ports_ids[pt_id] = pt_id;
355 nb_cfg_ports = nb_ports;
356 nb_fwd_ports = nb_ports;
/* Reset lcores, peer MAC addresses and ports to their defaults, in order. */
360 set_def_fwd_config(void)
362 set_default_fwd_lcores_config();
363 set_def_peer_eth_addrs();
364 set_default_fwd_ports_config();
368 * Configuration initialisation done once at init time.
/* Opaque argument passed to testpmd_mbuf_ctor() for each mbuf. */
370 struct mbuf_ctor_arg {
371 uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
372 uint16_t seg_buf_size; /**< size of data segment in mbuf. */
/* Opaque argument passed to testpmd_mbuf_pool_ctor() once per pool. */
375 struct mbuf_pool_ctor_arg {
376 uint16_t seg_buf_size; /**< size of data segment in mbuf. */
/*
 * Per-mbuf constructor: point the data buffer just past the rte_mbuf header
 * (at seg_buf_offset), compute its physical address relative to the mempool
 * object, and reserve the standard RTE_PKTMBUF_HEADROOM.
 */
380 testpmd_mbuf_ctor(struct rte_mempool *mp,
383 __attribute__((unused)) unsigned i)
385 struct mbuf_ctor_arg *mb_ctor_arg;
388 mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
389 mb = (struct rte_mbuf *) raw_mbuf;
/* Data buffer lives in the same mempool object, right after the header. */
392 mb->buf_addr = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
393 mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
394 mb_ctor_arg->seg_buf_offset);
395 mb->buf_len = mb_ctor_arg->seg_buf_size;
397 mb->data_off = RTE_PKTMBUF_HEADROOM;
/*
 * Pool constructor: record the mbuf data-room size in the pool's private
 * area after verifying the private area is large enough to hold a
 * struct rte_pktmbuf_pool_private.
 */
405 testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
408 struct mbuf_pool_ctor_arg *mbp_ctor_arg;
409 struct rte_pktmbuf_pool_private *mbp_priv;
411 if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
412 printf("%s(%s) private_data_size %d < %d\n",
413 __func__, mp->name, (int) mp->private_data_size,
414 (int) sizeof(struct rte_pktmbuf_pool_private));
417 mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
418 mbp_priv = rte_mempool_get_priv(mp);
419 mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
/*
 * Create the mbuf pool for one CPU socket. The element size is the mbuf
 * header (cache-line rounded) plus headroom plus the configured data size.
 * Backend selection: Xen grant-alloc pool under RTE_LIBRTE_PMD_XENVIRT,
 * otherwise anonymous-mmap or the standard rte_mempool_create() (the
 * selecting #if/#else lines are missing from this extraction).
 * Exits the process on allocation failure.
 */
423 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
424 unsigned int socket_id)
426 char pool_name[RTE_MEMPOOL_NAMESIZE];
427 struct rte_mempool *rte_mp;
428 struct mbuf_pool_ctor_arg mbp_ctor_arg;
429 struct mbuf_ctor_arg mb_ctor_arg;
432 mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
/* Data segment starts after the rte_mbuf header, cache-line aligned. */
434 mb_ctor_arg.seg_buf_offset =
435 (uint16_t) RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
436 mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
437 mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
438 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
440 #ifdef RTE_LIBRTE_PMD_XENVIRT
441 rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
442 (unsigned) mb_mempool_cache,
443 sizeof(struct rte_pktmbuf_pool_private),
444 testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
445 testpmd_mbuf_ctor, &mb_ctor_arg,
452 rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
453 (unsigned) mb_mempool_cache,
454 sizeof(struct rte_pktmbuf_pool_private),
455 testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
456 testpmd_mbuf_ctor, &mb_ctor_arg,
459 rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size,
460 (unsigned) mb_mempool_cache,
461 sizeof(struct rte_pktmbuf_pool_private),
462 testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
463 testpmd_mbuf_ctor, &mb_ctor_arg,
468 if (rte_mp == NULL) {
469 rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
470 "failed\n", socket_id);
471 } else if (verbose_level > 0) {
472 rte_mempool_dump(stdout, rte_mp);
477 * Check given socket id is valid or not with NUMA mode,
478 * if valid, return 0, else return -1
481 check_socket_id(const unsigned int socket_id)
/* warning_once keeps the NUMA hint from being printed on every call. */
483 static int warning_once = 0;
485 if (socket_id >= MAX_SOCKET) {
486 if (!warning_once && numa_support)
487 printf("Warning: NUMA should be configured manually by"
488 " using --port-numa-config and"
489 " --ring-numa-config parameters along with"
/*
 * Body of init_config() (signature above this excerpt): one-time init of
 * lcore contexts, mbuf pools (one pool on socket 0 in UMA mode, one per
 * socket when NUMA support is enabled), the ports[] array, and the
 * forwarding streams. Exits the process on any allocation failure.
 */
501 struct rte_port *port;
502 struct rte_mempool *mbp;
503 unsigned int nb_mbuf_per_pool;
505 uint8_t port_per_socket[MAX_SOCKET];
507 memset(port_per_socket,0,MAX_SOCKET);
508 /* Configuration of logical cores. */
509 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
510 sizeof(struct fwd_lcore *) * nb_lcores,
511 RTE_CACHE_LINE_SIZE);
512 if (fwd_lcores == NULL) {
513 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
514 "failed\n", nb_lcores);
516 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
517 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
518 sizeof(struct fwd_lcore),
519 RTE_CACHE_LINE_SIZE);
520 if (fwd_lcores[lc_id] == NULL) {
521 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
524 fwd_lcores[lc_id]->cpuid_idx = lc_id;
528 * Create pools of mbuf.
529 * If NUMA support is disabled, create a single pool of mbuf in
530 * socket 0 memory by default.
531 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
533 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
534 * nb_txd can be configured at run time.
536 if (param_total_num_mbufs)
537 nb_mbuf_per_pool = param_total_num_mbufs;
/* Default sizing: worst-case descriptors + per-lcore cache + one burst. */
539 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
540 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
543 nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
547 if (socket_num == UMA_NO_CONFIG)
548 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
550 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
554 /* Configuration of Ethernet ports. */
555 ports = rte_zmalloc("testpmd: ports",
556 sizeof(struct rte_port) * nb_ports,
557 RTE_CACHE_LINE_SIZE);
559 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
560 "failed\n", nb_ports);
563 for (pid = 0; pid < nb_ports; pid++) {
565 rte_eth_dev_info_get(pid, &port->dev_info);
/* --port-numa-config overrides the device's own socket. */
568 if (port_numa[pid] != NUMA_NO_CONFIG)
569 port_per_socket[port_numa[pid]]++;
571 uint32_t socket_id = rte_eth_dev_socket_id(pid);
573 /* if socket_id is invalid, set to 0 */
574 if (check_socket_id(socket_id) < 0)
576 port_per_socket[socket_id]++;
580 /* set flag to initialize port/queue */
581 port->need_reconfig = 1;
582 port->need_reconfig_queues = 1;
587 unsigned int nb_mbuf;
589 if (param_total_num_mbufs)
590 nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
592 for (i = 0; i < MAX_SOCKET; i++) {
593 nb_mbuf = (nb_mbuf_per_pool *
596 mbuf_pool_create(mbuf_data_size,
603 * Records which Mbuf pool to use by each logical core, if needed.
605 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
606 mbp = mbuf_pool_find(
607 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
/* Fall back to the socket-0 pool when no local pool exists. */
610 mbp = mbuf_pool_find(0);
611 fwd_lcores[lc_id]->mbp = mbp;
614 /* Configuration of packet forwarding streams. */
615 if (init_fwd_streams() < 0)
616 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
/*
 * Re-run the configuration of one (hot-plugged) port: grow the ports[]
 * array, refresh its device info, and mark it for port/queue reconfig.
 * NOTE(review): rte_realloc overwrites `ports` directly — on failure the
 * old array is leaked, but the process exits immediately anyway.
 */
621 reconfig(portid_t new_port_id, unsigned socket_id)
623 struct rte_port *port;
625 /* Reconfiguration of Ethernet ports. */
626 ports = rte_realloc(ports,
627 sizeof(struct rte_port) * nb_ports,
628 RTE_CACHE_LINE_SIZE);
630 rte_exit(EXIT_FAILURE, "rte_realloc(%d struct rte_port) failed\n",
634 port = &ports[new_port_id];
635 rte_eth_dev_info_get(new_port_id, &port->dev_info);
637 /* set flag to initialize port/queue */
638 port->need_reconfig = 1;
639 port->need_reconfig_queues = 1;
640 port->socket_id = socket_id;
/*
 * (Re)allocate one fwd_stream per (port, RX queue) pair.
 * First validates nb_rxq/nb_txq against each device's limits and assigns
 * each port a socket id (NUMA config, device socket, or --socket-num).
 * If the stream count changed, frees the old array and zmallocs a new one;
 * exits the process on allocation failure. Returns 0 on success (error
 * return paths are missing from this extraction).
 */
647 init_fwd_streams(void)
650 struct rte_port *port;
651 streamid_t sm_id, nb_fwd_streams_new;
653 /* set socket id according to numa or not */
654 for (pid = 0; pid < nb_ports; pid++) {
656 if (nb_rxq > port->dev_info.max_rx_queues) {
657 printf("Fail: nb_rxq(%d) is greater than "
658 "max_rx_queues(%d)\n", nb_rxq,
659 port->dev_info.max_rx_queues);
662 if (nb_txq > port->dev_info.max_tx_queues) {
663 printf("Fail: nb_txq(%d) is greater than "
664 "max_tx_queues(%d)\n", nb_txq,
665 port->dev_info.max_tx_queues);
669 if (port_numa[pid] != NUMA_NO_CONFIG)
670 port->socket_id = port_numa[pid];
672 port->socket_id = rte_eth_dev_socket_id(pid);
674 /* if socket_id is invalid, set to 0 */
675 if (check_socket_id(port->socket_id) < 0)
680 if (socket_num == UMA_NO_CONFIG)
683 port->socket_id = socket_num;
687 nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
/* Nothing to do when the stream count is unchanged. */
688 if (nb_fwd_streams_new == nb_fwd_streams)
691 if (fwd_streams != NULL) {
692 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
693 if (fwd_streams[sm_id] == NULL)
695 rte_free(fwd_streams[sm_id]);
696 fwd_streams[sm_id] = NULL;
698 rte_free(fwd_streams);
703 nb_fwd_streams = nb_fwd_streams_new;
704 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
705 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
706 if (fwd_streams == NULL)
707 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
708 "failed\n", nb_fwd_streams);
710 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
711 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
712 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
713 if (fwd_streams[sm_id] == NULL)
714 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
721 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
/*
 * Print a one-line summary of the burst-size distribution in *pbs:
 * total bursts plus the share of the two most frequent burst sizes
 * (everything else lumped into "others"). rx_tx is the "RX"/"TX" label.
 * NOTE(review): total_burst is not visibly initialized here — its
 * zero-initialization line appears to be lost in extraction; confirm.
 */
723 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
725 unsigned int total_burst;
726 unsigned int nb_burst;
727 unsigned int burst_stats[3];
728 uint16_t pktnb_stats[3];
730 int burst_percent[3];
733 * First compute the total number of packet bursts and the
734 * two highest numbers of bursts of the same number of packets.
737 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
738 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
739 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
740 nb_burst = pbs->pkt_burst_spread[nb_pkt];
743 total_burst += nb_burst;
/* Keep a running top-2: demote the current best before replacing it. */
744 if (nb_burst > burst_stats[0]) {
745 burst_stats[1] = burst_stats[0];
746 pktnb_stats[1] = pktnb_stats[0];
747 burst_stats[0] = nb_burst;
748 pktnb_stats[0] = nb_pkt;
751 if (total_burst == 0)
753 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
754 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
755 burst_percent[0], (int) pktnb_stats[0]);
756 if (burst_stats[0] == total_burst) {
760 if (burst_stats[0] + burst_stats[1] == total_burst) {
761 printf(" + %d%% of %d pkts]\n",
762 100 - burst_percent[0], pktnb_stats[1]);
765 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
766 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
767 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
768 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
771 printf(" + %d%% of %d pkts + %d%% of others]\n",
772 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
774 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
/*
 * Print the forwarding statistics for one port: RX/TX packet totals,
 * drops, checksum errors (csum engine only), pause-frame and fdir
 * counters, and per-queue stats-register counters when queue-stats
 * mapping is enabled. Two layouts: left-aligned (no mapping) vs
 * right-aligned (mapping enabled).
 */
777 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
779 struct rte_port *port;
782 static const char *fwd_stats_border = "----------------------";
784 port = &ports[port_id];
785 printf("\n %s Forward statistics for port %-2d %s\n",
786 fwd_stats_border, port_id, fwd_stats_border);
788 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
789 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
791 stats->ipackets, stats->imissed,
792 (uint64_t) (stats->ipackets + stats->imissed),
794 if (cur_fwd_eng == &csum_fwd_engine)
795 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
796 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
797 if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
798 printf(" RX-badcrc: %-14"PRIu64" RX-badlen: %-14"PRIu64
799 "RX-error: %-"PRIu64"\n",
800 stats->ibadcrc, stats->ibadlen, stats->ierrors);
801 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
804 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
806 stats->opackets, port->tx_dropped,
807 (uint64_t) (stats->opackets + port->tx_dropped));
810 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
812 stats->ipackets, stats->imissed,
813 (uint64_t) (stats->ipackets + stats->imissed));
815 if (cur_fwd_eng == &csum_fwd_engine)
816 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
817 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
818 if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
819 printf(" RX-badcrc: %14"PRIu64" RX-badlen: %14"PRIu64
820 " RX-error:%"PRIu64"\n",
821 stats->ibadcrc, stats->ibadlen, stats->ierrors);
822 printf(" RX-nombufs: %14"PRIu64"\n",
826 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
828 stats->opackets, port->tx_dropped,
829 (uint64_t) (stats->opackets + port->tx_dropped));
832 /* Display statistics of XON/XOFF pause frames, if any. */
833 if ((stats->tx_pause_xon | stats->rx_pause_xon |
834 stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) {
835 printf(" RX-XOFF: %-14"PRIu64" RX-XON: %-14"PRIu64"\n",
836 stats->rx_pause_xoff, stats->rx_pause_xon);
837 printf(" TX-XOFF: %-14"PRIu64" TX-XON: %-14"PRIu64"\n",
838 stats->tx_pause_xoff, stats->tx_pause_xon);
841 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
843 pkt_burst_stats_display("RX",
844 &port->rx_stream->rx_burst_stats);
846 pkt_burst_stats_display("TX",
847 &port->tx_stream->tx_burst_stats);
850 if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
851 printf(" Fdirmiss:%14"PRIu64" Fdirmatch:%14"PRIu64"\n",
855 if (port->rx_queue_stats_mapping_enabled) {
857 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
858 printf(" Stats reg %2d RX-packets:%14"PRIu64
859 " RX-errors:%14"PRIu64
860 " RX-bytes:%14"PRIu64"\n",
861 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
865 if (port->tx_queue_stats_mapping_enabled) {
866 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
867 printf(" Stats reg %2d TX-packets:%14"PRIu64
868 " TX-bytes:%14"PRIu64"\n",
869 i, stats->q_opackets[i], stats->q_obytes[i]);
873 printf(" %s--------------------------------%s\n",
874 fwd_stats_border, fwd_stats_border);
/*
 * Print the per-stream counters (RX/TX packets, drops, bad checksums for
 * the csum engine, burst spread when compiled in). Streams with no
 * activity at all are skipped entirely.
 */
878 fwd_stream_stats_display(streamid_t stream_id)
880 struct fwd_stream *fs;
881 static const char *fwd_top_stats_border = "-------";
883 fs = fwd_streams[stream_id];
/* Nothing to report for an idle stream. */
884 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
885 (fs->fwd_dropped == 0))
887 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
888 "TX Port=%2d/Queue=%2d %s\n",
889 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
890 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
891 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
892 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
894 /* if checksum mode */
895 if (cur_fwd_eng == &csum_fwd_engine) {
896 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
897 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
900 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
901 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
902 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
/*
 * Drain any stale packets sitting in the RX queues of all forwarding
 * ports before a run starts, so statistics start from a clean state.
 * Two passes with a 10 ms pause catch packets in flight during pass one.
 */
907 flush_fwd_rx_queues(void)
909 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
917 for (j = 0; j < 2; j++) {
918 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
919 for (rxq = 0; rxq < nb_rxq; rxq++) {
920 port_id = fwd_ports_ids[rxp];
922 nb_rx = rte_eth_rx_burst(port_id, rxq,
923 pkts_burst, MAX_PKT_BURST);
924 for (i = 0; i < nb_rx; i++)
925 rte_pktmbuf_free(pkts_burst[i]);
929 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
/*
 * Main loop of a forwarding lcore: keep applying pkt_fwd to every stream
 * assigned to this lcore until fc->stopped is set by another thread.
 */
934 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
936 struct fwd_stream **fsm;
940 fsm = &fwd_streams[fc->stream_idx];
941 nb_fs = fc->stream_nb;
943 for (sm_id = 0; sm_id < nb_fs; sm_id++)
944 (*pkt_fwd)(fsm[sm_id]);
945 } while (! fc->stopped);
/* lcore entry point: run the currently configured forwarding engine. */
949 start_pkt_forward_on_core(void *fwd_arg)
951 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
952 cur_fwd_config.fwd_eng->packet_fwd);
957 * Run the TXONLY packet forwarding engine to send a single burst of packets.
958 * Used to start communication flows in network loopback test configurations.
961 run_one_txonly_burst_on_core(void *fwd_arg)
963 struct fwd_lcore *fwd_lc;
964 struct fwd_lcore tmp_lcore;
966 fwd_lc = (struct fwd_lcore *) fwd_arg;
/* stopped=1 up front makes the do/while loop execute exactly once. */
968 tmp_lcore.stopped = 1;
969 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
974 * Launch packet forwarding:
975 * - Setup per-port forwarding context.
976 * - launch logical cores with their forwarding configuration.
979 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
981 port_fwd_begin_t port_fwd_begin;
986 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
987 if (port_fwd_begin != NULL) {
988 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
989 (*port_fwd_begin)(fwd_ports_ids[i]);
991 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
992 lc_id = fwd_lcores_cpuids[i];
/* In interactive mode the master lcore stays on the command line. */
993 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
994 fwd_lcores[i]->stopped = 0;
995 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
996 fwd_lcores[i], lc_id);
998 printf("launch lcore %u failed - diag=%d\n",
1005 * Launch packet forwarding configuration.
1008 start_packet_forwarding(int with_tx_first)
1010 port_fwd_begin_t port_fwd_begin;
1011 port_fwd_end_t port_fwd_end;
1012 struct rte_port *port;
/* Refuse to start unless every port is up and no run is in progress. */
1017 if (all_ports_started() == 0) {
1018 printf("Not all ports were started\n");
1021 if (test_done == 0) {
1022 printf("Packet forwarding already started\n");
1026 for (i = 0; i < nb_fwd_ports; i++) {
1027 pt_id = fwd_ports_ids[i];
1028 port = &ports[pt_id];
1029 if (!port->dcb_flag) {
1030 printf("In DCB mode, all forwarding ports must "
1031 "be configured in this mode.\n");
1035 if (nb_fwd_lcores == 1) {
1036 printf("In DCB mode,the nb forwarding cores "
1037 "should be larger than 1.\n");
1044 flush_fwd_rx_queues();
1047 rxtx_config_display();
/* Snapshot current HW stats as the baseline for this run. */
1049 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1050 pt_id = fwd_ports_ids[i];
1051 port = &ports[pt_id];
1052 rte_eth_stats_get(pt_id, &port->stats);
1053 port->tx_dropped = 0;
1055 map_port_queue_stats_mapping_registers(pt_id, port);
1057 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1058 fwd_streams[sm_id]->rx_packets = 0;
1059 fwd_streams[sm_id]->tx_packets = 0;
1060 fwd_streams[sm_id]->fwd_dropped = 0;
1061 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1062 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1064 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1065 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1066 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1067 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1068 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1070 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1071 fwd_streams[sm_id]->core_cycles = 0;
/* Optionally prime the loopback with one tx-only burst before the run. */
1074 if (with_tx_first) {
1075 port_fwd_begin = tx_only_engine.port_fwd_begin;
1076 if (port_fwd_begin != NULL) {
1077 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1078 (*port_fwd_begin)(fwd_ports_ids[i]);
1080 launch_packet_forwarding(run_one_txonly_burst_on_core);
1081 rte_eal_mp_wait_lcore();
1082 port_fwd_end = tx_only_engine.port_fwd_end;
1083 if (port_fwd_end != NULL) {
1084 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1085 (*port_fwd_end)(fwd_ports_ids[i]);
1088 launch_packet_forwarding(start_pkt_forward_on_core);
/*
 * Stop packet forwarding: signal all forwarding lcores to stop, wait for
 * them, run the engine's per-port end hook, fold per-stream counters into
 * the per-port accumulators, then print per-port and accumulated stats
 * (HW counters are shown relative to the baseline snapshot taken in
 * start_packet_forwarding, and each baseline field is reset to 0).
 * FIX: the fdirmatch/fdirmiss baselines were never cleared — both resets
 * wrote port->stats.rx_nombuf (a copy-paste slip, rx_nombuf is already
 * reset just above), so fdir counters drifted across consecutive runs.
 * NOTE(review): total_recv/total_xmit initialization lines are not
 * visible in this extraction — confirm they are zeroed before the loop.
 */
1092 stop_packet_forwarding(void)
1094 struct rte_eth_stats stats;
1095 struct rte_port *port;
1096 port_fwd_end_t port_fwd_end;
1101 uint64_t total_recv;
1102 uint64_t total_xmit;
1103 uint64_t total_rx_dropped;
1104 uint64_t total_tx_dropped;
1105 uint64_t total_rx_nombuf;
1106 uint64_t tx_dropped;
1107 uint64_t rx_bad_ip_csum;
1108 uint64_t rx_bad_l4_csum;
1109 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1110 uint64_t fwd_cycles;
1112 static const char *acc_stats_border = "+++++++++++++++";
1114 if (all_ports_started() == 0) {
1115 printf("Not all ports were started\n");
1119 printf("Packet forwarding not started\n");
1122 printf("Telling cores to stop...");
1123 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1124 fwd_lcores[lc_id]->stopped = 1;
1125 printf("\nWaiting for lcores to finish...\n");
1126 rte_eal_mp_wait_lcore();
1127 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1128 if (port_fwd_end != NULL) {
1129 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1130 pt_id = fwd_ports_ids[i];
1131 (*port_fwd_end)(pt_id);
1134 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
/* Aggregate per-stream counters into their RX/TX ports. */
1137 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1138 if (cur_fwd_config.nb_fwd_streams >
1139 cur_fwd_config.nb_fwd_ports) {
1140 fwd_stream_stats_display(sm_id);
1141 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1142 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1144 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1146 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1149 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1150 tx_dropped = (uint64_t) (tx_dropped +
1151 fwd_streams[sm_id]->fwd_dropped);
1152 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1155 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1156 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1157 fwd_streams[sm_id]->rx_bad_ip_csum);
1158 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1162 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1163 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1164 fwd_streams[sm_id]->rx_bad_l4_csum);
1165 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1168 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1169 fwd_cycles = (uint64_t) (fwd_cycles +
1170 fwd_streams[sm_id]->core_cycles);
1175 total_rx_dropped = 0;
1176 total_tx_dropped = 0;
1177 total_rx_nombuf = 0;
1178 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1179 pt_id = fwd_ports_ids[i];
1181 port = &ports[pt_id];
/* Subtract the start-of-run baseline, then clear it for the next run. */
1182 rte_eth_stats_get(pt_id, &stats);
1183 stats.ipackets -= port->stats.ipackets;
1184 port->stats.ipackets = 0;
1185 stats.opackets -= port->stats.opackets;
1186 port->stats.opackets = 0;
1187 stats.ibytes -= port->stats.ibytes;
1188 port->stats.ibytes = 0;
1189 stats.obytes -= port->stats.obytes;
1190 port->stats.obytes = 0;
1191 stats.imissed -= port->stats.imissed;
1192 port->stats.imissed = 0;
1193 stats.oerrors -= port->stats.oerrors;
1194 port->stats.oerrors = 0;
1195 stats.rx_nombuf -= port->stats.rx_nombuf;
1196 port->stats.rx_nombuf = 0;
1197 stats.fdirmatch -= port->stats.fdirmatch;
1198 port->stats.fdirmatch = 0;
1199 stats.fdirmiss -= port->stats.fdirmiss;
1200 port->stats.fdirmiss = 0;
1202 total_recv += stats.ipackets;
1203 total_xmit += stats.opackets;
1204 total_rx_dropped += stats.imissed;
1205 total_tx_dropped += port->tx_dropped;
1206 total_rx_nombuf += stats.rx_nombuf;
1208 fwd_port_stats_display(pt_id, &stats);
1210 printf("\n %s Accumulated forward statistics for all ports"
1212 acc_stats_border, acc_stats_border);
1213 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1215 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1217 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1218 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1219 if (total_rx_nombuf > 0)
1220 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1221 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1223 acc_stats_border, acc_stats_border);
1224 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1226 printf("\n CPU cycles/packet=%u (total cycles="
1227 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1228 (unsigned int)(fwd_cycles / total_recv),
1229 fwd_cycles, total_recv);
1231 printf("\nDone.\n");
/*
 * Administratively bring the link of port 'pid' up via
 * rte_eth_dev_set_link_up(); failure is only reported on stdout,
 * no error code is propagated to the caller.
 */
1236 dev_set_link_up(portid_t pid)
1238 if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1239 printf("\nSet link up fail.\n");
/*
 * Administratively bring the link of port 'pid' down via
 * rte_eth_dev_set_link_down(); failure is only reported on stdout,
 * no error code is propagated to the caller.
 */
1243 dev_set_link_down(portid_t pid)
1245 if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1246 printf("\nSet link down fail.\n");
/*
 * Return whether every known port is in the RTE_PORT_STARTED state.
 * NOTE(review): the per-iteration assignment of 'port' (presumably
 * port = &ports[pi]) is on an elided line - confirm against the full file.
 */
1250 all_ports_started(void)
1253 struct rte_port *port;
1255 for (pi = 0; pi < nb_ports; pi++) {
1257 /* Check if there is a port which is not started */
1258 if (port->port_status != RTE_PORT_STARTED)
1262 /* No port is not started */
/*
 * Start port 'pid' (or every port when pid selects all, i.e. pid is not a
 * valid single-port index below nb_ports): configure the device and its
 * RX/TX queues if flagged for reconfiguration, start it, and print its MAC
 * address.  Port state transitions are done with rte_atomic16_cmpset()
 * (STOPPED -> HANDLING -> STARTED) so concurrent state changes are
 * detected.  Finally triggers a link-status check if any port started.
 * NOTE(review): sampled excerpt - several declarations, braces and
 * 'continue'/'return' lines are elided.
 */
1267 start_port(portid_t pid)
1269 int diag, need_check_link_status = 0;
1272 struct rte_port *port;
1273 struct ether_addr mac_addr;
/* Refuse to (re)start ports while forwarding is running. */
1275 if (test_done == 0) {
1276 printf("Please stop forwarding first\n");
1280 if (init_fwd_streams() < 0) {
1281 printf("Fail from init_fwd_streams()\n");
1287 for (pi = 0; pi < nb_ports; pi++) {
/* Skip ports other than the requested one (unless starting all). */
1288 if (pid < nb_ports && pid != pi)
/* Claim the port: only a STOPPED port may be started. */
1292 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1293 RTE_PORT_HANDLING) == 0) {
1294 printf("Port %d is now not stopped\n", pi);
/* Reconfigure the device if a config change is pending. */
1298 if (port->need_reconfig > 0) {
1299 port->need_reconfig = 0;
1301 printf("Configuring Port %d (socket %u)\n", pi,
1303 /* configure port */
1304 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
/* On configure failure: roll state back and retry next start. */
1307 if (rte_atomic16_cmpset(&(port->port_status),
1308 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1309 printf("Port %d can not be set back "
1310 "to stopped\n", pi);
1311 printf("Fail to configure port %d\n", pi);
1312 /* try to reconfigure port next time */
1313 port->need_reconfig = 1;
/* Re-create RX/TX queues if a queue config change is pending. */
1317 if (port->need_reconfig_queues > 0) {
1318 port->need_reconfig_queues = 0;
1319 /* setup tx queues */
1320 for (qi = 0; qi < nb_txq; qi++) {
/* Prefer the user-requested NUMA socket for the TX ring. */
1321 if ((numa_support) &&
1322 (txring_numa[pi] != NUMA_NO_CONFIG))
1323 diag = rte_eth_tx_queue_setup(pi, qi,
1324 nb_txd,txring_numa[pi],
1327 diag = rte_eth_tx_queue_setup(pi, qi,
1328 nb_txd,port->socket_id,
1334 /* Fail to setup tx queue, return */
1335 if (rte_atomic16_cmpset(&(port->port_status),
1337 RTE_PORT_STOPPED) == 0)
1338 printf("Port %d can not be set back "
1339 "to stopped\n", pi);
1340 printf("Fail to configure port %d tx queues\n", pi);
1341 /* try to reconfigure queues next time */
1342 port->need_reconfig_queues = 1;
1345 /* setup rx queues */
1346 for (qi = 0; qi < nb_rxq; qi++) {
/* With a user-requested NUMA socket, the mempool on that
 * socket must exist for the RX ring. */
1347 if ((numa_support) &&
1348 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1349 struct rte_mempool * mp =
1350 mbuf_pool_find(rxring_numa[pi]);
1352 printf("Failed to setup RX queue:"
1353 "No mempool allocation"
1354 "on the socket %d\n",
1359 diag = rte_eth_rx_queue_setup(pi, qi,
1360 nb_rxd,rxring_numa[pi],
1361 &(port->rx_conf),mp);
1364 diag = rte_eth_rx_queue_setup(pi, qi,
1365 nb_rxd,port->socket_id,
1367 mbuf_pool_find(port->socket_id));
1373 /* Fail to setup rx queue, return */
1374 if (rte_atomic16_cmpset(&(port->port_status),
1376 RTE_PORT_STOPPED) == 0)
1377 printf("Port %d can not be set back "
1378 "to stopped\n", pi);
1379 printf("Fail to configure port %d rx queues\n", pi);
1380 /* try to reconfigure queues next time */
1381 port->need_reconfig_queues = 1;
/* Start the device; roll the state back to STOPPED on failure. */
1386 if (rte_eth_dev_start(pi) < 0) {
1387 printf("Fail to start port %d\n", pi);
1389 /* Failed to start the port: roll state back to stopped. */
1390 if (rte_atomic16_cmpset(&(port->port_status),
1391 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1392 printf("Port %d can not be set back to "
/* Device started: publish the STARTED state. */
1397 if (rte_atomic16_cmpset(&(port->port_status),
1398 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1399 printf("Port %d can not be set into started\n", pi);
/* Report the port's MAC address to the operator. */
1401 rte_eth_macaddr_get(pi, &mac_addr);
1402 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1403 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1404 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1405 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1407 /* at least one port started, need checking link status */
1408 need_check_link_status = 1;
1411 if (need_check_link_status && !no_link_check)
1412 check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1414 printf("Please stop the ports first\n");
/*
 * Stop port 'pid' (or every port when pid selects all).  Each port is
 * claimed with an atomic STARTED -> HANDLING transition, stopped via
 * rte_eth_dev_stop(), then published as STOPPED.  A link-status check
 * follows if any port was actually stopped.
 * NOTE(review): sampled excerpt - declarations and 'continue' lines elided.
 */
1421 stop_port(portid_t pid)
1424 struct rte_port *port;
1425 int need_check_link_status = 0;
/* Refuse to stop ports while forwarding is running. */
1427 if (test_done == 0) {
1428 printf("Please stop forwarding first\n");
1435 printf("Stopping ports...\n");
1437 for (pi = 0; pi < nb_ports; pi++) {
/* Skip ports other than the requested one (unless stopping all). */
1438 if (pid < nb_ports && pid != pi)
/* Only a STARTED port can be stopped; otherwise skip it. */
1442 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1443 RTE_PORT_HANDLING) == 0)
1446 rte_eth_dev_stop(pi);
1448 if (rte_atomic16_cmpset(&(port->port_status),
1449 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1450 printf("Port %d can not be set into stopped\n", pi);
1451 need_check_link_status = 1;
1453 if (need_check_link_status && !no_link_check)
1454 check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
/*
 * Close port 'pid' (or every port when pid selects all).  Each port is
 * claimed with an atomic STOPPED -> HANDLING transition, closed via
 * rte_eth_dev_close(), then published as CLOSED.
 * NOTE(review): sampled excerpt - declarations and 'continue' lines elided.
 */
1460 close_port(portid_t pid)
1463 struct rte_port *port;
/* Refuse to close ports while forwarding is running. */
1465 if (test_done == 0) {
1466 printf("Please stop forwarding first\n");
1470 printf("Closing ports...\n");
1472 for (pi = 0; pi < nb_ports; pi++) {
/* Skip ports other than the requested one (unless closing all). */
1473 if (pid < nb_ports && pid != pi)
1477 if (rte_atomic16_cmpset(&(port->port_status),
1478 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1479 printf("Port %d is now not stopped\n", pi);
1483 rte_eth_dev_close(pi);
/* FIXME(review): misleading message - the transition below is to
 * RTE_PORT_CLOSED, so this should say "closed", not "stopped". */
1485 if (rte_atomic16_cmpset(&(port->port_status),
1486 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1487 printf("Port %d can not be set into stopped\n", pi);
/*
 * Return whether every known port is in the RTE_PORT_STOPPED state.
 * NOTE(review): the per-iteration assignment of 'port' and the return
 * statements are on elided lines.
 */
1494 all_ports_stopped(void)
1497 struct rte_port *port;
1499 for (pi = 0; pi < nb_ports; pi++) {
1501 if (port->port_status != RTE_PORT_STOPPED)
/*
 * Return whether the given port id is valid and currently started.
 * NOTE(review): return statements are on elided lines.
 */
1509 port_is_started(portid_t port_id)
1511 if (port_id_is_invalid(port_id))
1514 if (ports[port_id].port_status != RTE_PORT_STARTED)
/*
 * NOTE(review): fragment of the application exit path - stops and closes
 * every probed port before shutdown; the enclosing function header and
 * the intervening stop call are on elided lines.
 */
1525 for (pt_id = 0; pt_id < nb_ports; pt_id++) {
1526 printf("Stopping port %d...", pt_id);
1528 rte_eth_dev_close(pt_id);
/* Handler signature for an interactive PMD test command. */
1534 typedef void (*cmd_func_t)(void);
/* One entry of the PMD test command menu: command name plus its handler. */
1535 struct pmd_test_command {
1536 const char *cmd_name;
1537 cmd_func_t cmd_func;
/* Number of entries in the pmd_test_menu table (defined on elided lines). */
1540 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1542 /* Check the link status of all ports in up to 9s, and print them finally */
1544 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
1546 #define CHECK_INTERVAL 100 /* 100ms */
1547 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1548 uint8_t portid, count, all_ports_up, print_flag = 0;
1549 struct rte_eth_link link;
1551 printf("Checking link statuses...\n");
/* Poll all selected ports until every link is up or the timeout hits;
 * statuses are printed once, on the final pass (print_flag set). */
1553 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1555 for (portid = 0; portid < port_num; portid++) {
1556 if ((port_mask & (1 << portid)) == 0)
1558 memset(&link, 0, sizeof(link));
1559 rte_eth_link_get_nowait(portid, &link);
1560 /* print link status if flag set */
1561 if (print_flag == 1) {
1562 if (link.link_status)
1563 printf("Port %d Link Up - speed %u "
1564 "Mbps - %s\n", (uint8_t)portid,
1565 (unsigned)link.link_speed,
1566 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
/* FIXME(review): stray "\n" inside the "half-duplex" literal prints a
 * double newline; the format string already ends with "\n". */
1567 ("full-duplex") : ("half-duplex\n"));
1569 printf("Port %d Link Down\n",
1573 /* clear all_ports_up flag if any link down */
1574 if (link.link_status == 0) {
1579 /* after finally printing all link status, get out */
1580 if (print_flag == 1)
/* Not all ports up yet: wait one interval before re-polling. */
1583 if (all_ports_up == 0) {
1585 rte_delay_ms(CHECK_INTERVAL);
1588 /* set the print_flag if all ports up or timeout */
1589 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
/*
 * Program the NIC's TX queue -> stats-counter mapping registers for
 * 'port_id' from the global tx_queue_stats_mappings table, skipping
 * entries for other ports or out-of-range queues.  Marks the port as
 * having TX queue stats mapping enabled when any mapping was applied.
 * NOTE(review): error handling of 'diag' and the return are on elided lines.
 */
1596 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1600 uint8_t mapping_found = 0;
1602 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1603 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1604 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1605 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1606 tx_queue_stats_mappings[i].queue_id,
1607 tx_queue_stats_mappings[i].stats_counter_id);
1614 port->tx_queue_stats_mapping_enabled = 1;
/*
 * Program the NIC's RX queue -> stats-counter mapping registers for
 * 'port_id' from the global rx_queue_stats_mappings table; mirror image
 * of set_tx_queue_stats_mapping_registers() above.
 * NOTE(review): error handling of 'diag' and the return are on elided lines.
 */
1619 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1623 uint8_t mapping_found = 0;
1625 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1626 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1627 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1628 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1629 rx_queue_stats_mappings[i].queue_id,
1630 rx_queue_stats_mappings[i].stats_counter_id);
1637 port->rx_queue_stats_mapping_enabled = 1;
/*
 * Apply both TX and RX queue-stats mappings for port 'pi'.  -ENOTSUP from
 * the driver is tolerated (feature simply disabled for the port); any
 * other failure aborts the application via rte_exit().
 */
1642 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1646 diag = set_tx_queue_stats_mapping_registers(pi, port);
1648 if (diag == -ENOTSUP) {
1649 port->tx_queue_stats_mapping_enabled = 0;
1650 printf("TX queue stats mapping not supported port id=%d\n", pi);
/* Any other error is fatal. */
1653 rte_exit(EXIT_FAILURE,
1654 "set_tx_queue_stats_mapping_registers "
1655 "failed for port id=%d diag=%d\n",
1659 diag = set_rx_queue_stats_mapping_registers(pi, port);
1661 if (diag == -ENOTSUP) {
1662 port->rx_queue_stats_mapping_enabled = 0;
1663 printf("RX queue stats mapping not supported port id=%d\n", pi);
1666 rte_exit(EXIT_FAILURE,
1667 "set_rx_queue_stats_mapping_registers "
1668 "failed for port id=%d diag=%d\n",
/*
 * Initialize a port's RX/TX queue configuration from the driver's
 * defaults (dev_info), then override each threshold/flag field with the
 * corresponding command-line value when one was supplied (i.e. the global
 * is not RTE_PMD_PARAM_UNSET).
 */
1674 rxtx_port_config(struct rte_port *port)
1676 port->rx_conf = port->dev_info.default_rxconf;
1677 port->tx_conf = port->dev_info.default_txconf;
1679 /* Check if any RX/TX parameters have been passed */
1680 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1681 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1683 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1684 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1686 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1687 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1689 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1690 port->rx_conf.rx_free_thresh = rx_free_thresh;
1692 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1693 port->rx_conf.rx_drop_en = rx_drop_en;
1695 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1696 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1698 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1699 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1701 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1702 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1704 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1705 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1707 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1708 port->tx_conf.tx_free_thresh = tx_free_thresh;
1710 if (txq_flags != RTE_PMD_PARAM_UNSET)
1711 port->tx_conf.txq_flags = txq_flags;
/*
 * Build the default rte_eth_conf for every probed port: RX mode, flow
 * director config, RSS settings, and the multi-queue mode derived from
 * whether DCB and/or SR-IOV VFs are in use.  Also applies the RX/TX
 * queue config, caches the MAC address, and programs queue-stats
 * mappings.  NOTE(review): sampled excerpt - the per-port assignment of
 * 'port', else branches and the #endif are on elided lines.
 */
1715 init_port_config(void)
1718 struct rte_port *port;
1720 for (pid = 0; pid < nb_ports; pid++) {
1722 port->dev_conf.rxmode = rx_mode;
1723 port->dev_conf.fdir_conf = fdir_conf;
/* With multiple RX queues: enable RSS with the configured hash. */
1725 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1726 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
/* Otherwise: disable RSS entirely. */
1728 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1729 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
/* Plain (non-DCB, non-VF) port: RSS or no multi-queue mode. */
1732 if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1733 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1734 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1736 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
/* SR-IOV VFs present: select a VMDQ-based RX multi-queue mode. */
1739 if (port->dev_info.max_vfs != 0) {
1740 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1741 port->dev_conf.rxmode.mq_mode =
1744 port->dev_conf.rxmode.mq_mode =
1747 port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
1750 rxtx_port_config(port);
1752 rte_eth_macaddr_get(pid, &port->eth_addr);
1754 map_port_queue_stats_mapping_registers(pid, port);
1755 #ifdef RTE_NIC_BYPASS
1756 rte_eth_dev_bypass_init(pid);
/* VLAN tags assigned to the VMDQ+DCB pool mapping (one tag per pool
 * slot); also used by init_port_dcb_config() to populate the VLAN
 * filter table. */
1761 const uint16_t vlan_tags[] = {
1762 0, 1, 2, 3, 4, 5, 6, 7,
1763 8, 9, 10, 11, 12, 13, 14, 15,
1764 16, 17, 18, 19, 20, 21, 22, 23,
1765 24, 25, 26, 27, 28, 29, 30, 31
/*
 * Fill 'eth_conf' with a DCB configuration derived from 'dcb_conf':
 * either VMDQ+DCB (virtualization enabled) using the vlan_tags table to
 * map VLANs to pools, or plain DCB RX/TX.  PFC support is advertised in
 * dcb_capability_en when requested.
 * NOTE(review): sampled excerpt - declarations, else branches and the
 * return are on elided lines.
 */
1769 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
1774 * Builds up the correct configuration for dcb+vt based on the vlan tags array
1775 * given above, and the number of traffic classes available for use.
1777 if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
1778 struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
1779 struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
1781 /* VMDQ+DCB RX and TX configurations */
1782 vmdq_rx_conf.enable_default_pool = 0;
1783 vmdq_rx_conf.default_pool = 0;
/* Pool count is tied to the TC count: 4 TCs -> 32 pools, else 16. */
1784 vmdq_rx_conf.nb_queue_pools =
1785 (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1786 vmdq_tx_conf.nb_queue_pools =
1787 (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
/* Map each VLAN tag onto a pool, round-robin over the pools. */
1789 vmdq_rx_conf.nb_pool_maps = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1790 for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
1791 vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[ i ];
1792 vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
/* Identity-map user priorities onto DCB queues. */
1794 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1795 vmdq_rx_conf.dcb_queue[i] = i;
1796 vmdq_tx_conf.dcb_queue[i] = i;
1799 /*set DCB mode of RX and TX of multiple queues*/
1800 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1801 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1802 if (dcb_conf->pfc_en)
1803 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1805 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1807 (void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
1808 sizeof(struct rte_eth_vmdq_dcb_conf)));
1809 (void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
1810 sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
/* Non-virtualized DCB: plain DCB RX/TX queue mapping. */
1813 struct rte_eth_dcb_rx_conf rx_conf;
1814 struct rte_eth_dcb_tx_conf tx_conf;
1816 /* queue mapping configuration of DCB RX and TX */
1817 if (dcb_conf->num_tcs == ETH_4_TCS)
1818 dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
1820 dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
1822 rx_conf.nb_tcs = dcb_conf->num_tcs;
1823 tx_conf.nb_tcs = dcb_conf->num_tcs;
/* Identity-map user priorities onto DCB queues. */
1825 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
1826 rx_conf.dcb_queue[i] = i;
1827 tx_conf.dcb_queue[i] = i;
1829 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
1830 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1831 if (dcb_conf->pfc_en)
1832 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1834 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1836 (void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
1837 sizeof(struct rte_eth_dcb_rx_conf)));
1838 (void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
1839 sizeof(struct rte_eth_dcb_tx_conf)));
/*
 * Reconfigure port 'pid' for DCB operation: build a DCB rte_eth_conf via
 * get_eth_dcb_conf(), install it on the port, enable VLAN filtering and
 * add every tag from the vlan_tags table, then flag the port as DCB.
 * NOTE(review): sampled excerpt - declarations, error checks and the
 * return are on elided lines.
 */
1846 init_port_dcb_config(portid_t pid,struct dcb_config *dcb_conf)
1848 struct rte_eth_conf port_conf;
1849 struct rte_port *rte_port;
1854 /* rxq and txq configuration in dcb mode */
1857 rx_free_thresh = 64;
1859 memset(&port_conf,0,sizeof(struct rte_eth_conf));
1860 /* Enter DCB configuration status */
1863 nb_vlan = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1864 /*set configuration of DCB in vt mode and DCB in non-vt mode*/
1865 retval = get_eth_dcb_conf(&port_conf, dcb_conf);
1869 rte_port = &ports[pid];
1870 memcpy(&rte_port->dev_conf, &port_conf,sizeof(struct rte_eth_conf));
1872 rxtx_port_config(rte_port);
/* DCB here relies on VLAN classification: enable HW VLAN filtering
 * and admit every tag used by the pool mapping. */
1874 rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1875 for (i = 0; i < nb_vlan; i++){
1876 rx_vft_set(pid, vlan_tags[i], 1);
1879 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1880 map_port_queue_stats_mapping_registers(pid, rte_port);
1882 rte_port->dcb_flag = 1;
1888 main(int argc, char** argv)
1893 diag = rte_eal_init(argc, argv);
1895 rte_panic("Cannot init EAL\n");
1897 nb_ports = (portid_t) rte_eth_dev_count();
1899 rte_exit(EXIT_FAILURE, "No probed ethernet device\n");
1901 set_def_fwd_config();
1903 rte_panic("Empty set of forwarding logical cores - check the "
1904 "core mask supplied in the command parameters\n");
1909 launch_args_parse(argc, argv);
1911 if (nb_rxq > nb_txq)
1912 printf("Warning: nb_rxq=%d enables RSS configuration, "
1913 "but nb_txq=%d will prevent to fully test it.\n",
1917 if (start_port(RTE_PORT_ALL) != 0)
1918 rte_exit(EXIT_FAILURE, "Start ports failed\n");
1920 /* set all ports to promiscuous mode by default */
1921 for (port_id = 0; port_id < nb_ports; port_id++)
1922 rte_eth_promiscuous_enable(port_id);
1924 #ifdef RTE_LIBRTE_CMDLINE
1925 if (interactive == 1) {
1927 printf("Start automatic packet forwarding\n");
1928 start_packet_forwarding(0);
1937 printf("No commandline core given, start packet forwarding\n");
1938 start_packet_forwarding(0);
1939 printf("Press enter to exit\n");
1940 rc = read(0, &c, 1);