/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <unistd.h>
#include <errno.h>
#include <sys/types.h>

#include <sys/queue.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_PMD_XENVIRT
#include <rte_eth_xenvirt.h>
#endif

#include "mempool_osdep.h"
#include "testpmd.h"
uint16_t verbose_level = 0; /**< Silent by default. */

/* use master core for command line ? */
uint8_t interactive = 0;
uint8_t auto_start = 0;

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 0; /**< No numa support by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
 */
uint8_t mp_anon = 0;

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;
/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
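
/*
 * Example: with 2 probed ports and nb_rxq = 4, init_fwd_streams() below
 * allocates 2 * 4 = 8 forwarding streams, one per (port, RX queue) pair;
 * each stream is then dispatched to one of the forwarding lcores.
 */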
/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_retry_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN, /**< First segment carries the whole packet. */
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means non-DCB mode. */
uint8_t dcb_config = 0;

/* Whether the DCB configuration is currently under test. */
uint8_t dcb_test = 0;

/* DCB on and VT on is the default queue mapping. */
enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */
int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX queue flags.
 */
int32_t txq_flags = RTE_PMD_PARAM_UNSET;
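
/*
 * Note on the threshold registers above: pthresh, hthresh and wthresh are
 * the prefetch, host and write-back thresholds of the descriptor rings,
 * and tx_rs_thresh controls how often the Report Status bit is set in TX
 * descriptors. Any value left at RTE_PMD_PARAM_UNSET (-1) is not applied:
 * rxtx_port_config() then keeps the defaults that the PMD reports through
 * rte_eth_dev_info_get().
 */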
/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration.
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoids flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Avoids checking link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */
/*
 * NIC bypass mode configuration options.
 */
#ifdef RTE_NIC_BYPASS

/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;

#endif
/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.split_hdr_size = 0,
	.header_split   = 0, /**< Header Split disabled. */
	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
};
volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);
/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (! rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;

	for (pt_id = 0; pt_id < nb_ports; pt_id++)
		fwd_ports_ids[pt_id] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
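
/*
 * With the defaults above, the peer address of port i is the locally
 * administered MAC 02:00:00:00:00:<i> (ETHER_LOCAL_ADMIN_ADDR is 0x02),
 * unless overridden with the --eth-peer command-line option.
 */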
/*
 * Configuration initialisation done once at init time.
 */
struct mbuf_ctor_arg {
	uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
	uint16_t seg_buf_size;   /**< size of data segment in mbuf. */
};

struct mbuf_pool_ctor_arg {
	uint16_t seg_buf_size; /**< size of data segment in mbuf. */
};

static void
testpmd_mbuf_ctor(struct rte_mempool *mp,
		  void *opaque_arg,
		  void *raw_mbuf,
		  __attribute__((unused)) unsigned i)
{
	struct mbuf_ctor_arg *mb_ctor_arg;
	struct rte_mbuf *mb;

	mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
	mb = (struct rte_mbuf *) raw_mbuf;

	/* start of buffer is just after the mbuf structure */
	mb->buf_addr = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
	mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
			mb_ctor_arg->seg_buf_offset);
	mb->buf_len = mb_ctor_arg->seg_buf_size;
	mb->ol_flags = 0;
	mb->data_off = RTE_PKTMBUF_HEADROOM;
	mb->nb_segs = 1;
}
static void
testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
		       void *opaque_arg)
{
	struct mbuf_pool_ctor_arg *mbp_ctor_arg;
	struct rte_pktmbuf_pool_private *mbp_priv;

	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
		printf("%s(%s) private_data_size %d < %d\n",
		       __func__, mp->name, (int) mp->private_data_size,
		       (int) sizeof(struct rte_pktmbuf_pool_private));
		return;
	}
	mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
	mbp_priv = rte_mempool_get_priv(mp);
	mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
}
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp;
	struct mbuf_pool_ctor_arg mbp_ctor_arg;
	struct mbuf_ctor_arg mb_ctor_arg;
	uint32_t mb_size;

	mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
						mbuf_seg_size);
	mb_ctor_arg.seg_buf_offset =
		(uint16_t) RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
	mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
	mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

#ifdef RTE_LIBRTE_PMD_XENVIRT
	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
		(unsigned) mb_mempool_cache,
		sizeof(struct rte_pktmbuf_pool_private),
		testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
		testpmd_mbuf_ctor, &mb_ctor_arg,
		socket_id, 0);
#else
	if (mp_anon != 0)
		rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
			(unsigned) mb_mempool_cache,
			sizeof(struct rte_pktmbuf_pool_private),
			testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
			testpmd_mbuf_ctor, &mb_ctor_arg,
			socket_id, 0);
	else
		rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size,
			(unsigned) mb_mempool_cache,
			sizeof(struct rte_pktmbuf_pool_private),
			testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
			testpmd_mbuf_ctor, &mb_ctor_arg,
			socket_id, 0);
#endif

	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
						"failed\n", socket_id);
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}
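
/*
 * Layout of each element allocated by mbuf_pool_create(), as computed
 * above: [struct rte_mbuf, rounded up to a cache line]
 * [RTE_PKTMBUF_HEADROOM][mbuf_seg_size bytes of packet data].
 * mb_size is the sum of the offset and of the data room, so buf_addr set
 * in testpmd_mbuf_ctor() points right after the mbuf header.
 */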
/*
 * Check whether a given socket id is valid in NUMA mode.
 * Return 0 if valid, -1 otherwise.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (socket_id >= MAX_SOCKET) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t lc_id;
	uint8_t port_per_socket[MAX_SOCKET];

	memset(port_per_socket, 0, MAX_SOCKET);
	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				 sizeof(struct fwd_lcore *) * nb_lcores,
				 RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
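
		/*
		 * Worked example with illustrative values: if
		 * RTE_TEST_RX_DESC_MAX and RTE_TEST_TX_DESC_MAX are 2048,
		 * MAX_PKT_BURST is 512, mb_mempool_cache is 250 and 4 lcores
		 * are enabled, one pool holds 2048 + 4 * 250 + 2048 + 512 =
		 * 5608 mbufs: enough to fill both rings at their maximum
		 * size plus the per-lcore caches and one burst in flight.
		 */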
		/* mbufs are shared among all ports in UMA mode */
		if (!numa_support)
			nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
	}

	if (!numa_support) {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * nb_ports,
			    RTE_CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
							"failed\n", nb_ports);
	}

	for (pid = 0; pid < nb_ports; pid++) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	if (numa_support) {
		uint8_t i;
		unsigned int nb_mbuf;

		if (param_total_num_mbufs)
			nb_mbuf_per_pool = nb_mbuf_per_pool / nb_ports;

		for (i = 0; i < MAX_SOCKET; i++) {
			nb_mbuf = (nb_mbuf_per_pool * port_per_socket[i]);
			if (nb_mbuf)
				mbuf_pool_create(mbuf_data_size, nb_mbuf, i);
		}
	}

	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
}
void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	struct rte_port *port;

	/* Reconfiguration of Ethernet ports. */
	ports = rte_realloc(ports,
			    sizeof(struct rte_port) * nb_ports,
			    RTE_CACHE_LINE_SIZE);
	if (ports == NULL)
		rte_exit(EXIT_FAILURE, "rte_realloc(%d struct rte_port) failed\n",
				nb_ports);

	port = &ports[new_port_id];
	rte_eth_dev_info_get(new_port_id, &port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;

	init_port_config();
}
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;

	/* set socket id according to numa or not */
	for (pid = 0; pid < nb_ports; pid++) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
						"failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
								" failed\n");
	}

	return 0;
}
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		} else if (nb_burst > burst_stats[1]) {
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
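
/*
 * Example of the line printed above (illustrative values):
 *   RX-bursts : 1000 [90% of 32 pkts + 10% of others]
 * i.e. 1000 bursts were received in total, 90% of them carried exactly
 * 32 packets, and the remaining 10% were spread over other burst sizes.
 */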
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;
	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
			printf("  RX-badcrc:  %-14"PRIu64" RX-badlen:  %-14"PRIu64
			       "RX-error: %-"PRIu64"\n",
			       stats->ibadcrc, stats->ibadlen, stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	} else {
		printf("  RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
			printf("  RX-badcrc: %14"PRIu64" RX-badlen: %14"PRIu64
			       " RX-error:%"PRIu64"\n",
			       stats->ibadcrc, stats->ibadlen, stats->ierrors);
			printf("  RX-nombufs: %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

	/* Display statistics of XON/XOFF pause frames, if any. */
	if ((stats->tx_pause_xon | stats->rx_pause_xon |
	     stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) {
		printf("  RX-XOFF:    %-14"PRIu64" RX-XON:     %-14"PRIu64"\n",
		       stats->rx_pause_xoff, stats->rx_pause_xon);
		printf("  TX-XOFF:    %-14"PRIu64" TX-XON:     %-14"PRIu64"\n",
		       stats->tx_pause_xoff, stats->tx_pause_xon);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif
	/* stats fdir */
	if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
		printf("  Fdirmiss: %14"PRIu64" Fdirmatch: %14"PRIu64"\n",
		       stats->fdirmiss, stats->fdirmatch);

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       " RX-errors:%14"PRIu64
			       " RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       " TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	} else {
		printf("\n");
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}
static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t  port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);
				} while (nb_rx > 0);
			}
		}
		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
	}
}
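
/*
 * The queues are drained twice (j < 2) with a 10 ms pause in between, so
 * that packets still in flight while the first pass is polling are caught
 * by the second pass and forwarding starts with clean counters.
 */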
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;

	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
	} while (! fc->stopped);
}

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}

/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}
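
/*
 * Setting tmp_lcore.stopped = 1 before calling run_pkt_fwd_on_lcore() makes
 * its do/while loop exit after a single iteration, so exactly one burst is
 * transmitted on each stream handled by the lcore.
 */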
/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}
/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}
	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
					"be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
				"should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	fwd_config_setup();
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		launch_packet_forwarding(run_one_txonly_burst_on_core);
		rte_eal_mp_wait_lcore();
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
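
/*
 * Note the snapshot taken with rte_eth_stats_get() in
 * start_packet_forwarding() above: the hardware counters are not reset
 * when forwarding starts; stop_packet_forwarding() subtracts this baseline
 * instead, so the statistics it prints are deltas covering only the
 * forwarding run.
 */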
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif
	static const char *acc_stats_border = "+++++++++++++++";

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					     fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
			rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					     fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
			rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;
		stats.fdirmatch -= port->stats.fdirmatch;
		port->stats.fdirmatch = 0;
		stats.fdirmiss -= port->stats.fdirmiss;
		port->stats.fdirmiss = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}
	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}
void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
		printf("\nSet link up fail.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
		printf("\nSet link down fail.\n");
}

static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	for (pi = 0; pi < nb_ports; pi++) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if (port->port_status != RTE_PORT_STARTED)
			return 0;
	}

	/* No port is not started */
	return 1;
}
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = 0;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct ether_addr mac_addr;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return -1;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return -1;
	}

	for (pi = 0; pi < nb_ports; pi++) {
		if (pid < nb_ports && pid != pi)
			continue;

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
					RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			printf("Configuring Port %d (socket %u)\n", pi,
			       port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						     &(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				    RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				if ((numa_support) &&
				    (txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, txring_numa[pi],
						&(port->tx_conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, port->socket_id,
						&(port->tx_conf));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup rx queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				if ((numa_support) &&
				    (rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool *mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue: "
							"no mempool allocated "
							"on socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd, rxring_numa[pi],
						&(port->rx_conf), mp);
				} else
					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd, port->socket_id,
						&(port->rx_conf),
						mbuf_pool_find(port->socket_id));

				if (diag == 0)
					continue;

				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}
		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Fail to setup rx queue, return */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d can not be set into started\n", pi);

		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
			mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
			mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
			mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
	else
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return;
	}

	printf("Stopping ports...\n");

	for (pi = 0; pi < nb_ports; pi++) {
		if (pid < nb_ports && pid != pi)
			continue;

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
					RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);

	printf("Done\n");
}
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return;
	}

	printf("Closing ports...\n");

	for (pi = 0; pi < nb_ports; pi++) {
		if (pid < nb_ports && pid != pi)
			continue;

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		rte_eth_dev_close(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d can not be set into closed\n", pi);
	}

	printf("Done\n");
}
int
all_ports_stopped(void)
{
	portid_t pi;
	struct rte_port *port;

	for (pi = 0; pi < nb_ports; pi++) {
		port = &ports[pi];
		if (port->port_status != RTE_PORT_STOPPED)
			return 0;
	}

	return 1;
}

int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;

	return 1;
}

void
pmd_test_exit(void)
{
	portid_t pt_id;

	for (pt_id = 0; pt_id < nb_ports; pt_id++) {
		printf("Stopping port %d...", pt_id);
		fflush(stdout);
		rte_eth_dev_close(pt_id);
		printf(" Done\n");
	}
	printf("Bye...\n");
}
typedef void (*cmd_func_t)(void);
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90  /* 9s (90 * 100ms) in total */
	uint8_t portid, count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("Checking link statuses...\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		for (portid = 0; portid < port_num; portid++) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up - speed %u "
						"Mbps - %s\n", (uint8_t)portid,
						(unsigned)link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n",
						(uint8_t)portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == 0) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
		}
	}
}
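
/*
 * Polling budget of the loop above: up to MAX_CHECK_TIME iterations of
 * CHECK_INTERVAL ms each, i.e. 90 * 100 ms = 9 s, after which the status
 * of every selected port is printed once, up or not.
 */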
static int
set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
		    (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
					tx_queue_stats_mappings[i].queue_id,
					tx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->tx_queue_stats_mapping_enabled = 1;
	return 0;
}

static int
set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
		    (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
					rx_queue_stats_mappings[i].queue_id,
					rx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->rx_queue_stats_mapping_enabled = 1;
	return 0;
}
static void
map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
{
	int diag;

	diag = set_tx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->tx_queue_stats_mapping_enabled = 0;
			printf("TX queue stats mapping not supported port id=%d\n", pi);
		}
		else
			rte_exit(EXIT_FAILURE,
					"set_tx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}

	diag = set_rx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->rx_queue_stats_mapping_enabled = 0;
			printf("RX queue stats mapping not supported port id=%d\n", pi);
		}
		else
			rte_exit(EXIT_FAILURE,
					"set_rx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}
}
static void
rxtx_port_config(struct rte_port *port)
{
	port->rx_conf = port->dev_info.default_rxconf;
	port->tx_conf = port->dev_info.default_txconf;

	/* Check if any RX/TX parameters have been passed */
	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.pthresh = rx_pthresh;

	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.hthresh = rx_hthresh;

	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.wthresh = rx_wthresh;

	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_free_thresh = rx_free_thresh;

	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_drop_en = rx_drop_en;

	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.pthresh = tx_pthresh;

	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.hthresh = tx_hthresh;

	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.wthresh = tx_wthresh;

	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_rs_thresh = tx_rs_thresh;

	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_free_thresh = tx_free_thresh;

	if (txq_flags != RTE_PMD_PARAM_UNSET)
		port->tx_conf.txq_flags = txq_flags;
}
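
/*
 * The globals tested above are filled in by launch_args_parse() from
 * command-line options (e.g. --rxpt/--rxht/--rxwt and --txpt/--txht/--txwt
 * for the threshold registers); any value left at RTE_PMD_PARAM_UNSET
 * keeps the PMD default reported in dev_info, so a bare "testpmd" run
 * changes nothing here.
 */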
void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;

	for (pid = 0; pid < nb_ports; pid++) {
		port = &ports[pid];
		port->dev_conf.rxmode = rx_mode;
		port->dev_conf.fdir_conf = fdir_conf;
		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
			else
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
		}

		if (port->dev_info.max_vfs != 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode =
					ETH_MQ_RX_VMDQ_RSS;
			else
				port->dev_conf.rxmode.mq_mode =
					ETH_MQ_RX_NONE;

			port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
		}

		rxtx_port_config(port);

		rte_eth_macaddr_get(pid, &port->eth_addr);

		map_port_queue_stats_mapping_registers(pid, port);
#ifdef RTE_NIC_BYPASS
		rte_eth_dev_bypass_init(pid);
#endif
	}
}
const uint16_t vlan_tags[] = {
	0,  1,  2,  3,  4,  5,  6,  7,
	8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};
static int
get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
{
	uint8_t i;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
	 * given above, and the number of traffic classes available for use.
	 */
	if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
		struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf.enable_default_pool = 0;
		vmdq_rx_conf.default_pool = 0;
		vmdq_rx_conf.nb_queue_pools =
			(dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf.nb_queue_pools =
			(dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		vmdq_rx_conf.nb_pool_maps = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
		for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
			vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
		}
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf.dcb_queue[i] = i;
			vmdq_tx_conf.dcb_queue[i] = i;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
		if (dcb_conf->pfc_en)
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
		else
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

		(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
				  sizeof(struct rte_eth_vmdq_dcb_conf)));
		(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
				  sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
	}
	else {
		struct rte_eth_dcb_rx_conf rx_conf;
		struct rte_eth_dcb_tx_conf tx_conf;

		/* queue mapping configuration of DCB RX and TX */
		if (dcb_conf->num_tcs == ETH_4_TCS)
			dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
		else
			dcb_q_mapping = DCB_8_TCS_Q_MAPPING;

		rx_conf.nb_tcs = dcb_conf->num_tcs;
		tx_conf.nb_tcs = dcb_conf->num_tcs;

		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf.dcb_queue[i] = i;
			tx_conf.dcb_queue[i] = i;
		}
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
		if (dcb_conf->pfc_en)
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
		else
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

		(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
				  sizeof(struct rte_eth_dcb_rx_conf)));
		(void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
				  sizeof(struct rte_eth_dcb_tx_conf)));
	}

	return 0;
}
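
/*
 * Pool-map example for the VMDQ+DCB branch above: with num_tcs == ETH_8_TCS
 * there are 16 pools, so the 32 entries of vlan_tags[] wrap around and
 * VLAN i is steered to pool (i % 16); VLANs 0 and 16 therefore share
 * pool 0, VLANs 1 and 17 share pool 1, and so on.
 */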
int
init_port_dcb_config(portid_t pid, struct dcb_config *dcb_conf)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t nb_vlan;
	uint16_t i;

	/* rxq and txq configuration in dcb mode */
	nb_rxq = 128;
	nb_txq = 128;
	rx_free_thresh = 64;

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	nb_vlan = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
	/* set configuration of DCB in vt mode and DCB in non-vt mode */
	retval = get_eth_dcb_conf(&port_conf, dcb_conf);
	if (retval < 0)
		return retval;

	rte_port = &ports[pid];
	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
	for (i = 0; i < nb_vlan; i++) {
		rx_vft_set(pid, vlan_tags[i], 1);
	}

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}
int
main(int argc, char** argv)
{
	int diag;
	uint8_t port_id;

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

	nb_ports = (portid_t) rte_eth_dev_count();
	if (nb_ports == 0)
		rte_exit(EXIT_FAILURE, "No probed ethernet device\n");

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent to fully test it.\n",
		       nb_rxq, nb_txq);

	init_config();
	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	for (port_id = 0; port_id < nb_ports; port_id++)
		rte_eth_promiscuous_enable(port_id);

#ifdef RTE_LIBRTE_CMDLINE
	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
	} else
#endif
	{
		char c;
		int rc;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(0);
		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		if (rc < 0)
			return 1;
	}

	return 0;
}