4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <sys/types.h>
44 #include <sys/queue.h>
51 #include <rte_common.h>
52 #include <rte_byteorder.h>
54 #include <rte_debug.h>
55 #include <rte_cycles.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
59 #include <rte_launch.h>
60 #include <rte_tailq.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
70 #include <rte_interrupts.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
80 #include "mempool_osdep.h"
/*
 * Global testpmd run-time configuration state.  These variables are set
 * from the command line (and interactively from the testpmd CLI) and read
 * by the forwarding engines.  NOTE(review): this view of the file is
 * missing interleaved lines (comment delimiters, blank lines); code is
 * left byte-identical.
 */
82 uint16_t verbose_level = 0; /**< Silent by default. */
84 /* use master core for command line ? */
85 uint8_t interactive = 0;
86 uint8_t auto_start = 0;
89 * NUMA support configuration.
90 * When set, the NUMA support attempts to dispatch the allocation of the
91 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
92 * probed ports among the CPU sockets 0 and 1.
93 * Otherwise, all memory is allocated from CPU socket 0.
95 uint8_t numa_support = 0; /**< No numa support by default */
98 * In UMA mode,all memory is allocated from socket 0 if --socket-num is
101 uint8_t socket_num = UMA_NO_CONFIG;
104 * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs.
109 * Record the Ethernet address of peer target ports to which packets are
111 * Must be instanciated with the ethernet addresses of peer traffic generator
/* Peer MAC addresses used by the mac forwarding engines; indexed by port id. */
114 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
115 portid_t nb_peer_eth_addrs = 0;
118 * Probed Target Environment.
120 struct rte_port *ports; /**< For all probed ethernet ports. */
121 portid_t nb_ports; /**< Number of probed ethernet ports. */
122 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
123 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
126 * Test Forwarding Configuration.
127 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
128 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
130 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
131 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
132 portid_t nb_cfg_ports; /**< Number of configured ports. */
133 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
135 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
136 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
138 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
139 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
142 * Forwarding engines.
/*
 * NULL-terminated table of the available forwarding engines; entries are
 * selected through cur_fwd_eng.  NOTE(review): several engine entries and
 * the array terminator are missing from this view of the file.
 */
144 struct fwd_engine * fwd_engines[] = {
147 &mac_retry_fwd_engine,
154 #ifdef RTE_LIBRTE_IEEE1588
155 &ieee1588_fwd_engine,
160 struct fwd_config cur_fwd_config;
161 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
163 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
164 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
165 * specified on command-line. */
168 * Configuration of packet segments used by the "txonly" processing engine.
170 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
171 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
172 TXONLY_DEF_PACKET_LEN,
174 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
176 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
177 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
179 /* current configuration is in DCB or not,0 means it is not in DCB mode */
180 uint8_t dcb_config = 0;
182 /* Whether the dcb is in testing status */
183 uint8_t dcb_test = 0;
185 /* DCB on and VT on mapping is default */
186 enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
189 * Configurable number of RX/TX queues.
191 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
192 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
195 * Configurable number of RX/TX ring descriptors.
197 #define RTE_TEST_RX_DESC_DEFAULT 128
198 #define RTE_TEST_TX_DESC_DEFAULT 512
199 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
200 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
203 * Configurable values of RX and TX ring threshold registers.
205 #define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
206 #define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
207 #define RX_WTHRESH 0 /**< Default value of RX write-back threshold register. */
209 #define TX_PTHRESH 32 /**< Default value of TX prefetch threshold register. */
210 #define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
211 #define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */
/* Default RX/TX ring threshold register values, passed to queue setup. */
213 struct rte_eth_thresh rx_thresh = {
214 .pthresh = RX_PTHRESH,
215 .hthresh = RX_HTHRESH,
216 .wthresh = RX_WTHRESH,
219 struct rte_eth_thresh tx_thresh = {
220 .pthresh = TX_PTHRESH,
221 .hthresh = TX_HTHRESH,
222 .wthresh = TX_WTHRESH,
226 * Configurable value of RX free threshold.
228 uint16_t rx_free_thresh = 32; /* Refill RX descriptors once every 32 packets,
229 This setting is needed for ixgbe to enable bulk alloc or vector
230 receive functionality. */
233 * Configurable value of RX drop enable.
235 uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */
238 * Configurable value of TX free threshold.
240 uint16_t tx_free_thresh = 0; /* Use default values. */
243 * Configurable value of TX RS bit threshold.
245 uint16_t tx_rs_thresh = 0; /* Use default values. */
248 * Configurable value of TX queue flags.
250 uint32_t txq_flags = 0; /* No flags set. */
253 * Receive Side Scaling (RSS) configuration.
255 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
258 * Port topology configuration
260 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
263 * Avoids to flush all the RX streams before starts forwarding.
265 uint8_t no_flush_rx = 0; /* flush by default */
268 * Avoids to check link status when starting/stopping a port.
270 uint8_t no_link_check = 0; /* check by default */
273 * NIC bypass mode configuration options.
275 #ifdef RTE_NIC_BYPASS
277 /* The NIC bypass watchdog timeout. */
278 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
283 * Ethernet device configuration.
/* Default RX mode applied to every port by rte_eth_dev_configure(). */
285 struct rte_eth_rxmode rx_mode = {
286 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
288 .header_split = 0, /**< Header Split disabled. */
289 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
290 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
291 .hw_vlan_strip = 1, /**< VLAN strip enabled. */
292 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
293 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
294 .hw_strip_crc = 0, /**< CRC stripping by hardware disabled. */
/* Flow Director configuration; disabled (RTE_FDIR_MODE_NONE) by default. */
297 struct rte_fdir_conf fdir_conf = {
298 .mode = RTE_FDIR_MODE_NONE,
299 .pballoc = RTE_FDIR_PBALLOC_64K,
300 .status = RTE_FDIR_REPORT_STATUS,
301 .flexbytes_offset = 0x6,
305 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
/* Queue-to-statistics-register mapping tables (see --stats-period options). */
307 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
308 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
310 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
311 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
313 uint16_t nb_tx_queue_stats_mappings = 0;
314 uint16_t nb_rx_queue_stats_mappings = 0;
316 /* Forward function declarations */
317 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
318 static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);
321 * Check if all the ports are started.
322 * If yes, return positive value. If not, return zero.
324 static int all_ports_started(void);
327 * Setup default configuration.
/*
 * Collect the ids of every enabled lcore except the master lcore into
 * fwd_lcores_cpuids[], then record the count in nb_lcores/nb_cfg_lcores.
 * NOTE(review): loop-body continues and the declarations of i/nb_lc are
 * missing from this view; code left untouched.
 */
330 set_default_fwd_lcores_config(void)
336 for (i = 0; i < RTE_MAX_LCORE; i++) {
337 if (! rte_lcore_is_enabled(i))
339 if (i == rte_get_master_lcore())
341 fwd_lcores_cpuids[nb_lc++] = i;
343 nb_lcores = (lcoreid_t) nb_lc;
344 nb_cfg_lcores = nb_lcores;
/*
 * Give every possible peer port a default locally-administered MAC address
 * whose last byte is the port index (bytes 1-4 are presumably left zero —
 * the remaining assignments are not visible in this view).
 */
349 set_def_peer_eth_addrs(void)
353 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
354 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
355 peer_eth_addrs[i].addr_bytes[5] = i;
/*
 * Default forwarding-port configuration: use every probed port, in probe
 * order (fwd_ports_ids[i] == i).
 */
360 set_default_fwd_ports_config(void)
364 for (pt_id = 0; pt_id < nb_ports; pt_id++)
365 fwd_ports_ids[pt_id] = pt_id;
367 nb_cfg_ports = nb_ports;
368 nb_fwd_ports = nb_ports;
/* Apply all three default-configuration helpers (lcores, peer MACs, ports). */
372 set_def_fwd_config(void)
374 set_default_fwd_lcores_config();
375 set_def_peer_eth_addrs();
376 set_default_fwd_ports_config();
380 * Configuration initialisation done once at init time.
/* Argument passed to testpmd_mbuf_ctor() for each mbuf of a pool. */
382 struct mbuf_ctor_arg {
383 uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
384 uint16_t seg_buf_size; /**< size of data segment in mbuf. */
/* Argument passed to testpmd_mbuf_pool_ctor() once per pool. */
387 struct mbuf_pool_ctor_arg {
388 uint16_t seg_buf_size; /**< size of data segment in mbuf. */
/*
 * Per-mbuf constructor callback for rte_mempool_create(): initialise the
 * buffer fields of one raw mempool element.  The data buffer lives in the
 * same mempool element, seg_buf_offset bytes after the rte_mbuf header,
 * so buf_physaddr is derived from the element's own physical address.
 * NOTE(review): parameter lines for opaque_arg/raw_mbuf and further field
 * initialisation are missing from this view.
 */
392 testpmd_mbuf_ctor(struct rte_mempool *mp,
395 __attribute__((unused)) unsigned i)
397 struct mbuf_ctor_arg *mb_ctor_arg;
400 mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
401 mb = (struct rte_mbuf *) raw_mbuf;
404 mb->buf_addr = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
405 mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
406 mb_ctor_arg->seg_buf_offset);
407 mb->buf_len = mb_ctor_arg->seg_buf_size;
409 mb->data_off = RTE_PKTMBUF_HEADROOM;
/*
 * Pool constructor callback: verify that the pool's private area is large
 * enough to hold a rte_pktmbuf_pool_private, then record the per-mbuf data
 * room size there so the PMDs can read it back.
 */
417 testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
420 struct mbuf_pool_ctor_arg *mbp_ctor_arg;
421 struct rte_pktmbuf_pool_private *mbp_priv;
423 if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
424 printf("%s(%s) private_data_size %d < %d\n",
425 __func__, mp->name, (int) mp->private_data_size,
426 (int) sizeof(struct rte_pktmbuf_pool_private));
429 mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
430 mbp_priv = rte_mempool_get_priv(mp);
431 mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
/*
 * Create the mbuf pool for one CPU socket.  Element size is the rte_mbuf
 * header (rounded up to a cache line) plus headroom plus the data segment.
 * Three creation paths exist: Xen grant-alloc memory (RTE_LIBRTE_PMD_XENVIRT),
 * anonymous mapped memory, or a regular rte_mempool — the #else/#ifdef lines
 * selecting between the last two are missing from this view.
 * Exits the whole application via rte_exit() if pool creation fails.
 */
435 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
436 unsigned int socket_id)
438 char pool_name[RTE_MEMPOOL_NAMESIZE];
439 struct rte_mempool *rte_mp;
440 struct mbuf_pool_ctor_arg mbp_ctor_arg;
441 struct mbuf_ctor_arg mb_ctor_arg;
444 mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
446 mb_ctor_arg.seg_buf_offset =
447 (uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
448 mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
449 mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
450 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
452 #ifdef RTE_LIBRTE_PMD_XENVIRT
453 rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
454 (unsigned) mb_mempool_cache,
455 sizeof(struct rte_pktmbuf_pool_private),
456 testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
457 testpmd_mbuf_ctor, &mb_ctor_arg,
464 rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
465 (unsigned) mb_mempool_cache,
466 sizeof(struct rte_pktmbuf_pool_private),
467 testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
468 testpmd_mbuf_ctor, &mb_ctor_arg,
471 rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size,
472 (unsigned) mb_mempool_cache,
473 sizeof(struct rte_pktmbuf_pool_private),
474 testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
475 testpmd_mbuf_ctor, &mb_ctor_arg,
480 if (rte_mp == NULL) {
481 rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
482 "failed\n", socket_id);
483 } else if (verbose_level > 0) {
484 rte_mempool_dump(stdout, rte_mp);
489 * Check given socket id is valid or not with NUMA mode,
490 * if valid, return 0, else return -1
/*
 * Out-of-range socket ids (>= MAX_SOCKET) trigger a one-time warning when
 * NUMA support is on; the return statements themselves are not visible in
 * this view of the file.
 */
493 check_socket_id(const unsigned int socket_id)
495 static int warning_once = 0;
497 if (socket_id >= MAX_SOCKET) {
498 if (!warning_once && numa_support)
499 printf("Warning: NUMA should be configured manually by"
500 " using --port-numa-config and"
501 " --ring-numa-config parameters along with"
/*
 * NOTE(review): the signature line of this function is missing from this
 * view; from the body it is presumably testpmd's init_config() — TODO
 * confirm against the full file.  Visible responsibilities:
 *  - allocate fwd_lcores[] and one fwd_lcore per forwarding core;
 *  - size and create the mbuf pool(s): one pool on socket 0 (UMA) or,
 *    under NUMA, one pool per socket scaled by the ports on that socket;
 *  - allocate ports[] and query dev_info for each port, flagging each
 *    port for (re)configuration;
 *  - bind each forwarding lcore to the mbuf pool of its own socket,
 *    falling back to the socket-0 pool;
 *  - build the forwarding streams via init_fwd_streams().
 * Any allocation failure aborts the application with rte_exit().
 */
513 struct rte_port *port;
514 struct rte_mempool *mbp;
515 unsigned int nb_mbuf_per_pool;
517 uint8_t port_per_socket[MAX_SOCKET];
519 memset(port_per_socket,0,MAX_SOCKET);
520 /* Configuration of logical cores. */
521 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
522 sizeof(struct fwd_lcore *) * nb_lcores,
524 if (fwd_lcores == NULL) {
525 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
526 "failed\n", nb_lcores);
528 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
529 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
530 sizeof(struct fwd_lcore),
532 if (fwd_lcores[lc_id] == NULL) {
533 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
536 fwd_lcores[lc_id]->cpuid_idx = lc_id;
540 * Create pools of mbuf.
541 * If NUMA support is disabled, create a single pool of mbuf in
542 * socket 0 memory by default.
543 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
545 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
546 * nb_txd can be configured at run time.
548 if (param_total_num_mbufs)
549 nb_mbuf_per_pool = param_total_num_mbufs;
551 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
552 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
555 nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
559 if (socket_num == UMA_NO_CONFIG)
560 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
562 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
566 /* Configuration of Ethernet ports. */
567 ports = rte_zmalloc("testpmd: ports",
568 sizeof(struct rte_port) * nb_ports,
571 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
572 "failed\n", nb_ports);
575 for (pid = 0; pid < nb_ports; pid++) {
577 rte_eth_dev_info_get(pid, &port->dev_info);
580 if (port_numa[pid] != NUMA_NO_CONFIG)
581 port_per_socket[port_numa[pid]]++;
583 uint32_t socket_id = rte_eth_dev_socket_id(pid);
585 /* if socket_id is invalid, set to 0 */
586 if (check_socket_id(socket_id) < 0)
588 port_per_socket[socket_id]++;
592 /* set flag to initialize port/queue */
593 port->need_reconfig = 1;
594 port->need_reconfig_queues = 1;
599 unsigned int nb_mbuf;
601 if (param_total_num_mbufs)
602 nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
604 for (i = 0; i < MAX_SOCKET; i++) {
605 nb_mbuf = (nb_mbuf_per_pool *
608 mbuf_pool_create(mbuf_data_size,
615 * Records which Mbuf pool to use by each logical core, if needed.
617 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
618 mbp = mbuf_pool_find(
619 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
622 mbp = mbuf_pool_find(0);
623 fwd_lcores[lc_id]->mbp = mbp;
626 /* Configuration of packet forwarding streams. */
627 if (init_fwd_streams() < 0)
628 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
/*
 * Grow the global ports[] array after a new port is attached, refresh its
 * dev_info and mark it for full port/queue reconfiguration.
 * NOTE(review): rte_realloc here is the leak-on-failure anti-pattern
 * (overwrites 'ports' directly), but the failure path calls rte_exit(),
 * which terminates the process, so no leak survives.
 */
633 reconfig(portid_t new_port_id)
635 struct rte_port *port;
637 /* Reconfiguration of Ethernet ports. */
638 ports = rte_realloc(ports,
639 sizeof(struct rte_port) * nb_ports,
642 rte_exit(EXIT_FAILURE, "rte_realloc(%d struct rte_port) failed\n",
646 port = &ports[new_port_id];
647 rte_eth_dev_info_get(new_port_id, &port->dev_info);
649 /* set flag to initialize port/queue */
650 port->need_reconfig = 1;
651 port->need_reconfig_queues = 1;
/*
 * (Re)build the per-stream forwarding table.
 *  1. Validate nb_rxq/nb_txq against each port's device limits and pick
 *     each port's socket id (explicit --port-numa-config, else the device
 *     socket, clamped to 0 when invalid, else --socket-num in UMA mode).
 *  2. If the required stream count (nb_ports * nb_rxq) changed, free the
 *     old fwd_streams array and zero-allocate a new one, exiting the
 *     application on allocation failure.
 * NOTE(review): the 'port = &ports[pid];' line and the return statements
 * are missing from this view.
 */
658 init_fwd_streams(void)
661 struct rte_port *port;
662 streamid_t sm_id, nb_fwd_streams_new;
664 /* set socket id according to numa or not */
665 for (pid = 0; pid < nb_ports; pid++) {
667 if (nb_rxq > port->dev_info.max_rx_queues) {
668 printf("Fail: nb_rxq(%d) is greater than "
669 "max_rx_queues(%d)\n", nb_rxq,
670 port->dev_info.max_rx_queues);
673 if (nb_txq > port->dev_info.max_tx_queues) {
674 printf("Fail: nb_txq(%d) is greater than "
675 "max_tx_queues(%d)\n", nb_txq,
676 port->dev_info.max_tx_queues);
680 if (port_numa[pid] != NUMA_NO_CONFIG)
681 port->socket_id = port_numa[pid];
683 port->socket_id = rte_eth_dev_socket_id(pid);
685 /* if socket_id is invalid, set to 0 */
686 if (check_socket_id(port->socket_id) < 0)
691 if (socket_num == UMA_NO_CONFIG)
694 port->socket_id = socket_num;
698 nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
699 if (nb_fwd_streams_new == nb_fwd_streams)
702 if (fwd_streams != NULL) {
703 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
704 if (fwd_streams[sm_id] == NULL)
706 rte_free(fwd_streams[sm_id]);
707 fwd_streams[sm_id] = NULL;
709 rte_free(fwd_streams);
714 nb_fwd_streams = nb_fwd_streams_new;
715 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
716 sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
717 if (fwd_streams == NULL)
718 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
719 "failed\n", nb_fwd_streams);
721 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
722 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
723 sizeof(struct fwd_stream), CACHE_LINE_SIZE);
724 if (fwd_streams[sm_id] == NULL)
725 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
732 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
/*
 * Print a burst-size histogram summary for one direction ("RX" or "TX"):
 * the total number of bursts plus the percentage contributed by the two
 * most frequent burst sizes, lumping everything else into "others".
 * NOTE(review): the arrays have 3 slots but the visible loop only updates
 * slots [0]/[1]; the branch maintaining slot [2] is among the lines
 * missing from this view — confirm against the full file.
 */
734 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
736 unsigned int total_burst;
737 unsigned int nb_burst;
738 unsigned int burst_stats[3];
739 uint16_t pktnb_stats[3];
741 int burst_percent[3];
744 * First compute the total number of packet bursts and the
745 * two highest numbers of bursts of the same number of packets.
748 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
749 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
750 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
751 nb_burst = pbs->pkt_burst_spread[nb_pkt];
754 total_burst += nb_burst;
755 if (nb_burst > burst_stats[0]) {
756 burst_stats[1] = burst_stats[0];
757 pktnb_stats[1] = pktnb_stats[0];
758 burst_stats[0] = nb_burst;
759 pktnb_stats[0] = nb_pkt;
762 if (total_burst == 0)
764 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
765 printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
766 burst_percent[0], (int) pktnb_stats[0]);
767 if (burst_stats[0] == total_burst) {
771 if (burst_stats[0] + burst_stats[1] == total_burst) {
772 printf(" + %d%% of %d pkts]\n",
773 100 - burst_percent[0], pktnb_stats[1]);
776 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
777 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
778 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
779 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
782 printf(" + %d%% of %d pkts + %d%% of others]\n",
783 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
785 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
/*
 * Print the per-port forwarding statistics summary.  Two layouts exist:
 * the compact one when no queue-stats mapping is enabled, and the wide one
 * otherwise; both show RX/TX packet totals, checksum errors (csum engine
 * only), RX error/nombuf counters and XON/XOFF pause-frame counts, plus
 * optional burst histograms, fdir counters and per-queue registers.
 * NOTE(review): 'stats->ierrors - stats->imissed' is unsigned arithmetic;
 * if imissed could ever exceed ierrors this would wrap — TODO confirm the
 * counters' relationship in the ethdev docs for this DPDK version.
 */
788 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
790 struct rte_port *port;
793 static const char *fwd_stats_border = "----------------------";
795 port = &ports[port_id];
796 printf("\n  %s Forward statistics for port %-2d %s\n",
797 fwd_stats_border, port_id, fwd_stats_border);
799 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
800 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
802 stats->ipackets, stats->imissed,
803 (uint64_t) (stats->ipackets + stats->imissed));
805 if (cur_fwd_eng == &csum_fwd_engine)
806 printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
807 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
808 if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
809 printf("  RX-badcrc:  %-14"PRIu64" RX-badlen: %-14"PRIu64
810 "RX-error: %-"PRIu64"\n",
811 stats->ibadcrc, stats->ibadlen, stats->ierrors);
812 printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
815 printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
817 stats->opackets, port->tx_dropped,
818 (uint64_t) (stats->opackets + port->tx_dropped));
821 printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
823 stats->ipackets, stats->imissed,
824 (uint64_t) (stats->ipackets + stats->imissed));
826 if (cur_fwd_eng == &csum_fwd_engine)
827 printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
828 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
829 if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
830 printf("  RX-badcrc:  %14"PRIu64"    RX-badlen: %14"PRIu64
831 "    RX-error:%"PRIu64"\n",
832 stats->ibadcrc, stats->ibadlen, stats->ierrors);
833 printf("  RX-nombufs: %14"PRIu64"\n",
837 printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
839 stats->opackets, port->tx_dropped,
840 (uint64_t) (stats->opackets + port->tx_dropped));
843 /* Display statistics of XON/XOFF pause frames, if any. */
844 if ((stats->tx_pause_xon | stats->rx_pause_xon |
845 stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) {
846 printf("  RX-XOFF:    %-14"PRIu64" RX-XON:     %-14"PRIu64"\n",
847 stats->rx_pause_xoff, stats->rx_pause_xon);
848 printf("  TX-XOFF:    %-14"PRIu64" TX-XON:     %-14"PRIu64"\n",
849 stats->tx_pause_xoff, stats->tx_pause_xon);
852 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
854 pkt_burst_stats_display("RX",
855 &port->rx_stream->rx_burst_stats);
857 pkt_burst_stats_display("TX",
858 &port->tx_stream->tx_burst_stats);
861 if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
862 printf("  Fdirmiss:%14"PRIu64"    Fdirmatch:%14"PRIu64"\n",
866 if (port->rx_queue_stats_mapping_enabled) {
868 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
869 printf("  Stats reg %2d RX-packets:%14"PRIu64
870 "     RX-errors:%14"PRIu64
871 "    RX-bytes:%14"PRIu64"\n",
872 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
876 if (port->tx_queue_stats_mapping_enabled) {
877 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
878 printf("  Stats reg %2d TX-packets:%14"PRIu64
879 "                                 TX-bytes:%14"PRIu64"\n",
880 i, stats->q_opackets[i], stats->q_obytes[i]);
884 printf("  %s--------------------------------%s\n",
885 fwd_stats_border, fwd_stats_border);
/*
 * Print the statistics of one forwarding stream (RX port/queue ->
 * TX port/queue): packet counts, drops, checksum errors when the csum
 * engine is active, and optional burst histograms.  Streams with no
 * activity at all are skipped (the early return is among the missing
 * lines in this view).
 */
889 fwd_stream_stats_display(streamid_t stream_id)
891 struct fwd_stream *fs;
892 static const char *fwd_top_stats_border = "-------";
894 fs = fwd_streams[stream_id];
895 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
896 (fs->fwd_dropped == 0))
898 printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
899 "TX Port=%2d/Queue=%2d %s\n",
900 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
901 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
902 printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
903 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
905 /* if checksum mode */
906 if (cur_fwd_eng == &csum_fwd_engine) {
907 printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
908 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
911 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
912 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
913 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
/*
 * Drain every RX queue of every forwarding port before a test starts, so
 * stale packets do not pollute the measurement.  Two passes are made,
 * separated by a 10 ms pause, and every received mbuf is freed.
 */
918 flush_fwd_rx_queues(void)
920 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
928 for (j = 0; j < 2; j++) {
929 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
930 for (rxq = 0; rxq < nb_rxq; rxq++) {
931 port_id = fwd_ports_ids[rxp];
933 nb_rx = rte_eth_rx_burst(port_id, rxq,
934 pkts_burst, MAX_PKT_BURST);
935 for (i = 0; i < nb_rx; i++)
936 rte_pktmbuf_free(pkts_burst[i]);
940 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
/*
 * Main per-lcore forwarding loop: repeatedly apply the engine's packet_fwd
 * callback to each stream assigned to this lcore until fc->stopped is set
 * (by stop_packet_forwarding() on another core).
 */
945 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
947 struct fwd_stream **fsm;
951 fsm = &fwd_streams[fc->stream_idx];
952 nb_fs = fc->stream_nb;
954 for (sm_id = 0; sm_id < nb_fs; sm_id++)
955 (*pkt_fwd)(fsm[sm_id]);
956 } while (! fc->stopped);
/*
 * lcore_function_t entry point launched on each forwarding lcore: run the
 * currently configured engine's forwarding loop on this core's streams.
 */
960 start_pkt_forward_on_core(void *fwd_arg)
962 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
963 cur_fwd_config.fwd_eng->packet_fwd);
968 * Run the TXONLY packet forwarding engine to send a single burst of packets.
969 * Used to start communication flows in network loopback test configurations.
/*
 * A copy of the lcore context with stopped=1 makes the do/while loop in
 * run_pkt_fwd_on_lcore() execute exactly one iteration (one burst).
 * NOTE(review): the line copying *fwd_lc into tmp_lcore is missing from
 * this view of the file.
 */
972 run_one_txonly_burst_on_core(void *fwd_arg)
974 struct fwd_lcore *fwd_lc;
975 struct fwd_lcore tmp_lcore;
977 fwd_lc = (struct fwd_lcore *) fwd_arg;
979 tmp_lcore.stopped = 1;
980 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
985 * Launch packet forwarding:
986 * - Setup per-port forwarding context.
987 * - launch logical cores with their forwarding configuration.
/*
 * Runs the engine's optional port_fwd_begin hook on every forwarding port,
 * then remote-launches pkt_fwd_on_lcore on every forwarding lcore.  In
 * interactive mode the lcore running the CLI (rte_lcore_id()) is skipped.
 */
990 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
992 port_fwd_begin_t port_fwd_begin;
997 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
998 if (port_fwd_begin != NULL) {
999 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1000 (*port_fwd_begin)(fwd_ports_ids[i]);
1002 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1003 lc_id = fwd_lcores_cpuids[i];
1004 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1005 fwd_lcores[i]->stopped = 0;
1006 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1007 fwd_lcores[i], lc_id);
1009 printf("launch lcore %u failed - diag=%d\n",
1016 * Launch packet forwarding configuration.
/*
 * Start a forwarding run: sanity-check that all ports are started and no
 * run is already in progress; in DCB test mode require every forwarding
 * port to be DCB-configured and more than one forwarding core; flush
 * stale RX packets; snapshot per-port stats and zero per-stream counters;
 * then either launch normal forwarding, or (with_tx_first) first run one
 * TXONLY burst on every core and wait for it before launching the real
 * engine.  NOTE(review): several guard/return lines and the branch
 * entering the DCB checks are missing from this view.
 */
1019 start_packet_forwarding(int with_tx_first)
1021 port_fwd_begin_t port_fwd_begin;
1022 port_fwd_end_t port_fwd_end;
1023 struct rte_port *port;
1028 if (all_ports_started() == 0) {
1029 printf("Not all ports were started\n");
1032 if (test_done == 0) {
1033 printf("Packet forwarding already started\n");
1037 for (i = 0; i < nb_fwd_ports; i++) {
1038 pt_id = fwd_ports_ids[i];
1039 port = &ports[pt_id];
1040 if (!port->dcb_flag) {
1041 printf("In DCB mode, all forwarding ports must "
1042 "be configured in this mode.\n");
1046 if (nb_fwd_lcores == 1) {
1047 printf("In DCB mode,the nb forwarding cores "
1048 "should be larger than 1.\n");
1055 flush_fwd_rx_queues();
1058 rxtx_config_display();
/* Snapshot current HW stats per port so the run reports deltas only. */
1060 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1061 pt_id = fwd_ports_ids[i];
1062 port = &ports[pt_id];
1063 rte_eth_stats_get(pt_id, &port->stats);
1064 port->tx_dropped = 0;
1066 map_port_queue_stats_mapping_registers(pt_id, port);
1068 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1069 fwd_streams[sm_id]->rx_packets = 0;
1070 fwd_streams[sm_id]->tx_packets = 0;
1071 fwd_streams[sm_id]->fwd_dropped = 0;
1072 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1073 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1075 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1076 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1077 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1078 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1079 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1081 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1082 fwd_streams[sm_id]->core_cycles = 0;
1085 if (with_tx_first) {
1086 port_fwd_begin = tx_only_engine.port_fwd_begin;
1087 if (port_fwd_begin != NULL) {
1088 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1089 (*port_fwd_begin)(fwd_ports_ids[i]);
1091 launch_packet_forwarding(run_one_txonly_burst_on_core);
1092 rte_eal_mp_wait_lcore();
1093 port_fwd_end = tx_only_engine.port_fwd_end;
1094 if (port_fwd_end != NULL) {
1095 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1096 (*port_fwd_end)(fwd_ports_ids[i]);
1099 launch_packet_forwarding(start_pkt_forward_on_core);
/*
 * Stop a forwarding run and report statistics.
 *  - Signal every forwarding lcore to stop and wait for all of them.
 *  - Run the engine's optional port_fwd_end hook on every forwarding port.
 *  - Aggregate per-stream counters (drops, bad IP/L4 checksums, core
 *    cycles) into their ports, displaying per-stream stats when there is
 *    more than one stream per port.
 *  - For every port, convert absolute HW counters into per-run deltas by
 *    subtracting the snapshot taken in start_packet_forwarding(), reset
 *    the snapshot fields, and print per-port plus accumulated totals.
 * BUGFIX: the fdirmatch/fdirmiss delta code reset port->stats.rx_nombuf
 * (a copy-paste of the preceding lines) instead of resetting
 * port->stats.fdirmatch / port->stats.fdirmiss, so fdir snapshots were
 * never cleared and later runs under-reported fdir deltas.
 * NOTE(review): many original lines (declarations, guards, closing braces,
 * zero-initialisation of total_recv/total_xmit) are missing from this
 * elided view; only the visible lines are reproduced here.
 */
1103 stop_packet_forwarding(void)
1105 struct rte_eth_stats stats;
1106 struct rte_port *port;
1107 port_fwd_end_t  port_fwd_end;
1112 uint64_t total_recv;
1113 uint64_t total_xmit;
1114 uint64_t total_rx_dropped;
1115 uint64_t total_tx_dropped;
1116 uint64_t total_rx_nombuf;
1117 uint64_t tx_dropped;
1118 uint64_t rx_bad_ip_csum;
1119 uint64_t rx_bad_l4_csum;
1120 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1121 uint64_t fwd_cycles;
1123 static const char *acc_stats_border = "+++++++++++++++";
1125 if (all_ports_started() == 0) {
1126 printf("Not all ports were started\n");
1130 printf("Packet forwarding not started\n");
1133 printf("Telling cores to stop...");
1134 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1135 fwd_lcores[lc_id]->stopped = 1;
1136 printf("\nWaiting for lcores to finish...\n");
1137 rte_eal_mp_wait_lcore();
1138 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1139 if (port_fwd_end != NULL) {
1140 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1141 pt_id = fwd_ports_ids[i];
1142 (*port_fwd_end)(pt_id);
1145 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1148 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1149 if (cur_fwd_config.nb_fwd_streams >
1150 cur_fwd_config.nb_fwd_ports) {
1151 fwd_stream_stats_display(sm_id);
1152 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1153 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1155 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1157 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1160 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1161 tx_dropped = (uint64_t) (tx_dropped +
1162 fwd_streams[sm_id]->fwd_dropped);
1163 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1166 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1167 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1168 fwd_streams[sm_id]->rx_bad_ip_csum);
1169 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1173 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1174 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1175 fwd_streams[sm_id]->rx_bad_l4_csum);
1176 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1179 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1180 fwd_cycles = (uint64_t) (fwd_cycles +
1181 fwd_streams[sm_id]->core_cycles);
1186 total_rx_dropped = 0;
1187 total_tx_dropped = 0;
1188 total_rx_nombuf  = 0;
1189 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1190 pt_id = fwd_ports_ids[i];
1192 port = &ports[pt_id];
1193 rte_eth_stats_get(pt_id, &stats);
1194 stats.ipackets -= port->stats.ipackets;
1195 port->stats.ipackets = 0;
1196 stats.opackets -= port->stats.opackets;
1197 port->stats.opackets = 0;
1198 stats.ibytes   -= port->stats.ibytes;
1199 port->stats.ibytes = 0;
1200 stats.obytes   -= port->stats.obytes;
1201 port->stats.obytes = 0;
1202 stats.imissed  -= port->stats.imissed;
1203 port->stats.imissed = 0;
1204 stats.oerrors  -= port->stats.oerrors;
1205 port->stats.oerrors = 0;
1206 stats.rx_nombuf -= port->stats.rx_nombuf;
1207 port->stats.rx_nombuf = 0;
1208 stats.fdirmatch -= port->stats.fdirmatch;
1209 port->stats.fdirmatch = 0; /* was rx_nombuf: copy-paste bug */
1210 stats.fdirmiss -= port->stats.fdirmiss;
1211 port->stats.fdirmiss = 0; /* was rx_nombuf: copy-paste bug */
1213 total_recv += stats.ipackets;
1214 total_xmit += stats.opackets;
1215 total_rx_dropped += stats.imissed;
1216 total_tx_dropped += port->tx_dropped;
1217 total_rx_nombuf  += stats.rx_nombuf;
1219 fwd_port_stats_display(pt_id, &stats);
1221 printf("\n  %s Accumulated forward statistics for all ports"
1223 acc_stats_border, acc_stats_border);
1224 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1226 "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1228 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1229 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1230 if (total_rx_nombuf > 0)
1231 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1232 printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1234 acc_stats_border, acc_stats_border);
1235 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1237 printf("\n  CPU cycles/packet=%u (total cycles="
1238 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1239 (unsigned int)(fwd_cycles / total_recv),
1240 fwd_cycles, total_recv);
1242 printf("\nDone.\n");
1247 dev_set_link_up(portid_t pid)
1249 if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1250 printf("\nSet link up fail.\n");
1254 dev_set_link_down(portid_t pid)
1256 if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1257 printf("\nSet link down fail.\n");
1261 all_ports_started(void)
1264 struct rte_port *port;
1266 for (pi = 0; pi < nb_ports; pi++) {
1268 /* Check if there is a port which is not started */
1269 if (port->port_status != RTE_PORT_STARTED)
1273 /* No port is not started */
/*
 * start_port() - (re)configure and start one port (or every port).
 *
 * Visible flow per matching port: flip port_status from STOPPED to HANDLING
 * with rte_atomic16_cmpset(), re-run rte_eth_dev_configure() and the RX/TX
 * queue setup when the need_reconfig* flags are set, start the device,
 * print its MAC address, and mark it STARTED.  Afterwards a link-status
 * check is triggered unless disabled with no_link_check.
 *
 * NOTE(review): this extract is missing interleaved source lines (returns,
 * braces, some call arguments); comments describe only the visible code.
 */
1278 start_port(portid_t pid)
1280 int diag, need_check_link_status = 0;
1283 struct rte_port *port;
1284 struct ether_addr mac_addr;
/* Refuse to touch ports while a forwarding run is active. */
1286 if (test_done == 0) {
1287 printf("Please stop forwarding first\n");
1291 if (init_fwd_streams() < 0) {
1292 printf("Fail from init_fwd_streams()\n");
/* pid < nb_ports selects a single port; otherwise every port is handled. */
1298 for (pi = 0; pi < nb_ports; pi++) {
1299 if (pid < nb_ports && pid != pi)
/* Claim the port: only a STOPPED port may be moved to HANDLING. */
1303 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1304 RTE_PORT_HANDLING) == 0) {
1305 printf("Port %d is now not stopped\n", pi);
/* Device-level reconfiguration, only when flagged. */
1309 if (port->need_reconfig > 0) {
1310 port->need_reconfig = 0;
1312 printf("Configuring Port %d (socket %u)\n", pi,
1314 /* configure port */
1315 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
/* On configure failure: roll status back to STOPPED and retry later. */
1318 if (rte_atomic16_cmpset(&(port->port_status),
1319 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1320 printf("Port %d can not be set back "
1321 "to stopped\n", pi);
1322 printf("Fail to configure port %d\n", pi);
1323 /* try to reconfigure port next time */
1324 port->need_reconfig = 1;
1328 if (port->need_reconfig_queues > 0) {
1329 port->need_reconfig_queues = 0;
1330 /* setup tx queues */
1331 for (qi = 0; qi < nb_txq; qi++) {
/* Honour a per-port TX ring NUMA override when configured. */
1332 if ((numa_support) &&
1333 (txring_numa[pi] != NUMA_NO_CONFIG))
1334 diag = rte_eth_tx_queue_setup(pi, qi,
1335 nb_txd,txring_numa[pi],
1338 diag = rte_eth_tx_queue_setup(pi, qi,
1339 nb_txd,port->socket_id,
1345 /* Fail to setup tx queue, return */
1346 if (rte_atomic16_cmpset(&(port->port_status),
1348 RTE_PORT_STOPPED) == 0)
1349 printf("Port %d can not be set back "
1350 "to stopped\n", pi);
1351 printf("Fail to configure port %d tx queues\n", pi);
1352 /* try to reconfigure queues next time */
1353 port->need_reconfig_queues = 1;
1356 /* setup rx queues */
1357 for (qi = 0; qi < nb_rxq; qi++) {
/* RX rings may likewise be pinned to a NUMA socket; the mbuf pool
 * for that socket must already exist. */
1358 if ((numa_support) &&
1359 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1360 struct rte_mempool * mp =
1361 mbuf_pool_find(rxring_numa[pi]);
1363 printf("Failed to setup RX queue:"
1364 "No mempool allocation"
1365 "on the socket %d\n",
1370 diag = rte_eth_rx_queue_setup(pi, qi,
1371 nb_rxd,rxring_numa[pi],
1372 &(port->rx_conf),mp);
1375 diag = rte_eth_rx_queue_setup(pi, qi,
1376 nb_rxd,port->socket_id,
1378 mbuf_pool_find(port->socket_id));
1384 /* Fail to setup rx queue, return */
1385 if (rte_atomic16_cmpset(&(port->port_status),
1387 RTE_PORT_STOPPED) == 0)
1388 printf("Port %d can not be set back "
1389 "to stopped\n", pi);
1390 printf("Fail to configure port %d rx queues\n", pi);
1391 /* try to reconfigure queues next time */
1392 port->need_reconfig_queues = 1;
/* Configuration done (or skipped): start the device itself. */
1397 if (rte_eth_dev_start(pi) < 0) {
1398 printf("Fail to start port %d\n", pi);
1400 /* Fail to setup rx queue, return */
1401 if (rte_atomic16_cmpset(&(port->port_status),
1402 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1403 printf("Port %d can not be set back to "
/* Publish the new state: HANDLING -> STARTED. */
1408 if (rte_atomic16_cmpset(&(port->port_status),
1409 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1410 printf("Port %d can not be set into started\n", pi);
1412 rte_eth_macaddr_get(pi, &mac_addr);
1413 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1414 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1415 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1416 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1418 /* at least one port started, need checking link status */
1419 need_check_link_status = 1;
1422 if (need_check_link_status && !no_link_check)
1423 check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
/* NOTE(review): presumably the error path when no port could be claimed —
 * confirm against the full source. */
1425 printf("Please stop the ports first\n");
/*
 * stop_port() - stop one port (or all).  Each STARTED port is claimed via
 * rte_atomic16_cmpset(STARTED -> HANDLING), stopped with rte_eth_dev_stop()
 * and released as STOPPED.  A link-status re-check is requested afterwards.
 *
 * NOTE(review): interleaved lines (port lookup, continue, braces) are
 * missing from this extract; comments describe only the visible code.
 */
1432 stop_port(portid_t pid)
1435 struct rte_port *port;
1436 int need_check_link_status = 0;
/* Forwarding must be halted before ports may be stopped. */
1438 if (test_done == 0) {
1439 printf("Please stop forwarding first\n");
1446 printf("Stopping ports...\n");
/* pid < nb_ports selects a single port; otherwise every port is handled. */
1448 for (pi = 0; pi < nb_ports; pi++) {
1449 if (pid < nb_ports && pid != pi)
/* Only ports currently STARTED are acted upon. */
1453 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1454 RTE_PORT_HANDLING) == 0)
1457 rte_eth_dev_stop(pi);
1459 if (rte_atomic16_cmpset(&(port->port_status),
1460 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1461 printf("Port %d can not be set into stopped\n", pi);
1462 need_check_link_status = 1;
1464 if (need_check_link_status && !no_link_check)
1465 check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
/*
 * close_port() - close one port (or all): transition the port status
 * STOPPED -> HANDLING -> CLOSED around rte_eth_dev_close().
 *
 * NOTE(review): interleaved lines (port lookup, continue, braces) are
 * missing from this extract; comments describe only the visible code.
 */
1471 close_port(portid_t pid)
1474 struct rte_port *port;
1476 if (test_done == 0) {
1477 printf("Please stop forwarding first\n");
1481 printf("Closing ports...\n");
1483 for (pi = 0; pi < nb_ports; pi++) {
1484 if (pid < nb_ports && pid != pi)
/* Only a STOPPED port may be closed. */
1488 if (rte_atomic16_cmpset(&(port->port_status),
1489 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1490 printf("Port %d is now not stopped\n", pi);
1494 rte_eth_dev_close(pi);
/* NOTE(review): the failure message below says "stopped" but the target
 * state is RTE_PORT_CLOSED — wording looks like a copy/paste slip. */
1496 if (rte_atomic16_cmpset(&(port->port_status),
1497 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1498 printf("Port %d can not be set into stopped\n", pi);
1505 all_ports_stopped(void)
1508 struct rte_port *port;
1510 for (pi = 0; pi < nb_ports; pi++) {
1512 if (port->port_status != RTE_PORT_STOPPED)
1520 port_is_started(portid_t port_id)
1522 if (port_id_is_invalid(port_id))
1525 if (ports[port_id].port_status != RTE_PORT_STARTED)
/* Fragment of the test-exit shutdown path (the enclosing function
 * definition is not visible in this extract): closes every probed port in
 * turn.  NOTE(review): presumably part of pmd_test_exit() — confirm
 * against the full source. */
1536 for (pt_id = 0; pt_id < nb_ports; pt_id++) {
1537 printf("Stopping port %d...", pt_id);
1539 rte_eth_dev_close(pt_id);
/* Console-style test command dispatch: a command name bound to a
 * zero-argument handler.  NOTE(review): the closing of the struct and the
 * pmd_test_menu[] table it is used with are not visible in this extract. */
1545 typedef void (*cmd_func_t)(void);
1546 struct pmd_test_command {
1547 const char *cmd_name;
1548 cmd_func_t cmd_func;
/* Number of entries in the pmd_test_menu[] command table (defined outside
 * this extract). */
1551 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1553 /* Check the link status of all ports in up to 9s, and print them finally */
/*
 * Polls rte_eth_link_get_nowait() for every port selected by port_mask,
 * sleeping CHECK_INTERVAL ms between rounds, for at most MAX_CHECK_TIME
 * rounds.  Once every selected link is up (or on the final round) the
 * print flag is set and the per-port status is printed on the next pass.
 *
 * NOTE(review): interleaved lines (continue, braces, all_ports_up updates)
 * are missing from this extract; comments describe only the visible code.
 */
1555 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
1557 #define CHECK_INTERVAL 100 /* 100ms */
1558 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1559 uint8_t portid, count, all_ports_up, print_flag = 0;
1560 struct rte_eth_link link;
1562 printf("Checking link statuses...\n");
1564 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1566 for (portid = 0; portid < port_num; portid++) {
/* Ports outside the mask are ignored entirely. */
1567 if ((port_mask & (1 << portid)) == 0)
1569 memset(&link, 0, sizeof(link));
1570 rte_eth_link_get_nowait(portid, &link);
1571 /* print link status if flag set */
1572 if (print_flag == 1) {
1573 if (link.link_status)
1574 printf("Port %d Link Up - speed %u "
1575 "Mbps - %s\n", (uint8_t)portid,
1576 (unsigned)link.link_speed,
1577 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
/* NOTE(review): the stray "\n" inside "half-duplex\n" produces a double
 * newline in the half-duplex case — looks unintentional. */
1578 ("full-duplex") : ("half-duplex\n"));
1580 printf("Port %d Link Down\n",
1584 /* clear all_ports_up flag if any link down */
1585 if (link.link_status == 0) {
1590 /* after finally printing all link status, get out */
1591 if (print_flag == 1)
/* Not all links are up yet: wait one interval and poll again. */
1594 if (all_ports_up == 0) {
1596 rte_delay_ms(CHECK_INTERVAL);
1599 /* set the print_flag if all ports up or timeout */
1600 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1607 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1611 uint8_t mapping_found = 0;
1613 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1614 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1615 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1616 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1617 tx_queue_stats_mappings[i].queue_id,
1618 tx_queue_stats_mappings[i].stats_counter_id);
1625 port->tx_queue_stats_mapping_enabled = 1;
1630 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1634 uint8_t mapping_found = 0;
1636 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1637 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1638 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1639 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1640 rx_queue_stats_mappings[i].queue_id,
1641 rx_queue_stats_mappings[i].stats_counter_id);
1648 port->rx_queue_stats_mapping_enabled = 1;
1653 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1657 diag = set_tx_queue_stats_mapping_registers(pi, port);
1659 if (diag == -ENOTSUP) {
1660 port->tx_queue_stats_mapping_enabled = 0;
1661 printf("TX queue stats mapping not supported port id=%d\n", pi);
1664 rte_exit(EXIT_FAILURE,
1665 "set_tx_queue_stats_mapping_registers "
1666 "failed for port id=%d diag=%d\n",
1670 diag = set_rx_queue_stats_mapping_registers(pi, port);
1672 if (diag == -ENOTSUP) {
1673 port->rx_queue_stats_mapping_enabled = 0;
1674 printf("RX queue stats mapping not supported port id=%d\n", pi);
1677 rte_exit(EXIT_FAILURE,
1678 "set_rx_queue_stats_mapping_registers "
1679 "failed for port id=%d diag=%d\n",
/*
 * init_port_config() - fill each port's rte_eth_conf and per-queue configs
 * from the testpmd command-line globals (rx_mode, fdir_conf, RSS hash,
 * queue thresholds), fetch the MAC address and program the queue-stats
 * mapping registers.
 *
 * NOTE(review): interleaved lines (port lookup, if/else branch structure)
 * are missing from this extract; comments describe only the visible code.
 */
1685 init_port_config(void)
1688 struct rte_port *port;
1690 for (pid = 0; pid < nb_ports; pid++) {
1692 port->dev_conf.rxmode = rx_mode;
1693 port->dev_conf.fdir_conf = fdir_conf;
/* RSS-enabled branch: default key with the configured hash functions. */
1695 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1696 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
/* Otherwise RSS hashing is disabled entirely. */
1698 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1699 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1702 /* In SR-IOV mode, RSS mode is not available */
1703 if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1704 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1705 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1707 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
/* Per-queue thresholds come straight from the command-line globals. */
1710 port->rx_conf.rx_thresh = rx_thresh;
1711 port->rx_conf.rx_free_thresh = rx_free_thresh;
1712 port->rx_conf.rx_drop_en = rx_drop_en;
1713 port->tx_conf.tx_thresh = tx_thresh;
1714 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1715 port->tx_conf.tx_free_thresh = tx_free_thresh;
1716 port->tx_conf.txq_flags = txq_flags;
1718 rte_eth_macaddr_get(pid, &port->eth_addr);
1720 map_port_queue_stats_mapping_registers(pid, port);
1721 #ifdef RTE_NIC_BYPASS
/* Bypass-capable NICs additionally get their bypass logic initialised. */
1722 rte_eth_dev_bypass_init(pid);
/* VLAN IDs used to populate the VMDQ+DCB pool mapping in
 * get_eth_dcb_conf().  NOTE(review): the closing brace/semicolon of this
 * array initialiser is not visible in this extract. */
1727 const uint16_t vlan_tags[] = {
1728 0, 1, 2, 3, 4, 5, 6, 7,
1729 8, 9, 10, 11, 12, 13, 14, 15,
1730 16, 17, 18, 19, 20, 21, 22, 23,
1731 24, 25, 26, 27, 28, 29, 30, 31
1735 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
1740 * Builds up the correct configuration for dcb+vt based on the vlan tags array
1741 * given above, and the number of traffic classes available for use.
1743 if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
1744 struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
1745 struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
1747 /* VMDQ+DCB RX and TX configrations */
1748 vmdq_rx_conf.enable_default_pool = 0;
1749 vmdq_rx_conf.default_pool = 0;
1750 vmdq_rx_conf.nb_queue_pools =
1751 (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1752 vmdq_tx_conf.nb_queue_pools =
1753 (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1755 vmdq_rx_conf.nb_pool_maps = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1756 for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
1757 vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[ i ];
1758 vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
1760 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1761 vmdq_rx_conf.dcb_queue[i] = i;
1762 vmdq_tx_conf.dcb_queue[i] = i;
1765 /*set DCB mode of RX and TX of multiple queues*/
1766 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1767 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1768 if (dcb_conf->pfc_en)
1769 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1771 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1773 (void)(rte_memcpy(ð_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
1774 sizeof(struct rte_eth_vmdq_dcb_conf)));
1775 (void)(rte_memcpy(ð_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
1776 sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
1779 struct rte_eth_dcb_rx_conf rx_conf;
1780 struct rte_eth_dcb_tx_conf tx_conf;
1782 /* queue mapping configuration of DCB RX and TX */
1783 if (dcb_conf->num_tcs == ETH_4_TCS)
1784 dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
1786 dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
1788 rx_conf.nb_tcs = dcb_conf->num_tcs;
1789 tx_conf.nb_tcs = dcb_conf->num_tcs;
1791 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
1792 rx_conf.dcb_queue[i] = i;
1793 tx_conf.dcb_queue[i] = i;
1795 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
1796 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1797 if (dcb_conf->pfc_en)
1798 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1800 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1802 (void)(rte_memcpy(ð_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
1803 sizeof(struct rte_eth_dcb_rx_conf)));
1804 (void)(rte_memcpy(ð_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
1805 sizeof(struct rte_eth_dcb_tx_conf)));
/*
 * init_port_dcb_config() - build a DCB configuration via
 * get_eth_dcb_conf(), copy it into the port, adjust queue thresholds for
 * DCB operation, enable HW VLAN filtering for every tag in vlan_tags[],
 * and flag the port as running in DCB mode.
 *
 * NOTE(review): interleaved lines (declarations, error checks, braces) are
 * missing from this extract; comments describe only the visible code.
 */
1812 init_port_dcb_config(portid_t pid,struct dcb_config *dcb_conf)
1814 struct rte_eth_conf port_conf;
1815 struct rte_port *rte_port;
1820 /* rxq and txq configuration in dcb mode */
1823 rx_free_thresh = 64;
1825 memset(&port_conf,0,sizeof(struct rte_eth_conf));
1826 /* Enter DCB configuration status */
1829 nb_vlan = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1830 /*set configuration of DCB in vt mode and DCB in non-vt mode*/
1831 retval = get_eth_dcb_conf(&port_conf, dcb_conf);
1835 rte_port = &ports[pid];
1836 memcpy(&rte_port->dev_conf, &port_conf,sizeof(struct rte_eth_conf));
/* Queue thresholds for DCB come from the command-line globals. */
1838 rte_port->rx_conf.rx_thresh = rx_thresh;
1839 rte_port->rx_conf.rx_free_thresh = rx_free_thresh;
1840 rte_port->tx_conf.tx_thresh = tx_thresh;
1841 rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1842 rte_port->tx_conf.tx_free_thresh = tx_free_thresh;
/* The HW VLAN filter must admit every tag used by the DCB pool mapping. */
1844 rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1845 for (i = 0; i < nb_vlan; i++){
1846 rx_vft_set(pid, vlan_tags[i], 1);
1849 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1850 map_port_queue_stats_mapping_registers(pid, rte_port);
1852 rte_port->dcb_flag = 1;
1857 #ifdef RTE_EXEC_ENV_BAREMETAL
1862 main(int argc, char** argv)
1867 diag = rte_eal_init(argc, argv);
1869 rte_panic("Cannot init EAL\n");
1871 nb_ports = (portid_t) rte_eth_dev_count();
1873 rte_exit(EXIT_FAILURE, "No probed ethernet device\n");
1875 set_def_fwd_config();
1877 rte_panic("Empty set of forwarding logical cores - check the "
1878 "core mask supplied in the command parameters\n");
1883 launch_args_parse(argc, argv);
1885 if (nb_rxq > nb_txq)
1886 printf("Warning: nb_rxq=%d enables RSS configuration, "
1887 "but nb_txq=%d will prevent to fully test it.\n",
1891 if (start_port(RTE_PORT_ALL) != 0)
1892 rte_exit(EXIT_FAILURE, "Start ports failed\n");
1894 /* set all ports to promiscuous mode by default */
1895 for (port_id = 0; port_id < nb_ports; port_id++)
1896 rte_eth_promiscuous_enable(port_id);
1898 #ifdef RTE_LIBRTE_CMDLINE
1899 if (interactive == 1) {
1901 printf("Start automatic packet forwarding\n");
1902 start_packet_forwarding(0);
1911 printf("No commandline core given, start packet forwarding\n");
1912 start_packet_forwarding(0);
1913 printf("Press enter to exit\n");
1914 rc = read(0, &c, 1);