/*
 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <unistd.h>

#include <sys/types.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_PMD_XENVIRT
#include <rte_eth_xenvirt.h>
#endif

#include "mempool_osdep.h"
#include "testpmd.h"
uint16_t verbose_level = 0; /**< Silent by default. */
/* Use the master core for the command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
uint8_t numa_support = 0; /**< No NUMA support by default. */
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
uint8_t socket_num = UMA_NO_CONFIG;
 * Use ANONYMOUS mapped memory (may not be physically contiguous) for mbufs.
 * Record the Ethernet address of peer target ports to which packets are
 * Must be instantiated with the Ethernet addresses of peer traffic generator
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;
 * Probed Target Environment.
struct rte_port *ports; /**< For all probed Ethernet ports. */
portid_t nb_ports; /**< Number of probed Ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores; /**< Number of probed logical cores. */
 * Test Forwarding Configuration.
 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t nb_cfg_ports; /**< Number of configured ports. */
portid_t nb_fwd_ports; /**< Number of forwarding ports. */
unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
 * Forwarding engines.
struct fwd_engine *fwd_engines[] = {
&mac_retry_fwd_engine,
#ifdef RTE_LIBRTE_IEEE1588
&ieee1588_fwd_engine,
struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
 * specified on command-line. */
 * Configuration of packet segments used by the "txonly" processing engine.
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
TXONLY_DEF_PACKET_LEN,
uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets. */
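/*
 * Illustration (hypothetical values): configuring three segments of 64,
 * 64 and 128 bytes would set tx_pkt_nb_segs = 3, the first three entries
 * of tx_pkt_seg_lengths[] to 64, 64 and 128, and tx_pkt_length to their
 * sum (256). testpmd exposes this through its "set txpkts" runtime
 * command (command name given for illustration only).
 */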
uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;
/* Whether DCB is being tested. */
uint8_t dcb_test = 0;
/* DCB on and VT on is the default mapping. */
enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
 * Configurable number of RX/TX queues.
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */
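/*
 * These defaults can be overridden before the ports are configured, for
 * example with the --rxq and --txq command-line options (option names
 * given for illustration).
 */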
 * Configurable number of RX/TX ring descriptors.
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
 * Configurable values of RX and TX ring threshold registers.
#define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
#define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
#define RX_WTHRESH 0 /**< Default value of RX write-back threshold register. */
#define TX_PTHRESH 32 /**< Default value of TX prefetch threshold register. */
#define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
#define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */
struct rte_eth_thresh rx_thresh = {
.pthresh = RX_PTHRESH,
.hthresh = RX_HTHRESH,
.wthresh = RX_WTHRESH,
struct rte_eth_thresh tx_thresh = {
.pthresh = TX_PTHRESH,
.hthresh = TX_HTHRESH,
.wthresh = TX_WTHRESH,
 * Configurable value of RX free threshold.
uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */
 * Configurable value of RX drop enable.
uint8_t rx_drop_en = 0; /* Drop packets if no RX descriptors are available; disabled by default. */
 * Configurable value of TX free threshold.
uint16_t tx_free_thresh = 0; /* Use default values. */
 * Configurable value of TX RS bit threshold.
uint16_t tx_rs_thresh = 0; /* Use default values. */
 * Configurable value of TX queue flags.
uint32_t txq_flags = 0; /* No flags set. */
 * Receive Side Scaling (RSS) configuration.
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
 * Port topology configuration.
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default. */
 * Avoid flushing all the RX streams before starting forwarding.
uint8_t no_flush_rx = 0; /* Flush by default. */
 * Avoid checking the link status when starting/stopping a port.
uint8_t no_link_check = 0; /* Check by default. */
 * NIC bypass mode configuration options.
#ifdef RTE_NIC_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
 * Ethernet device configuration.
struct rte_eth_rxmode rx_mode = {
.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
.header_split = 0, /**< Header Split disabled. */
.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
.hw_vlan_strip = 1, /**< VLAN strip enabled. */
.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
.hw_strip_crc = 0, /**< CRC stripping by hardware disabled. */
struct rte_fdir_conf fdir_conf = {
.mode = RTE_FDIR_MODE_NONE,
.pballoc = RTE_FDIR_PBALLOC_64K,
.status = RTE_FDIR_REPORT_STATUS,
.flexbytes_offset = 0x6,
volatile int test_done = 1; /* Stop packet forwarding when set to 1. */
struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;
/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);
 * Check if all the ports are started.
 * If yes, return a positive value; if not, return zero.
static int all_ports_started(void);
 * Set up the default configuration.
set_default_fwd_lcores_config(void)
for (i = 0; i < RTE_MAX_LCORE; i++) {
if (!rte_lcore_is_enabled(i))
if (i == rte_get_master_lcore())
fwd_lcores_cpuids[nb_lc++] = i;
nb_lcores = (lcoreid_t) nb_lc;
nb_cfg_lcores = nb_lcores;
set_def_peer_eth_addrs(void)
for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
peer_eth_addrs[i].addr_bytes[5] = i;
set_default_fwd_ports_config(void)
for (pt_id = 0; pt_id < nb_ports; pt_id++)
fwd_ports_ids[pt_id] = pt_id;
nb_cfg_ports = nb_ports;
nb_fwd_ports = nb_ports;
set_def_fwd_config(void)
set_default_fwd_lcores_config();
set_def_peer_eth_addrs();
set_default_fwd_ports_config();
 * Configuration initialization done once at init time.
struct mbuf_ctor_arg {
uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
uint16_t seg_buf_size; /**< size of data segment in mbuf. */
struct mbuf_pool_ctor_arg {
uint16_t seg_buf_size; /**< size of data segment in mbuf. */
testpmd_mbuf_ctor(struct rte_mempool *mp,
__attribute__((unused)) unsigned i)
struct mbuf_ctor_arg *mb_ctor_arg;
mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
mb = (struct rte_mbuf *) raw_mbuf;
mb->type = RTE_MBUF_PKT;
mb->buf_addr = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
mb_ctor_arg->seg_buf_offset);
mb->buf_len = mb_ctor_arg->seg_buf_size;
mb->type = RTE_MBUF_PKT;
mb->pkt.data = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
mb->pkt.vlan_macip.data = 0;
mb->pkt.hash.rss = 0;
testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
struct mbuf_pool_ctor_arg *mbp_ctor_arg;
struct rte_pktmbuf_pool_private *mbp_priv;
if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
printf("%s(%s) private_data_size %d < %d\n",
__func__, mp->name, (int) mp->private_data_size,
(int) sizeof(struct rte_pktmbuf_pool_private));
mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
mbp_priv = rte_mempool_get_priv(mp);
mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
unsigned int socket_id)
char pool_name[RTE_MEMPOOL_NAMESIZE];
struct rte_mempool *rte_mp;
struct mbuf_pool_ctor_arg mbp_ctor_arg;
struct mbuf_ctor_arg mb_ctor_arg;
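/*
 * Layout of each pool element: the rte_mbuf header, rounded up to a
 * cache line, is followed by the data buffer (RTE_PKTMBUF_HEADROOM plus
 * mbuf_seg_size bytes of segment data), as computed below.
 */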
mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
mbuf_seg_size);
mb_ctor_arg.seg_buf_offset =
(uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
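/*
 * Create the pool through one of three back ends: Xen grant-allocated
 * memory when built with RTE_LIBRTE_PMD_XENVIRT, anonymous mapped
 * memory, or the regular rte_mempool allocator. The element size, cache
 * size and constructor callbacks are the same in all three cases.
 */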
#ifdef RTE_LIBRTE_PMD_XENVIRT
rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
(unsigned) mb_mempool_cache,
sizeof(struct rte_pktmbuf_pool_private),
testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
testpmd_mbuf_ctor, &mb_ctor_arg,
rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
(unsigned) mb_mempool_cache,
sizeof(struct rte_pktmbuf_pool_private),
testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
testpmd_mbuf_ctor, &mb_ctor_arg,
rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size,
(unsigned) mb_mempool_cache,
sizeof(struct rte_pktmbuf_pool_private),
testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
testpmd_mbuf_ctor, &mb_ctor_arg,
if (rte_mp == NULL) {
rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
"failed\n", socket_id);
} else if (verbose_level > 0) {
rte_mempool_dump(stdout, rte_mp);
 * Check whether the given socket ID is valid in NUMA mode;
 * return 0 if valid, -1 otherwise.
check_socket_id(const unsigned int socket_id)
static int warning_once = 0;
if (socket_id >= MAX_SOCKET) {
if (!warning_once && numa_support)
printf("Warning: NUMA should be configured manually by"
" using --port-numa-config and"
" --ring-numa-config parameters along with"
struct rte_port *port;
struct rte_mempool *mbp;
unsigned int nb_mbuf_per_pool;
uint8_t port_per_socket[MAX_SOCKET];
memset(port_per_socket, 0, MAX_SOCKET);
/* Configuration of logical cores. */
fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
sizeof(struct fwd_lcore *) * nb_lcores,
if (fwd_lcores == NULL) {
rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
"failed\n", nb_lcores);
for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
sizeof(struct fwd_lcore),
if (fwd_lcores[lc_id] == NULL) {
rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
fwd_lcores[lc_id]->cpuid_idx = lc_id;
 * Create mbuf pools.
 * If NUMA support is disabled, create a single mbuf pool in
 * socket 0 memory by default.
 * Otherwise, create an mbuf pool in the memory of sockets 0 and 1.
 * Use the maximum values of nb_rxd and nb_txd here, so that nb_rxd and
 * nb_txd can be configured at run time.
if (param_total_num_mbufs)
nb_mbuf_per_pool = param_total_num_mbufs;
nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
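/*
 * The default sizing above provisions, per port, enough mbufs to fill
 * the largest RX and TX rings plus one mempool cache per forwarding
 * lcore and one extra burst, so that queues can be resized at run time
 * without exhausting the pool.
 */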
nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
if (socket_num == UMA_NO_CONFIG)
mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
/* Configuration of Ethernet ports. */
ports = rte_zmalloc("testpmd: ports",
sizeof(struct rte_port) * nb_ports,
rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
"failed\n", nb_ports);
for (pid = 0; pid < nb_ports; pid++) {
rte_eth_dev_info_get(pid, &port->dev_info);
if (port_numa[pid] != NUMA_NO_CONFIG)
port_per_socket[port_numa[pid]]++;
uint32_t socket_id = rte_eth_dev_socket_id(pid);
/* If socket_id is invalid, set it to 0. */
if (check_socket_id(socket_id) < 0)
port_per_socket[socket_id]++;
/* Set flags so the port and its queues get (re)initialized. */
port->need_reconfig = 1;
port->need_reconfig_queues = 1;
unsigned int nb_mbuf;
if (param_total_num_mbufs)
nb_mbuf_per_pool = nb_mbuf_per_pool / nb_ports;
for (i = 0; i < MAX_SOCKET; i++) {
nb_mbuf = (nb_mbuf_per_pool *
mbuf_pool_create(mbuf_data_size,
 * Record which mbuf pool each logical core should use, if needed.
for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
mbp = mbuf_pool_find(
rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
mbp = mbuf_pool_find(0);
fwd_lcores[lc_id]->mbp = mbp;
/* Configuration of packet forwarding streams. */
if (init_fwd_streams() < 0)
rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
reconfig(portid_t new_port_id)
struct rte_port *port;
/* Reconfiguration of Ethernet ports. */
ports = rte_realloc(ports,
sizeof(struct rte_port) * nb_ports,
rte_exit(EXIT_FAILURE, "rte_realloc(%d struct rte_port) failed\n",
port = &ports[new_port_id];
rte_eth_dev_info_get(new_port_id, &port->dev_info);
/* Set flags so the port and its queues get (re)initialized. */
port->need_reconfig = 1;
port->need_reconfig_queues = 1;
init_fwd_streams(void)
struct rte_port *port;
streamid_t sm_id, nb_fwd_streams_new;
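/*
 * Validate nb_rxq/nb_txq against each device's capabilities, assign a
 * socket ID to every port, then (re)allocate one forwarding stream per
 * (port, RX queue) pair.
 */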
/* Set the socket ID according to the NUMA configuration. */
for (pid = 0; pid < nb_ports; pid++) {
if (nb_rxq > port->dev_info.max_rx_queues) {
printf("Fail: nb_rxq(%d) is greater than "
"max_rx_queues(%d)\n", nb_rxq,
port->dev_info.max_rx_queues);
if (nb_txq > port->dev_info.max_tx_queues) {
printf("Fail: nb_txq(%d) is greater than "
"max_tx_queues(%d)\n", nb_txq,
port->dev_info.max_tx_queues);
if (port_numa[pid] != NUMA_NO_CONFIG)
port->socket_id = port_numa[pid];
port->socket_id = rte_eth_dev_socket_id(pid);
/* If socket_id is invalid, set it to 0. */
if (check_socket_id(port->socket_id) < 0)
if (socket_num == UMA_NO_CONFIG)
port->socket_id = socket_num;
nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
if (nb_fwd_streams_new == nb_fwd_streams)
if (fwd_streams != NULL) {
for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
if (fwd_streams[sm_id] == NULL)
rte_free(fwd_streams[sm_id]);
fwd_streams[sm_id] = NULL;
rte_free(fwd_streams);
nb_fwd_streams = nb_fwd_streams_new;
fwd_streams = rte_zmalloc("testpmd: fwd_streams",
sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
if (fwd_streams == NULL)
rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
"failed\n", nb_fwd_streams);
for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
sizeof(struct fwd_stream), CACHE_LINE_SIZE);
if (fwd_streams[sm_id] == NULL)
rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
unsigned int total_burst;
unsigned int nb_burst;
unsigned int burst_stats[3];
uint16_t pktnb_stats[3];
int burst_percent[3];
 * First compute the total number of packet bursts and the
 * two highest numbers of bursts of the same number of packets.
burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
nb_burst = pbs->pkt_burst_spread[nb_pkt];
total_burst += nb_burst;
if (nb_burst > burst_stats[0]) {
burst_stats[1] = burst_stats[0];
pktnb_stats[1] = pktnb_stats[0];
burst_stats[0] = nb_burst;
pktnb_stats[0] = nb_pkt;
if (total_burst == 0)
burst_percent[0] = (burst_stats[0] * 100) / total_burst;
printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
burst_percent[0], (int) pktnb_stats[0]);
if (burst_stats[0] == total_burst) {
if (burst_stats[0] + burst_stats[1] == total_burst) {
printf(" + %d%% of %d pkts]\n",
100 - burst_percent[0], pktnb_stats[1]);
burst_percent[1] = (burst_stats[1] * 100) / total_burst;
burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
printf(" + %d%% of others]\n", 100 - burst_percent[0]);
printf(" + %d%% of %d pkts + %d%% of others]\n",
burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
struct rte_port *port;
static const char *fwd_stats_border = "----------------------";
port = &ports[port_id];
printf("\n %s Forward statistics for port %-2d %s\n",
fwd_stats_border, port_id, fwd_stats_border);
if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
stats->ipackets, stats->imissed,
(uint64_t) (stats->ipackets + stats->imissed));
if (cur_fwd_eng == &csum_fwd_engine)
printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
port->rx_bad_ip_csum, port->rx_bad_l4_csum);
if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
printf(" RX-badcrc: %-14"PRIu64" RX-badlen: %-14"PRIu64
"RX-error: %-"PRIu64"\n",
stats->ibadcrc, stats->ibadlen, stats->ierrors);
printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
stats->opackets, port->tx_dropped,
(uint64_t) (stats->opackets + port->tx_dropped));
printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
stats->ipackets, stats->imissed,
(uint64_t) (stats->ipackets + stats->imissed));
if (cur_fwd_eng == &csum_fwd_engine)
printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
port->rx_bad_ip_csum, port->rx_bad_l4_csum);
if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
printf(" RX-badcrc: %14"PRIu64" RX-badlen: %14"PRIu64
" RX-error:%"PRIu64"\n",
stats->ibadcrc, stats->ibadlen, stats->ierrors);
printf(" RX-nombufs: %14"PRIu64"\n",
printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
stats->opackets, port->tx_dropped,
(uint64_t) (stats->opackets + port->tx_dropped));
/* Display statistics of XON/XOFF pause frames, if any. */
if ((stats->tx_pause_xon | stats->rx_pause_xon |
stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) {
printf(" RX-XOFF: %-14"PRIu64" RX-XON: %-14"PRIu64"\n",
stats->rx_pause_xoff, stats->rx_pause_xon);
printf(" TX-XOFF: %-14"PRIu64" TX-XON: %-14"PRIu64"\n",
stats->tx_pause_xoff, stats->tx_pause_xon);
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
pkt_burst_stats_display("RX",
&port->rx_stream->rx_burst_stats);
pkt_burst_stats_display("TX",
&port->tx_stream->tx_burst_stats);
if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
printf(" Fdirmiss:%14"PRIu64" Fdirmatch:%14"PRIu64"\n",
if (port->rx_queue_stats_mapping_enabled) {
for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
printf(" Stats reg %2d RX-packets:%14"PRIu64
" RX-errors:%14"PRIu64
" RX-bytes:%14"PRIu64"\n",
i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
if (port->tx_queue_stats_mapping_enabled) {
for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
printf(" Stats reg %2d TX-packets:%14"PRIu64
" TX-bytes:%14"PRIu64"\n",
i, stats->q_opackets[i], stats->q_obytes[i]);
printf(" %s--------------------------------%s\n",
fwd_stats_border, fwd_stats_border);
fwd_stream_stats_display(streamid_t stream_id)
struct fwd_stream *fs;
static const char *fwd_top_stats_border = "-------";
fs = fwd_streams[stream_id];
if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
(fs->fwd_dropped == 0))
printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
"TX Port=%2d/Queue=%2d %s\n",
fwd_top_stats_border, fs->rx_port, fs->rx_queue,
fs->tx_port, fs->tx_queue, fwd_top_stats_border);
printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
/* If in checksum forwarding mode, also display checksum error counters. */
if (cur_fwd_eng == &csum_fwd_engine) {
printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
pkt_burst_stats_display("RX", &fs->rx_burst_stats);
pkt_burst_stats_display("TX", &fs->tx_burst_stats);
flush_fwd_rx_queues(void)
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
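/*
 * Drain every RX queue of every forwarding port twice, freeing whatever
 * is received, so that stale packets left over from a previous run do
 * not pollute the forwarding statistics.
 */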
for (j = 0; j < 2; j++) {
for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
for (rxq = 0; rxq < nb_rxq; rxq++) {
port_id = fwd_ports_ids[rxp];
nb_rx = rte_eth_rx_burst(port_id, rxq,
pkts_burst, MAX_PKT_BURST);
for (i = 0; i < nb_rx; i++)
rte_pktmbuf_free(pkts_burst[i]);
rte_delay_ms(10); /* Wait 10 milliseconds before retrying. */
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
struct fwd_stream **fsm;
fsm = &fwd_streams[fc->stream_idx];
nb_fs = fc->stream_nb;
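/*
 * Invoke the forwarding engine's packet_fwd callback on every stream
 * owned by this lcore, and keep iterating until the lcore is asked to
 * stop.
 */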
for (sm_id = 0; sm_id < nb_fs; sm_id++)
(*pkt_fwd)(fsm[sm_id]);
} while (!fc->stopped);
start_pkt_forward_on_core(void *fwd_arg)
run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
cur_fwd_config.fwd_eng->packet_fwd);
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
run_one_txonly_burst_on_core(void *fwd_arg)
struct fwd_lcore *fwd_lc;
struct fwd_lcore tmp_lcore;
fwd_lc = (struct fwd_lcore *) fwd_arg;
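/*
 * Run the forwarding loop exactly once: the local copy of the lcore
 * context is marked as already stopped, so the do/while loop in
 * run_pkt_fwd_on_lcore() exits after a single pass over the streams.
 */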
tmp_lcore.stopped = 1;
run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
 * Launch packet forwarding:
 * - Set up the per-port forwarding context.
 * - Launch logical cores with their forwarding configuration.
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
port_fwd_begin_t port_fwd_begin;
port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
if (port_fwd_begin != NULL) {
for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
(*port_fwd_begin)(fwd_ports_ids[i]);
for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
lc_id = fwd_lcores_cpuids[i];
if ((interactive == 0) || (lc_id != rte_lcore_id())) {
fwd_lcores[i]->stopped = 0;
diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
fwd_lcores[i], lc_id);
printf("launch lcore %u failed - diag=%d\n",
 * Launch packet forwarding configuration.
start_packet_forwarding(int with_tx_first)
port_fwd_begin_t port_fwd_begin;
port_fwd_end_t port_fwd_end;
struct rte_port *port;
if (all_ports_started() == 0) {
printf("Not all ports were started\n");
if (test_done == 0) {
printf("Packet forwarding already started\n");
for (i = 0; i < nb_fwd_ports; i++) {
pt_id = fwd_ports_ids[i];
port = &ports[pt_id];
if (!port->dcb_flag) {
printf("In DCB mode, all forwarding ports must "
"be configured in this mode.\n");
if (nb_fwd_lcores == 1) {
printf("In DCB mode, the number of forwarding cores "
"should be larger than 1.\n");
flush_fwd_rx_queues();
rxtx_config_display();
for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
pt_id = fwd_ports_ids[i];
port = &ports[pt_id];
rte_eth_stats_get(pt_id, &port->stats);
port->tx_dropped = 0;
map_port_queue_stats_mapping_registers(pt_id, port);
for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
fwd_streams[sm_id]->rx_packets = 0;
fwd_streams[sm_id]->tx_packets = 0;
fwd_streams[sm_id]->fwd_dropped = 0;
fwd_streams[sm_id]->rx_bad_ip_csum = 0;
fwd_streams[sm_id]->rx_bad_l4_csum = 0;
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
sizeof(fwd_streams[sm_id]->rx_burst_stats));
memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
sizeof(fwd_streams[sm_id]->tx_burst_stats));
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
fwd_streams[sm_id]->core_cycles = 0;
if (with_tx_first) {
port_fwd_begin = tx_only_engine.port_fwd_begin;
if (port_fwd_begin != NULL) {
for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
(*port_fwd_begin)(fwd_ports_ids[i]);
launch_packet_forwarding(run_one_txonly_burst_on_core);
rte_eal_mp_wait_lcore();
port_fwd_end = tx_only_engine.port_fwd_end;
if (port_fwd_end != NULL) {
for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
(*port_fwd_end)(fwd_ports_ids[i]);
launch_packet_forwarding(start_pkt_forward_on_core);
stop_packet_forwarding(void)
struct rte_eth_stats stats;
struct rte_port *port;
port_fwd_end_t port_fwd_end;
uint64_t total_recv;
uint64_t total_xmit;
uint64_t total_rx_dropped;
uint64_t total_tx_dropped;
uint64_t total_rx_nombuf;
uint64_t tx_dropped;
uint64_t rx_bad_ip_csum;
uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
uint64_t fwd_cycles;
static const char *acc_stats_border = "+++++++++++++++";
if (all_ports_started() == 0) {
printf("Not all ports were started\n");
printf("Packet forwarding not started\n");
printf("Telling cores to stop...");
for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
fwd_lcores[lc_id]->stopped = 1;
printf("\nWaiting for lcores to finish...\n");
rte_eal_mp_wait_lcore();
port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
if (port_fwd_end != NULL) {
for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
pt_id = fwd_ports_ids[i];
(*port_fwd_end)(pt_id);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
if (cur_fwd_config.nb_fwd_streams >
cur_fwd_config.nb_fwd_ports) {
fwd_stream_stats_display(sm_id);
ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
ports[fwd_streams[sm_id]->tx_port].tx_stream =
ports[fwd_streams[sm_id]->rx_port].rx_stream =
tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
tx_dropped = (uint64_t) (tx_dropped +
fwd_streams[sm_id]->fwd_dropped);
ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
fwd_streams[sm_id]->rx_bad_ip_csum);
ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
fwd_streams[sm_id]->rx_bad_l4_csum);
ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
fwd_cycles = (uint64_t) (fwd_cycles +
fwd_streams[sm_id]->core_cycles);
total_rx_dropped = 0;
total_tx_dropped = 0;
total_rx_nombuf = 0;
for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
pt_id = fwd_ports_ids[i];
port = &ports[pt_id];
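/*
 * Read the current hardware counters and subtract the snapshot taken in
 * start_packet_forwarding(), so that the statistics reported below cover
 * only this forwarding run.
 */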
rte_eth_stats_get(pt_id, &stats);
stats.ipackets -= port->stats.ipackets;
port->stats.ipackets = 0;
stats.opackets -= port->stats.opackets;
port->stats.opackets = 0;
stats.ibytes -= port->stats.ibytes;
port->stats.ibytes = 0;
stats.obytes -= port->stats.obytes;
port->stats.obytes = 0;
stats.imissed -= port->stats.imissed;
port->stats.imissed = 0;
stats.oerrors -= port->stats.oerrors;
port->stats.oerrors = 0;
stats.rx_nombuf -= port->stats.rx_nombuf;
port->stats.rx_nombuf = 0;
stats.fdirmatch -= port->stats.fdirmatch;
port->stats.fdirmatch = 0;
stats.fdirmiss -= port->stats.fdirmiss;
port->stats.fdirmiss = 0;
total_recv += stats.ipackets;
total_xmit += stats.opackets;
total_rx_dropped += stats.imissed;
total_tx_dropped += port->tx_dropped;
total_rx_nombuf += stats.rx_nombuf;
fwd_port_stats_display(pt_id, &stats);
printf("\n %s Accumulated forward statistics for all ports"
acc_stats_border, acc_stats_border);
printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
total_recv, total_rx_dropped, total_recv + total_rx_dropped,
total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
if (total_rx_nombuf > 0)
printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
printf("\n CPU cycles/packet=%u (total cycles="
"%"PRIu64" / total RX packets=%"PRIu64")\n",
(unsigned int)(fwd_cycles / total_recv),
fwd_cycles, total_recv);
printf("\nDone.\n");
dev_set_link_up(portid_t pid)
if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
printf("\nSet link up failed.\n");
dev_set_link_down(portid_t pid)
if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
printf("\nSet link down failed.\n");
all_ports_started(void)
struct rte_port *port;
for (pi = 0; pi < nb_ports; pi++) {
/* Check if there is a port which is not started. */
if (port->port_status != RTE_PORT_STARTED)
/* All ports are started. */
start_port(portid_t pid)
int diag, need_check_link_status = 0;
struct rte_port *port;
struct ether_addr mac_addr;
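/*
 * Each port goes through RTE_PORT_STOPPED -> RTE_PORT_HANDLING ->
 * RTE_PORT_STARTED using atomic compare-and-set transitions; the device
 * and its queues are (re)configured first when the need_reconfig /
 * need_reconfig_queues flags are set.
 */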
if (test_done == 0) {
printf("Please stop forwarding first\n");
if (init_fwd_streams() < 0) {
printf("Fail from init_fwd_streams()\n");
for (pi = 0; pi < nb_ports; pi++) {
if (pid < nb_ports && pid != pi)
if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
RTE_PORT_HANDLING) == 0) {
printf("Port %d is not stopped\n", pi);
if (port->need_reconfig > 0) {
port->need_reconfig = 0;
printf("Configuring Port %d (socket %u)\n", pi,
/* Configure the port. */
diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
if (rte_atomic16_cmpset(&(port->port_status),
RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
printf("Port %d cannot be set back "
"to stopped\n", pi);
printf("Failed to configure port %d\n", pi);
/* Try to reconfigure the port next time. */
port->need_reconfig = 1;
if (port->need_reconfig_queues > 0) {
port->need_reconfig_queues = 0;
/* Set up TX queues. */
for (qi = 0; qi < nb_txq; qi++) {
if ((numa_support) &&
(txring_numa[pi] != NUMA_NO_CONFIG))
diag = rte_eth_tx_queue_setup(pi, qi,
nb_txd, txring_numa[pi],
diag = rte_eth_tx_queue_setup(pi, qi,
nb_txd, port->socket_id,
/* Failed to set up a TX queue; revert the port state and return. */
if (rte_atomic16_cmpset(&(port->port_status),
RTE_PORT_STOPPED) == 0)
printf("Port %d cannot be set back "
"to stopped\n", pi);
printf("Failed to configure port %d TX queues\n", pi);
/* Try to reconfigure the queues next time. */
port->need_reconfig_queues = 1;
/* Set up RX queues. */
for (qi = 0; qi < nb_rxq; qi++) {
if ((numa_support) &&
(rxring_numa[pi] != NUMA_NO_CONFIG)) {
struct rte_mempool *mp =
mbuf_pool_find(rxring_numa[pi]);
printf("Failed to set up RX queue: "
"no mempool allocated "
"on socket %d\n",
diag = rte_eth_rx_queue_setup(pi, qi,
nb_rxd, rxring_numa[pi],
&(port->rx_conf), mp);
diag = rte_eth_rx_queue_setup(pi, qi,
nb_rxd, port->socket_id,
mbuf_pool_find(port->socket_id));
/* Failed to set up an RX queue; revert the port state and return. */
if (rte_atomic16_cmpset(&(port->port_status),
RTE_PORT_STOPPED) == 0)
printf("Port %d cannot be set back "
"to stopped\n", pi);
printf("Failed to configure port %d RX queues\n", pi);
/* Try to reconfigure the queues next time. */
port->need_reconfig_queues = 1;
if (rte_eth_dev_start(pi) < 0) {
printf("Failed to start port %d\n", pi);
/* Failed to start the port; revert it to stopped. */
if (rte_atomic16_cmpset(&(port->port_status),
RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
printf("Port %d cannot be set back to "
if (rte_atomic16_cmpset(&(port->port_status),
RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
printf("Port %d cannot be set into started\n", pi);
rte_eth_macaddr_get(pi, &mac_addr);
printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
/* At least one port was started; the link status needs checking. */
need_check_link_status = 1;
if (need_check_link_status && !no_link_check)
check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
printf("Please stop the ports first\n");
stop_port(portid_t pid)
struct rte_port *port;
int need_check_link_status = 0;
if (test_done == 0) {
printf("Please stop forwarding first\n");
printf("Stopping ports...\n");
for (pi = 0; pi < nb_ports; pi++) {
if (pid < nb_ports && pid != pi)
if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
RTE_PORT_HANDLING) == 0)
rte_eth_dev_stop(pi);
if (rte_atomic16_cmpset(&(port->port_status),
RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
printf("Port %d cannot be set to stopped\n", pi);
need_check_link_status = 1;
if (need_check_link_status && !no_link_check)
check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
close_port(portid_t pid)
struct rte_port *port;
if (test_done == 0) {
printf("Please stop forwarding first\n");
printf("Closing ports...\n");
for (pi = 0; pi < nb_ports; pi++) {
if (pid < nb_ports && pid != pi)
if (rte_atomic16_cmpset(&(port->port_status),
RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
printf("Port %d is not stopped\n", pi);
rte_eth_dev_close(pi);
if (rte_atomic16_cmpset(&(port->port_status),
RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
printf("Port %d cannot be set to closed\n", pi);
all_ports_stopped(void)
struct rte_port *port;
for (pi = 0; pi < nb_ports; pi++) {
if (port->port_status != RTE_PORT_STOPPED)
port_is_started(portid_t port_id)
if (port_id_is_invalid(port_id))
if (ports[port_id].port_status != RTE_PORT_STARTED)
for (pt_id = 0; pt_id < nb_ports; pt_id++) {
printf("Stopping port %d...", pt_id);
rte_eth_dev_close(pt_id);
typedef void (*cmd_func_t)(void);
struct pmd_test_command {
const char *cmd_name;
cmd_func_t cmd_func;
#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
/* Check the link status of all ports for up to 9 s, and print the final status. */
check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
#define CHECK_INTERVAL 100 /* 100 ms */
#define MAX_CHECK_TIME 90 /* 9 s (90 * 100 ms) in total */
uint8_t portid, count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
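/*
 * Poll every CHECK_INTERVAL milliseconds, up to MAX_CHECK_TIME times.
 * The per-port status is printed once, either when all masked ports
 * report link up or when the timeout expires.
 */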
printf("Checking link statuses...\n");
for (count = 0; count <= MAX_CHECK_TIME; count++) {
for (portid = 0; portid < port_num; portid++) {
if ((port_mask & (1 << portid)) == 0)
memset(&link, 0, sizeof(link));
rte_eth_link_get_nowait(portid, &link);
/* Print the link status if the print flag is set. */
if (print_flag == 1) {
if (link.link_status)
printf("Port %d Link Up - speed %u "
"Mbps - %s\n", (uint8_t)portid,
(unsigned)link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex"));
printf("Port %d Link Down\n",
/* Clear the all_ports_up flag if any link is down. */
if (link.link_status == 0) {
/* After the final print of all link statuses, get out. */
if (print_flag == 1)
if (all_ports_up == 0) {
rte_delay_ms(CHECK_INTERVAL);
/* Set the print flag if all ports are up or the timeout expired. */
if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
uint8_t mapping_found = 0;
for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
if ((tx_queue_stats_mappings[i].port_id == port_id) &&
(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
tx_queue_stats_mappings[i].queue_id,
tx_queue_stats_mappings[i].stats_counter_id);
port->tx_queue_stats_mapping_enabled = 1;
set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
uint8_t mapping_found = 0;
for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
if ((rx_queue_stats_mappings[i].port_id == port_id) &&
(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
rx_queue_stats_mappings[i].queue_id,
rx_queue_stats_mappings[i].stats_counter_id);
port->rx_queue_stats_mapping_enabled = 1;
map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
diag = set_tx_queue_stats_mapping_registers(pi, port);
if (diag == -ENOTSUP) {
port->tx_queue_stats_mapping_enabled = 0;
printf("TX queue stats mapping not supported port id=%d\n", pi);
rte_exit(EXIT_FAILURE,
"set_tx_queue_stats_mapping_registers "
"failed for port id=%d diag=%d\n",
diag = set_rx_queue_stats_mapping_registers(pi, port);
if (diag == -ENOTSUP) {
port->rx_queue_stats_mapping_enabled = 0;
printf("RX queue stats mapping not supported port id=%d\n", pi);
rte_exit(EXIT_FAILURE,
"set_rx_queue_stats_mapping_registers "
"failed for port id=%d diag=%d\n",
init_port_config(void)
struct rte_port *port;
for (pid = 0; pid < nb_ports; pid++) {
port->dev_conf.rxmode = rx_mode;
port->dev_conf.fdir_conf = fdir_conf;
port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
/* In SR-IOV mode, RSS mode is not available. */
if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
port->rx_conf.rx_thresh = rx_thresh;
port->rx_conf.rx_free_thresh = rx_free_thresh;
port->rx_conf.rx_drop_en = rx_drop_en;
port->tx_conf.tx_thresh = tx_thresh;
port->tx_conf.tx_rs_thresh = tx_rs_thresh;
port->tx_conf.tx_free_thresh = tx_free_thresh;
port->tx_conf.txq_flags = txq_flags;
rte_eth_macaddr_get(pid, &port->eth_addr);
map_port_queue_stats_mapping_registers(pid, port);
#ifdef RTE_NIC_BYPASS
rte_eth_dev_bypass_init(pid);
const uint16_t vlan_tags[] = {
0, 1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 26, 27, 28, 29, 30, 31
get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
 * Build up the correct configuration for DCB+VT based on the VLAN tags array
 * given above, and the number of traffic classes available for use.
if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
/* VMDq+DCB RX and TX configurations. */
vmdq_rx_conf.enable_default_pool = 0;
vmdq_rx_conf.default_pool = 0;
vmdq_rx_conf.nb_queue_pools =
(dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
vmdq_tx_conf.nb_queue_pools =
(dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
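/*
 * The pool count is chosen so that pools times traffic classes stays
 * constant: 32 pools with 4 TCs, or 16 pools with 8 TCs.
 */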
vmdq_rx_conf.nb_pool_maps = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[i];
vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
vmdq_rx_conf.dcb_queue[i] = i;
vmdq_tx_conf.dcb_queue[i] = i;
/* Set the DCB mode for multi-queue RX and TX. */
eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
if (dcb_conf->pfc_en)
eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
sizeof(struct rte_eth_vmdq_dcb_conf)));
(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
struct rte_eth_dcb_rx_conf rx_conf;
struct rte_eth_dcb_tx_conf tx_conf;
/* Queue mapping configuration for DCB RX and TX. */
if (dcb_conf->num_tcs == ETH_4_TCS)
dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
rx_conf.nb_tcs = dcb_conf->num_tcs;
tx_conf.nb_tcs = dcb_conf->num_tcs;
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
rx_conf.dcb_queue[i] = i;
tx_conf.dcb_queue[i] = i;
eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
if (dcb_conf->pfc_en)
eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
sizeof(struct rte_eth_dcb_rx_conf)));
(void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
sizeof(struct rte_eth_dcb_tx_conf)));
init_port_dcb_config(portid_t pid, struct dcb_config *dcb_conf)
struct rte_eth_conf port_conf;
struct rte_port *rte_port;
/* RX queue and TX queue configuration in DCB mode. */
rx_free_thresh = 64;
memset(&port_conf, 0, sizeof(struct rte_eth_conf));
/* Enter DCB configuration state. */
nb_vlan = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
/* Set the configuration of DCB in VT mode and DCB in non-VT mode. */
retval = get_eth_dcb_conf(&port_conf, dcb_conf);
rte_port = &ports[pid];
memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
rte_port->rx_conf.rx_thresh = rx_thresh;
rte_port->rx_conf.rx_free_thresh = rx_free_thresh;
rte_port->tx_conf.tx_thresh = tx_thresh;
rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh;
rte_port->tx_conf.tx_free_thresh = tx_free_thresh;
rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
for (i = 0; i < nb_vlan; i++) {
rx_vft_set(pid, vlan_tags[i], 1);
rte_eth_macaddr_get(pid, &rte_port->eth_addr);
map_port_queue_stats_mapping_registers(pid, rte_port);
rte_port->dcb_flag = 1;
#ifdef RTE_EXEC_ENV_BAREMETAL
main(int argc, char **argv)
diag = rte_eal_init(argc, argv);
rte_panic("Cannot init EAL\n");
nb_ports = (portid_t) rte_eth_dev_count();
rte_exit(EXIT_FAILURE, "No probed ethernet devices - "
"CONFIG_RTE_LIBRTE_IGB_PMD=y and that "
"CONFIG_RTE_LIBRTE_EM_PMD=y and that "
"CONFIG_RTE_LIBRTE_IXGBE_PMD=y in your "
"configuration file\n");
set_def_fwd_config();
rte_panic("Empty set of forwarding logical cores - check the "
"core mask supplied in the command parameters\n");
launch_args_parse(argc, argv);
if (nb_rxq > nb_txq)
printf("Warning: nb_rxq=%d enables RSS configuration, "
"but nb_txq=%d will prevent it from being fully tested.\n",
if (start_port(RTE_PORT_ALL) != 0)
rte_exit(EXIT_FAILURE, "Start ports failed\n");
/* Set all ports to promiscuous mode by default. */
for (port_id = 0; port_id < nb_ports; port_id++)
rte_eth_promiscuous_enable(port_id);
#ifdef RTE_LIBRTE_CMDLINE
if (interactive == 1) {
printf("Start automatic packet forwarding\n");
start_packet_forwarding(0);
printf("No command-line core given, starting packet forwarding\n");
start_packet_forwarding(0);
printf("Press enter to exit\n");
rc = read(0, &c, 1);