4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <sys/types.h>
44 #include <sys/queue.h>
51 #include <rte_common.h>
52 #include <rte_byteorder.h>
54 #include <rte_debug.h>
55 #include <rte_cycles.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
59 #include <rte_launch.h>
60 #include <rte_tailq.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
70 #include <rte_interrupts.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
80 #include "mempool_osdep.h"
82 uint16_t verbose_level = 0; /**< Silent by default. */
84 /* use master core for command line? */
85 uint8_t interactive = 0;
86 uint8_t auto_start = 0;
89 * NUMA support configuration.
90 * When set, NUMA support attempts to distribute the allocation of the
91 * RX and TX memory rings, and of the DMA memory buffers (mbufs) of the
92 * probed ports, among CPU sockets 0 and 1.
93 * Otherwise, all memory is allocated from CPU socket 0.
95 uint8_t numa_support = 0; /**< No numa support by default */
98 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
101 uint8_t socket_num = UMA_NO_CONFIG;
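/*
 * For example (sketch of a typical invocation, exact EAL flags may vary):
 *   ./testpmd -c 0x3 -n 4 -- --socket-num=1
 * would allocate all memory from socket 1 in UMA mode instead of the
 * default socket 0.
 */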
104 * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
109 * Record the Ethernet addresses of peer target ports to which packets are
111 * Must be instantiated with the Ethernet addresses of peer traffic generator
114 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
115 portid_t nb_peer_eth_addrs = 0;
118 * Probed Target Environment.
120 struct rte_port *ports; /**< For all probed ethernet ports. */
121 portid_t nb_ports; /**< Number of probed ethernet ports. */
122 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
123 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
126 * Test Forwarding Configuration.
127 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
128 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
130 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
131 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
132 portid_t nb_cfg_ports; /**< Number of configured ports. */
133 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
135 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
136 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
138 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
139 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
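/*
 * Each forwarding stream binds one (RX port, RX queue) pair to one
 * (TX port, TX queue) pair; every forwarding lcore polls its assigned
 * streams in a run-to-completion loop.
 */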
142 * Forwarding engines.
144 struct fwd_engine * fwd_engines[] = {
147 &mac_retry_fwd_engine,
154 #ifdef RTE_LIBRTE_IEEE1588
155 &ieee1588_fwd_engine,
160 struct fwd_config cur_fwd_config;
161 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
163 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
164 uint32_t param_total_num_mbufs = 0; /**< Number of mbufs in all pools, if
165 * specified on the command line. */
168 * Configuration of packet segments used by the "txonly" processing engine.
170 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
171 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
172 TXONLY_DEF_PACKET_LEN,
174 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
176 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
177 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
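/*
 * The per-lcore mempool cache keeps recently freed mbufs local to each
 * core, so burst allocate/free cycles avoid contending on the shared
 * pool ring; its size is accounted for when sizing the pools below.
 */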
179 /* Whether the current configuration is in DCB mode; 0 means it is not */
180 uint8_t dcb_config = 0;
182 /* Whether DCB is currently being tested */
183 uint8_t dcb_test = 0;
185 /* DCB-on, VT-on queue mapping is the default */
186 enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
189 * Configurable number of RX/TX queues.
191 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
192 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
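/* Both counts can be set on the command line (--rxq=N / --txq=N). */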
195 * Configurable number of RX/TX ring descriptors.
197 #define RTE_TEST_RX_DESC_DEFAULT 128
198 #define RTE_TEST_TX_DESC_DEFAULT 512
199 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
200 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
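/* Overridable on the command line with --rxd=N and --txd=N. */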
203 * Configurable values of RX and TX ring threshold registers.
205 #define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
206 #define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
207 #define RX_WTHRESH 0 /**< Default value of RX write-back threshold register. */
209 #define TX_PTHRESH 32 /**< Default value of TX prefetch threshold register. */
210 #define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
211 #define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */
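/*
 * The prefetch (pthresh), host (hthresh) and write-back (wthresh)
 * thresholds are written into per-queue NIC ring registers; the optimal
 * values are NIC/PMD specific, so the defaults above are only reasonable
 * starting points.
 */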
213 struct rte_eth_thresh rx_thresh = {
214 .pthresh = RX_PTHRESH,
215 .hthresh = RX_HTHRESH,
216 .wthresh = RX_WTHRESH,
219 struct rte_eth_thresh tx_thresh = {
220 .pthresh = TX_PTHRESH,
221 .hthresh = TX_HTHRESH,
222 .wthresh = TX_WTHRESH,
226 * Configurable value of RX free threshold.
228 uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */
231 * Configurable value of RX drop enable.
233 uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */
236 * Configurable value of TX free threshold.
238 uint16_t tx_free_thresh = 0; /* Use default values. */
241 * Configurable value of TX RS bit threshold.
243 uint16_t tx_rs_thresh = 0; /* Use default values. */
246 * Configurable value of TX queue flags.
248 uint32_t txq_flags = 0; /* No flags set. */
251 * Receive Side Scaling (RSS) configuration.
253 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
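/*
 * ETH_RSS_IP restricts the RSS hash input to IP header fields; the
 * setting can typically be changed at run time from the interactive
 * command line (e.g. "port config all rss ip|udp|none").
 */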
256 * Port topology configuration
258 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
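/*
 * In paired topology ports are handled in couples (0<->1, 2<->3, ...);
 * in chained topology each port forwards to the next enabled one, which
 * suits ring-like cabling setups.
 */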
261 * Avoid flushing all the RX streams before starting forwarding.
263 uint8_t no_flush_rx = 0; /* flush by default */
266 * Avoid checking the link status when starting/stopping a port.
268 uint8_t no_link_check = 0; /* check by default */
271 * NIC bypass mode configuration options.
273 #ifdef RTE_NIC_BYPASS
275 /* The NIC bypass watchdog timeout. */
276 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
281 * Ethernet device configuration.
283 struct rte_eth_rxmode rx_mode = {
284 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
286 .header_split = 0, /**< Header Split disabled. */
287 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
288 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
289 .hw_vlan_strip = 1, /**< VLAN strip enabled. */
290 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
291 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
292 .hw_strip_crc = 0, /**< CRC stripping by hardware disabled. */
295 struct rte_fdir_conf fdir_conf = {
296 .mode = RTE_FDIR_MODE_NONE,
297 .pballoc = RTE_FDIR_PBALLOC_64K,
298 .status = RTE_FDIR_REPORT_STATUS,
299 .flexbytes_offset = 0x6,
303 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
305 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
306 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
308 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
309 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
311 uint16_t nb_tx_queue_stats_mappings = 0;
312 uint16_t nb_rx_queue_stats_mappings = 0;
314 /* Forward function declarations */
315 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
316 static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);
319 * Check if all the ports are started.
320 * If yes, return positive value. If not, return zero.
322 static int all_ports_started(void);
325 * Set up the default configuration.
328 set_default_fwd_lcores_config(void)
334 for (i = 0; i < RTE_MAX_LCORE; i++) {
335 if (!rte_lcore_is_enabled(i))
337 if (i == rte_get_master_lcore())
339 fwd_lcores_cpuids[nb_lc++] = i;
341 nb_lcores = (lcoreid_t) nb_lc;
342 nb_cfg_lcores = nb_lcores;
347 set_def_peer_eth_addrs(void)
351 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
352 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
353 peer_eth_addrs[i].addr_bytes[5] = i;
358 set_default_fwd_ports_config(void)
362 for (pt_id = 0; pt_id < nb_ports; pt_id++)
363 fwd_ports_ids[pt_id] = pt_id;
365 nb_cfg_ports = nb_ports;
366 nb_fwd_ports = nb_ports;
370 set_def_fwd_config(void)
372 set_default_fwd_lcores_config();
373 set_def_peer_eth_addrs();
374 set_default_fwd_ports_config();
378 * Configuration initialisation done once at init time.
380 struct mbuf_ctor_arg {
381 uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
382 uint16_t seg_buf_size; /**< size of data segment in mbuf. */
385 struct mbuf_pool_ctor_arg {
386 uint16_t seg_buf_size; /**< size of data segment in mbuf. */
390 testpmd_mbuf_ctor(struct rte_mempool *mp,
393 __attribute__((unused)) unsigned i)
395 struct mbuf_ctor_arg *mb_ctor_arg;
398 mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
399 mb = (struct rte_mbuf *) raw_mbuf;
401 mb->type = RTE_MBUF_PKT;
403 mb->buf_addr = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
404 mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
405 mb_ctor_arg->seg_buf_offset);
406 mb->buf_len = mb_ctor_arg->seg_buf_size;
409 mb->pkt.data = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
411 mb->pkt.vlan_macip.data = 0;
412 mb->pkt.hash.rss = 0;
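/*
 * Resulting per-object layout in the pool (sketch):
 *
 *   +-----------------+----------+---------------------------+
 *   | struct rte_mbuf | headroom |        packet data        |
 *   +-----------------+----------+---------------------------+
 *   ^ mb              ^ buf_addr ^ pkt.data
 *
 * seg_buf_offset is the cache-line-rounded size of struct rte_mbuf,
 * so buf_addr begins right after the mbuf metadata.
 */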
416 testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
419 struct mbuf_pool_ctor_arg *mbp_ctor_arg;
420 struct rte_pktmbuf_pool_private *mbp_priv;
422 if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
423 printf("%s(%s) private_data_size %d < %d\n",
424 __func__, mp->name, (int) mp->private_data_size,
425 (int) sizeof(struct rte_pktmbuf_pool_private));
428 mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
429 mbp_priv = rte_mempool_get_priv(mp);
430 mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
434 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
435 unsigned int socket_id)
437 char pool_name[RTE_MEMPOOL_NAMESIZE];
438 struct rte_mempool *rte_mp;
439 struct mbuf_pool_ctor_arg mbp_ctor_arg;
440 struct mbuf_ctor_arg mb_ctor_arg;
443 mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
445 mb_ctor_arg.seg_buf_offset =
446 (uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
447 mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
448 mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
449 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
451 #ifdef RTE_LIBRTE_PMD_XENVIRT
452 rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
453 (unsigned) mb_mempool_cache,
454 sizeof(struct rte_pktmbuf_pool_private),
455 testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
456 testpmd_mbuf_ctor, &mb_ctor_arg,
463 rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
464 (unsigned) mb_mempool_cache,
465 sizeof(struct rte_pktmbuf_pool_private),
466 testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
467 testpmd_mbuf_ctor, &mb_ctor_arg,
470 rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size,
471 (unsigned) mb_mempool_cache,
472 sizeof(struct rte_pktmbuf_pool_private),
473 testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
474 testpmd_mbuf_ctor, &mb_ctor_arg,
479 if (rte_mp == NULL) {
480 rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
481 "failed\n", socket_id);
482 } else if (verbose_level > 0) {
483 rte_mempool_dump(stdout, rte_mp);
488 * Check whether a given socket ID is valid in NUMA mode;
489 * return 0 if valid, -1 otherwise.
492 check_socket_id(const unsigned int socket_id)
494 static int warning_once = 0;
496 if (socket_id >= MAX_SOCKET) {
497 if (!warning_once && numa_support)
498 printf("Warning: NUMA should be configured manually by"
499 " using --port-numa-config and"
500 " --ring-numa-config parameters along with"
512 struct rte_port *port;
513 struct rte_mempool *mbp;
514 unsigned int nb_mbuf_per_pool;
516 uint8_t port_per_socket[MAX_SOCKET];
518 memset(port_per_socket, 0, MAX_SOCKET);
519 /* Configuration of logical cores. */
520 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
521 sizeof(struct fwd_lcore *) * nb_lcores,
523 if (fwd_lcores == NULL) {
524 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
525 "failed\n", nb_lcores);
527 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
528 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
529 sizeof(struct fwd_lcore),
531 if (fwd_lcores[lc_id] == NULL) {
532 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
535 fwd_lcores[lc_id]->cpuid_idx = lc_id;
539 * Create mbuf pools.
540 * If NUMA support is disabled, create a single mbuf pool in
541 * socket 0 memory by default.
542 * Otherwise, create an mbuf pool in the memory of each of sockets 0 and 1.
544 * Use the maximum values of nb_rxd and nb_txd here, so that nb_rxd and
545 * nb_txd can be reconfigured at run time.
547 if (param_total_num_mbufs)
548 nb_mbuf_per_pool = param_total_num_mbufs;
550 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
551 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
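/*
 * Worked example, assuming the usual testpmd.h defaults
 * (RTE_TEST_RX_DESC_MAX = 2048, RTE_TEST_TX_DESC_MAX = 2048,
 * MAX_PKT_BURST = 512) with 4 lcores and a 250-mbuf cache:
 *   2048 + (4 * 250) + 2048 + 512 = 5608 mbufs per port.
 */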
554 nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
558 if (socket_num == UMA_NO_CONFIG)
559 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
561 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
565 /* Configuration of Ethernet ports. */
566 ports = rte_zmalloc("testpmd: ports",
567 sizeof(struct rte_port) * nb_ports,
570 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
571 "failed\n", nb_ports);
574 for (pid = 0; pid < nb_ports; pid++) {
576 rte_eth_dev_info_get(pid, &port->dev_info);
579 if (port_numa[pid] != NUMA_NO_CONFIG)
580 port_per_socket[port_numa[pid]]++;
582 uint32_t socket_id = rte_eth_dev_socket_id(pid);
584 /* if socket_id is invalid, set to 0 */
585 if (check_socket_id(socket_id) < 0)
587 port_per_socket[socket_id]++;
591 /* set flag to initialize port/queue */
592 port->need_reconfig = 1;
593 port->need_reconfig_queues = 1;
598 unsigned int nb_mbuf;
600 if (param_total_num_mbufs)
601 nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
603 for (i = 0; i < MAX_SOCKET; i++) {
604 nb_mbuf = (nb_mbuf_per_pool *
607 mbuf_pool_create(mbuf_data_size,
614 * Record which mbuf pool each logical core must use, if needed.
616 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
617 mbp = mbuf_pool_find(rte_lcore_to_socket_id(lc_id));
619 mbp = mbuf_pool_find(0);
620 fwd_lcores[lc_id]->mbp = mbp;
623 /* Configuration of packet forwarding streams. */
624 if (init_fwd_streams() < 0)
625 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
630 reconfig(portid_t new_port_id)
632 struct rte_port *port;
634 /* Reconfiguration of Ethernet ports. */
635 ports = rte_realloc(ports,
636 sizeof(struct rte_port) * nb_ports,
639 rte_exit(EXIT_FAILURE, "rte_realloc(%d struct rte_port) failed\n",
643 port = &ports[new_port_id];
644 rte_eth_dev_info_get(new_port_id, &port->dev_info);
646 /* set flag to initialize port/queue */
647 port->need_reconfig = 1;
648 port->need_reconfig_queues = 1;
655 init_fwd_streams(void)
658 struct rte_port *port;
659 streamid_t sm_id, nb_fwd_streams_new;
661 /* Set the socket ID of each port according to the NUMA configuration. */
662 for (pid = 0; pid < nb_ports; pid++) {
664 if (nb_rxq > port->dev_info.max_rx_queues) {
665 printf("Fail: nb_rxq(%d) is greater than "
666 "max_rx_queues(%d)\n", nb_rxq,
667 port->dev_info.max_rx_queues);
670 if (nb_txq > port->dev_info.max_tx_queues) {
671 printf("Fail: nb_txq(%d) is greater than "
672 "max_tx_queues(%d)\n", nb_txq,
673 port->dev_info.max_tx_queues);
677 if (port_numa[pid] != NUMA_NO_CONFIG)
678 port->socket_id = port_numa[pid];
680 port->socket_id = rte_eth_dev_socket_id(pid);
682 /* if socket_id is invalid, set to 0 */
683 if (check_socket_id(port->socket_id) < 0)
688 if (socket_num == UMA_NO_CONFIG)
691 port->socket_id = socket_num;
695 nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
696 if (nb_fwd_streams_new == nb_fwd_streams)
699 if (fwd_streams != NULL) {
700 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
701 if (fwd_streams[sm_id] == NULL)
703 rte_free(fwd_streams[sm_id]);
704 fwd_streams[sm_id] = NULL;
706 rte_free(fwd_streams);
711 nb_fwd_streams = nb_fwd_streams_new;
712 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
713 sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
714 if (fwd_streams == NULL)
715 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
716 "failed\n", nb_fwd_streams);
718 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
719 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
720 sizeof(struct fwd_stream), CACHE_LINE_SIZE);
721 if (fwd_streams[sm_id] == NULL)
722 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
729 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
731 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
733 unsigned int total_burst;
734 unsigned int nb_burst;
735 unsigned int burst_stats[3];
736 uint16_t pktnb_stats[3];
738 int burst_percent[3];
741 * First compute the total number of packet bursts and the
742 * two highest numbers of bursts of the same number of packets.
745 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
746 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
747 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
748 nb_burst = pbs->pkt_burst_spread[nb_pkt];
751 total_burst += nb_burst;
752 if (nb_burst > burst_stats[0]) {
753 burst_stats[1] = burst_stats[0];
754 pktnb_stats[1] = pktnb_stats[0];
755 burst_stats[0] = nb_burst;
756 pktnb_stats[0] = nb_pkt;
759 if (total_burst == 0)
761 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
762 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
763 burst_percent[0], (int) pktnb_stats[0]);
764 if (burst_stats[0] == total_burst) {
768 if (burst_stats[0] + burst_stats[1] == total_burst) {
769 printf(" + %d%% of %d pkts]\n",
770 100 - burst_percent[0], pktnb_stats[1]);
773 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
774 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
775 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
776 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
779 printf(" + %d%% of %d pkts + %d%% of others]\n",
780 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
782 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
785 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
787 struct rte_port *port;
790 static const char *fwd_stats_border = "----------------------";
792 port = &ports[port_id];
793 printf("\n %s Forward statistics for port %-2d %s\n",
794 fwd_stats_border, port_id, fwd_stats_border);
796 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
797 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
799 stats->ipackets, stats->imissed,
800 (uint64_t) (stats->ipackets + stats->imissed));
802 if (cur_fwd_eng == &csum_fwd_engine)
803 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
804 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
805 if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
806 printf(" RX-badcrc: %-14"PRIu64" RX-badlen: %-14"PRIu64
807 "RX-error: %-"PRIu64"\n",
808 stats->ibadcrc, stats->ibadlen, stats->ierrors);
809 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
812 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
814 stats->opackets, port->tx_dropped,
815 (uint64_t) (stats->opackets + port->tx_dropped));
818 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
820 stats->ipackets, stats->imissed,
821 (uint64_t) (stats->ipackets + stats->imissed));
823 if (cur_fwd_eng == &csum_fwd_engine)
824 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
825 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
826 if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
827 printf(" RX-badcrc: %14"PRIu64" RX-badlen: %14"PRIu64
828 " RX-error:%"PRIu64"\n",
829 stats->ibadcrc, stats->ibadlen, stats->ierrors);
830 printf(" RX-nombufs: %14"PRIu64"\n",
834 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
836 stats->opackets, port->tx_dropped,
837 (uint64_t) (stats->opackets + port->tx_dropped));
840 /* Display statistics of XON/XOFF pause frames, if any. */
841 if ((stats->tx_pause_xon | stats->rx_pause_xon |
842 stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) {
843 printf(" RX-XOFF: %-14"PRIu64" RX-XON: %-14"PRIu64"\n",
844 stats->rx_pause_xoff, stats->rx_pause_xon);
845 printf(" TX-XOFF: %-14"PRIu64" TX-XON: %-14"PRIu64"\n",
846 stats->tx_pause_xoff, stats->tx_pause_xon);
849 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
851 pkt_burst_stats_display("RX",
852 &port->rx_stream->rx_burst_stats);
854 pkt_burst_stats_display("TX",
855 &port->tx_stream->tx_burst_stats);
858 if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
859 printf(" Fdirmiss:%14"PRIu64" Fdirmatch:%14"PRIu64"\n",
863 if (port->rx_queue_stats_mapping_enabled) {
865 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
866 printf(" Stats reg %2d RX-packets:%14"PRIu64
867 " RX-errors:%14"PRIu64
868 " RX-bytes:%14"PRIu64"\n",
869 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
873 if (port->tx_queue_stats_mapping_enabled) {
874 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
875 printf(" Stats reg %2d TX-packets:%14"PRIu64
876 " TX-bytes:%14"PRIu64"\n",
877 i, stats->q_opackets[i], stats->q_obytes[i]);
881 printf(" %s--------------------------------%s\n",
882 fwd_stats_border, fwd_stats_border);
886 fwd_stream_stats_display(streamid_t stream_id)
888 struct fwd_stream *fs;
889 static const char *fwd_top_stats_border = "-------";
891 fs = fwd_streams[stream_id];
892 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
893 (fs->fwd_dropped == 0))
895 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
896 "TX Port=%2d/Queue=%2d %s\n",
897 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
898 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
899 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
900 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
902 /* if checksum mode */
903 if (cur_fwd_eng == &csum_fwd_engine) {
904 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
905 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
908 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
909 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
910 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
915 flush_fwd_rx_queues(void)
917 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
925 for (j = 0; j < 2; j++) {
926 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
927 for (rxq = 0; rxq < nb_rxq; rxq++) {
928 port_id = fwd_ports_ids[rxp];
930 nb_rx = rte_eth_rx_burst(port_id, rxq,
931 pkts_burst, MAX_PKT_BURST);
932 for (i = 0; i < nb_rx; i++)
933 rte_pktmbuf_free(pkts_burst[i]);
937 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
942 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
944 struct fwd_stream **fsm;
948 fsm = &fwd_streams[fc->stream_idx];
949 nb_fs = fc->stream_nb;
951 for (sm_id = 0; sm_id < nb_fs; sm_id++)
952 (*pkt_fwd)(fsm[sm_id]);
953 } while (!fc->stopped);
957 start_pkt_forward_on_core(void *fwd_arg)
959 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
960 cur_fwd_config.fwd_eng->packet_fwd);
965 * Run the TXONLY packet forwarding engine to send a single burst of packets.
966 * Used to start communication flows in network loopback test configurations.
969 run_one_txonly_burst_on_core(void *fwd_arg)
971 struct fwd_lcore *fwd_lc;
972 struct fwd_lcore tmp_lcore;
974 fwd_lc = (struct fwd_lcore *) fwd_arg;
976 tmp_lcore.stopped = 1;
977 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
982 * Launch packet forwarding:
983 * - Setup per-port forwarding context.
984 * - Launch logical cores with their forwarding configuration.
987 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
989 port_fwd_begin_t port_fwd_begin;
994 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
995 if (port_fwd_begin != NULL) {
996 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
997 (*port_fwd_begin)(fwd_ports_ids[i]);
999 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1000 lc_id = fwd_lcores_cpuids[i];
1001 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1002 fwd_lcores[i]->stopped = 0;
1003 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1004 fwd_lcores[i], lc_id);
1006 printf("launch lcore %u failed - diag=%d\n",
1013 * Launch packet forwarding configuration.
1016 start_packet_forwarding(int with_tx_first)
1018 port_fwd_begin_t port_fwd_begin;
1019 port_fwd_end_t port_fwd_end;
1020 struct rte_port *port;
1025 if (all_ports_started() == 0) {
1026 printf("Not all ports were started\n");
1029 if (test_done == 0) {
1030 printf("Packet forwarding already started\n");
1034 for (i = 0; i < nb_fwd_ports; i++) {
1035 pt_id = fwd_ports_ids[i];
1036 port = &ports[pt_id];
1037 if (!port->dcb_flag) {
1038 printf("In DCB mode, all forwarding ports must "
1039 "be configured in this mode.\n");
1043 if (nb_fwd_lcores == 1) {
1044 printf("In DCB mode,the nb forwarding cores "
1045 "should be larger than 1.\n");
1052 flush_fwd_rx_queues();
1055 rxtx_config_display();
1057 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1058 pt_id = fwd_ports_ids[i];
1059 port = &ports[pt_id];
1060 rte_eth_stats_get(pt_id, &port->stats);
1061 port->tx_dropped = 0;
1063 map_port_queue_stats_mapping_registers(pt_id, port);
1065 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1066 fwd_streams[sm_id]->rx_packets = 0;
1067 fwd_streams[sm_id]->tx_packets = 0;
1068 fwd_streams[sm_id]->fwd_dropped = 0;
1069 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1070 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1072 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1073 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1074 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1075 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1076 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1078 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1079 fwd_streams[sm_id]->core_cycles = 0;
1082 if (with_tx_first) {
1083 port_fwd_begin = tx_only_engine.port_fwd_begin;
1084 if (port_fwd_begin != NULL) {
1085 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1086 (*port_fwd_begin)(fwd_ports_ids[i]);
1088 launch_packet_forwarding(run_one_txonly_burst_on_core);
1089 rte_eal_mp_wait_lcore();
1090 port_fwd_end = tx_only_engine.port_fwd_end;
1091 if (port_fwd_end != NULL) {
1092 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1093 (*port_fwd_end)(fwd_ports_ids[i]);
1096 launch_packet_forwarding(start_pkt_forward_on_core);
1100 stop_packet_forwarding(void)
1102 struct rte_eth_stats stats;
1103 struct rte_port *port;
1104 port_fwd_end_t port_fwd_end;
1109 uint64_t total_recv;
1110 uint64_t total_xmit;
1111 uint64_t total_rx_dropped;
1112 uint64_t total_tx_dropped;
1113 uint64_t total_rx_nombuf;
1114 uint64_t tx_dropped;
1115 uint64_t rx_bad_ip_csum;
1116 uint64_t rx_bad_l4_csum;
1117 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1118 uint64_t fwd_cycles;
1120 static const char *acc_stats_border = "+++++++++++++++";
1122 if (all_ports_started() == 0) {
1123 printf("Not all ports were started\n");
1127 printf("Packet forwarding not started\n");
1130 printf("Telling cores to stop...");
1131 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1132 fwd_lcores[lc_id]->stopped = 1;
1133 printf("\nWaiting for lcores to finish...\n");
1134 rte_eal_mp_wait_lcore();
1135 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1136 if (port_fwd_end != NULL) {
1137 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1138 pt_id = fwd_ports_ids[i];
1139 (*port_fwd_end)(pt_id);
1142 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1145 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1146 if (cur_fwd_config.nb_fwd_streams >
1147 cur_fwd_config.nb_fwd_ports) {
1148 fwd_stream_stats_display(sm_id);
1149 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1150 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1152 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1154 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1157 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1158 tx_dropped = (uint64_t) (tx_dropped +
1159 fwd_streams[sm_id]->fwd_dropped);
1160 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1163 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1164 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1165 fwd_streams[sm_id]->rx_bad_ip_csum);
1166 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1170 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1171 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1172 fwd_streams[sm_id]->rx_bad_l4_csum);
1173 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1176 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1177 fwd_cycles = (uint64_t) (fwd_cycles +
1178 fwd_streams[sm_id]->core_cycles);
1183 total_rx_dropped = 0;
1184 total_tx_dropped = 0;
1185 total_rx_nombuf = 0;
1186 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1187 pt_id = fwd_ports_ids[i];
1189 port = &ports[pt_id];
1190 rte_eth_stats_get(pt_id, &stats);
1191 stats.ipackets -= port->stats.ipackets;
1192 port->stats.ipackets = 0;
1193 stats.opackets -= port->stats.opackets;
1194 port->stats.opackets = 0;
1195 stats.ibytes -= port->stats.ibytes;
1196 port->stats.ibytes = 0;
1197 stats.obytes -= port->stats.obytes;
1198 port->stats.obytes = 0;
1199 stats.imissed -= port->stats.imissed;
1200 port->stats.imissed = 0;
1201 stats.oerrors -= port->stats.oerrors;
1202 port->stats.oerrors = 0;
1203 stats.rx_nombuf -= port->stats.rx_nombuf;
1204 port->stats.rx_nombuf = 0;
1205 stats.fdirmatch -= port->stats.fdirmatch;
1206 port->stats.fdirmatch = 0;
1207 stats.fdirmiss -= port->stats.fdirmiss;
1208 port->stats.fdirmiss = 0;
1210 total_recv += stats.ipackets;
1211 total_xmit += stats.opackets;
1212 total_rx_dropped += stats.imissed;
1213 total_tx_dropped += port->tx_dropped;
1214 total_rx_nombuf += stats.rx_nombuf;
1216 fwd_port_stats_display(pt_id, &stats);
1218 printf("\n %s Accumulated forward statistics for all ports"
1220 acc_stats_border, acc_stats_border);
1221 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1223 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1225 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1226 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1227 if (total_rx_nombuf > 0)
1228 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1229 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1231 acc_stats_border, acc_stats_border);
1232 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1234 printf("\n CPU cycles/packet=%u (total cycles="
1235 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1236 (unsigned int)(fwd_cycles / total_recv),
1237 fwd_cycles, total_recv);
1239 printf("\nDone.\n");
1244 dev_set_link_up(portid_t pid)
1246 if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1247 printf("\nSet link up fail.\n");
1251 dev_set_link_down(portid_t pid)
1253 if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1254 printf("\nSet link down fail.\n");
1258 all_ports_started(void)
1261 struct rte_port *port;
1263 for (pi = 0; pi < nb_ports; pi++) {
1265 /* Check if there is a port which is not started */
1266 if (port->port_status != RTE_PORT_STARTED)
1270 /* All ports are started */
1275 start_port(portid_t pid)
1277 int diag, need_check_link_status = 0;
1280 struct rte_port *port;
1281 struct ether_addr mac_addr;
1283 if (test_done == 0) {
1284 printf("Please stop forwarding first\n");
1288 if (init_fwd_streams() < 0) {
1289 printf("Fail from init_fwd_streams()\n");
1295 for (pi = 0; pi < nb_ports; pi++) {
1296 if (pid < nb_ports && pid != pi)
1300 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1301 RTE_PORT_HANDLING) == 0) {
1302 printf("Port %d is now not stopped\n", pi);
1306 if (port->need_reconfig > 0) {
1307 port->need_reconfig = 0;
1309 printf("Configuring Port %d (socket %u)\n", pi,
1311 /* configure port */
1312 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1315 if (rte_atomic16_cmpset(&(port->port_status),
1316 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1317 printf("Port %d can not be set back "
1318 "to stopped\n", pi);
1319 printf("Fail to configure port %d\n", pi);
1320 /* try to reconfigure port next time */
1321 port->need_reconfig = 1;
1325 if (port->need_reconfig_queues > 0) {
1326 port->need_reconfig_queues = 0;
1327 /* setup tx queues */
1328 for (qi = 0; qi < nb_txq; qi++) {
1329 if ((numa_support) &&
1330 (txring_numa[pi] != NUMA_NO_CONFIG))
1331 diag = rte_eth_tx_queue_setup(pi, qi,
1332 nb_txd, txring_numa[pi],
1335 diag = rte_eth_tx_queue_setup(pi, qi,
1336 nb_txd, port->socket_id,
1342 /* Failed to set up TX queue, return */
1343 if (rte_atomic16_cmpset(&(port->port_status),
1345 RTE_PORT_STOPPED) == 0)
1346 printf("Port %d can not be set back "
1347 "to stopped\n", pi);
1348 printf("Fail to configure port %d tx queues\n", pi);
1349 /* try to reconfigure queues next time */
1350 port->need_reconfig_queues = 1;
1353 /* setup rx queues */
1354 for (qi = 0; qi < nb_rxq; qi++) {
1355 if ((numa_support) &&
1356 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1357 struct rte_mempool * mp =
1358 mbuf_pool_find(rxring_numa[pi]);
1360 printf("Failed to setup RX queue:"
1361 "No mempool allocation"
1362 "on the socket %d\n",
1367 diag = rte_eth_rx_queue_setup(pi, qi,
1368 nb_rxd, rxring_numa[pi],
1369 &(port->rx_conf), mp);
1372 diag = rte_eth_rx_queue_setup(pi, qi,
1373 nb_rxd, port->socket_id,
1375 mbuf_pool_find(port->socket_id));
1381 /* Failed to set up RX queue, return */
1382 if (rte_atomic16_cmpset(&(port->port_status),
1384 RTE_PORT_STOPPED) == 0)
1385 printf("Port %d can not be set back "
1386 "to stopped\n", pi);
1387 printf("Fail to configure port %d rx queues\n", pi);
1388 /* try to reconfigure queues next time */
1389 port->need_reconfig_queues = 1;
1394 if (rte_eth_dev_start(pi) < 0) {
1395 printf("Fail to start port %d\n", pi);
1397 /* Fail to setup rx queue, return */
1398 if (rte_atomic16_cmpset(&(port->port_status),
1399 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1400 printf("Port %d can not be set back to "
1405 if (rte_atomic16_cmpset(&(port->port_status),
1406 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1407 printf("Port %d can not be set into started\n", pi);
1409 rte_eth_macaddr_get(pi, &mac_addr);
1410 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1411 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1412 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1413 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1415 /* At least one port started; need to check the link status */
1416 need_check_link_status = 1;
1419 if (need_check_link_status && !no_link_check)
1420 check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1422 printf("Please stop the ports first\n");
1429 stop_port(portid_t pid)
1432 struct rte_port *port;
1433 int need_check_link_status = 0;
1435 if (test_done == 0) {
1436 printf("Please stop forwarding first\n");
1443 printf("Stopping ports...\n");
1445 for (pi = 0; pi < nb_ports; pi++) {
1446 if (pid < nb_ports && pid != pi)
1450 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1451 RTE_PORT_HANDLING) == 0)
1454 rte_eth_dev_stop(pi);
1456 if (rte_atomic16_cmpset(&(port->port_status),
1457 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1458 printf("Port %d can not be set into stopped\n", pi);
1459 need_check_link_status = 1;
1461 if (need_check_link_status && !no_link_check)
1462 check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1468 close_port(portid_t pid)
1471 struct rte_port *port;
1473 if (test_done == 0) {
1474 printf("Please stop forwarding first\n");
1478 printf("Closing ports...\n");
1480 for (pi = 0; pi < nb_ports; pi++) {
1481 if (pid < nb_ports && pid != pi)
1485 if (rte_atomic16_cmpset(&(port->port_status),
1486 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1487 printf("Port %d is now not stopped\n", pi);
1491 rte_eth_dev_close(pi);
1493 if (rte_atomic16_cmpset(&(port->port_status),
1494 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1495 printf("Port %d can not be set into stopped\n", pi);
1502 all_ports_stopped(void)
1505 struct rte_port *port;
1507 for (pi = 0; pi < nb_ports; pi++) {
1509 if (port->port_status != RTE_PORT_STOPPED)
1521 for (pt_id = 0; pt_id < nb_ports; pt_id++) {
1522 printf("Stopping port %d...", pt_id);
1524 rte_eth_dev_close(pt_id);
1530 typedef void (*cmd_func_t)(void);
1531 struct pmd_test_command {
1532 const char *cmd_name;
1533 cmd_func_t cmd_func;
1536 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1538 /* Check the link status of all ports for up to 9s, then print the final status */
1540 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
1542 #define CHECK_INTERVAL 100 /* 100ms */
1543 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1544 uint8_t portid, count, all_ports_up, print_flag = 0;
1545 struct rte_eth_link link;
1547 printf("Checking link statuses...\n");
1549 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1551 for (portid = 0; portid < port_num; portid++) {
1552 if ((port_mask & (1 << portid)) == 0)
1554 memset(&link, 0, sizeof(link));
1555 rte_eth_link_get_nowait(portid, &link);
1556 /* print link status if flag set */
1557 if (print_flag == 1) {
1558 if (link.link_status)
1559 printf("Port %d Link Up - speed %u "
1560 "Mbps - %s\n", (uint8_t)portid,
1561 (unsigned)link.link_speed,
1562 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1563 ("full-duplex") : ("half-duplex\n"));
1565 printf("Port %d Link Down\n",
1569 /* clear all_ports_up flag if any link down */
1570 if (link.link_status == 0) {
1575 /* after finally printing all link status, get out */
1576 if (print_flag == 1)
1579 if (all_ports_up == 0) {
1581 rte_delay_ms(CHECK_INTERVAL);
1584 /* set the print_flag if all ports up or timeout */
1585 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1592 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1596 uint8_t mapping_found = 0;
1598 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1599 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1600 (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
1601 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1602 tx_queue_stats_mappings[i].queue_id,
1603 tx_queue_stats_mappings[i].stats_counter_id);
1610 port->tx_queue_stats_mapping_enabled = 1;
1615 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1619 uint8_t mapping_found = 0;
1621 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1622 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1623 (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
1624 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1625 rx_queue_stats_mappings[i].queue_id,
1626 rx_queue_stats_mappings[i].stats_counter_id);
1633 port->rx_queue_stats_mapping_enabled = 1;
1638 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1642 diag = set_tx_queue_stats_mapping_registers(pi, port);
1644 if (diag == -ENOTSUP) {
1645 port->tx_queue_stats_mapping_enabled = 0;
1646 printf("TX queue stats mapping not supported port id=%d\n", pi);
1649 rte_exit(EXIT_FAILURE,
1650 "set_tx_queue_stats_mapping_registers "
1651 "failed for port id=%d diag=%d\n",
1655 diag = set_rx_queue_stats_mapping_registers(pi, port);
1657 if (diag == -ENOTSUP) {
1658 port->rx_queue_stats_mapping_enabled = 0;
1659 printf("RX queue stats mapping not supported port id=%d\n", pi);
1662 rte_exit(EXIT_FAILURE,
1663 "set_rx_queue_stats_mapping_registers "
1664 "failed for port id=%d diag=%d\n",
1670 init_port_config(void)
1673 struct rte_port *port;
1675 for (pid = 0; pid < nb_ports; pid++) {
1677 port->dev_conf.rxmode = rx_mode;
1678 port->dev_conf.fdir_conf = fdir_conf;
1680 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1681 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1683 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1684 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1687 /* In SR-IOV mode, RSS mode is not available */
1688 if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1689 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1690 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1692 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1695 port->rx_conf.rx_thresh = rx_thresh;
1696 port->rx_conf.rx_free_thresh = rx_free_thresh;
1697 port->rx_conf.rx_drop_en = rx_drop_en;
1698 port->tx_conf.tx_thresh = tx_thresh;
1699 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1700 port->tx_conf.tx_free_thresh = tx_free_thresh;
1701 port->tx_conf.txq_flags = txq_flags;
1703 rte_eth_macaddr_get(pid, &port->eth_addr);
1705 map_port_queue_stats_mapping_registers(pid, port);
1706 #ifdef RTE_NIC_BYPASS
1707 rte_eth_dev_bypass_init(pid);
1712 const uint16_t vlan_tags[] = {
1713 0, 1, 2, 3, 4, 5, 6, 7,
1714 8, 9, 10, 11, 12, 13, 14, 15,
1715 16, 17, 18, 19, 20, 21, 22, 23,
1716 24, 25, 26, 27, 28, 29, 30, 31
1720 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
1725 * Builds up the correct configuration for dcb+vt based on the vlan tags array
1726 * given above, and the number of traffic classes available for use.
1728 if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
1729 struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
1730 struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
1732 /* VMDQ+DCB RX and TX configurations */
1733 vmdq_rx_conf.enable_default_pool = 0;
1734 vmdq_rx_conf.default_pool = 0;
1735 vmdq_rx_conf.nb_queue_pools =
1736 (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1737 vmdq_tx_conf.nb_queue_pools =
1738 (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1740 vmdq_rx_conf.nb_pool_maps = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
1741 for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
1742 vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[ i ];
1743 vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
1745 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1746 vmdq_rx_conf.dcb_queue[i] = i;
1747 vmdq_tx_conf.dcb_queue[i] = i;
1750 /* Set the multi-queue DCB mode of RX and TX */
1751 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1752 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1753 if (dcb_conf->pfc_en)
1754 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1756 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1758 (void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
1759 sizeof(struct rte_eth_vmdq_dcb_conf)));
1760 (void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
1761 sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
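/*
 * The loops above spread the 32 test VLAN IDs round-robin over the 16
 * or 32 VMDq pools (vlan_tags[i] -> pool i % nb_queue_pools) and map
 * each user priority 1:1 onto a DCB queue.
 */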
1764 struct rte_eth_dcb_rx_conf rx_conf;
1765 struct rte_eth_dcb_tx_conf tx_conf;
1767 /* queue mapping configuration of DCB RX and TX */
1768 if (dcb_conf->num_tcs == ETH_4_TCS)
1769 dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
1771 dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
1773 rx_conf.nb_tcs = dcb_conf->num_tcs;
1774 tx_conf.nb_tcs = dcb_conf->num_tcs;
1776 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1777 rx_conf.dcb_queue[i] = i;
1778 tx_conf.dcb_queue[i] = i;
1780 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
1781 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1782 if (dcb_conf->pfc_en)
1783 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1785 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1787 (void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
1788 sizeof(struct rte_eth_dcb_rx_conf)));
1789 (void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
1790 sizeof(struct rte_eth_dcb_tx_conf)));
1797 init_port_dcb_config(portid_t pid, struct dcb_config *dcb_conf)
1799 struct rte_eth_conf port_conf;
1800 struct rte_port *rte_port;
1805 /* RX and TX queue configuration in DCB mode */
1808 rx_free_thresh = 64;
1810 memset(&port_conf,0,sizeof(struct rte_eth_conf));
1811 /* Enter DCB configuration status */
1814 nb_vlan = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
1815 /* Set the DCB configuration for VT and non-VT modes */
1816 retval = get_eth_dcb_conf(&port_conf, dcb_conf);
1820 rte_port = &ports[pid];
1821 memcpy(&rte_port->dev_conf, &port_conf,sizeof(struct rte_eth_conf));
1823 rte_port->rx_conf.rx_thresh = rx_thresh;
1824 rte_port->rx_conf.rx_free_thresh = rx_free_thresh;
1825 rte_port->tx_conf.tx_thresh = tx_thresh;
1826 rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1827 rte_port->tx_conf.tx_free_thresh = tx_free_thresh;
1829 rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1830 for (i = 0; i < nb_vlan; i++) {
1831 rx_vft_set(pid, vlan_tags[i], 1);
1834 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1835 map_port_queue_stats_mapping_registers(pid, rte_port);
1837 rte_port->dcb_flag = 1;
1842 #ifdef RTE_EXEC_ENV_BAREMETAL
1847 main(int argc, char** argv)
1852 diag = rte_eal_init(argc, argv);
1854 rte_panic("Cannot init EAL\n");
1856 nb_ports = (portid_t) rte_eth_dev_count();
1858 rte_exit(EXIT_FAILURE, "No probed ethernet devices - "
1860 "CONFIG_RTE_LIBRTE_IGB_PMD=y and that "
1861 "CONFIG_RTE_LIBRTE_EM_PMD=y and that "
1862 "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in your "
1863 "configuration file\n");
1865 set_def_fwd_config();
1867 rte_panic("Empty set of forwarding logical cores - check the "
1868 "core mask supplied in the command parameters\n");
1873 launch_args_parse(argc, argv);
1875 if (nb_rxq > nb_txq)
1876 printf("Warning: nb_rxq=%d enables RSS configuration, "
1877 "but nb_txq=%d will prevent to fully test it.\n",
1881 if (start_port(RTE_PORT_ALL) != 0)
1882 rte_exit(EXIT_FAILURE, "Start ports failed\n");
1884 /* set all ports to promiscuous mode by default */
1885 for (port_id = 0; port_id < nb_ports; port_id++)
1886 rte_eth_promiscuous_enable(port_id);
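/*
 * Promiscuous mode makes each port accept the generated test traffic
 * whatever its destination MAC address, which most loopback and
 * peer-to-peer wiring setups rely on.
 */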
1888 #ifdef RTE_LIBRTE_CMDLINE
1889 if (interactive == 1) {
1891 printf("Start automatic packet forwarding\n");
1892 start_packet_forwarding(0);
1901 printf("No commandline core given, start packet forwarding\n");
1902 start_packet_forwarding(0);
1903 printf("Press enter to exit\n");
1904 rc = read(0, &c, 1);