4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <sys/types.h>
44 #include <sys/queue.h>
51 #include <rte_common.h>
52 #include <rte_byteorder.h>
54 #include <rte_debug.h>
55 #include <rte_cycles.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
59 #include <rte_launch.h>
60 #include <rte_tailq.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
70 #include <rte_interrupts.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
80 #include "mempool_osdep.h"
82 uint16_t verbose_level = 0; /**< Silent by default. */
84 /* use master core for command line ? */
85 uint8_t interactive = 0;
86 uint8_t auto_start = 0;
/* NOTE(review): both flags are presumably toggled by command-line parsing
 * elsewhere in the file — confirm against parameters.c. */
89 * NUMA support configuration.
90 * When set, the NUMA support attempts to dispatch the allocation of the
91 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
92 * probed ports among the CPU sockets 0 and 1.
93 * Otherwise, all memory is allocated from CPU socket 0.
95 uint8_t numa_support = 0; /**< No numa support by default */
98 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
101 uint8_t socket_num = UMA_NO_CONFIG;
104 * Use ANONYMOUS mapped memory (might be not physically contiguous) for mbufs.
109 * Record the Ethernet address of peer target ports to which packets are
111 * Must be instantiated with the ethernet addresses of peer traffic generator
114 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
115 portid_t nb_peer_eth_addrs = 0;
118 * Probed Target Environment.
120 struct rte_port *ports; /**< For all probed ethernet ports. */
121 portid_t nb_ports; /**< Number of probed ethernet ports. */
122 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
123 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
126 * Test Forwarding Configuration.
127 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
128 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
130 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
131 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
132 portid_t nb_cfg_ports; /**< Number of configured ports. */
133 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
135 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
136 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
138 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
139 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
142 * Forwarding engines.
/* Table of all available forwarding engines; the list visible here is
 * truncated — additional engines sit on the elided lines. */
144 struct fwd_engine * fwd_engines[] = {
147 &mac_retry_fwd_engine,
152 #ifdef RTE_LIBRTE_IEEE1588
153 &ieee1588_fwd_engine,
158 struct fwd_config cur_fwd_config;
159 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
161 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
162 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
163 * specified on command-line. */
166 * Configuration of packet segments used by the "txonly" processing engine.
168 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
169 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
170 TXONLY_DEF_PACKET_LEN,
172 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
174 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
175 uint16_t mb_mempool_cache = DEF_PKT_BURST; /**< Size of mbuf mempool cache. */
177 /* current configuration is in DCB or not, 0 means it is not in DCB mode */
178 uint8_t dcb_config = 0;
180 /* Whether the dcb is in testing status */
181 uint8_t dcb_test = 0;
183 /* DCB on and VT on mapping is default */
184 enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
187 * Configurable number of RX/TX queues.
189 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
190 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
193 * Configurable number of RX/TX ring descriptors.
195 #define RTE_TEST_RX_DESC_DEFAULT 128
196 #define RTE_TEST_TX_DESC_DEFAULT 512
197 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
198 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
201 * Configurable values of RX and TX ring threshold registers.
203 #define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
204 #define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
205 #define RX_WTHRESH 4 /**< Default value of RX write-back threshold register. */
207 #define TX_PTHRESH 36 /**< Default value of TX prefetch threshold register. */
208 #define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
209 #define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */
/* Default RX/TX ring threshold register values, applied at queue setup. */
211 struct rte_eth_thresh rx_thresh = {
212 .pthresh = RX_PTHRESH,
213 .hthresh = RX_HTHRESH,
214 .wthresh = RX_WTHRESH,
217 struct rte_eth_thresh tx_thresh = {
218 .pthresh = TX_PTHRESH,
219 .hthresh = TX_HTHRESH,
220 .wthresh = TX_WTHRESH,
224 * Configurable value of RX free threshold.
226 uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */
229 * Configurable value of RX drop enable.
231 uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */
234 * Configurable value of TX free threshold.
236 uint16_t tx_free_thresh = 0; /* Use default values. */
239 * Configurable value of TX RS bit threshold.
241 uint16_t tx_rs_thresh = 0; /* Use default values. */
244 * Configurable value of TX queue flags.
246 uint32_t txq_flags = 0; /* No flags set. */
249 * Receive Side Scaling (RSS) configuration.
251 uint16_t rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6; /* RSS IP by default. */
254 * Port topology configuration
256 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
259 * Avoid flushing all the RX streams before starting forwarding.
261 uint8_t no_flush_rx = 0; /* flush by default */
264 * NIC bypass mode configuration options.
266 #ifdef RTE_NIC_BYPASS
268 /* The NIC bypass watchdog timeout. */
269 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
274 * Ethernet device configuration.
276 struct rte_eth_rxmode rx_mode = {
277 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
279 .header_split = 0, /**< Header Split disabled. */
280 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
281 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
282 .hw_vlan_strip = 1, /**< VLAN strip enabled. */
283 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
284 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
285 .hw_strip_crc = 0, /**< CRC stripping by hardware disabled. */
/* Flow Director disabled by default; flexbytes offset 0x6 is in 16-bit
 * words from the start of the frame (i.e. byte 12, the EtherType). */
288 struct rte_fdir_conf fdir_conf = {
289 .mode = RTE_FDIR_MODE_NONE,
290 .pballoc = RTE_FDIR_PBALLOC_64K,
291 .status = RTE_FDIR_REPORT_STATUS,
292 .flexbytes_offset = 0x6,
296 static volatile int test_done = 1; /* stop packet forwarding when set to 1. */
298 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
299 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
301 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
302 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
304 uint16_t nb_tx_queue_stats_mappings = 0;
305 uint16_t nb_rx_queue_stats_mappings = 0;
307 /* Forward function declarations */
308 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
309 static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);
312 * Check if all the ports are started.
313 * If yes, return positive value. If not, return zero.
315 static int all_ports_started(void);
318 * Setup default configuration.
/*
 * Build the default forwarding lcore list: every lcore enabled in EAL,
 * except the master lcore (kept free for the interactive command line),
 * is recorded in fwd_lcores_cpuids[].
 */
321 set_default_fwd_lcores_config(void)
327 for (i = 0; i < RTE_MAX_LCORE; i++) {
328 if (! rte_lcore_is_enabled(i))
330 if (i == rte_get_master_lcore())
/* enabled, non-master lcore: add it to the forwarding set */
332 fwd_lcores_cpuids[nb_lc++] = i;
334 nb_lcores = (lcoreid_t) nb_lc;
335 nb_cfg_lcores = nb_lcores;
/*
 * Give every possible peer port a default locally-administered MAC
 * address whose last byte is the port index.
 */
340 set_def_peer_eth_addrs(void)
344 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
345 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
346 peer_eth_addrs[i].addr_bytes[5] = i;
/*
 * Default port configuration: forward on every probed port, in probe
 * order (fwd_ports_ids[i] == i).
 */
351 set_default_fwd_ports_config(void)
355 for (pt_id = 0; pt_id < nb_ports; pt_id++)
356 fwd_ports_ids[pt_id] = pt_id;
358 nb_cfg_ports = nb_ports;
359 nb_fwd_ports = nb_ports;
/* Reset lcores, peer addresses and ports to their default configuration. */
363 set_def_fwd_config(void)
365 set_default_fwd_lcores_config();
366 set_def_peer_eth_addrs();
367 set_default_fwd_ports_config();
371 * Configuration initialisation done once at init time.
/* Opaque argument handed to testpmd_mbuf_ctor for each mbuf. */
373 struct mbuf_ctor_arg {
374 uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
375 uint16_t seg_buf_size; /**< size of data segment in mbuf. */
/* Opaque argument handed to testpmd_mbuf_pool_ctor once per pool. */
378 struct mbuf_pool_ctor_arg {
379 uint16_t seg_buf_size; /**< size of data segment in mbuf. */
/*
 * Per-object constructor run by the mempool for every mbuf: points the
 * mbuf's data buffer just past the (cache-aligned) rte_mbuf header and
 * initialises the packet metadata fields.
 */
383 testpmd_mbuf_ctor(struct rte_mempool *mp,
386 __attribute__((unused)) unsigned i)
388 struct mbuf_ctor_arg *mb_ctor_arg;
391 mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
392 mb = (struct rte_mbuf *) raw_mbuf;
394 mb->type = RTE_MBUF_PKT;
/* buffer lives inside the same mempool object, right after the header */
396 mb->buf_addr = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
397 mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
398 mb_ctor_arg->seg_buf_offset);
399 mb->buf_len = mb_ctor_arg->seg_buf_size;
/* NOTE(review): mb->type is also assigned above at line 394 — the second
 * assignment is redundant; safe to drop one of the two. */
400 mb->type = RTE_MBUF_PKT;
402 mb->pkt.data = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
404 mb->pkt.vlan_macip.data = 0;
405 mb->pkt.hash.rss = 0;
/*
 * Pool constructor: stores the data-room size in the pool's private
 * area so rte_pktmbuf_* helpers know the buffer geometry.  Bails out
 * (with a message) if the pool was created with too little private space.
 */
409 testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
412 struct mbuf_pool_ctor_arg *mbp_ctor_arg;
413 struct rte_pktmbuf_pool_private *mbp_priv;
415 if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
416 printf("%s(%s) private_data_size %d < %d\n",
417 __func__, mp->name, (int) mp->private_data_size,
418 (int) sizeof(struct rte_pktmbuf_pool_private));
421 mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
422 mbp_priv = rte_mempool_get_priv(mp);
423 mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
/*
 * Create the mbuf pool for one socket.  Element size = cache-aligned
 * rte_mbuf header + headroom + data segment.  Three creation back-ends
 * are selected by build/runtime configuration: Xen grant-alloc pool,
 * anonymous-mmap pool, or the regular rte_mempool.  Exits the whole
 * application on failure.
 */
427 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
428 unsigned int socket_id)
430 char pool_name[RTE_MEMPOOL_NAMESIZE];
431 struct rte_mempool *rte_mp;
432 struct mbuf_pool_ctor_arg mbp_ctor_arg;
433 struct mbuf_ctor_arg mb_ctor_arg;
436 mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
/* data segment starts after the cache-line-rounded mbuf header */
438 mb_ctor_arg.seg_buf_offset =
439 (uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
440 mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
441 mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
442 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
444 #ifdef RTE_LIBRTE_PMD_XENVIRT
445 rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
446 (unsigned) mb_mempool_cache,
447 sizeof(struct rte_pktmbuf_pool_private),
448 testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
449 testpmd_mbuf_ctor, &mb_ctor_arg,
456 rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
457 (unsigned) mb_mempool_cache,
458 sizeof(struct rte_pktmbuf_pool_private),
459 testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
460 testpmd_mbuf_ctor, &mb_ctor_arg,
463 rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size,
464 (unsigned) mb_mempool_cache,
465 sizeof(struct rte_pktmbuf_pool_private),
466 testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
467 testpmd_mbuf_ctor, &mb_ctor_arg,
472 if (rte_mp == NULL) {
473 rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
474 "failed\n", socket_id);
475 } else if (verbose_level > 0) {
476 rte_mempool_dump(rte_mp);
481 * Check given socket id is valid or not with NUMA mode,
482 * if valid, return 0, else return -1
485 check_socket_id(const unsigned int socket_id)
/* warn only once per process, and only when NUMA support is enabled */
487 static int warning_once = 0;
489 if (socket_id >= MAX_SOCKET) {
490 if (!warning_once && numa_support)
491 printf("Warning: NUMA should be configured manually by"
492 " using --port-numa-config and"
493 " --ring-numa-config parameters along with"
/*
 * (init_config — function header is on an elided line above.)
 * One-time initialisation: allocates per-lcore contexts, creates the
 * mbuf pool(s) (one per socket under NUMA, one on socket 0 otherwise),
 * allocates the rte_port array, binds each lcore to a pool, and sets
 * up the forwarding streams.  Exits the application on any failure.
 */
505 struct rte_port *port;
506 struct rte_mempool *mbp;
507 unsigned int nb_mbuf_per_pool;
509 uint8_t port_per_socket[MAX_SOCKET];
511 memset(port_per_socket,0,MAX_SOCKET);
512 /* Configuration of logical cores. */
513 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
514 sizeof(struct fwd_lcore *) * nb_lcores,
516 if (fwd_lcores == NULL) {
517 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
518 "failed\n", nb_lcores);
520 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
521 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
522 sizeof(struct fwd_lcore),
524 if (fwd_lcores[lc_id] == NULL) {
525 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
528 fwd_lcores[lc_id]->cpuid_idx = lc_id;
532 * Create pools of mbuf.
533 * If NUMA support is disabled, create a single pool of mbuf in
534 * socket 0 memory by default.
535 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
537 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
538 * nb_txd can be configured at run time.
540 if (param_total_num_mbufs)
541 nb_mbuf_per_pool = param_total_num_mbufs;
/* default sizing: worst-case descriptors + per-lcore caches + a burst */
543 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
544 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
547 nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
551 if (socket_num == UMA_NO_CONFIG)
552 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
554 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
558 /* Configuration of Ethernet ports. */
559 ports = rte_zmalloc("testpmd: ports",
560 sizeof(struct rte_port) * nb_ports,
563 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
564 "failed\n", nb_ports);
567 for (pid = 0; pid < nb_ports; pid++) {
569 rte_eth_dev_info_get(pid, &port->dev_info);
/* count ports per socket so NUMA pool sizing can be proportional */
572 if (port_numa[pid] != NUMA_NO_CONFIG)
573 port_per_socket[port_numa[pid]]++;
575 uint32_t socket_id = rte_eth_dev_socket_id(pid);
577 /* if socket_id is invalid, set to 0 */
578 if (check_socket_id(socket_id) < 0)
580 port_per_socket[socket_id]++;
584 /* set flag to initialize port/queue */
585 port->need_reconfig = 1;
586 port->need_reconfig_queues = 1;
591 unsigned int nb_mbuf;
593 if (param_total_num_mbufs)
594 nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
596 for (i = 0; i < MAX_SOCKET; i++) {
597 nb_mbuf = (nb_mbuf_per_pool *
600 mbuf_pool_create(mbuf_data_size,
607 * Records which Mbuf pool to use by each logical core, if needed.
609 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
610 mbp = mbuf_pool_find(rte_lcore_to_socket_id(lc_id));
/* fall back to the socket-0 pool when the lcore's socket has none */
612 mbp = mbuf_pool_find(0);
613 fwd_lcores[lc_id]->mbp = mbp;
616 /* Configuration of packet forwarding streams. */
617 if (init_fwd_streams() < 0)
618 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
/*
 * (Re)build the forwarding stream array.  Validates that the requested
 * queue counts fit each port's capabilities, assigns each port a socket
 * id (NUMA config, device socket, or --socket-num), then frees any
 * previous stream array and allocates nb_ports * nb_rxq new streams.
 */
622 init_fwd_streams(void)
625 struct rte_port *port;
626 streamid_t sm_id, nb_fwd_streams_new;
628 /* set socket id according to numa or not */
629 for (pid = 0; pid < nb_ports; pid++) {
631 if (nb_rxq > port->dev_info.max_rx_queues) {
632 printf("Fail: nb_rxq(%d) is greater than "
633 "max_rx_queues(%d)\n", nb_rxq,
634 port->dev_info.max_rx_queues);
637 if (nb_txq > port->dev_info.max_tx_queues) {
638 printf("Fail: nb_txq(%d) is greater than "
639 "max_tx_queues(%d)\n", nb_txq,
640 port->dev_info.max_tx_queues);
/* NUMA enabled: prefer the explicit --port-numa-config setting */
644 if (port_numa[pid] != NUMA_NO_CONFIG)
645 port->socket_id = port_numa[pid];
647 port->socket_id = rte_eth_dev_socket_id(pid);
649 /* if socket_id is invalid, set to 0 */
650 if (check_socket_id(port->socket_id) < 0)
655 if (socket_num == UMA_NO_CONFIG)
658 port->socket_id = socket_num;
/* nothing to do when the stream count is unchanged */
662 nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
663 if (nb_fwd_streams_new == nb_fwd_streams)
666 if (fwd_streams != NULL) {
667 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
668 if (fwd_streams[sm_id] == NULL)
670 rte_free(fwd_streams[sm_id]);
671 fwd_streams[sm_id] = NULL;
673 rte_free(fwd_streams);
678 nb_fwd_streams = nb_fwd_streams_new;
679 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
680 sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
681 if (fwd_streams == NULL)
682 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
683 "failed\n", nb_fwd_streams);
685 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
686 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
687 sizeof(struct fwd_stream), CACHE_LINE_SIZE);
688 if (fwd_streams[sm_id] == NULL)
689 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
696 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
/*
 * Print a summary of the burst-size distribution: total bursts and the
 * two most frequent burst sizes, as percentages of the total.
 */
698 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
700 unsigned int total_burst;
701 unsigned int nb_burst;
702 unsigned int burst_stats[3];
703 uint16_t pktnb_stats[3];
705 int burst_percent[3];
708 * First compute the total number of packet bursts and the
709 * two highest numbers of bursts of the same number of packets.
712 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
713 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
714 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
715 nb_burst = pbs->pkt_burst_spread[nb_pkt];
718 total_burst += nb_burst;
/* new maximum: demote the previous leader to second place */
719 if (nb_burst > burst_stats[0]) {
720 burst_stats[1] = burst_stats[0];
721 pktnb_stats[1] = pktnb_stats[0];
722 burst_stats[0] = nb_burst;
723 pktnb_stats[0] = nb_pkt;
726 if (total_burst == 0)
728 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
729 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
730 burst_percent[0], (int) pktnb_stats[0]);
/* single burst size accounts for everything: short form */
731 if (burst_stats[0] == total_burst) {
735 if (burst_stats[0] + burst_stats[1] == total_burst) {
736 printf(" + %d%% of %d pkts]\n",
737 100 - burst_percent[0], pktnb_stats[1]);
740 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
741 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
742 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
743 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
746 printf(" + %d%% of %d pkts + %d%% of others]\n",
747 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
749 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
/*
 * Print the forwarding statistics of one port.  Two layouts exist:
 * a left-aligned one when no per-queue stats mapping is enabled, and
 * a right-aligned one (plus per-stats-register lines) when it is.
 */
752 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
754 struct rte_port *port;
757 static const char *fwd_stats_border = "----------------------";
759 port = &ports[port_id];
760 printf("\n %s Forward statistics for port %-2d %s\n",
761 fwd_stats_border, port_id, fwd_stats_border);
763 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
764 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
766 stats->ipackets, stats->ierrors,
767 (uint64_t) (stats->ipackets + stats->ierrors),
769 if (cur_fwd_eng == &csum_fwd_engine)
770 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
771 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
773 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
775 stats->opackets, port->tx_dropped,
776 (uint64_t) (stats->opackets + port->tx_dropped));
778 if (stats->rx_nombuf > 0)
779 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
/* queue-stats-mapping layout */
783 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
785 stats->ipackets, stats->ierrors,
786 (uint64_t) (stats->ipackets + stats->ierrors),
788 if (cur_fwd_eng == &csum_fwd_engine)
789 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
790 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
792 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
794 stats->opackets, port->tx_dropped,
795 (uint64_t) (stats->opackets + port->tx_dropped));
797 if (stats->rx_nombuf > 0)
798 printf(" RX-nombufs:%14"PRIu64"\n", stats->rx_nombuf);
801 /* Display statistics of XON/XOFF pause frames, if any. */
802 if ((stats->tx_pause_xon | stats->rx_pause_xon |
803 stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) {
804 printf(" RX-XOFF: %-14"PRIu64" RX-XON: %-14"PRIu64"\n",
805 stats->rx_pause_xoff, stats->rx_pause_xon);
806 printf(" TX-XOFF: %-14"PRIu64" TX-XON: %-14"PRIu64"\n",
807 stats->tx_pause_xoff, stats->tx_pause_xon);
810 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
812 pkt_burst_stats_display("RX",
813 &port->rx_stream->rx_burst_stats);
815 pkt_burst_stats_display("TX",
816 &port->tx_stream->tx_burst_stats);
819 if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
820 printf(" Fdirmiss:%14"PRIu64" Fdirmatch:%14"PRIu64"\n",
824 if (port->rx_queue_stats_mapping_enabled) {
826 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
827 printf(" Stats reg %2d RX-packets:%14"PRIu64
828 " RX-errors:%14"PRIu64
829 " RX-bytes:%14"PRIu64"\n",
830 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
834 if (port->tx_queue_stats_mapping_enabled) {
835 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
836 printf(" Stats reg %2d TX-packets:%14"PRIu64
837 " TX-bytes:%14"PRIu64"\n",
838 i, stats->q_opackets[i], stats->q_obytes[i]);
842 printf(" %s--------------------------------%s\n",
843 fwd_stats_border, fwd_stats_border);
/*
 * Print the statistics of one forwarding stream; silently returns when
 * the stream saw no traffic at all (no RX, no TX, nothing dropped).
 */
847 fwd_stream_stats_display(streamid_t stream_id)
849 struct fwd_stream *fs;
850 static const char *fwd_top_stats_border = "-------";
852 fs = fwd_streams[stream_id];
853 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
854 (fs->fwd_dropped == 0))
856 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
857 "TX Port=%2d/Queue=%2d %s\n",
858 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
859 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
860 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
861 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
863 /* if checksum mode */
864 if (cur_fwd_eng == &csum_fwd_engine) {
865 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
866 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
869 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
870 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
871 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
/*
 * Drain all RX queues of all forwarding ports, freeing every received
 * mbuf, so a forwarding run starts from empty queues.  Two passes with
 * a 10 ms pause catch packets still in flight after the first sweep.
 */
876 flush_fwd_rx_queues(void)
878 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
886 for (j = 0; j < 2; j++) {
887 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
888 for (rxq = 0; rxq < nb_rxq; rxq++) {
889 port_id = fwd_ports_ids[rxp];
891 nb_rx = rte_eth_rx_burst(port_id, rxq,
892 pkts_burst, MAX_PKT_BURST);
893 for (i = 0; i < nb_rx; i++)
894 rte_pktmbuf_free(pkts_burst[i]);
898 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
/*
 * Forwarding main loop of one lcore: repeatedly run the engine's
 * packet_fwd callback over this lcore's slice of the stream array
 * until fc->stopped is raised by stop_packet_forwarding().
 */
903 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
905 struct fwd_stream **fsm;
909 fsm = &fwd_streams[fc->stream_idx];
910 nb_fs = fc->stream_nb;
912 for (sm_id = 0; sm_id < nb_fs; sm_id++)
913 (*pkt_fwd)(fsm[sm_id]);
914 } while (! fc->stopped);
/* lcore entry point: run the currently configured forwarding engine. */
918 start_pkt_forward_on_core(void *fwd_arg)
920 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
921 cur_fwd_config.fwd_eng->packet_fwd);
926 * Run the TXONLY packet forwarding engine to send a single burst of packets.
927 * Used to start communication flows in network loopback test configurations.
930 run_one_txonly_burst_on_core(void *fwd_arg)
932 struct fwd_lcore *fwd_lc;
933 struct fwd_lcore tmp_lcore;
935 fwd_lc = (struct fwd_lcore *) fwd_arg;
/* stopped == 1 makes the do/while loop in run_pkt_fwd_on_lcore run once */
937 tmp_lcore.stopped = 1;
938 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
943 * Launch packet forwarding:
944 * - Setup per-port forwarding context.
945 * - launch logical cores with their forwarding configuration.
948 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
950 port_fwd_begin_t port_fwd_begin;
955 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
956 if (port_fwd_begin != NULL) {
957 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
958 (*port_fwd_begin)(fwd_ports_ids[i]);
960 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
961 lc_id = fwd_lcores_cpuids[i];
/* in interactive mode the master lcore stays on the command line */
962 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
963 fwd_lcores[i]->stopped = 0;
964 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
965 fwd_lcores[i], lc_id);
967 printf("launch lcore %u failed - diag=%d\n",
974 * Launch packet forwarding configuration.
/*
 * Start a forwarding run: validate state (all ports started, not already
 * forwarding, DCB constraints), optionally flush stale RX packets, snapshot
 * per-port stats as the baseline, zero per-stream counters, optionally fire
 * one TXONLY burst first, then launch the forwarding lcores.
 */
977 start_packet_forwarding(int with_tx_first)
979 port_fwd_begin_t port_fwd_begin;
980 port_fwd_end_t port_fwd_end;
981 struct rte_port *port;
986 if (all_ports_started() == 0) {
987 printf("Not all ports were started\n");
990 if (test_done == 0) {
991 printf("Packet forwarding already started\n");
/* DCB mode sanity checks over all forwarding ports */
995 for (i = 0; i < nb_fwd_ports; i++) {
996 pt_id = fwd_ports_ids[i];
997 port = &ports[pt_id];
998 if (!port->dcb_flag) {
999 printf("In DCB mode, all forwarding ports must "
1000 "be configured in this mode.\n");
1004 if (nb_fwd_lcores == 1) {
1005 printf("In DCB mode,the nb forwarding cores "
1006 "should be larger than 1.\n");
1013 flush_fwd_rx_queues();
1016 rxtx_config_display();
/* snapshot current HW stats as the baseline for this run */
1018 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1019 pt_id = fwd_ports_ids[i];
1020 port = &ports[pt_id];
1021 rte_eth_stats_get(pt_id, &port->stats);
1022 port->tx_dropped = 0;
1024 map_port_queue_stats_mapping_registers(pt_id, port);
1026 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1027 fwd_streams[sm_id]->rx_packets = 0;
1028 fwd_streams[sm_id]->tx_packets = 0;
1029 fwd_streams[sm_id]->fwd_dropped = 0;
1030 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1031 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1033 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1034 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1035 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1036 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1037 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1039 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1040 fwd_streams[sm_id]->core_cycles = 0;
/* optional single TXONLY burst to prime loopback topologies */
1043 if (with_tx_first) {
1044 port_fwd_begin = tx_only_engine.port_fwd_begin;
1045 if (port_fwd_begin != NULL) {
1046 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1047 (*port_fwd_begin)(fwd_ports_ids[i]);
1049 launch_packet_forwarding(run_one_txonly_burst_on_core);
1050 rte_eal_mp_wait_lcore();
1051 port_fwd_end = tx_only_engine.port_fwd_end;
1052 if (port_fwd_end != NULL) {
1053 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1054 (*port_fwd_end)(fwd_ports_ids[i]);
1057 launch_packet_forwarding(start_pkt_forward_on_core);
/*
 * Stop a forwarding run: signal every forwarding lcore to stop, wait for
 * them, run the engine's port_fwd_end hook, aggregate per-stream counters
 * into the ports, then print per-port deltas (HW stats minus the baseline
 * snapshot taken in start_packet_forwarding) and the accumulated totals.
 *
 * Bug fix: after subtracting the fdirmatch/fdirmiss baselines, the code
 * reset port->stats.rx_nombuf (already reset above) instead of
 * port->stats.fdirmatch / port->stats.fdirmiss — so the fdir baselines
 * were never cleared and later runs reported wrong fdir deltas.
 */
1061 stop_packet_forwarding(void)
1063 struct rte_eth_stats stats;
1064 struct rte_port *port;
1065 port_fwd_end_t port_fwd_end;
1070 uint64_t total_recv;
1071 uint64_t total_xmit;
1072 uint64_t total_rx_dropped;
1073 uint64_t total_tx_dropped;
1074 uint64_t total_rx_nombuf;
1075 uint64_t tx_dropped;
1076 uint64_t rx_bad_ip_csum;
1077 uint64_t rx_bad_l4_csum;
1078 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1079 uint64_t fwd_cycles;
1081 static const char *acc_stats_border = "+++++++++++++++";
1083 if (all_ports_started() == 0) {
1084 printf("Not all ports were started\n");
1088 printf("Packet forwarding not started\n");
/* signal the lcores, then block until they have all returned */
1091 printf("Telling cores to stop...");
1092 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1093 fwd_lcores[lc_id]->stopped = 1;
1094 printf("\nWaiting for lcores to finish...\n");
1095 rte_eal_mp_wait_lcore();
1096 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1097 if (port_fwd_end != NULL) {
1098 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1099 pt_id = fwd_ports_ids[i];
1100 (*port_fwd_end)(pt_id);
1103 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
/* fold per-stream counters into the owning ports */
1106 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1107 if (cur_fwd_config.nb_fwd_streams >
1108 cur_fwd_config.nb_fwd_ports) {
1109 fwd_stream_stats_display(sm_id);
1110 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1111 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1113 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1115 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1118 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1119 tx_dropped = (uint64_t) (tx_dropped +
1120 fwd_streams[sm_id]->fwd_dropped);
1121 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1124 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1125 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1126 fwd_streams[sm_id]->rx_bad_ip_csum);
1127 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1131 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1132 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1133 fwd_streams[sm_id]->rx_bad_l4_csum);
1134 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1137 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1138 fwd_cycles = (uint64_t) (fwd_cycles +
1139 fwd_streams[sm_id]->core_cycles);
1144 total_rx_dropped = 0;
1145 total_tx_dropped = 0;
1146 total_rx_nombuf = 0;
/* per port: read HW stats, subtract the start-of-run baseline, and
 * clear the baseline so the next run starts fresh */
1147 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1148 pt_id = fwd_ports_ids[i];
1150 port = &ports[pt_id];
1151 rte_eth_stats_get(pt_id, &stats);
1152 stats.ipackets -= port->stats.ipackets;
1153 port->stats.ipackets = 0;
1154 stats.opackets -= port->stats.opackets;
1155 port->stats.opackets = 0;
1156 stats.ibytes -= port->stats.ibytes;
1157 port->stats.ibytes = 0;
1158 stats.obytes -= port->stats.obytes;
1159 port->stats.obytes = 0;
1160 stats.ierrors -= port->stats.ierrors;
1161 port->stats.ierrors = 0;
1162 stats.oerrors -= port->stats.oerrors;
1163 port->stats.oerrors = 0;
1164 stats.rx_nombuf -= port->stats.rx_nombuf;
1165 port->stats.rx_nombuf = 0;
1166 stats.fdirmatch -= port->stats.fdirmatch;
1167 port->stats.fdirmatch = 0;
1168 stats.fdirmiss -= port->stats.fdirmiss;
1169 port->stats.fdirmiss = 0;
1171 total_recv += stats.ipackets;
1172 total_xmit += stats.opackets;
1173 total_rx_dropped += stats.ierrors;
1174 total_tx_dropped += port->tx_dropped;
1175 total_rx_nombuf += stats.rx_nombuf;
1177 fwd_port_stats_display(pt_id, &stats);
1179 printf("\n %s Accumulated forward statistics for all ports"
1181 acc_stats_border, acc_stats_border);
1182 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1184 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1186 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1187 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1188 if (total_rx_nombuf > 0)
1189 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1190 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1192 acc_stats_border, acc_stats_border);
1193 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1195 printf("\n CPU cycles/packet=%u (total cycles="
1196 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1197 (unsigned int)(fwd_cycles / total_recv),
1198 fwd_cycles, total_recv);
1200 printf("\nDone.\n");
/*
 * Return non-zero when every probed port is in RTE_PORT_STARTED state,
 * zero as soon as one port is found not started.
 */
1205 all_ports_started(void)
1208 struct rte_port *port;
1210 for (pi = 0; pi < nb_ports; pi++) {
1212 /* Check if there is a port which is not started */
1213 if (port->port_status != RTE_PORT_STARTED)
1217 /* No port is not started */
/*
 * start_port() - (re)configure and start port `pid`, or every probed port
 * when pid == RTE_PORT_ALL.
 *
 * Port state transitions are guarded with rte_atomic16_cmpset() on
 * port->port_status (STOPPED -> HANDLING -> STARTED) so that concurrent
 * commands cannot race on the same port.  Queue setup honours the
 * per-ring NUMA configuration when numa_support is enabled, falling back
 * to the port's own socket otherwise.  After at least one port starts,
 * link status of all ports is checked.
 */
1222 start_port(portid_t pid)
1224 int diag, need_check_link_status = 0;
1227 struct rte_port *port;
/* Refuse to reconfigure ports while forwarding is active. */
1229 if (test_done == 0) {
1230 printf("Please stop forwarding first\n");
1234 if (init_fwd_streams() < 0) {
1235 printf("Fail from init_fwd_streams()\n");
1241 for (pi = 0; pi < nb_ports; pi++) {
/* A specific port was requested: skip all others. */
1242 if (pid < nb_ports && pid != pi)
/* Atomically claim the port; only a STOPPED port may be started. */
1246 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1247 RTE_PORT_HANDLING) == 0) {
1248 printf("Port %d is now not stopped\n", pi);
1252 if (port->need_reconfig > 0) {
1253 port->need_reconfig = 0;
1255 printf("Configuring Port %d (socket %u)\n", pi,
1257 /* configure port */
1258 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
/* On failure, roll the state back and retry the configure next time. */
1261 if (rte_atomic16_cmpset(&(port->port_status),
1262 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1263 printf("Port %d can not be set back "
1264 "to stopped\n", pi);
1265 printf("Fail to configure port %d\n", pi);
1266 /* try to reconfigure port next time */
1267 port->need_reconfig = 1;
1271 if (port->need_reconfig_queues > 0) {
1272 port->need_reconfig_queues = 0;
1273 /* setup tx queues */
1274 for (qi = 0; qi < nb_txq; qi++) {
/* Use the explicitly configured TX-ring socket when one was given. */
1275 if ((numa_support) &&
1276 (txring_numa[pi] != NUMA_NO_CONFIG))
1277 diag = rte_eth_tx_queue_setup(pi, qi,
1278 nb_txd,txring_numa[pi],
1281 diag = rte_eth_tx_queue_setup(pi, qi,
1282 nb_txd,port->socket_id,
1288 /* Fail to setup tx queue, return */
1289 if (rte_atomic16_cmpset(&(port->port_status),
1291 RTE_PORT_STOPPED) == 0)
1292 printf("Port %d can not be set back "
1293 "to stopped\n", pi);
1294 printf("Fail to configure port %d tx queues\n", pi);
1295 /* try to reconfigure queues next time */
1296 port->need_reconfig_queues = 1;
1299 /* setup rx queues */
1300 for (qi = 0; qi < nb_rxq; qi++) {
1301 if ((numa_support) &&
1302 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
/* RX queues additionally need an mbuf pool on the requested socket. */
1303 struct rte_mempool * mp =
1304 mbuf_pool_find(rxring_numa[pi]);
1306 printf("Failed to setup RX queue:"
1307 "No mempool allocation"
1308 "on the socket %d\n",
1313 diag = rte_eth_rx_queue_setup(pi, qi,
1314 nb_rxd,rxring_numa[pi],
1315 &(port->rx_conf),mp);
1318 diag = rte_eth_rx_queue_setup(pi, qi,
1319 nb_rxd,port->socket_id,
1321 mbuf_pool_find(port->socket_id));
1327 /* Fail to setup rx queue, return */
1328 if (rte_atomic16_cmpset(&(port->port_status),
1330 RTE_PORT_STOPPED) == 0)
1331 printf("Port %d can not be set back "
1332 "to stopped\n", pi);
1333 printf("Fail to configure port %d rx queues\n", pi);
1334 /* try to reconfigure queues next time */
1335 port->need_reconfig_queues = 1;
/* start port */
1340 if (rte_eth_dev_start(pi) < 0) {
1341 printf("Fail to start port %d\n", pi);
1343 /* Fail to setup rx queue, return */
1344 if (rte_atomic16_cmpset(&(port->port_status),
1345 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1346 printf("Port %d can not be set back to "
/* Started successfully: publish the STARTED state. */
1351 if (rte_atomic16_cmpset(&(port->port_status),
1352 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1353 printf("Port %d can not be set into started\n", pi);
1355 /* at least one port started, need checking link status */
1356 need_check_link_status = 1;
1359 if (need_check_link_status)
1360 check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1362 printf("Please stop the ports first\n");
/*
 * stop_port() - stop port `pid`, or every probed port when
 * pid == RTE_PORT_ALL.  Only a STARTED port is acted on; the transition
 * STARTED -> HANDLING -> STOPPED is guarded by rte_atomic16_cmpset() so
 * concurrent commands cannot race.  Link status of all ports is
 * rechecked once at least one port was stopped.
 */
1369 stop_port(portid_t pid)
1372 struct rte_port *port;
1373 int need_check_link_status = 0;
/* Refuse to stop ports while forwarding is active. */
1375 if (test_done == 0) {
1376 printf("Please stop forwarding first\n");
1383 printf("Stopping ports...\n");
1385 for (pi = 0; pi < nb_ports; pi++) {
/* A specific port was requested: skip all others. */
1386 if (pid < nb_ports && pid != pi)
1390 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1391 RTE_PORT_HANDLING) == 0)
1394 rte_eth_dev_stop(pi);
1396 if (rte_atomic16_cmpset(&(port->port_status),
1397 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1398 printf("Port %d can not be set into stopped\n", pi);
1399 need_check_link_status = 1;
1401 if (need_check_link_status)
1402 check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
/*
 * close_port() - close port `pid`, or every probed port when
 * pid == RTE_PORT_ALL.  Only a port in RTE_PORT_STOPPED state may be
 * closed; the STOPPED -> HANDLING -> CLOSED transition is guarded by
 * rte_atomic16_cmpset() so concurrent commands cannot race.
 */
1408 close_port(portid_t pid)
1411 struct rte_port *port;
/* Refuse to close ports while forwarding is active. */
1413 if (test_done == 0) {
1414 printf("Please stop forwarding first\n");
1418 printf("Closing ports...\n");
1420 for (pi = 0; pi < nb_ports; pi++) {
/* A specific port was requested: skip all others. */
1421 if (pid < nb_ports && pid != pi)
1425 if (rte_atomic16_cmpset(&(port->port_status),
1426 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1427 printf("Port %d is now not stopped\n", pi);
1431 rte_eth_dev_close(pi);
1433 if (rte_atomic16_cmpset(&(port->port_status),
1434 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
/* BUG FIX: message said "stopped" although the target state here
 * is RTE_PORT_CLOSED. */
1435 printf("Port %d can not be set into closed\n", pi);
/*
 * Return whether every probed port is in RTE_PORT_STOPPED state.
 * Iterates ports[0..nb_ports) and bails out on the first port whose
 * status is not STOPPED.
 */
1442 all_ports_stopped(void)
1445 struct rte_port *port;
1447 for (pi = 0; pi < nb_ports; pi++) {
1449 if (port->port_status != RTE_PORT_STOPPED)
1461 for (pt_id = 0; pt_id < nb_ports; pt_id++) {
1462 printf("Stopping port %d...", pt_id);
1464 rte_eth_dev_close(pt_id);
/* Handler signature for an interactive PMD test-menu command. */
1470 typedef void (*cmd_func_t)(void);
/* One entry of the PMD test menu: the command's name and its handler. */
1471 struct pmd_test_command {
1472 const char *cmd_name;
1473 cmd_func_t cmd_func;
/* Number of entries in pmd_test_menu[] (array defined elsewhere in this file). */
1476 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1478 /* Check the link status of all ports in up to 9s, and print them finally */
/*
 * Poll each port selected by `port_mask` with rte_eth_link_get_nowait()
 * every CHECK_INTERVAL ms, up to MAX_CHECK_TIME iterations.  Once all
 * selected links are up (or on the final iteration), print_flag is set
 * and the per-port status is printed on the next pass.
 */
1480 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
1482 #define CHECK_INTERVAL 100 /* 100ms */
1483 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1484 uint8_t portid, count, all_ports_up, print_flag = 0;
1485 struct rte_eth_link link;
1487 printf("Checking link statuses...\n");
1489 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1491 for (portid = 0; portid < port_num; portid++) {
1492 if ((port_mask & (1 << portid)) == 0)
1494 memset(&link, 0, sizeof(link));
1495 rte_eth_link_get_nowait(portid, &link);
1496 /* print link status if flag set */
1497 if (print_flag == 1) {
1498 if (link.link_status)
1499 printf("Port %d Link Up - speed %u "
1500 "Mbps - %s\n", (uint8_t)portid,
1501 (unsigned)link.link_speed,
1502 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
/* BUG FIX: the format string already ends in '\n'; the stray
 * newline inside "half-duplex" printed a blank line for
 * half-duplex links only. */
1503 ("full-duplex") : ("half-duplex"));
1505 printf("Port %d Link Down\n",
1509 /* clear all_ports_up flag if any link down */
1510 if (link.link_status == 0) {
1515 /* after finally printing all link status, get out */
1516 if (print_flag == 1)
1519 if (all_ports_up == 0) {
1521 rte_delay_ms(CHECK_INTERVAL);
1524 /* set the print_flag if all ports up or timeout */
1525 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
/*
 * Program the NIC's TX queue -> stats-counter mapping registers for
 * `port_id` from the global tx_queue_stats_mappings[] table, and mark
 * the mapping as enabled on the port on success.  Entries for other
 * ports or out-of-range queue ids (>= nb_txq) are ignored.
 */
1532 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1536 uint8_t mapping_found = 0;
1538 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1539 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1540 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1541 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1542 tx_queue_stats_mappings[i].queue_id,
1543 tx_queue_stats_mappings[i].stats_counter_id);
1550 port->tx_queue_stats_mapping_enabled = 1;
/*
 * RX counterpart of set_tx_queue_stats_mapping_registers(): program the
 * NIC's RX queue -> stats-counter mapping registers for `port_id` from
 * the global rx_queue_stats_mappings[] table, and mark the mapping as
 * enabled on the port on success.  Entries for other ports or
 * out-of-range queue ids (>= nb_rxq) are ignored.
 */
1555 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1559 uint8_t mapping_found = 0;
1561 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1562 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1563 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1564 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1565 rx_queue_stats_mappings[i].queue_id,
1566 rx_queue_stats_mappings[i].stats_counter_id);
1573 port->rx_queue_stats_mapping_enabled = 1;
/*
 * Apply both TX and RX queue-stats mappings to port `pi`.
 * -ENOTSUP from the driver is tolerated (the feature is simply disabled
 * and a notice printed); any other error is fatal and aborts testpmd
 * via rte_exit().
 */
1578 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1582 diag = set_tx_queue_stats_mapping_registers(pi, port);
1584 if (diag == -ENOTSUP) {
1585 port->tx_queue_stats_mapping_enabled = 0;
1586 printf("TX queue stats mapping not supported port id=%d\n", pi);
/* Any failure other than "not supported" is unrecoverable. */
1589 rte_exit(EXIT_FAILURE,
1590 "set_tx_queue_stats_mapping_registers "
1591 "failed for port id=%d diag=%d\n",
1595 diag = set_rx_queue_stats_mapping_registers(pi, port);
1597 if (diag == -ENOTSUP) {
1598 port->rx_queue_stats_mapping_enabled = 0;
1599 printf("RX queue stats mapping not supported port id=%d\n", pi);
1602 rte_exit(EXIT_FAILURE,
1603 "set_rx_queue_stats_mapping_registers "
1604 "failed for port id=%d diag=%d\n",
/*
 * init_port_config() - seed every probed port's rte_eth_conf and
 * queue thresholds from the global testpmd defaults (rx_mode,
 * fdir_conf, RSS hash, rx/tx thresholds), fetch its MAC address, and
 * program its queue-stats mapping registers.  RSS multi-queue RX mode
 * is only selected when the port is neither in DCB mode nor SR-IOV
 * mode (max_vfs != 0) and a non-zero RSS hash is configured.
 */
1610 init_port_config(void)
1613 struct rte_port *port;
1615 for (pid = 0; pid < nb_ports; pid++) {
1617 port->dev_conf.rxmode = rx_mode;
1618 port->dev_conf.fdir_conf = fdir_conf;
1620 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1621 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1623 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1624 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1627 /* In SR-IOV mode, RSS mode is not available */
1628 if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1629 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1630 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1632 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
/* Per-queue thresholds come from the global command-line defaults. */
1635 port->rx_conf.rx_thresh = rx_thresh;
1636 port->rx_conf.rx_free_thresh = rx_free_thresh;
1637 port->rx_conf.rx_drop_en = rx_drop_en;
1638 port->tx_conf.tx_thresh = tx_thresh;
1639 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1640 port->tx_conf.tx_free_thresh = tx_free_thresh;
1641 port->tx_conf.txq_flags = txq_flags;
1643 rte_eth_macaddr_get(pid, &port->eth_addr);
1645 map_port_queue_stats_mapping_registers(pid, port);
1646 #ifdef RTE_NIC_BYPASS
1647 rte_eth_dev_bypass_init(pid);
/* VLAN tags used to distribute traffic across pools in the VMDQ+DCB
 * configuration built by get_eth_dcb_conf() (32 consecutive tags). */
1652 const uint16_t vlan_tags[] = {
1653 0, 1, 2, 3, 4, 5, 6, 7,
1654 8, 9, 10, 11, 12, 13, 14, 15,
1655 16, 17, 18, 19, 20, 21, 22, 23,
1656 24, 25, 26, 27, 28, 29, 30, 31
/*
 * get_eth_dcb_conf() - fill `eth_conf` with a DCB configuration derived
 * from `dcb_conf`: VMDQ+DCB (pools mapped from vlan_tags[]) when
 * dcb_mode == DCB_VT_ENABLED, plain DCB otherwise.  PFC support is
 * advertised in dcb_capability_en only when dcb_conf->pfc_en is set.
 */
1660 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
1665 * Builds up the correct configuration for dcb+vt based on the vlan tags array
1666 * given above, and the number of traffic classes available for use.
1668 if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
1669 struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
1670 struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
1672 /* VMDQ+DCB RX and TX configrations */
1673 vmdq_rx_conf.enable_default_pool = 0;
1674 vmdq_rx_conf.default_pool = 0;
1675 vmdq_rx_conf.nb_queue_pools =
1676 (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1677 vmdq_tx_conf.nb_queue_pools =
1678 (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1680 vmdq_rx_conf.nb_pool_maps = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1681 for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
1682 vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[ i ];
1683 vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
1685 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1686 vmdq_rx_conf.dcb_queue[i] = i;
1687 vmdq_tx_conf.dcb_queue[i] = i;
1690 /*set DCB mode of RX and TX of multiple queues*/
1691 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1692 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1693 if (dcb_conf->pfc_en)
1694 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1696 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
/* BUG FIX: "&eth_conf" had been corrupted to the HTML entity
 * rendering "ð_conf" in the four rte_memcpy calls below. */
1698 (void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
1699 sizeof(struct rte_eth_vmdq_dcb_conf)));
1700 (void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
1701 sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
1704 struct rte_eth_dcb_rx_conf rx_conf;
1705 struct rte_eth_dcb_tx_conf tx_conf;
1707 /* queue mapping configuration of DCB RX and TX */
1708 if (dcb_conf->num_tcs == ETH_4_TCS)
1709 dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
1711 dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
1713 rx_conf.nb_tcs = dcb_conf->num_tcs;
1714 tx_conf.nb_tcs = dcb_conf->num_tcs;
1716 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
1717 rx_conf.dcb_queue[i] = i;
1718 tx_conf.dcb_queue[i] = i;
1720 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
1721 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1722 if (dcb_conf->pfc_en)
1723 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1725 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1727 (void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
1728 sizeof(struct rte_eth_dcb_rx_conf)));
1729 (void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
1730 sizeof(struct rte_eth_dcb_tx_conf)));
/*
 * init_port_dcb_config() - put port `pid` into DCB mode: build the DCB
 * rte_eth_conf via get_eth_dcb_conf(), copy it into the port, apply the
 * default rx/tx thresholds, enable HW VLAN filtering for every tag in
 * vlan_tags[], refresh the MAC address and queue-stats mappings, and
 * finally set the port's dcb_flag.
 */
1737 init_port_dcb_config(portid_t pid,struct dcb_config *dcb_conf)
1739 struct rte_eth_conf port_conf;
1740 struct rte_port *rte_port;
1745 /* rxq and txq configuration in dcb mode */
1748 rx_free_thresh = 64;
1750 memset(&port_conf,0,sizeof(struct rte_eth_conf));
1751 /* Enter DCB configuration status */
1754 nb_vlan = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1755 /*set configuration of DCB in vt mode and DCB in non-vt mode*/
1756 retval = get_eth_dcb_conf(&port_conf, dcb_conf);
1760 rte_port = &ports[pid];
1761 memcpy(&rte_port->dev_conf, &port_conf,sizeof(struct rte_eth_conf));
1763 rte_port->rx_conf.rx_thresh = rx_thresh;
1764 rte_port->rx_conf.rx_free_thresh = rx_free_thresh;
1765 rte_port->tx_conf.tx_thresh = tx_thresh;
1766 rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1767 rte_port->tx_conf.tx_free_thresh = tx_free_thresh;
/* Accept each VLAN tag used by the VMDQ+DCB pool mapping. */
1769 rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1770 for (i = 0; i < nb_vlan; i++){
1771 rx_vft_set(pid, vlan_tags[i], 1);
1774 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1775 map_port_queue_stats_mapping_registers(pid, rte_port);
1777 rte_port->dcb_flag = 1;
1782 #ifdef RTE_EXEC_ENV_BAREMETAL
1787 main(int argc, char** argv)
1792 diag = rte_eal_init(argc, argv);
1794 rte_panic("Cannot init EAL\n");
1796 if (rte_pmd_init_all())
1797 rte_panic("Cannot init PMD\n");
1799 if (rte_eal_pci_probe())
1800 rte_panic("Cannot probe PCI\n");
1802 nb_ports = (portid_t) rte_eth_dev_count();
1804 rte_exit(EXIT_FAILURE, "No probed ethernet devices - "
1806 "CONFIG_RTE_LIBRTE_IGB_PMD=y and that "
1807 "CONFIG_RTE_LIBRTE_EM_PMD=y and that "
1808 "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in your "
1809 "configuration file\n");
1811 set_def_fwd_config();
1813 rte_panic("Empty set of forwarding logical cores - check the "
1814 "core mask supplied in the command parameters\n");
1819 launch_args_parse(argc, argv);
1821 if (nb_rxq > nb_txq)
1822 printf("Warning: nb_rxq=%d enables RSS configuration, "
1823 "but nb_txq=%d will prevent to fully test it.\n",
1827 if (start_port(RTE_PORT_ALL) != 0)
1828 rte_exit(EXIT_FAILURE, "Start ports failed\n");
1830 /* set all ports to promiscuous mode by default */
1831 for (port_id = 0; port_id < nb_ports; port_id++)
1832 rte_eth_promiscuous_enable(port_id);
1834 #ifdef RTE_LIBRTE_CMDLINE
1835 if (interactive == 1) {
1837 printf("Start automatic packet forwarding\n");
1838 start_packet_forwarding(0);
1847 printf("No commandline core given, start packet forwarding\n");
1848 start_packet_forwarding(0);
1849 printf("Press enter to exit\n");
1850 rc = read(0, &c, 1);