/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/types.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_PMD_XENVIRT
#include <rte_eth_xenvirt.h>
#endif

#include "testpmd.h"
#include "mempool_osdep.h"
uint16_t verbose_level = 0; /**< Silent by default. */

/* Use the master lcore for the command line? */
uint8_t interactive = 0;
/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 0; /**< No numa support by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not specified.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Use ANONYMOUS mapped memory (which may not be physically contiguous) for
 * mbufs.
 */
uint8_t mp_anon = 0;
/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;
/*
 * Probed Target Environment.
 */
struct rte_port *ports;        /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
/*
 * Forwarding engines.
 */
struct fwd_engine *fwd_engines[] = {
	&io_fwd_engine,
	&mac_retry_fwd_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_PKT_BURST; /**< Size of mbuf mempool cache. */
/* Whether the current configuration is in DCB mode; 0 means not in DCB mode. */
uint8_t dcb_config = 0;

/* Whether DCB is in testing status */
uint8_t dcb_test = 0;

/* DCB-on, VT-on queue mapping is the default */
enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

/*
 * Configurable values of RX and TX ring threshold registers.
 */
#define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
#define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
#define RX_WTHRESH 4 /**< Default value of RX write-back threshold register. */

#define TX_PTHRESH 36 /**< Default value of TX prefetch threshold register. */
#define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
#define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */

struct rte_eth_thresh rx_thresh = {
	.pthresh = RX_PTHRESH,
	.hthresh = RX_HTHRESH,
	.wthresh = RX_WTHRESH,
};

struct rte_eth_thresh tx_thresh = {
	.pthresh = TX_PTHRESH,
	.hthresh = TX_HTHRESH,
	.wthresh = TX_WTHRESH,
};
/*
 * Configurable value of RX free threshold.
 */
uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */

/*
 * Configurable value of RX drop enable.
 */
uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */

/*
 * Configurable value of TX free threshold.
 */
uint16_t tx_free_thresh = 0; /* Use default values. */

/*
 * Configurable value of TX RS bit threshold.
 */
uint16_t tx_rs_thresh = 0; /* Use default values. */

/*
 * Configurable value of TX queue flags.
 */
uint32_t txq_flags = 0; /* No flags set. */

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint16_t rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6; /* RSS IP by default. */
/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */
/*
 * NIC bypass mode configuration options.
 */
#ifdef RTE_NIC_BYPASS

/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;

#endif
/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.header_split   = 0, /**< Header Split disabled. */
	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.flexbytes_offset = 0x6,
};
static volatile int test_done = 1; /* packet forwarding is stopped when set to 1 */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;
/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);
/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;

	for (pt_id = 0; pt_id < nb_ports; pt_id++)
		fwd_ports_ids[pt_id] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
/*
 * Configuration initialisation done once at init time.
 */
struct mbuf_ctor_arg {
	uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
	uint16_t seg_buf_size;   /**< size of data segment in mbuf. */
};

struct mbuf_pool_ctor_arg {
	uint16_t seg_buf_size; /**< size of data segment in mbuf. */
};
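
/*
 * Per-mbuf constructor, invoked once for each mbuf when the pool is
 * populated: initialize the buffer address/length fields and the default
 * packet metadata of a raw mbuf.
 */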
static void
testpmd_mbuf_ctor(struct rte_mempool *mp,
		  void *opaque_arg,
		  void *raw_mbuf,
		  __attribute__((unused)) unsigned i)
{
	struct mbuf_ctor_arg *mb_ctor_arg;
	struct rte_mbuf *mb;

	mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
	mb = (struct rte_mbuf *) raw_mbuf;

	mb->type = RTE_MBUF_PKT;
	mb->buf_addr = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
	mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
			mb_ctor_arg->seg_buf_offset);
	mb->buf_len = mb_ctor_arg->seg_buf_size;
	mb->pkt.data = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
	mb->pkt.vlan_macip.data = 0;
	mb->pkt.hash.rss = 0;
}
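
/*
 * Pool constructor: check that the pool private data area is large enough
 * and record the mbuf data room size in it.
 */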
static void
testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
		       void *opaque_arg)
{
	struct mbuf_pool_ctor_arg *mbp_ctor_arg;
	struct rte_pktmbuf_pool_private *mbp_priv;

	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
		printf("%s(%s) private_data_size %d < %d\n",
		       __func__, mp->name, (int) mp->private_data_size,
		       (int) sizeof(struct rte_pktmbuf_pool_private));
		return;
	}
	mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
	mbp_priv = rte_mempool_get_priv(mp);
	mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
}
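
/*
 * Create an mbuf pool named after the socket it is allocated on. Depending
 * on the build/runtime configuration, the pool is backed by Xen grant
 * allocations, anonymous mappings, or the regular rte_mempool allocator.
 */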
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp;
	struct mbuf_pool_ctor_arg mbp_ctor_arg;
	struct mbuf_ctor_arg mb_ctor_arg;
	uint32_t mb_size;

	mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
						mbuf_seg_size);
	mb_ctor_arg.seg_buf_offset =
		(uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
	mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
	mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

#ifdef RTE_LIBRTE_PMD_XENVIRT
	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
		(unsigned) mb_mempool_cache,
		sizeof(struct rte_pktmbuf_pool_private),
		testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
		testpmd_mbuf_ctor, &mb_ctor_arg,
		socket_id, 0);
#else
	if (mp_anon != 0)
		rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
			(unsigned) mb_mempool_cache,
			sizeof(struct rte_pktmbuf_pool_private),
			testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
			testpmd_mbuf_ctor, &mb_ctor_arg,
			socket_id, 0);
	else
		rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size,
			(unsigned) mb_mempool_cache,
			sizeof(struct rte_pktmbuf_pool_private),
			testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
			testpmd_mbuf_ctor, &mb_ctor_arg,
			socket_id, 0);
#endif

	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
				       "failed\n", socket_id);
	} else if (verbose_level > 0) {
		rte_mempool_dump(rte_mp);
	}
}
/*
 * Check whether the given socket id is valid in NUMA mode.
 * Return 0 if valid, -1 otherwise.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (socket_id >= MAX_SOCKET) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t lc_id;
	uint8_t port_per_socket[MAX_SOCKET];

	memset(port_per_socket, 0, MAX_SOCKET);
	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				 sizeof(struct fwd_lcore *) * nb_lcores,
				 CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
					"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
						sizeof(struct fwd_lcore),
						CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
						"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;

		if (!numa_support)
			nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
	}

	if (!numa_support) {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_num);
	}

	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * nb_ports,
			    CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
					"failed\n", nb_ports);
	}

	for (pid = 0; pid < nb_ports; pid++) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	if (numa_support) {
		uint8_t i;
		unsigned int nb_mbuf;

		if (param_total_num_mbufs)
			nb_mbuf_per_pool = nb_mbuf_per_pool / nb_ports;

		for (i = 0; i < MAX_SOCKET; i++) {
			nb_mbuf = (nb_mbuf_per_pool * port_per_socket[i]);
			if (nb_mbuf)
				mbuf_pool_create(mbuf_data_size, nb_mbuf, i);
		}
	}

	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(rte_lcore_to_socket_id(lc_id));
		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
}
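
/*
 * (Re)build the array of forwarding streams: one stream per RX queue of
 * each port. Also validates the requested queue counts against the device
 * capabilities and assigns each port its NUMA socket.
 */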
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;

	/* set socket id according to numa or not */
	for (pid = 0; pid < nb_ports; pid++) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
			       "max_rx_queues(%d)\n", nb_rxq,
			       port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
			       "max_tx_queues(%d)\n", nb_txq,
			       port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
			 "failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
				 " failed\n");
	}

	return 0;
}
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
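
/*
 * Display the forwarding statistics accumulated for one port, either from
 * the global port counters or from the per-queue stats registers when a
 * queue stats mapping has been configured for the port.
 */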
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;
	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) &&
	    (!port->tx_queue_stats_mapping_enabled)) {
		printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->ierrors,
		       (uint64_t) (stats->ipackets + stats->ierrors));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);

		printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));

		if (stats->rx_nombuf > 0)
			printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
	}
	else {
		printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->ierrors,
		       (uint64_t) (stats->ipackets + stats->ierrors));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);

		printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));

		if (stats->rx_nombuf > 0)
			printf(" RX-nombufs:%14"PRIu64"\n", stats->rx_nombuf);
	}

	/* Display statistics of XON/XOFF pause frames, if any. */
	if ((stats->tx_pause_xon | stats->rx_pause_xon |
	     stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) {
		printf(" RX-XOFF: %-14"PRIu64" RX-XON: %-14"PRIu64"\n",
		       stats->rx_pause_xoff, stats->rx_pause_xon);
		printf(" TX-XOFF: %-14"PRIu64" TX-XON: %-14"PRIu64"\n",
		       stats->tx_pause_xoff, stats->tx_pause_xon);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
		printf(" Fdirmiss:%14"PRIu64" Fdirmatch:%14"PRIu64"\n",
		       stats->fdirmiss, stats->fdirmatch);

	if (port->rx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf(" Stats reg %2d RX-packets:%14"PRIu64
			       " RX-errors:%14"PRIu64
			       " RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf(" Stats reg %2d TX-packets:%14"PRIu64
			       " TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf(" %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf(" RX- bad IP checksum: %-14u RX- bad L4 checksum: "
		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}
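
/*
 * Drain the RX queues of all forwarding ports and free the received mbufs,
 * so that a new forwarding run starts from empty queues. Two passes are
 * made, with a short delay in between, to catch late-arriving packets.
 */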
static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t rxp, port_id;
	queueid_t rxq;
	uint16_t nb_rx, i;
	uint8_t j;

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);
				} while (nb_rx > 0);
			}
		}
		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
	}
}
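
/*
 * Main forwarding loop of one logical core: repeatedly apply the packet
 * forwarding function of the current engine to every stream assigned to
 * this lcore, until the lcore is told to stop.
 */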
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;

	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
	} while (! fc->stopped);
}

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}
/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}
/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}
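
/*
 * Launch packet forwarding with the current configuration. When
 * with_tx_first is set, a single TXONLY burst is first sent on every
 * forwarding port to prime network loopback test setups.
 */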
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t pt_id;
	streamid_t sm_id;

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}
	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
			       "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		launch_packet_forwarding(run_one_txonly_burst_on_core);
		rte_eal_mp_wait_lcore();
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
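
/*
 * Stop packet forwarding: signal every forwarding lcore to stop, wait for
 * them to finish, then collect and display the per-stream, per-port and
 * accumulated statistics of the completed run.
 */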
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t port_fwd_end;
	int i;
	portid_t pt_id;
	streamid_t sm_id;
	lcoreid_t lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif
	static const char *acc_stats_border = "+++++++++++++++";

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					     fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					     fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.ierrors -= port->stats.ierrors;
		port->stats.ierrors = 0;
		stats.oerrors -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;
		stats.fdirmatch -= port->stats.fdirmatch;
		port->stats.fdirmatch = 0;
		stats.fdirmiss -= port->stats.fdirmiss;
		port->stats.fdirmiss = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.ierrors;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}
	printf("\n %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}
static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	for (pi = 0; pi < nb_ports; pi++) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if (port->port_status != RTE_PORT_STARTED)
			return 0;
	}

	/* All ports are started */
	return 1;
}
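
/*
 * Configure and start the given port, or all ports when pid is out of
 * range. Ports are (re)configured and their queues (re)created only when
 * flagged as needing it; the port state is updated atomically so that
 * concurrent status changes are detected.
 */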
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = 0;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return -1;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return -1;
	}

	printf("Starting ports...\n");

	for (pi = 0; pi < nb_ports; pi++) {
		if (pid < nb_ports && pid != pi)
			continue;

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
					RTE_PORT_HANDLING) == 0) {
			printf("Port %d is not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			printf("Configuring Port %d (socket %d)\n", pi,
			       port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						     &(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				    RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
					       "to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				if ((numa_support) &&
				    (txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, txring_numa[pi],
						&(port->tx_conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, port->socket_id,
						&(port->tx_conf));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
					       "to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup rx queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				if ((numa_support) &&
				    (rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool *mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue: "
						       "no mempool allocation "
						       "on socket %d\n",
						       rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd, rxring_numa[pi],
						&(port->rx_conf), mp);
				} else
					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd, port->socket_id,
						&(port->rx_conf),
						mbuf_pool_find(port->socket_id));

				if (diag == 0)
					continue;

				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
					       "to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}
		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Failed to start port, set it back to stopped */
			if (rte_atomic16_cmpset(&(port->port_status),
			    RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
				       "stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
		    RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d can not be set into started\n", pi);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	if (need_check_link_status)
		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
	else
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}
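
/*
 * Stop the given port, or all ports when pid is out of range. Only ports
 * currently in the started state are stopped.
 */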
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return;
	}
	printf("Stopping ports...\n");

	for (pi = 0; pi < nb_ports; pi++) {
		if (pid < nb_ports && pid != pi)
			continue;

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
					RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
		    RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status)
		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);

	printf("Done\n");
}
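
/*
 * Close the given port, or all ports when pid is out of range. A port must
 * already be stopped before it can be closed.
 */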
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return;
	}
	printf("Closing ports...\n");

	for (pi = 0; pi < nb_ports; pi++) {
		if (pid < nb_ports && pid != pi)
			continue;

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status),
		    RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is not stopped\n", pi);
			continue;
		}

		rte_eth_dev_close(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
		    RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d can not be set into closed\n", pi);
	}

	printf("Done\n");
}
static int
all_ports_stopped(void)
{
	portid_t pi;
	struct rte_port *port;

	for (pi = 0; pi < nb_ports; pi++) {
		port = &ports[pi];
		if (port->port_status != RTE_PORT_STOPPED)
			return 0;
	}

	return 1;
}
void
pmd_test_exit(void)
{
	portid_t pt_id;

	for (pt_id = 0; pt_id < nb_ports; pt_id++) {
		printf("Stopping port %d...", pt_id);
		fflush(stdout);
		rte_eth_dev_close(pt_id);
		printf("done\n");
	}
}
typedef void (*cmd_func_t)(void);
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint8_t portid, count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("Checking link statuses...\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		for (portid = 0; portid < port_num; portid++) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up - speed %u "
					       "Mbps - %s\n", (uint8_t)portid,
					       (unsigned)link.link_speed,
					       (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					       ("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n",
					       (uint8_t)portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == 0) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1))
			print_flag = 1;
	}
}
static int
set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
		    (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
					tx_queue_stats_mappings[i].queue_id,
					tx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->tx_queue_stats_mapping_enabled = 1;
	return 0;
}

static int
set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
		    (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
					rx_queue_stats_mappings[i].queue_id,
					rx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->rx_queue_stats_mapping_enabled = 1;
	return 0;
}
static void
map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
{
	int diag;

	diag = set_tx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->tx_queue_stats_mapping_enabled = 0;
			printf("TX queue stats mapping not supported port id=%d\n", pi);
		}
		else
			rte_exit(EXIT_FAILURE,
				 "set_tx_queue_stats_mapping_registers "
				 "failed for port id=%d diag=%d\n",
				 pi, diag);
	}

	diag = set_rx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->rx_queue_stats_mapping_enabled = 0;
			printf("RX queue stats mapping not supported port id=%d\n", pi);
		}
		else
			rte_exit(EXIT_FAILURE,
				 "set_rx_queue_stats_mapping_registers "
				 "failed for port id=%d diag=%d\n",
				 pi, diag);
	}
}
void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;

	for (pid = 0; pid < nb_ports; pid++) {
		port = &ports[pid];
		port->dev_conf.rxmode = rx_mode;
		port->dev_conf.fdir_conf = fdir_conf;
		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		/* In SR-IOV mode, RSS mode is not available */
		if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
			else
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
		}

		port->rx_conf.rx_thresh = rx_thresh;
		port->rx_conf.rx_free_thresh = rx_free_thresh;
		port->rx_conf.rx_drop_en = rx_drop_en;
		port->tx_conf.tx_thresh = tx_thresh;
		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
		port->tx_conf.tx_free_thresh = tx_free_thresh;
		port->tx_conf.txq_flags = txq_flags;

		rte_eth_macaddr_get(pid, &port->eth_addr);

		map_port_queue_stats_mapping_registers(pid, port);
#ifdef RTE_NIC_BYPASS
		rte_eth_dev_bypass_init(pid);
#endif
	}
}
const uint16_t vlan_tags[] = {
	0,  1,  2,  3,  4,  5,  6,  7,
	8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};
static int
get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
{
	uint8_t i;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
	 * given above, and the number of traffic classes available for use.
	 */
	if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
		struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf.enable_default_pool = 0;
		vmdq_rx_conf.default_pool = 0;
		vmdq_rx_conf.nb_queue_pools =
			(dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf.nb_queue_pools =
			(dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		vmdq_rx_conf.nb_pool_maps = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
		for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
			vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
		}
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf.dcb_queue[i] = i;
			vmdq_tx_conf.dcb_queue[i] = i;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
		if (dcb_conf->pfc_en)
			eth_conf->dcb_capability_en =
				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
		else
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

		(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
				  sizeof(struct rte_eth_vmdq_dcb_conf)));
		(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
				  sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
	} else {
		struct rte_eth_dcb_rx_conf rx_conf;
		struct rte_eth_dcb_tx_conf tx_conf;

		/* queue mapping configuration of DCB RX and TX */
		if (dcb_conf->num_tcs == ETH_4_TCS)
			dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
		else
			dcb_q_mapping = DCB_8_TCS_Q_MAPPING;

		rx_conf.nb_tcs = dcb_conf->num_tcs;
		tx_conf.nb_tcs = dcb_conf->num_tcs;

		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf.dcb_queue[i] = i;
			tx_conf.dcb_queue[i] = i;
		}
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
		if (dcb_conf->pfc_en)
			eth_conf->dcb_capability_en =
				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
		else
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

		(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
				  sizeof(struct rte_eth_dcb_rx_conf)));
		(void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
				  sizeof(struct rte_eth_dcb_tx_conf)));
	}

	return 0;
}
int
init_port_dcb_config(portid_t pid, struct dcb_config *dcb_conf)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t nb_vlan;
	uint16_t i;

	/* rxq and txq configuration in dcb mode */
	rx_free_thresh = 64;

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	nb_vlan = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
	/* set configuration of DCB in vt mode and DCB in non-vt mode */
	retval = get_eth_dcb_conf(&port_conf, dcb_conf);
	if (retval < 0)
		return retval;

	rte_port = &ports[pid];
	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rte_port->rx_conf.rx_thresh = rx_thresh;
	rte_port->rx_conf.rx_free_thresh = rx_free_thresh;
	rte_port->tx_conf.tx_thresh = tx_thresh;
	rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh;
	rte_port->tx_conf.tx_free_thresh = tx_free_thresh;

	/* VLAN filter */
	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
	for (i = 0; i < nb_vlan; i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}
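
/*
 * Entry point: initialize the EAL, the drivers and the default forwarding
 * configuration, start all probed ports, then either hand control to the
 * interactive command line or start forwarding until the user presses
 * enter.
 */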
#ifdef RTE_EXEC_ENV_BAREMETAL
#define main _main
#endif

int
main(int argc, char** argv)
{
	int diag;
	uint8_t port_id;

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

	if (rte_pmd_init_all())
		rte_panic("Cannot init PMD\n");

	if (rte_eal_pci_probe())
		rte_panic("Cannot probe PCI\n");

	nb_ports = (portid_t) rte_eth_dev_count();
	if (nb_ports == 0)
		rte_exit(EXIT_FAILURE, "No probed ethernet devices - "
			 "check that "
			 "CONFIG_RTE_LIBRTE_IGB_PMD=y and that "
			 "CONFIG_RTE_LIBRTE_EM_PMD=y and that "
			 "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in your "
			 "configuration file\n");

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent it from being fully "
		       "tested.\n", nb_rxq, nb_txq);

	init_config();
	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	for (port_id = 0; port_id < nb_ports; port_id++)
		rte_eth_promiscuous_enable(port_id);

#ifdef RTE_LIBRTE_CMDLINE
	if (interactive == 1)
		prompt();
	else
#endif
	{
		char c;
		int rc;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(0);
		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		if (rc < 0)
			return 1;
	}

	return 0;
}