4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <sys/types.h>
44 #include <sys/queue.h>
51 #include <rte_common.h>
52 #include <rte_byteorder.h>
54 #include <rte_debug.h>
55 #include <rte_cycles.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
59 #include <rte_launch.h>
60 #include <rte_tailq.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
70 #include <rte_interrupts.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
80 #include "mempool_osdep.h"
/*
 * Global testpmd run-time state: command-line tunables, probed-hardware
 * bookkeeping, and default ethdev configuration shared by all modules.
 * NOTE(review): the extraction dropped many lines in this region (comment
 * terminators, array entries, closing braces); code left byte-identical.
 */
82 uint16_t verbose_level = 0; /**< Silent by default. */
84 /* use master core for command line ? */
85 uint8_t interactive = 0;
88 * NUMA support configuration.
89 * When set, the NUMA support attempts to dispatch the allocation of the
90 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
91 * probed ports among the CPU sockets 0 and 1.
92 * Otherwise, all memory is allocated from CPU socket 0.
94 uint8_t numa_support = 0; /**< No numa support by default */
97 * In UMA mode,all memory is allocated from socket 0 if --socket-num is
100 uint8_t socket_num = UMA_NO_CONFIG;
103 * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs.
108 * Record the Ethernet address of peer target ports to which packets are
110 * Must be instanciated with the ethernet addresses of peer traffic generator
113 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
114 portid_t nb_peer_eth_addrs = 0;
117 * Probed Target Environment.
119 struct rte_port *ports; /**< For all probed ethernet ports. */
120 portid_t nb_ports; /**< Number of probed ethernet ports. */
121 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
122 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
125 * Test Forwarding Configuration.
126 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
127 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
129 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
130 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
131 portid_t nb_cfg_ports; /**< Number of configured ports. */
132 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
134 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
135 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
137 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
138 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
141 * Forwarding engines.
143 struct fwd_engine * fwd_engines[] = {
146 &mac_retry_fwd_engine,
150 #ifdef RTE_LIBRTE_IEEE1588
151 &ieee1588_fwd_engine,
156 struct fwd_config cur_fwd_config;
157 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
159 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
160 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
161 * specified on command-line. */
164 * Configuration of packet segments used by the "txonly" processing engine.
166 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
167 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
168 TXONLY_DEF_PACKET_LEN,
170 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
172 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
173 uint16_t mb_mempool_cache = DEF_PKT_BURST; /**< Size of mbuf mempool cache. */
175 /* current configuration is in DCB or not,0 means it is not in DCB mode */
176 uint8_t dcb_config = 0;
178 /* Whether the dcb is in testing status */
179 uint8_t dcb_test = 0;
181 /* DCB on and VT on mapping is default */
182 enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
185 * Configurable number of RX/TX queues.
187 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
188 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
191 * Configurable number of RX/TX ring descriptors.
193 #define RTE_TEST_RX_DESC_DEFAULT 128
194 #define RTE_TEST_TX_DESC_DEFAULT 512
195 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
196 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
199 * Configurable values of RX and TX ring threshold registers.
201 #define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
202 #define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
203 #define RX_WTHRESH 4 /**< Default value of RX write-back threshold register. */
205 #define TX_PTHRESH 36 /**< Default value of TX prefetch threshold register. */
206 #define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
207 #define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */
209 struct rte_eth_thresh rx_thresh = {
210 .pthresh = RX_PTHRESH,
211 .hthresh = RX_HTHRESH,
212 .wthresh = RX_WTHRESH,
215 struct rte_eth_thresh tx_thresh = {
216 .pthresh = TX_PTHRESH,
217 .hthresh = TX_HTHRESH,
218 .wthresh = TX_WTHRESH,
222 * Configurable value of RX free threshold.
224 uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */
227 * Configurable value of RX drop enable.
229 uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */
232 * Configurable value of TX free threshold.
234 uint16_t tx_free_thresh = 0; /* Use default values. */
237 * Configurable value of TX RS bit threshold.
239 uint16_t tx_rs_thresh = 0; /* Use default values. */
242 * Configurable value of TX queue flags.
244 uint32_t txq_flags = 0; /* No flags set. */
247 * Receive Side Scaling (RSS) configuration.
249 uint16_t rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6; /* RSS IP by default. */
252 * Port topology configuration
254 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
257 * Avoids to flush all the RX streams before starts forwarding.
259 uint8_t no_flush_rx = 0; /* flush by default */
262 * NIC bypass mode configuration options.
264 #ifdef RTE_NIC_BYPASS
266 /* The NIC bypass watchdog timeout. */
267 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
272 * Ethernet device configuration.
274 struct rte_eth_rxmode rx_mode = {
275 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
277 .header_split = 0, /**< Header Split disabled. */
278 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
279 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
280 .hw_vlan_strip = 1, /**< VLAN strip enabled. */
281 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
282 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
283 .hw_strip_crc = 0, /**< CRC stripping by hardware disabled. */
286 struct rte_fdir_conf fdir_conf = {
287 .mode = RTE_FDIR_MODE_NONE,
288 .pballoc = RTE_FDIR_PBALLOC_64K,
289 .status = RTE_FDIR_REPORT_STATUS,
290 .flexbytes_offset = 0x6,
/* test_done == 1 means forwarding is stopped; cleared when forwarding starts. */
294 static volatile int test_done = 1; /* stop packet forwarding when set to 1. */
/* Per-queue statistics-register mapping tables (filled from command line). */
296 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
297 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
299 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
300 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
302 uint16_t nb_tx_queue_stats_mappings = 0;
303 uint16_t nb_rx_queue_stats_mappings = 0;
305 /* Forward function declarations */
306 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
307 static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);
310 * Check if all the ports are started.
311 * If yes, return positive value. If not, return zero.
313 static int all_ports_started(void);
316 * Setup default configuration.
/*
 * Collect every enabled lcore except the master into fwd_lcores_cpuids[]
 * and record the count in nb_lcores/nb_cfg_lcores.
 * NOTE(review): function header, braces and the nb_lc declaration are
 * missing from this extraction; code left untouched.
 */
319 set_default_fwd_lcores_config(void)
325 for (i = 0; i < RTE_MAX_LCORE; i++) {
326 if (! rte_lcore_is_enabled(i))
328 if (i == rte_get_master_lcore())
/* Master lcore is excluded so it stays free for the interactive CLI. */
330 fwd_lcores_cpuids[nb_lc++] = i;
332 nb_lcores = (lcoreid_t) nb_lc;
333 nb_cfg_lcores = nb_lcores;
/*
 * Give every possible peer port a default locally-administered MAC whose
 * last byte is the port index (bytes 1-4 stay zero-initialized).
 */
338 set_def_peer_eth_addrs(void)
342 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
343 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
344 peer_eth_addrs[i].addr_bytes[5] = i;
/*
 * Default port configuration: forward on every probed port, using the
 * identity mapping fwd_ports_ids[i] == i.
 */
349 set_default_fwd_ports_config(void)
353 for (pt_id = 0; pt_id < nb_ports; pt_id++)
354 fwd_ports_ids[pt_id] = pt_id;
356 nb_cfg_ports = nb_ports;
357 nb_fwd_ports = nb_ports;
/* Reset lcores, peer MACs and ports to their default forwarding setup. */
361 set_def_fwd_config(void)
363 set_default_fwd_lcores_config();
364 set_def_peer_eth_addrs();
365 set_default_fwd_ports_config();
369 * Configuration initialisation done once at init time.
/* Opaque argument passed to testpmd_mbuf_ctor for each mbuf. */
371 struct mbuf_ctor_arg {
372 uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
373 uint16_t seg_buf_size; /**< size of data segment in mbuf. */
/* Opaque argument passed to testpmd_mbuf_pool_ctor once per pool. */
376 struct mbuf_pool_ctor_arg {
377 uint16_t seg_buf_size; /**< size of data segment in mbuf. */
/*
 * Per-mbuf constructor callback: places the data buffer at seg_buf_offset
 * inside the raw object, computes its physical address, and resets the
 * packet fields (data pointer after headroom, vlan/rss cleared).
 */
381 testpmd_mbuf_ctor(struct rte_mempool *mp,
384 __attribute__((unused)) unsigned i)
386 struct mbuf_ctor_arg *mb_ctor_arg;
389 mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
390 mb = (struct rte_mbuf *) raw_mbuf;
392 mb->type = RTE_MBUF_PKT;
394 mb->buf_addr = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
395 mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
396 mb_ctor_arg->seg_buf_offset);
397 mb->buf_len = mb_ctor_arg->seg_buf_size;
/* NOTE(review): mb->type was already set above — duplicate assignment. */
398 mb->type = RTE_MBUF_PKT;
400 mb->pkt.data = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
402 mb->pkt.vlan_macip.data = 0;
403 mb->pkt.hash.rss = 0;
/*
 * Pool constructor callback: records the per-mbuf data-room size in the
 * pool's private area after checking the private area is large enough.
 */
407 testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
410 struct mbuf_pool_ctor_arg *mbp_ctor_arg;
411 struct rte_pktmbuf_pool_private *mbp_priv;
413 if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
414 printf("%s(%s) private_data_size %d < %d\n",
415 __func__, mp->name, (int) mp->private_data_size,
416 (int) sizeof(struct rte_pktmbuf_pool_private));
419 mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
420 mbp_priv = rte_mempool_get_priv(mp);
421 mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
/*
 * Create the mbuf pool for one socket. mbuf size = cache-line-rounded
 * header + headroom + mbuf_seg_size. The backing allocator is chosen at
 * build/run time: Xen grant-alloc, anonymous mmap, or the regular
 * rte_mempool_create (the #else/#endif branch lines are missing from this
 * extraction). Exits the process on allocation failure.
 */
425 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
426 unsigned int socket_id)
428 char pool_name[RTE_MEMPOOL_NAMESIZE];
429 struct rte_mempool *rte_mp;
430 struct mbuf_pool_ctor_arg mbp_ctor_arg;
431 struct mbuf_ctor_arg mb_ctor_arg;
434 mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
436 mb_ctor_arg.seg_buf_offset =
437 (uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
438 mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
439 mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
440 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
442 #ifdef RTE_LIBRTE_PMD_XENVIRT
443 rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
444 (unsigned) mb_mempool_cache,
445 sizeof(struct rte_pktmbuf_pool_private),
446 testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
447 testpmd_mbuf_ctor, &mb_ctor_arg,
/* Anonymous-mmap pool: memory may not be physically contiguous. */
454 rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
455 (unsigned) mb_mempool_cache,
456 sizeof(struct rte_pktmbuf_pool_private),
457 testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
458 testpmd_mbuf_ctor, &mb_ctor_arg,
/* Default path: hugepage-backed rte_mempool on the requested socket. */
461 rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size,
462 (unsigned) mb_mempool_cache,
463 sizeof(struct rte_pktmbuf_pool_private),
464 testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
465 testpmd_mbuf_ctor, &mb_ctor_arg,
470 if (rte_mp == NULL) {
471 rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
472 "failed\n", socket_id);
473 } else if (verbose_level > 0) {
474 rte_mempool_dump(rte_mp);
479 * Check given socket id is valid or not with NUMA mode,
480 * if valid, return 0, else return -1
483 check_socket_id(const unsigned int socket_id)
/* warning_once ensures the NUMA-configuration hint prints only once. */
485 static int warning_once = 0;
487 if (socket_id >= MAX_SOCKET) {
488 if (!warning_once && numa_support)
489 printf("Warning: NUMA should be configured manually by"
490 " using --port-numa-config and"
491 " --ring-numa-config parameters along with"
/* NOTE(review): the warning tail, warning_once update and both return
 * statements are missing from this extraction — confirm against the
 * original file before editing. */
/*
 * NOTE(review): this span appears to be the body of init_config() — the
 * function header and several interior lines are missing from this
 * extraction. It allocates fwd_lcores and ports arrays, creates the mbuf
 * pool(s) (one per socket under NUMA, one on socket 0 otherwise), and
 * initializes the forwarding streams.
 */
503 struct rte_port *port;
504 struct rte_mempool *mbp;
505 unsigned int nb_mbuf_per_pool;
507 uint8_t port_per_socket[MAX_SOCKET];
509 memset(port_per_socket,0,MAX_SOCKET);
510 /* Configuration of logical cores. */
511 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
512 sizeof(struct fwd_lcore *) * nb_lcores,
514 if (fwd_lcores == NULL) {
515 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
516 "failed\n", nb_lcores);
518 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
519 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
520 sizeof(struct fwd_lcore),
522 if (fwd_lcores[lc_id] == NULL) {
523 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
526 fwd_lcores[lc_id]->cpuid_idx = lc_id;
530 * Create pools of mbuf.
531 * If NUMA support is disabled, create a single pool of mbuf in
532 * socket 0 memory by default.
533 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
535 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
536 * nb_txd can be configured at run time.
538 if (param_total_num_mbufs)
539 nb_mbuf_per_pool = param_total_num_mbufs;
541 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
542 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
545 nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
549 if (socket_num == UMA_NO_CONFIG)
550 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
552 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
556 * Records which Mbuf pool to use by each logical core, if needed.
558 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
559 mbp = mbuf_pool_find(rte_lcore_to_socket_id(lc_id));
/* Fall back to the socket-0 pool when the lcore's socket has none. */
561 mbp = mbuf_pool_find(0);
562 fwd_lcores[lc_id]->mbp = mbp;
565 /* Configuration of Ethernet ports. */
566 ports = rte_zmalloc("testpmd: ports",
567 sizeof(struct rte_port) * nb_ports,
570 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
571 "failed\n", nb_ports);
574 for (pid = 0; pid < nb_ports; pid++) {
576 rte_eth_dev_info_get(pid, &port->dev_info);
579 if (port_numa[pid] != NUMA_NO_CONFIG)
580 port_per_socket[port_numa[pid]]++;
582 uint32_t socket_id = rte_eth_dev_socket_id(pid);
584 /* if socket_id is invalid, set to 0 */
585 if (check_socket_id(socket_id) < 0)
587 port_per_socket[socket_id]++;
591 /* set flag to initialize port/queue */
592 port->need_reconfig = 1;
593 port->need_reconfig_queues = 1;
598 unsigned int nb_mbuf;
600 if (param_total_num_mbufs)
601 nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
603 for (i = 0; i < MAX_SOCKET; i++) {
604 nb_mbuf = (nb_mbuf_per_pool *
607 mbuf_pool_create(mbuf_data_size,
612 /* Configuration of packet forwarding streams. */
613 if (init_fwd_streams() < 0)
614 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
/*
 * (Re)allocate the fwd_streams array: validate nb_rxq/nb_txq against each
 * port's limits, assign each port a socket id (NUMA-aware or user-forced),
 * then size the array to nb_ports * nb_rxq, freeing any previous streams
 * first. Error paths (negative returns) are missing from this extraction.
 */
618 init_fwd_streams(void)
621 struct rte_port *port;
622 streamid_t sm_id, nb_fwd_streams_new;
624 /* set socket id according to numa or not */
625 for (pid = 0; pid < nb_ports; pid++) {
627 if (nb_rxq > port->dev_info.max_rx_queues) {
628 printf("Fail: nb_rxq(%d) is greater than "
629 "max_rx_queues(%d)\n", nb_rxq,
630 port->dev_info.max_rx_queues);
633 if (nb_txq > port->dev_info.max_tx_queues) {
634 printf("Fail: nb_txq(%d) is greater than "
635 "max_tx_queues(%d)\n", nb_txq,
636 port->dev_info.max_tx_queues);
640 if (port_numa[pid] != NUMA_NO_CONFIG)
641 port->socket_id = port_numa[pid];
643 port->socket_id = rte_eth_dev_socket_id(pid);
645 /* if socket_id is invalid, set to 0 */
646 if (check_socket_id(port->socket_id) < 0)
651 if (socket_num == UMA_NO_CONFIG)
654 port->socket_id = socket_num;
658 nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
/* Stream count unchanged: keep the existing array as-is. */
659 if (nb_fwd_streams_new == nb_fwd_streams)
662 if (fwd_streams != NULL) {
663 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
664 if (fwd_streams[sm_id] == NULL)
666 rte_free(fwd_streams[sm_id]);
667 fwd_streams[sm_id] = NULL;
669 rte_free(fwd_streams);
674 nb_fwd_streams = nb_fwd_streams_new;
675 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
676 sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
677 if (fwd_streams == NULL)
678 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
679 "failed\n", nb_fwd_streams);
681 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
682 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
683 sizeof(struct fwd_stream), CACHE_LINE_SIZE);
684 if (fwd_streams[sm_id] == NULL)
685 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
692 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
/*
 * Print a summary of the burst-size distribution: total bursts plus the
 * percentages of the two most frequent burst sizes, lumping the rest as
 * "others". NOTE(review): the initialization of total_burst and the
 * second-place bookkeeping branch are missing from this extraction —
 * confirm total_burst = 0 exists before the accumulation loop.
 */
694 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
696 unsigned int total_burst;
697 unsigned int nb_burst;
698 unsigned int burst_stats[3];
699 uint16_t pktnb_stats[3];
701 int burst_percent[3];
704 * First compute the total number of packet bursts and the
705 * two highest numbers of bursts of the same number of packets.
708 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
709 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
710 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
711 nb_burst = pbs->pkt_burst_spread[nb_pkt];
714 total_burst += nb_burst;
715 if (nb_burst > burst_stats[0]) {
/* New maximum: demote the old leader to second place. */
716 burst_stats[1] = burst_stats[0];
717 pktnb_stats[1] = pktnb_stats[0];
718 burst_stats[0] = nb_burst;
719 pktnb_stats[0] = nb_pkt;
722 if (total_burst == 0)
724 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
725 printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
726 burst_percent[0], (int) pktnb_stats[0]);
727 if (burst_stats[0] == total_burst) {
731 if (burst_stats[0] + burst_stats[1] == total_burst) {
732 printf(" + %d%% of %d pkts]\n",
733 100 - burst_percent[0], pktnb_stats[1]);
736 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
737 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
738 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
739 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
742 printf(" + %d%% of %d pkts + %d%% of others]\n",
743 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
745 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
/*
 * Print per-port forwarding statistics. Two layouts: a compact one when
 * no queue-stats mapping is enabled, a wider one otherwise. Also shows
 * checksum-engine counters, XON/XOFF pause frames, optional burst stats,
 * flow-director counters, and per-stats-register queue counters.
 */
748 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
750 struct rte_port *port;
753 static const char *fwd_stats_border = "----------------------";
755 port = &ports[port_id];
756 printf("\n  %s Forward statistics for port %-2d %s\n",
757 fwd_stats_border, port_id, fwd_stats_border);
759 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
760 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
762 stats->ipackets, stats->ierrors,
763 (uint64_t) (stats->ipackets + stats->ierrors));
765 if (cur_fwd_eng == &csum_fwd_engine)
766 printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
767 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
769 printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
771 stats->opackets, port->tx_dropped,
772 (uint64_t) (stats->opackets + port->tx_dropped));
774 if (stats->rx_nombuf > 0)
775 printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
/* Wide layout used when queue-stats mapping is active. */
779 printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
781 stats->ipackets, stats->ierrors,
782 (uint64_t) (stats->ipackets + stats->ierrors));
784 if (cur_fwd_eng == &csum_fwd_engine)
785 printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
786 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
788 printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
790 stats->opackets, port->tx_dropped,
791 (uint64_t) (stats->opackets + port->tx_dropped));
793 if (stats->rx_nombuf > 0)
794 printf("  RX-nombufs:%14"PRIu64"\n", stats->rx_nombuf);
797 /* Display statistics of XON/XOFF pause frames, if any. */
798 if ((stats->tx_pause_xon | stats->rx_pause_xon |
799 stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) {
800 printf("  RX-XOFF:    %-14"PRIu64" RX-XON:     %-14"PRIu64"\n",
801 stats->rx_pause_xoff, stats->rx_pause_xon);
802 printf("  TX-XOFF:    %-14"PRIu64" TX-XON:     %-14"PRIu64"\n",
803 stats->tx_pause_xoff, stats->tx_pause_xon);
806 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
808 pkt_burst_stats_display("RX",
809 &port->rx_stream->rx_burst_stats);
811 pkt_burst_stats_display("TX",
812 &port->tx_stream->tx_burst_stats);
815 if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
816 printf("  Fdirmiss:%14"PRIu64"    Fdirmatch:%14"PRIu64"\n",
820 if (port->rx_queue_stats_mapping_enabled) {
822 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
823 printf("  Stats reg %2d RX-packets:%14"PRIu64
824 "     RX-errors:%14"PRIu64
825 "    RX-bytes:%14"PRIu64"\n",
826 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
830 if (port->tx_queue_stats_mapping_enabled) {
831 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
832 printf("  Stats reg %2d TX-packets:%14"PRIu64
833 "                                 TX-bytes:%14"PRIu64"\n",
834 i, stats->q_opackets[i], stats->q_obytes[i]);
838 printf("  %s--------------------------------%s\n",
839 fwd_stats_border, fwd_stats_border);
/*
 * Print per-stream statistics (RX/TX packets, drops, and checksum errors
 * for the csum engine). Streams with no activity are skipped via the
 * all-zero early-out below.
 */
843 fwd_stream_stats_display(streamid_t stream_id)
845 struct fwd_stream *fs;
846 static const char *fwd_top_stats_border = "-------";
848 fs = fwd_streams[stream_id];
849 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
850 (fs->fwd_dropped == 0))
852 printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
853 "TX Port=%2d/Queue=%2d %s\n",
854 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
855 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
856 printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
857 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
859 /* if checksum mode */
860 if (cur_fwd_eng == &csum_fwd_engine) {
861 printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
862 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
865 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
866 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
867 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
/*
 * Drain stale packets from every forwarding RX queue before a run starts,
 * so statistics begin from a clean state. Two passes with a 10 ms pause
 * in between to catch packets still in flight.
 */
872 flush_fwd_rx_queues(void)
874 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
882 for (j = 0; j < 2; j++) {
883 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
884 for (rxq = 0; rxq < nb_rxq; rxq++) {
885 port_id = fwd_ports_ids[rxp];
887 nb_rx = rte_eth_rx_burst(port_id, rxq,
888 pkts_burst, MAX_PKT_BURST);
889 for (i = 0; i < nb_rx; i++)
890 rte_pktmbuf_free(pkts_burst[i]);
894 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
/*
 * Forwarding main loop for one lcore: repeatedly run pkt_fwd over the
 * lcore's slice of fwd_streams until fc->stopped is set by the control
 * thread (stop_packet_forwarding).
 */
899 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
901 struct fwd_stream **fsm;
905 fsm = &fwd_streams[fc->stream_idx];
906 nb_fs = fc->stream_nb;
908 for (sm_id = 0; sm_id < nb_fs; sm_id++)
909 (*pkt_fwd)(fsm[sm_id]);
910 } while (! fc->stopped);
/* lcore entry point: run the currently configured engine until stopped. */
914 start_pkt_forward_on_core(void *fwd_arg)
916 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
917 cur_fwd_config.fwd_eng->packet_fwd);
922 * Run the TXONLY packet forwarding engine to send a single burst of packets.
923 * Used to start communication flows in network loopback test configurations.
926 run_one_txonly_burst_on_core(void *fwd_arg)
928 struct fwd_lcore *fwd_lc;
929 struct fwd_lcore tmp_lcore;
931 fwd_lc = (struct fwd_lcore *) fwd_arg;
/* stopped==1 makes the do/while loop in run_pkt_fwd_on_lcore execute
 * exactly one iteration: a single burst. */
933 tmp_lcore.stopped = 1;
934 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
939 * Launch packet forwarding:
940 * - Setup per-port forwarding context.
941 * - launch logical cores with their forwarding configuration.
944 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
946 port_fwd_begin_t port_fwd_begin;
951 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
952 if (port_fwd_begin != NULL) {
953 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
954 (*port_fwd_begin)(fwd_ports_ids[i]);
956 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
957 lc_id = fwd_lcores_cpuids[i];
/* In interactive mode the master lcore is skipped so it can keep
 * serving the CLI; otherwise it forwards too. */
958 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
959 fwd_lcores[i]->stopped = 0;
960 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
961 fwd_lcores[i], lc_id);
963 printf("launch lcore %u failed - diag=%d\n",
970 * Launch packet forwarding configuration.
/*
 * Start a forwarding run: validate preconditions (all ports started,
 * no run in progress, DCB constraints), optionally flush RX queues,
 * snapshot port stats as the baseline, zero all per-stream counters,
 * and launch the lcores — with an optional initial TXONLY burst when
 * with_tx_first is set. NOTE(review): several guard lines (returns,
 * dcb_test checks, test_done update) are missing from this extraction.
 */
973 start_packet_forwarding(int with_tx_first)
975 port_fwd_begin_t port_fwd_begin;
976 port_fwd_end_t port_fwd_end;
977 struct rte_port *port;
982 if (all_ports_started() == 0) {
983 printf("Not all ports were started\n");
986 if (test_done == 0) {
987 printf("Packet forwarding already started\n");
991 for (i = 0; i < nb_fwd_ports; i++) {
992 pt_id = fwd_ports_ids[i];
993 port = &ports[pt_id];
994 if (!port->dcb_flag) {
995 printf("In DCB mode, all forwarding ports must "
996 "be configured in this mode.\n");
1000 if (nb_fwd_lcores == 1) {
1001 printf("In DCB mode,the nb forwarding cores "
1002 "should be larger than 1.\n");
1009 flush_fwd_rx_queues();
1012 rxtx_config_display();
/* Snapshot current HW stats so the run reports deltas, not totals. */
1014 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1015 pt_id = fwd_ports_ids[i];
1016 port = &ports[pt_id];
1017 rte_eth_stats_get(pt_id, &port->stats);
1018 port->tx_dropped = 0;
1020 map_port_queue_stats_mapping_registers(pt_id, port);
1022 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1023 fwd_streams[sm_id]->rx_packets = 0;
1024 fwd_streams[sm_id]->tx_packets = 0;
1025 fwd_streams[sm_id]->fwd_dropped = 0;
1026 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1027 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1029 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1030 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1031 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1032 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1033 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1035 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1036 fwd_streams[sm_id]->core_cycles = 0;
1039 if (with_tx_first) {
/* Send one TXONLY burst on every core, wait for completion, then
 * fall through to launching the configured engine. */
1040 port_fwd_begin = tx_only_engine.port_fwd_begin;
1041 if (port_fwd_begin != NULL) {
1042 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1043 (*port_fwd_begin)(fwd_ports_ids[i]);
1045 launch_packet_forwarding(run_one_txonly_burst_on_core);
1046 rte_eal_mp_wait_lcore();
1047 port_fwd_end = tx_only_engine.port_fwd_end;
1048 if (port_fwd_end != NULL) {
1049 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1050 (*port_fwd_end)(fwd_ports_ids[i]);
1053 launch_packet_forwarding(start_pkt_forward_on_core);
/*
 * Stop a forwarding run: signal every lcore to stop, wait for them,
 * run the engine's per-port end callback, fold per-stream counters into
 * the per-port totals, then print per-port and accumulated statistics
 * (as deltas against the snapshot taken at start_packet_forwarding).
 * NOTE(review): many interior lines (returns, loop-variable decls,
 * total_recv/total_xmit/fwd_cycles initializations) are missing from
 * this extraction — confirm they exist before editing.
 */
1057 stop_packet_forwarding(void)
1059 struct rte_eth_stats stats;
1060 struct rte_port *port;
1061 port_fwd_end_t port_fwd_end;
1066 uint64_t total_recv;
1067 uint64_t total_xmit;
1068 uint64_t total_rx_dropped;
1069 uint64_t total_tx_dropped;
1070 uint64_t total_rx_nombuf;
1071 uint64_t tx_dropped;
1072 uint64_t rx_bad_ip_csum;
1073 uint64_t rx_bad_l4_csum;
1074 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1075 uint64_t fwd_cycles;
1077 static const char *acc_stats_border = "+++++++++++++++";
1079 if (all_ports_started() == 0) {
1080 printf("Not all ports were started\n");
1084 printf("Packet forwarding not started\n");
1087 printf("Telling cores to stop...");
1088 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1089 fwd_lcores[lc_id]->stopped = 1;
1090 printf("\nWaiting for lcores to finish...\n");
1091 rte_eal_mp_wait_lcore();
1092 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1093 if (port_fwd_end != NULL) {
1094 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1095 pt_id = fwd_ports_ids[i];
1096 (*port_fwd_end)(pt_id);
1099 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
/* Aggregate each stream's counters into its RX/TX ports. */
1102 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1103 if (cur_fwd_config.nb_fwd_streams >
1104 cur_fwd_config.nb_fwd_ports) {
1105 fwd_stream_stats_display(sm_id);
1106 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1107 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1109 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1111 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1114 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1115 tx_dropped = (uint64_t) (tx_dropped +
1116 fwd_streams[sm_id]->fwd_dropped);
1117 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1120 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1121 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1122 fwd_streams[sm_id]->rx_bad_ip_csum);
1123 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1127 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1128 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1129 fwd_streams[sm_id]->rx_bad_l4_csum);
1130 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1133 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1134 fwd_cycles = (uint64_t) (fwd_cycles +
1135 fwd_streams[sm_id]->core_cycles);
1140 total_rx_dropped = 0;
1141 total_tx_dropped = 0;
1142 total_rx_nombuf = 0;
1143 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1144 pt_id = fwd_ports_ids[i];
1146 port = &ports[pt_id];
/* Convert HW totals into per-run deltas and clear the baseline. */
1147 rte_eth_stats_get(pt_id, &stats);
1148 stats.ipackets -= port->stats.ipackets;
1149 port->stats.ipackets = 0;
1150 stats.opackets -= port->stats.opackets;
1151 port->stats.opackets = 0;
1152 stats.ibytes -= port->stats.ibytes;
1153 port->stats.ibytes = 0;
1154 stats.obytes -= port->stats.obytes;
1155 port->stats.obytes = 0;
1156 stats.ierrors -= port->stats.ierrors;
1157 port->stats.ierrors = 0;
1158 stats.oerrors -= port->stats.oerrors;
1159 port->stats.oerrors = 0;
1160 stats.rx_nombuf -= port->stats.rx_nombuf;
1161 port->stats.rx_nombuf = 0;
1162 stats.fdirmatch -= port->stats.fdirmatch;
/* BUG(review): copy-paste error — this should clear
 * port->stats.fdirmatch, not rx_nombuf again. */
1163 port->stats.rx_nombuf = 0;
1164 stats.fdirmiss -= port->stats.fdirmiss;
/* BUG(review): same copy-paste error — should clear
 * port->stats.fdirmiss. fdirmatch/fdirmiss baselines are never
 * reset, so subsequent runs over-subtract. Fixed upstream. */
1165 port->stats.rx_nombuf = 0;
1167 total_recv += stats.ipackets;
1168 total_xmit += stats.opackets;
1169 total_rx_dropped += stats.ierrors;
1170 total_tx_dropped += port->tx_dropped;
1171 total_rx_nombuf  += stats.rx_nombuf;
1173 fwd_port_stats_display(pt_id, &stats);
1175 printf("\n  %s Accumulated forward statistics for all ports"
1177 acc_stats_border, acc_stats_border);
1178 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1180 "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1182 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1183 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1184 if (total_rx_nombuf > 0)
1185 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1186 printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1188 acc_stats_border, acc_stats_border);
1189 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1191 printf("\n  CPU cycles/packet=%u (total cycles="
1192 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1193 (unsigned int)(fwd_cycles / total_recv),
1194 fwd_cycles, total_recv);
1196 printf("\nDone.\n");
/*
 * Return non-zero when every probed port is in RTE_PORT_STARTED state,
 * zero otherwise (the return statements are missing from this extraction).
 */
1201 all_ports_started(void)
1204 struct rte_port *port;
1206 for (pi = 0; pi < nb_ports; pi++) {
1208 /* Check if there is a port which is not started */
1209 if (port->port_status != RTE_PORT_STARTED)
1213 /* No port is not started */
/*
 * Configure (if needed) and start port `pid`, or every port when
 * pid == RTE_PORT_ALL.  Transitions each port STOPPED -> HANDLING ->
 * STARTED via atomic compare-and-set so concurrent state changes are
 * detected; on any failure the port is rolled back to STOPPED and the
 * relevant need_reconfig* flag is re-armed for a retry on the next call.
 * NOTE(review): numbered listing with elided lines; braces, `continue`s
 * and return statements are missing from this excerpt.
 */
1218 start_port(portid_t pid)
1220 int diag, need_check_link_status = 0;
1223 struct rte_port *port;
1225 if (test_done == 0) {
1226 printf("Please stop forwarding first\n");
1230 if (init_fwd_streams() < 0) {
1231 printf("Fail from init_fwd_streams()\n");
1237 for (pi = 0; pi < nb_ports; pi++) {
/* pid < nb_ports means a single specific port was requested */
1238 if (pid < nb_ports && pid != pi)
1242 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1243 RTE_PORT_HANDLING) == 0) {
1244 printf("Port %d is now not stopped\n", pi);
1248 if (port->need_reconfig > 0) {
1249 port->need_reconfig = 0;
1251 printf("Configuring Port %d (socket %d)\n", pi,
1253 /* configure port */
1254 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1257 if (rte_atomic16_cmpset(&(port->port_status),
1258 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1259 printf("Port %d can not be set back "
1260 "to stopped\n", pi);
1261 printf("Fail to configure port %d\n", pi);
1262 /* try to reconfigure port next time */
1263 port->need_reconfig = 1;
1267 if (port->need_reconfig_queues > 0) {
1268 port->need_reconfig_queues = 0;
1269 /* setup tx queues */
1270 for (qi = 0; qi < nb_txq; qi++) {
/* honor an explicit per-port TX-ring NUMA override when configured */
1271 if ((numa_support) &&
1272 (txring_numa[pi] != NUMA_NO_CONFIG))
1273 diag = rte_eth_tx_queue_setup(pi, qi,
1274 nb_txd,txring_numa[pi],
1277 diag = rte_eth_tx_queue_setup(pi, qi,
1278 nb_txd,port->socket_id,
1284 /* Fail to setup tx queue, return */
1285 if (rte_atomic16_cmpset(&(port->port_status),
1287 RTE_PORT_STOPPED) == 0)
1288 printf("Port %d can not be set back "
1289 "to stopped\n", pi);
1290 printf("Fail to configure port %d tx queues\n", pi);
1291 /* try to reconfigure queues next time */
1292 port->need_reconfig_queues = 1;
1295 /* setup rx queues */
1296 for (qi = 0; qi < nb_rxq; qi++) {
1297 if ((numa_support) &&
1298 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
/* RX queue needs an mbuf pool on its configured socket */
1299 struct rte_mempool * mp =
1300 mbuf_pool_find(rxring_numa[pi]);
1302 printf("Failed to setup RX queue:"
1303 "No mempool allocation"
1304 "on the socket %d\n",
1309 diag = rte_eth_rx_queue_setup(pi, qi,
1310 nb_rxd,rxring_numa[pi],
1311 &(port->rx_conf),mp);
1314 diag = rte_eth_rx_queue_setup(pi, qi,
1315 nb_rxd,port->socket_id,
1317 mbuf_pool_find(port->socket_id));
1323 /* Fail to setup rx queue, return */
1324 if (rte_atomic16_cmpset(&(port->port_status),
1326 RTE_PORT_STOPPED) == 0)
1327 printf("Port %d can not be set back "
1328 "to stopped\n", pi);
1329 printf("Fail to configure port %d rx queues\n", pi);
1330 /* try to reconfigure queues next time */
1331 port->need_reconfig_queues = 1;
1336 if (rte_eth_dev_start(pi) < 0) {
1337 printf("Fail to start port %d\n", pi);
1339 /* Fail to start port, roll state back to stopped and return */
1340 if (rte_atomic16_cmpset(&(port->port_status),
1341 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1342 printf("Port %d can not be set back to "
1347 if (rte_atomic16_cmpset(&(port->port_status),
1348 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1349 printf("Port %d can not be set into started\n", pi);
1351 /* at least one port started, need checking link status */
1352 need_check_link_status = 1;
1355 if (need_check_link_status)
1356 check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1358 printf("Please stop the ports first\n");
/*
 * Stop port `pid`, or every port when pid == RTE_PORT_ALL.
 * Each port is moved STARTED -> HANDLING -> STOPPED with atomic
 * compare-and-set; ports not currently STARTED are skipped.
 * NOTE(review): numbered listing with elided lines; declarations,
 * `continue`s and closing braces are missing from this excerpt.
 */
1365 stop_port(portid_t pid)
1368 struct rte_port *port;
1369 int need_check_link_status = 0;
1371 if (test_done == 0) {
1372 printf("Please stop forwarding first\n");
1379 printf("Stopping ports...\n");
1381 for (pi = 0; pi < nb_ports; pi++) {
/* pid < nb_ports means a single specific port was requested */
1382 if (pid < nb_ports && pid != pi)
1386 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1387 RTE_PORT_HANDLING) == 0)
1390 rte_eth_dev_stop(pi);
1392 if (rte_atomic16_cmpset(&(port->port_status),
1393 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1394 printf("Port %d can not be set into stopped\n", pi);
1395 need_check_link_status = 1;
1397 if (need_check_link_status)
1398 check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
/*
 * Close port `pid`, or every port when pid == RTE_PORT_ALL.
 * Each port is moved STOPPED -> HANDLING -> CLOSED with atomic
 * compare-and-set; ports that are not STOPPED are reported and skipped.
 * NOTE(review): numbered listing with elided lines; declarations,
 * `continue`s and closing braces are missing from this excerpt.
 */
1404 close_port(portid_t pid)
1407 struct rte_port *port;
1409 if (test_done == 0) {
1410 printf("Please stop forwarding first\n");
1414 printf("Closing ports...\n");
1416 for (pi = 0; pi < nb_ports; pi++) {
1417 if (pid < nb_ports && pid != pi)
1421 if (rte_atomic16_cmpset(&(port->port_status),
1422 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1423 printf("Port %d is now not stopped\n", pi);
1427 rte_eth_dev_close(pi);
1429 if (rte_atomic16_cmpset(&(port->port_status),
/* BUGFIX: message previously said "stopped" on the close path */
1430 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1431 printf("Port %d can not be set into closed\n", pi);
/*
 * Return whether every known port is in the RTE_PORT_STOPPED state.
 * NOTE(review): numbered listing with elided lines; declarations, the
 * per-iteration port lookup and return statements are missing here.
 */
1438 all_ports_stopped(void)
1441 struct rte_port *port;
1443 for (pi = 0; pi < nb_ports; pi++) {
1445 if (port->port_status != RTE_PORT_STOPPED)
/* Fragment of the exit path (enclosing function header elided from this
 * listing): closes every probed port before the application terminates. */
1457 for (pt_id = 0; pt_id < nb_ports; pt_id++) {
1458 printf("Stopping port %d...", pt_id);
1460 rte_eth_dev_close(pt_id);
/* A named test command: menu entry name plus the handler invoked for it. */
1466 typedef void (*cmd_func_t)(void);
1467 struct pmd_test_command {
1468 const char *cmd_name;
1469 cmd_func_t cmd_func;
/* Number of entries in the pmd_test_menu table (defined elsewhere). */
1472 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1474 /* Check the link status of all ports in up to 9s, and print them finally */
1476 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
1478 #define CHECK_INTERVAL 100 /* 100ms */
1479 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1480 uint8_t portid, count, all_ports_up, print_flag = 0;
1481 struct rte_eth_link link;
1483 printf("Checking link statuses...\n");
1485 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1487 for (portid = 0; portid < port_num; portid++) {
1488 if ((port_mask & (1 << portid)) == 0)
1490 memset(&link, 0, sizeof(link));
1491 rte_eth_link_get_nowait(portid, &link);
1492 /* print link status if flag set */
1493 if (print_flag == 1) {
1494 if (link.link_status)
1495 printf("Port %d Link Up - speed %u "
1496 "Mbps - %s\n", (uint8_t)portid,
1497 (unsigned)link.link_speed,
1498 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
/* BUGFIX: "half-duplex" carried a stray '\n'; the format string above
 * already ends in '\n', which printed a blank line for half-duplex links. */
1499 ("full-duplex") : ("half-duplex"));
1501 printf("Port %d Link Down\n",
1505 /* clear all_ports_up flag if any link down */
1506 if (link.link_status == 0) {
1511 /* after finally printing all link status, get out */
1512 if (print_flag == 1)
1515 if (all_ports_up == 0) {
1517 rte_delay_ms(CHECK_INTERVAL);
1520 /* set the print_flag if all ports up or timeout */
1521 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
/*
 * Program the TX queue -> stats-counter mapping registers of `port_id`
 * from the global tx_queue_stats_mappings table, and mark the port's
 * tx_queue_stats_mapping_enabled flag when at least one entry matched.
 * NOTE(review): numbered listing with elided lines; declarations,
 * error-return handling and closing braces are missing from this excerpt.
 */
1528 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1532 uint8_t mapping_found = 0;
1534 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1535 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1536 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1537 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1538 tx_queue_stats_mappings[i].queue_id,
1539 tx_queue_stats_mappings[i].stats_counter_id);
1546 port->tx_queue_stats_mapping_enabled = 1;
/*
 * RX-side counterpart of set_tx_queue_stats_mapping_registers(): program
 * RX queue -> stats-counter mappings from rx_queue_stats_mappings and set
 * rx_queue_stats_mapping_enabled when at least one entry matched.
 * NOTE(review): numbered listing with elided lines; declarations,
 * error-return handling and closing braces are missing from this excerpt.
 */
1551 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1555 uint8_t mapping_found = 0;
1557 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1558 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1559 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1560 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1561 rx_queue_stats_mappings[i].queue_id,
1562 rx_queue_stats_mappings[i].stats_counter_id);
1569 port->rx_queue_stats_mapping_enabled = 1;
/*
 * Apply both TX and RX queue stats mappings to port `pi`.
 * -ENOTSUP from the driver is tolerated (feature simply disabled for the
 * port); any other failure aborts the application via rte_exit().
 * NOTE(review): numbered listing with elided lines; some arguments and
 * closing braces are missing from this excerpt.
 */
1574 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1578 diag = set_tx_queue_stats_mapping_registers(pi, port);
1580 if (diag == -ENOTSUP) {
1581 port->tx_queue_stats_mapping_enabled = 0;
1582 printf("TX queue stats mapping not supported port id=%d\n", pi);
1585 rte_exit(EXIT_FAILURE,
1586 "set_tx_queue_stats_mapping_registers "
1587 "failed for port id=%d diag=%d\n",
1591 diag = set_rx_queue_stats_mapping_registers(pi, port);
1593 if (diag == -ENOTSUP) {
1594 port->rx_queue_stats_mapping_enabled = 0;
1595 printf("RX queue stats mapping not supported port id=%d\n", pi);
1598 rte_exit(EXIT_FAILURE,
1599 "set_rx_queue_stats_mapping_registers "
1600 "failed for port id=%d diag=%d\n",
/*
 * Populate the default per-port configuration (RX mode, flow-director,
 * RSS, thresholds, TX queue flags), fetch the port MAC address and apply
 * queue stats mappings for every probed port.
 * NOTE(review): numbered listing with elided lines; the port lookup,
 * conditional branches around the RSS key setup and closing braces are
 * missing from this excerpt.
 */
1606 init_port_config(void)
1609 struct rte_port *port;
1611 for (pid = 0; pid < nb_ports; pid++) {
1613 port->dev_conf.rxmode = rx_mode;
1614 port->dev_conf.fdir_conf = fdir_conf;
/* first branch: more than one RX queue -> enable the configured RSS hash */
1616 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1617 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1619 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1620 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1623 /* In SR-IOV mode, RSS mode is not available */
1624 if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1625 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1626 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1628 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
/* per-queue thresholds come from the testpmd command-line/globals */
1631 port->rx_conf.rx_thresh = rx_thresh;
1632 port->rx_conf.rx_free_thresh = rx_free_thresh;
1633 port->rx_conf.rx_drop_en = rx_drop_en;
1634 port->tx_conf.tx_thresh = tx_thresh;
1635 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1636 port->tx_conf.tx_free_thresh = tx_free_thresh;
1637 port->tx_conf.txq_flags = txq_flags;
1639 rte_eth_macaddr_get(pid, &port->eth_addr);
1641 map_port_queue_stats_mapping_registers(pid, port);
1642 #ifdef RTE_NIC_BYPASS
1643 rte_eth_dev_bypass_init(pid);
/* VLAN tags used to populate the VMDQ+DCB pool map in get_eth_dcb_conf();
 * 32 entries, one per potential VMDQ pool. */
1648 const uint16_t vlan_tags[] = {
1649 0, 1, 2, 3, 4, 5, 6, 7,
1650 8, 9, 10, 11, 12, 13, 14, 15,
1651 16, 17, 18, 19, 20, 21, 22, 23,
1652 24, 25, 26, 27, 28, 29, 30, 31
/*
 * Fill *eth_conf for DCB operation: VMDQ+DCB (dcb_mode == DCB_VT_ENABLED)
 * or plain DCB, with optional PFC capability, using the vlan_tags table
 * above for the pool map.
 * BUGFIX: four rte_memcpy() destinations were mis-encoded as "ð_conf"
 * (an HTML-entity collapse of "&eth_conf"); restored to &eth_conf->...
 * NOTE(review): numbered listing with elided lines; declarations, `else`
 * keywords and closing braces are missing from this excerpt.
 */
1656 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
1661 * Builds up the correct configuration for dcb+vt based on the vlan tags array
1662 * given above, and the number of traffic classes available for use.
1664 if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
1665 struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
1666 struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
1668 /* VMDQ+DCB RX and TX configurations */
1669 vmdq_rx_conf.enable_default_pool = 0;
1670 vmdq_rx_conf.default_pool = 0;
1671 vmdq_rx_conf.nb_queue_pools =
1672 (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1673 vmdq_tx_conf.nb_queue_pools =
1674 (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1676 vmdq_rx_conf.nb_pool_maps = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1677 for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
1678 vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[ i ];
1679 vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
1681 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1682 vmdq_rx_conf.dcb_queue[i] = i;
1683 vmdq_tx_conf.dcb_queue[i] = i;
1686 /*set DCB mode of RX and TX of multiple queues*/
1687 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1688 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1689 if (dcb_conf->pfc_en)
1690 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1692 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1694 (void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
1695 sizeof(struct rte_eth_vmdq_dcb_conf)));
1696 (void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
1697 sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
1700 struct rte_eth_dcb_rx_conf rx_conf;
1701 struct rte_eth_dcb_tx_conf tx_conf;
1703 /* queue mapping configuration of DCB RX and TX */
1704 if (dcb_conf->num_tcs == ETH_4_TCS)
1705 dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
1707 dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
1709 rx_conf.nb_tcs = dcb_conf->num_tcs;
1710 tx_conf.nb_tcs = dcb_conf->num_tcs;
1712 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
1713 rx_conf.dcb_queue[i] = i;
1714 tx_conf.dcb_queue[i] = i;
1716 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
1717 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1718 if (dcb_conf->pfc_en)
1719 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1721 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1723 (void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
1724 sizeof(struct rte_eth_dcb_rx_conf)));
1725 (void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
1726 sizeof(struct rte_eth_dcb_tx_conf)));
/*
 * Configure port `pid` for DCB operation: build the DCB rte_eth_conf via
 * get_eth_dcb_conf(), copy it into the port, apply thresholds, enable HW
 * VLAN filtering for all vlan_tags entries, and flag the port as DCB.
 * NOTE(review): numbered listing with elided lines; declarations, the
 * retval error check and return statements are missing from this excerpt.
 */
1733 init_port_dcb_config(portid_t pid,struct dcb_config *dcb_conf)
1735 struct rte_eth_conf port_conf;
1736 struct rte_port *rte_port;
1741 /* rxq and txq configuration in dcb mode */
1744 rx_free_thresh = 64;
1746 memset(&port_conf,0,sizeof(struct rte_eth_conf));
1747 /* Enter DCB configuration status */
1750 nb_vlan = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1751 /*set configuration of DCB in vt mode and DCB in non-vt mode*/
1752 retval = get_eth_dcb_conf(&port_conf, dcb_conf);
1756 rte_port = &ports[pid];
1757 memcpy(&rte_port->dev_conf, &port_conf,sizeof(struct rte_eth_conf));
1759 rte_port->rx_conf.rx_thresh = rx_thresh;
1760 rte_port->rx_conf.rx_free_thresh = rx_free_thresh;
1761 rte_port->tx_conf.tx_thresh = tx_thresh;
1762 rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1763 rte_port->tx_conf.tx_free_thresh = tx_free_thresh;
/* VLAN filter must be on so the DCB/VMDQ vlan_tags take effect */
1765 rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1766 for (i = 0; i < nb_vlan; i++){
1767 rx_vft_set(pid, vlan_tags[i], 1);
1770 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1771 map_port_queue_stats_mapping_registers(pid, rte_port);
1773 rte_port->dcb_flag = 1;
1778 #ifdef RTE_EXEC_ENV_BAREMETAL
1783 main(int argc, char** argv)
1788 diag = rte_eal_init(argc, argv);
1790 rte_panic("Cannot init EAL\n");
1792 if (rte_pmd_init_all())
1793 rte_panic("Cannot init PMD\n");
1795 if (rte_eal_pci_probe())
1796 rte_panic("Cannot probe PCI\n");
1798 nb_ports = (portid_t) rte_eth_dev_count();
1800 rte_exit(EXIT_FAILURE, "No probed ethernet devices - "
1802 "CONFIG_RTE_LIBRTE_IGB_PMD=y and that "
1803 "CONFIG_RTE_LIBRTE_EM_PMD=y and that "
1804 "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in your "
1805 "configuration file\n");
1807 set_def_fwd_config();
1809 rte_panic("Empty set of forwarding logical cores - check the "
1810 "core mask supplied in the command parameters\n");
1815 launch_args_parse(argc, argv);
1817 if (nb_rxq > nb_txq)
1818 printf("Warning: nb_rxq=%d enables RSS configuration, "
1819 "but nb_txq=%d will prevent to fully test it.\n",
1823 if (start_port(RTE_PORT_ALL) != 0)
1824 rte_exit(EXIT_FAILURE, "Start ports failed\n");
1826 /* set all ports to promiscuous mode by default */
1827 for (port_id = 0; port_id < nb_ports; port_id++)
1828 rte_eth_promiscuous_enable(port_id);
1830 #ifdef RTE_LIBRTE_CMDLINE
1831 if (interactive == 1)
1839 printf("No commandline core given, start packet forwarding\n");
1840 start_packet_forwarding(0);
1841 printf("Press enter to exit\n");
1842 rc = read(0, &c, 1);