/*
 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <string.h>
#include <strings.h>
#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <sys/types.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_PMD_XENVIRT
#include <rte_eth_xenvirt.h>
#endif

#include "testpmd.h"
#include "mempool_osdep.h"
uint16_t verbose_level = 0; /**< Silent by default. */

/* Use the master core for the command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports, among CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 0; /**< No NUMA support by default. */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not set.
 */
uint8_t socket_num = UMA_NO_CONFIG;
/*
 * Use ANONYMOUS mapped memory (which may not be physically contiguous)
 * for mbufs.
 */
uint8_t mp_anon = 0;
/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;
/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed Ethernet ports. */
portid_t nb_ports;             /**< Number of probed Ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_retry_fwd_engine,
	&tx_only_engine,
	&csum_fwd_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN, /* the whole packet in one segment by default */
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
/* Whether the current configuration is in DCB mode; 0 means not in DCB mode. */
uint8_t dcb_config = 0;

/* Whether DCB is in testing status. */
uint8_t dcb_test = 0;

/* DCB on and VT on mapping is the default. */
enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
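/*
 * Note: both defaults can be overridden from the command line; testpmd
 * accepts --rxd=N and --txd=N to size the RX and TX rings at startup,
 * e.g. "testpmd -c 0xf -n 4 -- --rxd=512 --txd=512".
 */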
#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */
int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX queue flags.
 */
int32_t txq_flags = RTE_PMD_PARAM_UNSET;
/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration.
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default. */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* Flush by default. */

/*
 * Avoid checking link status when starting/stopping ports.
 */
uint8_t no_link_check = 0; /* Check by default. */
/*
 * NIC bypass mode configuration options.
 */
#ifdef RTE_NIC_BYPASS

/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;

#endif
/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.split_hdr_size = 0,
	.header_split   = 0, /**< Header Split disabled. */
	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
	.hw_vlan_strip  = 1, /**< VLAN stripping enabled. */
	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
};
struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0x0,
		.ipv4_mask = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
	},
	.drop_queue = 127,
};
volatile int test_done = 1; /* nonzero while packet forwarding is stopped */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;
/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);

/*
 * Check if all the ports are started.
 * If yes, return a positive value. If not, return zero.
 */
static int all_ports_started(void);
/*
 * Find the next enabled port.
 */
portid_t
find_next_port(portid_t p, struct rte_port *ports, int size)
{
	if (ports == NULL)
		rte_exit(-EINVAL, "failed to find a next port id\n");

	while ((p < size) && (ports[p].enabled == 0))
		p++;
	return p;
}
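/*
 * For illustration: the FOREACH_PORT() macro used throughout this file is
 * assumed (it lives in testpmd.h, which is not shown here) to be a thin
 * wrapper around find_next_port(), along the lines of:
 *
 *   #define FOREACH_PORT(p, ports) \
 *           for (p = find_next_port(0, ports, RTE_MAX_ETHPORTS); \
 *                p < RTE_MAX_ETHPORTS; \
 *                p = find_next_port(p + 1, ports, RTE_MAX_ETHPORTS))
 *
 * i.e. it iterates only over ports whose 'enabled' flag is set.
 */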
/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}
static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;

	for (pt_id = 0; pt_id < nb_ports; pt_id++)
		fwd_ports_ids[pt_id] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
/*
 * Configuration initialisation done once at init time.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

#ifdef RTE_LIBRTE_PMD_XENVIRT
	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
		(unsigned) mb_mempool_cache,
		sizeof(struct rte_pktmbuf_pool_private),
		rte_pktmbuf_pool_init, NULL,
		rte_pktmbuf_init, NULL,
		socket_id, 0);
#else
	if (mp_anon != 0)
		rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
			(unsigned) mb_mempool_cache,
			sizeof(struct rte_pktmbuf_pool_private),
			rte_pktmbuf_pool_init, NULL,
			rte_pktmbuf_init, NULL,
			socket_id, 0);
	else
		/* wrapper to rte_mempool_create() */
		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
#endif

	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
			"failed\n", socket_id);
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}
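/*
 * Usage note: each pool is named per socket by mbuf_poolname_build()
 * (declared in testpmd.h), so a pool can later be retrieved with
 * mbuf_pool_find(socket_id). init_config() below calls mbuf_pool_create()
 * once per CPU socket in NUMA mode, and once for socket 0 (or the
 * --socket-num socket) in UMA mode.
 */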
/*
 * Check if the given socket id is valid in NUMA mode.
 * Return 0 if valid, -1 otherwise.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (socket_id >= MAX_SOCKET) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t lc_id;
	uint8_t port_per_socket[MAX_SOCKET];

	memset(port_per_socket, 0, MAX_SOCKET);
	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				 sizeof(struct fwd_lcore *) * nb_lcores,
				 RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
			 "failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
						sizeof(struct fwd_lcore),
						RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
				 "failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
				 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;

		if (!numa_support)
			nb_mbuf_per_pool =
				(nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
	}
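	/*
	 * Worked example of the sizing above, assuming the usual testpmd.h
	 * values of RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX = 2048,
	 * MAX_PKT_BURST = 512 and DEF_MBUF_CACHE = 250 (assumptions; check
	 * the header): with 4 forwarding lcores one pool holds
	 * 2048 + 4 * 250 + 2048 + 512 = 5608 mbufs, and in non-NUMA mode
	 * that is further multiplied by RTE_MAX_ETHPORTS so a single pool
	 * can serve every port.
	 */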
	if (!numa_support) {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_num);
	}

	/* Configuration of Ethernet ports. */
	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	if (numa_support) {
		uint8_t i;
		unsigned int nb_mbuf;

		if (param_total_num_mbufs)
			nb_mbuf_per_pool = nb_mbuf_per_pool / nb_ports;

		for (i = 0; i < MAX_SOCKET; i++) {
			nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
			if (nb_mbuf)
				mbuf_pool_create(mbuf_data_size, nb_mbuf, i);
		}
	}

	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
}
void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	struct rte_port *port;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];
	rte_eth_dev_info_get(new_port_id, &port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;

	init_port_config();
}
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;

	/* set socket id according to numa or not */
	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
			 "failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
				 " failed\n");
	}

	return 0;
}
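/*
 * Example: with 2 probed ports and nb_rxq = 4, init_fwd_streams() allocates
 * nb_ports * nb_rxq = 8 forwarding streams, one per (port, RX queue) pair.
 */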
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
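/*
 * For illustration, the display above produces lines of the form (values
 * hypothetical):
 *
 *   RX-bursts : 1024 [75% of 32 pkts + 20% of 16 pkts + 5% of others]
 *
 * i.e. the total number of bursts followed by the two most frequent burst
 * sizes and a catch-all percentage.
 */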
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) &&
	    (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
			printf("  RX-badcrc:  %-14"PRIu64" RX-badlen:  %-14"PRIu64
			       "RX-error: %-"PRIu64"\n",
			       stats->ibadcrc, stats->ibadlen, stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	} else {
		printf("  RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
			printf("  RX-badcrc: %14"PRIu64" RX-badlen: %14"PRIu64
			       " RX-error:%"PRIu64"\n",
			       stats->ibadcrc, stats->ibadlen, stats->ierrors);
			printf("  RX-nombufs: %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

	/* Display statistics of XON/XOFF pause frames, if any. */
	if ((stats->tx_pause_xon | stats->rx_pause_xon |
	     stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) {
		printf("  RX-XOFF:    %-14"PRIu64" RX-XON:     %-14"PRIu64"\n",
		       stats->rx_pause_xoff, stats->rx_pause_xon);
		printf("  TX-XOFF:    %-14"PRIu64" TX-XON:     %-14"PRIu64"\n",
		       stats->tx_pause_xoff, stats->tx_pause_xon);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif
	if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
		printf("  Fdirmiss:%14"PRIu64"    Fdirmatch:%14"PRIu64"\n",
		       stats->fdirmiss, stats->fdirmatch);

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i],
			       stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "    TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14u  RX- bad L4 checksum: "
		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}
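/*
 * Drain any packets still sitting in the RX queues of the forwarding ports
 * before a forwarding run starts, so that leftover traffic does not distort
 * the statistics. Skipped when --no-flush-rx is given.
 */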
static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t  port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);
				} while (nb_rx > 0);
			}
		}
		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
	}
}
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;

	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
	} while (!fc->stopped);
}

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}
/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}
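/*
 * Note on the single-burst trick above: because tmp_lcore.stopped is set to
 * 1 before run_pkt_fwd_on_lcore() is called, its do { ... } while (!stopped)
 * loop runs exactly one iteration, so every stream of the lcore transmits
 * one burst and the function returns.
 */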
/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - Launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}
/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t pt_id;
	streamid_t sm_id;

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}
	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
			       "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	fwd_config_setup();
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		launch_packet_forwarding(run_one_txonly_burst_on_core);
		rte_eal_mp_wait_lcore();
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
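/*
 * Usage note: start_packet_forwarding(0) corresponds to the interactive
 * "start" command, while start_packet_forwarding(1) corresponds to
 * "start tx_first", which first sends one burst from every port so that
 * traffic keeps circulating in loopback setups.
 */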
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif
	static const char *acc_stats_border = "+++++++++++++++";

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					     fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
			rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					     fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
			rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;
		stats.fdirmatch -= port->stats.fdirmatch;
		port->stats.fdirmatch = 0;
		stats.fdirmiss -= port->stats.fdirmiss;
		port->stats.fdirmiss = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}
	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}
void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
		printf("\nSet link up fail.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
		printf("\nSet link down fail.\n");
}
static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	FOREACH_PORT(pi, ports) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if (port->port_status != RTE_PORT_STARTED)
			return 0;
	}

	/* No port is not started */
	return 1;
}

int
all_ports_stopped(void)
{
	portid_t pi;
	struct rte_port *port;

	FOREACH_PORT(pi, ports) {
		port = &ports[pi];
		if (port->port_status != RTE_PORT_STOPPED)
			return 0;
	}

	return 1;
}

int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;

	return 1;
}

static int
port_is_closed(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_CLOSED)
		return 0;

	return 1;
}
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct ether_addr mac_addr;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return -1;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return -1;
	}

	if (dcb_config)
		dcb_test = 1;
	FOREACH_PORT(pi, ports) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
					RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			printf("Configuring Port %d (socket %u)\n", pi,
			       port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						     &(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				    RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
					       "to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				if ((numa_support) &&
				    (txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, txring_numa[pi],
						&(port->tx_conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, port->socket_id,
						&(port->tx_conf));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
					       "to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup rx queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				if ((numa_support) &&
				    (rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool *mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
						       " No mempool allocation"
						       " on the socket %d\n",
						       rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd, rxring_numa[pi],
						&(port->rx_conf), mp);
				} else
					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd, port->socket_id,
						&(port->rx_conf),
						mbuf_pool_find(port->socket_id));

				if (diag == 0)
					continue;

				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
					       "to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}
		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Fail to setup rx queue, return */
			if (rte_atomic16_cmpset(&(port->port_status),
			    RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
				       "stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
		    RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d can not be set into started\n", pi);

		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
		       mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
		       mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
		       mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return;
	}
	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	FOREACH_PORT(pi, ports) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
					RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
		    RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	FOREACH_PORT(pi, ports) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status),
		    RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		rte_eth_dev_close(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
		    RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d can not be set into closed\n", pi);
	}

	printf("Done\n");
}
int
attach_port(char *identifier)
{
	portid_t i, j, pi = 0;

	printf("Attaching a new port...\n");

	if (identifier == NULL) {
		printf("Invalid parameters are specified\n");
		return -1;
	}

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return -1;
	}

	if (rte_eth_dev_attach(identifier, &pi))
		return -1;

	ports[pi].enabled = 1;
	reconfig(pi, rte_eth_dev_socket_id(pi));
	rte_eth_promiscuous_enable(pi);

	nb_ports = rte_eth_dev_count();

	/* set_default_fwd_ports_config(); */
	bzero(fwd_ports_ids, sizeof(fwd_ports_ids));
	i = 0;
	FOREACH_PORT(j, ports) {
		fwd_ports_ids[i] = j;
		i++;
	}
	nb_cfg_ports = nb_ports;
	nb_fwd_ports++;

	ports[pi].port_status = RTE_PORT_STOPPED;

	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
	return 0;
}
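/*
 * Usage note: attach_port() backs the interactive "port attach <identifier>"
 * command, where the identifier is a PCI address (e.g. 0000:02:00.0) or a
 * virtual device name; detach_port() below is its "port detach <port_id>"
 * counterpart and requires the port to be closed first.
 */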
int
detach_port(uint8_t port_id)
{
	portid_t i, pi = 0;
	char name[RTE_ETH_NAME_MAX_LEN];

	printf("Detaching a port...\n");

	if (!port_is_closed(port_id)) {
		printf("Please close port first\n");
		return -1;
	}

	rte_eth_promiscuous_disable(port_id);

	if (rte_eth_dev_detach(port_id, name))
		return -1;

	ports[port_id].enabled = 0;
	nb_ports = rte_eth_dev_count();

	/* set_default_fwd_ports_config(); */
	bzero(fwd_ports_ids, sizeof(fwd_ports_ids));
	i = 0;
	FOREACH_PORT(pi, ports) {
		fwd_ports_ids[i] = pi;
		i++;
	}
	nb_cfg_ports = nb_ports;
	nb_fwd_ports--;

	printf("Port '%s' is detached. Now total ports is %d\n",
	       name, nb_ports);
	return 0;
}
void
pmd_test_exit(void)
{
	portid_t pt_id;

	if (test_done == 0)
		stop_packet_forwarding();

	FOREACH_PORT(pt_id, ports) {
		printf("Stopping port %d...", pt_id);
		fflush(stdout);
		rte_eth_dev_close(pt_id);
		printf(" done\n");
	}
	printf("Bye...\n");
}

typedef void (*cmd_func_t)(void);
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint8_t portid, count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("Checking link statuses...\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		FOREACH_PORT(portid, ports) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up - speed %u "
					       "Mbps - %s\n", (uint8_t)portid,
					       (unsigned)link.link_speed,
					       (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					       ("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n",
					       (uint8_t)portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == 0) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1))
			print_flag = 1;
	}
}
static int
set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
		    (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
					tx_queue_stats_mappings[i].queue_id,
					tx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->tx_queue_stats_mapping_enabled = 1;
	return 0;
}

static int
set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
		    (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
					rx_queue_stats_mappings[i].queue_id,
					rx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->rx_queue_stats_mapping_enabled = 1;
	return 0;
}
static void
map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
{
	int diag = 0;

	diag = set_tx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->tx_queue_stats_mapping_enabled = 0;
			printf("TX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
				 "set_tx_queue_stats_mapping_registers "
				 "failed for port id=%d diag=%d\n",
				 pi, diag);
	}

	diag = set_rx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->rx_queue_stats_mapping_enabled = 0;
			printf("RX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
				 "set_rx_queue_stats_mapping_registers "
				 "failed for port id=%d diag=%d\n",
				 pi, diag);
	}
}
static void
rxtx_port_config(struct rte_port *port)
{
	port->rx_conf = port->dev_info.default_rxconf;
	port->tx_conf = port->dev_info.default_txconf;

	/* Check if any RX/TX parameters have been passed */
	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.pthresh = rx_pthresh;

	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.hthresh = rx_hthresh;

	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.wthresh = rx_wthresh;

	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_free_thresh = rx_free_thresh;

	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_drop_en = rx_drop_en;

	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.pthresh = tx_pthresh;

	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.hthresh = tx_hthresh;

	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.wthresh = tx_wthresh;

	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_rs_thresh = tx_rs_thresh;

	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_free_thresh = tx_free_thresh;

	if (txq_flags != RTE_PMD_PARAM_UNSET)
		port->tx_conf.txq_flags = txq_flags;
}
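/*
 * Usage note: the RTE_PMD_PARAM_UNSET sentinels above are overwritten by
 * testpmd command-line options such as --rxpt/--rxht/--rxwt,
 * --txpt/--txht/--txwt, --rxfreet, --txfreet, --txrst and --txqflags
 * (parsed in parameters.c); a value the user never sets falls back to the
 * PMD's default_rxconf/default_txconf.
 */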
void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;

	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		port->dev_conf.rxmode = rx_mode;
		port->dev_conf.fdir_conf = fdir_conf;
		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
			else
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
		}

		if (port->dev_info.max_vfs != 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode =
					ETH_MQ_RX_VMDQ_RSS;
			else
				port->dev_conf.rxmode.mq_mode =
					ETH_MQ_RX_NONE;

			port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
		}

		rxtx_port_config(port);

		rte_eth_macaddr_get(pid, &port->eth_addr);

		map_port_queue_stats_mapping_registers(pid, port);
#ifdef RTE_NIC_BYPASS
		rte_eth_dev_bypass_init(pid);
#endif
	}
}
const uint16_t vlan_tags[] = {
	 0,  1,  2,  3,  4,  5,  6,  7,
	 8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};
static int
get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
{
	uint8_t i;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
	 * given above, and the number of traffic classes available for use.
	 */
	if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
		struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf.enable_default_pool = 0;
		vmdq_rx_conf.default_pool = 0;
		vmdq_rx_conf.nb_queue_pools =
			(dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf.nb_queue_pools =
			(dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		vmdq_rx_conf.nb_pool_maps = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
		for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
			vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
		}
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf.dcb_queue[i] = i;
			vmdq_tx_conf.dcb_queue[i] = i;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
		if (dcb_conf->pfc_en)
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
		else
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

		(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
				  sizeof(struct rte_eth_vmdq_dcb_conf)));
		(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
				  sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
	} else {
		struct rte_eth_dcb_rx_conf rx_conf;
		struct rte_eth_dcb_tx_conf tx_conf;

		/* queue mapping configuration of DCB RX and TX */
		if (dcb_conf->num_tcs == ETH_4_TCS)
			dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
		else
			dcb_q_mapping = DCB_8_TCS_Q_MAPPING;

		rx_conf.nb_tcs = dcb_conf->num_tcs;
		tx_conf.nb_tcs = dcb_conf->num_tcs;

		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf.dcb_queue[i] = i;
			tx_conf.dcb_queue[i] = i;
		}
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
		if (dcb_conf->pfc_en)
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
		else
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

		(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
				  sizeof(struct rte_eth_dcb_rx_conf)));
		(void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
				  sizeof(struct rte_eth_dcb_tx_conf)));
	}

	return 0;
}
int
init_port_dcb_config(portid_t pid, struct dcb_config *dcb_conf)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t nb_vlan;
	uint16_t i;

	/* rxq and txq configuration in dcb mode */
	nb_rxq = 128;
	nb_txq = 128;
	rx_free_thresh = 64;

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	nb_vlan = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
	/* set configuration of DCB in vt mode and DCB in non-vt mode */
	retval = get_eth_dcb_conf(&port_conf, dcb_conf);
	if (retval < 0)
		return retval;

	rte_port = &ports[pid];
	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
	for (i = 0; i < nb_vlan; i++) {
		rx_vft_set(pid, vlan_tags[i], 1);
	}

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}
static void
init_port(void)
{
	portid_t pid;

	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
			    RTE_CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE,
			 "rte_zmalloc(%d struct rte_port) failed\n",
			 RTE_MAX_ETHPORTS);
	}

	/* enable allocated ports */
	for (pid = 0; pid < nb_ports; pid++)
		ports[pid].enabled = 1;
}
int
main(int argc, char** argv)
{
	int diag;
	uint8_t port_id;

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

	nb_ports = (portid_t) rte_eth_dev_count();
	if (nb_ports == 0)
		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent fully testing it.\n",
		       nb_rxq, nb_txq);

	init_config();
	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	FOREACH_PORT(port_id, ports)
		rte_eth_promiscuous_enable(port_id);

#ifdef RTE_LIBRTE_CMDLINE
	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
	} else
#endif
	{
		char c;
		int rc;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(0);
		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		if (rc < 0)
			return 1;
	}

	return 0;
}