4 * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <sys/types.h>
44 #include <sys/queue.h>
51 #include <rte_common.h>
52 #include <rte_byteorder.h>
54 #include <rte_debug.h>
55 #include <rte_cycles.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
59 #include <rte_launch.h>
61 #include <rte_per_lcore.h>
62 #include <rte_lcore.h>
63 #include <rte_atomic.h>
64 #include <rte_branch_prediction.h>
66 #include <rte_mempool.h>
67 #include <rte_malloc.h>
69 #include <rte_interrupts.h>
71 #include <rte_ether.h>
72 #include <rte_ethdev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
80 #include "mempool_osdep.h"
82 uint16_t verbose_level = 0; /**< Silent by default. */
84 /* use master core for command line? */
85 uint8_t interactive = 0;
86 uint8_t auto_start = 0;
89 * NUMA support configuration.
90 * When set, the NUMA support attempts to dispatch the allocation of the
91 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
92 * probed ports among the CPU sockets 0 and 1.
93 * Otherwise, all memory is allocated from CPU socket 0.
95 uint8_t numa_support = 0; /**< No numa support by default */
98 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
101 uint8_t socket_num = UMA_NO_CONFIG;
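/*
 * With NUMA support disabled (UMA), a single mbuf pool is created on
 * socket 0 unless --socket-num selects another socket. With NUMA support
 * enabled, one pool per detected socket is created instead (see the
 * mbuf_pool_create() calls in init_config()).
 */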
104 * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
109 * Record the Ethernet address of peer target ports to which packets are
111 * Must be instantiated with the Ethernet addresses of peer traffic generator
114 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
115 portid_t nb_peer_eth_addrs = 0;
118 * Probed Target Environment.
120 struct rte_port *ports; /**< For all probed ethernet ports. */
121 portid_t nb_ports; /**< Number of probed ethernet ports. */
122 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
123 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
126 * Test Forwarding Configuration.
127 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
128 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
130 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
131 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
132 portid_t nb_cfg_ports; /**< Number of configured ports. */
133 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
135 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
136 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
138 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
139 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
142 * Forwarding engines.
144 struct fwd_engine * fwd_engines[] = {
147 &mac_retry_fwd_engine,
154 #ifdef RTE_LIBRTE_IEEE1588
155 &ieee1588_fwd_engine,
160 struct fwd_config cur_fwd_config;
161 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
163 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
164 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
165 * specified on command-line. */
168 * Configuration of packet segments used by the "txonly" processing engine.
170 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
171 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
172 TXONLY_DEF_PACKET_LEN,
174 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
176 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
177 /**< Split policy for packets to TX. */
179 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
180 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
182 /* Whether the current configuration is in DCB mode; 0 means it is not */
183 uint8_t dcb_config = 0;
185 /* Whether DCB is in testing status */
186 uint8_t dcb_test = 0;
189 * Configurable number of RX/TX queues.
191 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
192 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
195 * Configurable number of RX/TX ring descriptors.
197 #define RTE_TEST_RX_DESC_DEFAULT 128
198 #define RTE_TEST_TX_DESC_DEFAULT 512
199 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
200 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
202 #define RTE_PMD_PARAM_UNSET -1
204 * Configurable values of RX and TX ring threshold registers.
207 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
208 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
209 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
211 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
212 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
213 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
216 * Configurable value of RX free threshold.
218 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
221 * Configurable value of RX drop enable.
223 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
226 * Configurable value of TX free threshold.
228 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
231 * Configurable value of TX RS bit threshold.
233 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
236 * Configurable value of TX queue flags.
238 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
241 * Receive Side Scaling (RSS) configuration.
243 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
246 * Port topology configuration
248 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
251 * Avoid flushing all the RX streams before starting forwarding.
253 uint8_t no_flush_rx = 0; /* flush by default */
256 * Avoid checking the link status when starting/stopping a port.
258 uint8_t no_link_check = 0; /* check by default */
261 * NIC bypass mode configuration options.
263 #ifdef RTE_NIC_BYPASS
265 /* The NIC bypass watchdog timeout. */
266 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
271 * Ethernet device configuration.
273 struct rte_eth_rxmode rx_mode = {
274 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
276 .header_split = 0, /**< Header Split disabled. */
277 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
278 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
279 .hw_vlan_strip = 1, /**< VLAN strip enabled. */
280 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
281 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
282 .hw_strip_crc = 0, /**< CRC stripping by hardware disabled. */
285 struct rte_fdir_conf fdir_conf = {
286 .mode = RTE_FDIR_MODE_NONE,
287 .pballoc = RTE_FDIR_PBALLOC_64K,
288 .status = RTE_FDIR_REPORT_STATUS,
290 .vlan_tci_mask = 0x0,
292 .src_ip = 0xFFFFFFFF,
293 .dst_ip = 0xFFFFFFFF,
296 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
297 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
299 .src_port_mask = 0xFFFF,
300 .dst_port_mask = 0xFFFF,
301 .mac_addr_byte_mask = 0xFF,
302 .tunnel_type_mask = 1,
303 .tunnel_id_mask = 0xFFFFFFFF,
308 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
310 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
311 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
313 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
314 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
316 uint16_t nb_tx_queue_stats_mappings = 0;
317 uint16_t nb_rx_queue_stats_mappings = 0;
319 unsigned max_socket = 0;
321 /* Forward function declarations */
322 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
323 static void check_all_ports_link_status(uint32_t port_mask);
326 * Check if all the ports are started.
327 * If yes, return positive value. If not, return zero.
329 static int all_ports_started(void);
332 * Find next enabled port
335 find_next_port(portid_t p, struct rte_port *ports, int size)
338 rte_exit(-EINVAL, "failed to find a next port id\n");
340 while ((p < size) && (ports[p].enabled == 0))
346 * Setup default configuration.
349 set_default_fwd_lcores_config(void)
353 unsigned int sock_num;
356 for (i = 0; i < RTE_MAX_LCORE; i++) {
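/*
 * Socket ids are zero based, so "+ 1" turns the id of the socket owning
 * this lcore into a socket count; max_socket ends up holding the number
 * of sockets actually in use.
 */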
357 sock_num = rte_lcore_to_socket_id(i) + 1;
358 if (sock_num > max_socket) {
359 if (sock_num > RTE_MAX_NUMA_NODES)
360 rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
361 max_socket = sock_num;
363 if (!rte_lcore_is_enabled(i))
365 if (i == rte_get_master_lcore())
367 fwd_lcores_cpuids[nb_lc++] = i;
369 nb_lcores = (lcoreid_t) nb_lc;
370 nb_cfg_lcores = nb_lcores;
375 set_def_peer_eth_addrs(void)
379 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
380 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
381 peer_eth_addrs[i].addr_bytes[5] = i;
386 set_default_fwd_ports_config(void)
390 for (pt_id = 0; pt_id < nb_ports; pt_id++)
391 fwd_ports_ids[pt_id] = pt_id;
393 nb_cfg_ports = nb_ports;
394 nb_fwd_ports = nb_ports;
398 set_def_fwd_config(void)
400 set_default_fwd_lcores_config();
401 set_def_peer_eth_addrs();
402 set_default_fwd_ports_config();
406 * Configuration initialisation done once at init time.
409 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
410 unsigned int socket_id)
412 char pool_name[RTE_MEMPOOL_NAMESIZE];
413 struct rte_mempool *rte_mp;
416 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
417 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
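/*
 * Three creation paths follow: a grant-table backed pool when the xenvirt
 * PMD is compiled in, an anonymous-memory pool (mempool_anon_create) when
 * anonymous mbuf memory is requested, and the regular
 * rte_pktmbuf_pool_create() wrapper otherwise.
 */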
419 #ifdef RTE_LIBRTE_PMD_XENVIRT
420 rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
421 (unsigned) mb_mempool_cache,
422 sizeof(struct rte_pktmbuf_pool_private),
423 rte_pktmbuf_pool_init, NULL,
424 rte_pktmbuf_init, NULL,
431 rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
432 (unsigned) mb_mempool_cache,
433 sizeof(struct rte_pktmbuf_pool_private),
434 rte_pktmbuf_pool_init, NULL,
435 rte_pktmbuf_init, NULL,
438 /* wrapper to rte_mempool_create() */
439 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
440 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
444 if (rte_mp == NULL) {
445 rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
446 "failed\n", socket_id);
447 } else if (verbose_level > 0) {
448 rte_mempool_dump(stdout, rte_mp);
453 * Check whether a given socket id is valid in NUMA mode.
454 * Return 0 if valid, -1 otherwise.
457 check_socket_id(const unsigned int socket_id)
459 static int warning_once = 0;
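/*
 * Warn at most once, and only when NUMA support is enabled, to avoid
 * flooding the output for every out-of-range socket id.
 */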
461 if (socket_id >= max_socket) {
462 if (!warning_once && numa_support)
463 printf("Warning: NUMA should be configured manually by"
464 " using --port-numa-config and"
465 " --ring-numa-config parameters along with"
477 struct rte_port *port;
478 struct rte_mempool *mbp;
479 unsigned int nb_mbuf_per_pool;
481 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
483 memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
484 /* Configuration of logical cores. */
485 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
486 sizeof(struct fwd_lcore *) * nb_lcores,
487 RTE_CACHE_LINE_SIZE);
488 if (fwd_lcores == NULL) {
489 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
490 "failed\n", nb_lcores);
492 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
493 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
494 sizeof(struct fwd_lcore),
495 RTE_CACHE_LINE_SIZE);
496 if (fwd_lcores[lc_id] == NULL) {
497 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
500 fwd_lcores[lc_id]->cpuid_idx = lc_id;
504 * Create pools of mbuf.
505 * If NUMA support is disabled, create a single pool of mbuf in
506 * socket 0 memory by default.
507 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
509 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
510 * nb_txd can be configured at run time.
512 if (param_total_num_mbufs)
513 nb_mbuf_per_pool = param_total_num_mbufs;
515 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
516 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
520 (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
524 if (socket_num == UMA_NO_CONFIG)
525 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
527 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
531 FOREACH_PORT(pid, ports) {
533 rte_eth_dev_info_get(pid, &port->dev_info);
536 if (port_numa[pid] != NUMA_NO_CONFIG)
537 port_per_socket[port_numa[pid]]++;
539 uint32_t socket_id = rte_eth_dev_socket_id(pid);
541 /* if socket_id is invalid, set to 0 */
542 if (check_socket_id(socket_id) < 0)
544 port_per_socket[socket_id]++;
548 /* set flag to initialize port/queue */
549 port->need_reconfig = 1;
550 port->need_reconfig_queues = 1;
555 unsigned int nb_mbuf;
557 if (param_total_num_mbufs)
558 nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
560 for (i = 0; i < max_socket; i++) {
561 nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
563 mbuf_pool_create(mbuf_data_size,
570 * Records which Mbuf pool to use by each logical core, if needed.
572 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
573 mbp = mbuf_pool_find(
574 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
577 mbp = mbuf_pool_find(0);
578 fwd_lcores[lc_id]->mbp = mbp;
581 /* Configuration of packet forwarding streams. */
582 if (init_fwd_streams() < 0)
583 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
588 reconfig(portid_t new_port_id, unsigned socket_id)
590 struct rte_port *port;
592 /* Reconfiguration of Ethernet ports. */
593 port = &ports[new_port_id];
594 rte_eth_dev_info_get(new_port_id, &port->dev_info);
596 /* set flag to initialize port/queue */
597 port->need_reconfig = 1;
598 port->need_reconfig_queues = 1;
599 port->socket_id = socket_id;
606 init_fwd_streams(void)
609 struct rte_port *port;
610 streamid_t sm_id, nb_fwd_streams_new;
612 /* set socket id according to numa or not */
613 FOREACH_PORT(pid, ports) {
615 if (nb_rxq > port->dev_info.max_rx_queues) {
616 printf("Fail: nb_rxq(%d) is greater than "
617 "max_rx_queues(%d)\n", nb_rxq,
618 port->dev_info.max_rx_queues);
621 if (nb_txq > port->dev_info.max_tx_queues) {
622 printf("Fail: nb_txq(%d) is greater than "
623 "max_tx_queues(%d)\n", nb_txq,
624 port->dev_info.max_tx_queues);
628 if (port_numa[pid] != NUMA_NO_CONFIG)
629 port->socket_id = port_numa[pid];
631 port->socket_id = rte_eth_dev_socket_id(pid);
633 /* if socket_id is invalid, set to 0 */
634 if (check_socket_id(port->socket_id) < 0)
639 if (socket_num == UMA_NO_CONFIG)
642 port->socket_id = socket_num;
646 nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
647 if (nb_fwd_streams_new == nb_fwd_streams)
650 if (fwd_streams != NULL) {
651 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
652 if (fwd_streams[sm_id] == NULL)
654 rte_free(fwd_streams[sm_id]);
655 fwd_streams[sm_id] = NULL;
657 rte_free(fwd_streams);
662 nb_fwd_streams = nb_fwd_streams_new;
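/* Allocate one forwarding stream per (port, RX queue) pair. */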
663 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
664 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
665 if (fwd_streams == NULL)
666 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
667 "failed\n", nb_fwd_streams);
669 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
670 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
671 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
672 if (fwd_streams[sm_id] == NULL)
673 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
680 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
682 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
684 unsigned int total_burst;
685 unsigned int nb_burst;
686 unsigned int burst_stats[3];
687 uint16_t pktnb_stats[3];
689 int burst_percent[3];
692 * First compute the total number of packet bursts and the
693 * two highest numbers of bursts of the same number of packets.
696 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
697 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
698 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
699 nb_burst = pbs->pkt_burst_spread[nb_pkt];
702 total_burst += nb_burst;
703 if (nb_burst > burst_stats[0]) {
704 burst_stats[1] = burst_stats[0];
705 pktnb_stats[1] = pktnb_stats[0];
706 burst_stats[0] = nb_burst;
707 pktnb_stats[0] = nb_pkt;
710 if (total_burst == 0)
712 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
713 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
714 burst_percent[0], (int) pktnb_stats[0]);
715 if (burst_stats[0] == total_burst) {
719 if (burst_stats[0] + burst_stats[1] == total_burst) {
720 printf(" + %d%% of %d pkts]\n",
721 100 - burst_percent[0], pktnb_stats[1]);
724 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
725 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
726 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
727 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
730 printf(" + %d%% of %d pkts + %d%% of others]\n",
731 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
733 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
736 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
738 struct rte_port *port;
741 static const char *fwd_stats_border = "----------------------";
743 port = &ports[port_id];
744 printf("\n %s Forward statistics for port %-2d %s\n",
745 fwd_stats_border, port_id, fwd_stats_border);
747 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
748 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
750 stats->ipackets, stats->imissed,
751 (uint64_t) (stats->ipackets + stats->imissed));
753 if (cur_fwd_eng == &csum_fwd_engine)
754 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
755 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
756 if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
757 printf(" RX-error: %-"PRIu64"\n", stats->ierrors);
758 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
761 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
763 stats->opackets, port->tx_dropped,
764 (uint64_t) (stats->opackets + port->tx_dropped));
767 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
769 stats->ipackets, stats->imissed,
770 (uint64_t) (stats->ipackets + stats->imissed));
772 if (cur_fwd_eng == &csum_fwd_engine)
773 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
774 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
775 if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
776 printf(" RX-error:%"PRIu64"\n", stats->ierrors);
777 printf(" RX-nombufs: %14"PRIu64"\n",
781 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
783 stats->opackets, port->tx_dropped,
784 (uint64_t) (stats->opackets + port->tx_dropped));
787 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
789 pkt_burst_stats_display("RX",
790 &port->rx_stream->rx_burst_stats);
792 pkt_burst_stats_display("TX",
793 &port->tx_stream->tx_burst_stats);
796 if (port->rx_queue_stats_mapping_enabled) {
798 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
799 printf(" Stats reg %2d RX-packets:%14"PRIu64
800 " RX-errors:%14"PRIu64
801 " RX-bytes:%14"PRIu64"\n",
802 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
806 if (port->tx_queue_stats_mapping_enabled) {
807 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
808 printf(" Stats reg %2d TX-packets:%14"PRIu64
809 " TX-bytes:%14"PRIu64"\n",
810 i, stats->q_opackets[i], stats->q_obytes[i]);
814 printf(" %s--------------------------------%s\n",
815 fwd_stats_border, fwd_stats_border);
819 fwd_stream_stats_display(streamid_t stream_id)
821 struct fwd_stream *fs;
822 static const char *fwd_top_stats_border = "-------";
824 fs = fwd_streams[stream_id];
825 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
826 (fs->fwd_dropped == 0))
828 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
829 "TX Port=%2d/Queue=%2d %s\n",
830 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
831 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
832 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
833 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
835 /* if checksum mode */
836 if (cur_fwd_eng == &csum_fwd_engine) {
837 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
838 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
841 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
842 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
843 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
848 flush_fwd_rx_queues(void)
850 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
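/*
 * Drain every RX queue of every forwarding port: receive bursts and free
 * the mbufs. The sweep is done twice, waiting 10 ms after each pass, to
 * catch packets still in flight.
 */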
858 for (j = 0; j < 2; j++) {
859 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
860 for (rxq = 0; rxq < nb_rxq; rxq++) {
861 port_id = fwd_ports_ids[rxp];
863 nb_rx = rte_eth_rx_burst(port_id, rxq,
864 pkts_burst, MAX_PKT_BURST);
865 for (i = 0; i < nb_rx; i++)
866 rte_pktmbuf_free(pkts_burst[i]);
870 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
875 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
877 struct fwd_stream **fsm;
881 fsm = &fwd_streams[fc->stream_idx];
882 nb_fs = fc->stream_nb;
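/*
 * Run the engine's forwarding function on every stream mapped to this
 * lcore, repeating until the lcore is asked to stop.
 */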
884 for (sm_id = 0; sm_id < nb_fs; sm_id++)
885 (*pkt_fwd)(fsm[sm_id]);
886 } while (! fc->stopped);
890 start_pkt_forward_on_core(void *fwd_arg)
892 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
893 cur_fwd_config.fwd_eng->packet_fwd);
898 * Run the TXONLY packet forwarding engine to send a single burst of packets.
899 * Used to start communication flows in network loopback test configurations.
902 run_one_txonly_burst_on_core(void *fwd_arg)
904 struct fwd_lcore *fwd_lc;
905 struct fwd_lcore tmp_lcore;
907 fwd_lc = (struct fwd_lcore *) fwd_arg;
909 tmp_lcore.stopped = 1;
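/*
 * With "stopped" already set, the do/while loop in run_pkt_fwd_on_lcore()
 * executes exactly one iteration, i.e. a single burst per stream.
 */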
910 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
915 * Launch packet forwarding:
916 * - Setup per-port forwarding context.
917 * - launch logical cores with their forwarding configuration.
920 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
922 port_fwd_begin_t port_fwd_begin;
927 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
928 if (port_fwd_begin != NULL) {
929 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
930 (*port_fwd_begin)(fwd_ports_ids[i]);
932 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
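/*
 * In interactive mode, skip the lcore running the command line; the
 * forwarding loop is launched on every other configured lcore.
 */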
933 lc_id = fwd_lcores_cpuids[i];
934 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
935 fwd_lcores[i]->stopped = 0;
936 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
937 fwd_lcores[i], lc_id);
939 printf("launch lcore %u failed - diag=%d\n",
946 * Launch packet forwarding configuration.
949 start_packet_forwarding(int with_tx_first)
951 port_fwd_begin_t port_fwd_begin;
952 port_fwd_end_t port_fwd_end;
953 struct rte_port *port;
958 if (all_ports_started() == 0) {
959 printf("Not all ports were started\n");
962 if (test_done == 0) {
963 printf("Packet forwarding already started\n");
967 for (i = 0; i < nb_fwd_ports; i++) {
968 pt_id = fwd_ports_ids[i];
969 port = &ports[pt_id];
970 if (!port->dcb_flag) {
971 printf("In DCB mode, all forwarding ports must "
972 "be configured in this mode.\n");
976 if (nb_fwd_lcores == 1) {
977 printf("In DCB mode,the nb forwarding cores "
978 "should be larger than 1.\n");
985 flush_fwd_rx_queues();
988 rxtx_config_display();
990 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
991 pt_id = fwd_ports_ids[i];
992 port = &ports[pt_id];
993 rte_eth_stats_get(pt_id, &port->stats);
994 port->tx_dropped = 0;
996 map_port_queue_stats_mapping_registers(pt_id, port);
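/*
 * Clear all per-stream counters so that the statistics reported when
 * forwarding stops cover only this forwarding run.
 */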
998 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
999 fwd_streams[sm_id]->rx_packets = 0;
1000 fwd_streams[sm_id]->tx_packets = 0;
1001 fwd_streams[sm_id]->fwd_dropped = 0;
1002 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1003 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1005 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1006 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1007 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1008 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1009 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1011 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1012 fwd_streams[sm_id]->core_cycles = 0;
1015 if (with_tx_first) {
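/*
 * TX-first: run the tx_only engine for a single burst on every core
 * (with its begin/end port callbacks) to prime the loopback before
 * launching the engine selected in cur_fwd_config.
 */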
1016 port_fwd_begin = tx_only_engine.port_fwd_begin;
1017 if (port_fwd_begin != NULL) {
1018 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1019 (*port_fwd_begin)(fwd_ports_ids[i]);
1021 launch_packet_forwarding(run_one_txonly_burst_on_core);
1022 rte_eal_mp_wait_lcore();
1023 port_fwd_end = tx_only_engine.port_fwd_end;
1024 if (port_fwd_end != NULL) {
1025 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1026 (*port_fwd_end)(fwd_ports_ids[i]);
1029 launch_packet_forwarding(start_pkt_forward_on_core);
1033 stop_packet_forwarding(void)
1035 struct rte_eth_stats stats;
1036 struct rte_port *port;
1037 port_fwd_end_t port_fwd_end;
1042 uint64_t total_recv;
1043 uint64_t total_xmit;
1044 uint64_t total_rx_dropped;
1045 uint64_t total_tx_dropped;
1046 uint64_t total_rx_nombuf;
1047 uint64_t tx_dropped;
1048 uint64_t rx_bad_ip_csum;
1049 uint64_t rx_bad_l4_csum;
1050 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1051 uint64_t fwd_cycles;
1053 static const char *acc_stats_border = "+++++++++++++++";
1055 if (all_ports_started() == 0) {
1056 printf("Not all ports were started\n");
1060 printf("Packet forwarding not started\n");
1063 printf("Telling cores to stop...");
1064 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1065 fwd_lcores[lc_id]->stopped = 1;
1066 printf("\nWaiting for lcores to finish...\n");
1067 rte_eal_mp_wait_lcore();
1068 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1069 if (port_fwd_end != NULL) {
1070 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1071 pt_id = fwd_ports_ids[i];
1072 (*port_fwd_end)(pt_id);
1075 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1078 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1079 if (cur_fwd_config.nb_fwd_streams >
1080 cur_fwd_config.nb_fwd_ports) {
1081 fwd_stream_stats_display(sm_id);
1082 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1083 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1085 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1087 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1090 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1091 tx_dropped = (uint64_t) (tx_dropped +
1092 fwd_streams[sm_id]->fwd_dropped);
1093 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1096 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1097 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1098 fwd_streams[sm_id]->rx_bad_ip_csum);
1099 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1103 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1104 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1105 fwd_streams[sm_id]->rx_bad_l4_csum);
1106 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1109 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1110 fwd_cycles = (uint64_t) (fwd_cycles +
1111 fwd_streams[sm_id]->core_cycles);
1116 total_rx_dropped = 0;
1117 total_tx_dropped = 0;
1118 total_rx_nombuf = 0;
1119 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1120 pt_id = fwd_ports_ids[i];
1122 port = &ports[pt_id];
1123 rte_eth_stats_get(pt_id, &stats);
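/*
 * Subtract the counters sampled when forwarding started so that only the
 * traffic of this forwarding session is reported, then reset the snapshot.
 */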
1124 stats.ipackets -= port->stats.ipackets;
1125 port->stats.ipackets = 0;
1126 stats.opackets -= port->stats.opackets;
1127 port->stats.opackets = 0;
1128 stats.ibytes -= port->stats.ibytes;
1129 port->stats.ibytes = 0;
1130 stats.obytes -= port->stats.obytes;
1131 port->stats.obytes = 0;
1132 stats.imissed -= port->stats.imissed;
1133 port->stats.imissed = 0;
1134 stats.oerrors -= port->stats.oerrors;
1135 port->stats.oerrors = 0;
1136 stats.rx_nombuf -= port->stats.rx_nombuf;
1137 port->stats.rx_nombuf = 0;
1139 total_recv += stats.ipackets;
1140 total_xmit += stats.opackets;
1141 total_rx_dropped += stats.imissed;
1142 total_tx_dropped += port->tx_dropped;
1143 total_rx_nombuf += stats.rx_nombuf;
1145 fwd_port_stats_display(pt_id, &stats);
1147 printf("\n %s Accumulated forward statistics for all ports"
1149 acc_stats_border, acc_stats_border);
1150 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1152 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1154 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1155 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1156 if (total_rx_nombuf > 0)
1157 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1158 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1160 acc_stats_border, acc_stats_border);
1161 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1163 printf("\n CPU cycles/packet=%u (total cycles="
1164 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1165 (unsigned int)(fwd_cycles / total_recv),
1166 fwd_cycles, total_recv);
1168 printf("\nDone.\n");
1173 dev_set_link_up(portid_t pid)
1175 if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1176 printf("\nSet link up fail.\n");
1180 dev_set_link_down(portid_t pid)
1182 if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1183 printf("\nSet link down fail.\n");
1187 all_ports_started(void)
1190 struct rte_port *port;
1192 FOREACH_PORT(pi, ports) {
1194 /* Check if there is a port which is not started */
1195 if ((port->port_status != RTE_PORT_STARTED) &&
1196 (port->slave_flag == 0))
1200 /* All ports are started */
1205 all_ports_stopped(void)
1208 struct rte_port *port;
1210 FOREACH_PORT(pi, ports) {
1212 if ((port->port_status != RTE_PORT_STOPPED) &&
1213 (port->slave_flag == 0))
1221 port_is_started(portid_t port_id)
1223 if (port_id_is_invalid(port_id, ENABLED_WARN))
1226 if (ports[port_id].port_status != RTE_PORT_STARTED)
1233 port_is_closed(portid_t port_id)
1235 if (port_id_is_invalid(port_id, ENABLED_WARN))
1238 if (ports[port_id].port_status != RTE_PORT_CLOSED)
1245 start_port(portid_t pid)
1247 int diag, need_check_link_status = -1;
1250 struct rte_port *port;
1251 struct ether_addr mac_addr;
1253 if (test_done == 0) {
1254 printf("Please stop forwarding first\n");
1258 if (port_id_is_invalid(pid, ENABLED_WARN))
1261 if (init_fwd_streams() < 0) {
1262 printf("Fail from init_fwd_streams()\n");
1268 FOREACH_PORT(pi, ports) {
1269 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1272 need_check_link_status = 0;
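/*
 * Claim the port atomically: only a port in the STOPPED state can be
 * moved to HANDLING and (re)configured and started below.
 */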
1274 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1275 RTE_PORT_HANDLING) == 0) {
1276 printf("Port %d is now not stopped\n", pi);
1280 if (port->need_reconfig > 0) {
1281 port->need_reconfig = 0;
1283 printf("Configuring Port %d (socket %u)\n", pi,
1285 /* configure port */
1286 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1289 if (rte_atomic16_cmpset(&(port->port_status),
1290 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1291 printf("Port %d can not be set back "
1292 "to stopped\n", pi);
1293 printf("Fail to configure port %d\n", pi);
1294 /* try to reconfigure port next time */
1295 port->need_reconfig = 1;
1299 if (port->need_reconfig_queues > 0) {
1300 port->need_reconfig_queues = 0;
1301 /* setup tx queues */
1302 for (qi = 0; qi < nb_txq; qi++) {
1303 if ((numa_support) &&
1304 (txring_numa[pi] != NUMA_NO_CONFIG))
1305 diag = rte_eth_tx_queue_setup(pi, qi,
1306 nb_txd,txring_numa[pi],
1309 diag = rte_eth_tx_queue_setup(pi, qi,
1310 nb_txd,port->socket_id,
1316 /* Fail to setup tx queue, return */
1317 if (rte_atomic16_cmpset(&(port->port_status),
1319 RTE_PORT_STOPPED) == 0)
1320 printf("Port %d can not be set back "
1321 "to stopped\n", pi);
1322 printf("Fail to configure port %d tx queues\n", pi);
1323 /* try to reconfigure queues next time */
1324 port->need_reconfig_queues = 1;
1327 /* setup rx queues */
1328 for (qi = 0; qi < nb_rxq; qi++) {
1329 if ((numa_support) &&
1330 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1331 struct rte_mempool * mp =
1332 mbuf_pool_find(rxring_numa[pi]);
1334 printf("Failed to setup RX queue:"
1335 "No mempool allocation"
1336 "on the socket %d\n",
1341 diag = rte_eth_rx_queue_setup(pi, qi,
1342 nb_rxd,rxring_numa[pi],
1343 &(port->rx_conf),mp);
1346 diag = rte_eth_rx_queue_setup(pi, qi,
1347 nb_rxd,port->socket_id,
1349 mbuf_pool_find(port->socket_id));
1355 /* Fail to setup rx queue, return */
1356 if (rte_atomic16_cmpset(&(port->port_status),
1358 RTE_PORT_STOPPED) == 0)
1359 printf("Port %d can not be set back "
1360 "to stopped\n", pi);
1361 printf("Fail to configure port %d rx queues\n", pi);
1362 /* try to reconfigure queues next time */
1363 port->need_reconfig_queues = 1;
1368 if (rte_eth_dev_start(pi) < 0) {
1369 printf("Fail to start port %d\n", pi);
1371 /* Fail to setup rx queue, return */
1372 if (rte_atomic16_cmpset(&(port->port_status),
1373 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1374 printf("Port %d can not be set back to "
1379 if (rte_atomic16_cmpset(&(port->port_status),
1380 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1381 printf("Port %d can not be set into started\n", pi);
1383 rte_eth_macaddr_get(pi, &mac_addr);
1384 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1385 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1386 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1387 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1389 /* at least one port started, need checking link status */
1390 need_check_link_status = 1;
1393 if (need_check_link_status == 1 && !no_link_check)
1394 check_all_ports_link_status(RTE_PORT_ALL);
1395 else if (need_check_link_status == 0)
1396 printf("Please stop the ports first\n");
1403 stop_port(portid_t pid)
1406 struct rte_port *port;
1407 int need_check_link_status = 0;
1409 if (test_done == 0) {
1410 printf("Please stop forwarding first\n");
1418 if (port_id_is_invalid(pid, ENABLED_WARN))
1421 printf("Stopping ports...\n");
1423 FOREACH_PORT(pi, ports) {
1424 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1428 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1429 RTE_PORT_HANDLING) == 0)
1432 rte_eth_dev_stop(pi);
1434 if (rte_atomic16_cmpset(&(port->port_status),
1435 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1436 printf("Port %d can not be set into stopped\n", pi);
1437 need_check_link_status = 1;
1439 if (need_check_link_status && !no_link_check)
1440 check_all_ports_link_status(RTE_PORT_ALL);
1446 close_port(portid_t pid)
1449 struct rte_port *port;
1451 if (test_done == 0) {
1452 printf("Please stop forwarding first\n");
1456 if (port_id_is_invalid(pid, ENABLED_WARN))
1459 printf("Closing ports...\n");
1461 FOREACH_PORT(pi, ports) {
1462 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1466 if (rte_atomic16_cmpset(&(port->port_status),
1467 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1468 printf("Port %d is already closed\n", pi);
1472 if (rte_atomic16_cmpset(&(port->port_status),
1473 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1474 printf("Port %d is now not stopped\n", pi);
1478 rte_eth_dev_close(pi);
1480 if (rte_atomic16_cmpset(&(port->port_status),
1481 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1482 printf("Port %d can not be set into stopped\n", pi);
1489 attach_port(char *identifier)
1491 portid_t i, j, pi = 0;
1493 printf("Attaching a new port...\n");
1495 if (identifier == NULL) {
1496 printf("Invalid parameters are specified\n");
1500 if (test_done == 0) {
1501 printf("Please stop forwarding first\n");
1505 if (rte_eth_dev_attach(identifier, &pi))
1508 ports[pi].enabled = 1;
1509 reconfig(pi, rte_eth_dev_socket_id(pi));
1510 rte_eth_promiscuous_enable(pi);
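/* Rebuild the forwarding port list so the newly attached port is included. */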
1512 nb_ports = rte_eth_dev_count();
1514 /* set_default_fwd_ports_config(); */
1515 memset(fwd_ports_ids, 0, sizeof(fwd_ports_ids));
1517 FOREACH_PORT(j, ports) {
1518 fwd_ports_ids[i] = j;
1521 nb_cfg_ports = nb_ports;
1524 ports[pi].port_status = RTE_PORT_STOPPED;
1526 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
1531 detach_port(uint8_t port_id)
1534 char name[RTE_ETH_NAME_MAX_LEN];
1536 printf("Detaching a port...\n");
1538 if (!port_is_closed(port_id)) {
1539 printf("Please close port first\n");
1543 if (rte_eth_dev_detach(port_id, name))
1546 ports[port_id].enabled = 0;
1547 nb_ports = rte_eth_dev_count();
1549 /* set_default_fwd_ports_config(); */
1550 memset(fwd_ports_ids, 0, sizeof(fwd_ports_ids));
1552 FOREACH_PORT(pi, ports) {
1553 fwd_ports_ids[i] = pi;
1556 nb_cfg_ports = nb_ports;
1559 printf("Port '%s' is detached. Now total ports is %d\n",
1571 stop_packet_forwarding();
1573 if (ports != NULL) {
1575 FOREACH_PORT(pt_id, ports) {
1576 printf("\nShutting down port %d...\n", pt_id);
1582 printf("\nBye...\n");
1585 typedef void (*cmd_func_t)(void);
1586 struct pmd_test_command {
1587 const char *cmd_name;
1588 cmd_func_t cmd_func;
1591 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1593 /* Check the link status of all ports for up to 9 s, and print it at the end */
1595 check_all_ports_link_status(uint32_t port_mask)
1597 #define CHECK_INTERVAL 100 /* 100ms */
1598 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1599 uint8_t portid, count, all_ports_up, print_flag = 0;
1600 struct rte_eth_link link;
1602 printf("Checking link statuses...\n");
1604 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1606 FOREACH_PORT(portid, ports) {
1607 if ((port_mask & (1 << portid)) == 0)
1609 memset(&link, 0, sizeof(link));
1610 rte_eth_link_get_nowait(portid, &link);
1611 /* print link status if flag set */
1612 if (print_flag == 1) {
1613 if (link.link_status)
1614 printf("Port %d Link Up - speed %u "
1615 "Mbps - %s\n", (uint8_t)portid,
1616 (unsigned)link.link_speed,
1617 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1618 ("full-duplex") : ("half-duplex\n"));
1620 printf("Port %d Link Down\n",
1624 /* clear all_ports_up flag if any link down */
1625 if (link.link_status == 0) {
1630 /* after finally printing all link status, get out */
1631 if (print_flag == 1)
1634 if (all_ports_up == 0) {
1636 rte_delay_ms(CHECK_INTERVAL);
1639 /* set the print_flag if all ports up or timeout */
1640 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1647 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1651 uint8_t mapping_found = 0;
1653 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1654 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1655 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1656 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1657 tx_queue_stats_mappings[i].queue_id,
1658 tx_queue_stats_mappings[i].stats_counter_id);
1665 port->tx_queue_stats_mapping_enabled = 1;
1670 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1674 uint8_t mapping_found = 0;
1676 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1677 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1678 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1679 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1680 rx_queue_stats_mappings[i].queue_id,
1681 rx_queue_stats_mappings[i].stats_counter_id);
1688 port->rx_queue_stats_mapping_enabled = 1;
1693 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1697 diag = set_tx_queue_stats_mapping_registers(pi, port);
1699 if (diag == -ENOTSUP) {
1700 port->tx_queue_stats_mapping_enabled = 0;
1701 printf("TX queue stats mapping not supported port id=%d\n", pi);
1704 rte_exit(EXIT_FAILURE,
1705 "set_tx_queue_stats_mapping_registers "
1706 "failed for port id=%d diag=%d\n",
1710 diag = set_rx_queue_stats_mapping_registers(pi, port);
1712 if (diag == -ENOTSUP) {
1713 port->rx_queue_stats_mapping_enabled = 0;
1714 printf("RX queue stats mapping not supported port id=%d\n", pi);
1717 rte_exit(EXIT_FAILURE,
1718 "set_rx_queue_stats_mapping_registers "
1719 "failed for port id=%d diag=%d\n",
1725 rxtx_port_config(struct rte_port *port)
1727 port->rx_conf = port->dev_info.default_rxconf;
1728 port->tx_conf = port->dev_info.default_txconf;
1730 /* Check if any RX/TX parameters have been passed; values left at RTE_PMD_PARAM_UNSET keep the PMD defaults set above */
1731 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1732 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1734 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1735 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1737 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1738 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1740 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1741 port->rx_conf.rx_free_thresh = rx_free_thresh;
1743 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1744 port->rx_conf.rx_drop_en = rx_drop_en;
1746 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1747 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1749 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1750 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1752 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1753 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1755 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1756 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1758 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1759 port->tx_conf.tx_free_thresh = tx_free_thresh;
1761 if (txq_flags != RTE_PMD_PARAM_UNSET)
1762 port->tx_conf.txq_flags = txq_flags;
1766 init_port_config(void)
1769 struct rte_port *port;
1771 FOREACH_PORT(pid, ports) {
1773 port->dev_conf.rxmode = rx_mode;
1774 port->dev_conf.fdir_conf = fdir_conf;
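/*
 * Propagate the global RX mode, flow director and RSS settings to the
 * port, then pick an RX multi-queue mode matching the RSS/VF/DCB setup.
 */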
1776 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1777 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1779 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1780 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1783 if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1784 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1785 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1787 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1790 if (port->dev_info.max_vfs != 0) {
1791 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1792 port->dev_conf.rxmode.mq_mode =
1795 port->dev_conf.rxmode.mq_mode =
1798 port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
1801 rxtx_port_config(port);
1803 rte_eth_macaddr_get(pid, &port->eth_addr);
1805 map_port_queue_stats_mapping_registers(pid, port);
1806 #ifdef RTE_NIC_BYPASS
1807 rte_eth_dev_bypass_init(pid);
1812 void set_port_slave_flag(portid_t slave_pid)
1814 struct rte_port *port;
1816 port = &ports[slave_pid];
1817 port->slave_flag = 1;
1820 void clear_port_slave_flag(portid_t slave_pid)
1822 struct rte_port *port;
1824 port = &ports[slave_pid];
1825 port->slave_flag = 0;
1828 const uint16_t vlan_tags[] = {
1829 0, 1, 2, 3, 4, 5, 6, 7,
1830 8, 9, 10, 11, 12, 13, 14, 15,
1831 16, 17, 18, 19, 20, 21, 22, 23,
1832 24, 25, 26, 27, 28, 29, 30, 31
1836 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
1837 enum dcb_mode_enable dcb_mode,
1838 enum rte_eth_nb_tcs num_tcs,
1844 * Builds up the correct configuration for dcb+vt based on the vlan tags array
1845 * given above, and the number of traffic classes available for use.
1847 if (dcb_mode == DCB_VT_ENABLED) {
1848 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
1849 ð_conf->rx_adv_conf.vmdq_dcb_conf;
1850 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
1851 ð_conf->tx_adv_conf.vmdq_dcb_tx_conf;
1853 /* VMDQ+DCB RX and TX configurations */
1854 vmdq_rx_conf->enable_default_pool = 0;
1855 vmdq_rx_conf->default_pool = 0;
1856 vmdq_rx_conf->nb_queue_pools =
1857 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1858 vmdq_tx_conf->nb_queue_pools =
1859 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1861 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
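/* Map each VLAN tag to one pool, cycling through the available queue pools. */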
1862 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
1863 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
1864 vmdq_rx_conf->pool_map[i].pools =
1865 1 << (i % vmdq_rx_conf->nb_queue_pools);
1867 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1868 vmdq_rx_conf->dcb_tc[i] = i;
1869 vmdq_tx_conf->dcb_tc[i] = i;
1872 /* set DCB mode of RX and TX of multiple queues */
1873 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1874 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1876 struct rte_eth_dcb_rx_conf *rx_conf =
1877 ð_conf->rx_adv_conf.dcb_rx_conf;
1878 struct rte_eth_dcb_tx_conf *tx_conf =
1879 ð_conf->tx_adv_conf.dcb_tx_conf;
1881 rx_conf->nb_tcs = num_tcs;
1882 tx_conf->nb_tcs = num_tcs;
1884 for (i = 0; i < num_tcs; i++) {
1885 rx_conf->dcb_tc[i] = i;
1886 tx_conf->dcb_tc[i] = i;
1888 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
1889 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
1890 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1894 eth_conf->dcb_capability_en =
1895 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
1897 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1903 init_port_dcb_config(portid_t pid,
1904 enum dcb_mode_enable dcb_mode,
1905 enum rte_eth_nb_tcs num_tcs,
1908 struct rte_eth_conf port_conf;
1909 struct rte_eth_dev_info dev_info;
1910 struct rte_port *rte_port;
1914 rte_eth_dev_info_get(pid, &dev_info);
1916 /* If dev_info.vmdq_pool_base is greater than 0,
1917 * the queue ids of the VMDq pools start after the PF queues.
1919 if (dcb_mode == DCB_VT_ENABLED && dev_info.vmdq_pool_base > 0) {
1920 printf("VMDQ_DCB multi-queue mode is nonsensical"
1921 " for port %d.", pid);
1925 /* Assume the ports in testpmd have the same DCB capability
1926 * and the same number of rxq and txq in DCB mode
1928 if (dcb_mode == DCB_VT_ENABLED) {
1929 nb_rxq = dev_info.max_rx_queues;
1930 nb_txq = dev_info.max_tx_queues;
1932 /* if VT is disabled, use all PF queues */
1933 if (dev_info.vmdq_pool_base == 0) {
1934 nb_rxq = dev_info.max_rx_queues;
1935 nb_txq = dev_info.max_tx_queues;
1937 nb_rxq = (queueid_t)num_tcs;
1938 nb_txq = (queueid_t)num_tcs;
1942 rx_free_thresh = 64;
1944 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
1945 /* Enter DCB configuration status */
1948 /* set configuration of DCB in VT mode and DCB in non-VT mode */
1949 retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
1953 rte_port = &ports[pid];
1954 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
1956 rxtx_port_config(rte_port);
1958 rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1959 for (i = 0; i < RTE_DIM(vlan_tags); i++)
1960 rx_vft_set(pid, vlan_tags[i], 1);
1962 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1963 map_port_queue_stats_mapping_registers(pid, rte_port);
1965 rte_port->dcb_flag = 1;
1975 /* Configuration of Ethernet ports. */
1976 ports = rte_zmalloc("testpmd: ports",
1977 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
1978 RTE_CACHE_LINE_SIZE);
1979 if (ports == NULL) {
1980 rte_exit(EXIT_FAILURE,
1981 "rte_zmalloc(%d struct rte_port) failed\n",
1985 /* enable allocated ports */
1986 for (pid = 0; pid < nb_ports; pid++)
1987 ports[pid].enabled = 1;
1998 signal_handler(int signum)
2000 if (signum == SIGINT || signum == SIGTERM) {
2001 printf("\nSignal %d received, preparing to exit...\n",
2004 /* exit with the expected status */
2005 signal(signum, SIG_DFL);
2006 kill(getpid(), signum);
2011 main(int argc, char** argv)
2016 signal(SIGINT, signal_handler);
2017 signal(SIGTERM, signal_handler);
2019 diag = rte_eal_init(argc, argv);
2021 rte_panic("Cannot init EAL\n");
2023 nb_ports = (portid_t) rte_eth_dev_count();
2025 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2027 /* allocate port structures, and init them */
2030 set_def_fwd_config();
2032 rte_panic("Empty set of forwarding logical cores - check the "
2033 "core mask supplied in the command parameters\n");
2038 launch_args_parse(argc, argv);
2040 if (nb_rxq > nb_txq)
2041 printf("Warning: nb_rxq=%d enables RSS configuration, "
2042 "but nb_txq=%d will prevent to fully test it.\n",
2046 if (start_port(RTE_PORT_ALL) != 0)
2047 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2049 /* set all ports to promiscuous mode by default */
2050 FOREACH_PORT(port_id, ports)
2051 rte_eth_promiscuous_enable(port_id);
2053 #ifdef RTE_LIBRTE_CMDLINE
2054 if (interactive == 1) {
2056 printf("Start automatic packet forwarding\n");
2057 start_packet_forwarding(0);
2066 printf("No commandline core given, start packet forwarding\n");
2067 start_packet_forwarding(0);
2068 printf("Press enter to exit\n");
2069 rc = read(0, &c, 1);