4 * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <sys/types.h>
44 #include <sys/queue.h>
51 #include <rte_common.h>
52 #include <rte_byteorder.h>
54 #include <rte_debug.h>
55 #include <rte_cycles.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
59 #include <rte_launch.h>
61 #include <rte_per_lcore.h>
62 #include <rte_lcore.h>
63 #include <rte_atomic.h>
64 #include <rte_branch_prediction.h>
66 #include <rte_mempool.h>
67 #include <rte_malloc.h>
69 #include <rte_interrupts.h>
71 #include <rte_ether.h>
72 #include <rte_ethdev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
80 #include "mempool_osdep.h"
82 uint16_t verbose_level = 0; /**< Silent by default. */
84 /* use the master core for the command line? */
85 uint8_t interactive = 0;
86 uint8_t auto_start = 0;
89 * NUMA support configuration.
90 * When set, the NUMA support attempts to dispatch the allocation of the
91 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
92 * probed ports among the CPU sockets 0 and 1.
93 * Otherwise, all memory is allocated from CPU socket 0.
95 uint8_t numa_support = 0; /**< No numa support by default */
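/*
 * Illustrative sketch (not part of the original testpmd sources): the flag
 * above typically drives which socket per-port memory comes from. The helper
 * name below is hypothetical.
 */
static inline unsigned int
example_alloc_socket(uint8_t port_id)
{
	/* With NUMA support, prefer the socket the port is attached to;
	 * otherwise everything comes from socket 0, as described above. */
	int sock = numa_support ? rte_eth_dev_socket_id(port_id) : 0;

	/* fall back to socket 0 when the socket cannot be determined */
	return (sock < 0) ? 0 : (unsigned int)sock;
}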
98 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
101 uint8_t socket_num = UMA_NO_CONFIG;
104 * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
109 * Record the Ethernet address of peer target ports to which packets are
111 * Must be instantiated with the Ethernet addresses of peer traffic generator
114 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
115 portid_t nb_peer_eth_addrs = 0;
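/*
 * Example (the command-line form is an assumption of typical usage): a peer
 * MAC can be set per port with "--eth-peer=0,00:11:22:33:44:55"; otherwise
 * set_def_peer_eth_addrs() below fills locally administered addresses whose
 * last byte is the port index.
 */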
118 * Probed Target Environment.
120 struct rte_port *ports; /**< For all probed ethernet ports. */
121 portid_t nb_ports; /**< Number of probed ethernet ports. */
122 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
123 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
126 * Test Forwarding Configuration.
127 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
128 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
130 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
131 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
132 portid_t nb_cfg_ports; /**< Number of configured ports. */
133 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
135 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
136 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
138 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
139 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
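/*
 * Worked example of the invariants above (numbers are illustrative only):
 * with nb_lcores = 8 probed cores, a user configuration of nb_cfg_lcores = 4
 * and a forwarding setup that actually uses nb_fwd_lcores = 2 satisfies
 * 2 <= 4 <= 8; the port counters follow the same pattern, e.g.
 * nb_fwd_ports = 2 <= nb_cfg_ports = 2 <= nb_ports = 4.
 */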
142 * Forwarding engines.
144 struct fwd_engine * fwd_engines[] = {
147 &mac_retry_fwd_engine,
154 #ifdef RTE_LIBRTE_IEEE1588
155 &ieee1588_fwd_engine,
160 struct fwd_config cur_fwd_config;
161 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
163 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
164 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
165 * specified on command-line. */
168 * Configuration of packet segments used by the "txonly" processing engine.
170 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
171 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
172 TXONLY_DEF_PACKET_LEN,
174 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
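/*
 * Illustrative sketch (not part of the original testpmd sources): how the
 * txonly segment variables above could describe a 3-segment packet. In
 * interactive mode the same effect is normally achieved with the
 * "set txpkts 64,64,128" command rather than by assigning these directly.
 */
static inline void
example_set_txonly_segments(void)
{
	tx_pkt_seg_lengths[0] = 64;
	tx_pkt_seg_lengths[1] = 64;
	tx_pkt_seg_lengths[2] = 128;
	tx_pkt_nb_segs = 3;
	tx_pkt_length = 64 + 64 + 128; /* total length is the sum of segments */
}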
176 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
177 /**< Split policy for packets to TX. */
179 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
180 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
182 /* whether the current configuration is in DCB mode; 0 means it is not */
183 uint8_t dcb_config = 0;
185 /* Whether DCB is in testing status */
186 uint8_t dcb_test = 0;
189 * Configurable number of RX/TX queues.
191 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
192 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
195 * Configurable number of RX/TX ring descriptors.
197 #define RTE_TEST_RX_DESC_DEFAULT 128
198 #define RTE_TEST_TX_DESC_DEFAULT 512
199 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
200 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
202 #define RTE_PMD_PARAM_UNSET -1
204 * Configurable values of RX and TX ring threshold registers.
207 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
208 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
209 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
211 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
212 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
213 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
216 * Configurable value of RX free threshold.
218 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
221 * Configurable value of RX drop enable.
223 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
226 * Configurable value of TX free threshold.
228 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
231 * Configurable value of TX RS bit threshold.
233 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
236 * Configurable value of TX queue flags.
238 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
241 * Receive Side Scaling (RSS) configuration.
243 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
246 * Port topology configuration
248 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
251 * Avoid flushing all the RX streams before starting forwarding.
253 uint8_t no_flush_rx = 0; /* flush by default */
256 * Avoid checking the link status when starting/stopping a port.
258 uint8_t no_link_check = 0; /* check by default */
261 * NIC bypass mode configuration options.
263 #ifdef RTE_NIC_BYPASS
265 /* The NIC bypass watchdog timeout. */
266 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
271 * Ethernet device configuration.
273 struct rte_eth_rxmode rx_mode = {
274 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
276 .header_split = 0, /**< Header Split disabled. */
277 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
278 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
279 .hw_vlan_strip = 1, /**< VLAN strip enabled. */
280 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
281 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
282 .hw_strip_crc = 0, /**< CRC stripping by hardware disabled. */
285 struct rte_fdir_conf fdir_conf = {
286 .mode = RTE_FDIR_MODE_NONE,
287 .pballoc = RTE_FDIR_PBALLOC_64K,
288 .status = RTE_FDIR_REPORT_STATUS,
290 .vlan_tci_mask = 0x0,
292 .src_ip = 0xFFFFFFFF,
293 .dst_ip = 0xFFFFFFFF,
296 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
297 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
299 .src_port_mask = 0xFFFF,
300 .dst_port_mask = 0xFFFF,
301 .mac_addr_byte_mask = 0xFF,
302 .tunnel_type_mask = 1,
303 .tunnel_id_mask = 0xFFFFFFFF,
308 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
310 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
311 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
313 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
314 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
316 uint16_t nb_tx_queue_stats_mappings = 0;
317 uint16_t nb_rx_queue_stats_mappings = 0;
319 unsigned max_socket = 0;
321 /* Forward function declarations */
322 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
323 static void check_all_ports_link_status(uint32_t port_mask);
326 * Check if all the ports are started.
327 * If yes, return a positive value; if not, return zero.
329 static int all_ports_started(void);
332 * Find next enabled port
335 find_next_port(portid_t p, struct rte_port *ports, int size)
338 rte_exit(-EINVAL, "failed to find a next port id\n");
340 while ((p < size) && (ports[p].enabled == 0))
346 * Setup default configuration.
349 set_default_fwd_lcores_config(void)
353 unsigned int sock_num;
356 for (i = 0; i < RTE_MAX_LCORE; i++) {
357 sock_num = rte_lcore_to_socket_id(i) + 1;
358 if (sock_num > max_socket) {
359 if (sock_num > RTE_MAX_NUMA_NODES)
360 rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
361 max_socket = sock_num;
363 if (!rte_lcore_is_enabled(i))
365 if (i == rte_get_master_lcore())
367 fwd_lcores_cpuids[nb_lc++] = i;
369 nb_lcores = (lcoreid_t) nb_lc;
370 nb_cfg_lcores = nb_lcores;
375 set_def_peer_eth_addrs(void)
379 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
380 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
381 peer_eth_addrs[i].addr_bytes[5] = i;
386 set_default_fwd_ports_config(void)
390 for (pt_id = 0; pt_id < nb_ports; pt_id++)
391 fwd_ports_ids[pt_id] = pt_id;
393 nb_cfg_ports = nb_ports;
394 nb_fwd_ports = nb_ports;
398 set_def_fwd_config(void)
400 set_default_fwd_lcores_config();
401 set_def_peer_eth_addrs();
402 set_default_fwd_ports_config();
406 * Configuration initialisation done once at init time.
409 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
410 unsigned int socket_id)
412 char pool_name[RTE_MEMPOOL_NAMESIZE];
413 struct rte_mempool *rte_mp = NULL;
416 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
417 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
419 #ifdef RTE_LIBRTE_PMD_XENVIRT
420 rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
421 (unsigned) mb_mempool_cache,
422 sizeof(struct rte_pktmbuf_pool_private),
423 rte_pktmbuf_pool_init, NULL,
424 rte_pktmbuf_init, NULL,
428 /* if the former Xen allocation failed, fall back to normal allocation */
429 if (rte_mp == NULL) {
431 rte_mp = mempool_anon_create(pool_name, nb_mbuf,
432 mb_size, (unsigned) mb_mempool_cache,
433 sizeof(struct rte_pktmbuf_pool_private),
434 rte_pktmbuf_pool_init, NULL,
435 rte_pktmbuf_init, NULL,
438 /* wrapper to rte_mempool_create() */
439 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
440 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
443 if (rte_mp == NULL) {
444 rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
445 "failed\n", socket_id);
446 } else if (verbose_level > 0) {
447 rte_mempool_dump(stdout, rte_mp);
452 * Check whether the given socket id is valid in NUMA mode;
453 * return 0 if valid, -1 otherwise.
456 check_socket_id(const unsigned int socket_id)
458 static int warning_once = 0;
460 if (socket_id >= max_socket) {
461 if (!warning_once && numa_support)
462 printf("Warning: NUMA should be configured manually by"
463 " using --port-numa-config and"
464 " --ring-numa-config parameters along with"
476 struct rte_port *port;
477 struct rte_mempool *mbp;
478 unsigned int nb_mbuf_per_pool;
480 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
482 memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
483 /* Configuration of logical cores. */
484 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
485 sizeof(struct fwd_lcore *) * nb_lcores,
486 RTE_CACHE_LINE_SIZE);
487 if (fwd_lcores == NULL) {
488 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
489 "failed\n", nb_lcores);
491 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
492 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
493 sizeof(struct fwd_lcore),
494 RTE_CACHE_LINE_SIZE);
495 if (fwd_lcores[lc_id] == NULL) {
496 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
499 fwd_lcores[lc_id]->cpuid_idx = lc_id;
503 * Create pools of mbuf.
504 * If NUMA support is disabled, create a single pool of mbuf in
505 * socket 0 memory by default.
506 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
508 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
509 * nb_txd can be configured at run time.
511 if (param_total_num_mbufs)
512 nb_mbuf_per_pool = param_total_num_mbufs;
514 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
515 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
519 (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
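/*
 * Worked example of the sizing above (the constants are illustrative; the
 * real values come from testpmd.h): assuming RTE_TEST_RX_DESC_MAX and
 * RTE_TEST_TX_DESC_MAX are both 2048, MAX_PKT_BURST is 512,
 * mb_mempool_cache is 250 and nb_lcores is 4, the per-port estimate is
 * 2048 + 4 * 250 + 2048 + 512 = 5608 mbufs, which is then scaled by
 * RTE_MAX_ETHPORTS when a single shared pool is used.
 */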
523 if (socket_num == UMA_NO_CONFIG)
524 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
526 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
530 FOREACH_PORT(pid, ports) {
532 rte_eth_dev_info_get(pid, &port->dev_info);
535 if (port_numa[pid] != NUMA_NO_CONFIG)
536 port_per_socket[port_numa[pid]]++;
538 uint32_t socket_id = rte_eth_dev_socket_id(pid);
540 /* if socket_id is invalid, set to 0 */
541 if (check_socket_id(socket_id) < 0)
543 port_per_socket[socket_id]++;
547 /* set flag to initialize port/queue */
548 port->need_reconfig = 1;
549 port->need_reconfig_queues = 1;
554 unsigned int nb_mbuf;
556 if (param_total_num_mbufs)
557 nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
559 for (i = 0; i < max_socket; i++) {
560 nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
562 mbuf_pool_create(mbuf_data_size,
569 * Record which mbuf pool each logical core should use, if needed.
571 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
572 mbp = mbuf_pool_find(
573 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
576 mbp = mbuf_pool_find(0);
577 fwd_lcores[lc_id]->mbp = mbp;
580 /* Configuration of packet forwarding streams. */
581 if (init_fwd_streams() < 0)
582 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
587 reconfig(portid_t new_port_id, unsigned socket_id)
589 struct rte_port *port;
591 /* Reconfiguration of Ethernet ports. */
592 port = &ports[new_port_id];
593 rte_eth_dev_info_get(new_port_id, &port->dev_info);
595 /* set flag to initialize port/queue */
596 port->need_reconfig = 1;
597 port->need_reconfig_queues = 1;
598 port->socket_id = socket_id;
605 init_fwd_streams(void)
608 struct rte_port *port;
609 streamid_t sm_id, nb_fwd_streams_new;
612 /* set the socket id according to whether NUMA is enabled */
613 FOREACH_PORT(pid, ports) {
615 if (nb_rxq > port->dev_info.max_rx_queues) {
616 printf("Fail: nb_rxq(%d) is greater than "
617 "max_rx_queues(%d)\n", nb_rxq,
618 port->dev_info.max_rx_queues);
621 if (nb_txq > port->dev_info.max_tx_queues) {
622 printf("Fail: nb_txq(%d) is greater than "
623 "max_tx_queues(%d)\n", nb_txq,
624 port->dev_info.max_tx_queues);
628 if (port_numa[pid] != NUMA_NO_CONFIG)
629 port->socket_id = port_numa[pid];
631 port->socket_id = rte_eth_dev_socket_id(pid);
633 /* if socket_id is invalid, set to 0 */
634 if (check_socket_id(port->socket_id) < 0)
639 if (socket_num == UMA_NO_CONFIG)
642 port->socket_id = socket_num;
646 q = RTE_MAX(nb_rxq, nb_txq);
648 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
651 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
652 if (nb_fwd_streams_new == nb_fwd_streams)
655 if (fwd_streams != NULL) {
656 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
657 if (fwd_streams[sm_id] == NULL)
659 rte_free(fwd_streams[sm_id]);
660 fwd_streams[sm_id] = NULL;
662 rte_free(fwd_streams);
667 nb_fwd_streams = nb_fwd_streams_new;
668 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
669 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
670 if (fwd_streams == NULL)
671 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
672 "failed\n", nb_fwd_streams);
674 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
675 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
676 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
677 if (fwd_streams[sm_id] == NULL)
678 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
685 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
687 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
689 unsigned int total_burst;
690 unsigned int nb_burst;
691 unsigned int burst_stats[3];
692 uint16_t pktnb_stats[3];
694 int burst_percent[3];
697 * First compute the total number of packet bursts and the
698 * two highest numbers of bursts of the same number of packets.
701 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
702 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
703 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
704 nb_burst = pbs->pkt_burst_spread[nb_pkt];
707 total_burst += nb_burst;
708 if (nb_burst > burst_stats[0]) {
709 burst_stats[1] = burst_stats[0];
710 pktnb_stats[1] = pktnb_stats[0];
711 burst_stats[0] = nb_burst;
712 pktnb_stats[0] = nb_pkt;
715 if (total_burst == 0)
717 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
718 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
719 burst_percent[0], (int) pktnb_stats[0]);
720 if (burst_stats[0] == total_burst) {
724 if (burst_stats[0] + burst_stats[1] == total_burst) {
725 printf(" + %d%% of %d pkts]\n",
726 100 - burst_percent[0], pktnb_stats[1]);
729 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
730 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
731 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
732 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
735 printf(" + %d%% of %d pkts + %d%% of others]\n",
736 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
738 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
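/*
 * Worked example for pkt_burst_stats_display() above (numbers are
 * illustrative): if the spread recorded 100 bursts of 32 packets, 40 bursts
 * of 16 packets and 10 bursts of 1 packet, then total_burst = 150,
 * burst_stats[0]/pktnb_stats[0] end up as 100/32 and
 * burst_stats[1]/pktnb_stats[1] as 40/16, so the line printed is roughly
 * "[66% of 32 pkts + 26% of 16 pkts + 8% of others]".
 */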
741 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
743 struct rte_port *port;
746 static const char *fwd_stats_border = "----------------------";
748 port = &ports[port_id];
749 printf("\n %s Forward statistics for port %-2d %s\n",
750 fwd_stats_border, port_id, fwd_stats_border);
752 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
753 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
755 stats->ipackets, stats->imissed,
756 (uint64_t) (stats->ipackets + stats->imissed));
758 if (cur_fwd_eng == &csum_fwd_engine)
759 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
760 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
761 if ((stats->ierrors + stats->rx_nombuf) > 0) {
762 printf(" RX-error: %-"PRIu64"\n", stats->ierrors);
763 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
766 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
768 stats->opackets, port->tx_dropped,
769 (uint64_t) (stats->opackets + port->tx_dropped));
772 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
774 stats->ipackets, stats->imissed,
775 (uint64_t) (stats->ipackets + stats->imissed));
777 if (cur_fwd_eng == &csum_fwd_engine)
778 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
779 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
780 if ((stats->ierrors + stats->rx_nombuf) > 0) {
781 printf(" RX-error:%"PRIu64"\n", stats->ierrors);
782 printf(" RX-nombufs: %14"PRIu64"\n",
786 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
788 stats->opackets, port->tx_dropped,
789 (uint64_t) (stats->opackets + port->tx_dropped));
792 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
794 pkt_burst_stats_display("RX",
795 &port->rx_stream->rx_burst_stats);
797 pkt_burst_stats_display("TX",
798 &port->tx_stream->tx_burst_stats);
801 if (port->rx_queue_stats_mapping_enabled) {
803 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
804 printf(" Stats reg %2d RX-packets:%14"PRIu64
805 " RX-errors:%14"PRIu64
806 " RX-bytes:%14"PRIu64"\n",
807 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
811 if (port->tx_queue_stats_mapping_enabled) {
812 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
813 printf(" Stats reg %2d TX-packets:%14"PRIu64
814 " TX-bytes:%14"PRIu64"\n",
815 i, stats->q_opackets[i], stats->q_obytes[i]);
819 printf(" %s--------------------------------%s\n",
820 fwd_stats_border, fwd_stats_border);
824 fwd_stream_stats_display(streamid_t stream_id)
826 struct fwd_stream *fs;
827 static const char *fwd_top_stats_border = "-------";
829 fs = fwd_streams[stream_id];
830 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
831 (fs->fwd_dropped == 0))
833 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
834 "TX Port=%2d/Queue=%2d %s\n",
835 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
836 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
837 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
838 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
840 /* if checksum mode */
841 if (cur_fwd_eng == &csum_fwd_engine) {
842 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
843 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
846 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
847 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
848 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
853 flush_fwd_rx_queues(void)
855 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
863 for (j = 0; j < 2; j++) {
864 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
865 for (rxq = 0; rxq < nb_rxq; rxq++) {
866 port_id = fwd_ports_ids[rxp];
868 nb_rx = rte_eth_rx_burst(port_id, rxq,
869 pkts_burst, MAX_PKT_BURST);
870 for (i = 0; i < nb_rx; i++)
871 rte_pktmbuf_free(pkts_burst[i]);
875 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
880 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
882 struct fwd_stream **fsm;
886 fsm = &fwd_streams[fc->stream_idx];
887 nb_fs = fc->stream_nb;
889 for (sm_id = 0; sm_id < nb_fs; sm_id++)
890 (*pkt_fwd)(fsm[sm_id]);
891 } while (! fc->stopped);
895 start_pkt_forward_on_core(void *fwd_arg)
897 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
898 cur_fwd_config.fwd_eng->packet_fwd);
903 * Run the TXONLY packet forwarding engine to send a single burst of packets.
904 * Used to start communication flows in network loopback test configurations.
907 run_one_txonly_burst_on_core(void *fwd_arg)
909 struct fwd_lcore *fwd_lc;
910 struct fwd_lcore tmp_lcore;
912 fwd_lc = (struct fwd_lcore *) fwd_arg;
914 tmp_lcore.stopped = 1;
915 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
920 * Launch packet forwarding:
921 * - Setup per-port forwarding context.
922 * - launch logical cores with their forwarding configuration.
925 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
927 port_fwd_begin_t port_fwd_begin;
932 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
933 if (port_fwd_begin != NULL) {
934 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
935 (*port_fwd_begin)(fwd_ports_ids[i]);
937 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
938 lc_id = fwd_lcores_cpuids[i];
939 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
940 fwd_lcores[i]->stopped = 0;
941 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
942 fwd_lcores[i], lc_id);
944 printf("launch lcore %u failed - diag=%d\n",
951 * Launch packet forwarding configuration.
954 start_packet_forwarding(int with_tx_first)
956 port_fwd_begin_t port_fwd_begin;
957 port_fwd_end_t port_fwd_end;
958 struct rte_port *port;
963 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
964 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
966 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
967 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
969 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
970 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
971 (!nb_rxq || !nb_txq))
972 rte_exit(EXIT_FAILURE,
973 "Either rxq or txq are 0, cannot use %s fwd mode\n",
974 cur_fwd_eng->fwd_mode_name);
976 if (all_ports_started() == 0) {
977 printf("Not all ports were started\n");
980 if (test_done == 0) {
981 printf("Packet forwarding already started\n");
985 for (i = 0; i < nb_fwd_ports; i++) {
986 pt_id = fwd_ports_ids[i];
987 port = &ports[pt_id];
988 if (!port->dcb_flag) {
989 printf("In DCB mode, all forwarding ports must "
990 "be configured in this mode.\n");
994 if (nb_fwd_lcores == 1) {
995 printf("In DCB mode,the nb forwarding cores "
996 "should be larger than 1.\n");
1003 flush_fwd_rx_queues();
1006 rxtx_config_display();
1008 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1009 pt_id = fwd_ports_ids[i];
1010 port = &ports[pt_id];
1011 rte_eth_stats_get(pt_id, &port->stats);
1012 port->tx_dropped = 0;
1014 map_port_queue_stats_mapping_registers(pt_id, port);
1016 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1017 fwd_streams[sm_id]->rx_packets = 0;
1018 fwd_streams[sm_id]->tx_packets = 0;
1019 fwd_streams[sm_id]->fwd_dropped = 0;
1020 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1021 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1023 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1024 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1025 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1026 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1027 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1029 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1030 fwd_streams[sm_id]->core_cycles = 0;
1033 if (with_tx_first) {
1034 port_fwd_begin = tx_only_engine.port_fwd_begin;
1035 if (port_fwd_begin != NULL) {
1036 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1037 (*port_fwd_begin)(fwd_ports_ids[i]);
1039 launch_packet_forwarding(run_one_txonly_burst_on_core);
1040 rte_eal_mp_wait_lcore();
1041 port_fwd_end = tx_only_engine.port_fwd_end;
1042 if (port_fwd_end != NULL) {
1043 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1044 (*port_fwd_end)(fwd_ports_ids[i]);
1047 launch_packet_forwarding(start_pkt_forward_on_core);
1051 stop_packet_forwarding(void)
1053 struct rte_eth_stats stats;
1054 struct rte_port *port;
1055 port_fwd_end_t port_fwd_end;
1060 uint64_t total_recv;
1061 uint64_t total_xmit;
1062 uint64_t total_rx_dropped;
1063 uint64_t total_tx_dropped;
1064 uint64_t total_rx_nombuf;
1065 uint64_t tx_dropped;
1066 uint64_t rx_bad_ip_csum;
1067 uint64_t rx_bad_l4_csum;
1068 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1069 uint64_t fwd_cycles;
1071 static const char *acc_stats_border = "+++++++++++++++";
1073 if (all_ports_started() == 0) {
1074 printf("Not all ports were started\n");
1078 printf("Packet forwarding not started\n");
1081 printf("Telling cores to stop...");
1082 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1083 fwd_lcores[lc_id]->stopped = 1;
1084 printf("\nWaiting for lcores to finish...\n");
1085 rte_eal_mp_wait_lcore();
1086 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1087 if (port_fwd_end != NULL) {
1088 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1089 pt_id = fwd_ports_ids[i];
1090 (*port_fwd_end)(pt_id);
1093 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1096 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1097 if (cur_fwd_config.nb_fwd_streams >
1098 cur_fwd_config.nb_fwd_ports) {
1099 fwd_stream_stats_display(sm_id);
1100 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1101 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1103 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1105 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1108 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1109 tx_dropped = (uint64_t) (tx_dropped +
1110 fwd_streams[sm_id]->fwd_dropped);
1111 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1114 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1115 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1116 fwd_streams[sm_id]->rx_bad_ip_csum);
1117 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1121 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1122 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1123 fwd_streams[sm_id]->rx_bad_l4_csum);
1124 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1127 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1128 fwd_cycles = (uint64_t) (fwd_cycles +
1129 fwd_streams[sm_id]->core_cycles);
1134 total_rx_dropped = 0;
1135 total_tx_dropped = 0;
1136 total_rx_nombuf = 0;
1137 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1138 pt_id = fwd_ports_ids[i];
1140 port = &ports[pt_id];
1141 rte_eth_stats_get(pt_id, &stats);
1142 stats.ipackets -= port->stats.ipackets;
1143 port->stats.ipackets = 0;
1144 stats.opackets -= port->stats.opackets;
1145 port->stats.opackets = 0;
1146 stats.ibytes -= port->stats.ibytes;
1147 port->stats.ibytes = 0;
1148 stats.obytes -= port->stats.obytes;
1149 port->stats.obytes = 0;
1150 stats.imissed -= port->stats.imissed;
1151 port->stats.imissed = 0;
1152 stats.oerrors -= port->stats.oerrors;
1153 port->stats.oerrors = 0;
1154 stats.rx_nombuf -= port->stats.rx_nombuf;
1155 port->stats.rx_nombuf = 0;
1157 total_recv += stats.ipackets;
1158 total_xmit += stats.opackets;
1159 total_rx_dropped += stats.imissed;
1160 total_tx_dropped += port->tx_dropped;
1161 total_rx_nombuf += stats.rx_nombuf;
1163 fwd_port_stats_display(pt_id, &stats);
1165 printf("\n %s Accumulated forward statistics for all ports"
1167 acc_stats_border, acc_stats_border);
1168 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1170 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1172 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1173 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1174 if (total_rx_nombuf > 0)
1175 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1176 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1178 acc_stats_border, acc_stats_border);
1179 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1181 printf("\n CPU cycles/packet=%u (total cycles="
1182 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1183 (unsigned int)(fwd_cycles / total_recv),
1184 fwd_cycles, total_recv);
1186 printf("\nDone.\n");
1191 dev_set_link_up(portid_t pid)
1193 if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1194 printf("\nSet link up fail.\n");
1198 dev_set_link_down(portid_t pid)
1200 if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1201 printf("\nSet link down fail.\n");
1205 all_ports_started(void)
1208 struct rte_port *port;
1210 FOREACH_PORT(pi, ports) {
1212 /* Check if there is a port which is not started */
1213 if ((port->port_status != RTE_PORT_STARTED) &&
1214 (port->slave_flag == 0))
1218 /* No port is stopped; all ports are started */
1223 all_ports_stopped(void)
1226 struct rte_port *port;
1228 FOREACH_PORT(pi, ports) {
1230 if ((port->port_status != RTE_PORT_STOPPED) &&
1231 (port->slave_flag == 0))
1239 port_is_started(portid_t port_id)
1241 if (port_id_is_invalid(port_id, ENABLED_WARN))
1244 if (ports[port_id].port_status != RTE_PORT_STARTED)
1251 port_is_closed(portid_t port_id)
1253 if (port_id_is_invalid(port_id, ENABLED_WARN))
1256 if (ports[port_id].port_status != RTE_PORT_CLOSED)
1263 start_port(portid_t pid)
1265 int diag, need_check_link_status = -1;
1268 struct rte_port *port;
1269 struct ether_addr mac_addr;
1271 if (test_done == 0) {
1272 printf("Please stop forwarding first\n");
1276 if (port_id_is_invalid(pid, ENABLED_WARN))
1279 if (init_fwd_streams() < 0) {
1280 printf("Fail from init_fwd_streams()\n");
1286 FOREACH_PORT(pi, ports) {
1287 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1290 need_check_link_status = 0;
1292 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1293 RTE_PORT_HANDLING) == 0) {
1294 printf("Port %d is now not stopped\n", pi);
1298 if (port->need_reconfig > 0) {
1299 port->need_reconfig = 0;
1301 printf("Configuring Port %d (socket %u)\n", pi,
1303 /* configure port */
1304 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1307 if (rte_atomic16_cmpset(&(port->port_status),
1308 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1309 printf("Port %d can not be set back "
1310 "to stopped\n", pi);
1311 printf("Fail to configure port %d\n", pi);
1312 /* try to reconfigure port next time */
1313 port->need_reconfig = 1;
1317 if (port->need_reconfig_queues > 0) {
1318 port->need_reconfig_queues = 0;
1319 /* setup tx queues */
1320 for (qi = 0; qi < nb_txq; qi++) {
1321 if ((numa_support) &&
1322 (txring_numa[pi] != NUMA_NO_CONFIG))
1323 diag = rte_eth_tx_queue_setup(pi, qi,
1324 nb_txd,txring_numa[pi],
1327 diag = rte_eth_tx_queue_setup(pi, qi,
1328 nb_txd,port->socket_id,
1334 /* Fail to setup tx queue, return */
1335 if (rte_atomic16_cmpset(&(port->port_status),
1337 RTE_PORT_STOPPED) == 0)
1338 printf("Port %d can not be set back "
1339 "to stopped\n", pi);
1340 printf("Fail to configure port %d tx queues\n", pi);
1341 /* try to reconfigure queues next time */
1342 port->need_reconfig_queues = 1;
1345 /* setup rx queues */
1346 for (qi = 0; qi < nb_rxq; qi++) {
1347 if ((numa_support) &&
1348 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1349 struct rte_mempool * mp =
1350 mbuf_pool_find(rxring_numa[pi]);
1352 printf("Failed to setup RX queue:"
1353 "No mempool allocation"
1354 "on the socket %d\n",
1359 diag = rte_eth_rx_queue_setup(pi, qi,
1360 nb_rxd,rxring_numa[pi],
1361 &(port->rx_conf),mp);
1364 diag = rte_eth_rx_queue_setup(pi, qi,
1365 nb_rxd,port->socket_id,
1367 mbuf_pool_find(port->socket_id));
1373 /* Fail to setup rx queue, return */
1374 if (rte_atomic16_cmpset(&(port->port_status),
1376 RTE_PORT_STOPPED) == 0)
1377 printf("Port %d can not be set back "
1378 "to stopped\n", pi);
1379 printf("Fail to configure port %d rx queues\n", pi);
1380 /* try to reconfigure queues next time */
1381 port->need_reconfig_queues = 1;
1386 if (rte_eth_dev_start(pi) < 0) {
1387 printf("Fail to start port %d\n", pi);
1389 /* Fail to setup rx queue, return */
1390 if (rte_atomic16_cmpset(&(port->port_status),
1391 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1392 printf("Port %d can not be set back to "
1397 if (rte_atomic16_cmpset(&(port->port_status),
1398 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1399 printf("Port %d can not be set into started\n", pi);
1401 rte_eth_macaddr_get(pi, &mac_addr);
1402 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1403 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1404 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1405 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1407 /* at least one port started, need to check link status */
1408 need_check_link_status = 1;
1411 if (need_check_link_status == 1 && !no_link_check)
1412 check_all_ports_link_status(RTE_PORT_ALL);
1413 else if (need_check_link_status == 0)
1414 printf("Please stop the ports first\n");
1421 stop_port(portid_t pid)
1424 struct rte_port *port;
1425 int need_check_link_status = 0;
1427 if (test_done == 0) {
1428 printf("Please stop forwarding first\n");
1436 if (port_id_is_invalid(pid, ENABLED_WARN))
1439 printf("Stopping ports...\n");
1441 FOREACH_PORT(pi, ports) {
1442 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1446 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1447 RTE_PORT_HANDLING) == 0)
1450 rte_eth_dev_stop(pi);
1452 if (rte_atomic16_cmpset(&(port->port_status),
1453 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1454 printf("Port %d can not be set into stopped\n", pi);
1455 need_check_link_status = 1;
1457 if (need_check_link_status && !no_link_check)
1458 check_all_ports_link_status(RTE_PORT_ALL);
1464 close_port(portid_t pid)
1467 struct rte_port *port;
1469 if (test_done == 0) {
1470 printf("Please stop forwarding first\n");
1474 if (port_id_is_invalid(pid, ENABLED_WARN))
1477 printf("Closing ports...\n");
1479 FOREACH_PORT(pi, ports) {
1480 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1484 if (rte_atomic16_cmpset(&(port->port_status),
1485 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1486 printf("Port %d is already closed\n", pi);
1490 if (rte_atomic16_cmpset(&(port->port_status),
1491 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1492 printf("Port %d is now not stopped\n", pi);
1496 rte_eth_dev_close(pi);
1498 if (rte_atomic16_cmpset(&(port->port_status),
1499 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1500 printf("Port %d can not be set into stopped\n", pi);
1507 attach_port(char *identifier)
1509 portid_t i, j, pi = 0;
1511 printf("Attaching a new port...\n");
1513 if (identifier == NULL) {
1514 printf("Invalid parameters are specified\n");
1518 if (test_done == 0) {
1519 printf("Please stop forwarding first\n");
1523 if (rte_eth_dev_attach(identifier, &pi))
1526 ports[pi].enabled = 1;
1527 reconfig(pi, rte_eth_dev_socket_id(pi));
1528 rte_eth_promiscuous_enable(pi);
1530 nb_ports = rte_eth_dev_count();
1532 /* set_default_fwd_ports_config(); */
1533 memset(fwd_ports_ids, 0, sizeof(fwd_ports_ids));
1535 FOREACH_PORT(j, ports) {
1536 fwd_ports_ids[i] = j;
1539 nb_cfg_ports = nb_ports;
1542 ports[pi].port_status = RTE_PORT_STOPPED;
1544 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
1549 detach_port(uint8_t port_id)
1552 char name[RTE_ETH_NAME_MAX_LEN];
1554 printf("Detaching a port...\n");
1556 if (!port_is_closed(port_id)) {
1557 printf("Please close port first\n");
1561 if (rte_eth_dev_detach(port_id, name))
1564 ports[port_id].enabled = 0;
1565 nb_ports = rte_eth_dev_count();
1567 /* set_default_fwd_ports_config(); */
1568 memset(fwd_ports_ids, 0, sizeof(fwd_ports_ids));
1570 FOREACH_PORT(pi, ports) {
1571 fwd_ports_ids[i] = pi;
1574 nb_cfg_ports = nb_ports;
1577 printf("Port '%s' is detached. Now total ports is %d\n",
1589 stop_packet_forwarding();
1591 if (ports != NULL) {
1593 FOREACH_PORT(pt_id, ports) {
1594 printf("\nShutting down port %d...\n", pt_id);
1600 printf("\nBye...\n");
1603 typedef void (*cmd_func_t)(void);
1604 struct pmd_test_command {
1605 const char *cmd_name;
1606 cmd_func_t cmd_func;
1609 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1611 /* Check the link status of all ports for up to 9s, and print it at the end */
1613 check_all_ports_link_status(uint32_t port_mask)
1615 #define CHECK_INTERVAL 100 /* 100ms */
1616 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1617 uint8_t portid, count, all_ports_up, print_flag = 0;
1618 struct rte_eth_link link;
1620 printf("Checking link statuses...\n");
1622 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1624 FOREACH_PORT(portid, ports) {
1625 if ((port_mask & (1 << portid)) == 0)
1627 memset(&link, 0, sizeof(link));
1628 rte_eth_link_get_nowait(portid, &link);
1629 /* print link status if flag set */
1630 if (print_flag == 1) {
1631 if (link.link_status)
1632 printf("Port %d Link Up - speed %u "
1633 "Mbps - %s\n", (uint8_t)portid,
1634 (unsigned)link.link_speed,
1635 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1636 ("full-duplex") : ("half-duplex\n"));
1638 printf("Port %d Link Down\n",
1642 /* clear all_ports_up flag if any link down */
1643 if (link.link_status == ETH_LINK_DOWN) {
1648 /* after finally printing all link status, get out */
1649 if (print_flag == 1)
1652 if (all_ports_up == 0) {
1654 rte_delay_ms(CHECK_INTERVAL);
1657 /* set the print_flag if all ports up or timeout */
1658 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1665 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1669 uint8_t mapping_found = 0;
1671 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1672 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1673 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1674 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1675 tx_queue_stats_mappings[i].queue_id,
1676 tx_queue_stats_mappings[i].stats_counter_id);
1683 port->tx_queue_stats_mapping_enabled = 1;
1688 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1692 uint8_t mapping_found = 0;
1694 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1695 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1696 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1697 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1698 rx_queue_stats_mappings[i].queue_id,
1699 rx_queue_stats_mappings[i].stats_counter_id);
1706 port->rx_queue_stats_mapping_enabled = 1;
1711 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1715 diag = set_tx_queue_stats_mapping_registers(pi, port);
1717 if (diag == -ENOTSUP) {
1718 port->tx_queue_stats_mapping_enabled = 0;
1719 printf("TX queue stats mapping not supported port id=%d\n", pi);
1722 rte_exit(EXIT_FAILURE,
1723 "set_tx_queue_stats_mapping_registers "
1724 "failed for port id=%d diag=%d\n",
1728 diag = set_rx_queue_stats_mapping_registers(pi, port);
1730 if (diag == -ENOTSUP) {
1731 port->rx_queue_stats_mapping_enabled = 0;
1732 printf("RX queue stats mapping not supported port id=%d\n", pi);
1735 rte_exit(EXIT_FAILURE,
1736 "set_rx_queue_stats_mapping_registers "
1737 "failed for port id=%d diag=%d\n",
1743 rxtx_port_config(struct rte_port *port)
1745 port->rx_conf = port->dev_info.default_rxconf;
1746 port->tx_conf = port->dev_info.default_txconf;
1748 /* Check if any RX/TX parameters have been passed */
1749 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1750 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1752 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1753 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1755 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1756 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1758 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1759 port->rx_conf.rx_free_thresh = rx_free_thresh;
1761 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1762 port->rx_conf.rx_drop_en = rx_drop_en;
1764 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1765 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1767 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1768 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1770 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1771 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1773 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1774 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1776 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1777 port->tx_conf.tx_free_thresh = tx_free_thresh;
1779 if (txq_flags != RTE_PMD_PARAM_UNSET)
1780 port->tx_conf.txq_flags = txq_flags;
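/*
 * Example (hedged, typical usage): all of the values above stay at
 * RTE_PMD_PARAM_UNSET unless the matching command-line options are given,
 * e.g.
 *
 *	testpmd -c 0xf -n 4 -- --rxfreet=32 --txfreet=32 --txrst=32
 *
 * in which case only rx_free_thresh, tx_free_thresh and tx_rs_thresh
 * override the PMD defaults taken from dev_info.
 */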
1784 init_port_config(void)
1787 struct rte_port *port;
1789 FOREACH_PORT(pid, ports) {
1791 port->dev_conf.rxmode = rx_mode;
1792 port->dev_conf.fdir_conf = fdir_conf;
1794 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1795 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1797 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1798 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1801 if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1802 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1803 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1805 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1808 if (port->dev_info.max_vfs != 0) {
1809 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1810 port->dev_conf.rxmode.mq_mode =
1813 port->dev_conf.rxmode.mq_mode =
1816 port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
1819 rxtx_port_config(port);
1821 rte_eth_macaddr_get(pid, &port->eth_addr);
1823 map_port_queue_stats_mapping_registers(pid, port);
1824 #ifdef RTE_NIC_BYPASS
1825 rte_eth_dev_bypass_init(pid);
1830 void set_port_slave_flag(portid_t slave_pid)
1832 struct rte_port *port;
1834 port = &ports[slave_pid];
1835 port->slave_flag = 1;
1838 void clear_port_slave_flag(portid_t slave_pid)
1840 struct rte_port *port;
1842 port = &ports[slave_pid];
1843 port->slave_flag = 0;
1846 const uint16_t vlan_tags[] = {
1847 0, 1, 2, 3, 4, 5, 6, 7,
1848 8, 9, 10, 11, 12, 13, 14, 15,
1849 16, 17, 18, 19, 20, 21, 22, 23,
1850 24, 25, 26, 27, 28, 29, 30, 31
1854 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
1855 enum dcb_mode_enable dcb_mode,
1856 enum rte_eth_nb_tcs num_tcs,
1862 * Builds up the correct configuration for DCB+VT based on the vlan tags array
1863 * given above, and the number of traffic classes available for use.
1865 if (dcb_mode == DCB_VT_ENABLED) {
1866 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
1867 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
1868 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
1869 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
1871 /* VMDQ+DCB RX and TX configurations */
1872 vmdq_rx_conf->enable_default_pool = 0;
1873 vmdq_rx_conf->default_pool = 0;
1874 vmdq_rx_conf->nb_queue_pools =
1875 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1876 vmdq_tx_conf->nb_queue_pools =
1877 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1879 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
1880 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
1881 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
1882 vmdq_rx_conf->pool_map[i].pools =
1883 1 << (i % vmdq_rx_conf->nb_queue_pools);
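/*
 * Worked example: with num_tcs == ETH_4_TCS the code above selects
 * ETH_32_POOLS, so all 32 vlan_tags[] entries are mapped and entry i gets
 * its own pool (pool_map[i].pools == 1 << i). With ETH_8_TCS only
 * ETH_16_POOLS exist, so only the first 16 VLAN tags are mapped.
 */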
1885 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1886 vmdq_rx_conf->dcb_tc[i] = i;
1887 vmdq_tx_conf->dcb_tc[i] = i;
1890 /* set DCB mode of RX and TX of multiple queues */
1891 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1892 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1894 struct rte_eth_dcb_rx_conf *rx_conf =
1895 &eth_conf->rx_adv_conf.dcb_rx_conf;
1896 struct rte_eth_dcb_tx_conf *tx_conf =
1897 &eth_conf->tx_adv_conf.dcb_tx_conf;
1899 rx_conf->nb_tcs = num_tcs;
1900 tx_conf->nb_tcs = num_tcs;
1902 for (i = 0; i < num_tcs; i++) {
1903 rx_conf->dcb_tc[i] = i;
1904 tx_conf->dcb_tc[i] = i;
1906 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
1907 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
1908 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1912 eth_conf->dcb_capability_en =
1913 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
1915 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1921 init_port_dcb_config(portid_t pid,
1922 enum dcb_mode_enable dcb_mode,
1923 enum rte_eth_nb_tcs num_tcs,
1926 struct rte_eth_conf port_conf;
1927 struct rte_eth_dev_info dev_info;
1928 struct rte_port *rte_port;
1932 rte_eth_dev_info_get(pid, &dev_info);
1934 /* If dev_info.vmdq_pool_base is greater than 0,
1935 * the queue ids of the VMDQ pools start after the PF queues.
1937 if (dcb_mode == DCB_VT_ENABLED && dev_info.vmdq_pool_base > 0) {
1938 printf("VMDQ_DCB multi-queue mode is nonsensical"
1939 " for port %d.", pid);
1943 /* Assume the ports in testpmd have the same DCB capability
1944 * and the same number of rxq and txq in DCB mode
1946 if (dcb_mode == DCB_VT_ENABLED) {
1947 nb_rxq = dev_info.max_rx_queues;
1948 nb_txq = dev_info.max_tx_queues;
1950 /* if VT is disabled, use all PF queues */
1951 if (dev_info.vmdq_pool_base == 0) {
1952 nb_rxq = dev_info.max_rx_queues;
1953 nb_txq = dev_info.max_tx_queues;
1955 nb_rxq = (queueid_t)num_tcs;
1956 nb_txq = (queueid_t)num_tcs;
1960 rx_free_thresh = 64;
1962 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
1963 /* Enter DCB configuration status */
1966 /* set the DCB configuration for both VT and non-VT modes */
1967 retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
1971 rte_port = &ports[pid];
1972 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
1974 rxtx_port_config(rte_port);
1976 rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1977 for (i = 0; i < RTE_DIM(vlan_tags); i++)
1978 rx_vft_set(pid, vlan_tags[i], 1);
1980 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1981 map_port_queue_stats_mapping_registers(pid, rte_port);
1983 rte_port->dcb_flag = 1;
1993 /* Configuration of Ethernet ports. */
1994 ports = rte_zmalloc("testpmd: ports",
1995 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
1996 RTE_CACHE_LINE_SIZE);
1997 if (ports == NULL) {
1998 rte_exit(EXIT_FAILURE,
1999 "rte_zmalloc(%d struct rte_port) failed\n",
2003 /* enable allocated ports */
2004 for (pid = 0; pid < nb_ports; pid++)
2005 ports[pid].enabled = 1;
2016 signal_handler(int signum)
2018 if (signum == SIGINT || signum == SIGTERM) {
2019 printf("\nSignal %d received, preparing to exit...\n",
2022 /* exit with the expected status */
2023 signal(signum, SIG_DFL);
2024 kill(getpid(), signum);
2029 main(int argc, char** argv)
2034 signal(SIGINT, signal_handler);
2035 signal(SIGTERM, signal_handler);
2037 diag = rte_eal_init(argc, argv);
2039 rte_panic("Cannot init EAL\n");
2041 nb_ports = (portid_t) rte_eth_dev_count();
2043 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2045 /* allocate port structures, and init them */
2048 set_def_fwd_config();
2050 rte_panic("Empty set of forwarding logical cores - check the "
2051 "core mask supplied in the command parameters\n");
2056 launch_args_parse(argc, argv);
2058 if (!nb_rxq && !nb_txq)
2059 printf("Warning: Either rx or tx queues should be non-zero\n");
2061 if (nb_rxq > 1 && nb_rxq > nb_txq)
2062 printf("Warning: nb_rxq=%d enables RSS configuration, "
2063 "but nb_txq=%d will prevent to fully test it.\n",
2067 if (start_port(RTE_PORT_ALL) != 0)
2068 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2070 /* set all ports to promiscuous mode by default */
2071 FOREACH_PORT(port_id, ports)
2072 rte_eth_promiscuous_enable(port_id);
2074 #ifdef RTE_LIBRTE_CMDLINE
2075 if (interactive == 1) {
2077 printf("Start automatic packet forwarding\n");
2078 start_packet_forwarding(0);
2087 printf("No commandline core given, start packet forwarding\n");
2088 start_packet_forwarding(0);
2089 printf("Press enter to exit\n");
2090 rc = read(0, &c, 1);