/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>
#include <stdint.h>
#include <inttypes.h>
#include <errno.h>

#include <sys/types.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_PMD_XENVIRT
#include <rte_eth_xenvirt.h>
#endif

#include "testpmd.h"
uint16_t verbose_level = 0; /**< Silent by default. */

/* use master core for command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 0; /**< No numa support by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not set.
 */
uint8_t socket_num = UMA_NO_CONFIG;
/*
 * Use ANONYMOUS mapped memory (may not be physically contiguous) for mbufs.
 */
uint8_t mp_anon = 0;

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;
/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
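/*
 * Illustrative example (assuming the usual testpmd runtime commands):
 * with 8 enabled lcores, "set corelist 1,2" makes nb_cfg_lcores = 2, and a
 * later "set nb-cores 1" shrinks nb_fwd_lcores to 1, preserving
 * nb_fwd_lcores (1) <= nb_cfg_lcores (2) <= nb_lcores (8).
 */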
unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
/*
 * Forwarding engines.
 */
struct fwd_engine *fwd_engines[] = {
	&io_fwd_engine,
	&tx_only_engine,
	&csum_fwd_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
				      * specified on command-line. */
/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* whether the current configuration is in DCB mode; 0 means it is not */
uint8_t dcb_config = 0;

/* whether DCB test mode is enabled */
uint8_t dcb_test = 0;
/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
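/*
 * Note: pthresh/hthresh/wthresh are the prefetch, host and write-back
 * thresholds of struct rte_eth_thresh (see rte_ethdev.h); keeping them at
 * RTE_PMD_PARAM_UNSET leaves the PMD's own defaults untouched.
 */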
/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX queue flags.
 */
int32_t txq_flags = RTE_PMD_PARAM_UNSET;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Avoid checking the link status when starting/stopping ports.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * NIC bypass mode configuration options.
 */
#ifdef RTE_NIC_BYPASS

/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;

#endif
/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.header_split   = 0, /**< Header Split disabled. */
	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
};
struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0x0,
		.ipv4_mask = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
};
volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

unsigned max_socket = 0;

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);
/*
 * Find next enabled port
 */
portid_t
find_next_port(portid_t p, struct rte_port *ports, int size)
{
	if (ports == NULL)
		rte_exit(-EINVAL, "failed to find a next port id\n");

	while ((p < size) && (ports[p].enabled == 0))
		p++;
	return p;
}

/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		sock_num = rte_lcore_to_socket_id(i) + 1;
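		/* socket id + 1 acts as a socket count, so max_socket ends
		 * up as the number of CPU sockets seen (assuming socket ids
		 * are contiguous from 0) */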
		if (sock_num > max_socket) {
			if (sock_num > RTE_MAX_NUMA_NODES)
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			max_socket = sock_num;
		}
		if (!rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}
static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
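		/* e.g. port 3's default peer thus becomes 02:00:00:00:00:03
		 * (ETHER_LOCAL_ADMIN_ADDR is 0x02 and the array has static
		 * storage, so bytes 1-4 stay zero) */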
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;

	for (pt_id = 0; pt_id < nb_ports; pt_id++)
		fwd_ports_ids[pt_id] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
/*
 * Configuration initialisation done once at init time.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
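	/* mb_size is the full per-object footprint: the rte_mbuf header
	 * followed by the data room (priv_size is 0 in the calls below) */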
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	RTE_LOG(INFO, USER1,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

#ifdef RTE_LIBRTE_PMD_XENVIRT
	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
		(unsigned) mb_mempool_cache,
		sizeof(struct rte_pktmbuf_pool_private),
		rte_pktmbuf_pool_init, NULL,
		rte_pktmbuf_init, NULL,
		socket_id, 0);
#endif

	/* if the former XEN allocation failed, fall back to normal allocation */
	if (rte_mp == NULL) {
		if (mp_anon != 0) {
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, 0);

			if (rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
			} else {
				rte_pktmbuf_pool_init(rte_mp, NULL);
				rte_mempool_obj_iter(rte_mp,
						     rte_pktmbuf_init, NULL);
			}
		} else {
			/* wrapper to rte_mempool_create() */
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
		}
	}

	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}
/*
 * Check whether a given socket id is valid in NUMA mode.
 * Return 0 if valid, -1 otherwise.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (socket_id >= max_socket) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];

	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				 sizeof(struct fwd_lcore *) * nb_lcores,
				 RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
			 "failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
				 "failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	/*
	 * Create pools of mbufs.
	 * If NUMA support is disabled, create a single pool of mbufs in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbufs in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
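		/*
		 * Worked example, assuming the defaults in testpmd.h
		 * (RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX = 2048,
		 * DEF_MBUF_CACHE = 250, MAX_PKT_BURST = 512): with 4 lcores,
		 * 2048 + 4 * 250 + 2048 + 512 = 5608 mbufs per pool.
		 */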
		if (!numa_support)
			nb_mbuf_per_pool =
				(nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
	}

	if (!numa_support) {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_num);
	}
	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}
	if (numa_support) {
		uint8_t i;
		unsigned int nb_mbuf;

		if (param_total_num_mbufs)
			nb_mbuf_per_pool = nb_mbuf_per_pool / nb_ports;

		for (i = 0; i < max_socket; i++) {
			nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
			if (nb_mbuf)
				mbuf_pool_create(mbuf_data_size,
						 nb_mbuf, i);
		}
	}
	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
	}
	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
}
void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	struct rte_port *port;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];
	rte_eth_dev_info_get(new_port_id, &port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;

	init_port_config();
}
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
			       "max_rx_queues(%d)\n", nb_rxq,
			       port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
			       "max_tx_queues(%d)\n", nb_txq,
			       port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
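	/* one stream per (port, queue) pair; taking the larger of nb_rxq
	 * and nb_txq guarantees a stream for every RX and every TX queue */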
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams,
		RTE_CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
			 "failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream),
				RTE_CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
				 " failed\n");
	}

	return 0;
}
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;
	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) &&
	    (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64
		       "RX-total: %-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64
			       " Bad-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64
		       "TX-total: %-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	} else {
		printf("  RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64
		       " RX-total:%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64
			       " Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs:%14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64
		       " TX-total:%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i],
			       stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "    TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX-bad IP checksum: %-14u  RX-bad L4 checksum: "
		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}
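/*
 * Drain all RX queues of all forwarding ports. Two sweeps are made with a
 * 10 ms pause in between: packets still in flight inside the NIC during the
 * first sweep can land in the rings afterwards, and the second sweep
 * catches those stragglers.
 */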
static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t  port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);
				} while (nb_rx > 0);
			}
		}
		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
	}
}
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;

	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
	} while (! fc->stopped);
}

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}

/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
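	/* with the stop flag pre-set, the do/while loop inside
	 * run_pkt_fwd_on_lcore() runs exactly once, i.e. each stream
	 * transmits a single burst */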
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}
/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}
/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
	     strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
	    (!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			 "Either rxq or txq are 0, cannot use %s fwd mode\n",
			 cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return;
	}

	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
			       "must be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
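			/* each round sends one burst on every stream (the
			 * txonly engine runs with its stop flag pre-set) and
			 * waits for all lcores before the next round */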
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif
	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					     fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
			rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					     fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
			rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
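		/* port->stats holds the snapshot taken when forwarding was
		 * started, so subtracting it below turns the cumulative NIC
		 * counters into per-run deltas before they are displayed */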
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}
	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}
void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
		printf("\nFailed to set link up.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
		printf("\nFailed to set link down.\n");
}

static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	FOREACH_PORT(pi, ports) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
		    (port->slave_flag == 0))
			return 0;
	}

	/* all ports (ignoring bonding slaves) are started */
	return 1;
}

int
all_ports_stopped(void)
{
	portid_t pi;
	struct rte_port *port;

	FOREACH_PORT(pi, ports) {
		port = &ports[pi];
		if ((port->port_status != RTE_PORT_STOPPED) &&
		    (port->slave_flag == 0))
			return 0;
	}
	return 1;
}

int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;
	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;
	return 1;
}

static int
port_is_closed(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;
	if (ports[port_id].port_status != RTE_PORT_CLOSED)
		return 0;
	return 1;
}
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct ether_addr mac_addr;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	if (dcb_config)
		dcb_test = 1;
	FOREACH_PORT(pi, ports) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
					RTE_PORT_HANDLING) == 0) {
			printf("Port %d is not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			printf("Configuring Port %d (socket %u)\n", pi,
			       port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						     &(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
					RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
					       "to stopped\n", pi);
				printf("Failed to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				if ((numa_support) &&
				    (txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, txring_numa[pi],
						&(port->tx_conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, port->socket_id,
						&(port->tx_conf));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
					       "to stopped\n", pi);
				printf("Failed to configure port %d tx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup rx queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				if ((numa_support) &&
				    (rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool *mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
						       " no mempool allocated"
						       " on socket %d\n",
						       rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd, rxring_numa[pi],
						&(port->rx_conf), mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
						       " no mempool allocated"
						       " on socket %d\n",
						       port->socket_id);
						return -1;
					}
					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd, port->socket_id,
						&(port->rx_conf), mp);
				}

				if (diag == 0)
					continue;

				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
					       "to stopped\n", pi);
				printf("Failed to configure port %d rx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}
		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Failed to start port %d\n", pi);

			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d cannot be set back to "
				       "stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d cannot be set into started\n", pi);

		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
		       mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
		       mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
		       mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need to check link status */
		need_check_link_status = 1;
	}

	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}
	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	FOREACH_PORT(pi, ports) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
					RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d cannot be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	FOREACH_PORT(pi, ports) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			printf("Port %d is already closed\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is not stopped\n", pi);
			continue;
		}

		rte_eth_dev_close(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
	}

	printf("Done\n");
}
void
attach_port(char *identifier)
{
	portid_t pi = 0;
	unsigned int socket_id;

	printf("Attaching a new port...\n");

	if (identifier == NULL) {
		printf("Invalid parameters are specified\n");
		return;
	}

	if (rte_eth_dev_attach(identifier, &pi))
		return;

	ports[pi].enabled = 1;
	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
	/* if socket_id is invalid, set to 0 */
	if (check_socket_id(socket_id) < 0)
		socket_id = 0;
	reconfig(pi, socket_id);
	rte_eth_promiscuous_enable(pi);

	nb_ports = rte_eth_dev_count();

	ports[pi].port_status = RTE_PORT_STOPPED;

	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
	printf("Done\n");
}

void
detach_port(uint8_t port_id)
{
	char name[RTE_ETH_NAME_MAX_LEN];

	printf("Detaching a port...\n");

	if (!port_is_closed(port_id)) {
		printf("Please close port first\n");
		return;
	}

	if (rte_eth_dev_detach(port_id, name))
		return;

	ports[port_id].enabled = 0;
	nb_ports = rte_eth_dev_count();

	printf("Port '%s' is detached. Now total ports is %d\n",
	       name, nb_ports);
	printf("Done\n");
}

void
pmd_test_exit(void)
{
	portid_t pt_id;

	if (test_done == 0)
		stop_packet_forwarding();

	if (ports != NULL) {
		FOREACH_PORT(pt_id, ports) {
			printf("\nShutting down port %d...\n", pt_id);
			fflush(stdout);
			stop_port(pt_id);
			close_port(pt_id);
		}
	}
	printf("\nBye...\n");
}
typedef void (*cmd_func_t)(void);
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
/* Check the link status of all ports in up to 9 s, and print their status at the end */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint8_t portid, count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("Checking link statuses...\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		FOREACH_PORT(portid, ports) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up - speed %u "
					       "Mbps - %s\n", (uint8_t)portid,
					       (unsigned)link.link_speed,
					       (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					       ("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n",
					       (uint8_t)portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1))
			print_flag = 1;
	}
}
static int
set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
		    (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
					tx_queue_stats_mappings[i].queue_id,
					tx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->tx_queue_stats_mapping_enabled = 1;
	return 0;
}

static int
set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
		    (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
					rx_queue_stats_mappings[i].queue_id,
					rx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->rx_queue_stats_mapping_enabled = 1;
	return 0;
}

static void
map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
{
	int diag;

	diag = set_tx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->tx_queue_stats_mapping_enabled = 0;
			printf("TX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
				 "set_tx_queue_stats_mapping_registers "
				 "failed for port id=%d diag=%d\n",
				 pi, diag);
	}

	diag = set_rx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->rx_queue_stats_mapping_enabled = 0;
			printf("RX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
				 "set_rx_queue_stats_mapping_registers "
				 "failed for port id=%d diag=%d\n",
				 pi, diag);
	}
}
static void
rxtx_port_config(struct rte_port *port)
{
	port->rx_conf = port->dev_info.default_rxconf;
	port->tx_conf = port->dev_info.default_txconf;

	/* Check if any RX/TX parameters have been passed */
	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.pthresh = rx_pthresh;

	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.hthresh = rx_hthresh;

	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.wthresh = rx_wthresh;

	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_free_thresh = rx_free_thresh;

	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_drop_en = rx_drop_en;

	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.pthresh = tx_pthresh;

	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.hthresh = tx_hthresh;

	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.wthresh = tx_wthresh;

	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_rs_thresh = tx_rs_thresh;

	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_free_thresh = tx_free_thresh;

	if (txq_flags != RTE_PMD_PARAM_UNSET)
		port->tx_conf.txq_flags = txq_flags;
}
void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;

	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		port->dev_conf.rxmode = rx_mode;
		port->dev_conf.fdir_conf = fdir_conf;
		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
			else
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
		}

		if (port->dev_info.max_vfs != 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode =
					ETH_MQ_RX_VMDQ_RSS;
			else
				port->dev_conf.rxmode.mq_mode =
					ETH_MQ_RX_NONE;

			port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
		}

		rxtx_port_config(port);

		rte_eth_macaddr_get(pid, &port->eth_addr);

		map_port_queue_stats_mapping_registers(pid, port);
#ifdef RTE_NIC_BYPASS
		rte_eth_dev_bypass_init(pid);
#endif
	}
}
void set_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 1;
}

void clear_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 0;
}

uint8_t port_is_bonding_slave(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	return port->slave_flag;
}

const uint16_t vlan_tags[] = {
	 0,  1,  2,  3,  4,  5,  6,  7,
	 8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};
static int
get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan
	 * tags array given above, and the number of traffic classes
	 * available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
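			/* nb_pool_maps equals nb_queue_pools here, so
			 * i % nb_queue_pools is simply i: VLAN tag
			 * vlan_tags[i] maps 1:1 to pool i
			 * (e.g. VLAN 5 -> pool 5) */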
		}
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i;
			vmdq_tx_conf->dcb_tc[i] = i;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		for (i = 0; i < num_tcs; i++) {
			rx_conf->dcb_tc[i] = i;
			tx_conf->dcb_tc[i] = i;
		}
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
			ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
int
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
	struct rte_eth_conf port_conf;
	struct rte_eth_dev_info dev_info;
	struct rte_port *rte_port;
	int retval;
	uint16_t i;

	rte_eth_dev_info_get(pid, &dev_info);

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue id of vmdq pools is started after pf queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED && dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical"
		       " for port %d.", pid);
		return -1;
	}

	/* Assume the ports in testpmd have the same DCB capability
	 * and the same number of rxq and txq in DCB mode
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		nb_rxq = dev_info.max_rx_queues;
		nb_txq = dev_info.max_tx_queues;
	} else {
		/* if VT is disabled, use all PF queues */
		if (dev_info.vmdq_pool_base == 0) {
			nb_rxq = dev_info.max_rx_queues;
			nb_txq = dev_info.max_tx_queues;
		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;
		}
	}
	rx_free_thresh = 64;

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	/* set configuration of DCB in VT mode and DCB in non-VT mode */
	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;

	rte_port = &ports[pid];
	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}
static void
init_port(void)
{
	portid_t pid;

	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
			    RTE_CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE,
			 "rte_zmalloc(%d struct rte_port) failed\n",
			 RTE_MAX_ETHPORTS);
	}

	/* enable allocated ports */
	for (pid = 0; pid < nb_ports; pid++)
		ports[pid].enabled = 1;
}
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
		       signum);
		/* exit with the expected status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
	}
}
int
main(int argc, char** argv)
{
	int diag;
	uint8_t port_id;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

	nb_ports = (portid_t) rte_eth_dev_count();
	if (nb_ports == 0)
		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (!nb_rxq && !nb_txq)
		printf("Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent it from being fully tested.\n",
		       nb_rxq, nb_txq);

	init_config();
	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	FOREACH_PORT(port_id, ports)
		rte_eth_promiscuous_enable(port_id);

#ifdef RTE_LIBRTE_CMDLINE
	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
	} else
#endif
	{
		char c;
		int rc;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(0);
		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	return 0;
}